hip_filename (string, 5–84) | hip_content (string, 79–9.69M) | cuda_filename (string, 4–83) | cuda_content (string, 19–9.69M)
---|---|---|---
blocksparse_l2_norm_op_gpu.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#if GOOGLE_CUDA
#define EIGEN_USE_GPU
#include "ew_op_gpu.h"
__device__ __forceinline__ int div16(int numerator, int magic, int shift)
{
int res;
asm("vmad.s32.u32.u32 %0, %1.h0, %2.h0, 0;" : "=r"(res) : "r"(numerator), "r"(magic));
return res >> shift;
}
__device__ __forceinline__ int mod16(int numerator, int div, int maxdiv)
{
int res;
asm("vmad.s32.u32.u32 %0, -%1.h0, %2.h0, %3;" : "=r"(res) : "r"(div), "r"(maxdiv), "r"(numerator));
return res;
}
__device__ __forceinline__ int mad16(int a, int b, int c)
{
int res;
asm("vmad.s32.u32.u32 %0, %1.h0, %2.h0, %3;" : "=r"(res) : "r"(a), "r"(b), "r"(c));
return res;
}
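// Editorial sketch (not part of the original op): div16/mod16/mad16 above implement "magic number"
// division by the fixed divisor TRS, i.e. n / d is evaluated as (n * magic) >> shift using 16-bit
// half-word multiplies (the ".h0" selectors), so the numerator, divisor and magic value must all
// fit in 16 bits. The magic_TRS/shift_TRS kernel arguments are assumed to be precomputed on the
// host, e.g. along these lines (hypothetical helper, shown for documentation only):
static inline bool magic16_sketch(unsigned int nmax, unsigned int d, unsigned int* magic, unsigned int* shift)
{
    for (unsigned int p = 0; p < 32; ++p)
    {
        unsigned long long m   = ((1ull << p) + d - 1) / d;   // ceil(2^p / d)
        unsigned long long err = m * d - (1ull << p);          // (d - 2^p mod d) mod d
        // (n * m) >> p == n / d for all n < nmax when nmax * err < 2^p; m must also fit in 16 bits
        if (err * nmax < (1ull << p) && m <= 0xFFFF)
        {
            *magic = (unsigned int)m;
            *shift = p;
            return true;
        }
    }
    return false; // no 16-bit magic exists for this (nmax, d) pair
}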
// y = gain * x / sqrt(max(sum(x**2), epsilon))
template <typename TY, typename TX>
__global__ void __launch_bounds__(32) l2_normalize_KCTRS(
TY* Y,
float* S,
const TX* __restrict__ X,
const float* __restrict__ G,
const int2* __restrict__ Lut,
float epsilon, int apply_gain)
{
int tid = threadIdx.x;
int k = blockIdx.x;
int2 block_data = Lut[k];
float gain = 1.0f;
if (apply_gain) gain = G[k];
int offset = block_data.x + tid; // block_F + idx_k * CTRS + tid
int CTRS = block_data.y; // block_C * TRS
const TX* X1 = X + offset;
const TX* X2 = X + offset;
Y += offset;
// sum_sqr_x = sum(x**2)
float sum_sqr_x = 0.0f;
for (int i = tid; i < CTRS; i += 32)
{
float x = load(X1);
X1 += 32;
sum_sqr_x += x * x;
}
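// butterfly reduction (editorial comment): xor-shuffles with offsets 16, 8, 4, 2, 1 leave every
// lane of the warp holding the full sum of sum_sqr_x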
#pragma unroll
for (int i = 16; i > 0; i >>= 1)
sum_sqr_x += __shfl_xor(sum_sqr_x, i);
// store reduction for gradient pass
if (tid == 0)
store(S, sum_sqr_x, k);
// rnorm = 1.0f / sqrt(max(sum_sqr_x, epsilon)) * gain
float rnorm = rsqrtf(fmaxf(sum_sqr_x, epsilon)) * gain;
// y = x * rnorm
for (int i = tid; i < CTRS; i += 32)
{
float x = load(X2);
store(Y, x * rnorm);
X2 += 32;
Y += 32;
}
}
// y = gain * x / sqrt(max(sum(x**2), epsilon))
template <typename TY, typename TX>
__global__ void __launch_bounds__(32) l2_normalize_CKTRS(
TY* Y,
float* S,
const TX* __restrict__ X,
const float* __restrict__ G,
const int4* __restrict__ Lut,
float epsilon, int apply_gain, int TRS, int magic_TRS, int shift_TRS)
{
int tid = threadIdx.x;
int k = blockIdx.x;
int4 block_data = Lut[k];
float gain = 1.0f;
if (apply_gain) gain = G[k];
int idx_k = block_data.x;
int CTRS = block_data.y;
int KTRS = block_data.z;
int block_F = block_data.w;
int offset_F = block_F + idx_k * TRS;
const TX* X1 = X + offset_F;
const TX* X2 = X + offset_F;
Y += offset_F;
// sum_sqr_x = sum(x**2)
float sum_sqr_x = 0.0f;
for (int ctrs = tid; ctrs < CTRS; ctrs += 32)
{
// c = i / TRS;
// trs = i % TRS;
// offset = c * KTRS + trs
int c = div16(ctrs, magic_TRS, shift_TRS);
int trs = mod16(ctrs, c, TRS);
int offset = mad16(c, KTRS, trs);
float x = load(X1, offset);
sum_sqr_x += x * x;
}
#pragma unroll
for (int i = 16; i > 0; i >>= 1)
sum_sqr_x += __shfl_xor(sum_sqr_x, i);
// store reduction for gradient pass
if (tid == 0)
store(S, sum_sqr_x, k);
// rnorm = 1.0f / sqrt(max(sum_sqr_x, epsilon)) * gain
float rnorm = rsqrtf(fmaxf(sum_sqr_x, epsilon)) * gain;
// y = x * rnorm
for (int ctrs = tid; ctrs < CTRS; ctrs += 32)
{
int c = div16(ctrs, magic_TRS, shift_TRS);
int trs = mod16(ctrs, c, TRS);
int offset = mad16(c, KTRS, trs);
float x = load(X2, offset);
store(Y, x * rnorm, offset);
}
}
// y = gain * x / sqrt(max(sum(x**2), epsilon))
template <typename TY, typename TX>
__global__ void __launch_bounds__(128) l2_normalize_CK_32(
TY* Y,
float* S,
const TX* __restrict__ X,
const float* __restrict__ G,
const int* __restrict__ Lut,
float epsilon, int apply_gain)
{
extern __shared__ int iShare[]; // 96 + max(lut_size)
extern __shared__ float fShare[]; // 96 + max(lut_size)
int tid = threadIdx.x;
int idx_L = blockIdx.x;
int4 lut_head = ((const int4*)Lut)[idx_L];
// unpack lut header
int lut_offset = lut_head.x;
int lut_size = lut_head.y;
int idx_K = lut_head.z;
int k = idx_K*32 + (tid & 31);
float gain = 1.0f;
if (apply_gain) gain = G[k];
Lut += lut_offset;
#pragma unroll 1
for (int i = tid; i < lut_size; i += 128)
iShare[i + 96] = Lut[i] * 32 * 32;
__syncthreads();
// sum_sqr_x = sum(x**2)
float sum_sqr_x = 0.0f;
#pragma unroll 1
for (int i = 0; i < lut_size; i++)
{
const TX* X1 = X + iShare[i + 96] + tid;
#pragma unroll
for (int j = 0; j < 8; j++)
{
float x = load(X1, j*128);
sum_sqr_x += x * x;
}
}
// reduce sum_sqr_x across the 4 warps
if (tid >= 32)
fShare[tid-32] = sum_sqr_x;
__syncthreads();
if (tid < 32)
{
sum_sqr_x += fShare[tid] + fShare[tid + 32] + fShare[tid + 64];
fShare[tid] = sum_sqr_x;
// store reduction for gradient pass
store(S, sum_sqr_x, k);
}
__syncthreads();
// get the final reduced value for all warps:
sum_sqr_x = fShare[tid & 31];
// rnorm = 1.0f / sqrt(max(sum_sqr_x, epsilon)) * gain
float rnorm = rsqrtf(fmaxf(sum_sqr_x, epsilon)) * gain;
// y = x * rnorm
#pragma unroll 1
for (int i = 0; i < lut_size; i++)
{
int block_offset = iShare[i + 96];
const TX* X2 = X + block_offset + tid;
TY* Y2 = Y + block_offset + tid;
#pragma unroll
for (int j = 0; j < 8; j++)
{
float x = load(X2, j*128);
store(Y2, x * rnorm, j*128);
}
}
}
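// Editorial note on shared memory sizing: in l2_normalize_CK_32 the first 96 floats of dynamic
// shared memory stage the partial sums of warps 1-3 (tid 32..127 write fShare[tid-32]) and the
// block-offset lut starts at iShare[96]; this is why the CK launcher below requests shared + 96*4 bytes.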
// y = gain * x / sqrt(max(sum(x**2), epsilon))
template <typename TY, typename TX>
__global__ void __launch_bounds__(32) l2_normalize_CK_16(
TY* Y,
float* S,
const TX* __restrict__ X,
const float* __restrict__ G,
const int* __restrict__ Lut,
float epsilon, int apply_gain)
{
extern __shared__ int lut[]; // max(lut_size)
int tid = threadIdx.x;
int idx_L = blockIdx.x;
int4 lut_head = ((const int4*)Lut)[idx_L];
// unpack lut header
int lut_offset = lut_head.x;
int lut_size = lut_head.y;
int idx_K = lut_head.z;
int k = idx_K*16 + (tid & 15);
float gain = 1.0f;
if (apply_gain) gain = G[k];
Lut += lut_offset;
#pragma unroll 1
for (int i = tid; i < lut_size; i += 32)
lut[i] = Lut[i] * 16 * 16;
// sum_sqr_x = sum(x**2)
float sum_sqr_x = 0.0f;
#pragma unroll 1
for (int i = 0; i < lut_size; i++)
{
const TX* X0 = X + lut[i] + tid;
#pragma unroll
for (int j = 0; j < 8; j++)
{
float x = load(X0, j*32);
sum_sqr_x += x * x;
}
}
// reduce sum_sqr_x across the 4 rows of the warp
sum_sqr_x += __shfl_xor(sum_sqr_x, 16);
store(S, sum_sqr_x, k, tid < 16);
// rnorm = 1.0f / sqrt(max(sum_sqr_x, epsilon)) * gain
float rnorm = rsqrtf(fmaxf(sum_sqr_x, epsilon)) * gain;
// y = x * rnorm
#pragma unroll 1
for (int i = 0; i < lut_size; i++)
{
int block_offset = lut[i];
const TX* X0 = X + block_offset + tid;
TY* Y0 = Y + block_offset + tid;
#pragma unroll
for (int j = 0; j < 8; j++)
{
float x = load(X0, j*32);
store(Y0, x * rnorm, j*32);
}
}
}
// y = gain * x / sqrt(max(sum(x**2), epsilon))
template <typename TY, typename TX>
__global__ void __launch_bounds__(32) l2_normalize_CK_8(
TY* Y,
float* S,
const TX* __restrict__ X,
const float* __restrict__ G,
const int* __restrict__ Lut,
float epsilon, int apply_gain)
{
extern __shared__ int lut[]; // max(lut_size)
int tid = threadIdx.x;
int idx_L = blockIdx.x;
int4 lut_head = ((const int4*)Lut)[idx_L];
// unpack lut header
int lut_offset = lut_head.x;
int lut_size = lut_head.y;
int idx_K = lut_head.z;
int k = idx_K*8 + (tid & 7);
float gain = 1.0f;
if (apply_gain) gain = G[k];
Lut += lut_offset;
#pragma unroll 1
for (int i = tid; i < lut_size; i += 32)
lut[i] = Lut[i] * 8 * 8;
// sum_sqr_x = sum(x**2)
float sum_sqr_x = 0.0f;
#pragma unroll 1
for (int i = 0; i < lut_size; i++)
{
const TX* X0 = X + lut[i] + tid;
float x0 = load(X0, 0*32);
float x1 = load(X0, 1*32);
sum_sqr_x += x0 * x0 + x1 * x1;
}
// reduce sum_sqr_x across the 4 rows of the warp
sum_sqr_x += __shfl_xor(sum_sqr_x, 16);
sum_sqr_x += __shfl_xor(sum_sqr_x, 8);
store(S, sum_sqr_x, k, tid < 8);
// rnorm = 1.0f / sqrt(max(sum_sqr_x, epsilon)) * gain
float rnorm = rsqrtf(fmaxf(sum_sqr_x, epsilon)) * gain;
// y = x * rnorm
#pragma unroll 1
for (int i = 0; i < lut_size; i++)
{
int block_offset = lut[i];
const TX* X0 = X + block_offset + tid;
TY* Y0 = Y + block_offset + tid;
float x0 = load(X0, 0*32);
float x1 = load(X0, 1*32);
store(Y0, x0 * rnorm, 0*32);
store(Y0, x1 * rnorm, 1*32);
}
}
template <typename TY, typename TX>
bool L2NormalizeKCTRS(hipStream_t stream, TY* y, float* sum_sqr_x, const TX* x, const float* g, const int* lut, float epsilon, int K)
{
dim3 grid(K, 1, 1);
dim3 block(32, 1, 1);
hipLaunchKernelGGL(( l2_normalize_KCTRS<TY,TX>), dim3(grid), dim3(block), 0, stream, y, sum_sqr_x, x, g, (const int2*)lut, epsilon, g != 0);
return true; // TODO
}
template <typename TY, typename TX>
bool L2NormalizeCKTRS(hipStream_t stream, TY* y, float* sum_sqr_x, const TX* x, const float* g, const int* lut, float epsilon, int K, int TRS, int magic_TRS, int shift_TRS)
{
dim3 grid(K, 1, 1);
dim3 block(32, 1, 1);
hipLaunchKernelGGL(( l2_normalize_CKTRS<TY,TX>), dim3(grid), dim3(block), 0, stream, y, sum_sqr_x, x, g, (const int4*)lut, epsilon, g != 0, TRS, magic_TRS, shift_TRS);
return true; // TODO
}
template <typename TY, typename TX>
bool L2NormalizeCK(hipStream_t stream, TY* y, float* sum_sqr_x, const TX* x, const float* g, const int* lut, float epsilon, int K, int shared, int bsize)
{
if (bsize == 32)
{
dim3 grid(K>>5, 1, 1);
dim3 block(128, 1, 1);
hipLaunchKernelGGL(( l2_normalize_CK_32<TY,TX>), dim3(grid), dim3(block), shared+96*4, stream, y, sum_sqr_x, x, g, lut, epsilon, g != 0);
}
else if (bsize == 16)
{
dim3 grid(K>>4, 1, 1);
dim3 block(32, 1, 1);
hipLaunchKernelGGL(( l2_normalize_CK_16<TY,TX>), dim3(grid), dim3(block), shared, stream, y, sum_sqr_x, x, g, lut, epsilon, g != 0);
}
else // if (bsize == 8)
{
dim3 grid(K>>3, 1, 1);
dim3 block(32, 1, 1);
hipLaunchKernelGGL(( l2_normalize_CK_8<TY,TX>), dim3(grid), dim3(block), shared, stream, y, sum_sqr_x, x, g, lut, epsilon, g != 0);
}
return true; // TODO
}
/////////////////////////////////////// Gradients ///////////////////////////////////////////
// sum_sqr_x = sum(x**2)
// norm_x = sqrt(maximum(sum_sqr_x, epsilon))
// grad_x = ( grad_y*g + x * (sum_sqr_x >= epsilon) * sum(-grad_y*g * x / norm_x**2) ) / norm_x
// grad_g = sum(grad_y * l2_norm(x))
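// Editorial derivation sketch (restating the formulas above): with n = max(sum(x**2), epsilon)
// and y_i = g * x_i / sqrt(n), the chain rule gives, for sum(x**2) >= epsilon (where dn/dx_j = 2*x_j),
//   grad_x_j = g*grad_y_j / sqrt(n) - x_j / n^(3/2) * sum_i(g*grad_y_i*x_i)
//            = ( g*grad_y_j + x_j * sum_i(-g*grad_y_i*x_i) / n ) / sqrt(n)
// When the max() clamps n to epsilon, dn/dx = 0 and the second term vanishes, which is what the
// (sum_sqr_x >= epsilon) factor implements. Likewise grad_g = sum_i(grad_y_i * x_i / sqrt(n)),
// i.e. sum(grad_y * l2_norm(x)) as stated above.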
template <typename TY, typename TX>
__global__ void __launch_bounds__(32) l2_normalize_grad_KCTRS(
TX* DX,
float* DG,
const TY* __restrict__ DY,
const TX* __restrict__ X,
const float* __restrict__ G,
const float* __restrict__ S,
const int2* __restrict__ Lut,
float epsilon, int apply_gain)
{
int tid = threadIdx.x;
int k = blockIdx.x;
int2 block_data = Lut[k];
float gain = 1.0f;
if (apply_gain) gain = G[k];
int offset = block_data.x + tid; // block_F + idx_k * CTRS + tid
int CTRS = block_data.y; // block_C * TRS
const TX* X1 = X + offset;
const TX* X2 = X + offset;
const TY* DY1 = DY + offset;
const TY* DY2 = DY + offset;
DX += offset;
float sum_sqr_x = S[k];
float max_sum_sqr_x = fmaxf(sum_sqr_x, epsilon);
float norm_xi = rsqrtf(max_sum_sqr_x);
float norm_x2i = 1.0f / max_sum_sqr_x;
// sum(-d * x / norm_x**2)
float red_val = 0.0f;
float dg = 0.0f;
for (int i = tid; i < CTRS; i += 32)
{
float dy = load(DY1);
float x = load(X1);
DY1 += 32;
X1 += 32;
dg += dy * x * norm_xi;
red_val += (-dy * x * gain) * norm_x2i;
}
#pragma unroll
for (int i = 16; i > 0; i >>= 1)
{
red_val += __shfl_xor(red_val, i);
dg += __shfl_xor(dg, i);
}
if (apply_gain && tid == 0)
DG[k] = dg;
red_val *= sum_sqr_x >= epsilon;
for (int i = tid; i < CTRS; i += 32)
{
float dy = load(DY2);
float x = load(X2);
float dx = dy * gain + x * red_val;
store(DX, dx * norm_xi, 0);
DY2 += 32;
X2 += 32;
DX += 32;
}
}
// sum_sqr_x = sum(x**2)
// norm_x = sqrt(maximum(sum_sqr_x, epsilon))
// grad_x = ( grad_y*g + x * (sum_sqr_x >= epsilon) * sum(-grad_y*g * x / norm_x**2) ) / norm_x
// grad_g = sum(grad_y * l2_norm(x))
template <typename TY, typename TX>
__global__ void __launch_bounds__(32) l2_normalize_grad_CKTRS(
TX* DX,
float* DG,
const TY* __restrict__ DY,
const TX* __restrict__ X,
const float* __restrict__ G,
const float* __restrict__ S,
const int4* __restrict__ Lut,
float epsilon, int apply_gain, int TRS, int magic_TRS, int shift_TRS)
{
int tid = threadIdx.x;
int k = blockIdx.x;
int4 block_data = Lut[k];
float gain = 1.0f;
if (apply_gain) gain = G[k];
int idx_k = block_data.x;
int CTRS = block_data.y;
int KTRS = block_data.z;
int block_F = block_data.w;
int offset_F = block_F + idx_k * TRS;
const TX* X1 = X + offset_F;
const TX* X2 = X + offset_F;
const TY* DY1 = DY + offset_F;
const TY* DY2 = DY + offset_F;
DX += offset_F;
float sum_sqr_x = S[k];
float max_sum_sqr_x = fmaxf(sum_sqr_x, epsilon);
float norm_xi = rsqrtf(max_sum_sqr_x);
float norm_x2i = 1.0f / max_sum_sqr_x;
// sum(-d * x / norm_x**2)
float red_val = 0.0f;
float dg = 0.0f;
for (int ctrs = tid; ctrs < CTRS; ctrs += 32)
{
// c = i / TRS;
// trs = i % TRS;
// offset = c * KTRS + trs
int c = div16(ctrs, magic_TRS, shift_TRS);
int trs = mod16(ctrs, c, TRS);
int offset = mad16(c, KTRS, trs);
float x = load( X1, offset);
float dy = load(DY1, offset);
dg += dy * x * norm_xi;
red_val += (-dy * x * gain) * norm_x2i;
}
#pragma unroll
for (int i = 16; i > 0; i >>= 1)
{
red_val += __shfl_xor(red_val, i);
dg += __shfl_xor(dg, i);
}
if (apply_gain && tid == 0)
DG[k] = dg;
red_val *= sum_sqr_x >= epsilon;
for (int ctrs = tid; ctrs < CTRS; ctrs += 32)
{
int c = div16(ctrs, magic_TRS, shift_TRS);
int trs = mod16(ctrs, c, TRS);
int offset = mad16(c, KTRS, trs);
float x = load( X2, offset);
float dy = load(DY2, offset);
float dx = dy * gain + x * red_val;
store(DX, dx * norm_xi, offset);
}
}
// sum_sqr_x = sum(x**2)
// norm_x = sqrt(maximum(sum_sqr_x, epsilon))
// grad_x = ( grad_y*g + x * (sum_sqr_x >= epsilon) * sum(-grad_y*g * x / norm_x**2) ) / norm_x
// grad_g = sum(grad_y * l2_norm(x))
template <typename TY, typename TX>
__global__ void __launch_bounds__(128) l2_normalize_grad_CK_32(
TX* DX,
float* DG,
const TY* __restrict__ DY,
const TX* __restrict__ X,
const float* __restrict__ G,
const float* __restrict__ S,
const int* __restrict__ Lut,
float epsilon, int apply_gain)
{
extern __shared__ float fShare[]; // 96*2 + max(lut_size)
extern __shared__ int iShare[]; // 96*2 + max(lut_size)
float* redShare1 = &fShare[96*0];
float* redShare2 = &fShare[96*1];
int* lutShare = &iShare[96*2];
int tid = threadIdx.x;
int idx_L = blockIdx.x;
int4 lut_head = ((const int4*)Lut)[idx_L];
// unpack lut header
int lut_offset = lut_head.x;
int lut_size = lut_head.y;
int idx_K = lut_head.z;
int k = idx_K*32 + (tid & 31);
float gain = 1.0f;
if (apply_gain) gain = G[k];
float sum_sqr_x = S[k];
Lut += lut_offset;
#pragma unroll 1
for (int i = tid; i < lut_size; i += 128)
lutShare[i] = Lut[i] * 32 * 32;
__syncthreads();
float max_sum_sqr_x = fmaxf(sum_sqr_x, epsilon);
float norm_xi = rsqrtf(max_sum_sqr_x);
float norm_x2i = 1.0f / max_sum_sqr_x;
float red_val = 0.0f;
float dg = 0.0f;
#pragma unroll 1
for (int i = 0; i < lut_size; i++)
{
int offset = lutShare[i] + tid;
const TY* DY1 = DY + offset;
const TX* X1 = X + offset;
#pragma unroll
for (int j = 0; j < 8; j++)
{
float x = load( X1, j*128);
float dy = load(DY1, j*128);
red_val += (-dy * gain * x) * norm_x2i;
dg += dy * x * norm_xi;
}
}
// reduce red_val across the 4 warps
if (tid >= 32)
{
redShare1[tid-32] = red_val;
redShare2[tid-32] = dg;
}
__syncthreads();
if (tid < 32)
{
red_val += redShare1[tid] + redShare1[tid + 32] + redShare1[tid + 64];
dg += redShare2[tid] + redShare2[tid + 32] + redShare2[tid + 64];
redShare1[tid] = red_val;
if (apply_gain)
DG[k] = dg;
}
__syncthreads();
// get the final reduced value for all warps:
red_val = redShare1[tid & 31];
red_val *= sum_sqr_x >= epsilon;
#pragma unroll 1
for (int i = 0; i < lut_size; i++)
{
int offset = lutShare[i] + tid;
TX* DX2 = DX + offset;
const TY* DY2 = DY + offset;
const TX* X2 = X + offset;
#pragma unroll
for (int j = 0; j < 8; j++)
{
float x = load( X2, j*128);
float dy = load(DY2, j*128);
float dx = dy * gain + x * red_val;
store(DX2, dx * norm_xi, j*128);
}
}
}
// sum_sqr_x = sum(x**2)
// norm_x = sqrt(maximum(sum_sqr_x, epsilon))
// grad_x = ( grad_y*g + x * (sum_sqr_x >= epsilon) * sum(-grad_y*g * x / norm_x**2) ) / norm_x
// grad_g = sum(grad_y * l2_norm(x))
template <typename TY, typename TX>
__global__ void __launch_bounds__(32) l2_normalize_grad_CK_16(
TX* DX,
float* DG,
const TY* __restrict__ DY,
const TX* __restrict__ X,
const float* __restrict__ G,
const float* __restrict__ S,
const int* __restrict__ Lut,
float epsilon, int apply_gain)
{
extern __shared__ int lut[]; // max(lut_size)
int tid = threadIdx.x;
int idx_L = blockIdx.x;
int4 lut_head = ((const int4*)Lut)[idx_L];
// unpack lut header
int lut_offset = lut_head.x;
int lut_size = lut_head.y;
int idx_K = lut_head.z;
int k = idx_K*16 + (tid & 15);
float gain = 1.0f;
if (apply_gain) gain = G[k];
float sum_sqr_x = S[k];
Lut += lut_offset;
#pragma unroll 1
for (int i = tid; i < lut_size; i += 32)
lut[i] = Lut[i] * 16 * 16;
float max_sum_sqr_x = fmaxf(sum_sqr_x, epsilon);
float norm_xi = rsqrtf(max_sum_sqr_x);
float norm_x2i = 1.0f / max_sum_sqr_x;
float red_val = 0.0f;
float dg = 0.0f;
#pragma unroll 1
for (int i = 0; i < lut_size; i++)
{
int offset = lut[i] + tid;
const TY* DY1 = DY + offset;
const TX* X1 = X + offset;
#pragma unroll
for (int j = 0; j < 8; j++)
{
float x = load( X1, j*32);
float dy = load(DY1, j*32);
red_val += (-dy * gain * x) * norm_x2i;
dg += dy * x * norm_xi;
}
}
// reduce red_val,dg across the 4 rows of the warp
red_val += __shfl_xor(red_val, 16);
dg += __shfl_xor(dg, 16);
store(DG, dg, k, apply_gain && tid < 16);
red_val *= sum_sqr_x >= epsilon;
#pragma unroll 1
for (int i = 0; i < lut_size; i++)
{
int offset = lut[i] + tid;
TX* DX2 = DX + offset;
const TY* DY2 = DY + offset;
const TX* X2 = X + offset;
#pragma unroll
for (int j = 0; j < 8; j++)
{
float x = load( X2, j*32);
float dy = load(DY2, j*32);
float dx = dy * gain + x * red_val;
store(DX2, dx * norm_xi, j*32);
}
}
}
// sum_sqr_x = sum(x**2)
// norm_x = sqrt(maximum(sum_sqr_x, epsilon))
// grad_x = ( grad_y*g + x * (sum_sqr_x >= epsilon) * sum(-grad_y*g * x / norm_x**2) ) / norm_x
// grad_g = sum(grad_y * l2_norm(x))
template <typename TY, typename TX>
__global__ void __launch_bounds__(32) l2_normalize_grad_CK_8(
TX* DX,
float* DG,
const TY* __restrict__ DY,
const TX* __restrict__ X,
const float* __restrict__ G,
const float* __restrict__ S,
const int* __restrict__ Lut,
float epsilon, int apply_gain)
{
extern __shared__ int lut[]; // max(lut_size)
int tid = threadIdx.x;
int idx_L = blockIdx.x;
int4 lut_head = ((const int4*)Lut)[idx_L];
// unpack lut header
int lut_offset = lut_head.x;
int lut_size = lut_head.y;
int idx_K = lut_head.z;
int k = idx_K*8 + (tid & 7);
float gain = 1.0f;
if (apply_gain) gain = G[k];
float sum_sqr_x = S[k];
Lut += lut_offset;
#pragma unroll 1
for (int i = tid; i < lut_size; i += 32)
lut[i] = Lut[i] * 8 * 8;
float max_sum_sqr_x = fmaxf(sum_sqr_x, epsilon);
float norm_xi = rsqrtf(max_sum_sqr_x);
float norm_x2i = 1.0f / max_sum_sqr_x;
float red_val = 0.0f;
float dg = 0.0f;
#pragma unroll 1
for (int i = 0; i < lut_size; i++)
{
int offset = lut[i] + tid;
const TY* DY1 = DY + offset;
const TX* X1 = X + offset;
#pragma unroll
for (int j = 0; j < 2; j++)
{
float x = load( X1, j*32);
float dy = load(DY1, j*32);
red_val += (-dy * gain * x) * norm_x2i;
dg += dy * x * norm_xi;
}
}
// reduce red_val,dg across the 4 rows of the warp
red_val += __shfl_xor(red_val, 16);
dg += __shfl_xor(dg, 16);
red_val += __shfl_xor(red_val, 8);
dg += __shfl_xor(dg, 8);
store(DG, dg, k, apply_gain && tid < 8);
red_val *= sum_sqr_x >= epsilon;
#pragma unroll 1
for (int i = 0; i < lut_size; i++)
{
int offset = lut[i] + tid;
TX* DX2 = DX + offset;
const TY* DY2 = DY + offset;
const TX* X2 = X + offset;
#pragma unroll
for (int j = 0; j < 2; j++)
{
float x = load( X2, j*32);
float dy = load(DY2, j*32);
float dx = dy * gain + x * red_val;
store(DX2, dx * norm_xi, j*32);
}
}
}
template <typename TY, typename TX>
bool L2NormalizeGradKCTRS(hipStream_t stream, TX* grad_x, float* grad_g, const TY* grad_y, const TX* x, const float* g, const float* sum_sqr_x_p, const int* lut, float epsilon, int K)
{
dim3 grid(K, 1, 1);
dim3 block(32, 1, 1);
hipLaunchKernelGGL(( l2_normalize_grad_KCTRS<TY,TX>), dim3(grid), dim3(block), 0, stream, grad_x, grad_g, grad_y, x, g, sum_sqr_x_p, (const int2*)lut, epsilon, g != 0);
return true; // TODO
}
template <typename TY, typename TX>
bool L2NormalizeGradCKTRS(hipStream_t stream, TX* grad_x, float* grad_g, const TY* grad_y, const TX* x, const float* g, const float* sum_sqr_x_p, const int* lut, float epsilon, int K, int TRS, int magic_TRS, int shift_TRS)
{
dim3 grid(K, 1, 1);
dim3 block(32, 1, 1);
hipLaunchKernelGGL(( l2_normalize_grad_CKTRS<TY,TX>), dim3(grid), dim3(block), 0, stream, grad_x, grad_g, grad_y, x, g, sum_sqr_x_p, (const int4*)lut, epsilon, g != 0, TRS, magic_TRS, shift_TRS);
return true; // TODO
}
template <typename TY, typename TX>
bool L2NormalizeGradCK (hipStream_t stream, TX* grad_x, float* grad_g, const TY* grad_y, const TX* x, const float* g, const float* sum_sqr_x_p, const int* lut, float epsilon, int K, int shared, int bsize)
{
if (bsize == 32)
{
dim3 grid(K>>5, 1, 1);
dim3 block(128, 1, 1);
hipLaunchKernelGGL(( l2_normalize_grad_CK_32<TY,TX>), dim3(grid), dim3(block), shared+96*2*4, stream, grad_x, grad_g, grad_y, x, g, sum_sqr_x_p, lut, epsilon, g != 0);
}
else if (bsize == 16)
{
dim3 grid(K>>4, 1, 1);
dim3 block(32, 1, 1);
hipLaunchKernelGGL(( l2_normalize_grad_CK_16<TY,TX>), dim3(grid), dim3(block), shared, stream, grad_x, grad_g, grad_y, x, g, sum_sqr_x_p, lut, epsilon, g != 0);
}
else // if (bsize == 8)
{
dim3 grid(K>>3, 1, 1);
dim3 block(32, 1, 1);
hipLaunchKernelGGL(( l2_normalize_grad_CK_8<TY,TX>), dim3(grid), dim3(block), shared, stream, grad_x, grad_g, grad_y, x, g, sum_sqr_x_p, lut, epsilon, g != 0);
}
return true; // TODO
}
template bool L2NormalizeKCTRS<float, float>(hipStream_t stream, float* y, float* sum_sqr_x, const float* x, const float* g, const int* lut, float epsilon, int K);
template bool L2NormalizeCKTRS<float, float>(hipStream_t stream, float* y, float* sum_sqr_x, const float* x, const float* g, const int* lut, float epsilon, int K, int TRS, int magic_TRS, int shift_TRS);
template bool L2NormalizeCK <float, float>(hipStream_t stream, float* y, float* sum_sqr_x, const float* x, const float* g, const int* lut, float epsilon, int K, int shared, int bsize);
template bool L2NormalizeGradKCTRS<float, float>(hipStream_t stream, float* grad_x, float* grad_g, const float* grad_y, const float* x, const float* g, const float* sum_sqr_x_p, const int* lut, float epsilon, int K);
template bool L2NormalizeGradCKTRS<float, float>(hipStream_t stream, float* grad_x, float* grad_g, const float* grad_y, const float* x, const float* g, const float* sum_sqr_x_p, const int* lut, float epsilon, int K, int TRS, int magic_TRS, int shift_TRS);
template bool L2NormalizeGradCK <float, float>(hipStream_t stream, float* grad_x, float* grad_g, const float* grad_y, const float* x, const float* g, const float* sum_sqr_x_p, const int* lut, float epsilon, int K, int shared, int bsize);
template bool L2NormalizeKCTRS<ehalf, ehalf>(hipStream_t stream, ehalf* y, float* sum_sqr_x, const ehalf* x, const float* g, const int* lut, float epsilon, int K);
template bool L2NormalizeCKTRS<ehalf, ehalf>(hipStream_t stream, ehalf* y, float* sum_sqr_x, const ehalf* x, const float* g, const int* lut, float epsilon, int K, int TRS, int magic_TRS, int shift_TRS);
template bool L2NormalizeCK <ehalf, ehalf>(hipStream_t stream, ehalf* y, float* sum_sqr_x, const ehalf* x, const float* g, const int* lut, float epsilon, int K, int shared, int bsize);
template bool L2NormalizeGradKCTRS<ehalf, ehalf>(hipStream_t stream, ehalf* grad_x, float* grad_g, const ehalf* grad_y, const ehalf* x, const float* g, const float* sum_sqr_x_p, const int* lut, float epsilon, int K);
template bool L2NormalizeGradCKTRS<ehalf, ehalf>(hipStream_t stream, ehalf* grad_x, float* grad_g, const ehalf* grad_y, const ehalf* x, const float* g, const float* sum_sqr_x_p, const int* lut, float epsilon, int K, int TRS, int magic_TRS, int shift_TRS);
template bool L2NormalizeGradCK <ehalf, ehalf>(hipStream_t stream, ehalf* grad_x, float* grad_g, const ehalf* grad_y, const ehalf* x, const float* g, const float* sum_sqr_x_p, const int* lut, float epsilon, int K, int shared, int bsize);
template bool L2NormalizeKCTRS<ehalf, float>(hipStream_t stream, ehalf* y, float* sum_sqr_x, const float* x, const float* g, const int* lut, float epsilon, int K);
template bool L2NormalizeCKTRS<ehalf, float>(hipStream_t stream, ehalf* y, float* sum_sqr_x, const float* x, const float* g, const int* lut, float epsilon, int K, int TRS, int magic_TRS, int shift_TRS);
template bool L2NormalizeCK <ehalf, float>(hipStream_t stream, ehalf* y, float* sum_sqr_x, const float* x, const float* g, const int* lut, float epsilon, int K, int shared, int bsize);
template bool L2NormalizeGradKCTRS<ehalf, float>(hipStream_t stream, float* grad_x, float* grad_g, const ehalf* grad_y, const float* x, const float* g, const float* sum_sqr_x_p, const int* lut, float epsilon, int K);
template bool L2NormalizeGradCKTRS<ehalf, float>(hipStream_t stream, float* grad_x, float* grad_g, const ehalf* grad_y, const float* x, const float* g, const float* sum_sqr_x_p, const int* lut, float epsilon, int K, int TRS, int magic_TRS, int shift_TRS);
template bool L2NormalizeGradCK <ehalf, float>(hipStream_t stream, float* grad_x, float* grad_g, const ehalf* grad_y, const float* x, const float* g, const float* sum_sqr_x_p, const int* lut, float epsilon, int K, int shared, int bsize);
template bool L2NormalizeKCTRS<bhalf, bhalf>(hipStream_t stream, bhalf* y, float* sum_sqr_x, const bhalf* x, const float* g, const int* lut, float epsilon, int K);
template bool L2NormalizeCKTRS<bhalf, bhalf>(hipStream_t stream, bhalf* y, float* sum_sqr_x, const bhalf* x, const float* g, const int* lut, float epsilon, int K, int TRS, int magic_TRS, int shift_TRS);
template bool L2NormalizeCK <bhalf, bhalf>(hipStream_t stream, bhalf* y, float* sum_sqr_x, const bhalf* x, const float* g, const int* lut, float epsilon, int K, int shared, int bsize);
template bool L2NormalizeGradKCTRS<bhalf, bhalf>(hipStream_t stream, bhalf* grad_x, float* grad_g, const bhalf* grad_y, const bhalf* x, const float* g, const float* sum_sqr_x_p, const int* lut, float epsilon, int K);
template bool L2NormalizeGradCKTRS<bhalf, bhalf>(hipStream_t stream, bhalf* grad_x, float* grad_g, const bhalf* grad_y, const bhalf* x, const float* g, const float* sum_sqr_x_p, const int* lut, float epsilon, int K, int TRS, int magic_TRS, int shift_TRS);
template bool L2NormalizeGradCK <bhalf, bhalf>(hipStream_t stream, bhalf* grad_x, float* grad_g, const bhalf* grad_y, const bhalf* x, const float* g, const float* sum_sqr_x_p, const int* lut, float epsilon, int K, int shared, int bsize);
template bool L2NormalizeKCTRS<bhalf, float>(hipStream_t stream, bhalf* y, float* sum_sqr_x, const float* x, const float* g, const int* lut, float epsilon, int K);
template bool L2NormalizeCKTRS<bhalf, float>(hipStream_t stream, bhalf* y, float* sum_sqr_x, const float* x, const float* g, const int* lut, float epsilon, int K, int TRS, int magic_TRS, int shift_TRS);
template bool L2NormalizeCK <bhalf, float>(hipStream_t stream, bhalf* y, float* sum_sqr_x, const float* x, const float* g, const int* lut, float epsilon, int K, int shared, int bsize);
template bool L2NormalizeGradKCTRS<bhalf, float>(hipStream_t stream, float* grad_x, float* grad_g, const bhalf* grad_y, const float* x, const float* g, const float* sum_sqr_x_p, const int* lut, float epsilon, int K);
template bool L2NormalizeGradCKTRS<bhalf, float>(hipStream_t stream, float* grad_x, float* grad_g, const bhalf* grad_y, const float* x, const float* g, const float* sum_sqr_x_p, const int* lut, float epsilon, int K, int TRS, int magic_TRS, int shift_TRS);
template bool L2NormalizeGradCK <bhalf, float>(hipStream_t stream, float* grad_x, float* grad_g, const bhalf* grad_y, const float* x, const float* g, const float* sum_sqr_x_p, const int* lut, float epsilon, int K, int shared, int bsize);
#endif // GOOGLE_CUDA
| blocksparse_l2_norm_op_gpu.cu |
#if GOOGLE_CUDA
#define EIGEN_USE_GPU
#include "ew_op_gpu.h"
__device__ __forceinline__ int div16(int numerator, int magic, int shift)
{
int res;
asm("vmad.s32.u32.u32 %0, %1.h0, %2.h0, 0;" : "=r"(res) : "r"(numerator), "r"(magic));
return res >> shift;
}
__device__ __forceinline__ int mod16(int numerator, int div, int maxdiv)
{
int res;
asm("vmad.s32.u32.u32 %0, -%1.h0, %2.h0, %3;" : "=r"(res) : "r"(div), "r"(maxdiv), "r"(numerator));
return res;
}
__device__ __forceinline__ int mad16(int a, int b, int c)
{
int res;
asm("vmad.s32.u32.u32 %0, %1.h0, %2.h0, %3;" : "=r"(res) : "r"(a), "r"(b), "r"(c));
return res;
}
// y = gain * x / sqrt(max(sum(x**2), epsilon))
template <typename TY, typename TX>
__global__ void __launch_bounds__(32) l2_normalize_KCTRS(
TY* Y,
float* S,
const TX* __restrict__ X,
const float* __restrict__ G,
const int2* __restrict__ Lut,
float epsilon, int apply_gain)
{
int tid = threadIdx.x;
int k = blockIdx.x;
int2 block_data = Lut[k];
float gain = 1.0f;
if (apply_gain) gain = G[k];
int offset = block_data.x + tid; // block_F + idx_k * CTRS + tid
int CTRS = block_data.y; // block_C * TRS
const TX* X1 = X + offset;
const TX* X2 = X + offset;
Y += offset;
// sum_sqr_x = sum(x**2)
float sum_sqr_x = 0.0f;
for (int i = tid; i < CTRS; i += 32)
{
float x = load(X1);
X1 += 32;
sum_sqr_x += x * x;
}
#pragma unroll
for (int i = 16; i > 0; i >>= 1)
sum_sqr_x += __shfl_xor(sum_sqr_x, i);
// store reduction for gradient pass
if (tid == 0)
store(S, sum_sqr_x, k);
// rnorm = 1.0f / sqrt(max(sum_sqr_x, epsilon)) * gain
float rnorm = rsqrtf(fmaxf(sum_sqr_x, epsilon)) * gain;
// y = x * rnorm
for (int i = tid; i < CTRS; i += 32)
{
float x = load(X2);
store(Y, x * rnorm);
X2 += 32;
Y += 32;
}
}
// y = gain * x / sqrt(max(sum(x**2), epsilon))
template <typename TY, typename TX>
__global__ void __launch_bounds__(32) l2_normalize_CKTRS(
TY* Y,
float* S,
const TX* __restrict__ X,
const float* __restrict__ G,
const int4* __restrict__ Lut,
float epsilon, int apply_gain, int TRS, int magic_TRS, int shift_TRS)
{
int tid = threadIdx.x;
int k = blockIdx.x;
int4 block_data = Lut[k];
float gain = 1.0f;
if (apply_gain) gain = G[k];
int idx_k = block_data.x;
int CTRS = block_data.y;
int KTRS = block_data.z;
int block_F = block_data.w;
int offset_F = block_F + idx_k * TRS;
const TX* X1 = X + offset_F;
const TX* X2 = X + offset_F;
Y += offset_F;
// sum_sqr_x = sum(x**2)
float sum_sqr_x = 0.0f;
for (int ctrs = tid; ctrs < CTRS; ctrs += 32)
{
// c = i / TRS;
// trs = i % TRS;
// offset = c * KTRS + trs
int c = div16(ctrs, magic_TRS, shift_TRS);
int trs = mod16(ctrs, c, TRS);
int offset = mad16(c, KTRS, trs);
float x = load(X1, offset);
sum_sqr_x += x * x;
}
#pragma unroll
for (int i = 16; i > 0; i >>= 1)
sum_sqr_x += __shfl_xor(sum_sqr_x, i);
// store reduction for gradient pass
if (tid == 0)
store(S, sum_sqr_x, k);
// rnorm = 1.0f / sqrt(max(sum_sqr_x, epsilon)) * gain
float rnorm = rsqrtf(fmaxf(sum_sqr_x, epsilon)) * gain;
// y = x * rnorm
for (int ctrs = tid; ctrs < CTRS; ctrs += 32)
{
int c = div16(ctrs, magic_TRS, shift_TRS);
int trs = mod16(ctrs, c, TRS);
int offset = mad16(c, KTRS, trs);
float x = load(X2, offset);
store(Y, x * rnorm, offset);
}
}
// y = gain * x / sqrt(max(sum(x**2), epsilon))
template <typename TY, typename TX>
__global__ void __launch_bounds__(128) l2_normalize_CK_32(
TY* Y,
float* S,
const TX* __restrict__ X,
const float* __restrict__ G,
const int* __restrict__ Lut,
float epsilon, int apply_gain)
{
extern __shared__ int iShare[]; // 96 + max(lut_size)
extern __shared__ float fShare[]; // 96 + max(lut_size)
int tid = threadIdx.x;
int idx_L = blockIdx.x;
int4 lut_head = ((const int4*)Lut)[idx_L];
// unpack lut header
int lut_offset = lut_head.x;
int lut_size = lut_head.y;
int idx_K = lut_head.z;
int k = idx_K*32 + (tid & 31);
float gain = 1.0f;
if (apply_gain) gain = G[k];
Lut += lut_offset;
#pragma unroll 1
for (int i = tid; i < lut_size; i += 128)
iShare[i + 96] = Lut[i] * 32 * 32;
__syncthreads();
// sum_sqr_x = sum(x**2)
float sum_sqr_x = 0.0f;
#pragma unroll 1
for (int i = 0; i < lut_size; i++)
{
const TX* X1 = X + iShare[i + 96] + tid;
#pragma unroll
for (int j = 0; j < 8; j++)
{
float x = load(X1, j*128);
sum_sqr_x += x * x;
}
}
// reduce sum_sqr_x across the 4 warps
if (tid >= 32)
fShare[tid-32] = sum_sqr_x;
__syncthreads();
if (tid < 32)
{
sum_sqr_x += fShare[tid] + fShare[tid + 32] + fShare[tid + 64];
fShare[tid] = sum_sqr_x;
// store reduction for gradient pass
store(S, sum_sqr_x, k);
}
__syncthreads();
// get the final reduced value for all warps:
sum_sqr_x = fShare[tid & 31];
// rnorm = 1.0f / sqrt(max(sum_sqr_x, epsilon)) * gain
float rnorm = rsqrtf(fmaxf(sum_sqr_x, epsilon)) * gain;
// y = x * rnorm
#pragma unroll 1
for (int i = 0; i < lut_size; i++)
{
int block_offset = iShare[i + 96];
const TX* X2 = X + block_offset + tid;
TY* Y2 = Y + block_offset + tid;
#pragma unroll
for (int j = 0; j < 8; j++)
{
float x = load(X2, j*128);
store(Y2, x * rnorm, j*128);
}
}
}
// y = gain * x / sqrt(max(sum(x**2), epsilon))
template <typename TY, typename TX>
__global__ void __launch_bounds__(32) l2_normalize_CK_16(
TY* Y,
float* S,
const TX* __restrict__ X,
const float* __restrict__ G,
const int* __restrict__ Lut,
float epsilon, int apply_gain)
{
extern __shared__ int lut[]; // max(lut_size)
int tid = threadIdx.x;
int idx_L = blockIdx.x;
int4 lut_head = ((const int4*)Lut)[idx_L];
// unpack lut header
int lut_offset = lut_head.x;
int lut_size = lut_head.y;
int idx_K = lut_head.z;
int k = idx_K*16 + (tid & 15);
float gain = 1.0f;
if (apply_gain) gain = G[k];
Lut += lut_offset;
#pragma unroll 1
for (int i = tid; i < lut_size; i += 32)
lut[i] = Lut[i] * 16 * 16;
// sum_sqr_x = sum(x**2)
float sum_sqr_x = 0.0f;
#pragma unroll 1
for (int i = 0; i < lut_size; i++)
{
const TX* X0 = X + lut[i] + tid;
#pragma unroll
for (int j = 0; j < 8; j++)
{
float x = load(X0, j*32);
sum_sqr_x += x * x;
}
}
// reduce sum_sqr_x across the 4 rows of the warp
sum_sqr_x += __shfl_xor(sum_sqr_x, 16);
store(S, sum_sqr_x, k, tid < 16);
// rnorm = 1.0f / sqrt(max(sum_sqr_x, epsilon)) * gain
float rnorm = rsqrtf(fmaxf(sum_sqr_x, epsilon)) * gain;
// y = x * rnorm
#pragma unroll 1
for (int i = 0; i < lut_size; i++)
{
int block_offset = lut[i];
const TX* X0 = X + block_offset + tid;
TY* Y0 = Y + block_offset + tid;
#pragma unroll
for (int j = 0; j < 8; j++)
{
float x = load(X0, j*32);
store(Y0, x * rnorm, j*32);
}
}
}
// y = gain * x / sqrt(max(sum(x**2), epsilon))
template <typename TY, typename TX>
__global__ void __launch_bounds__(32) l2_normalize_CK_8(
TY* Y,
float* S,
const TX* __restrict__ X,
const float* __restrict__ G,
const int* __restrict__ Lut,
float epsilon, int apply_gain)
{
extern __shared__ int lut[]; // max(lut_size)
int tid = threadIdx.x;
int idx_L = blockIdx.x;
int4 lut_head = ((const int4*)Lut)[idx_L];
// unpack lut header
int lut_offset = lut_head.x;
int lut_size = lut_head.y;
int idx_K = lut_head.z;
int k = idx_K*8 + (tid & 7);
float gain = 1.0f;
if (apply_gain) gain = G[k];
Lut += lut_offset;
#pragma unroll 1
for (int i = tid; i < lut_size; i += 32)
lut[i] = Lut[i] * 8 * 8;
// sum_sqr_x = sum(x**2)
float sum_sqr_x = 0.0f;
#pragma unroll 1
for (int i = 0; i < lut_size; i++)
{
const TX* X0 = X + lut[i] + tid;
float x0 = load(X0, 0*32);
float x1 = load(X0, 1*32);
sum_sqr_x += x0 * x0 + x1 * x1;
}
// reduce sum_sqr_x across the 4 rows of the warp
sum_sqr_x += __shfl_xor(sum_sqr_x, 16);
sum_sqr_x += __shfl_xor(sum_sqr_x, 8);
store(S, sum_sqr_x, k, tid < 8);
// rnorm = 1.0f / sqrt(max(sum_sqr_x, epsilon)) * gain
float rnorm = rsqrtf(fmaxf(sum_sqr_x, epsilon)) * gain;
// y = x * rnorm
#pragma unroll 1
for (int i = 0; i < lut_size; i++)
{
int block_offset = lut[i];
const TX* X0 = X + block_offset + tid;
TY* Y0 = Y + block_offset + tid;
float x0 = load(X0, 0*32);
float x1 = load(X0, 1*32);
store(Y0, x0 * rnorm, 0*32);
store(Y0, x1 * rnorm, 1*32);
}
}
template <typename TY, typename TX>
bool L2NormalizeKCTRS(CUstream stream, TY* y, float* sum_sqr_x, const TX* x, const float* g, const int* lut, float epsilon, int K)
{
dim3 grid(K, 1, 1);
dim3 block(32, 1, 1);
l2_normalize_KCTRS<TY,TX><<<grid, block, 0, stream>>>(y, sum_sqr_x, x, g, (const int2*)lut, epsilon, g != 0);
return true; // TODO
}
template <typename TY, typename TX>
bool L2NormalizeCKTRS(CUstream stream, TY* y, float* sum_sqr_x, const TX* x, const float* g, const int* lut, float epsilon, int K, int TRS, int magic_TRS, int shift_TRS)
{
dim3 grid(K, 1, 1);
dim3 block(32, 1, 1);
l2_normalize_CKTRS<TY,TX><<<grid, block, 0, stream>>>(y, sum_sqr_x, x, g, (const int4*)lut, epsilon, g != 0, TRS, magic_TRS, shift_TRS);
return true; // TODO
}
template <typename TY, typename TX>
bool L2NormalizeCK(CUstream stream, TY* y, float* sum_sqr_x, const TX* x, const float* g, const int* lut, float epsilon, int K, int shared, int bsize)
{
if (bsize == 32)
{
dim3 grid(K>>5, 1, 1);
dim3 block(128, 1, 1);
l2_normalize_CK_32<TY,TX><<<grid, block, shared+96*4, stream>>>(y, sum_sqr_x, x, g, lut, epsilon, g != 0);
}
else if (bsize == 16)
{
dim3 grid(K>>4, 1, 1);
dim3 block(32, 1, 1);
l2_normalize_CK_16<TY,TX><<<grid, block, shared, stream>>>(y, sum_sqr_x, x, g, lut, epsilon, g != 0);
}
else // if (bsize == 8)
{
dim3 grid(K>>3, 1, 1);
dim3 block(32, 1, 1);
l2_normalize_CK_8<TY,TX><<<grid, block, shared, stream>>>(y, sum_sqr_x, x, g, lut, epsilon, g != 0);
}
return true; // TODO
}
/////////////////////////////////////// Gradients ///////////////////////////////////////////
// sum_sqr_x = sum(x**2)
// norm_x = sqrt(maximum(sum_sqr_x, epsilon))
// grad_x = ( grad_y*g + x * (sum_sqr_x >= epsilon) * sum(-grad_y*g * x / norm_x**2) ) / norm_x
// grad_g = sum(grad_y * l2_norm(x))
template <typename TY, typename TX>
__global__ void __launch_bounds__(32) l2_normalize_grad_KCTRS(
TX* DX,
float* DG,
const TY* __restrict__ DY,
const TX* __restrict__ X,
const float* __restrict__ G,
const float* __restrict__ S,
const int2* __restrict__ Lut,
float epsilon, int apply_gain)
{
int tid = threadIdx.x;
int k = blockIdx.x;
int2 block_data = Lut[k];
float gain = 1.0f;
if (apply_gain) gain = G[k];
int offset = block_data.x + tid; // block_F + idx_k * CTRS + tid
int CTRS = block_data.y; // block_C * TRS
const TX* X1 = X + offset;
const TX* X2 = X + offset;
const TY* DY1 = DY + offset;
const TY* DY2 = DY + offset;
DX += offset;
float sum_sqr_x = S[k];
float max_sum_sqr_x = fmaxf(sum_sqr_x, epsilon);
float norm_xi = rsqrtf(max_sum_sqr_x);
float norm_x2i = 1.0f / max_sum_sqr_x;
// sum(-d * x / norm_x**2)
float red_val = 0.0f;
float dg = 0.0f;
for (int i = tid; i < CTRS; i += 32)
{
float dy = load(DY1);
float x = load(X1);
DY1 += 32;
X1 += 32;
dg += dy * x * norm_xi;
red_val += (-dy * x * gain) * norm_x2i;
}
#pragma unroll
for (int i = 16; i > 0; i >>= 1)
{
red_val += __shfl_xor(red_val, i);
dg += __shfl_xor(dg, i);
}
if (apply_gain && tid == 0)
DG[k] = dg;
red_val *= sum_sqr_x >= epsilon;
for (int i = tid; i < CTRS; i += 32)
{
float dy = load(DY2);
float x = load(X2);
float dx = dy * gain + x * red_val;
store(DX, dx * norm_xi, 0);
DY2 += 32;
X2 += 32;
DX += 32;
}
}
// sum_sqr_x = sum(x**2)
// norm_x = sqrt(maximum(sum_sqr_x, epsilon))
// grad_x = ( grad_y*g + x * (sum_sqr_x >= epsilon) * sum(-grad_y*g * x / norm_x**2) ) / norm_x
// grad_g = sum(grad_y * l2_norm(x))
template <typename TY, typename TX>
__global__ void __launch_bounds__(32) l2_normalize_grad_CKTRS(
TX* DX,
float* DG,
const TY* __restrict__ DY,
const TX* __restrict__ X,
const float* __restrict__ G,
const float* __restrict__ S,
const int4* __restrict__ Lut,
float epsilon, int apply_gain, int TRS, int magic_TRS, int shift_TRS)
{
int tid = threadIdx.x;
int k = blockIdx.x;
int4 block_data = Lut[k];
float gain = 1.0f;
if (apply_gain) gain = G[k];
int idx_k = block_data.x;
int CTRS = block_data.y;
int KTRS = block_data.z;
int block_F = block_data.w;
int offset_F = block_F + idx_k * TRS;
const TX* X1 = X + offset_F;
const TX* X2 = X + offset_F;
const TY* DY1 = DY + offset_F;
const TY* DY2 = DY + offset_F;
DX += offset_F;
float sum_sqr_x = S[k];
float max_sum_sqr_x = fmaxf(sum_sqr_x, epsilon);
float norm_xi = rsqrtf(max_sum_sqr_x);
float norm_x2i = 1.0f / max_sum_sqr_x;
// sum(-d * x / norm_x**2)
float red_val = 0.0f;
float dg = 0.0f;
for (int ctrs = tid; ctrs < CTRS; ctrs += 32)
{
// c = i / TRS;
// trs = i % TRS;
// offset = c * KTRS + trs
int c = div16(ctrs, magic_TRS, shift_TRS);
int trs = mod16(ctrs, c, TRS);
int offset = mad16(c, KTRS, trs);
float x = load( X1, offset);
float dy = load(DY1, offset);
dg += dy * x * norm_xi;
red_val += (-dy * x * gain) * norm_x2i;
}
#pragma unroll
for (int i = 16; i > 0; i >>= 1)
{
red_val += __shfl_xor(red_val, i);
dg += __shfl_xor(dg, i);
}
if (apply_gain && tid == 0)
DG[k] = dg;
red_val *= sum_sqr_x >= epsilon;
for (int ctrs = tid; ctrs < CTRS; ctrs += 32)
{
int c = div16(ctrs, magic_TRS, shift_TRS);
int trs = mod16(ctrs, c, TRS);
int offset = mad16(c, KTRS, trs);
float x = load( X2, offset);
float dy = load(DY2, offset);
float dx = dy * gain + x * red_val;
store(DX, dx * norm_xi, offset);
}
}
// sum_sqr_x = sum(x**2)
// norm_x = sqrt(maximum(sum_sqr_x, epsilon))
// grad_x = ( grad_y*g + x * (sum_sqr_x >= epsilon) * sum(-grad_y*g * x / norm_x**2) ) / norm_x
// grad_g = sum(grad_y * l2_norm(x))
template <typename TY, typename TX>
__global__ void __launch_bounds__(128) l2_normalize_grad_CK_32(
TX* DX,
float* DG,
const TY* __restrict__ DY,
const TX* __restrict__ X,
const float* __restrict__ G,
const float* __restrict__ S,
const int* __restrict__ Lut,
float epsilon, int apply_gain)
{
extern __shared__ float fShare[]; // 96*2 + max(lut_size)
extern __shared__ int iShare[]; // 96*2 + max(lut_size)
float* redShare1 = &fShare[96*0];
float* redShare2 = &fShare[96*1];
int* lutShare = &iShare[96*2];
int tid = threadIdx.x;
int idx_L = blockIdx.x;
int4 lut_head = ((const int4*)Lut)[idx_L];
// unpack lut header
int lut_offset = lut_head.x;
int lut_size = lut_head.y;
int idx_K = lut_head.z;
int k = idx_K*32 + (tid & 31);
float gain = 1.0f;
if (apply_gain) gain = G[k];
float sum_sqr_x = S[k];
Lut += lut_offset;
#pragma unroll 1
for (int i = tid; i < lut_size; i += 128)
lutShare[i] = Lut[i] * 32 * 32;
__syncthreads();
float max_sum_sqr_x = fmaxf(sum_sqr_x, epsilon);
float norm_xi = rsqrtf(max_sum_sqr_x);
float norm_x2i = 1.0f / max_sum_sqr_x;
float red_val = 0.0f;
float dg = 0.0f;
#pragma unroll 1
for (int i = 0; i < lut_size; i++)
{
int offset = lutShare[i] + tid;
const TY* DY1 = DY + offset;
const TX* X1 = X + offset;
#pragma unroll
for (int j = 0; j < 8; j++)
{
float x = load( X1, j*128);
float dy = load(DY1, j*128);
red_val += (-dy * gain * x) * norm_x2i;
dg += dy * x * norm_xi;
}
}
// reduce red_val across the 4 warps
if (tid >= 32)
{
redShare1[tid-32] = red_val;
redShare2[tid-32] = dg;
}
__syncthreads();
if (tid < 32)
{
red_val += redShare1[tid] + redShare1[tid + 32] + redShare1[tid + 64];
dg += redShare2[tid] + redShare2[tid + 32] + redShare2[tid + 64];
redShare1[tid] = red_val;
if (apply_gain)
DG[k] = dg;
}
__syncthreads();
// get the final reduced value for all warps:
red_val = redShare1[tid & 31];
red_val *= sum_sqr_x >= epsilon;
#pragma unroll 1
for (int i = 0; i < lut_size; i++)
{
int offset = lutShare[i] + tid;
TX* DX2 = DX + offset;
const TY* DY2 = DY + offset;
const TX* X2 = X + offset;
#pragma unroll
for (int j = 0; j < 8; j++)
{
float x = load( X2, j*128);
float dy = load(DY2, j*128);
float dx = dy * gain + x * red_val;
store(DX2, dx * norm_xi, j*128);
}
}
}
// sum_sqr_x = sum(x**2)
// norm_x = sqrt(maximum(sum_sqr_x, epsilon))
// grad_x = ( grad_y*g + x * (sum_sqr_x >= epsilon) * sum(-grad_y*g * x / norm_x**2) ) / norm_x
// grad_g = sum(grad_y * l2_norm(x))
template <typename TY, typename TX>
__global__ void __launch_bounds__(32) l2_normalize_grad_CK_16(
TX* DX,
float* DG,
const TY* __restrict__ DY,
const TX* __restrict__ X,
const float* __restrict__ G,
const float* __restrict__ S,
const int* __restrict__ Lut,
float epsilon, int apply_gain)
{
extern __shared__ int lut[]; // max(lut_size)
int tid = threadIdx.x;
int idx_L = blockIdx.x;
int4 lut_head = ((const int4*)Lut)[idx_L];
// unpack lut header
int lut_offset = lut_head.x;
int lut_size = lut_head.y;
int idx_K = lut_head.z;
int k = idx_K*16 + (tid & 15);
float gain = 1.0f;
if (apply_gain) gain = G[k];
float sum_sqr_x = S[k];
Lut += lut_offset;
#pragma unroll 1
for (int i = tid; i < lut_size; i += 32)
lut[i] = Lut[i] * 16 * 16;
float max_sum_sqr_x = fmaxf(sum_sqr_x, epsilon);
float norm_xi = rsqrtf(max_sum_sqr_x);
float norm_x2i = 1.0f / max_sum_sqr_x;
float red_val = 0.0f;
float dg = 0.0f;
#pragma unroll 1
for (int i = 0; i < lut_size; i++)
{
int offset = lut[i] + tid;
const TY* DY1 = DY + offset;
const TX* X1 = X + offset;
#pragma unroll
for (int j = 0; j < 8; j++)
{
float x = load( X1, j*32);
float dy = load(DY1, j*32);
red_val += (-dy * gain * x) * norm_x2i;
dg += dy * x * norm_xi;
}
}
// reduce red_val,dg across the 4 rows of the warp
red_val += __shfl_xor(red_val, 16);
dg += __shfl_xor(dg, 16);
store(DG, dg, k, apply_gain && tid < 16);
red_val *= sum_sqr_x >= epsilon;
#pragma unroll 1
for (int i = 0; i < lut_size; i++)
{
int offset = lut[i] + tid;
TX* DX2 = DX + offset;
const TY* DY2 = DY + offset;
const TX* X2 = X + offset;
#pragma unroll
for (int j = 0; j < 8; j++)
{
float x = load( X2, j*32);
float dy = load(DY2, j*32);
float dx = dy * gain + x * red_val;
store(DX2, dx * norm_xi, j*32);
}
}
}
// sum_sqr_x = sum(x**2)
// norm_x = sqrt(maximum(sum_sqr_x, epsilon))
// grad_x = ( grad_y*g + x * (sum_sqr_x >= epsilon) * sum(-grad_y*g * x / norm_x**2) ) / norm_x
// grad_g = sum(grad_y * l2_norm(x))
template <typename TY, typename TX>
__global__ void __launch_bounds__(32) l2_normalize_grad_CK_8(
TX* DX,
float* DG,
const TY* __restrict__ DY,
const TX* __restrict__ X,
const float* __restrict__ G,
const float* __restrict__ S,
const int* __restrict__ Lut,
float epsilon, int apply_gain)
{
extern __shared__ int lut[]; // max(lut_size)
int tid = threadIdx.x;
int idx_L = blockIdx.x;
int4 lut_head = ((const int4*)Lut)[idx_L];
// unpack lut header
int lut_offset = lut_head.x;
int lut_size = lut_head.y;
int idx_K = lut_head.z;
int k = idx_K*8 + (tid & 7);
float gain = 1.0f;
if (apply_gain) gain = G[k];
float sum_sqr_x = S[k];
Lut += lut_offset;
#pragma unroll 1
for (int i = tid; i < lut_size; i += 32)
lut[i] = Lut[i] * 8 * 8;
float max_sum_sqr_x = fmaxf(sum_sqr_x, epsilon);
float norm_xi = rsqrtf(max_sum_sqr_x);
float norm_x2i = 1.0f / max_sum_sqr_x;
float red_val = 0.0f;
float dg = 0.0f;
#pragma unroll 1
for (int i = 0; i < lut_size; i++)
{
int offset = lut[i] + tid;
const TY* DY1 = DY + offset;
const TX* X1 = X + offset;
#pragma unroll
for (int j = 0; j < 2; j++)
{
float x = load( X1, j*32);
float dy = load(DY1, j*32);
red_val += (-dy * gain * x) * norm_x2i;
dg += dy * x * norm_xi;
}
}
// reduce red_val,dg across the 4 rows of the warp
red_val += __shfl_xor(red_val, 16);
dg += __shfl_xor(dg, 16);
red_val += __shfl_xor(red_val, 8);
dg += __shfl_xor(dg, 8);
store(DG, dg, k, apply_gain && tid < 8);
red_val *= sum_sqr_x >= epsilon;
#pragma unroll 1
for (int i = 0; i < lut_size; i++)
{
int offset = lut[i] + tid;
TX* DX2 = DX + offset;
const TY* DY2 = DY + offset;
const TX* X2 = X + offset;
#pragma unroll
for (int j = 0; j < 2; j++)
{
float x = load( X2, j*32);
float dy = load(DY2, j*32);
float dx = dy * gain + x * red_val;
store(DX2, dx * norm_xi, j*32);
}
}
}
template <typename TY, typename TX>
bool L2NormalizeGradKCTRS(CUstream stream, TX* grad_x, float* grad_g, const TY* grad_y, const TX* x, const float* g, const float* sum_sqr_x_p, const int* lut, float epsilon, int K)
{
dim3 grid(K, 1, 1);
dim3 block(32, 1, 1);
l2_normalize_grad_KCTRS<TY,TX><<<grid, block, 0, stream>>>(grad_x, grad_g, grad_y, x, g, sum_sqr_x_p, (const int2*)lut, epsilon, g != 0);
return true; // TODO
}
template <typename TY, typename TX>
bool L2NormalizeGradCKTRS(CUstream stream, TX* grad_x, float* grad_g, const TY* grad_y, const TX* x, const float* g, const float* sum_sqr_x_p, const int* lut, float epsilon, int K, int TRS, int magic_TRS, int shift_TRS)
{
dim3 grid(K, 1, 1);
dim3 block(32, 1, 1);
l2_normalize_grad_CKTRS<TY,TX><<<grid, block, 0, stream>>>(grad_x, grad_g, grad_y, x, g, sum_sqr_x_p, (const int4*)lut, epsilon, g != 0, TRS, magic_TRS, shift_TRS);
return true; // TODO
}
template <typename TY, typename TX>
bool L2NormalizeGradCK (CUstream stream, TX* grad_x, float* grad_g, const TY* grad_y, const TX* x, const float* g, const float* sum_sqr_x_p, const int* lut, float epsilon, int K, int shared, int bsize)
{
if (bsize == 32)
{
dim3 grid(K>>5, 1, 1);
dim3 block(128, 1, 1);
l2_normalize_grad_CK_32<TY,TX><<<grid, block, shared+96*2*4, stream>>>(grad_x, grad_g, grad_y, x, g, sum_sqr_x_p, lut, epsilon, g != 0);
}
else if (bsize == 16)
{
dim3 grid(K>>4, 1, 1);
dim3 block(32, 1, 1);
l2_normalize_grad_CK_16<TY,TX><<<grid, block, shared, stream>>>(grad_x, grad_g, grad_y, x, g, sum_sqr_x_p, lut, epsilon, g != 0);
}
else // if (bsize == 8)
{
dim3 grid(K>>3, 1, 1);
dim3 block(32, 1, 1);
l2_normalize_grad_CK_8<TY,TX><<<grid, block, shared, stream>>>(grad_x, grad_g, grad_y, x, g, sum_sqr_x_p, lut, epsilon, g != 0);
}
return true; // TODO
}
template bool L2NormalizeKCTRS<float, float>(CUstream stream, float* y, float* sum_sqr_x, const float* x, const float* g, const int* lut, float epsilon, int K);
template bool L2NormalizeCKTRS<float, float>(CUstream stream, float* y, float* sum_sqr_x, const float* x, const float* g, const int* lut, float epsilon, int K, int TRS, int magic_TRS, int shift_TRS);
template bool L2NormalizeCK <float, float>(CUstream stream, float* y, float* sum_sqr_x, const float* x, const float* g, const int* lut, float epsilon, int K, int shared, int bsize);
template bool L2NormalizeGradKCTRS<float, float>(CUstream stream, float* grad_x, float* grad_g, const float* grad_y, const float* x, const float* g, const float* sum_sqr_x_p, const int* lut, float epsilon, int K);
template bool L2NormalizeGradCKTRS<float, float>(CUstream stream, float* grad_x, float* grad_g, const float* grad_y, const float* x, const float* g, const float* sum_sqr_x_p, const int* lut, float epsilon, int K, int TRS, int magic_TRS, int shift_TRS);
template bool L2NormalizeGradCK <float, float>(CUstream stream, float* grad_x, float* grad_g, const float* grad_y, const float* x, const float* g, const float* sum_sqr_x_p, const int* lut, float epsilon, int K, int shared, int bsize);
template bool L2NormalizeKCTRS<ehalf, ehalf>(CUstream stream, ehalf* y, float* sum_sqr_x, const ehalf* x, const float* g, const int* lut, float epsilon, int K);
template bool L2NormalizeCKTRS<ehalf, ehalf>(CUstream stream, ehalf* y, float* sum_sqr_x, const ehalf* x, const float* g, const int* lut, float epsilon, int K, int TRS, int magic_TRS, int shift_TRS);
template bool L2NormalizeCK <ehalf, ehalf>(CUstream stream, ehalf* y, float* sum_sqr_x, const ehalf* x, const float* g, const int* lut, float epsilon, int K, int shared, int bsize);
template bool L2NormalizeGradKCTRS<ehalf, ehalf>(CUstream stream, ehalf* grad_x, float* grad_g, const ehalf* grad_y, const ehalf* x, const float* g, const float* sum_sqr_x_p, const int* lut, float epsilon, int K);
template bool L2NormalizeGradCKTRS<ehalf, ehalf>(CUstream stream, ehalf* grad_x, float* grad_g, const ehalf* grad_y, const ehalf* x, const float* g, const float* sum_sqr_x_p, const int* lut, float epsilon, int K, int TRS, int magic_TRS, int shift_TRS);
template bool L2NormalizeGradCK <ehalf, ehalf>(CUstream stream, ehalf* grad_x, float* grad_g, const ehalf* grad_y, const ehalf* x, const float* g, const float* sum_sqr_x_p, const int* lut, float epsilon, int K, int shared, int bsize);
template bool L2NormalizeKCTRS<ehalf, float>(CUstream stream, ehalf* y, float* sum_sqr_x, const float* x, const float* g, const int* lut, float epsilon, int K);
template bool L2NormalizeCKTRS<ehalf, float>(CUstream stream, ehalf* y, float* sum_sqr_x, const float* x, const float* g, const int* lut, float epsilon, int K, int TRS, int magic_TRS, int shift_TRS);
template bool L2NormalizeCK <ehalf, float>(CUstream stream, ehalf* y, float* sum_sqr_x, const float* x, const float* g, const int* lut, float epsilon, int K, int shared, int bsize);
template bool L2NormalizeGradKCTRS<ehalf, float>(CUstream stream, float* grad_x, float* grad_g, const ehalf* grad_y, const float* x, const float* g, const float* sum_sqr_x_p, const int* lut, float epsilon, int K);
template bool L2NormalizeGradCKTRS<ehalf, float>(CUstream stream, float* grad_x, float* grad_g, const ehalf* grad_y, const float* x, const float* g, const float* sum_sqr_x_p, const int* lut, float epsilon, int K, int TRS, int magic_TRS, int shift_TRS);
template bool L2NormalizeGradCK <ehalf, float>(CUstream stream, float* grad_x, float* grad_g, const ehalf* grad_y, const float* x, const float* g, const float* sum_sqr_x_p, const int* lut, float epsilon, int K, int shared, int bsize);
template bool L2NormalizeKCTRS<bhalf, bhalf>(CUstream stream, bhalf* y, float* sum_sqr_x, const bhalf* x, const float* g, const int* lut, float epsilon, int K);
template bool L2NormalizeCKTRS<bhalf, bhalf>(CUstream stream, bhalf* y, float* sum_sqr_x, const bhalf* x, const float* g, const int* lut, float epsilon, int K, int TRS, int magic_TRS, int shift_TRS);
template bool L2NormalizeCK <bhalf, bhalf>(CUstream stream, bhalf* y, float* sum_sqr_x, const bhalf* x, const float* g, const int* lut, float epsilon, int K, int shared, int bsize);
template bool L2NormalizeGradKCTRS<bhalf, bhalf>(CUstream stream, bhalf* grad_x, float* grad_g, const bhalf* grad_y, const bhalf* x, const float* g, const float* sum_sqr_x_p, const int* lut, float epsilon, int K);
template bool L2NormalizeGradCKTRS<bhalf, bhalf>(CUstream stream, bhalf* grad_x, float* grad_g, const bhalf* grad_y, const bhalf* x, const float* g, const float* sum_sqr_x_p, const int* lut, float epsilon, int K, int TRS, int magic_TRS, int shift_TRS);
template bool L2NormalizeGradCK <bhalf, bhalf>(CUstream stream, bhalf* grad_x, float* grad_g, const bhalf* grad_y, const bhalf* x, const float* g, const float* sum_sqr_x_p, const int* lut, float epsilon, int K, int shared, int bsize);
template bool L2NormalizeKCTRS<bhalf, float>(CUstream stream, bhalf* y, float* sum_sqr_x, const float* x, const float* g, const int* lut, float epsilon, int K);
template bool L2NormalizeCKTRS<bhalf, float>(CUstream stream, bhalf* y, float* sum_sqr_x, const float* x, const float* g, const int* lut, float epsilon, int K, int TRS, int magic_TRS, int shift_TRS);
template bool L2NormalizeCK <bhalf, float>(CUstream stream, bhalf* y, float* sum_sqr_x, const float* x, const float* g, const int* lut, float epsilon, int K, int shared, int bsize);
template bool L2NormalizeGradKCTRS<bhalf, float>(CUstream stream, float* grad_x, float* grad_g, const bhalf* grad_y, const float* x, const float* g, const float* sum_sqr_x_p, const int* lut, float epsilon, int K);
template bool L2NormalizeGradCKTRS<bhalf, float>(CUstream stream, float* grad_x, float* grad_g, const bhalf* grad_y, const float* x, const float* g, const float* sum_sqr_x_p, const int* lut, float epsilon, int K, int TRS, int magic_TRS, int shift_TRS);
template bool L2NormalizeGradCK <bhalf, float>(CUstream stream, float* grad_x, float* grad_g, const bhalf* grad_y, const float* x, const float* g, const float* sum_sqr_x_p, const int* lut, float epsilon, int K, int shared, int bsize);
#endif // GOOGLE_CUDA
|
ea917eb8b28da1658b166068c6eac1d2ca0698ee.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <Eigen/Dense>
#include "DataFormats/CaloRecHit/interface/MultifitComputations.h"
// needed to compile with USER_CXXFLAGS="-DCOMPUTE_TDC_TIME"
#include "DataFormats/HcalRecHit/interface/HcalSpecialTimes.h"
#include "FWCore/Utilities/interface/CMSUnrollLoop.h"
// TODO reuse some of the HCAL constants from
//#include "RecoLocalCalo/HcalRecAlgos/interface/HcalConstants.h"
#include "SimpleAlgoGPU.h"
#include "KernelHelpers.h"
#ifdef HCAL_MAHI_GPUDEBUG
#define DETID_TO_DEBUG 1125647428
#endif
namespace hcal {
namespace mahi {
// TODO: provide constants from configuration
// from RecoLocalCalo/HcalRecProducers/python/HBHEMahiParameters_cfi.py
constexpr int nMaxItersMin = 50;
constexpr int nMaxItersNNLS = 500;
constexpr double nnlsThresh = 1e-11;
constexpr float deltaChi2Threashold = 1e-3;
// from RecoLocalCalo/HcalRecProducers/src/HBHEPhase1Reconstructor.cc
__forceinline__ __device__ float get_raw_charge(double const charge,
double const pedestal,
float const* shrChargeMinusPedestal,
float const* parLin1Values,
float const* parLin2Values,
float const* parLin3Values,
int32_t const nsamplesForCompute,
int32_t const soi,
int const sipmQTSShift,
int const sipmQNTStoSum,
int const sipmType,
float const fcByPE,
bool const isqie11) {
float rawCharge;
if (!isqie11)
rawCharge = charge;
else {
auto const parLin1 = parLin1Values[sipmType - 1];
auto const parLin2 = parLin2Values[sipmType - 1];
auto const parLin3 = parLin3Values[sipmType - 1];
int const first = ::max(soi + sipmQTSShift, 0);
int const last = ::min(soi + sipmQNTStoSum, nsamplesForCompute);
float sipmq = 0.0f;
for (auto ts = first; ts < last; ts++)
sipmq += shrChargeMinusPedestal[threadIdx.y * nsamplesForCompute + ts];
auto const effectivePixelsFired = sipmq / fcByPE;
auto const factor =
hcal::reconstruction::compute_reco_correction_factor(parLin1, parLin2, parLin3, effectivePixelsFired);
rawCharge = (charge - pedestal) * factor + pedestal;
#ifdef HCAL_MAHI_GPUDEBUG
printf("first = %d last = %d sipmQ = %f factor = %f rawCharge = %f\n", first, last, sipmq, factor, rawCharge);
#endif
}
return rawCharge;
}
// Assume: same number of samples for HB and HE
// TODO: add/validate restrict (will increase #registers in use by the kernel)
__global__ void kernel_prep1d_sameNumberOfSamples(float* amplitudes,
float* noiseTerms,
float* electronicNoiseTerms,
float* outputEnergy,
float* outputChi2,
uint16_t const* dataf01HE,
uint16_t const* dataf5HB,
uint16_t const* dataf3HB,
uint32_t const* idsf01HE,
uint32_t const* idsf5HB,
uint32_t const* idsf3HB,
uint32_t const stridef01HE,
uint32_t const stridef5HB,
uint32_t const stridef3HB,
uint32_t const nchannelsf01HE,
uint32_t const nchannelsf5HB,
uint8_t const* npresamplesf5HB,
int8_t* soiSamples,
float* method0Energy,
float* method0Time,
uint32_t* outputdid,
uint32_t const nchannels,
uint32_t const* recoParam1Values,
uint32_t const* recoParam2Values,
float const* qieCoderOffsets,
float const* qieCoderSlopes,
int const* qieTypes,
float const* pedestalWidths,
float const* effectivePedestalWidths,
float const* pedestals,
float const* effectivePedestals,
bool const useEffectivePedestals,
int const* sipmTypeValues,
float const* fcByPEValues,
float const* parLin1Values,
float const* parLin2Values,
float const* parLin3Values,
float const* gainValues,
float const* respCorrectionValues,
int const maxDepthHB,
int const maxDepthHE,
int const maxPhiHE,
int const firstHBRing,
int const lastHBRing,
int const firstHERing,
int const lastHERing,
int const nEtaHB,
int const nEtaHE,
int const sipmQTSShift,
int const sipmQNTStoSum,
int const firstSampleShift,
uint32_t const offsetForHashes,
float const ts4Thresh,
int const startingSample) {
// indices + runtime constants
auto const sample = threadIdx.x + startingSample;
auto const sampleWithinWindow = threadIdx.x;
int32_t const nsamplesForCompute = blockDim.x;
auto const lch = threadIdx.y;
auto const gch = lch + blockDim.y * blockIdx.x;
auto const nchannels_per_block = blockDim.y;
auto const linearThPerBlock = threadIdx.x + threadIdx.y * blockDim.x;
// remove
if (gch >= nchannels)
return;
// initialize all output buffers
if (sampleWithinWindow == 0) {
outputdid[gch] = 0;
method0Energy[gch] = 0;
method0Time[gch] = 0;
outputEnergy[gch] = 0;
outputChi2[gch] = 0;
}
#ifdef HCAL_MAHI_GPUDEBUG
#ifdef HCAL_MAHI_GPUDEBUG_SINGLECHANNEL
if (gch > 0)
return;
#endif
#endif
// configure shared mem
extern __shared__ char smem[];
float* shrEnergyM0PerTS = reinterpret_cast<float*>(smem);
float* shrChargeMinusPedestal = shrEnergyM0PerTS + nsamplesForCompute * nchannels_per_block;
float* shrMethod0EnergyAccum = shrChargeMinusPedestal + nsamplesForCompute * nchannels_per_block;
float* shrEnergyM0TotalAccum = shrMethod0EnergyAccum + nchannels_per_block;
unsigned long long int* shrMethod0EnergySamplePair =
reinterpret_cast<unsigned long long int*>(shrEnergyM0TotalAccum + nchannels_per_block);
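// The highest-energy (sample, energy) pair is packed into a single 64-bit word
// (sample index in the high 32 bits, energy bit pattern in the low 32 bits)
// so the running maximum can be updated with one atomicCAS further below.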
if (sampleWithinWindow == 0) {
shrMethod0EnergyAccum[lch] = 0;
shrMethod0EnergySamplePair[lch] = __float_as_uint(std::numeric_limits<float>::min());
shrEnergyM0TotalAccum[lch] = 0;
}
// offset output
auto* amplitudesForChannel = amplitudes + nsamplesForCompute * gch;
auto* noiseTermsForChannel = noiseTerms + nsamplesForCompute * gch;
auto* electronicNoiseTermsForChannel = electronicNoiseTerms + nsamplesForCompute * gch;
auto const nchannelsf015 = nchannelsf01HE + nchannelsf5HB;
// get event input quantities
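// channels are laid out contiguously as [flavor-01 HE | flavor-5 HB | flavor-3 HB];
// pick the digi stride and number of samples according to this channel's flavor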
auto const stride = gch < nchannelsf01HE ? stridef01HE : (gch < nchannelsf015 ? stridef5HB : stridef3HB);
auto const nsamples = gch < nchannelsf01HE ? compute_nsamples<Flavor1>(stride)
: (gch < nchannelsf015 ? compute_nsamples<Flavor5>(stride)
: compute_nsamples<Flavor3>(stride));
#ifdef HCAL_MAHI_GPUDEBUG
assert(nsamples == nsamplesForCompute || nsamples - startingSample == nsamplesForCompute);
#endif
auto const id = gch < nchannelsf01HE
? idsf01HE[gch]
: (gch < nchannelsf015 ? idsf5HB[gch - nchannelsf01HE] : idsf3HB[gch - nchannelsf015]);
auto const did = HcalDetId{id};
auto const adc =
gch < nchannelsf01HE
? adc_for_sample<Flavor1>(dataf01HE + stride * gch, sample)
: (gch < nchannelsf015 ? adc_for_sample<Flavor5>(dataf5HB + stride * (gch - nchannelsf01HE), sample)
: adc_for_sample<Flavor3>(dataf3HB + stride * (gch - nchannelsf015), sample));
auto const capid =
gch < nchannelsf01HE
? capid_for_sample<Flavor1>(dataf01HE + stride * gch, sample)
: (gch < nchannelsf015 ? capid_for_sample<Flavor5>(dataf5HB + stride * (gch - nchannelsf01HE), sample)
: capid_for_sample<Flavor3>(dataf3HB + stride * (gch - nchannelsf015), sample));
#ifdef HCAL_MAHI_GPUDEBUG
#ifdef HCAL_MAHI_GPUDEBUG_FILTERDETID
if (id != DETID_TO_DEBUG)
return;
#endif
#endif
// compute hash for this did
auto const hashedId =
did.subdetId() == HcalBarrel
? hcal::reconstruction::did2linearIndexHB(id, maxDepthHB, firstHBRing, lastHBRing, nEtaHB)
: hcal::reconstruction::did2linearIndexHE(id, maxDepthHE, maxPhiHE, firstHERing, lastHERing, nEtaHE) +
offsetForHashes;
// conditions based on the hash
// FIXME: remove hardcoded values
auto const qieType = qieTypes[hashedId] > 0 ? 1 : 0; // 2 types at this point
auto const* qieOffsets = qieCoderOffsets + hashedId * HcalQIECodersGPU::numValuesPerChannel;
auto const* qieSlopes = qieCoderSlopes + hashedId * HcalQIECodersGPU::numValuesPerChannel;
auto const* pedestalsForChannel = pedestals + hashedId * 4;
auto const* pedestalWidthsForChannel = useEffectivePedestals && (gch < nchannelsf01HE || gch >= nchannelsf015)
? effectivePedestalWidths + hashedId * 4
: pedestalWidths + hashedId * 4;
auto const* gains = gainValues + hashedId * 4;
auto const gain = gains[capid];
auto const gain0 = gains[0];
auto const respCorrection = respCorrectionValues[hashedId];
auto const pedestal = pedestalsForChannel[capid];
auto const pedestalWidth = pedestalWidthsForChannel[capid];
// if needed, only use effective pedestals for f01
auto const pedestalToUseForMethod0 = useEffectivePedestals && (gch < nchannelsf01HE || gch >= nchannelsf015)
? effectivePedestals[hashedId * 4 + capid]
: pedestal;
auto const sipmType = sipmTypeValues[hashedId];
auto const fcByPE = fcByPEValues[hashedId];
auto const recoParam1 = recoParam1Values[hashedId];
auto const recoParam2 = recoParam2Values[hashedId];
#ifdef HCAL_MAHI_GPUDEBUG
printf("qieType = %d qieOffset0 = %f qieOffset1 = %f qieSlope0 = %f qieSlope1 = %f\n",
qieType,
qieOffsets[0],
qieOffsets[1],
qieSlopes[0],
qieSlopes[1]);
#endif
// compute charge
auto const charge = hcal::reconstruction::compute_coder_charge(qieType, adc, capid, qieOffsets, qieSlopes);
shrChargeMinusPedestal[linearThPerBlock] = charge - pedestal;
if (gch < nchannelsf01HE) {
// NOTE: assume that soi is high only for a single guy!
// which must be the case. cpu version does not check for that
// if that is not the case, we will see that with cuda memcheck
auto const soibit = soibit_for_sample<Flavor1>(dataf01HE + stride * gch, sample);
if (soibit == 1)
soiSamples[gch] = sampleWithinWindow;
} else if (gch >= nchannelsf015) {
auto const soibit = soibit_for_sample<Flavor3>(dataf3HB + stride * (gch - nchannelsf015), sample);
if (soibit == 1)
soiSamples[gch] = sampleWithinWindow;
}
__syncthreads();
int32_t const soi = gch < nchannelsf01HE
? soiSamples[gch]
: (gch < nchannelsf015 ? npresamplesf5HB[gch - nchannelsf01HE] : soiSamples[gch]);
//int32_t const soi = gch >= nchannelsf01HE
// ? npresamplesf5HB[gch - nchannelsf01HE]
// : soiSamples[gch];
// this is here just to make things uniform...
if (gch >= nchannelsf01HE && gch < nchannelsf015 && sampleWithinWindow == 0)
soiSamples[gch] = npresamplesf5HB[gch - nchannelsf01HE];
//
// compute various quantities (raw charge and tdc stuff)
// NOTE: this branch will be divergent only for a single warp that
// sits on the boundary when flavor 01 channels end and flavor 5 start
//
float const rawCharge = get_raw_charge(charge,
pedestal,
shrChargeMinusPedestal,
parLin1Values,
parLin2Values,
parLin3Values,
nsamplesForCompute,
soi,
sipmQTSShift,
sipmQNTStoSum,
sipmType,
fcByPE,
gch < nchannelsf01HE || gch >= nchannelsf015);
auto const dfc = hcal::reconstruction::compute_diff_charge_gain(
qieType, adc, capid, qieOffsets, qieSlopes, gch < nchannelsf01HE || gch >= nchannelsf015);
#ifdef COMPUTE_TDC_TIME
float tdcTime;
if (gch >= nchannelsf01HE && gch < nchannelsf015) {
tdcTime = HcalSpecialTimes::UNKNOWN_T_NOTDC;
} else {
if (gch < nchannelsf01HE)
tdcTime = HcalSpecialTimes::getTDCTime(tdc_for_sample<Flavor1>(dataf01HE + stride * gch, sample));
else if (gch >= nchannelsf015)
tdcTime =
HcalSpecialTimes::getTDCTime(tdc_for_sample<Flavor3>(dataf3HB + stride * (gch - nchannelsf015), sample));
}
#endif // COMPUTE_TDC_TIME
// compute method 0 quantities
// TODO: need to apply containment
// TODO: need to apply time slew
// TODO: for < run 3, apply HBM legacy energy correction
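// number of slices summed by Method 0: taken directly from recoParam2 for legacy
// values of recoParam1 (< 10), otherwise unpacked from bits [14:17] of recoParam1;
// the sum starts firstSampleShift slices relative to the SOI (clamped to the window)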
auto const nsamplesToAdd = recoParam1 < 10 ? recoParam2 : (recoParam1 >> 14) & 0xF;
auto const startSampleTmp = soi + firstSampleShift;
auto const startSample = startSampleTmp < 0 ? 0 : startSampleTmp;
auto const endSample =
startSample + nsamplesToAdd < nsamplesForCompute ? startSample + nsamplesToAdd : nsamplesForCompute;
// NOTE: gain is a small number < 10^-3, multiply it last
auto const energym0_per_ts = gain * ((rawCharge - pedestalToUseForMethod0) * respCorrection);
auto const energym0_per_ts_gain0 = gain0 * ((rawCharge - pedestalToUseForMethod0) * respCorrection);
// store to shared mem
shrEnergyM0PerTS[lch * nsamplesForCompute + sampleWithinWindow] = energym0_per_ts;
atomicAdd(&shrEnergyM0TotalAccum[lch], energym0_per_ts_gain0);
#ifdef HCAL_MAHI_GPUDEBUG
printf(
"id = %u sample = %d gch = %d hashedId = %u adc = %u capid = %u\n"
" charge = %f rawCharge = %f dfc = %f pedestal = %f\n"
" gain = %f respCorrection = %f energym0_per_ts = %f\n",
id,
sample,
gch,
hashedId,
adc,
capid,
charge,
rawCharge,
dfc,
pedestalToUseForMethod0,
gain,
respCorrection,
energym0_per_ts);
printf(
"startSample = %d endSample = %d param1 = %u param2 = %u\n", startSample, endSample, recoParam1, recoParam2);
#endif
if (sampleWithinWindow >= startSample && sampleWithinWindow < endSample) {
atomicAdd(&shrMethod0EnergyAccum[lch], energym0_per_ts);
// pack sample, energy as 64 bit value
unsigned long long int old = shrMethod0EnergySamplePair[lch], assumed;
unsigned long long int val =
(static_cast<unsigned long long int>(sampleWithinWindow) << 32) + __float_as_uint(energym0_per_ts);
do {
assumed = old;
// decode energy, sample values
//int const current_sample = (assumed >> 32) & 0xffffffff;
float const current_energy = __uint_as_float(assumed & 0xffffffff);
if (energym0_per_ts > current_energy)
old = atomicCAS(&shrMethod0EnergySamplePair[lch], assumed, val);
else
break;
} while (assumed != old);
}
__syncthreads();
// NOTE: must take soi, as values for that thread are used...
if (sampleWithinWindow == soi) {
auto const method0_energy = shrMethod0EnergyAccum[lch];
auto const val = shrMethod0EnergySamplePair[lch];
int const max_sample = (val >> 32) & 0xffffffff;
float const max_energy = __uint_as_float(val & 0xffffffff);
float const max_energy_1 =
max_sample < nsamplesForCompute - 1 ? shrEnergyM0PerTS[lch * nsamplesForCompute + max_sample + 1] : 0.f;
float const position = nsamplesToAdd < nsamplesForCompute ? max_sample - soi : max_sample;
auto const sum = max_energy + max_energy_1;
// FIXME: for full comparison with cpu method 0 timing,
// need to correct by slew
// requires an accumulator -> more shared mem -> omit here unless
// really needed
float const time =
max_energy > 0.f && max_energy_1 > 0.f ? 25.f * (position + max_energy_1 / sum) : 25.f * position;
// store method0 quantities to global mem
outputdid[gch] = id;
method0Energy[gch] = method0_energy;
method0Time[gch] = time;
#ifdef HCAL_MAHI_GPUDEBUG
printf("tsTOT = %f tstrig = %f ts4Thresh = %f\n", shrEnergyM0TotalAccum[lch], energym0_per_ts_gain0, ts4Thresh);
#endif
// check as in cpu version if mahi is not needed
// FIXME: KNOWN ISSUE: observed a problem when rawCharge and pedestal
// are basically equal and generate -0.00000...
// needs to be treated properly
if (!(shrEnergyM0TotalAccum[lch] > 0 && energym0_per_ts_gain0 > ts4Thresh)) {
// do not need to run mahi minimization
//outputEnergy[gch] = 0; energy already inited to 0
outputChi2[gch] = -9999.f;
}
#ifdef HCAL_MAHI_GPUDEBUG
printf("method0_energy = %f max_sample = %d max_energy = %f time = %f\n",
method0_energy,
max_sample,
max_energy,
time);
#endif
}
//
// preparations for mahi fit
//
auto const amplitude = rawCharge - pedestalToUseForMethod0;
auto const noiseADC = (1. / std::sqrt(12)) * dfc;
auto const noisePhotoSq = amplitude > pedestalWidth ? (amplitude * fcByPE) : 0.f;
auto const noiseTerm = noiseADC * noiseADC + noisePhotoSq + pedestalWidth * pedestalWidth;
#ifdef HCAL_MAHI_GPUDEBUG
printf(
"charrge(%d) = %f pedestal(%d) = %f dfc(%d) = %f pedestalWidth(%d) = %f noiseADC(%d) = %f noisPhoto(%d) = "
"%f\n",
sample,
rawCharge,
sample,
pedestalToUseForMethod0,
sample,
dfc,
sample,
pedestalWidth,
sample,
noiseADC,
sample,
noisePhotoSq);
#endif
// store to global memory
amplitudesForChannel[sampleWithinWindow] = amplitude;
noiseTermsForChannel[sampleWithinWindow] = noiseTerm;
electronicNoiseTermsForChannel[sampleWithinWindow] = pedestalWidth;
}
// TODO: need to add an array of offsets for pulses (a la activeBXs...)
// Assume for now 8 pulses
__global__ void kernel_prep_pulseMatrices_sameNumberOfSamples(float* pulseMatrices,
float* pulseMatricesM,
float* pulseMatricesP,
int const* pulseOffsets,
float const* amplitudes,
uint32_t const* idsf01HE,
uint32_t const* idsf5HB,
uint32_t const* idsf3HB,
uint32_t const nchannelsf01HE,
uint32_t const nchannelsf5HB,
uint32_t const nchannelsTotal,
int8_t const* soiSamples,
uint32_t const* recoPulseShapeIds,
float const* acc25nsVecValues,
float const* diff25nsItvlVecValues,
float const* accVarLenIdxMinusOneVecValues,
float const* diffVarItvlIdxMinusOneVecValues,
float const* accVarLenIdxZeroVecValues,
float const* diffVarItvlIdxZeroVecValues,
float const meanTime,
float const timeSigmaSiPM,
float const timeSigmaHPD,
int const maxDepthHB,
int const maxDepthHE,
int const maxPhiHE,
int const firstHBRing,
int const lastHBRing,
int const firstHERing,
int const lastHERing,
int const nEtaHB,
int const nEtaHE,
uint32_t const offsetForHashes,
bool const applyTimeSlew,
float const tzeroTimeSlew,
float const slopeTimeSlew,
float const tmaxTimeSlew) {
// indices
auto const ipulse = threadIdx.y;
auto const npulses = blockDim.y;
auto const sample = threadIdx.x;
auto const nsamples = blockDim.x;
auto const lch = threadIdx.z;
auto const gch = lch + blockIdx.x * blockDim.z;
auto const nchannelsf015 = nchannelsf01HE + nchannelsf5HB;
if (gch >= nchannelsTotal)
return;
// conditions
auto const id = gch < nchannelsf01HE
? idsf01HE[gch]
: (gch < nchannelsf015 ? idsf5HB[gch - nchannelsf01HE] : idsf3HB[gch - nchannelsf015]);
//auto const id = gch >= nchannelsf01HE
// ? idsf5HB[gch - nchannelsf01HE]
// : idsf01HE[gch];
auto const deltaT = gch >= nchannelsf01HE && gch < nchannelsf015 ? timeSigmaHPD : timeSigmaSiPM;
auto const did = DetId{id};
auto const hashedId =
did.subdetId() == HcalBarrel
? hcal::reconstruction::did2linearIndexHB(id, maxDepthHB, firstHBRing, lastHBRing, nEtaHB)
: hcal::reconstruction::did2linearIndexHE(id, maxDepthHE, maxPhiHE, firstHERing, lastHERing, nEtaHE) +
offsetForHashes;
auto const recoPulseShapeId = recoPulseShapeIds[hashedId];
auto const* acc25nsVec = acc25nsVecValues + recoPulseShapeId * hcal::constants::maxPSshapeBin;
auto const* diff25nsItvlVec = diff25nsItvlVecValues + recoPulseShapeId * hcal::constants::maxPSshapeBin;
auto const* accVarLenIdxMinusOneVec = accVarLenIdxMinusOneVecValues + recoPulseShapeId * hcal::constants::nsPerBX;
auto const* diffVarItvlIdxMinusOneVec =
diffVarItvlIdxMinusOneVecValues + recoPulseShapeId * hcal::constants::nsPerBX;
auto const* accVarLenIdxZeroVec = accVarLenIdxZeroVecValues + recoPulseShapeId * hcal::constants::nsPerBX;
auto const* diffVarItvlIdxZeroVec = diffVarItvlIdxZeroVecValues + recoPulseShapeId * hcal::constants::nsPerBX;
// offset output arrays
auto* pulseMatrix = pulseMatrices + nsamples * npulses * gch;
auto* pulseMatrixM = pulseMatricesM + nsamples * npulses * gch;
auto* pulseMatrixP = pulseMatricesP + nsamples * npulses * gch;
// amplitude per ipulse
int const soi = soiSamples[gch];
int const pulseOffset = pulseOffsets[ipulse];
auto const amplitude = amplitudes[gch * nsamples + pulseOffset + soi];
#ifdef HCAL_MAHI_GPUDEBUG
#ifdef HCAL_MAHI_GPUDEBUG_FILTERDETID
if (id != DETID_TO_DEBUG)
return;
#endif
#endif
#ifdef HCAL_MAHI_GPUDEBUG
if (sample == 0 && ipulse == 0) {
for (int i = 0; i < 8; i++)
printf("amplitude(%d) = %f\n", i, amplitudes[gch * nsamples + i]);
printf("acc25nsVec and diff25nsItvlVec for recoPulseShapeId = %u\n", recoPulseShapeId);
for (int i = 0; i < 256; i++) {
printf("acc25nsVec(%d) = %f diff25nsItvlVec(%d) = %f\n", i, acc25nsVec[i], i, diff25nsItvlVec[i]);
}
printf("accVarLenIdxZEROVec and accVarLenIdxMinusOneVec\n");
for (int i = 0; i < 25; i++) {
printf("accVarLenIdxZEROVec(%d) = %f accVarLenIdxMinusOneVec(%d) = %f\n",
i,
accVarLenIdxZeroVec[i],
i,
accVarLenIdxMinusOneVec[i]);
}
printf("diffVarItvlIdxZEROVec and diffVarItvlIdxMinusOneVec\n");
for (int i = 0; i < 25; i++) {
printf("diffVarItvlIdxZEROVec(%d) = %f diffVarItvlIdxMinusOneVec(%d) = %f\n",
i,
diffVarItvlIdxZeroVec[i],
i,
diffVarItvlIdxMinusOneVec[i]);
}
}
#endif
auto t0 = meanTime;
if (applyTimeSlew) {
if (amplitude <= 1.0f)
t0 += hcal::reconstruction::compute_time_slew_delay(1.0, tzeroTimeSlew, slopeTimeSlew, tmaxTimeSlew);
else
t0 += hcal::reconstruction::compute_time_slew_delay(amplitude, tzeroTimeSlew, slopeTimeSlew, tmaxTimeSlew);
}
auto const t0m = -deltaT + t0;
auto const t0p = deltaT + t0;
#ifdef HCAL_MAHI_GPUDEBUG
if (sample == 0 && ipulse == 0) {
printf("time values: %f %f %f\n", t0, t0m, t0p);
}
if (sample == 0 && ipulse == 0) {
for (int i = 0; i < hcal::constants::maxSamples; i++) {
auto const value = hcal::reconstruction::compute_pulse_shape_value(t0,
i,
0,
acc25nsVec,
diff25nsItvlVec,
accVarLenIdxMinusOneVec,
diffVarItvlIdxMinusOneVec,
accVarLenIdxZeroVec,
diffVarItvlIdxZeroVec);
printf("pulse(%d) = %f\n", i, value);
}
printf("\n");
for (int i = 0; i < hcal::constants::maxSamples; i++) {
auto const value = hcal::reconstruction::compute_pulse_shape_value(t0p,
i,
0,
acc25nsVec,
diff25nsItvlVec,
accVarLenIdxMinusOneVec,
diffVarItvlIdxMinusOneVec,
accVarLenIdxZeroVec,
diffVarItvlIdxZeroVec);
printf("pulseP(%d) = %f\n", i, value);
}
printf("\n");
for (int i = 0; i < hcal::constants::maxSamples; i++) {
auto const value = hcal::reconstruction::compute_pulse_shape_value(t0m,
i,
0,
acc25nsVec,
diff25nsItvlVec,
accVarLenIdxMinusOneVec,
diffVarItvlIdxMinusOneVec,
accVarLenIdxZeroVec,
diffVarItvlIdxZeroVec);
printf("pulseM(%d) = %f\n", i, value);
}
}
#endif
// FIXME: shift should be treated properly,
// here assume 8 time slices and 8 samples
auto const shift = 4 - soi; // as in cpu version!
// auto const offset = ipulse - soi;
// auto const idx = sample - offset;
int32_t const idx = sample - pulseOffset;
auto const value = idx >= 0 && idx < nsamples
? hcal::reconstruction::compute_pulse_shape_value(t0,
idx,
shift,
acc25nsVec,
diff25nsItvlVec,
accVarLenIdxMinusOneVec,
diffVarItvlIdxMinusOneVec,
accVarLenIdxZeroVec,
diffVarItvlIdxZeroVec)
: 0;
auto const value_t0m = idx >= 0 && idx < nsamples
? hcal::reconstruction::compute_pulse_shape_value(t0m,
idx,
shift,
acc25nsVec,
diff25nsItvlVec,
accVarLenIdxMinusOneVec,
diffVarItvlIdxMinusOneVec,
accVarLenIdxZeroVec,
diffVarItvlIdxZeroVec)
: 0;
auto const value_t0p = idx >= 0 && idx < nsamples
? hcal::reconstruction::compute_pulse_shape_value(t0p,
idx,
shift,
acc25nsVec,
diff25nsItvlVec,
accVarLenIdxMinusOneVec,
diffVarItvlIdxMinusOneVec,
accVarLenIdxZeroVec,
diffVarItvlIdxZeroVec)
: 0;
// store to global
if (amplitude > 0.f) {
pulseMatrix[ipulse * nsamples + sample] = value;
pulseMatrixM[ipulse * nsamples + sample] = value_t0m;
pulseMatrixP[ipulse * nsamples + sample] = value_t0p;
} else {
pulseMatrix[ipulse * nsamples + sample] = 0.f;
pulseMatrixM[ipulse * nsamples + sample] = 0.f;
pulseMatrixP[ipulse * nsamples + sample] = 0.f;
}
}
template <int NSAMPLES, int NPULSES>
__forceinline__ __device__ void update_covariance(
calo::multifit::ColumnVector<NPULSES> const& resultAmplitudesVector,
calo::multifit::MapSymM<float, NSAMPLES>& covarianceMatrix,
Eigen::Map<const calo::multifit::ColMajorMatrix<NSAMPLES, NPULSES>> const& pulseMatrix,
Eigen::Map<const calo::multifit::ColMajorMatrix<NSAMPLES, NPULSES>> const& pulseMatrixM,
Eigen::Map<const calo::multifit::ColMajorMatrix<NSAMPLES, NPULSES>> const& pulseMatrixP) {
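// for every pulse with a non-zero fitted amplitude, add its shape uncertainty
// to the symmetric sample covariance:
// 0.5 * amplitude^2 * [ (P+ - P)(P+ - P)^T + (P- - P)(P- - P)^T ],
// where P, P+ and P- are the pulse columns evaluated at the nominal, +deltaT
// and -deltaT times; only the lower triangle is stored and updated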
CMS_UNROLL_LOOP
for (int ipulse = 0; ipulse < NPULSES; ipulse++) {
auto const resultAmplitude = resultAmplitudesVector(ipulse);
if (resultAmplitude == 0)
continue;
#ifdef HCAL_MAHI_GPUDEBUG
printf("pulse cov array for ibx = %d\n", ipulse);
#endif
// preload a column
float pmcol[NSAMPLES], pmpcol[NSAMPLES], pmmcol[NSAMPLES];
CMS_UNROLL_LOOP
for (int counter = 0; counter < NSAMPLES; counter++) {
pmcol[counter] = __ldg(&pulseMatrix.coeffRef(counter, ipulse));
pmpcol[counter] = __ldg(&pulseMatrixP.coeffRef(counter, ipulse));
pmmcol[counter] = __ldg(&pulseMatrixM.coeffRef(counter, ipulse));
}
auto const ampl2 = resultAmplitude * resultAmplitude;
CMS_UNROLL_LOOP
for (int col = 0; col < NSAMPLES; col++) {
auto const valueP_col = pmpcol[col];
auto const valueM_col = pmmcol[col];
auto const value_col = pmcol[col];
auto const tmppcol = valueP_col - value_col;
auto const tmpmcol = valueM_col - value_col;
// diagonal
auto tmp_value = 0.5 * (tmppcol * tmppcol + tmpmcol * tmpmcol);
covarianceMatrix(col, col) += ampl2 * tmp_value;
// FIXME: understand if this actually gets unrolled
CMS_UNROLL_LOOP
for (int row = col + 1; row < NSAMPLES; row++) {
float const valueP_row = pmpcol[row]; //pulseMatrixP(j, ipulseReal);
float const value_row = pmcol[row]; //pulseMatrix(j, ipulseReal);
float const valueM_row = pmmcol[row]; //pulseMatrixM(j, ipulseReal);
float tmpprow = valueP_row - value_row;
float tmpmrow = valueM_row - value_row;
auto const covValue = 0.5 * (tmppcol * tmpprow + tmpmcol * tmpmrow);
covarianceMatrix(row, col) += ampl2 * covValue;
}
}
}
}
template <int NSAMPLES, int NPULSES>
__global__ void kernel_minimize(float* outputEnergy,
float* outputChi2,
float const* __restrict__ inputAmplitudes,
float const* __restrict__ pulseMatrices,
float const* __restrict__ pulseMatricesM,
float const* __restrict__ pulseMatricesP,
int const* __restrict__ pulseOffsetValues,
float const* __restrict__ noiseTerms,
float const* __restrict__ electronicNoiseTerms,
int8_t const* __restrict__ soiSamples,
float const* __restrict__ noiseCorrelationValues,
float const* __restrict__ pedestalWidths,
float const* __restrict__ effectivePedestalWidths,
bool const useEffectivePedestals,
uint32_t const* __restrict__ idsf01HE,
uint32_t const* __restrict__ idsf5HB,
uint32_t const* __restrict__ idsf3HB,
float const* __restrict__ gainValues,
float const* __restrict__ respCorrectionValues,
uint32_t const nchannelsf01HE,
uint32_t const nchannelsf5HB,
uint32_t const nchannelsTotal,
uint32_t const offsetForHashes,
int const maxDepthHB,
int const maxDepthHE,
int const maxPhiHE,
int const firstHBRing,
int const lastHBRing,
int const firstHERing,
int const lastHERing,
int const nEtaHB,
int const nEtaHE) {
// can be relaxed if needed - minor updates are needed in that case!
static_assert(NPULSES == NSAMPLES);
// indices
auto const gch = threadIdx.x + blockIdx.x * blockDim.x;
auto const nchannelsf015 = nchannelsf01HE + nchannelsf5HB;
if (gch >= nchannelsTotal)
return;
// if chi2 is set to -9999 do not run minimization
if (outputChi2[gch] == -9999.f)
return;
// configure shared mem
extern __shared__ char shrmem[];
float* shrMatrixLFnnlsStorage =
reinterpret_cast<float*>(shrmem) + calo::multifit::MapSymM<float, NPULSES>::total * threadIdx.x;
float* shrAtAStorage = reinterpret_cast<float*>(shrmem) +
calo::multifit::MapSymM<float, NPULSES>::total * (threadIdx.x + blockDim.x);
// conditions for pedestal widths
auto const id = gch < nchannelsf01HE
? idsf01HE[gch]
: (gch < nchannelsf015 ? idsf5HB[gch - nchannelsf01HE] : idsf3HB[gch - nchannelsf015]);
auto const did = DetId{id};
auto const hashedId =
did.subdetId() == HcalBarrel
? hcal::reconstruction::did2linearIndexHB(id, maxDepthHB, firstHBRing, lastHBRing, nEtaHB)
: hcal::reconstruction::did2linearIndexHE(id, maxDepthHE, maxPhiHE, firstHERing, lastHERing, nEtaHE) +
offsetForHashes;
auto const* pedestalWidthsForChannel = useEffectivePedestals && (gch < nchannelsf01HE || gch >= nchannelsf015)
? effectivePedestalWidths + hashedId * 4
: pedestalWidths + hashedId * 4;
auto const averagePedestalWidth2 = 0.25 * (pedestalWidthsForChannel[0] * pedestalWidthsForChannel[0] +
pedestalWidthsForChannel[1] * pedestalWidthsForChannel[1] +
pedestalWidthsForChannel[2] * pedestalWidthsForChannel[2] +
pedestalWidthsForChannel[3] * pedestalWidthsForChannel[3]);
auto const* gains = gainValues + hashedId * 4;
// FIXME on cpu ts 0 capid was used - does it make any difference
auto const gain = gains[0];
auto const respCorrection = respCorrectionValues[hashedId];
auto const noisecorr = noiseCorrelationValues[hashedId];
#ifdef HCAL_MAHI_GPUDEBUG
#ifdef HCAL_MAHI_GPUDEBUG_FILTERDETID
if (id != DETID_TO_DEBUG)
return;
#endif
#endif
/*
// TODO: provide this properly
int const soi = soiSamples[gch];
*/
calo::multifit::ColumnVector<NPULSES, int> pulseOffsets;
CMS_UNROLL_LOOP
for (int i = 0; i < NPULSES; ++i)
pulseOffsets(i) = i;
// pulseOffsets(i) = pulseOffsetValues[i] - pulseOffsetValues[0];
// output amplitudes/weights
calo::multifit::ColumnVector<NPULSES> resultAmplitudesVector = calo::multifit::ColumnVector<NPULSES>::Zero();
// map views
Eigen::Map<const calo::multifit::ColumnVector<NSAMPLES>> inputAmplitudesView{inputAmplitudes + gch * NSAMPLES};
Eigen::Map<const calo::multifit::ColumnVector<NSAMPLES>> noiseTermsView{noiseTerms + gch * NSAMPLES};
Eigen::Map<const calo::multifit::ColumnVector<NSAMPLES>> noiseElectronicView{electronicNoiseTerms +
gch * NSAMPLES};
Eigen::Map<const calo::multifit::ColMajorMatrix<NSAMPLES, NPULSES>> glbPulseMatrixMView{pulseMatricesM +
gch * NSAMPLES * NPULSES};
Eigen::Map<const calo::multifit::ColMajorMatrix<NSAMPLES, NPULSES>> glbPulseMatrixPView{pulseMatricesP +
gch * NSAMPLES * NPULSES};
Eigen::Map<const calo::multifit::ColMajorMatrix<NSAMPLES, NPULSES>> glbPulseMatrixView{pulseMatrices +
gch * NSAMPLES * NPULSES};
#ifdef HCAL_MAHI_GPUDEBUG
for (int i = 0; i < NSAMPLES; i++)
printf("inputValues(%d) = %f noiseTerms(%d) = %f\n", i, inputAmplitudesView(i), i, noiseTermsView(i));
for (int i = 0; i < NSAMPLES; i++) {
for (int j = 0; j < NPULSES; j++)
printf("%f ", glbPulseMatrixView(i, j));
printf("\n");
}
printf("\n");
for (int i = 0; i < NSAMPLES; i++) {
for (int j = 0; j < NPULSES; j++)
printf("%f ", glbPulseMatrixMView(i, j));
printf("\n");
}
printf("\n");
for (int i = 0; i < NSAMPLES; i++) {
for (int j = 0; j < NPULSES; j++)
printf("%f ", glbPulseMatrixPView(i, j));
printf("\n");
}
#endif
int npassive = 0;
float chi2 = 0, previous_chi2 = 0.f, chi2_2itersback = 0.f;
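// each iteration: rebuild the covariance from the current amplitudes, Cholesky-decompose it,
// whiten the pulse matrix and the measured amplitudes by forward substitution,
// form AtA / Atb and run fast NNLS; stop when chi2 stops improving or oscillates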
for (int iter = 1; iter < nMaxItersMin; iter++) {
//float covarianceMatrixStorage[MapSymM<float, NSAMPLES>::total];
// NOTE: only works when NSAMPLES == NPULSES
// if does not hold -> slightly rearrange shared mem to still reuse
// shared memory
float* covarianceMatrixStorage = shrMatrixLFnnlsStorage;
calo::multifit::MapSymM<float, NSAMPLES> covarianceMatrix{covarianceMatrixStorage};
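// seed the covariance: with no noise correlation every element starts from the
// average pedestal width squared, otherwise it starts from zero; the per-sample
// noise terms are then added on the diagonal and the correlated electronic noise
// on the first sub-diagonal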
CMS_UNROLL_LOOP
for (int counter = 0; counter < calo::multifit::MapSymM<float, NSAMPLES>::total; counter++)
covarianceMatrixStorage[counter] = (noisecorr != 0.f) ? 0.f : averagePedestalWidth2;
CMS_UNROLL_LOOP
for (unsigned int counter = 0; counter < calo::multifit::MapSymM<float, NSAMPLES>::stride; counter++) {
covarianceMatrix(counter, counter) += noiseTermsView.coeffRef(counter);
if (counter != 0)
covarianceMatrix(counter, counter - 1) += noisecorr * __ldg(&noiseElectronicView.coeffRef(counter - 1)) *
__ldg(&noiseElectronicView.coeffRef(counter));
}
// update covariance matrix
update_covariance(
resultAmplitudesVector, covarianceMatrix, glbPulseMatrixView, glbPulseMatrixMView, glbPulseMatrixPView);
#ifdef HCAL_MAHI_GPUDEBUG
printf("covariance matrix\n");
for (int i = 0; i < 8; i++) {
for (int j = 0; j < 8; j++)
printf("%f ", covarianceMatrix(i, j));
printf("\n");
}
#endif
// compute Cholesky Decomposition L matrix
//matrixDecomposition.compute(covarianceMatrix);
//auto const& matrixL = matrixDecomposition.matrixL();
float matrixLStorage[calo::multifit::MapSymM<float, NSAMPLES>::total];
calo::multifit::MapSymM<float, NSAMPLES> matrixL{matrixLStorage};
calo::multifit::compute_decomposition_unrolled(matrixL, covarianceMatrix);
//
// replace eigen
//
//auto const& A = matrixDecomposition
// .matrixL()
// .solve(pulseMatrixView);
calo::multifit::ColMajorMatrix<NSAMPLES, NPULSES> A;
calo::multifit::solve_forward_subst_matrix(A, glbPulseMatrixView, matrixL);
//
// remove eigen
//
//auto const& b = matrixL
// .solve(inputAmplitudesView);
//
float reg_b[NSAMPLES];
calo::multifit::solve_forward_subst_vector(reg_b, inputAmplitudesView, matrixL);
// TODO: we do not really need to change these matrices
// will be fixed in the optimized version
//ColMajorMatrix<NPULSES, NPULSES> AtA = A.transpose() * A;
//ColumnVector<NPULSES> Atb = A.transpose() * b;
//ColMajorMatrix<NPULSES, NPULSES> AtA;
//float AtAStorage[MapSymM<float, NPULSES>::total];
calo::multifit::MapSymM<float, NPULSES> AtA{shrAtAStorage};
calo::multifit::ColumnVector<NPULSES> Atb;
CMS_UNROLL_LOOP
for (int icol = 0; icol < NPULSES; icol++) {
float reg_ai[NSAMPLES];
// load column icol
CMS_UNROLL_LOOP
for (int counter = 0; counter < NSAMPLES; counter++)
reg_ai[counter] = A(counter, icol);
// compute diagonal
float sum = 0.f;
CMS_UNROLL_LOOP
for (int counter = 0; counter < NSAMPLES; counter++)
sum += reg_ai[counter] * reg_ai[counter];
// store
AtA(icol, icol) = sum;
// go thru the other columns
CMS_UNROLL_LOOP
for (int j = icol + 1; j < NPULSES; j++) {
// load column j
float reg_aj[NSAMPLES];
CMS_UNROLL_LOOP
for (int counter = 0; counter < NSAMPLES; counter++)
reg_aj[counter] = A(counter, j);
// accum
float sum = 0.f;
CMS_UNROLL_LOOP
for (int counter = 0; counter < NSAMPLES; counter++)
sum += reg_aj[counter] * reg_ai[counter];
// store
//AtA(icol, j) = sum;
AtA(j, icol) = sum;
}
// Atb accum
float sum_atb = 0;
CMS_UNROLL_LOOP
for (int counter = 0; counter < NSAMPLES; counter++)
sum_atb += reg_ai[counter] * reg_b[counter];
// store atb
Atb(icol) = sum_atb;
}
#ifdef HCAL_MAHI_GPUDEBUG
printf("AtA\n");
for (int i = 0; i < 8; i++) {
for (int j = 0; j < 8; j++)
printf("%f ", AtA(i, j));
printf("\n");
}
printf("Atb\n");
for (int i = 0; i < 8; i++)
printf("%f ", Atb(i));
printf("\n");
printf("result Amplitudes before nnls\n");
for (int i = 0; i < 8; i++)
printf("%f ", resultAmplitudesVector(i));
printf("\n");
#endif
// for fnnls
calo::multifit::MapSymM<float, NPULSES> matrixLForFnnls{shrMatrixLFnnlsStorage};
// run fast nnls
calo::multifit::fnnls(
AtA, Atb, resultAmplitudesVector, npassive, pulseOffsets, matrixLForFnnls, nnlsThresh, nMaxItersNNLS, 10, 10);
#ifdef HCAL_MAHI_GPUDEBUG
printf("result Amplitudes\n");
for (int i = 0; i < 8; i++)
printf("resultAmplitudes(%d) = %f\n", i, resultAmplitudesVector(i));
#endif
calo::multifit::calculateChiSq(matrixL, glbPulseMatrixView, resultAmplitudesVector, inputAmplitudesView, chi2);
auto const deltaChi2 = std::abs(chi2 - previous_chi2);
if (chi2 == chi2_2itersback && chi2 < previous_chi2)
break;
// update
chi2_2itersback = previous_chi2;
previous_chi2 = chi2;
// exit condition
if (deltaChi2 < deltaChi2Threashold)
break;
}
#ifdef HCAL_MAHI_GPUDEBUG
for (int i = 0; i < NPULSES; i++)
printf("pulseOffsets(%d) = %d outputAmplitudes(%d) = %f\n", i, pulseOffsets(i), i, resultAmplitudesVector(i));
printf("chi2 = %f\n", chi2);
#endif
outputChi2[gch] = chi2;
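// energy is taken from the in-time pulse; assuming the offsets are ordered from the
// most negative BX, that pulse sits at index |pulseOffsetValues[0]| (offset 0 w.r.t. SOI)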
auto const idx_for_energy = std::abs(pulseOffsetValues[0]);
outputEnergy[gch] = (gain * resultAmplitudesVector(idx_for_energy)) * respCorrection;
/*
CMS_UNROLL_LOOP
for (int i=0; i<NPULSES; i++)
if (pulseOffsets[i] == soi)
// NOTE: gain is a number < 10^-3/4, multiply first to avoid stab issues
outputEnergy[gch] = (gain*resultAmplitudesVector(i))*respCorrection;
*/
}
} // namespace mahi
} // namespace hcal
namespace hcal {
namespace reconstruction {
void entryPoint(InputDataGPU const& inputGPU,
OutputDataGPU& outputGPU,
ConditionsProducts const& conditions,
ScratchDataGPU& scratch,
ConfigParameters const& configParameters,
hipStream_t cudaStream) {
auto const totalChannels = inputGPU.f01HEDigis.size + inputGPU.f5HBDigis.size + inputGPU.f3HBDigis.size;
// protections when the detector is out
if (totalChannels == 0)
return;
// FIXME: maybe move this assignment to emphasize this more clearly
// FIXME: number of channels for output might change given that
// some channels might be filtered out
outputGPU.recHits.size = totalChannels;
// TODO: this can be lifted by implementing a separate kernel
// similar to the default one, but properly handling the diff in #sample
// or modifying existing one
auto const f01nsamples = compute_nsamples<Flavor1>(inputGPU.f01HEDigis.stride);
auto const f5nsamples = compute_nsamples<Flavor5>(inputGPU.f5HBDigis.stride);
auto const f3nsamples = compute_nsamples<Flavor3>(inputGPU.f3HBDigis.stride);
int constexpr windowSize = 8;
int const startingSample = f01nsamples - windowSize;
assert(startingSample == 0 || startingSample == 2);
if (inputGPU.f01HEDigis.stride > 0 && inputGPU.f5HBDigis.stride > 0)
assert(f01nsamples == f5nsamples);
if (inputGPU.f01HEDigis.stride > 0 && inputGPU.f3HBDigis.stride > 0)
assert(f01nsamples == f3nsamples);
dim3 threadsPerBlock{windowSize, configParameters.kprep1dChannelsPerBlock};
int blocks = static_cast<uint32_t>(threadsPerBlock.y) > totalChannels
? 1
: (totalChannels + threadsPerBlock.y - 1) / threadsPerBlock.y;
int nbytesShared =
((2 * windowSize + 2) * sizeof(float) + sizeof(uint64_t)) * configParameters.kprep1dChannelsPerBlock;
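// per channel: 2*windowSize floats (per-TS Method-0 energies and charge-minus-pedestal),
// two accumulators, and one 64-bit packed (sample, energy) maximum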
hipLaunchKernelGGL(( hcal::mahi::kernel_prep1d_sameNumberOfSamples), dim3(blocks), dim3(threadsPerBlock), nbytesShared, cudaStream,
scratch.amplitudes.get(),
scratch.noiseTerms.get(),
scratch.electronicNoiseTerms.get(),
outputGPU.recHits.energy.get(),
outputGPU.recHits.chi2.get(),
inputGPU.f01HEDigis.data.get(),
inputGPU.f5HBDigis.data.get(),
inputGPU.f3HBDigis.data.get(),
inputGPU.f01HEDigis.ids.get(),
inputGPU.f5HBDigis.ids.get(),
inputGPU.f3HBDigis.ids.get(),
inputGPU.f01HEDigis.stride,
inputGPU.f5HBDigis.stride,
inputGPU.f3HBDigis.stride,
inputGPU.f01HEDigis.size,
inputGPU.f5HBDigis.size,
inputGPU.f5HBDigis.npresamples.get(),
scratch.soiSamples.get(),
outputGPU.recHits.energyM0.get(),
outputGPU.recHits.timeM0.get(),
outputGPU.recHits.did.get(),
totalChannels,
conditions.recoParams.param1,
conditions.recoParams.param2,
conditions.qieCoders.offsets,
conditions.qieCoders.slopes,
conditions.qieTypes.values,
conditions.pedestalWidths.values,
conditions.effectivePedestalWidths.values,
conditions.pedestals.values,
conditions.convertedEffectivePedestals ? conditions.convertedEffectivePedestals->values
: conditions.pedestals.values,
configParameters.useEffectivePedestals,
conditions.sipmParameters.type,
conditions.sipmParameters.fcByPE,
conditions.sipmCharacteristics.parLin1,
conditions.sipmCharacteristics.parLin2,
conditions.sipmCharacteristics.parLin3,
conditions.gains.values,
conditions.respCorrs.values,
conditions.topology->maxDepthHB(),
conditions.topology->maxDepthHE(),
conditions.recConstants->getNPhi(1) > hcal::reconstruction::IPHI_MAX ? conditions.recConstants->getNPhi(1)
: hcal::reconstruction::IPHI_MAX,
conditions.topology->firstHBRing(),
conditions.topology->lastHBRing(),
conditions.topology->firstHERing(),
conditions.topology->lastHERing(),
conditions.recConstants->getEtaRange(0).second - conditions.recConstants->getEtaRange(0).first + 1,
conditions.topology->firstHERing() > conditions.topology->lastHERing()
? 0
: (conditions.topology->lastHERing() - conditions.topology->firstHERing() + 1),
configParameters.sipmQTSShift,
configParameters.sipmQNTStoSum,
configParameters.firstSampleShift,
conditions.offsetForHashes,
configParameters.ts4Thresh,
startingSample);
cudaCheck(hipGetLastError());
// 1024 is the max threads per block for gtx1080
// FIXME: take this from cuda service or something like that
uint32_t const channelsPerBlock = 1024 / (windowSize * conditions.pulseOffsetsHost.size());
dim3 threadsPerBlock2{windowSize, static_cast<uint32_t>(conditions.pulseOffsetsHost.size()), channelsPerBlock};
int blocks2 =
threadsPerBlock2.z > totalChannels ? 1 : (totalChannels + threadsPerBlock2.z - 1) / threadsPerBlock2.z;
#ifdef HCAL_MAHI_CPUDEBUG
std::cout << "threads: " << threadsPerBlock2.x << " " << threadsPerBlock2.y << " " << threadsPerBlock2.z
<< std::endl;
std::cout << "blocks: " << blocks2 << std::endl;
#endif
hipLaunchKernelGGL(( hcal::mahi::kernel_prep_pulseMatrices_sameNumberOfSamples), dim3(blocks2), dim3(threadsPerBlock2), 0, cudaStream,
scratch.pulseMatrices.get(),
scratch.pulseMatricesM.get(),
scratch.pulseMatricesP.get(),
conditions.pulseOffsets.values,
scratch.amplitudes.get(),
inputGPU.f01HEDigis.ids.get(),
inputGPU.f5HBDigis.ids.get(),
inputGPU.f3HBDigis.ids.get(),
inputGPU.f01HEDigis.size,
inputGPU.f5HBDigis.size,
totalChannels,
scratch.soiSamples.get(),
conditions.recoParams.ids,
conditions.recoParams.acc25nsVec,
conditions.recoParams.diff25nsItvlVec,
conditions.recoParams.accVarLenIdxMinusOneVec,
conditions.recoParams.diffVarItvlIdxMinusOneVec,
conditions.recoParams.accVarLenIdxZEROVec,
conditions.recoParams.diffVarItvlIdxZEROVec,
configParameters.meanTime,
configParameters.timeSigmaSiPM,
configParameters.timeSigmaHPD,
conditions.topology->maxDepthHB(),
conditions.topology->maxDepthHE(),
conditions.recConstants->getNPhi(1) > hcal::reconstruction::IPHI_MAX ? conditions.recConstants->getNPhi(1)
: hcal::reconstruction::IPHI_MAX,
conditions.topology->firstHBRing(),
conditions.topology->lastHBRing(),
conditions.topology->firstHERing(),
conditions.topology->lastHERing(),
conditions.recConstants->getEtaRange(0).second - conditions.recConstants->getEtaRange(0).first + 1,
conditions.topology->firstHERing() > conditions.topology->lastHERing()
? 0
: (conditions.topology->lastHERing() - conditions.topology->firstHERing() + 1),
conditions.offsetForHashes,
configParameters.applyTimeSlew,
configParameters.tzeroTimeSlew,
configParameters.slopeTimeSlew,
configParameters.tmaxTimeSlew);
cudaCheck(hipGetLastError());
// number of samples is checked in above assert
if (conditions.pulseOffsetsHost.size() == 8u) {
// FIXME: provide constants from configuration
uint32_t threadsPerBlock = configParameters.kernelMinimizeThreads[0];
uint32_t blocks = threadsPerBlock > totalChannels ? 1 : (totalChannels + threadsPerBlock - 1) / threadsPerBlock;
auto const nbytesShared = 2 * threadsPerBlock * calo::multifit::MapSymM<float, 8>::total * sizeof(float);
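// per thread: two packed symmetric 8x8 matrices, one buffer reused for the covariance
// and the fnnls Cholesky factor, the other for AtA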
hipLaunchKernelGGL(( hcal::mahi::kernel_minimize<8, 8>), dim3(blocks), dim3(threadsPerBlock), nbytesShared, cudaStream,
outputGPU.recHits.energy.get(),
outputGPU.recHits.chi2.get(),
scratch.amplitudes.get(),
scratch.pulseMatrices.get(),
scratch.pulseMatricesM.get(),
scratch.pulseMatricesP.get(),
conditions.pulseOffsets.values,
scratch.noiseTerms.get(),
scratch.electronicNoiseTerms.get(),
scratch.soiSamples.get(),
conditions.sipmParameters.auxi2,
conditions.pedestalWidths.values,
conditions.effectivePedestalWidths.values,
configParameters.useEffectivePedestals,
inputGPU.f01HEDigis.ids.get(),
inputGPU.f5HBDigis.ids.get(),
inputGPU.f3HBDigis.ids.get(),
conditions.gains.values,
conditions.respCorrs.values,
inputGPU.f01HEDigis.size,
inputGPU.f5HBDigis.size,
totalChannels,
conditions.offsetForHashes,
conditions.topology->maxDepthHB(),
conditions.topology->maxDepthHE(),
conditions.recConstants->getNPhi(1) > hcal::reconstruction::IPHI_MAX ? conditions.recConstants->getNPhi(1)
: hcal::reconstruction::IPHI_MAX,
conditions.topology->firstHBRing(),
conditions.topology->lastHBRing(),
conditions.topology->firstHERing(),
conditions.topology->lastHERing(),
conditions.recConstants->getEtaRange(0).second - conditions.recConstants->getEtaRange(0).first + 1,
conditions.topology->firstHERing() > conditions.topology->lastHERing()
? 0
: (conditions.topology->lastHERing() - conditions.topology->firstHERing() + 1));
} else {
throw cms::Exception("Invalid MahiGPU configuration")
<< "Currently support only 8 pulses and 8 time samples and provided: " << f01nsamples << " samples and "
<< conditions.pulseOffsetsHost.size() << " pulses" << std::endl;
}
}
} // namespace reconstruction
} // namespace hcal
| ea917eb8b28da1658b166068c6eac1d2ca0698ee.cu | #include <Eigen/Dense>
#include "DataFormats/CaloRecHit/interface/MultifitComputations.h"
// needed to compile with USER_CXXFLAGS="-DCOMPUTE_TDC_TIME"
#include "DataFormats/HcalRecHit/interface/HcalSpecialTimes.h"
#include "FWCore/Utilities/interface/CMSUnrollLoop.h"
// TODO reuse some of the HCAL constats from
//#include "RecoLocalCalo/HcalRecAlgos/interface/HcalConstants.h"
#include "SimpleAlgoGPU.h"
#include "KernelHelpers.h"
#ifdef HCAL_MAHI_GPUDEBUG
#define DETID_TO_DEBUG 1125647428
#endif
namespace hcal {
namespace mahi {
// TODO: provide constants from configuration
// from RecoLocalCalo/HcalRecProducers/python/HBHEMahiParameters_cfi.py
constexpr int nMaxItersMin = 50;
constexpr int nMaxItersNNLS = 500;
constexpr double nnlsThresh = 1e-11;
constexpr float deltaChi2Threashold = 1e-3;
// from RecoLocalCalo/HcalRecProducers/src/HBHEPhase1Reconstructor.cc
__forceinline__ __device__ float get_raw_charge(double const charge,
double const pedestal,
float const* shrChargeMinusPedestal,
float const* parLin1Values,
float const* parLin2Values,
float const* parLin3Values,
int32_t const nsamplesForCompute,
int32_t const soi,
int const sipmQTSShift,
int const sipmQNTStoSum,
int const sipmType,
float const fcByPE,
bool const isqie11) {
float rawCharge;
if (!isqie11)
rawCharge = charge;
else {
auto const parLin1 = parLin1Values[sipmType - 1];
auto const parLin2 = parLin2Values[sipmType - 1];
auto const parLin3 = parLin3Values[sipmType - 1];
int const first = std::max(soi + sipmQTSShift, 0);
int const last = std::min(soi + sipmQNTStoSum, nsamplesForCompute);
float sipmq = 0.0f;
for (auto ts = first; ts < last; ts++)
sipmq += shrChargeMinusPedestal[threadIdx.y * nsamplesForCompute + ts];
auto const effectivePixelsFired = sipmq / fcByPE;
auto const factor =
hcal::reconstruction::compute_reco_correction_factor(parLin1, parLin2, parLin3, effectivePixelsFired);
rawCharge = (charge - pedestal) * factor + pedestal;
#ifdef HCAL_MAHI_GPUDEBUG
printf("first = %d last = %d sipmQ = %f factor = %f rawCharge = %f\n", first, last, sipmq, factor, rawCharge);
#endif
}
return rawCharge;
}
// Assume: same number of samples for HB and HE
// TODO: add/validate restrict (will increase #registers in use by the kernel)
__global__ void kernel_prep1d_sameNumberOfSamples(float* amplitudes,
float* noiseTerms,
float* electronicNoiseTerms,
float* outputEnergy,
float* outputChi2,
uint16_t const* dataf01HE,
uint16_t const* dataf5HB,
uint16_t const* dataf3HB,
uint32_t const* idsf01HE,
uint32_t const* idsf5HB,
uint32_t const* idsf3HB,
uint32_t const stridef01HE,
uint32_t const stridef5HB,
uint32_t const stridef3HB,
uint32_t const nchannelsf01HE,
uint32_t const nchannelsf5HB,
uint8_t const* npresamplesf5HB,
int8_t* soiSamples,
float* method0Energy,
float* method0Time,
uint32_t* outputdid,
uint32_t const nchannels,
uint32_t const* recoParam1Values,
uint32_t const* recoParam2Values,
float const* qieCoderOffsets,
float const* qieCoderSlopes,
int const* qieTypes,
float const* pedestalWidths,
float const* effectivePedestalWidths,
float const* pedestals,
float const* effectivePedestals,
bool const useEffectivePedestals,
int const* sipmTypeValues,
float const* fcByPEValues,
float const* parLin1Values,
float const* parLin2Values,
float const* parLin3Values,
float const* gainValues,
float const* respCorrectionValues,
int const maxDepthHB,
int const maxDepthHE,
int const maxPhiHE,
int const firstHBRing,
int const lastHBRing,
int const firstHERing,
int const lastHERing,
int const nEtaHB,
int const nEtaHE,
int const sipmQTSShift,
int const sipmQNTStoSum,
int const firstSampleShift,
uint32_t const offsetForHashes,
float const ts4Thresh,
int const startingSample) {
// indices + runtime constants
auto const sample = threadIdx.x + startingSample;
auto const sampleWithinWindow = threadIdx.x;
int32_t const nsamplesForCompute = blockDim.x;
auto const lch = threadIdx.y;
auto const gch = lch + blockDim.y * blockIdx.x;
auto const nchannels_per_block = blockDim.y;
auto const linearThPerBlock = threadIdx.x + threadIdx.y * blockDim.x;
// remove
if (gch >= nchannels)
return;
// initialize all output buffers
if (sampleWithinWindow == 0) {
outputdid[gch] = 0;
method0Energy[gch] = 0;
method0Time[gch] = 0;
outputEnergy[gch] = 0;
outputChi2[gch] = 0;
}
#ifdef HCAL_MAHI_GPUDEBUG
#ifdef HCAL_MAHI_GPUDEBUG_SINGLECHANNEL
if (gch > 0)
return;
#endif
#endif
// configure shared mem
extern __shared__ char smem[];
float* shrEnergyM0PerTS = reinterpret_cast<float*>(smem);
float* shrChargeMinusPedestal = shrEnergyM0PerTS + nsamplesForCompute * nchannels_per_block;
float* shrMethod0EnergyAccum = shrChargeMinusPedestal + nsamplesForCompute * nchannels_per_block;
float* shrEnergyM0TotalAccum = shrMethod0EnergyAccum + nchannels_per_block;
unsigned long long int* shrMethod0EnergySamplePair =
reinterpret_cast<unsigned long long int*>(shrEnergyM0TotalAccum + nchannels_per_block);
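// The highest-energy (sample, energy) pair is packed into a single 64-bit word
// (sample index in the high 32 bits, energy bit pattern in the low 32 bits)
// so the running maximum can be updated with one atomicCAS further below.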
if (sampleWithinWindow == 0) {
shrMethod0EnergyAccum[lch] = 0;
shrMethod0EnergySamplePair[lch] = __float_as_uint(std::numeric_limits<float>::min());
shrEnergyM0TotalAccum[lch] = 0;
}
// offset output
auto* amplitudesForChannel = amplitudes + nsamplesForCompute * gch;
auto* noiseTermsForChannel = noiseTerms + nsamplesForCompute * gch;
auto* electronicNoiseTermsForChannel = electronicNoiseTerms + nsamplesForCompute * gch;
auto const nchannelsf015 = nchannelsf01HE + nchannelsf5HB;
// get event input quantities
auto const stride = gch < nchannelsf01HE ? stridef01HE : (gch < nchannelsf015 ? stridef5HB : stridef3HB);
auto const nsamples = gch < nchannelsf01HE ? compute_nsamples<Flavor1>(stride)
: (gch < nchannelsf015 ? compute_nsamples<Flavor5>(stride)
: compute_nsamples<Flavor3>(stride));
#ifdef HCAL_MAHI_GPUDEBUG
assert(nsamples == nsamplesForCompute || nsamples - startingSample == nsamplesForCompute);
#endif
auto const id = gch < nchannelsf01HE
? idsf01HE[gch]
: (gch < nchannelsf015 ? idsf5HB[gch - nchannelsf01HE] : idsf3HB[gch - nchannelsf015]);
auto const did = HcalDetId{id};
auto const adc =
gch < nchannelsf01HE
? adc_for_sample<Flavor1>(dataf01HE + stride * gch, sample)
: (gch < nchannelsf015 ? adc_for_sample<Flavor5>(dataf5HB + stride * (gch - nchannelsf01HE), sample)
: adc_for_sample<Flavor3>(dataf3HB + stride * (gch - nchannelsf015), sample));
auto const capid =
gch < nchannelsf01HE
? capid_for_sample<Flavor1>(dataf01HE + stride * gch, sample)
: (gch < nchannelsf015 ? capid_for_sample<Flavor5>(dataf5HB + stride * (gch - nchannelsf01HE), sample)
: capid_for_sample<Flavor3>(dataf3HB + stride * (gch - nchannelsf015), sample));
#ifdef HCAL_MAHI_GPUDEBUG
#ifdef HCAL_MAHI_GPUDEBUG_FILTERDETID
if (id != DETID_TO_DEBUG)
return;
#endif
#endif
// compute hash for this did
auto const hashedId =
did.subdetId() == HcalBarrel
? hcal::reconstruction::did2linearIndexHB(id, maxDepthHB, firstHBRing, lastHBRing, nEtaHB)
: hcal::reconstruction::did2linearIndexHE(id, maxDepthHE, maxPhiHE, firstHERing, lastHERing, nEtaHE) +
offsetForHashes;
// conditions based on the hash
// FIXME: remove hardcoded values
auto const qieType = qieTypes[hashedId] > 0 ? 1 : 0; // 2 types at this point
auto const* qieOffsets = qieCoderOffsets + hashedId * HcalQIECodersGPU::numValuesPerChannel;
auto const* qieSlopes = qieCoderSlopes + hashedId * HcalQIECodersGPU::numValuesPerChannel;
auto const* pedestalsForChannel = pedestals + hashedId * 4;
auto const* pedestalWidthsForChannel = useEffectivePedestals && (gch < nchannelsf01HE || gch >= nchannelsf015)
? effectivePedestalWidths + hashedId * 4
: pedestalWidths + hashedId * 4;
auto const* gains = gainValues + hashedId * 4;
auto const gain = gains[capid];
auto const gain0 = gains[0];
auto const respCorrection = respCorrectionValues[hashedId];
auto const pedestal = pedestalsForChannel[capid];
auto const pedestalWidth = pedestalWidthsForChannel[capid];
// if needed, only use effective pedestals for f01
auto const pedestalToUseForMethod0 = useEffectivePedestals && (gch < nchannelsf01HE || gch >= nchannelsf015)
? effectivePedestals[hashedId * 4 + capid]
: pedestal;
auto const sipmType = sipmTypeValues[hashedId];
auto const fcByPE = fcByPEValues[hashedId];
auto const recoParam1 = recoParam1Values[hashedId];
auto const recoParam2 = recoParam2Values[hashedId];
#ifdef HCAL_MAHI_GPUDEBUG
printf("qieType = %d qieOffset0 = %f qieOffset1 = %f qieSlope0 = %f qieSlope1 = %f\n",
qieType,
qieOffsets[0],
qieOffsets[1],
qieSlopes[0],
qieSlopes[1]);
#endif
// compute charge
auto const charge = hcal::reconstruction::compute_coder_charge(qieType, adc, capid, qieOffsets, qieSlopes);
shrChargeMinusPedestal[linearThPerBlock] = charge - pedestal;
if (gch < nchannelsf01HE) {
// NOTE: assume that soi is high only for a single guy!
// which must be the case. cpu version does not check for that
// if that is not the case, we will see that with cuda memcheck
auto const soibit = soibit_for_sample<Flavor1>(dataf01HE + stride * gch, sample);
if (soibit == 1)
soiSamples[gch] = sampleWithinWindow;
} else if (gch >= nchannelsf015) {
auto const soibit = soibit_for_sample<Flavor3>(dataf3HB + stride * (gch - nchannelsf015), sample);
if (soibit == 1)
soiSamples[gch] = sampleWithinWindow;
}
__syncthreads();
int32_t const soi = gch < nchannelsf01HE
? soiSamples[gch]
: (gch < nchannelsf015 ? npresamplesf5HB[gch - nchannelsf01HE] : soiSamples[gch]);
//int32_t const soi = gch >= nchannelsf01HE
// ? npresamplesf5HB[gch - nchannelsf01HE]
// : soiSamples[gch];
// this is here just to make things uniform...
if (gch >= nchannelsf01HE && gch < nchannelsf015 && sampleWithinWindow == 0)
soiSamples[gch] = npresamplesf5HB[gch - nchannelsf01HE];
//
// compute various quantities (raw charge and tdc stuff)
// NOTE: this branch will be divergent only for a single warp that
// sits on the boundary when flavor 01 channels end and flavor 5 start
//
float const rawCharge = get_raw_charge(charge,
pedestal,
shrChargeMinusPedestal,
parLin1Values,
parLin2Values,
parLin3Values,
nsamplesForCompute,
soi,
sipmQTSShift,
sipmQNTStoSum,
sipmType,
fcByPE,
gch < nchannelsf01HE || gch >= nchannelsf015);
auto const dfc = hcal::reconstruction::compute_diff_charge_gain(
qieType, adc, capid, qieOffsets, qieSlopes, gch < nchannelsf01HE || gch >= nchannelsf015);
#ifdef COMPUTE_TDC_TIME
float tdcTime;
if (gch >= nchannelsf01HE && gch < nchannelsf015) {
tdcTime = HcalSpecialTimes::UNKNOWN_T_NOTDC;
} else {
if (gch < nchannelsf01HE)
tdcTime = HcalSpecialTimes::getTDCTime(tdc_for_sample<Flavor1>(dataf01HE + stride * gch, sample));
else if (gch >= nchannelsf015)
tdcTime =
HcalSpecialTimes::getTDCTime(tdc_for_sample<Flavor3>(dataf3HB + stride * (gch - nchannelsf015), sample));
}
#endif // COMPUTE_TDC_TIME
// compute method 0 quantities
// TODO: need to apply containment
// TODO: need to apply time slew
// TODO: for < run 3, apply HBM legacy energy correction
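// number of slices summed by Method 0: taken directly from recoParam2 for legacy
// values of recoParam1 (< 10), otherwise unpacked from bits [14:17] of recoParam1;
// the sum starts firstSampleShift slices relative to the SOI (clamped to the window)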
auto const nsamplesToAdd = recoParam1 < 10 ? recoParam2 : (recoParam1 >> 14) & 0xF;
auto const startSampleTmp = soi + firstSampleShift;
auto const startSample = startSampleTmp < 0 ? 0 : startSampleTmp;
auto const endSample =
startSample + nsamplesToAdd < nsamplesForCompute ? startSample + nsamplesToAdd : nsamplesForCompute;
// NOTE: gain is a small number < 10^-3, multiply it last
auto const energym0_per_ts = gain * ((rawCharge - pedestalToUseForMethod0) * respCorrection);
auto const energym0_per_ts_gain0 = gain0 * ((rawCharge - pedestalToUseForMethod0) * respCorrection);
// store to shared mem
shrEnergyM0PerTS[lch * nsamplesForCompute + sampleWithinWindow] = energym0_per_ts;
atomicAdd(&shrEnergyM0TotalAccum[lch], energym0_per_ts_gain0);
#ifdef HCAL_MAHI_GPUDEBUG
printf(
"id = %u sample = %d gch = %d hashedId = %u adc = %u capid = %u\n"
" charge = %f rawCharge = %f dfc = %f pedestal = %f\n"
" gain = %f respCorrection = %f energym0_per_ts = %f\n",
id,
sample,
gch,
hashedId,
adc,
capid,
charge,
rawCharge,
dfc,
pedestalToUseForMethod0,
gain,
respCorrection,
energym0_per_ts);
printf(
"startSample = %d endSample = %d param1 = %u param2 = %u\n", startSample, endSample, recoParam1, recoParam2);
#endif
if (sampleWithinWindow >= startSample && sampleWithinWindow < endSample) {
atomicAdd(&shrMethod0EnergyAccum[lch], energym0_per_ts);
// pack sample, energy as 64 bit value
unsigned long long int old = shrMethod0EnergySamplePair[lch], assumed;
unsigned long long int val =
(static_cast<unsigned long long int>(sampleWithinWindow) << 32) + __float_as_uint(energym0_per_ts);
do {
assumed = old;
// decode energy, sample values
//int const current_sample = (assumed >> 32) & 0xffffffff;
float const current_energy = __uint_as_float(assumed & 0xffffffff);
if (energym0_per_ts > current_energy)
old = atomicCAS(&shrMethod0EnergySamplePair[lch], assumed, val);
else
break;
} while (assumed != old);
}
__syncthreads();
// NOTE: must take soi, as values for that thread are used...
if (sampleWithinWindow == soi) {
auto const method0_energy = shrMethod0EnergyAccum[lch];
auto const val = shrMethod0EnergySamplePair[lch];
int const max_sample = (val >> 32) & 0xffffffff;
float const max_energy = __uint_as_float(val & 0xffffffff);
float const max_energy_1 =
max_sample < nsamplesForCompute - 1 ? shrEnergyM0PerTS[lch * nsamplesForCompute + max_sample + 1] : 0.f;
float const position = nsamplesToAdd < nsamplesForCompute ? max_sample - soi : max_sample;
auto const sum = max_energy + max_energy_1;
// FIXME: for full comparison with cpu method 0 timing,
// need to correct by slew
// requires an accumulator -> more shared mem -> omit here unless
// really needed
float const time =
max_energy > 0.f && max_energy_1 > 0.f ? 25.f * (position + max_energy_1 / sum) : 25.f * position;
// store method0 quantities to global mem
outputdid[gch] = id;
method0Energy[gch] = method0_energy;
method0Time[gch] = time;
#ifdef HCAL_MAHI_GPUDEBUG
printf("tsTOT = %f tstrig = %f ts4Thresh = %f\n", shrEnergyM0TotalAccum[lch], energym0_per_ts_gain0, ts4Thresh);
#endif
// check as in cpu version if mahi is not needed
// FIXME: KNOWN ISSUE: observed a problem when rawCharge and pedestal
// are basically equal and generate -0.00000...
// needs to be treated properly
if (!(shrEnergyM0TotalAccum[lch] > 0 && energym0_per_ts_gain0 > ts4Thresh)) {
// do not need to run mahi minimization
//outputEnergy[gch] = 0; energy already inited to 0
outputChi2[gch] = -9999.f;
}
#ifdef HCAL_MAHI_GPUDEBUG
printf("method0_energy = %f max_sample = %d max_energy = %f time = %f\n",
method0_energy,
max_sample,
max_energy,
time);
#endif
}
//
// preparations for mahi fit
//
auto const amplitude = rawCharge - pedestalToUseForMethod0;
auto const noiseADC = (1. / std::sqrt(12)) * dfc;
auto const noisePhotoSq = amplitude > pedestalWidth ? (amplitude * fcByPE) : 0.f;
auto const noiseTerm = noiseADC * noiseADC + noisePhotoSq + pedestalWidth * pedestalWidth;
#ifdef HCAL_MAHI_GPUDEBUG
printf(
"charrge(%d) = %f pedestal(%d) = %f dfc(%d) = %f pedestalWidth(%d) = %f noiseADC(%d) = %f noisPhoto(%d) = "
"%f\n",
sample,
rawCharge,
sample,
pedestalToUseForMethod0,
sample,
dfc,
sample,
pedestalWidth,
sample,
noiseADC,
sample,
noisePhotoSq);
#endif
// store to global memory
amplitudesForChannel[sampleWithinWindow] = amplitude;
noiseTermsForChannel[sampleWithinWindow] = noiseTerm;
electronicNoiseTermsForChannel[sampleWithinWindow] = pedestalWidth;
}
// TODO: need to add an array of offsets for pulses (a la activeBXs...)
// Assume for now 8 pulses
__global__ void kernel_prep_pulseMatrices_sameNumberOfSamples(float* pulseMatrices,
float* pulseMatricesM,
float* pulseMatricesP,
int const* pulseOffsets,
float const* amplitudes,
uint32_t const* idsf01HE,
uint32_t const* idsf5HB,
uint32_t const* idsf3HB,
uint32_t const nchannelsf01HE,
uint32_t const nchannelsf5HB,
uint32_t const nchannelsTotal,
int8_t const* soiSamples,
uint32_t const* recoPulseShapeIds,
float const* acc25nsVecValues,
float const* diff25nsItvlVecValues,
float const* accVarLenIdxMinusOneVecValues,
float const* diffVarItvlIdxMinusOneVecValues,
float const* accVarLenIdxZeroVecValues,
float const* diffVarItvlIdxZeroVecValues,
float const meanTime,
float const timeSigmaSiPM,
float const timeSigmaHPD,
int const maxDepthHB,
int const maxDepthHE,
int const maxPhiHE,
int const firstHBRing,
int const lastHBRing,
int const firstHERing,
int const lastHERing,
int const nEtaHB,
int const nEtaHE,
uint32_t const offsetForHashes,
bool const applyTimeSlew,
float const tzeroTimeSlew,
float const slopeTimeSlew,
float const tmaxTimeSlew) {
// indices
auto const ipulse = threadIdx.y;
auto const npulses = blockDim.y;
auto const sample = threadIdx.x;
auto const nsamples = blockDim.x;
auto const lch = threadIdx.z;
auto const gch = lch + blockIdx.x * blockDim.z;
auto const nchannelsf015 = nchannelsf01HE + nchannelsf5HB;
if (gch >= nchannelsTotal)
return;
// conditions
auto const id = gch < nchannelsf01HE
? idsf01HE[gch]
: (gch < nchannelsf015 ? idsf5HB[gch - nchannelsf01HE] : idsf3HB[gch - nchannelsf015]);
//auto const id = gch >= nchannelsf01HE
// ? idsf5HB[gch - nchannelsf01HE]
// : idsf01HE[gch];
auto const deltaT = gch >= nchannelsf01HE && gch < nchannelsf015 ? timeSigmaHPD : timeSigmaSiPM;
auto const did = DetId{id};
auto const hashedId =
did.subdetId() == HcalBarrel
? hcal::reconstruction::did2linearIndexHB(id, maxDepthHB, firstHBRing, lastHBRing, nEtaHB)
: hcal::reconstruction::did2linearIndexHE(id, maxDepthHE, maxPhiHE, firstHERing, lastHERing, nEtaHE) +
offsetForHashes;
auto const recoPulseShapeId = recoPulseShapeIds[hashedId];
auto const* acc25nsVec = acc25nsVecValues + recoPulseShapeId * hcal::constants::maxPSshapeBin;
auto const* diff25nsItvlVec = diff25nsItvlVecValues + recoPulseShapeId * hcal::constants::maxPSshapeBin;
auto const* accVarLenIdxMinusOneVec = accVarLenIdxMinusOneVecValues + recoPulseShapeId * hcal::constants::nsPerBX;
auto const* diffVarItvlIdxMinusOneVec =
diffVarItvlIdxMinusOneVecValues + recoPulseShapeId * hcal::constants::nsPerBX;
auto const* accVarLenIdxZeroVec = accVarLenIdxZeroVecValues + recoPulseShapeId * hcal::constants::nsPerBX;
auto const* diffVarItvlIdxZeroVec = diffVarItvlIdxZeroVecValues + recoPulseShapeId * hcal::constants::nsPerBX;
// offset output arrays
auto* pulseMatrix = pulseMatrices + nsamples * npulses * gch;
auto* pulseMatrixM = pulseMatricesM + nsamples * npulses * gch;
auto* pulseMatrixP = pulseMatricesP + nsamples * npulses * gch;
// amplitude per ipulse
int const soi = soiSamples[gch];
int const pulseOffset = pulseOffsets[ipulse];
auto const amplitude = amplitudes[gch * nsamples + pulseOffset + soi];
#ifdef HCAL_MAHI_GPUDEBUG
#ifdef HCAL_MAHI_GPUDEBUG_FILTERDETID
if (id != DETID_TO_DEBUG)
return;
#endif
#endif
#ifdef HCAL_MAHI_GPUDEBUG
if (sample == 0 && ipulse == 0) {
for (int i = 0; i < 8; i++)
printf("amplitude(%d) = %f\n", i, amplitudes[gch * nsamples + i]);
printf("acc25nsVec and diff25nsItvlVec for recoPulseShapeId = %u\n", recoPulseShapeId);
for (int i = 0; i < 256; i++) {
printf("acc25nsVec(%d) = %f diff25nsItvlVec(%d) = %f\n", i, acc25nsVec[i], i, diff25nsItvlVec[i]);
}
printf("accVarLenIdxZEROVec and accVarLenIdxMinusOneVec\n");
for (int i = 0; i < 25; i++) {
printf("accVarLenIdxZEROVec(%d) = %f accVarLenIdxMinusOneVec(%d) = %f\n",
i,
accVarLenIdxZeroVec[i],
i,
accVarLenIdxMinusOneVec[i]);
}
printf("diffVarItvlIdxZEROVec and diffVarItvlIdxMinusOneVec\n");
for (int i = 0; i < 25; i++) {
printf("diffVarItvlIdxZEROVec(%d) = %f diffVarItvlIdxMinusOneVec(%d) = %f\n",
i,
diffVarItvlIdxZeroVec[i],
i,
diffVarItvlIdxMinusOneVec[i]);
}
}
#endif
auto t0 = meanTime;
if (applyTimeSlew) {
if (amplitude <= 1.0f)
t0 += hcal::reconstruction::compute_time_slew_delay(1.0, tzeroTimeSlew, slopeTimeSlew, tmaxTimeSlew);
else
t0 += hcal::reconstruction::compute_time_slew_delay(amplitude, tzeroTimeSlew, slopeTimeSlew, tmaxTimeSlew);
}
auto const t0m = -deltaT + t0;
auto const t0p = deltaT + t0;
#ifdef HCAL_MAHI_GPUDEBUG
if (sample == 0 && ipulse == 0) {
printf("time values: %f %f %f\n", t0, t0m, t0p);
}
if (sample == 0 && ipulse == 0) {
for (int i = 0; i < hcal::constants::maxSamples; i++) {
auto const value = hcal::reconstruction::compute_pulse_shape_value(t0,
i,
0,
acc25nsVec,
diff25nsItvlVec,
accVarLenIdxMinusOneVec,
diffVarItvlIdxMinusOneVec,
accVarLenIdxZeroVec,
diffVarItvlIdxZeroVec);
printf("pulse(%d) = %f\n", i, value);
}
printf("\n");
for (int i = 0; i < hcal::constants::maxSamples; i++) {
auto const value = hcal::reconstruction::compute_pulse_shape_value(t0p,
i,
0,
acc25nsVec,
diff25nsItvlVec,
accVarLenIdxMinusOneVec,
diffVarItvlIdxMinusOneVec,
accVarLenIdxZeroVec,
diffVarItvlIdxZeroVec);
printf("pulseP(%d) = %f\n", i, value);
}
printf("\n");
for (int i = 0; i < hcal::constants::maxSamples; i++) {
auto const value = hcal::reconstruction::compute_pulse_shape_value(t0m,
i,
0,
acc25nsVec,
diff25nsItvlVec,
accVarLenIdxMinusOneVec,
diffVarItvlIdxMinusOneVec,
accVarLenIdxZeroVec,
diffVarItvlIdxZeroVec);
printf("pulseM(%d) = %f\n", i, value);
}
}
#endif
// FIXME: shift should be treated properly,
// here assume 8 time slices and 8 samples
auto const shift = 4 - soi; // as in cpu version!
// auto const offset = ipulse - soi;
// auto const idx = sample - offset;
int32_t const idx = sample - pulseOffset;
auto const value = idx >= 0 && idx < nsamples
? hcal::reconstruction::compute_pulse_shape_value(t0,
idx,
shift,
acc25nsVec,
diff25nsItvlVec,
accVarLenIdxMinusOneVec,
diffVarItvlIdxMinusOneVec,
accVarLenIdxZeroVec,
diffVarItvlIdxZeroVec)
: 0;
auto const value_t0m = idx >= 0 && idx < nsamples
? hcal::reconstruction::compute_pulse_shape_value(t0m,
idx,
shift,
acc25nsVec,
diff25nsItvlVec,
accVarLenIdxMinusOneVec,
diffVarItvlIdxMinusOneVec,
accVarLenIdxZeroVec,
diffVarItvlIdxZeroVec)
: 0;
auto const value_t0p = idx >= 0 && idx < nsamples
? hcal::reconstruction::compute_pulse_shape_value(t0p,
idx,
shift,
acc25nsVec,
diff25nsItvlVec,
accVarLenIdxMinusOneVec,
diffVarItvlIdxMinusOneVec,
accVarLenIdxZeroVec,
diffVarItvlIdxZeroVec)
: 0;
// store to global
if (amplitude > 0.f) {
pulseMatrix[ipulse * nsamples + sample] = value;
pulseMatrixM[ipulse * nsamples + sample] = value_t0m;
pulseMatrixP[ipulse * nsamples + sample] = value_t0p;
} else {
pulseMatrix[ipulse * nsamples + sample] = 0.f;
pulseMatrixM[ipulse * nsamples + sample] = 0.f;
pulseMatrixP[ipulse * nsamples + sample] = 0.f;
}
}
template <int NSAMPLES, int NPULSES>
__forceinline__ __device__ void update_covariance(
calo::multifit::ColumnVector<NPULSES> const& resultAmplitudesVector,
calo::multifit::MapSymM<float, NSAMPLES>& covarianceMatrix,
Eigen::Map<const calo::multifit::ColMajorMatrix<NSAMPLES, NPULSES>> const& pulseMatrix,
Eigen::Map<const calo::multifit::ColMajorMatrix<NSAMPLES, NPULSES>> const& pulseMatrixM,
Eigen::Map<const calo::multifit::ColMajorMatrix<NSAMPLES, NPULSES>> const& pulseMatrixP) {
CMS_UNROLL_LOOP
for (int ipulse = 0; ipulse < NPULSES; ipulse++) {
auto const resultAmplitude = resultAmplitudesVector(ipulse);
if (resultAmplitude == 0)
continue;
#ifdef HCAL_MAHI_GPUDEBUG
printf("pulse cov array for ibx = %d\n", ipulse);
#endif
// preload a column
float pmcol[NSAMPLES], pmpcol[NSAMPLES], pmmcol[NSAMPLES];
CMS_UNROLL_LOOP
for (int counter = 0; counter < NSAMPLES; counter++) {
pmcol[counter] = __ldg(&pulseMatrix.coeffRef(counter, ipulse));
pmpcol[counter] = __ldg(&pulseMatrixP.coeffRef(counter, ipulse));
pmmcol[counter] = __ldg(&pulseMatrixM.coeffRef(counter, ipulse));
}
auto const ampl2 = resultAmplitude * resultAmplitude;
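// pulse-shape uncertainty: add 0.5 * amplitude^2 * [(P(+dt) - P)(P(+dt) - P)^T + (P(-dt) - P)(P(-dt) - P)^T],
// accumulated column by column over the lower triangle below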
CMS_UNROLL_LOOP
for (int col = 0; col < NSAMPLES; col++) {
auto const valueP_col = pmpcol[col];
auto const valueM_col = pmmcol[col];
auto const value_col = pmcol[col];
auto const tmppcol = valueP_col - value_col;
auto const tmpmcol = valueM_col - value_col;
// diagonal
auto tmp_value = 0.5 * (tmppcol * tmppcol + tmpmcol * tmpmcol);
covarianceMatrix(col, col) += ampl2 * tmp_value;
// FIXME: understand if this actually gets unrolled
CMS_UNROLL_LOOP
for (int row = col + 1; row < NSAMPLES; row++) {
float const valueP_row = pmpcol[row]; //pulseMatrixP(j, ipulseReal);
float const value_row = pmcol[row]; //pulseMatrix(j, ipulseReal);
float const valueM_row = pmmcol[row]; //pulseMatrixM(j, ipulseReal);
float tmpprow = valueP_row - value_row;
float tmpmrow = valueM_row - value_row;
auto const covValue = 0.5 * (tmppcol * tmpprow + tmpmcol * tmpmrow);
covarianceMatrix(row, col) += ampl2 * covValue;
}
}
}
}
template <int NSAMPLES, int NPULSES>
__global__ void kernel_minimize(float* outputEnergy,
float* outputChi2,
float const* __restrict__ inputAmplitudes,
float const* __restrict__ pulseMatrices,
float const* __restrict__ pulseMatricesM,
float const* __restrict__ pulseMatricesP,
int const* __restrict__ pulseOffsetValues,
float const* __restrict__ noiseTerms,
float const* __restrict__ electronicNoiseTerms,
int8_t const* __restrict__ soiSamples,
float const* __restrict__ noiseCorrelationValues,
float const* __restrict__ pedestalWidths,
float const* __restrict__ effectivePedestalWidths,
bool const useEffectivePedestals,
uint32_t const* __restrict__ idsf01HE,
uint32_t const* __restrict__ idsf5HB,
uint32_t const* __restrict__ idsf3HB,
float const* __restrict__ gainValues,
float const* __restrict__ respCorrectionValues,
uint32_t const nchannelsf01HE,
uint32_t const nchannelsf5HB,
uint32_t const nchannelsTotal,
uint32_t const offsetForHashes,
int const maxDepthHB,
int const maxDepthHE,
int const maxPhiHE,
int const firstHBRing,
int const lastHBRing,
int const firstHERing,
int const lastHERing,
int const nEtaHB,
int const nEtaHE) {
// can be relaxed if needed - minor updates are needed in that case!
static_assert(NPULSES == NSAMPLES);
// indices
auto const gch = threadIdx.x + blockIdx.x * blockDim.x;
auto const nchannelsf015 = nchannelsf01HE + nchannelsf5HB;
if (gch >= nchannelsTotal)
return;
// if chi2 is set to -9999 do not run minimization
if (outputChi2[gch] == -9999.f)
return;
// configure shared mem
extern __shared__ char shrmem[];
float* shrMatrixLFnnlsStorage =
reinterpret_cast<float*>(shrmem) + calo::multifit::MapSymM<float, NPULSES>::total * threadIdx.x;
float* shrAtAStorage = reinterpret_cast<float*>(shrmem) +
calo::multifit::MapSymM<float, NPULSES>::total * (threadIdx.x + blockDim.x);
// conditions for pedestal widths
auto const id = gch < nchannelsf01HE
? idsf01HE[gch]
: (gch < nchannelsf015 ? idsf5HB[gch - nchannelsf01HE] : idsf3HB[gch - nchannelsf015]);
auto const did = DetId{id};
auto const hashedId =
did.subdetId() == HcalBarrel
? hcal::reconstruction::did2linearIndexHB(id, maxDepthHB, firstHBRing, lastHBRing, nEtaHB)
: hcal::reconstruction::did2linearIndexHE(id, maxDepthHE, maxPhiHE, firstHERing, lastHERing, nEtaHE) +
offsetForHashes;
auto const* pedestalWidthsForChannel = useEffectivePedestals && (gch < nchannelsf01HE || gch >= nchannelsf015)
? effectivePedestalWidths + hashedId * 4
: pedestalWidths + hashedId * 4;
auto const averagePedestalWidth2 = 0.25 * (pedestalWidthsForChannel[0] * pedestalWidthsForChannel[0] +
pedestalWidthsForChannel[1] * pedestalWidthsForChannel[1] +
pedestalWidthsForChannel[2] * pedestalWidthsForChannel[2] +
pedestalWidthsForChannel[3] * pedestalWidthsForChannel[3]);
auto const* gains = gainValues + hashedId * 4;
// FIXME: on cpu the ts 0 capid was used - does it make any difference?
auto const gain = gains[0];
auto const respCorrection = respCorrectionValues[hashedId];
auto const noisecorr = noiseCorrelationValues[hashedId];
#ifdef HCAL_MAHI_GPUDEBUG
#ifdef HCAL_MAHI_GPUDEBUG_FILTERDETID
if (id != DETID_TO_DEBUG)
return;
#endif
#endif
/*
// TODO: provide this properly
int const soi = soiSamples[gch];
*/
calo::multifit::ColumnVector<NPULSES, int> pulseOffsets;
CMS_UNROLL_LOOP
for (int i = 0; i < NPULSES; ++i)
pulseOffsets(i) = i;
// pulseOffsets(i) = pulseOffsetValues[i] - pulseOffsetValues[0];
// output amplitudes/weights
calo::multifit::ColumnVector<NPULSES> resultAmplitudesVector = calo::multifit::ColumnVector<NPULSES>::Zero();
// map views
Eigen::Map<const calo::multifit::ColumnVector<NSAMPLES>> inputAmplitudesView{inputAmplitudes + gch * NSAMPLES};
Eigen::Map<const calo::multifit::ColumnVector<NSAMPLES>> noiseTermsView{noiseTerms + gch * NSAMPLES};
Eigen::Map<const calo::multifit::ColumnVector<NSAMPLES>> noiseElectronicView{electronicNoiseTerms +
gch * NSAMPLES};
Eigen::Map<const calo::multifit::ColMajorMatrix<NSAMPLES, NPULSES>> glbPulseMatrixMView{pulseMatricesM +
gch * NSAMPLES * NPULSES};
Eigen::Map<const calo::multifit::ColMajorMatrix<NSAMPLES, NPULSES>> glbPulseMatrixPView{pulseMatricesP +
gch * NSAMPLES * NPULSES};
Eigen::Map<const calo::multifit::ColMajorMatrix<NSAMPLES, NPULSES>> glbPulseMatrixView{pulseMatrices +
gch * NSAMPLES * NPULSES};
#ifdef HCAL_MAHI_GPUDEBUG
for (int i = 0; i < NSAMPLES; i++)
printf("inputValues(%d) = %f noiseTerms(%d) = %f\n", i, inputAmplitudesView(i), i, noiseTermsView(i));
for (int i = 0; i < NSAMPLES; i++) {
for (int j = 0; j < NPULSES; j++)
printf("%f ", glbPulseMatrixView(i, j));
printf("\n");
}
printf("\n");
for (int i = 0; i < NSAMPLES; i++) {
for (int j = 0; j < NPULSES; j++)
printf("%f ", glbPulseMatrixMView(i, j));
printf("\n");
}
printf("\n");
for (int i = 0; i < NSAMPLES; i++) {
for (int j = 0; j < NPULSES; j++)
printf("%f ", glbPulseMatrixPView(i, j));
printf("\n");
}
#endif
int npassive = 0;
float chi2 = 0, previous_chi2 = 0.f, chi2_2itersback = 0.f;
for (int iter = 1; iter < nMaxItersMin; iter++) {
//float covarianceMatrixStorage[MapSymM<float, NSAMPLES>::total];
// NOTE: only works when NSAMPLES == NPULSES
// if that does not hold -> slightly rearrange shared mem to still reuse
// shared memory
float* covarianceMatrixStorage = shrMatrixLFnnlsStorage;
calo::multifit::MapSymM<float, NSAMPLES> covarianceMatrix{covarianceMatrixStorage};
CMS_UNROLL_LOOP
for (int counter = 0; counter < calo::multifit::MapSymM<float, NSAMPLES>::total; counter++)
covarianceMatrixStorage[counter] = (noisecorr != 0.f) ? 0.f : averagePedestalWidth2;
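// add the per-sample noise variance on the diagonal and, when noise correlation is enabled
// (noisecorr != 0), the correlated electronic-noise term on the first sub-diagonal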
CMS_UNROLL_LOOP
for (unsigned int counter = 0; counter < calo::multifit::MapSymM<float, NSAMPLES>::stride; counter++) {
covarianceMatrix(counter, counter) += noiseTermsView.coeffRef(counter);
if (counter != 0)
covarianceMatrix(counter, counter - 1) += noisecorr * __ldg(&noiseElectronicView.coeffRef(counter - 1)) *
__ldg(&noiseElectronicView.coeffRef(counter));
}
// update covariance matrix
update_covariance(
resultAmplitudesVector, covarianceMatrix, glbPulseMatrixView, glbPulseMatrixMView, glbPulseMatrixPView);
#ifdef HCAL_MAHI_GPUDEBUG
printf("covariance matrix\n");
for (int i = 0; i < 8; i++) {
for (int j = 0; j < 8; j++)
printf("%f ", covarianceMatrix(i, j));
printf("\n");
}
#endif
// compute Cholesky Decomposition L matrix
//matrixDecomposition.compute(covarianceMatrix);
//auto const& matrixL = matrixDecomposition.matrixL();
float matrixLStorage[calo::multifit::MapSymM<float, NSAMPLES>::total];
calo::multifit::MapSymM<float, NSAMPLES> matrixL{matrixLStorage};
calo::multifit::compute_decomposition_unrolled(matrixL, covarianceMatrix);
//
// replace eigen
//
//auto const& A = matrixDecomposition
// .matrixL()
// .solve(pulseMatrixView);
calo::multifit::ColMajorMatrix<NSAMPLES, NPULSES> A;
calo::multifit::solve_forward_subst_matrix(A, glbPulseMatrixView, matrixL);
//
// remove eigen
//
//auto const& b = matrixL
// .solve(inputAmplitudesView);
//
float reg_b[NSAMPLES];
calo::multifit::solve_forward_subst_vector(reg_b, inputAmplitudesView, matrixL);
// TODO: we do not really need to change these matrices
// will be fixed in the optimized version
//ColMajorMatrix<NPULSES, NPULSES> AtA = A.transpose() * A;
//ColumnVector<NPULSES> Atb = A.transpose() * b;
//ColMajorMatrix<NPULSES, NPULSES> AtA;
//float AtAStorage[MapSymM<float, NPULSES>::total];
calo::multifit::MapSymM<float, NPULSES> AtA{shrAtAStorage};
calo::multifit::ColumnVector<NPULSES> Atb;
CMS_UNROLL_LOOP
for (int icol = 0; icol < NPULSES; icol++) {
float reg_ai[NSAMPLES];
// load column icol
CMS_UNROLL_LOOP
for (int counter = 0; counter < NSAMPLES; counter++)
reg_ai[counter] = A(counter, icol);
// compute diagonal
float sum = 0.f;
CMS_UNROLL_LOOP
for (int counter = 0; counter < NSAMPLES; counter++)
sum += reg_ai[counter] * reg_ai[counter];
// store
AtA(icol, icol) = sum;
// go thru the other columns
CMS_UNROLL_LOOP
for (int j = icol + 1; j < NPULSES; j++) {
// load column j
float reg_aj[NSAMPLES];
CMS_UNROLL_LOOP
for (int counter = 0; counter < NSAMPLES; counter++)
reg_aj[counter] = A(counter, j);
// accum
float sum = 0.f;
CMS_UNROLL_LOOP
for (int counter = 0; counter < NSAMPLES; counter++)
sum += reg_aj[counter] * reg_ai[counter];
// store
//AtA(icol, j) = sum;
AtA(j, icol) = sum;
}
// Atb accum
float sum_atb = 0;
CMS_UNROLL_LOOP
for (int counter = 0; counter < NSAMPLES; counter++)
sum_atb += reg_ai[counter] * reg_b[counter];
// store atb
Atb(icol) = sum_atb;
}
#ifdef HCAL_MAHI_GPUDEBUG
printf("AtA\n");
for (int i = 0; i < 8; i++) {
for (int j = 0; j < 8; j++)
printf("%f ", AtA(i, j));
printf("\n");
}
printf("Atb\n");
for (int i = 0; i < 8; i++)
printf("%f ", Atb(i));
printf("\n");
printf("result Amplitudes before nnls\n");
for (int i = 0; i < 8; i++)
printf("%f ", resultAmplitudesVector(i));
printf("\n");
#endif
// for fnnls
calo::multifit::MapSymM<float, NPULSES> matrixLForFnnls{shrMatrixLFnnlsStorage};
// run fast nnls
calo::multifit::fnnls(
AtA, Atb, resultAmplitudesVector, npassive, pulseOffsets, matrixLForFnnls, nnlsThresh, nMaxItersNNLS, 10, 10);
#ifdef HCAL_MAHI_GPUDEBUG
printf("result Amplitudes\n");
for (int i = 0; i < 8; i++)
printf("resultAmplitudes(%d) = %f\n", i, resultAmplitudesVector(i));
#endif
calo::multifit::calculateChiSq(matrixL, glbPulseMatrixView, resultAmplitudesVector, inputAmplitudesView, chi2);
auto const deltaChi2 = std::abs(chi2 - previous_chi2);
if (chi2 == chi2_2itersback && chi2 < previous_chi2)
break;
// update
chi2_2itersback = previous_chi2;
previous_chi2 = chi2;
// exit condition
if (deltaChi2 < deltaChi2Threashold)
break;
}
#ifdef HCAL_MAHI_GPUDEBUG
for (int i = 0; i < NPULSES; i++)
printf("pulseOffsets(%d) = %d outputAmplitudes(%d) = %f\n", i, pulseOffsets(i), i, resultAmplitudesVector(i));
printf("chi2 = %f\n", chi2);
#endif
outputChi2[gch] = chi2;
auto const idx_for_energy = std::abs(pulseOffsetValues[0]);
outputEnergy[gch] = (gain * resultAmplitudesVector(idx_for_energy)) * respCorrection;
/*
CMS_UNROLL_LOOP
for (int i=0; i<NPULSES; i++)
if (pulseOffsets[i] == soi)
// NOTE: gain is a number < 10^-3/4, multiply first to avoid stab issues
outputEnergy[gch] = (gain*resultAmplitudesVector(i))*respCorrection;
*/
}
} // namespace mahi
} // namespace hcal
namespace hcal {
namespace reconstruction {
void entryPoint(InputDataGPU const& inputGPU,
OutputDataGPU& outputGPU,
ConditionsProducts const& conditions,
ScratchDataGPU& scratch,
ConfigParameters const& configParameters,
cudaStream_t cudaStream) {
auto const totalChannels = inputGPU.f01HEDigis.size + inputGPU.f5HBDigis.size + inputGPU.f3HBDigis.size;
// protection for when the detector is out (no channels to process)
if (totalChannels == 0)
return;
// FIXME: maybe move this assignment to emphasize this more clearly
// FIXME: number of channels for output might change given that
// some channels might be filtered out
outputGPU.recHits.size = totalChannels;
// TODO: this can be lifted by implementing a separate kernel
// similar to the default one, but properly handling the difference in #samples,
// or by modifying the existing one
auto const f01nsamples = compute_nsamples<Flavor1>(inputGPU.f01HEDigis.stride);
auto const f5nsamples = compute_nsamples<Flavor5>(inputGPU.f5HBDigis.stride);
auto const f3nsamples = compute_nsamples<Flavor3>(inputGPU.f3HBDigis.stride);
int constexpr windowSize = 8;
int const startingSample = f01nsamples - windowSize;
assert(startingSample == 0 || startingSample == 2);
if (inputGPU.f01HEDigis.stride > 0 && inputGPU.f5HBDigis.stride > 0)
assert(f01nsamples == f5nsamples);
if (inputGPU.f01HEDigis.stride > 0 && inputGPU.f3HBDigis.stride > 0)
assert(f01nsamples == f3nsamples);
dim3 threadsPerBlock{windowSize, configParameters.kprep1dChannelsPerBlock};
int blocks = static_cast<uint32_t>(threadsPerBlock.y) > totalChannels
? 1
: (totalChannels + threadsPerBlock.y - 1) / threadsPerBlock.y;
int nbytesShared =
((2 * windowSize + 2) * sizeof(float) + sizeof(uint64_t)) * configParameters.kprep1dChannelsPerBlock;
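// per channel: (2 * windowSize + 2) floats of per-sample scratch and accumulators plus one 64-bit
// (sample, energy) pair used by the method 0 maximum search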
hcal::mahi::kernel_prep1d_sameNumberOfSamples<<<blocks, threadsPerBlock, nbytesShared, cudaStream>>>(
scratch.amplitudes.get(),
scratch.noiseTerms.get(),
scratch.electronicNoiseTerms.get(),
outputGPU.recHits.energy.get(),
outputGPU.recHits.chi2.get(),
inputGPU.f01HEDigis.data.get(),
inputGPU.f5HBDigis.data.get(),
inputGPU.f3HBDigis.data.get(),
inputGPU.f01HEDigis.ids.get(),
inputGPU.f5HBDigis.ids.get(),
inputGPU.f3HBDigis.ids.get(),
inputGPU.f01HEDigis.stride,
inputGPU.f5HBDigis.stride,
inputGPU.f3HBDigis.stride,
inputGPU.f01HEDigis.size,
inputGPU.f5HBDigis.size,
inputGPU.f5HBDigis.npresamples.get(),
scratch.soiSamples.get(),
outputGPU.recHits.energyM0.get(),
outputGPU.recHits.timeM0.get(),
outputGPU.recHits.did.get(),
totalChannels,
conditions.recoParams.param1,
conditions.recoParams.param2,
conditions.qieCoders.offsets,
conditions.qieCoders.slopes,
conditions.qieTypes.values,
conditions.pedestalWidths.values,
conditions.effectivePedestalWidths.values,
conditions.pedestals.values,
conditions.convertedEffectivePedestals ? conditions.convertedEffectivePedestals->values
: conditions.pedestals.values,
configParameters.useEffectivePedestals,
conditions.sipmParameters.type,
conditions.sipmParameters.fcByPE,
conditions.sipmCharacteristics.parLin1,
conditions.sipmCharacteristics.parLin2,
conditions.sipmCharacteristics.parLin3,
conditions.gains.values,
conditions.respCorrs.values,
conditions.topology->maxDepthHB(),
conditions.topology->maxDepthHE(),
conditions.recConstants->getNPhi(1) > hcal::reconstruction::IPHI_MAX ? conditions.recConstants->getNPhi(1)
: hcal::reconstruction::IPHI_MAX,
conditions.topology->firstHBRing(),
conditions.topology->lastHBRing(),
conditions.topology->firstHERing(),
conditions.topology->lastHERing(),
conditions.recConstants->getEtaRange(0).second - conditions.recConstants->getEtaRange(0).first + 1,
conditions.topology->firstHERing() > conditions.topology->lastHERing()
? 0
: (conditions.topology->lastHERing() - conditions.topology->firstHERing() + 1),
configParameters.sipmQTSShift,
configParameters.sipmQNTStoSum,
configParameters.firstSampleShift,
conditions.offsetForHashes,
configParameters.ts4Thresh,
startingSample);
cudaCheck(cudaGetLastError());
// 1024 is the max threads per block for gtx1080
// FIXME: take this from cuda service or something like that
uint32_t const channelsPerBlock = 1024 / (windowSize * conditions.pulseOffsetsHost.size());
dim3 threadsPerBlock2{windowSize, static_cast<uint32_t>(conditions.pulseOffsetsHost.size()), channelsPerBlock};
int blocks2 =
threadsPerBlock2.z > totalChannels ? 1 : (totalChannels + threadsPerBlock2.z - 1) / threadsPerBlock2.z;
#ifdef HCAL_MAHI_CPUDEBUG
std::cout << "threads: " << threadsPerBlock2.x << " " << threadsPerBlock2.y << " " << threadsPerBlock2.z
<< std::endl;
std::cout << "blocks: " << blocks2 << std::endl;
#endif
hcal::mahi::kernel_prep_pulseMatrices_sameNumberOfSamples<<<blocks2, threadsPerBlock2, 0, cudaStream>>>(
scratch.pulseMatrices.get(),
scratch.pulseMatricesM.get(),
scratch.pulseMatricesP.get(),
conditions.pulseOffsets.values,
scratch.amplitudes.get(),
inputGPU.f01HEDigis.ids.get(),
inputGPU.f5HBDigis.ids.get(),
inputGPU.f3HBDigis.ids.get(),
inputGPU.f01HEDigis.size,
inputGPU.f5HBDigis.size,
totalChannels,
scratch.soiSamples.get(),
conditions.recoParams.ids,
conditions.recoParams.acc25nsVec,
conditions.recoParams.diff25nsItvlVec,
conditions.recoParams.accVarLenIdxMinusOneVec,
conditions.recoParams.diffVarItvlIdxMinusOneVec,
conditions.recoParams.accVarLenIdxZEROVec,
conditions.recoParams.diffVarItvlIdxZEROVec,
configParameters.meanTime,
configParameters.timeSigmaSiPM,
configParameters.timeSigmaHPD,
conditions.topology->maxDepthHB(),
conditions.topology->maxDepthHE(),
conditions.recConstants->getNPhi(1) > hcal::reconstruction::IPHI_MAX ? conditions.recConstants->getNPhi(1)
: hcal::reconstruction::IPHI_MAX,
conditions.topology->firstHBRing(),
conditions.topology->lastHBRing(),
conditions.topology->firstHERing(),
conditions.topology->lastHERing(),
conditions.recConstants->getEtaRange(0).second - conditions.recConstants->getEtaRange(0).first + 1,
conditions.topology->firstHERing() > conditions.topology->lastHERing()
? 0
: (conditions.topology->lastHERing() - conditions.topology->firstHERing() + 1),
conditions.offsetForHashes,
configParameters.applyTimeSlew,
configParameters.tzeroTimeSlew,
configParameters.slopeTimeSlew,
configParameters.tmaxTimeSlew);
cudaCheck(cudaGetLastError());
// number of samples is checked in above assert
if (conditions.pulseOffsetsHost.size() == 8u) {
// FIXME: provide constants from configuration
uint32_t threadsPerBlock = configParameters.kernelMinimizeThreads[0];
uint32_t blocks = threadsPerBlock > totalChannels ? 1 : (totalChannels + threadsPerBlock - 1) / threadsPerBlock;
auto const nbytesShared = 2 * threadsPerBlock * calo::multifit::MapSymM<float, 8>::total * sizeof(float);
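// shared memory: two MapSymM<float, 8> scratch areas per thread, one reused for the covariance /
// fnnls L matrix and one for AtA (cf. shrMatrixLFnnlsStorage and shrAtAStorage in kernel_minimize)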
hcal::mahi::kernel_minimize<8, 8><<<blocks, threadsPerBlock, nbytesShared, cudaStream>>>(
outputGPU.recHits.energy.get(),
outputGPU.recHits.chi2.get(),
scratch.amplitudes.get(),
scratch.pulseMatrices.get(),
scratch.pulseMatricesM.get(),
scratch.pulseMatricesP.get(),
conditions.pulseOffsets.values,
scratch.noiseTerms.get(),
scratch.electronicNoiseTerms.get(),
scratch.soiSamples.get(),
conditions.sipmParameters.auxi2,
conditions.pedestalWidths.values,
conditions.effectivePedestalWidths.values,
configParameters.useEffectivePedestals,
inputGPU.f01HEDigis.ids.get(),
inputGPU.f5HBDigis.ids.get(),
inputGPU.f3HBDigis.ids.get(),
conditions.gains.values,
conditions.respCorrs.values,
inputGPU.f01HEDigis.size,
inputGPU.f5HBDigis.size,
totalChannels,
conditions.offsetForHashes,
conditions.topology->maxDepthHB(),
conditions.topology->maxDepthHE(),
conditions.recConstants->getNPhi(1) > hcal::reconstruction::IPHI_MAX ? conditions.recConstants->getNPhi(1)
: hcal::reconstruction::IPHI_MAX,
conditions.topology->firstHBRing(),
conditions.topology->lastHBRing(),
conditions.topology->firstHERing(),
conditions.topology->lastHERing(),
conditions.recConstants->getEtaRange(0).second - conditions.recConstants->getEtaRange(0).first + 1,
conditions.topology->firstHERing() > conditions.topology->lastHERing()
? 0
: (conditions.topology->lastHERing() - conditions.topology->firstHERing() + 1));
} else {
throw cms::Exception("Invalid MahiGPU configuration")
<< "Currently support only 8 pulses and 8 time samples and provided: " << f01nsamples << " samples and "
<< conditions.pulseOffsetsHost.size() << " pulses" << std::endl;
}
}
} // namespace reconstruction
} // namespace hcal
|
c9661551969496157e368b0b02019a4b83862f73.hip | // !!! This is a file automatically generated by hipify!!!
#include "net.h"
//#include "layer.h"
#define cudaErrCheck(stat) { cudaErrCheck_((stat), __FILE__, __LINE__); }
static void cudaErrCheck_(hipError_t stat, const char *file, int line) {
if (stat != hipSuccess) {
fprintf(stderr, "CUDA Error: %s %s %d\n", hipGetErrorString(stat), file, line);
}
}
#define cublasErrCheck(stat) { cublasErrCheck_((stat), __FILE__, __LINE__); }
static void cublasErrCheck_(hipblasStatus_t stat, const char *file, int line) {
if (stat != HIPBLAS_STATUS_SUCCESS) {
fprintf(stderr, "cuBLAS Error: %d %s %d\n", stat, file, line);
}
}
#define curandErrCheck(stat) { curandErrCheck_((stat), __FILE__, __LINE__); }
static void curandErrCheck_(hiprandStatus_t stat, const char *file, int line) {
if (stat != HIPRAND_STATUS_SUCCESS) {
fprintf(stderr, "cuRand Error: %d %s %d\n", stat, file, line);
}
}
Net::Net(int nLayers, int cell_dim) {
// copy the layers
//input_buf_dim_=input_buf_dim;
cell_dim_=cell_dim;
//layers_=NULL;
//input_buf_ = NULL; ///< buffers for forward pass
propagate_buf_ = NULL; ///< buffers for forward pass
tmp_h_fw_n = NULL;
tmp_i_fw_n = NULL;
// back-propagation buffer
tmp_h_bw_n = NULL;
tmp_i_bw_n = NULL;
h_data_n = NULL;
c_data_n = NULL;
h_data_bw_n = NULL;
c_data_bw_n = NULL;
/*for(int i=0; i<nLayers; i++) {
Layer* L=NULL;
layers_.push_back(L);
}*/
// create empty buffers
//propagate_buf_.resize(NumLayers()+1);
}
Net::~Net() {
//cudaErrCheck(hipFree(input_buf_));
cudaErrCheck(hipFree(propagate_buf_)); //2 buffers
cudaErrCheck(hipFree(h_data_n));
cudaErrCheck(hipFree(c_data_n));
cudaErrCheck(hipFree(c_data_bw_n));
cudaErrCheck(hipFree(h_data_bw_n));
cudaErrCheck(hipFree(tmp_h_fw_n));
cudaErrCheck(hipFree(tmp_i_fw_n));
cudaErrCheck(hipFree(tmp_h_bw_n));
cudaErrCheck(hipFree(tmp_i_bw_n));
Destroy();
}
void Net::Resize(int seqLength){
//cudaErrCheck(hipFree(input_buf_));
cudaErrCheck(hipFree(propagate_buf_)); //2 buffers
cudaErrCheck(hipFree(h_data_n));
cudaErrCheck(hipFree(c_data_n));
cudaErrCheck(hipFree(c_data_bw_n));
cudaErrCheck(hipFree(h_data_bw_n));
cudaErrCheck(hipFree(tmp_h_fw_n));
cudaErrCheck(hipFree(tmp_i_fw_n));
cudaErrCheck(hipFree(tmp_h_bw_n));
cudaErrCheck(hipFree(tmp_i_bw_n));
//cudaErrCheck(hipMalloc((void**)&input_buf_, seqLength * input_buf_dim_ * sizeof(float)));
cudaErrCheck(hipMalloc((void**)&propagate_buf_,2* seqLength * 2 * cell_dim_ * sizeof(float))); //2 buffers
cudaErrCheck(hipMalloc((void**)&h_data_n, 2 * cell_dim_ * sizeof(float)));
cudaErrCheck(hipMalloc((void**)&c_data_n, 2 * cell_dim_ * sizeof(float)));
cudaErrCheck(hipMalloc((void**)&c_data_bw_n, 2 * cell_dim_ * sizeof(float)));
cudaErrCheck(hipMalloc((void**)&h_data_bw_n, 2 * cell_dim_ * sizeof(float)));
cudaErrCheck(hipMalloc((void**)&tmp_h_fw_n, 4 * cell_dim_ * sizeof(float)));
cudaErrCheck(hipMalloc((void**)&tmp_i_fw_n, (seqLength+1) * 4 * cell_dim_ * sizeof(float)));
cudaErrCheck(hipMalloc((void**)&tmp_h_bw_n, 4 * cell_dim_ * sizeof(float)));
cudaErrCheck(hipMalloc((void**)&tmp_i_bw_n, (seqLength+1) * 4 * cell_dim_ * sizeof(float)));
/*hipMemset(h_data_n, 0, cell_dim_*sizeof(float));
hipMemset(c_data_n, 0, cell_dim_*sizeof(float));
hipMemset(h_data_bw_n + seqLength*cell_dim_, 0, cell_dim_*sizeof(float));
hipMemset(c_data_bw_n + seqLength*cell_dim_, 0, cell_dim_*sizeof(float));
*/
}
void Net::Feedforward(hipblasHandle_t handle, float* in, float* out, int seqLength) {
// we need at least 2 input buffers
// propagate by using exactly 2 auxiliary buffers
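// layer L writes into half (L % 2) of propagate_buf_ and reads from half ((L - 1) % 2),
// so two buffers are enough for any number of layers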
int L = 0;
float time=0.f;
time+=layers_[L]->Propagate(handle, in, propagate_buf_ + (L%2)*seqLength * 2 * cell_dim_, seqLength, tmp_h_fw_n, tmp_i_fw_n, tmp_h_bw_n, tmp_i_bw_n, h_data_n, c_data_n, h_data_bw_n, c_data_bw_n);
for(L++; L<NumLayers(); L++) {
time+=layers_[L]->Propagate(handle, propagate_buf_ + ((L-1)%2)*seqLength*2*cell_dim_ ,propagate_buf_ + (L%2)*seqLength*2*cell_dim_, seqLength, tmp_h_fw_n, tmp_i_fw_n, tmp_h_bw_n, tmp_i_bw_n, h_data_n, c_data_n, h_data_bw_n, c_data_bw_n);
}
time+=Af_l_->Propagate(handle, propagate_buf_ + ((L-1)%2)*seqLength*2*cell_dim_, out, seqLength);
//printf("timing precise = %f ms", time);
//layers_[L]->Propagate(propagate_buf_[(L-1)%2], out); //not commented
// release the buffers we don't need anymore
}
int Net::OutputDim() {
return layers_.back()->OutputDim();
}
int Net::InputDim() {
return layers_.front()->InputDim();
}
Layer* Net::GetLayer(int layer) {
return layers_[layer];
}
void Net::SetLayer(int c, Layer *layer) {
delete layers_[c];
layers_[c] = layer;
}
void Net::AppendLayer(Layer* dynamically_allocated_layer) {
// append,
layers_.push_back(dynamically_allocated_layer);
}
void Net::AppendAffineTransformLayer(AffineTransform *dynamically_allocated_AffineTransform){
Af_l_=dynamically_allocated_AffineTransform;
}
void Net::Destroy() {
for(int i=0; i<NumLayers(); i++) {
delete layers_[i];
}
delete Af_l_;
layers_.resize(0);
}
| c9661551969496157e368b0b02019a4b83862f73.cu | #include "net.h"
//#include "layer.h"
#define cudaErrCheck(stat) { cudaErrCheck_((stat), __FILE__, __LINE__); }
static void cudaErrCheck_(cudaError_t stat, const char *file, int line) {
if (stat != cudaSuccess) {
fprintf(stderr, "CUDA Error: %s %s %d\n", cudaGetErrorString(stat), file, line);
}
}
#define cublasErrCheck(stat) { cublasErrCheck_((stat), __FILE__, __LINE__); }
static void cublasErrCheck_(cublasStatus_t stat, const char *file, int line) {
if (stat != CUBLAS_STATUS_SUCCESS) {
fprintf(stderr, "cuBLAS Error: %d %s %d\n", stat, file, line);
}
}
#define curandErrCheck(stat) { curandErrCheck_((stat), __FILE__, __LINE__); }
static void curandErrCheck_(curandStatus_t stat, const char *file, int line) {
if (stat != CURAND_STATUS_SUCCESS) {
fprintf(stderr, "cuRand Error: %d %s %d\n", stat, file, line);
}
}
Net::Net(int nLayers, int cell_dim) {
// copy the layers
//input_buf_dim_=input_buf_dim;
cell_dim_=cell_dim;
//layers_=NULL;
//input_buf_ = NULL; ///< buffers for forward pass
propagate_buf_ = NULL; ///< buffers for forward pass
tmp_h_fw_n = NULL;
tmp_i_fw_n = NULL;
// back-propagation buffer
tmp_h_bw_n = NULL;
tmp_i_bw_n = NULL;
h_data_n = NULL;
c_data_n = NULL;
h_data_bw_n = NULL;
c_data_bw_n = NULL;
/*for(int i=0; i<nLayers; i++) {
Layer* L=NULL;
layers_.push_back(L);
}*/
// create empty buffers
//propagate_buf_.resize(NumLayers()+1);
}
Net::~Net() {
//cudaErrCheck(cudaFree(input_buf_));
cudaErrCheck(cudaFree(propagate_buf_)); //2 buffers
cudaErrCheck(cudaFree(h_data_n));
cudaErrCheck(cudaFree(c_data_n));
cudaErrCheck(cudaFree(c_data_bw_n));
cudaErrCheck(cudaFree(h_data_bw_n));
cudaErrCheck(cudaFree(tmp_h_fw_n));
cudaErrCheck(cudaFree(tmp_i_fw_n));
cudaErrCheck(cudaFree(tmp_h_bw_n));
cudaErrCheck(cudaFree(tmp_i_bw_n));
Destroy();
}
void Net::Resize(int seqLength){
//cudaErrCheck(cudaFree(input_buf_));
cudaErrCheck(cudaFree(propagate_buf_)); //2 buffers
cudaErrCheck(cudaFree(h_data_n));
cudaErrCheck(cudaFree(c_data_n));
cudaErrCheck(cudaFree(c_data_bw_n));
cudaErrCheck(cudaFree(h_data_bw_n));
cudaErrCheck(cudaFree(tmp_h_fw_n));
cudaErrCheck(cudaFree(tmp_i_fw_n));
cudaErrCheck(cudaFree(tmp_h_bw_n));
cudaErrCheck(cudaFree(tmp_i_bw_n));
//cudaErrCheck(cudaMalloc((void**)&input_buf_, seqLength * input_buf_dim_ * sizeof(float)));
cudaErrCheck(cudaMalloc((void**)&propagate_buf_,2* seqLength * 2 * cell_dim_ * sizeof(float))); //2 buffers
cudaErrCheck(cudaMalloc((void**)&h_data_n, 2 * cell_dim_ * sizeof(float)));
cudaErrCheck(cudaMalloc((void**)&c_data_n, 2 * cell_dim_ * sizeof(float)));
cudaErrCheck(cudaMalloc((void**)&c_data_bw_n, 2 * cell_dim_ * sizeof(float)));
cudaErrCheck(cudaMalloc((void**)&h_data_bw_n, 2 * cell_dim_ * sizeof(float)));
cudaErrCheck(cudaMalloc((void**)&tmp_h_fw_n, 4 * cell_dim_ * sizeof(float)));
cudaErrCheck(cudaMalloc((void**)&tmp_i_fw_n, (seqLength+1) * 4 * cell_dim_ * sizeof(float)));
cudaErrCheck(cudaMalloc((void**)&tmp_h_bw_n, 4 * cell_dim_ * sizeof(float)));
cudaErrCheck(cudaMalloc((void**)&tmp_i_bw_n, (seqLength+1) * 4 * cell_dim_ * sizeof(float)));
/*cudaMemset(h_data_n, 0, cell_dim_*sizeof(float));
cudaMemset(c_data_n, 0, cell_dim_*sizeof(float));
cudaMemset(h_data_bw_n + seqLength*cell_dim_, 0, cell_dim_*sizeof(float));
cudaMemset(c_data_bw_n + seqLength*cell_dim_, 0, cell_dim_*sizeof(float));
*/
}
void Net::Feedforward(cublasHandle_t handle, float* in, float* out, int seqLength) {
// we need at least 2 input buffers
// propagate by using exactly 2 auxiliary buffers
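// layer L writes into half (L % 2) of propagate_buf_ and reads from half ((L - 1) % 2),
// so two buffers are enough for any number of layers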
int L = 0;
float time=0.f;
time+=layers_[L]->Propagate(handle, in, propagate_buf_ + (L%2)*seqLength * 2 * cell_dim_, seqLength, tmp_h_fw_n, tmp_i_fw_n, tmp_h_bw_n, tmp_i_bw_n, h_data_n, c_data_n, h_data_bw_n, c_data_bw_n);
for(L++; L<NumLayers(); L++) {
time+=layers_[L]->Propagate(handle, propagate_buf_ + ((L-1)%2)*seqLength*2*cell_dim_ ,propagate_buf_ + (L%2)*seqLength*2*cell_dim_, seqLength, tmp_h_fw_n, tmp_i_fw_n, tmp_h_bw_n, tmp_i_bw_n, h_data_n, c_data_n, h_data_bw_n, c_data_bw_n);
}
time+=Af_l_->Propagate(handle, propagate_buf_ + ((L-1)%2)*seqLength*2*cell_dim_, out, seqLength);
//printf("timing precise = %f ms", time);
//layers_[L]->Propagate(propagate_buf_[(L-1)%2], out); //not commented
// release the buffers we don't need anymore
}
int Net::OutputDim() {
return layers_.back()->OutputDim();
}
int Net::InputDim() {
return layers_.front()->InputDim();
}
Layer* Net::GetLayer(int layer) {
return layers_[layer];
}
void Net::SetLayer(int c, Layer *layer) {
delete layers_[c];
layers_[c] = layer;
}
void Net::AppendLayer(Layer* dynamically_allocated_layer) {
// append,
layers_.push_back(dynamically_allocated_layer);
}
void Net::AppendAffineTransformLayer(AffineTransform *dynamically_allocated_AffineTransform){
Af_l_=dynamically_allocated_AffineTransform;
}
void Net::Destroy() {
for(int i=0; i<NumLayers(); i++) {
delete layers_[i];
}
delete Af_l_;
layers_.resize(0);
}
|
a87d4b87c297ff70ddbd748dce028e65f3295eca.hip | // !!! This is a file automatically generated by hipify!!!
/*
* Example of how to use the mxGPUArray API in a MEX file. This example shows
* how to write a MEX function that takes a gpuArray input and returns a
* gpuArray output, e.g. B=mexFunction(A).
*
* Copyright 2012 The MathWorks, Inc.
*/
#include <stdio.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <math.h>
#include <stdint.h>
#include "mex.h"
#include "gpu/mxGPUArray.h"
#include <cstdlib>
#include <algorithm>
#include <iostream>
using namespace std;
const int nt0 = 61, Nthreads = 1024, lockout = nt0-1, NchanMax = 128, block = 32, NrankMax = 6;
//////////////////////////////////////////////////////////////////////////////////////////
__global__ void Conv1D(const double *Params, const float *data, const float *W, float *conv_sig){
__shared__ float sW[nt0*NrankMax], sdata[Nthreads+nt0];
float x;
int tid, tid0, bid, i, NT, Nfilt, NfiltALL;
tid = threadIdx.x;
bid = blockIdx.x;
Nfilt = (int) Params[1];
NT = (int) Params[0];
NfiltALL = Nfilt * ((int) Params[6]);
if(tid<nt0*((int) Params[6]))
sW[tid]= W[tid%nt0 + (bid + Nfilt * (tid/nt0))* nt0];
__syncthreads();
tid0 = 0;
while (tid0<NT-Nthreads-nt0+1){
if (tid<nt0) sdata[tid] = data[tid0 + tid+ NT*bid];
sdata[nt0+tid] = data[nt0+tid0 + tid+ NT*bid];
__syncthreads();
x = 0.0f;
while(bid<NfiltALL){
for(i=0;i<nt0;i++)
x += sW[i + (bid/Nfilt)*nt0] * sdata[i+tid];
bid+=Nfilt;
}
bid = blockIdx.x;
conv_sig[tid0 + tid + NT*bid] = x;
tid0+=Nthreads;
__syncthreads();
}
}
//////////////////////////////////////////////////////////////////////////////////////////
__global__ void bestFilter(const double *Params, const float *data,
const float *mu, const float *lam, float *xbest, float *err, int *ftype){
int tid, tid0, i, bid, NT, Nfilt, ibest = 0;
float Th, Cf, Ci, xb, Cbest = 0.0f;
tid = threadIdx.x;
bid = blockIdx.x;
NT = (int) Params[0];
Nfilt = (int) Params[1];
Th = (float) Params[2];
tid0 = tid + bid * Nthreads;
if (tid0<NT){
for (i=0; i<Nfilt;i++){
Ci = data[tid0 + NT * i] + mu[i] * lam[i];
Cf = Ci * Ci / (lam[i] + 1.0f) - lam[i]*mu[i]*mu[i];
if (Cf > Cbest){
Cbest = Cf;
xb = Ci / (lam[i] + 1);
ibest = i;
}
}
if (Cbest > Th*Th){
err[tid0] = Cbest;
xbest[tid0] = xb;
ftype[tid0] = ibest;
}
}
}
//////////////////////////////////////////////////////////////////////////////////////////
__global__ void cleanup_spikes(const double *Params, const float *xbest, const float *err,
const int *ftype, int *st, int *id, float *x, float *C, int *counter){
int indx, maxFR, NTOT, tid, bid, NT, tid0, j;
volatile __shared__ float sdata[Nthreads+2*lockout+1];
bool flag=0;
float err0;
tid = threadIdx.x;
bid = blockIdx.x;
NT = (int) Params[0];
maxFR = (int) Params[3];
tid0 = bid * Nthreads;
if(tid0<NT-Nthreads-2*lockout-1){
if (tid<2*lockout)
sdata[tid] = err[tid0 + tid];
sdata[tid+2*lockout] = err[2*lockout + tid0 + tid];
__syncthreads();
err0 = sdata[tid+lockout];
if(err0>1e-10){
flag = 0;
for(j=-lockout;j<=lockout;j++)
if(sdata[tid+lockout+j]>err0){
flag = 1;
break;
}
if(flag==0){
indx = atomicAdd(&counter[0], 1);
if (indx<maxFR){
st[indx] = tid+lockout + tid0;
id[indx] = ftype[tid+lockout + tid0];
x[indx] = xbest[tid+lockout + tid0];
C[indx] = err0;
}
}
}
}
}
//////////////////////////////////////////////////////////////////////////////////////////
__global__ void subSpikes(const double *Params, const int *st, const int *id,
const float *x, const int *counter, float *dout, const float *WtW){
int tid, bid, NT, ind, tcurr, Nfilt;
tid = threadIdx.x;
bid = blockIdx.x;
NT = (int) Params[0];
Nfilt = (int) Params[1];
for(ind=counter[1]; ind<counter[0];ind++){
tcurr = tid + st[ind]-nt0+1;
if (tcurr >= 0 && tcurr < NT)
dout[tcurr + bid*NT] -= x[ind] * WtW[tid + id[ind]*(2*nt0-1) + (2*nt0-1)*Nfilt*bid];
}
}
//////////////////////////////////////////////////////////////////////////////////////////
/*
* Host code
*/
void mexFunction(int nlhs, mxArray *plhs[],
int nrhs, mxArray const *prhs[])
{
/* Declare input variables*/
double *Params, *d_Params;
int blocksPerGrid, NT, maxFR, Nchan;
int const threadsPerBlock = Nthreads;
/* Initialize the MathWorks GPU API. */
mxInitGPU();
/* read Params and copy to GPU */
Params = (double*) mxGetData(prhs[0]);
NT = (int) Params[0];
blocksPerGrid = (int) Params[1];
maxFR = (int) Params[3];
Nchan = (int) Params[5];
hipMalloc(&d_Params, sizeof(double)*mxGetNumberOfElements(prhs[0]));
hipMemcpy(d_Params,Params,sizeof(double)*mxGetNumberOfElements(prhs[0]),hipMemcpyHostToDevice);
/* collect input GPU variables*/
mxGPUArray const *W, *data, *WtW, *mu, *lam;
const float *d_W, *d_data, *d_WtW, *d_mu, *d_lam;
data = mxGPUCreateFromMxArray(prhs[1]);
d_data = (float const *)(mxGPUGetDataReadOnly(data));
W = mxGPUCreateFromMxArray(prhs[2]);
d_W = (float const *)(mxGPUGetDataReadOnly(W));
WtW = mxGPUCreateFromMxArray(prhs[3]);
d_WtW = (float const *)(mxGPUGetDataReadOnly(WtW));
mu = mxGPUCreateFromMxArray(prhs[4]);
d_mu = (float const *)(mxGPUGetDataReadOnly(mu));
lam = mxGPUCreateFromMxArray(prhs[5]);
d_lam = (float const *)(mxGPUGetDataReadOnly(lam));
/* allocate new GPU variables*/
float *d_err,*d_C, *d_xbest, *d_x, *d_dout;
int *d_st, *d_ftype, *d_id, *d_counter;
hipMalloc(&d_dout, NT * blocksPerGrid* sizeof(float));
hipMalloc(&d_err, NT * sizeof(float));
hipMalloc(&d_xbest, NT * sizeof(float));
hipMalloc(&d_ftype, NT * sizeof(int));
hipMalloc(&d_st, maxFR * sizeof(int));
hipMalloc(&d_id, maxFR * sizeof(int));
hipMalloc(&d_x, maxFR * sizeof(float));
hipMalloc(&d_C, maxFR * sizeof(float));
hipMalloc(&d_counter, 2*sizeof(int));
hipMemset(d_dout, 0, NT * blocksPerGrid * sizeof(float));
hipMemset(d_counter, 0, 2*sizeof(int));
hipMemset(d_st, 0, maxFR * sizeof(int));
hipMemset(d_id, 0, maxFR * sizeof(int));
hipMemset(d_x, 0, maxFR * sizeof(float));
hipMemset(d_C, 0, maxFR * sizeof(float));
int *counter;
counter = (int*) calloc(1,sizeof(int));
hipLaunchKernelGGL(( Conv1D), dim3(blocksPerGrid),dim3(threadsPerBlock), 0, 0, d_Params, d_data, d_W, d_dout);
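// iterative template matching: on each pass, bestFilter picks the best template and amplitude per
// time point, cleanup_spikes keeps local maxima above threshold as spikes, and subSpikes subtracts
// the fitted waveforms from the filtered data before the next pass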
for(int k=0;k<(int) Params[4];k++){
hipMemset(d_err, 0, NT * sizeof(float));
hipMemset(d_ftype, 0, NT * sizeof(int));
hipMemset(d_xbest, 0, NT * sizeof(float));
hipLaunchKernelGGL(( bestFilter), dim3(NT/Nthreads),dim3(threadsPerBlock), 0, 0, d_Params, d_dout, d_mu, d_lam, d_xbest, d_err, d_ftype);
hipLaunchKernelGGL(( cleanup_spikes), dim3(NT/Nthreads),dim3(threadsPerBlock), 0, 0, d_Params, d_xbest, d_err, d_ftype, d_st, d_id, d_x, d_C, d_counter);
hipMemcpy(counter, d_counter, sizeof(int), hipMemcpyDeviceToHost);
if (counter[0]>maxFR){
counter[0] = maxFR;
hipMemcpy(d_counter, counter, sizeof(int), hipMemcpyHostToDevice);
}
int ntidy = 1024/(2*nt0-1);
dim3 block(2*nt0-1, ntidy);
hipLaunchKernelGGL(( subSpikes), dim3(ceil(blocksPerGrid/ntidy)), dim3(block), 0, 0, d_Params, d_st, d_id, d_x, d_counter, d_dout, d_WtW);
hipMemcpy(d_counter+1, d_counter, sizeof(int), hipMemcpyDeviceToDevice); // duplicate the spike counter on the device for the next subtraction pass
if(counter[0]==maxFR)
break;
}
float *x, *C;
int *st, *id;
int minSize;
if (counter[0]<maxFR) minSize = counter[0];
else minSize = maxFR;
const mwSize dimst[] = {minSize,1};
plhs[0] = mxCreateNumericArray(2, dimst, mxINT32_CLASS, mxREAL);
st = (int*) mxGetData(plhs[0]);
plhs[1] = mxCreateNumericArray(2, dimst, mxINT32_CLASS, mxREAL);
id = (int*) mxGetData(plhs[1]);
plhs[2] = mxCreateNumericArray(2, dimst, mxSINGLE_CLASS, mxREAL);
x = (float*) mxGetData(plhs[2]);
plhs[3] = mxCreateNumericArray(2, dimst, mxSINGLE_CLASS, mxREAL);
C = (float*) mxGetData(plhs[3]);
hipMemcpy(st, d_st, minSize * sizeof(int), hipMemcpyDeviceToHost);
hipMemcpy(id, d_id, minSize * sizeof(int), hipMemcpyDeviceToHost);
hipMemcpy(x, d_x, minSize * sizeof(float), hipMemcpyDeviceToHost);
hipMemcpy(C, d_C, minSize * sizeof(float), hipMemcpyDeviceToHost);
hipFree(d_ftype);
hipFree(d_err);
hipFree(d_xbest);
hipFree(d_st);
hipFree(d_id);
hipFree(d_x);
hipFree(d_C);
hipFree(d_counter);
hipFree(d_Params);
hipFree(d_dout);
mxGPUDestroyGPUArray(data);
mxGPUDestroyGPUArray(WtW);
mxGPUDestroyGPUArray(W);
mxGPUDestroyGPUArray(mu);
mxGPUDestroyGPUArray(lam);
}
| a87d4b87c297ff70ddbd748dce028e65f3295eca.cu | /*
* Example of how to use the mxGPUArray API in a MEX file. This example shows
* how to write a MEX function that takes a gpuArray input and returns a
* gpuArray output, e.g. B=mexFunction(A).
*
* Copyright 2012 The MathWorks, Inc.
*/
#include <stdio.h>
#include <stdlib.h>
#include <cuda.h>
#include <math.h>
#include <stdint.h>
#include "mex.h"
#include "gpu/mxGPUArray.h"
#include <cstdlib>
#include <algorithm>
#include <iostream>
using namespace std;
const int nt0 = 61, Nthreads = 1024, lockout = nt0-1, NchanMax = 128, block = 32, NrankMax = 6;
//////////////////////////////////////////////////////////////////////////////////////////
__global__ void Conv1D(const double *Params, const float *data, const float *W, float *conv_sig){
__shared__ float sW[nt0*NrankMax], sdata[Nthreads+nt0];
float x;
int tid, tid0, bid, i, NT, Nfilt, NfiltALL;
tid = threadIdx.x;
bid = blockIdx.x;
Nfilt = (int) Params[1];
NT = (int) Params[0];
NfiltALL = Nfilt * ((int) Params[6]);
if(tid<nt0*((int) Params[6]))
sW[tid]= W[tid%nt0 + (bid + Nfilt * (tid/nt0))* nt0];
__syncthreads();
tid0 = 0;
while (tid0<NT-Nthreads-nt0+1){
if (tid<nt0) sdata[tid] = data[tid0 + tid+ NT*bid];
sdata[nt0+tid] = data[nt0+tid0 + tid+ NT*bid];
__syncthreads();
x = 0.0f;
while(bid<NfiltALL){
for(i=0;i<nt0;i++)
x += sW[i + (bid/Nfilt)*nt0] * sdata[i+tid];
bid+=Nfilt;
}
bid = blockIdx.x;
conv_sig[tid0 + tid + NT*bid] = x;
tid0+=Nthreads;
__syncthreads();
}
}
//////////////////////////////////////////////////////////////////////////////////////////
__global__ void bestFilter(const double *Params, const float *data,
const float *mu, const float *lam, float *xbest, float *err, int *ftype){
int tid, tid0, i, bid, NT, Nfilt, ibest = 0;
float Th, Cf, Ci, xb, Cbest = 0.0f;
tid = threadIdx.x;
bid = blockIdx.x;
NT = (int) Params[0];
Nfilt = (int) Params[1];
Th = (float) Params[2];
tid0 = tid + bid * Nthreads;
if (tid0<NT){
for (i=0; i<Nfilt;i++){
Ci = data[tid0 + NT * i] + mu[i] * lam[i];
Cf = Ci * Ci / (lam[i] + 1.0f) - lam[i]*mu[i]*mu[i];
if (Cf > Cbest){
Cbest = Cf;
xb = Ci / (lam[i] + 1);
ibest = i;
}
}
if (Cbest > Th*Th){
err[tid0] = Cbest;
xbest[tid0] = xb;
ftype[tid0] = ibest;
}
}
}
//////////////////////////////////////////////////////////////////////////////////////////
__global__ void cleanup_spikes(const double *Params, const float *xbest, const float *err,
const int *ftype, int *st, int *id, float *x, float *C, int *counter){
int indx, maxFR, NTOT, tid, bid, NT, tid0, j;
volatile __shared__ float sdata[Nthreads+2*lockout+1];
bool flag=0;
float err0;
tid = threadIdx.x;
bid = blockIdx.x;
NT = (int) Params[0];
maxFR = (int) Params[3];
tid0 = bid * Nthreads;
if(tid0<NT-Nthreads-2*lockout-1){
if (tid<2*lockout)
sdata[tid] = err[tid0 + tid];
sdata[tid+2*lockout] = err[2*lockout + tid0 + tid];
__syncthreads();
err0 = sdata[tid+lockout];
if(err0>1e-10){
flag = 0;
for(j=-lockout;j<=lockout;j++)
if(sdata[tid+lockout+j]>err0){
flag = 1;
break;
}
if(flag==0){
indx = atomicAdd(&counter[0], 1);
if (indx<maxFR){
st[indx] = tid+lockout + tid0;
id[indx] = ftype[tid+lockout + tid0];
x[indx] = xbest[tid+lockout + tid0];
C[indx] = err0;
}
}
}
}
}
//////////////////////////////////////////////////////////////////////////////////////////
__global__ void subSpikes(const double *Params, const int *st, const int *id,
const float *x, const int *counter, float *dout, const float *WtW){
int tid, bid, NT, ind, tcurr, Nfilt;
tid = threadIdx.x;
bid = blockIdx.x;
NT = (int) Params[0];
Nfilt = (int) Params[1];
for(ind=counter[1]; ind<counter[0];ind++){
tcurr = tid + st[ind]-nt0+1;
if (tcurr >= 0 && tcurr < NT)
dout[tcurr + bid*NT] -= x[ind] * WtW[tid + id[ind]*(2*nt0-1) + (2*nt0-1)*Nfilt*bid];
}
}
//////////////////////////////////////////////////////////////////////////////////////////
/*
* Host code
*/
void mexFunction(int nlhs, mxArray *plhs[],
int nrhs, mxArray const *prhs[])
{
/* Declare input variables*/
double *Params, *d_Params;
int blocksPerGrid, NT, maxFR, Nchan;
int const threadsPerBlock = Nthreads;
/* Initialize the MathWorks GPU API. */
mxInitGPU();
/* read Params and copy to GPU */
Params = (double*) mxGetData(prhs[0]);
NT = (int) Params[0];
blocksPerGrid = (int) Params[1];
maxFR = (int) Params[3];
Nchan = (int) Params[5];
cudaMalloc(&d_Params, sizeof(double)*mxGetNumberOfElements(prhs[0]));
cudaMemcpy(d_Params,Params,sizeof(double)*mxGetNumberOfElements(prhs[0]),cudaMemcpyHostToDevice);
/* collect input GPU variables*/
mxGPUArray const *W, *data, *WtW, *mu, *lam;
const float *d_W, *d_data, *d_WtW, *d_mu, *d_lam;
data = mxGPUCreateFromMxArray(prhs[1]);
d_data = (float const *)(mxGPUGetDataReadOnly(data));
W = mxGPUCreateFromMxArray(prhs[2]);
d_W = (float const *)(mxGPUGetDataReadOnly(W));
WtW = mxGPUCreateFromMxArray(prhs[3]);
d_WtW = (float const *)(mxGPUGetDataReadOnly(WtW));
mu = mxGPUCreateFromMxArray(prhs[4]);
d_mu = (float const *)(mxGPUGetDataReadOnly(mu));
lam = mxGPUCreateFromMxArray(prhs[5]);
d_lam = (float const *)(mxGPUGetDataReadOnly(lam));
/* allocate new GPU variables*/
float *d_err,*d_C, *d_xbest, *d_x, *d_dout;
int *d_st, *d_ftype, *d_id, *d_counter;
cudaMalloc(&d_dout, NT * blocksPerGrid* sizeof(float));
cudaMalloc(&d_err, NT * sizeof(float));
cudaMalloc(&d_xbest, NT * sizeof(float));
cudaMalloc(&d_ftype, NT * sizeof(int));
cudaMalloc(&d_st, maxFR * sizeof(int));
cudaMalloc(&d_id, maxFR * sizeof(int));
cudaMalloc(&d_x, maxFR * sizeof(float));
cudaMalloc(&d_C, maxFR * sizeof(float));
cudaMalloc(&d_counter, 2*sizeof(int));
cudaMemset(d_dout, 0, NT * blocksPerGrid * sizeof(float));
cudaMemset(d_counter, 0, 2*sizeof(int));
cudaMemset(d_st, 0, maxFR * sizeof(int));
cudaMemset(d_id, 0, maxFR * sizeof(int));
cudaMemset(d_x, 0, maxFR * sizeof(float));
cudaMemset(d_C, 0, maxFR * sizeof(float));
int *counter;
counter = (int*) calloc(1,sizeof(int));
Conv1D<<<blocksPerGrid,threadsPerBlock>>>(d_Params, d_data, d_W, d_dout);
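// iterative template matching: on each pass, bestFilter picks the best template and amplitude per
// time point, cleanup_spikes keeps local maxima above threshold as spikes, and subSpikes subtracts
// the fitted waveforms from the filtered data before the next pass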
for(int k=0;k<(int) Params[4];k++){
cudaMemset(d_err, 0, NT * sizeof(float));
cudaMemset(d_ftype, 0, NT * sizeof(int));
cudaMemset(d_xbest, 0, NT * sizeof(float));
bestFilter<<<NT/Nthreads,threadsPerBlock>>>( d_Params, d_dout, d_mu, d_lam, d_xbest, d_err, d_ftype);
cleanup_spikes<<<NT/Nthreads,threadsPerBlock>>>(d_Params, d_xbest, d_err, d_ftype, d_st, d_id, d_x, d_C, d_counter);
cudaMemcpy(counter, d_counter, sizeof(int), cudaMemcpyDeviceToHost);
if (counter[0]>maxFR){
counter[0] = maxFR;
cudaMemcpy(d_counter, counter, sizeof(int), cudaMemcpyHostToDevice);
}
int ntidy = 1024/(2*nt0-1);
dim3 block(2*nt0-1, ntidy);
subSpikes<<<ceil(blocksPerGrid/ntidy), block>>>(d_Params, d_st, d_id, d_x, d_counter, d_dout, d_WtW);
cudaMemcpy(d_counter+1, d_counter, sizeof(int), cudaMemcpyDeviceToDevice); // duplicate the spike counter on the device for the next subtraction pass
if(counter[0]==maxFR)
break;
}
float *x, *C;
int *st, *id;
int minSize;
if (counter[0]<maxFR) minSize = counter[0];
else minSize = maxFR;
const mwSize dimst[] = {minSize,1};
plhs[0] = mxCreateNumericArray(2, dimst, mxINT32_CLASS, mxREAL);
st = (int*) mxGetData(plhs[0]);
plhs[1] = mxCreateNumericArray(2, dimst, mxINT32_CLASS, mxREAL);
id = (int*) mxGetData(plhs[1]);
plhs[2] = mxCreateNumericArray(2, dimst, mxSINGLE_CLASS, mxREAL);
x = (float*) mxGetData(plhs[2]);
plhs[3] = mxCreateNumericArray(2, dimst, mxSINGLE_CLASS, mxREAL);
C = (float*) mxGetData(plhs[3]);
cudaMemcpy(st, d_st, minSize * sizeof(int), cudaMemcpyDeviceToHost);
cudaMemcpy(id, d_id, minSize * sizeof(int), cudaMemcpyDeviceToHost);
cudaMemcpy(x, d_x, minSize * sizeof(float), cudaMemcpyDeviceToHost);
cudaMemcpy(C, d_C, minSize * sizeof(float), cudaMemcpyDeviceToHost);
cudaFree(d_ftype);
cudaFree(d_err);
cudaFree(d_xbest);
cudaFree(d_st);
cudaFree(d_id);
cudaFree(d_x);
cudaFree(d_C);
cudaFree(d_counter);
cudaFree(d_Params);
cudaFree(d_dout);
mxGPUDestroyGPUArray(data);
mxGPUDestroyGPUArray(WtW);
mxGPUDestroyGPUArray(W);
mxGPUDestroyGPUArray(mu);
mxGPUDestroyGPUArray(lam);
}
|
015c49430e075568f03240b414f66a714181f4f6.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// MP 1
#include "wb.h"
__global__ void vecAdd(float * in1, float * in2, float * out, int len) {
//@@ Insert code to implement vector addition here
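// e.g. a minimal one-thread-per-element body (one possible solution, kept as a comment):
//   int i = blockIdx.x * blockDim.x + threadIdx.x;
//   if (i < len) out[i] = in1[i] + in2[i];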
}
int main(int argc, char ** argv) {
wbArg_t args;
int inputLength;
float * hostInput1;
float * hostInput2;
float * hostOutput;
float * deviceInput1;
float * deviceInput2;
float * deviceOutput;
args = wbArg_read(argc, argv);
wbTime_start(Generic, "Importing data and creating memory on host");
hostInput1 = (float *) wbImport(wbArg_getInputFile(args, 0), &inputLength);
hostInput2 = (float *) wbImport(wbArg_getInputFile(args, 1), &inputLength);
hostOutput = (float *) malloc(inputLength * sizeof(float));
wbTime_stop(Generic, "Importing data and creating memory on host");
wbLog(TRACE, "The input length is ", inputLength);
wbTime_start(GPU, "Allocating GPU memory.");
//@@ Allocate GPU memory here
wbTime_stop(GPU, "Allocating GPU memory.");
wbTime_start(GPU, "Copying input memory to the GPU.");
//@@ Copy memory to the GPU here
wbTime_stop(GPU, "Copying input memory to the GPU.");
//@@ Initialize the grid and block dimensions here
wbTime_start(Compute, "Performing CUDA computation");
//@@ Launch the GPU Kernel here
hipDeviceSynchronize();
wbTime_stop(Compute, "Performing CUDA computation");
wbTime_start(Copy, "Copying output memory to the CPU");
//@@ Copy the GPU memory back to the CPU here
wbTime_stop(Copy, "Copying output memory to the CPU");
wbTime_start(GPU, "Freeing GPU Memory");
//@@ Free the GPU memory here
wbTime_stop(GPU, "Freeing GPU Memory");
wbSolution(args, hostOutput, inputLength);
free(hostInput1);
free(hostInput2);
free(hostOutput);
return 0;
}
| 015c49430e075568f03240b414f66a714181f4f6.cu | // MP 1
#include "wb.h"
__global__ void vecAdd(float * in1, float * in2, float * out, int len) {
//@@ Insert code to implement vector addition here
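// e.g. a minimal one-thread-per-element body (one possible solution, kept as a comment):
//   int i = blockIdx.x * blockDim.x + threadIdx.x;
//   if (i < len) out[i] = in1[i] + in2[i];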
}
int main(int argc, char ** argv) {
wbArg_t args;
int inputLength;
float * hostInput1;
float * hostInput2;
float * hostOutput;
float * deviceInput1;
float * deviceInput2;
float * deviceOutput;
args = wbArg_read(argc, argv);
wbTime_start(Generic, "Importing data and creating memory on host");
hostInput1 = (float *) wbImport(wbArg_getInputFile(args, 0), &inputLength);
hostInput2 = (float *) wbImport(wbArg_getInputFile(args, 1), &inputLength);
hostOutput = (float *) malloc(inputLength * sizeof(float));
wbTime_stop(Generic, "Importing data and creating memory on host");
wbLog(TRACE, "The input length is ", inputLength);
wbTime_start(GPU, "Allocating GPU memory.");
//@@ Allocate GPU memory here
wbTime_stop(GPU, "Allocating GPU memory.");
wbTime_start(GPU, "Copying input memory to the GPU.");
//@@ Copy memory to the GPU here
wbTime_stop(GPU, "Copying input memory to the GPU.");
//@@ Initialize the grid and block dimensions here
wbTime_start(Compute, "Performing CUDA computation");
//@@ Launch the GPU Kernel here
cudaThreadSynchronize();
wbTime_stop(Compute, "Performing CUDA computation");
wbTime_start(Copy, "Copying output memory to the CPU");
//@@ Copy the GPU memory back to the CPU here
wbTime_stop(Copy, "Copying output memory to the CPU");
wbTime_start(GPU, "Freeing GPU Memory");
//@@ Free the GPU memory here
wbTime_stop(GPU, "Freeing GPU Memory");
wbSolution(args, hostOutput, inputLength);
free(hostInput1);
free(hostInput2);
free(hostOutput);
return 0;
}
|
53e58fb98bcd76f4adb728b26ed9528f539da86b.hip | // !!! This is a file automatically generated by hipify!!!
#define PETSC_SKIP_SPINLOCK
#include <petscconf.h>
#include <../src/mat/impls/aij/mpi/mpiaij.h> /*I "petscmat.h" I*/
#include <../src/mat/impls/aij/mpi/mpicusparse/mpicusparsematimpl.h>
PetscErrorCode MatMPIAIJSetPreallocation_MPIAIJCUSPARSE(Mat B,PetscInt d_nz,const PetscInt d_nnz[],PetscInt o_nz,const PetscInt o_nnz[])
{
Mat_MPIAIJ *b = (Mat_MPIAIJ*)B->data;
Mat_MPIAIJCUSPARSE * cusparseStruct = (Mat_MPIAIJCUSPARSE*)b->spptr;
PetscErrorCode ierr;
PetscInt i;
PetscFunctionBegin;
ierr = PetscLayoutSetUp(B->rmap);CHKERRQ(ierr);
ierr = PetscLayoutSetUp(B->cmap);CHKERRQ(ierr);
if (d_nnz) {
for (i=0; i<B->rmap->n; i++) {
if (d_nnz[i] < 0) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"d_nnz cannot be less than 0: local row %D value %D",i,d_nnz[i]);
}
}
if (o_nnz) {
for (i=0; i<B->rmap->n; i++) {
if (o_nnz[i] < 0) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"o_nnz cannot be less than 0: local row %D value %D",i,o_nnz[i]);
}
}
if (!B->preallocated) {
/* Explicitly create 2 MATSEQAIJCUSPARSE matrices. */
ierr = MatCreate(PETSC_COMM_SELF,&b->A);CHKERRQ(ierr);
ierr = MatSetSizes(b->A,B->rmap->n,B->cmap->n,B->rmap->n,B->cmap->n);CHKERRQ(ierr);
ierr = MatSetType(b->A,MATSEQAIJCUSPARSE);CHKERRQ(ierr);
ierr = PetscLogObjectParent((PetscObject)B,(PetscObject)b->A);CHKERRQ(ierr);
ierr = MatCreate(PETSC_COMM_SELF,&b->B);CHKERRQ(ierr);
ierr = MatSetSizes(b->B,B->rmap->n,B->cmap->N,B->rmap->n,B->cmap->N);CHKERRQ(ierr);
ierr = MatSetType(b->B,MATSEQAIJCUSPARSE);CHKERRQ(ierr);
ierr = PetscLogObjectParent((PetscObject)B,(PetscObject)b->B);CHKERRQ(ierr);
}
ierr = MatSeqAIJSetPreallocation(b->A,d_nz,d_nnz);CHKERRQ(ierr);
ierr = MatSeqAIJSetPreallocation(b->B,o_nz,o_nnz);CHKERRQ(ierr);
ierr = MatCUSPARSESetFormat(b->A,MAT_CUSPARSE_MULT,cusparseStruct->diagGPUMatFormat);CHKERRQ(ierr);
ierr = MatCUSPARSESetFormat(b->B,MAT_CUSPARSE_MULT,cusparseStruct->offdiagGPUMatFormat);CHKERRQ(ierr);
ierr = MatCUSPARSESetHandle(b->A,cusparseStruct->handle);CHKERRQ(ierr);
ierr = MatCUSPARSESetHandle(b->B,cusparseStruct->handle);CHKERRQ(ierr);
ierr = MatCUSPARSESetStream(b->A,cusparseStruct->stream);CHKERRQ(ierr);
ierr = MatCUSPARSESetStream(b->B,cusparseStruct->stream);CHKERRQ(ierr);
B->preallocated = PETSC_TRUE;
PetscFunctionReturn(0);
}
PetscErrorCode MatCreateVecs_MPIAIJCUSPARSE(Mat mat,Vec *right,Vec *left)
{
PetscErrorCode ierr;
PetscInt rbs,cbs;
PetscFunctionBegin;
ierr = MatGetBlockSizes(mat,&rbs,&cbs);CHKERRQ(ierr);
if (right) {
ierr = VecCreate(PetscObjectComm((PetscObject)mat),right);CHKERRQ(ierr);
ierr = VecSetSizes(*right,mat->cmap->n,PETSC_DETERMINE);CHKERRQ(ierr);
ierr = VecSetBlockSize(*right,cbs);CHKERRQ(ierr);
ierr = VecSetType(*right,VECCUDA);CHKERRQ(ierr);
ierr = VecSetLayout(*right,mat->cmap);CHKERRQ(ierr);
}
if (left) {
ierr = VecCreate(PetscObjectComm((PetscObject)mat),left);CHKERRQ(ierr);
ierr = VecSetSizes(*left,mat->rmap->n,PETSC_DETERMINE);CHKERRQ(ierr);
ierr = VecSetBlockSize(*left,rbs);CHKERRQ(ierr);
ierr = VecSetType(*left,VECCUDA);CHKERRQ(ierr);
ierr = VecSetLayout(*left,mat->rmap);CHKERRQ(ierr);
}
PetscFunctionReturn(0);
}
PetscErrorCode MatMult_MPIAIJCUSPARSE(Mat A,Vec xx,Vec yy)
{
  /* This multiplication sequence is different from the
     CPU version. In particular, the diagonal block
multiplication kernel is launched in one stream. Then,
in a separate stream, the data transfers from DeviceToHost
(with MPI messaging in between), then HostToDevice are
launched. Once the data transfer stream is synchronized,
to ensure messaging is complete, the MatMultAdd kernel
is launched in the original (MatMult) stream to protect
against race conditions.
This sequence should only be called for GPU computation. */
Mat_MPIAIJ *a = (Mat_MPIAIJ*)A->data;
PetscErrorCode ierr;
PetscInt nt;
PetscFunctionBegin;
ierr = VecGetLocalSize(xx,&nt);CHKERRQ(ierr);
if (nt != A->cmap->n) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_SIZ,"Incompatible partition of A (%D) and xx (%D)",A->cmap->n,nt);
ierr = VecScatterInitializeForGPU(a->Mvctx,xx,SCATTER_FORWARD);CHKERRQ(ierr);
ierr = (*a->A->ops->mult)(a->A,xx,yy);CHKERRQ(ierr);
ierr = VecScatterBegin(a->Mvctx,xx,a->lvec,INSERT_VALUES,SCATTER_FORWARD);CHKERRQ(ierr);
ierr = VecScatterEnd(a->Mvctx,xx,a->lvec,INSERT_VALUES,SCATTER_FORWARD);CHKERRQ(ierr);
ierr = (*a->B->ops->multadd)(a->B,a->lvec,yy,yy);CHKERRQ(ierr);
ierr = VecScatterFinalizeForGPU(a->Mvctx);CHKERRQ(ierr);
PetscFunctionReturn(0);
}
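/*
 * Rough sketch only of the stream-overlap idea described in the comment inside
 * MatMult_MPIAIJCUSPARSE above, written with plain HIP stream calls rather than
 * PETSc's actual VecScatter/cuSPARSE plumbing; the kernel and buffer names below
 * are hypothetical and exist only for illustration.
 *
 *   hipStream_t compute, transfer;
 *   hipStreamCreate(&compute);
 *   hipStreamCreate(&transfer);
 *
 *   // 1. diagonal-block SpMV on the compute stream
 *   local_spmv<<<grid, block, 0, compute>>>(A_diag, x_local, y_local);
 *
 *   // 2. stage halo values on the transfer stream while step 1 runs
 *   hipMemcpyAsync(h_halo, d_halo, halo_bytes, hipMemcpyDeviceToHost, transfer);
 *   hipStreamSynchronize(transfer);        // halo on host; exchange via MPI
 *   // ... MPI_Isend/MPI_Irecv of halo values between neighbouring ranks ...
 *   hipMemcpyAsync(d_recv, h_recv, recv_bytes, hipMemcpyHostToDevice, transfer);
 *   hipStreamSynchronize(transfer);        // remote values now on the device
 *
 *   // 3. off-diagonal multiply-add back on the compute stream
 *   offdiag_spmv_add<<<grid, block, 0, compute>>>(A_offdiag, x_remote, y_local);
 */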
PetscErrorCode MatMultTranspose_MPIAIJCUSPARSE(Mat A,Vec xx,Vec yy)
{
  /* This multiplication sequence is different from the
     CPU version. In particular, the diagonal block
multiplication kernel is launched in one stream. Then,
in a separate stream, the data transfers from DeviceToHost
(with MPI messaging in between), then HostToDevice are
launched. Once the data transfer stream is synchronized,
to ensure messaging is complete, the MatMultAdd kernel
is launched in the original (MatMult) stream to protect
against race conditions.
This sequence should only be called for GPU computation. */
Mat_MPIAIJ *a = (Mat_MPIAIJ*)A->data;
PetscErrorCode ierr;
PetscInt nt;
PetscFunctionBegin;
ierr = VecGetLocalSize(xx,&nt);CHKERRQ(ierr);
if (nt != A->cmap->n) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_SIZ,"Incompatible partition of A (%D) and xx (%D)",A->cmap->n,nt);
ierr = VecScatterInitializeForGPU(a->Mvctx,xx,SCATTER_FORWARD);CHKERRQ(ierr);
ierr = (*a->A->ops->multtranspose)(a->A,xx,yy);CHKERRQ(ierr);
ierr = VecScatterBegin(a->Mvctx,xx,a->lvec,INSERT_VALUES,SCATTER_FORWARD);CHKERRQ(ierr);
ierr = VecScatterEnd(a->Mvctx,xx,a->lvec,INSERT_VALUES,SCATTER_FORWARD);CHKERRQ(ierr);
ierr = (*a->B->ops->multtransposeadd)(a->B,a->lvec,yy,yy);CHKERRQ(ierr);
ierr = VecScatterFinalizeForGPU(a->Mvctx);CHKERRQ(ierr);
PetscFunctionReturn(0);
}
PetscErrorCode MatCUSPARSESetFormat_MPIAIJCUSPARSE(Mat A,MatCUSPARSEFormatOperation op,MatCUSPARSEStorageFormat format)
{
Mat_MPIAIJ *a = (Mat_MPIAIJ*)A->data;
Mat_MPIAIJCUSPARSE * cusparseStruct = (Mat_MPIAIJCUSPARSE*)a->spptr;
PetscFunctionBegin;
switch (op) {
case MAT_CUSPARSE_MULT_DIAG:
cusparseStruct->diagGPUMatFormat = format;
break;
case MAT_CUSPARSE_MULT_OFFDIAG:
cusparseStruct->offdiagGPUMatFormat = format;
break;
case MAT_CUSPARSE_ALL:
cusparseStruct->diagGPUMatFormat = format;
cusparseStruct->offdiagGPUMatFormat = format;
break;
default:
    SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_SUP,"unsupported operation %d for MatCUSPARSEFormatOperation. Only MAT_CUSPARSE_MULT_DIAG, MAT_CUSPARSE_MULT_OFFDIAG, and MAT_CUSPARSE_ALL are currently supported.",op);
}
PetscFunctionReturn(0);
}
PetscErrorCode MatSetFromOptions_MPIAIJCUSPARSE(PetscOptionItems *PetscOptionsObject,Mat A)
{
MatCUSPARSEStorageFormat format;
PetscErrorCode ierr;
PetscBool flg;
Mat_MPIAIJ *a = (Mat_MPIAIJ*)A->data;
Mat_MPIAIJCUSPARSE *cusparseStruct = (Mat_MPIAIJCUSPARSE*)a->spptr;
PetscFunctionBegin;
ierr = PetscOptionsHead(PetscOptionsObject,"MPIAIJCUSPARSE options");CHKERRQ(ierr);
ierr = PetscObjectOptionsBegin((PetscObject)A);
if (A->factortype==MAT_FACTOR_NONE) {
ierr = PetscOptionsEnum("-mat_cusparse_mult_diag_storage_format","sets storage format of the diagonal blocks of (mpi)aijcusparse gpu matrices for SpMV",
"MatCUSPARSESetFormat",MatCUSPARSEStorageFormats,(PetscEnum)cusparseStruct->diagGPUMatFormat,(PetscEnum*)&format,&flg);CHKERRQ(ierr);
if (flg) {
ierr = MatCUSPARSESetFormat(A,MAT_CUSPARSE_MULT_DIAG,format);CHKERRQ(ierr);
}
ierr = PetscOptionsEnum("-mat_cusparse_mult_offdiag_storage_format","sets storage format of the off-diagonal blocks (mpi)aijcusparse gpu matrices for SpMV",
"MatCUSPARSESetFormat",MatCUSPARSEStorageFormats,(PetscEnum)cusparseStruct->offdiagGPUMatFormat,(PetscEnum*)&format,&flg);CHKERRQ(ierr);
if (flg) {
ierr = MatCUSPARSESetFormat(A,MAT_CUSPARSE_MULT_OFFDIAG,format);CHKERRQ(ierr);
}
ierr = PetscOptionsEnum("-mat_cusparse_storage_format","sets storage format of the diagonal and off-diagonal blocks (mpi)aijcusparse gpu matrices for SpMV",
"MatCUSPARSESetFormat",MatCUSPARSEStorageFormats,(PetscEnum)cusparseStruct->diagGPUMatFormat,(PetscEnum*)&format,&flg);CHKERRQ(ierr);
if (flg) {
ierr = MatCUSPARSESetFormat(A,MAT_CUSPARSE_ALL,format);CHKERRQ(ierr);
}
}
ierr = PetscOptionsEnd();CHKERRQ(ierr);
PetscFunctionReturn(0);
}
PETSC_EXTERN PetscErrorCode MatAssemblyEnd_MPIAIJ(Mat,MatAssemblyType);
PetscErrorCode MatAssemblyEnd_MPIAIJCUSPARSE(Mat A,MatAssemblyType mode)
{
PetscErrorCode ierr;
Mat_MPIAIJ *mpiaij;
PetscFunctionBegin;
mpiaij = (Mat_MPIAIJ*)A->data;
ierr = MatAssemblyEnd_MPIAIJ(A,mode);CHKERRQ(ierr);
if (!A->was_assembled && mode == MAT_FINAL_ASSEMBLY) {
ierr = VecSetType(mpiaij->lvec,VECSEQCUDA);CHKERRQ(ierr);
}
PetscFunctionReturn(0);
}
PetscErrorCode MatDestroy_MPIAIJCUSPARSE(Mat A)
{
PetscErrorCode ierr;
Mat_MPIAIJ *a = (Mat_MPIAIJ*)A->data;
Mat_MPIAIJCUSPARSE *cusparseStruct = (Mat_MPIAIJCUSPARSE*)a->spptr;
hipError_t err;
hipsparseStatus_t stat;
PetscFunctionBegin;
try {
ierr = MatCUSPARSEClearHandle(a->A);CHKERRQ(ierr);
ierr = MatCUSPARSEClearHandle(a->B);CHKERRQ(ierr);
stat = hipsparseDestroy(cusparseStruct->handle);CHKERRCUDA(stat);
err = hipStreamDestroy(cusparseStruct->stream);CHKERRCUDA(err);
delete cusparseStruct;
} catch(char *ex) {
SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_LIB,"Mat_MPIAIJCUSPARSE error: %s", ex);
}
cusparseStruct = 0;
ierr = MatDestroy_MPIAIJ(A);CHKERRQ(ierr);
PetscFunctionReturn(0);
}
PETSC_EXTERN PetscErrorCode MatCreate_MPIAIJCUSPARSE(Mat A)
{
PetscErrorCode ierr;
Mat_MPIAIJ *a;
Mat_MPIAIJCUSPARSE * cusparseStruct;
hipError_t err;
hipsparseStatus_t stat;
PetscFunctionBegin;
ierr = MatCreate_MPIAIJ(A);CHKERRQ(ierr);
ierr = PetscObjectComposeFunction((PetscObject)A,"MatMPIAIJSetPreallocation_C",MatMPIAIJSetPreallocation_MPIAIJCUSPARSE);CHKERRQ(ierr);
a = (Mat_MPIAIJ*)A->data;
a->spptr = new Mat_MPIAIJCUSPARSE;
cusparseStruct = (Mat_MPIAIJCUSPARSE*)a->spptr;
cusparseStruct->diagGPUMatFormat = MAT_CUSPARSE_CSR;
cusparseStruct->offdiagGPUMatFormat = MAT_CUSPARSE_CSR;
stat = hipsparseCreate(&(cusparseStruct->handle));CHKERRCUDA(stat);
err = hipStreamCreate(&(cusparseStruct->stream));CHKERRCUDA(err);
A->ops->assemblyend = MatAssemblyEnd_MPIAIJCUSPARSE;
A->ops->getvecs = MatCreateVecs_MPIAIJCUSPARSE;
A->ops->mult = MatMult_MPIAIJCUSPARSE;
A->ops->multtranspose = MatMultTranspose_MPIAIJCUSPARSE;
A->ops->setfromoptions = MatSetFromOptions_MPIAIJCUSPARSE;
A->ops->destroy = MatDestroy_MPIAIJCUSPARSE;
ierr = PetscObjectChangeTypeName((PetscObject)A,MATMPIAIJCUSPARSE);CHKERRQ(ierr);
ierr = PetscObjectComposeFunction((PetscObject)A,"MatCUSPARSESetFormat_C", MatCUSPARSESetFormat_MPIAIJCUSPARSE);CHKERRQ(ierr);
PetscFunctionReturn(0);
}
/*@
MatCreateAIJCUSPARSE - Creates a sparse matrix in AIJ (compressed row) format
   (the default parallel PETSc format). This matrix will ultimately be pushed down
   to NVIDIA GPUs and use the CUSPARSE library for calculations. For good matrix
assembly performance the user should preallocate the matrix storage by setting
the parameter nz (or the array nnz). By setting these parameters accurately,
performance during matrix assembly can be increased by more than a factor of 50.
Collective on MPI_Comm
Input Parameters:
+ comm - MPI communicator, set to PETSC_COMM_SELF
. m - number of rows
. n - number of columns
. nz - number of nonzeros per row (same for all rows)
- nnz - array containing the number of nonzeros in the various rows
(possibly different for each row) or NULL
Output Parameter:
. A - the matrix
It is recommended that one use the MatCreate(), MatSetType() and/or MatSetFromOptions(),
MatXXXXSetPreallocation() paradigm instead of this routine directly.
[MatXXXXSetPreallocation() is, for example, MatSeqAIJSetPreallocation]
Notes:
If nnz is given then nz is ignored
The AIJ format (also called the Yale sparse matrix format or
compressed row storage), is fully compatible with standard Fortran 77
storage. That is, the stored row and column indices can begin at
either one (as in Fortran) or zero. See the users' manual for details.
Specify the preallocated storage with either nz or nnz (not both).
Set nz=PETSC_DEFAULT and nnz=NULL for PETSc to control dynamic memory
allocation. For large problems you MUST preallocate memory or you
will get TERRIBLE performance, see the users' manual chapter on matrices.
By default, this format uses inodes (identical nodes) when possible, to
improve numerical efficiency of matrix-vector products and solves. We
search for consecutive rows with the same nonzero structure, thereby
reusing matrix information to achieve increased efficiency.
Level: intermediate
.seealso: MatCreate(), MatCreateAIJ(), MatSetValues(), MatSeqAIJSetColumnIndices(), MatCreateSeqAIJWithArrays(), MatCreateAIJ(), MATMPIAIJCUSPARSE, MATAIJCUSPARSE
@*/
PetscErrorCode MatCreateAIJCUSPARSE(MPI_Comm comm,PetscInt m,PetscInt n,PetscInt M,PetscInt N,PetscInt d_nz,const PetscInt d_nnz[],PetscInt o_nz,const PetscInt o_nnz[],Mat *A)
{
PetscErrorCode ierr;
PetscMPIInt size;
PetscFunctionBegin;
ierr = MatCreate(comm,A);CHKERRQ(ierr);
ierr = MatSetSizes(*A,m,n,M,N);CHKERRQ(ierr);
ierr = MPI_Comm_size(comm,&size);CHKERRQ(ierr);
if (size > 1) {
ierr = MatSetType(*A,MATMPIAIJCUSPARSE);CHKERRQ(ierr);
ierr = MatMPIAIJSetPreallocation(*A,d_nz,d_nnz,o_nz,o_nnz);CHKERRQ(ierr);
} else {
ierr = MatSetType(*A,MATSEQAIJCUSPARSE);CHKERRQ(ierr);
ierr = MatSeqAIJSetPreallocation(*A,d_nz,d_nnz);CHKERRQ(ierr);
}
PetscFunctionReturn(0);
}
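/*
 * Illustrative sketch only: the MatCreate()/MatSetType()/MatXXXXSetPreallocation()
 * paradigm recommended in the manual page above, for a hypothetical application
 * that owns m local rows and n local columns (error checking omitted).
 *
 *   Mat A;
 *   MatCreate(comm, &A);
 *   MatSetSizes(A, m, n, PETSC_DETERMINE, PETSC_DETERMINE);
 *   MatSetType(A, MATAIJCUSPARSE);                  // or choose the type at run
 *   MatSetFromOptions(A);                           //   time via -mat_type aijcusparse
 *   MatSeqAIJSetPreallocation(A, d_nz, d_nnz);              // used on one process
 *   MatMPIAIJSetPreallocation(A, d_nz, d_nnz, o_nz, o_nnz); // used in parallel
 *   // ... MatSetValues()/MatAssemblyBegin()/MatAssemblyEnd() as usual ...
 */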
/*M
MATAIJCUSPARSE - MATMPIAIJCUSPARSE = "aijcusparse" = "mpiaijcusparse" - A matrix type to be used for sparse matrices.
   A matrix type whose data resides on NVIDIA GPUs. These matrices can be in either
CSR, ELL, or Hybrid format. The ELL and HYB formats require CUDA 4.2 or later.
All matrix calculations are performed on Nvidia GPUs using the CUSPARSE library.
This matrix type is identical to MATSEQAIJCUSPARSE when constructed with a single process communicator,
and MATMPIAIJCUSPARSE otherwise. As a result, for single process communicators,
MatSeqAIJSetPreallocation is supported, and similarly MatMPIAIJSetPreallocation is supported
for communicators controlling multiple processes. It is recommended that you call both of
the above preallocation routines for simplicity.
Options Database Keys:
+ -mat_type mpiaijcusparse - sets the matrix type to "mpiaijcusparse" during a call to MatSetFromOptions()
. -mat_cusparse_storage_format csr - sets the storage format of diagonal and off-diagonal matrices during a call to MatSetFromOptions(). Other options include ell (ellpack) or hyb (hybrid).
. -mat_cusparse_mult_diag_storage_format csr - sets the storage format of diagonal matrix during a call to MatSetFromOptions(). Other options include ell (ellpack) or hyb (hybrid).
- -mat_cusparse_mult_offdiag_storage_format csr - sets the storage format of off-diagonal matrix during a call to MatSetFromOptions(). Other options include ell (ellpack) or hyb (hybrid).
Level: beginner
.seealso: MatCreateAIJCUSPARSE(), MATSEQAIJCUSPARSE, MatCreateSeqAIJCUSPARSE(), MatCUSPARSESetFormat(), MatCUSPARSEStorageFormat, MatCUSPARSEFormatOperation
M*/
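/*
 * Example usage (illustrative only): how the options database keys documented in
 * the manual page above might be passed on the command line of a hypothetical
 * PETSc application; the executable name and process count are placeholders.
 *
 *   mpiexec -n 4 ./app -mat_type mpiaijcusparse \
 *                      -mat_cusparse_mult_diag_storage_format ell \
 *                      -mat_cusparse_mult_offdiag_storage_format hyb
 *
 * or, to set both the diagonal and off-diagonal blocks at once:
 *
 *   mpiexec -n 4 ./app -mat_type mpiaijcusparse -mat_cusparse_storage_format csr
 */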
| 53e58fb98bcd76f4adb728b26ed9528f539da86b.cu | #define PETSC_SKIP_SPINLOCK
#include <petscconf.h>
#include <../src/mat/impls/aij/mpi/mpiaij.h> /*I "petscmat.h" I*/
#include <../src/mat/impls/aij/mpi/mpicusparse/mpicusparsematimpl.h>
PetscErrorCode MatMPIAIJSetPreallocation_MPIAIJCUSPARSE(Mat B,PetscInt d_nz,const PetscInt d_nnz[],PetscInt o_nz,const PetscInt o_nnz[])
{
Mat_MPIAIJ *b = (Mat_MPIAIJ*)B->data;
Mat_MPIAIJCUSPARSE * cusparseStruct = (Mat_MPIAIJCUSPARSE*)b->spptr;
PetscErrorCode ierr;
PetscInt i;
PetscFunctionBegin;
ierr = PetscLayoutSetUp(B->rmap);CHKERRQ(ierr);
ierr = PetscLayoutSetUp(B->cmap);CHKERRQ(ierr);
if (d_nnz) {
for (i=0; i<B->rmap->n; i++) {
if (d_nnz[i] < 0) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"d_nnz cannot be less than 0: local row %D value %D",i,d_nnz[i]);
}
}
if (o_nnz) {
for (i=0; i<B->rmap->n; i++) {
if (o_nnz[i] < 0) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"o_nnz cannot be less than 0: local row %D value %D",i,o_nnz[i]);
}
}
if (!B->preallocated) {
/* Explicitly create 2 MATSEQAIJCUSPARSE matrices. */
ierr = MatCreate(PETSC_COMM_SELF,&b->A);CHKERRQ(ierr);
ierr = MatSetSizes(b->A,B->rmap->n,B->cmap->n,B->rmap->n,B->cmap->n);CHKERRQ(ierr);
ierr = MatSetType(b->A,MATSEQAIJCUSPARSE);CHKERRQ(ierr);
ierr = PetscLogObjectParent((PetscObject)B,(PetscObject)b->A);CHKERRQ(ierr);
ierr = MatCreate(PETSC_COMM_SELF,&b->B);CHKERRQ(ierr);
ierr = MatSetSizes(b->B,B->rmap->n,B->cmap->N,B->rmap->n,B->cmap->N);CHKERRQ(ierr);
ierr = MatSetType(b->B,MATSEQAIJCUSPARSE);CHKERRQ(ierr);
ierr = PetscLogObjectParent((PetscObject)B,(PetscObject)b->B);CHKERRQ(ierr);
}
ierr = MatSeqAIJSetPreallocation(b->A,d_nz,d_nnz);CHKERRQ(ierr);
ierr = MatSeqAIJSetPreallocation(b->B,o_nz,o_nnz);CHKERRQ(ierr);
ierr = MatCUSPARSESetFormat(b->A,MAT_CUSPARSE_MULT,cusparseStruct->diagGPUMatFormat);CHKERRQ(ierr);
ierr = MatCUSPARSESetFormat(b->B,MAT_CUSPARSE_MULT,cusparseStruct->offdiagGPUMatFormat);CHKERRQ(ierr);
ierr = MatCUSPARSESetHandle(b->A,cusparseStruct->handle);CHKERRQ(ierr);
ierr = MatCUSPARSESetHandle(b->B,cusparseStruct->handle);CHKERRQ(ierr);
ierr = MatCUSPARSESetStream(b->A,cusparseStruct->stream);CHKERRQ(ierr);
ierr = MatCUSPARSESetStream(b->B,cusparseStruct->stream);CHKERRQ(ierr);
B->preallocated = PETSC_TRUE;
PetscFunctionReturn(0);
}
PetscErrorCode MatCreateVecs_MPIAIJCUSPARSE(Mat mat,Vec *right,Vec *left)
{
PetscErrorCode ierr;
PetscInt rbs,cbs;
PetscFunctionBegin;
ierr = MatGetBlockSizes(mat,&rbs,&cbs);CHKERRQ(ierr);
if (right) {
ierr = VecCreate(PetscObjectComm((PetscObject)mat),right);CHKERRQ(ierr);
ierr = VecSetSizes(*right,mat->cmap->n,PETSC_DETERMINE);CHKERRQ(ierr);
ierr = VecSetBlockSize(*right,cbs);CHKERRQ(ierr);
ierr = VecSetType(*right,VECCUDA);CHKERRQ(ierr);
ierr = VecSetLayout(*right,mat->cmap);CHKERRQ(ierr);
}
if (left) {
ierr = VecCreate(PetscObjectComm((PetscObject)mat),left);CHKERRQ(ierr);
ierr = VecSetSizes(*left,mat->rmap->n,PETSC_DETERMINE);CHKERRQ(ierr);
ierr = VecSetBlockSize(*left,rbs);CHKERRQ(ierr);
ierr = VecSetType(*left,VECCUDA);CHKERRQ(ierr);
ierr = VecSetLayout(*left,mat->rmap);CHKERRQ(ierr);
}
PetscFunctionReturn(0);
}
PetscErrorCode MatMult_MPIAIJCUSPARSE(Mat A,Vec xx,Vec yy)
{
  /* This multiplication sequence is different from the
     CPU version. In particular, the diagonal block
multiplication kernel is launched in one stream. Then,
in a separate stream, the data transfers from DeviceToHost
(with MPI messaging in between), then HostToDevice are
launched. Once the data transfer stream is synchronized,
to ensure messaging is complete, the MatMultAdd kernel
is launched in the original (MatMult) stream to protect
against race conditions.
This sequence should only be called for GPU computation. */
Mat_MPIAIJ *a = (Mat_MPIAIJ*)A->data;
PetscErrorCode ierr;
PetscInt nt;
PetscFunctionBegin;
ierr = VecGetLocalSize(xx,&nt);CHKERRQ(ierr);
if (nt != A->cmap->n) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_SIZ,"Incompatible partition of A (%D) and xx (%D)",A->cmap->n,nt);
ierr = VecScatterInitializeForGPU(a->Mvctx,xx,SCATTER_FORWARD);CHKERRQ(ierr);
ierr = (*a->A->ops->mult)(a->A,xx,yy);CHKERRQ(ierr);
ierr = VecScatterBegin(a->Mvctx,xx,a->lvec,INSERT_VALUES,SCATTER_FORWARD);CHKERRQ(ierr);
ierr = VecScatterEnd(a->Mvctx,xx,a->lvec,INSERT_VALUES,SCATTER_FORWARD);CHKERRQ(ierr);
ierr = (*a->B->ops->multadd)(a->B,a->lvec,yy,yy);CHKERRQ(ierr);
ierr = VecScatterFinalizeForGPU(a->Mvctx);CHKERRQ(ierr);
PetscFunctionReturn(0);
}
PetscErrorCode MatMultTranspose_MPIAIJCUSPARSE(Mat A,Vec xx,Vec yy)
{
  /* This multiplication sequence is different from the
     CPU version. In particular, the diagonal block
multiplication kernel is launched in one stream. Then,
in a separate stream, the data transfers from DeviceToHost
(with MPI messaging in between), then HostToDevice are
launched. Once the data transfer stream is synchronized,
to ensure messaging is complete, the MatMultAdd kernel
is launched in the original (MatMult) stream to protect
against race conditions.
This sequence should only be called for GPU computation. */
Mat_MPIAIJ *a = (Mat_MPIAIJ*)A->data;
PetscErrorCode ierr;
PetscInt nt;
PetscFunctionBegin;
ierr = VecGetLocalSize(xx,&nt);CHKERRQ(ierr);
if (nt != A->cmap->n) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_SIZ,"Incompatible partition of A (%D) and xx (%D)",A->cmap->n,nt);
ierr = VecScatterInitializeForGPU(a->Mvctx,xx,SCATTER_FORWARD);CHKERRQ(ierr);
ierr = (*a->A->ops->multtranspose)(a->A,xx,yy);CHKERRQ(ierr);
ierr = VecScatterBegin(a->Mvctx,xx,a->lvec,INSERT_VALUES,SCATTER_FORWARD);CHKERRQ(ierr);
ierr = VecScatterEnd(a->Mvctx,xx,a->lvec,INSERT_VALUES,SCATTER_FORWARD);CHKERRQ(ierr);
ierr = (*a->B->ops->multtransposeadd)(a->B,a->lvec,yy,yy);CHKERRQ(ierr);
ierr = VecScatterFinalizeForGPU(a->Mvctx);CHKERRQ(ierr);
PetscFunctionReturn(0);
}
PetscErrorCode MatCUSPARSESetFormat_MPIAIJCUSPARSE(Mat A,MatCUSPARSEFormatOperation op,MatCUSPARSEStorageFormat format)
{
Mat_MPIAIJ *a = (Mat_MPIAIJ*)A->data;
Mat_MPIAIJCUSPARSE * cusparseStruct = (Mat_MPIAIJCUSPARSE*)a->spptr;
PetscFunctionBegin;
switch (op) {
case MAT_CUSPARSE_MULT_DIAG:
cusparseStruct->diagGPUMatFormat = format;
break;
case MAT_CUSPARSE_MULT_OFFDIAG:
cusparseStruct->offdiagGPUMatFormat = format;
break;
case MAT_CUSPARSE_ALL:
cusparseStruct->diagGPUMatFormat = format;
cusparseStruct->offdiagGPUMatFormat = format;
break;
default:
    SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_SUP,"unsupported operation %d for MatCUSPARSEFormatOperation. Only MAT_CUSPARSE_MULT_DIAG, MAT_CUSPARSE_MULT_OFFDIAG, and MAT_CUSPARSE_ALL are currently supported.",op);
}
PetscFunctionReturn(0);
}
PetscErrorCode MatSetFromOptions_MPIAIJCUSPARSE(PetscOptionItems *PetscOptionsObject,Mat A)
{
MatCUSPARSEStorageFormat format;
PetscErrorCode ierr;
PetscBool flg;
Mat_MPIAIJ *a = (Mat_MPIAIJ*)A->data;
Mat_MPIAIJCUSPARSE *cusparseStruct = (Mat_MPIAIJCUSPARSE*)a->spptr;
PetscFunctionBegin;
ierr = PetscOptionsHead(PetscOptionsObject,"MPIAIJCUSPARSE options");CHKERRQ(ierr);
ierr = PetscObjectOptionsBegin((PetscObject)A);
if (A->factortype==MAT_FACTOR_NONE) {
ierr = PetscOptionsEnum("-mat_cusparse_mult_diag_storage_format","sets storage format of the diagonal blocks of (mpi)aijcusparse gpu matrices for SpMV",
"MatCUSPARSESetFormat",MatCUSPARSEStorageFormats,(PetscEnum)cusparseStruct->diagGPUMatFormat,(PetscEnum*)&format,&flg);CHKERRQ(ierr);
if (flg) {
ierr = MatCUSPARSESetFormat(A,MAT_CUSPARSE_MULT_DIAG,format);CHKERRQ(ierr);
}
ierr = PetscOptionsEnum("-mat_cusparse_mult_offdiag_storage_format","sets storage format of the off-diagonal blocks (mpi)aijcusparse gpu matrices for SpMV",
"MatCUSPARSESetFormat",MatCUSPARSEStorageFormats,(PetscEnum)cusparseStruct->offdiagGPUMatFormat,(PetscEnum*)&format,&flg);CHKERRQ(ierr);
if (flg) {
ierr = MatCUSPARSESetFormat(A,MAT_CUSPARSE_MULT_OFFDIAG,format);CHKERRQ(ierr);
}
ierr = PetscOptionsEnum("-mat_cusparse_storage_format","sets storage format of the diagonal and off-diagonal blocks (mpi)aijcusparse gpu matrices for SpMV",
"MatCUSPARSESetFormat",MatCUSPARSEStorageFormats,(PetscEnum)cusparseStruct->diagGPUMatFormat,(PetscEnum*)&format,&flg);CHKERRQ(ierr);
if (flg) {
ierr = MatCUSPARSESetFormat(A,MAT_CUSPARSE_ALL,format);CHKERRQ(ierr);
}
}
ierr = PetscOptionsEnd();CHKERRQ(ierr);
PetscFunctionReturn(0);
}
PETSC_EXTERN PetscErrorCode MatAssemblyEnd_MPIAIJ(Mat,MatAssemblyType);
PetscErrorCode MatAssemblyEnd_MPIAIJCUSPARSE(Mat A,MatAssemblyType mode)
{
PetscErrorCode ierr;
Mat_MPIAIJ *mpiaij;
PetscFunctionBegin;
mpiaij = (Mat_MPIAIJ*)A->data;
ierr = MatAssemblyEnd_MPIAIJ(A,mode);CHKERRQ(ierr);
if (!A->was_assembled && mode == MAT_FINAL_ASSEMBLY) {
ierr = VecSetType(mpiaij->lvec,VECSEQCUDA);CHKERRQ(ierr);
}
PetscFunctionReturn(0);
}
PetscErrorCode MatDestroy_MPIAIJCUSPARSE(Mat A)
{
PetscErrorCode ierr;
Mat_MPIAIJ *a = (Mat_MPIAIJ*)A->data;
Mat_MPIAIJCUSPARSE *cusparseStruct = (Mat_MPIAIJCUSPARSE*)a->spptr;
cudaError_t err;
cusparseStatus_t stat;
PetscFunctionBegin;
try {
ierr = MatCUSPARSEClearHandle(a->A);CHKERRQ(ierr);
ierr = MatCUSPARSEClearHandle(a->B);CHKERRQ(ierr);
stat = cusparseDestroy(cusparseStruct->handle);CHKERRCUDA(stat);
err = cudaStreamDestroy(cusparseStruct->stream);CHKERRCUDA(err);
delete cusparseStruct;
} catch(char *ex) {
SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_LIB,"Mat_MPIAIJCUSPARSE error: %s", ex);
}
cusparseStruct = 0;
ierr = MatDestroy_MPIAIJ(A);CHKERRQ(ierr);
PetscFunctionReturn(0);
}
PETSC_EXTERN PetscErrorCode MatCreate_MPIAIJCUSPARSE(Mat A)
{
PetscErrorCode ierr;
Mat_MPIAIJ *a;
Mat_MPIAIJCUSPARSE * cusparseStruct;
cudaError_t err;
cusparseStatus_t stat;
PetscFunctionBegin;
ierr = MatCreate_MPIAIJ(A);CHKERRQ(ierr);
ierr = PetscObjectComposeFunction((PetscObject)A,"MatMPIAIJSetPreallocation_C",MatMPIAIJSetPreallocation_MPIAIJCUSPARSE);CHKERRQ(ierr);
a = (Mat_MPIAIJ*)A->data;
a->spptr = new Mat_MPIAIJCUSPARSE;
cusparseStruct = (Mat_MPIAIJCUSPARSE*)a->spptr;
cusparseStruct->diagGPUMatFormat = MAT_CUSPARSE_CSR;
cusparseStruct->offdiagGPUMatFormat = MAT_CUSPARSE_CSR;
stat = cusparseCreate(&(cusparseStruct->handle));CHKERRCUDA(stat);
err = cudaStreamCreate(&(cusparseStruct->stream));CHKERRCUDA(err);
A->ops->assemblyend = MatAssemblyEnd_MPIAIJCUSPARSE;
A->ops->getvecs = MatCreateVecs_MPIAIJCUSPARSE;
A->ops->mult = MatMult_MPIAIJCUSPARSE;
A->ops->multtranspose = MatMultTranspose_MPIAIJCUSPARSE;
A->ops->setfromoptions = MatSetFromOptions_MPIAIJCUSPARSE;
A->ops->destroy = MatDestroy_MPIAIJCUSPARSE;
ierr = PetscObjectChangeTypeName((PetscObject)A,MATMPIAIJCUSPARSE);CHKERRQ(ierr);
ierr = PetscObjectComposeFunction((PetscObject)A,"MatCUSPARSESetFormat_C", MatCUSPARSESetFormat_MPIAIJCUSPARSE);CHKERRQ(ierr);
PetscFunctionReturn(0);
}
/*@
MatCreateAIJCUSPARSE - Creates a sparse matrix in AIJ (compressed row) format
   (the default parallel PETSc format). This matrix will ultimately be pushed down
   to NVIDIA GPUs and use the CUSPARSE library for calculations. For good matrix
assembly performance the user should preallocate the matrix storage by setting
the parameter nz (or the array nnz). By setting these parameters accurately,
performance during matrix assembly can be increased by more than a factor of 50.
Collective on MPI_Comm
Input Parameters:
+ comm - MPI communicator, set to PETSC_COMM_SELF
. m - number of rows
. n - number of columns
. nz - number of nonzeros per row (same for all rows)
- nnz - array containing the number of nonzeros in the various rows
(possibly different for each row) or NULL
Output Parameter:
. A - the matrix
It is recommended that one use the MatCreate(), MatSetType() and/or MatSetFromOptions(),
MatXXXXSetPreallocation() paradigm instead of this routine directly.
[MatXXXXSetPreallocation() is, for example, MatSeqAIJSetPreallocation]
Notes:
If nnz is given then nz is ignored
The AIJ format (also called the Yale sparse matrix format or
compressed row storage), is fully compatible with standard Fortran 77
storage. That is, the stored row and column indices can begin at
either one (as in Fortran) or zero. See the users' manual for details.
Specify the preallocated storage with either nz or nnz (not both).
Set nz=PETSC_DEFAULT and nnz=NULL for PETSc to control dynamic memory
allocation. For large problems you MUST preallocate memory or you
will get TERRIBLE performance, see the users' manual chapter on matrices.
By default, this format uses inodes (identical nodes) when possible, to
improve numerical efficiency of matrix-vector products and solves. We
search for consecutive rows with the same nonzero structure, thereby
reusing matrix information to achieve increased efficiency.
Level: intermediate
.seealso: MatCreate(), MatCreateAIJ(), MatSetValues(), MatSeqAIJSetColumnIndices(), MatCreateSeqAIJWithArrays(), MatCreateAIJ(), MATMPIAIJCUSPARSE, MATAIJCUSPARSE
@*/
PetscErrorCode MatCreateAIJCUSPARSE(MPI_Comm comm,PetscInt m,PetscInt n,PetscInt M,PetscInt N,PetscInt d_nz,const PetscInt d_nnz[],PetscInt o_nz,const PetscInt o_nnz[],Mat *A)
{
PetscErrorCode ierr;
PetscMPIInt size;
PetscFunctionBegin;
ierr = MatCreate(comm,A);CHKERRQ(ierr);
ierr = MatSetSizes(*A,m,n,M,N);CHKERRQ(ierr);
ierr = MPI_Comm_size(comm,&size);CHKERRQ(ierr);
if (size > 1) {
ierr = MatSetType(*A,MATMPIAIJCUSPARSE);CHKERRQ(ierr);
ierr = MatMPIAIJSetPreallocation(*A,d_nz,d_nnz,o_nz,o_nnz);CHKERRQ(ierr);
} else {
ierr = MatSetType(*A,MATSEQAIJCUSPARSE);CHKERRQ(ierr);
ierr = MatSeqAIJSetPreallocation(*A,d_nz,d_nnz);CHKERRQ(ierr);
}
PetscFunctionReturn(0);
}
/*M
MATAIJCUSPARSE - MATMPIAIJCUSPARSE = "aijcusparse" = "mpiaijcusparse" - A matrix type to be used for sparse matrices.
   A matrix type whose data resides on NVIDIA GPUs. These matrices can be in either
CSR, ELL, or Hybrid format. The ELL and HYB formats require CUDA 4.2 or later.
All matrix calculations are performed on Nvidia GPUs using the CUSPARSE library.
This matrix type is identical to MATSEQAIJCUSPARSE when constructed with a single process communicator,
and MATMPIAIJCUSPARSE otherwise. As a result, for single process communicators,
MatSeqAIJSetPreallocation is supported, and similarly MatMPIAIJSetPreallocation is supported
for communicators controlling multiple processes. It is recommended that you call both of
the above preallocation routines for simplicity.
Options Database Keys:
+ -mat_type mpiaijcusparse - sets the matrix type to "mpiaijcusparse" during a call to MatSetFromOptions()
. -mat_cusparse_storage_format csr - sets the storage format of diagonal and off-diagonal matrices during a call to MatSetFromOptions(). Other options include ell (ellpack) or hyb (hybrid).
. -mat_cusparse_mult_diag_storage_format csr - sets the storage format of diagonal matrix during a call to MatSetFromOptions(). Other options include ell (ellpack) or hyb (hybrid).
- -mat_cusparse_mult_offdiag_storage_format csr - sets the storage format of off-diagonal matrix during a call to MatSetFromOptions(). Other options include ell (ellpack) or hyb (hybrid).
Level: beginner
.seealso: MatCreateAIJCUSPARSE(), MATSEQAIJCUSPARSE, MatCreateSeqAIJCUSPARSE(), MatCUSPARSESetFormat(), MatCUSPARSEStorageFormat, MatCUSPARSEFormatOperation
M*/
|
bifrost.hip | // !!! This is a file automatically generated by hipify!!!
#include <data_types/timeseries.hpp>
#include <data_types/fourierseries.hpp>
#include <data_types/candidates.hpp>
#include <data_types/filterbank.hpp>
#include <pipeline/error.hpp>
#include <pipeline/default_params.hpp>
#include <pipeline/pipeline_types.hpp>
#include <pipeline/pipeline.hpp>
#include <transforms/dedisperser.hpp>
#include <transforms/resampler.hpp>
#include <transforms/folder.hpp>
#include <transforms/ffter.hpp>
#include <transforms/dereddener.hpp>
#include <transforms/spectrumformer.hpp>
#include <transforms/birdiezapper.hpp>
#include <transforms/peakfinder.hpp>
#include <transforms/distiller.hpp>
#include <transforms/harmonicfolder.hpp>
#include <transforms/scorer.hpp>
#include <utils/mean_variance.hpp>
#include <utils/exceptions.hpp>
#include <utils/utils.hpp>
#include <utils/stats.hpp>
#include <utils/stopwatch.hpp>
#include <utils/progress_bar.hpp>
#include <utils/cmdline.hpp>
#include <utils/output_stats.hpp>
#include <string>
#include <iostream>
#include <fstream> // NEEDED FOR TEST OUTPUT FILES
#include <stdio.h>
#include <unistd.h>
#include "hip/hip_runtime.h"
#include "hipfft.h"
#include "dedisp.h"
#include "pthread.h"
#include <cmath>
#include <map>
#include <sstream>
using std::cin;
using std::cout;
using std::endl;
using std::cerr;
#define POST_PROC 1
struct dedisp_plan_struct {
// Multi-GPU parameters
dedisp_size device_count;
// Size parameters
dedisp_size dm_count;
dedisp_size nchans;
dedisp_size max_delay;
dedisp_size gulp_size;
// Physical parameters
dedisp_float dt;
dedisp_float f0;
dedisp_float df;
double mean;
double std_dev;
// Host arrays
std::vector<dedisp_float> dm_list; // size = dm_count
std::vector<dedisp_float> delay_table; // size = nchans
std::vector<dedisp_bool> killmask; // size = nchans
std::vector<dedisp_size> scrunch_list; // size = dm_count
// Device arrays //NEW: one for each GPU
std::vector< thrust::device_vector<dedisp_float> > d_dm_list;
std::vector< thrust::device_vector<dedisp_float> > d_delay_table;
std::vector< thrust::device_vector<dedisp_bool> > d_killmask;
std::vector< thrust::device_vector<dedisp_size> > d_scrunch_list;
//StreamType stream;
// Scrunching parameters
dedisp_bool scrunching_enabled;
dedisp_float pulse_width;
dedisp_float scrunch_tol;
};
class DMDispenser {
private:
DispersionTrials<unsigned char>& trials;
pthread_mutex_t mutex;
int dm_idx;
int count;
ProgressBar* progress;
bool use_progress_bar;
public:
DMDispenser(DispersionTrials<unsigned char>& trials)
:trials(trials),dm_idx(0),use_progress_bar(false){
count = trials.get_count();
pthread_mutex_init(&mutex, NULL);
}
void enable_progress_bar(){
progress = new ProgressBar();
use_progress_bar = true;
}
int get_dm_trial_idx(void){
pthread_mutex_lock(&mutex);
int retval;
if (dm_idx==0)
if (use_progress_bar){
printf("Releasing DMs to workers...\n");
progress->start();
}
if (dm_idx >= trials.get_count()){
retval = -1;
if (use_progress_bar)
progress->stop();
} else {
if (use_progress_bar)
progress->set_progress((float)dm_idx/count);
retval = dm_idx;
dm_idx++;
}
pthread_mutex_unlock(&mutex);
return retval;
}
~DMDispenser(){
if (use_progress_bar)
delete progress;
pthread_mutex_destroy(&mutex);
}
};
class Worker {
private:
DispersionTrials<unsigned char>& trials;
DMDispenser& manager;
CmdLineOptions& args;
AccelerationPlan& acc_plan;
unsigned int size;
int device;
std::map<std::string,Stopwatch> timers;
public:
CandidateCollection dm_trial_cands;
Worker(DispersionTrials<unsigned char>& trials, DMDispenser& manager,
AccelerationPlan& acc_plan, CmdLineOptions& args, unsigned int size, int device)
:trials(trials),manager(manager),acc_plan(acc_plan),args(args),size(size),device(device){}
void start(void)
{
//Generate some timer instances for benchmarking
//timers["get_dm_trial"] = Stopwatch();
//timers["copy_to_device"] = Stopwatch();
//timers["rednoise"] = Stopwatch();
//timers["search"] = Stopwatch();
hipSetDevice(device);
Stopwatch pass_timer;
pass_timer.start();
bool padding = false;
if (size > trials.get_nsamps())
padding = true;
CuFFTerR2C r2cfft(size);
CuFFTerC2R c2rfft(size);
float tobs = size*trials.get_tsamp();
float bin_width = 1.0/tobs;
DeviceFourierSeries<hipfftComplex> d_fseries(size/2+1,bin_width);
DedispersedTimeSeries<unsigned char> tim;
ReusableDeviceTimeSeries<float,unsigned char> d_tim(size);
DeviceTimeSeries<float> d_tim_r(size);
TimeDomainResampler resampler;
DevicePowerSpectrum<float> pspec(d_fseries);
Zapper* bzap;
if (args.zapfilename!=""){
if (args.verbose)
std::cout << "Using zapfile: " << args.zapfilename << std::endl;
bzap = new Zapper(args.zapfilename);
}
Dereddener rednoise(size/2+1);
SpectrumFormer former;
PeakFinder cand_finder(args.min_snr,args.min_freq,args.max_freq,size);
HarmonicSums<float> sums(pspec,args.nharmonics);
HarmonicFolder harm_folder(sums);
std::vector<float> acc_list;
HarmonicDistiller harm_finder(args.freq_tol,args.max_harm,false);
AccelerationDistiller acc_still(tobs,args.freq_tol,true);
float mean,std,rms;
float padding_mean;
int ii;
PUSH_NVTX_RANGE("DM-Loop",0)
while (true){
//timers["get_trial_dm"].start();
ii = manager.get_dm_trial_idx();
//timers["get_trial_dm"].stop();
if (ii==-1)
break;
trials.get_idx(ii,tim);
if (args.verbose)
std::cout << "Copying DM trial to device (DM: " << tim.get_dm() << ")"<< std::endl;
d_tim.copy_from_host(tim);
//timers["rednoise"].start()
if (padding){
padding_mean = stats::mean<float>(d_tim.get_data(),trials.get_nsamps());
d_tim.fill(trials.get_nsamps(),d_tim.get_nsamps(),padding_mean);
}
if (args.verbose)
std::cout << "Generating acceleration list" << std::endl;
acc_plan.generate_accel_list(tim.get_dm(),acc_list);
if (args.verbose)
std::cout << "Searching "<< acc_list.size()<< " acceleration trials for DM "<< tim.get_dm() << std::endl;
if (args.verbose)
std::cout << "Executing forward FFT" << std::endl;
r2cfft.execute(d_tim.get_data(),d_fseries.get_data());
if (args.verbose)
std::cout << "Forming power spectrum" << std::endl;
former.form(d_fseries,pspec);
if (args.verbose)
std::cout << "Finding running median" << std::endl;
rednoise.calculate_median(pspec);
if (args.verbose)
std::cout << "Dereddening Fourier series" << std::endl;
rednoise.deredden(d_fseries);
if (args.zapfilename!=""){
if (args.verbose)
std::cout << "Zapping birdies" << std::endl;
bzap->zap(d_fseries);
}
if (args.verbose)
std::cout << "Forming interpolated power spectrum" << std::endl;
former.form_interpolated(d_fseries,pspec);
if (args.verbose)
std::cout << "Finding statistics" << std::endl;
stats::stats<float>(pspec.get_data(),size/2+1,&mean,&rms,&std);
if (args.verbose)
std::cout << "Executing inverse FFT" << std::endl;
c2rfft.execute(d_fseries.get_data(),d_tim.get_data());
CandidateCollection accel_trial_cands;
PUSH_NVTX_RANGE("Acceleration-Loop",1)
for (int jj=0;jj<acc_list.size();jj++){
//if (args.verbose)
// std::cout << "Resampling to "<< acc_list[jj] << " m/s/s" << std::endl;
resampler.resample(d_tim,d_tim_r,size,acc_list[jj]);
//if (args.verbose)
// std::cout << "Execute forward FFT" << std::endl;
r2cfft.execute(d_tim_r.get_data(),d_fseries.get_data());
//if (args.verbose)
// std::cout << "Form interpolated power spectrum" << std::endl;
former.form_interpolated(d_fseries,pspec);
//if (args.verbose)
// std::cout << "Normalise power spectrum" << std::endl;
stats::normalise(pspec.get_data(),mean*size,std*size,size/2+1);
//if (args.verbose)
// std::cout << "Harmonic summing" << std::endl;
harm_folder.fold(pspec);
//if (args.verbose)
// std::cout << "Finding peaks" << std::endl;
SpectrumCandidates trial_cands(tim.get_dm(),ii,acc_list[jj]);
cand_finder.find_candidates(pspec,trial_cands);
cand_finder.find_candidates(sums,trial_cands);
//if (args.verbose)
// std::cout << "Distilling harmonics" << std::endl;
accel_trial_cands.append(harm_finder.distill(trial_cands.cands));
}
POP_NVTX_RANGE
if (args.verbose)
std::cout << "Distilling accelerations" << std::endl;
dm_trial_cands.append(acc_still.distill(accel_trial_cands.cands));
}
POP_NVTX_RANGE
if (args.zapfilename!="")
delete bzap;
if (args.verbose)
std::cout << "DM processing took " << pass_timer.getTime() << " seconds"<< std::endl;
}
};
void* launch_worker_thread(void* ptr){
reinterpret_cast<Worker*>(ptr)->start();
return NULL;
}
int main(int argc, char* argv[])
{
std::map<std::string,Stopwatch> timers;
timers["reading"] = Stopwatch();
timers["dedispersion"] = Stopwatch();
timers["searching"] = Stopwatch();
timers["folding"] = Stopwatch();
timers["total"] = Stopwatch();
timers["pulsar"] = Stopwatch();
timers["single_pulse"] = Stopwatch();
timers["total"].start();
cout << "##########################################" << endl;
cout << "THIS IS A DEVELOPMENT VERSION!!" << endl;
cout << "DO NOT USE AS A PART OF REGULAR PIPELINE!!" << endl;
cout << "##########################################" << endl << endl;
CmdLineOptions args;
if (!read_cmdline_options(args,argc,argv))
ErrorChecker::throw_error("Failed to parse command line arguments.");
int device_count;
if( hipSuccess != hipGetDeviceCount(&device_count))
// exits if there are no devices detected
ErrorChecker::throw_error("There are no available CUDA-capable devices");
cout << "There are " << device_count << " available devices" << endl;
hipDeviceProp_t properties;
for(int i=0; i < device_count; i++)
{
hipGetDeviceProperties(&properties, i);
cout << "Device " << i << ": " << properties.name << endl;
}
cout << "Number of devices requested: " << args.max_num_threads << endl;
if (device_count < args.max_num_threads)
ErrorChecker::throw_error("The number of requested devices has to be lower or equal to the number of available devices");
int nthreads = args.max_num_threads;
if (!args.gpu_ids.empty())
{
if (args.gpu_ids.size() != nthreads)
{
cout << "The number of GPUs used must be the same as the number of IDs provided (if any)" << std::endl;
cout << "Will now terminate!" << endl;
return 1;
}
} else
{
// will always start with ID 0
for (int current_id = 0; current_id < nthreads; current_id++)
{
args.gpu_ids.push_back(current_id);
}
}
args.verbose = true; // for testing purposes
std::vector<int>::iterator ids_iterator;
cout << endl;
cout << "Devices that will be used: " << endl;
for (ids_iterator = args.gpu_ids.begin(); ids_iterator < args.gpu_ids.end(); ++ids_iterator)
{
hipGetDeviceProperties(&properties, *ids_iterator);
cout << "Device " << *ids_iterator << ": " << properties.name << endl;
}
if (args.verbose)
cout << "Using file: " << args.infilename << endl;
std::string filename(args.infilename);
if (args.progress_bar)
cout << "Reading data from " << args.infilename.c_str() << endl;
timers["reading"].start();
unsigned int disp_diff = 10; // so I don't have to make the whole thing again
bool smooth = true; // mean and stdev smoothing on/off
hipSetDevice(args.gpu_ids[0]);
SigprocFilterbank filobj(filename, disp_diff, smooth);
timers["reading"].stop();
if (args.progress_bar)
{
cout << "Complete (read time: " << timers["reading"].getTime() << "s)" << endl;
}
std::cout << "Starting dedispersion phase, common for both pulsar and single pulse detection" << std::endl;
Dedisperser dedisperser(filobj,args.gpu_ids,nthreads); // dedisp_create_plan_multi invoked here
if (args.killfilename!="")
{
if (args.verbose)
std::cout << "Using killfile: " << args.killfilename << std::endl;
dedisperser.set_killmask(args.killfilename);
}
std::cout << "Generating DM list" << std::endl;
dedisperser.generate_dm_list(args.dm_start,args.dm_end,args.dm_pulse_width,args.dm_tol);
std::vector<float> dm_list = dedisperser.get_dm_list();
if (args.verbose)
{
std::cout << dm_list.size() << " DM trials" << std::endl;
for (int ii=0;ii<dm_list.size();ii++)
std::cout << dm_list[ii] << std::endl; // print out a list of DM trials
}
std::cout << "Executing dedispersion" << std::endl;
if (args.progress_bar)
std::cout << "Starting dedispersion...\n";
timers["dedispersion"].start();
PUSH_NVTX_RANGE("Dedisperse",3)
DispersionTrials<unsigned char> trials = dedisperser.dedisperse();
POP_NVTX_RANGE
size_t output_samps = trials.get_nsamps();
size_t dm_size = trials.get_dm_list_size();
size_t output_size = output_samps * dm_size;
unsigned char *timeseries_data_ptr = trials.get_data();
// print out first and last 262144 time samples
// will amount to total of around 64 seconds of GHRSS data
// REMEMBER - data is DM-major
/*
std::string file_out;
std::ostringstream oss;
for (size_t dm_try = 0; dm_try < dm_size; dm_try++) {
oss.str("");
oss << dm_try;
size_t dm_start = dm_try * output_samps;
file_out = "DM" + oss.str() + ".dat";
std::ofstream to_save(file_out.c_str());
for (size_t sample = 0; sample < 262144; sample++)
to_save << (unsigned int)timeseries_data_ptr[dm_start + sample] << endl;
for (size_t sample = output_samps - 262144; sample < output_samps; sample++)
to_save << (unsigned int)timeseries_data_ptr[dm_start + sample] << endl;
to_save.close();
}
*/
dedisp_plan original_plan = dedisperser.get_dedispersion_plan();
timers["dedispersion"].stop();
if (args.progress_bar)
std::cout << "Dedispersion execution time: " << timers["dedispersion"].getTime() << "s\n";
timers["pulsar"].start();
if( args.pulsar_search || args.both_search)
{
std::cout << "Pulsar searching starts here\n";
unsigned int size;
if (args.size==0)
size = Utils::prev_power_of_two(filobj.get_nsamps());
else
size = args.size;
if (args.verbose)
std::cout << "Setting transform length to " << size << " points" << std::endl;
AccelerationPlan acc_plan(args.acc_start, args.acc_end, args.acc_tol,
args.acc_pulse_width, size, filobj.get_tsamp(),
filobj.get_cfreq(), filobj.get_foff());
//Multithreading commands
timers["searching"].start();
std::vector<Worker*> workers(nthreads);
std::vector<pthread_t> threads(nthreads);
DMDispenser dispenser(trials);
if (args.progress_bar)
dispenser.enable_progress_bar();
cout << "Nthreads: " << nthreads << endl;
for (int ii=0;ii<nthreads;ii++){
workers[ii] = (new Worker(trials,dispenser,acc_plan,args,size,args.gpu_ids[ii]));
pthread_create(&threads[ii], NULL, launch_worker_thread, (void*) workers[ii]);
}
DMDistiller dm_still(args.freq_tol,true);
HarmonicDistiller harm_still(args.freq_tol,args.max_harm,true,false);
CandidateCollection dm_cands;
for (int ii=0; ii<nthreads; ii++){
pthread_join(threads[ii],NULL);
dm_cands.append(workers[ii]->dm_trial_cands.cands);
}
timers["searching"].stop();
cout << "Distilling DMs" << endl;
dm_cands.cands = dm_still.distill(dm_cands.cands);
dm_cands.cands = harm_still.distill(dm_cands.cands);
cout << "Running candidate scorer" << endl;
CandidateScorer cand_scorer(filobj.get_tsamp(),filobj.get_cfreq(), filobj.get_foff(),
fabs(filobj.get_foff())*filobj.get_nchans());
cand_scorer.score_all(dm_cands.cands);
if (args.verbose)
std::cout << "Setting up time series folder" << std::endl;
MultiFolder folder(dm_cands.cands,trials);
timers["folding"].start();
if (args.progress_bar)
folder.enable_progress_bar();
if (args.npdmp > 0){
if (args.verbose)
std::cout << "Folding top "<< args.npdmp <<" cands" << std::endl;
// fold_n checks if npdmp is smaller than the number of candidates
folder.fold_n(args.npdmp);
}
timers["folding"].stop();
if (args.verbose)
std::cout << "Writing output files" << std::endl;
int new_size = ::min(args.limit,(int) dm_cands.cands.size());
dm_cands.cands.resize(new_size);
CandidateFileWriter cand_files(args.outdir);
cand_files.write_binary(dm_cands.cands,"pulsar_candidates.peasoup");
OutputFileWriter stats;
stats.add_misc_info();
stats.add_header(filename);
stats.add_search_parameters(args);
stats.add_dm_list(dm_list);
std::vector<float> acc_list;
acc_plan.generate_accel_list(0.0,acc_list);
stats.add_acc_list(acc_list);
stats.add_gpu_info(args.gpu_ids);
stats.add_candidates(dm_cands.cands,cand_files.byte_mapping);
stats.add_timing_info(timers);
std::stringstream xml_filepath;
xml_filepath << args.outdir << "/" << "pulsar_search_overview.xml";
stats.to_file(xml_filepath.str());
cout << "Finished pulsar searching\n";
if(POST_PROC) {
cout << "Removing pulsar search lock" << endl;
rmdir("pulsar_lock");
}
}
timers["pulsar"].stop();
timers["single_pulse"].start();
for(int ii = 0; ii < nthreads; ii++)
cout << args.gpu_ids[ii];
for(int ii = 1; ii < nthreads; ii++) {
cout << ii << endl;
hipSetDevice(args.gpu_ids[ii]);
cout << ii << endl;
hipDeviceReset();
cout << ii << endl;
}
if( args.single_pulse_search || args.both_search )
{
cout << "Made it here" << endl;
hipSetDevice(args.gpu_ids[0]);
hipDeviceReset();
std::cout << "Single pulse searching starts here\n";
std::cout << "Heimdall, open the Bifrost!!\n";
// because Bifrost opening Heimdall sounds wrong
// create Heimdall pipeline object - use results from pre-peasoup dedispersion
// we don't really need the whole hd_create_pipeline here, as it only performs the
// pre-dedispersion setup steps such as creating the DM list etc.
hd_params params;
hd_set_default_params(¶ms);
params.utc_start = filobj.get_utc_start();
params.output_dir = args.outdir;
params.verbosity = 3; // set the maximum verbosity level, for testing purposes
params.sigproc_file = args.infilename;
params.dm_min = args.dm_start;
params.dm_max = args.dm_end;
params.dm_tol = args.dm_tol;
params.dm_pulse_width = args.dm_pulse_width; // expected intrinsic pulse width
params.dm_nbits = 8; // number of bits per dedispersed sample
params.use_scrunching = false;
params.gpu_id = args.gpu_ids[0]; // need to work on this to enable multi-GPU support
params.detect_thresh = 6.0;
params.f0 = filobj.get_fch1();
params.df = filobj.get_foff();
params.dt = filobj.get_tsamp();
params.nchans = filobj.get_nchans();
//params.utc_start = filobj_get_utc_start(); // leave for now
params.spectra_per_second = (double) 1.0/(double)params.dt;
params.max_giant_rate = args.max_rate;
// round nsamps_gulp to a nearest higher power of 2
size_t power_two_gulp = 1 << (unsigned int)ceil(log2((double)args.gulp_size));
params.nsamps_gulp = power_two_gulp;
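// Worked example of the rounding above (illustrative only): a requested gulp of
// 100000 samples gives ceil(log2(100000)) = 17, so nsamps_gulp becomes
// 1 << 17 = 131072.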
size_t nsamps_gulp = params.nsamps_gulp;
float start_time = args.start_time;
float read_time = args.read_time;
// just in case someone puts negative time
size_t start_time_samp = (start_time > 0.0f) ? (size_t)ceil(start_time / params.dt) : 0;
size_t read_time_samp = (size_t)ceil(read_time / params.dt);
cout << start_time_samp << endl;
// default behaviour - read everything
if (read_time_samp == 0)
read_time_samp = output_samps;
cout << read_time_samp << endl;
// make sure we process at least one full gulp
// need to adjust start time
read_time_samp = max((unsigned long long)nsamps_gulp, (unsigned long long)read_time_samp);
start_time_samp = min((long long)start_time_samp, (long long)output_samps - (long long)nsamps_gulp);
cout << "Will process " << read_time_samp << " starting at sample " << start_time_samp << endl;
// check that we are not trying to read beyond what is available
size_t end_time_samp = (size_t)min((unsigned long long)output_samps, (unsigned long long)start_time_samp + read_time_samp);
size_t nbits = filobj.get_nbits();
size_t stride = (params.nchans * nbits) / (8 * sizeof(char));
size_t original_samples = output_samps;
hd_pipeline pipeline;
hd_error error;
cout << "Will process " << read_time_samp << " samples, starting at sample " << start_time_samp << endl;
// dedisp_plan original_plan = dedisperser.get_dedispersion_plan();
cout << "dt: " << original_plan->dt << endl;
//pipeline->set_dedispersion_plan(&original_plan);
error = hd_create_pipeline(&pipeline, original_plan, params);
if ( error != HD_NO_ERROR)
{
std::cerr << "ERROR: pipeline creation failed!!" << std::endl;
return 1;
}
std::cout << "Pipeline created successfully!!" << std::endl;
// hd_byte is unsigned char
// used to store the total number of samples processed so far
size_t total_nsamps = 0;
// move the starting point
total_nsamps = start_time_samp;
size_t overlap = 0;
bool stop_requested = false;
// will stop execution when the number of samples is larger
// or equal to output_samps
size_t nsamps_read = nsamps_gulp;
while( nsamps_read && !stop_requested )
{
if ( params.verbosity >= 1 )
{
cout << "Executing pipeline on new gulp of " << nsamps_gulp
<< " samples..." << endl;
}
hd_size nsamps_processed = 0;
error = hd_execute(pipeline, nsamps_read+overlap, nbits,
total_nsamps, &nsamps_processed, timeseries_data_ptr, original_samples, args.both_search);
if (error == HD_NO_ERROR)
{
if (params.verbosity >= 1)
cout << "Processed " << nsamps_processed << " samples." << endl;
}
else if (error == HD_TOO_MANY_EVENTS)
{
if (params.verbosity >= 1)
cerr << "WARNING: hd_execute produces too many events, some data skipped" << endl;
}
else
{
cerr << "ERROR: Pipeline execution failed" << endl;
cerr << " " << hd_get_error_string(error) << endl;
hd_destroy_pipeline(pipeline);
return -1;
}
//pipeline_timer.stop();
//cout << "pipeline time: " << pipeline_timer.getTime() << " of " << (nsamps_read+overlap) * tsamp << endl;
//pipeline_timer.reset();
total_nsamps += nsamps_processed;
cout << "Samples processed so far: " << total_nsamps << endl;
overlap += nsamps_read - nsamps_processed;
if (total_nsamps + nsamps_processed > end_time_samp)
stop_requested = 1;
}
if( params.verbosity >= 1 )
{
cout << "Successfully processed a total of " << total_nsamps
<< " samples." << endl;
}
if( params.verbosity >= 1 )
{
cout << "Shutting down..." << endl;
}
hd_destroy_pipeline(pipeline);
if( params.verbosity >= 1 )
{
cout << "All done." << endl;
}
if(POST_PROC) {
cout << "Removing single pulse search lock" << endl;
rmdir("single_lock");
}
} // end of the single pulse search if-statement
timers["single_pulse"].stop();
timers["total"].stop();
cout << "Finished the program execution" << endl;
// REMEMBER!! timers is a map!!
cout << "Timing:" << endl
<< "\t * reading the file " << timers["reading"].getTime() << endl
<< "\t * dedispersion: " << timers["dedispersion"].getTime() << endl;
if( args.pulsar_search || args.both_search)
cout << "\t * pulsar search: " << timers["pulsar"].getTime() << endl;
if( args.single_pulse_search || args.both_search )
cout << "\t * single pulse search: " << timers["single_pulse"].getTime() << endl;
return 0;
}
| bifrost.cu | #include <data_types/timeseries.hpp>
#include <data_types/fourierseries.hpp>
#include <data_types/candidates.hpp>
#include <data_types/filterbank.hpp>
#include <pipeline/error.hpp>
#include <pipeline/default_params.hpp>
#include <pipeline/pipeline_types.hpp>
#include <pipeline/pipeline.hpp>
#include <transforms/dedisperser.hpp>
#include <transforms/resampler.hpp>
#include <transforms/folder.hpp>
#include <transforms/ffter.hpp>
#include <transforms/dereddener.hpp>
#include <transforms/spectrumformer.hpp>
#include <transforms/birdiezapper.hpp>
#include <transforms/peakfinder.hpp>
#include <transforms/distiller.hpp>
#include <transforms/harmonicfolder.hpp>
#include <transforms/scorer.hpp>
#include <utils/mean_variance.hpp>
#include <utils/exceptions.hpp>
#include <utils/utils.hpp>
#include <utils/stats.hpp>
#include <utils/stopwatch.hpp>
#include <utils/progress_bar.hpp>
#include <utils/cmdline.hpp>
#include <utils/output_stats.hpp>
#include <string>
#include <iostream>
#include <fstream> // NEEDED FOR TEST OUTPUT FILES
#include <stdio.h>
#include <unistd.h>
#include "cuda.h"
#include "cufft.h"
#include "dedisp.h"
#include "pthread.h"
#include <cmath>
#include <map>
#include <sstream>
using std::cin;
using std::cout;
using std::endl;
using std::cerr;
#define POST_PROC 1
struct dedisp_plan_struct {
// Multi-GPU parameters
dedisp_size device_count;
// Size parameters
dedisp_size dm_count;
dedisp_size nchans;
dedisp_size max_delay;
dedisp_size gulp_size;
// Physical parameters
dedisp_float dt;
dedisp_float f0;
dedisp_float df;
double mean;
double std_dev;
// Host arrays
std::vector<dedisp_float> dm_list; // size = dm_count
std::vector<dedisp_float> delay_table; // size = nchans
std::vector<dedisp_bool> killmask; // size = nchans
std::vector<dedisp_size> scrunch_list; // size = dm_count
// Device arrays //NEW: one for each GPU
std::vector< thrust::device_vector<dedisp_float> > d_dm_list;
std::vector< thrust::device_vector<dedisp_float> > d_delay_table;
std::vector< thrust::device_vector<dedisp_bool> > d_killmask;
std::vector< thrust::device_vector<dedisp_size> > d_scrunch_list;
//StreamType stream;
// Scrunching parameters
dedisp_bool scrunching_enabled;
dedisp_float pulse_width;
dedisp_float scrunch_tol;
};
class DMDispenser {
private:
DispersionTrials<unsigned char>& trials;
pthread_mutex_t mutex;
int dm_idx;
int count;
ProgressBar* progress;
bool use_progress_bar;
public:
DMDispenser(DispersionTrials<unsigned char>& trials)
:trials(trials),dm_idx(0),use_progress_bar(false){
count = trials.get_count();
pthread_mutex_init(&mutex, NULL);
}
void enable_progress_bar(){
progress = new ProgressBar();
use_progress_bar = true;
}
int get_dm_trial_idx(void){
pthread_mutex_lock(&mutex);
int retval;
if (dm_idx==0)
if (use_progress_bar){
printf("Releasing DMs to workers...\n");
progress->start();
}
if (dm_idx >= trials.get_count()){
retval = -1;
if (use_progress_bar)
progress->stop();
} else {
if (use_progress_bar)
progress->set_progress((float)dm_idx/count);
retval = dm_idx;
dm_idx++;
}
pthread_mutex_unlock(&mutex);
return retval;
}
~DMDispenser(){
if (use_progress_bar)
delete progress;
pthread_mutex_destroy(&mutex);
}
};
class Worker {
private:
DispersionTrials<unsigned char>& trials;
DMDispenser& manager;
CmdLineOptions& args;
AccelerationPlan& acc_plan;
unsigned int size;
int device;
std::map<std::string,Stopwatch> timers;
public:
CandidateCollection dm_trial_cands;
Worker(DispersionTrials<unsigned char>& trials, DMDispenser& manager,
AccelerationPlan& acc_plan, CmdLineOptions& args, unsigned int size, int device)
:trials(trials),manager(manager),acc_plan(acc_plan),args(args),size(size),device(device){}
void start(void)
{
//Generate some timer instances for benchmarking
//timers["get_dm_trial"] = Stopwatch();
//timers["copy_to_device"] = Stopwatch();
//timers["rednoise"] = Stopwatch();
//timers["search"] = Stopwatch();
cudaSetDevice(device);
Stopwatch pass_timer;
pass_timer.start();
bool padding = false;
if (size > trials.get_nsamps())
padding = true;
CuFFTerR2C r2cfft(size);
CuFFTerC2R c2rfft(size);
float tobs = size*trials.get_tsamp();
float bin_width = 1.0/tobs;
DeviceFourierSeries<cufftComplex> d_fseries(size/2+1,bin_width);
DedispersedTimeSeries<unsigned char> tim;
ReusableDeviceTimeSeries<float,unsigned char> d_tim(size);
DeviceTimeSeries<float> d_tim_r(size);
TimeDomainResampler resampler;
DevicePowerSpectrum<float> pspec(d_fseries);
Zapper* bzap;
if (args.zapfilename!=""){
if (args.verbose)
std::cout << "Using zapfile: " << args.zapfilename << std::endl;
bzap = new Zapper(args.zapfilename);
}
Dereddener rednoise(size/2+1);
SpectrumFormer former;
PeakFinder cand_finder(args.min_snr,args.min_freq,args.max_freq,size);
HarmonicSums<float> sums(pspec,args.nharmonics);
HarmonicFolder harm_folder(sums);
std::vector<float> acc_list;
HarmonicDistiller harm_finder(args.freq_tol,args.max_harm,false);
AccelerationDistiller acc_still(tobs,args.freq_tol,true);
float mean,std,rms;
float padding_mean;
int ii;
PUSH_NVTX_RANGE("DM-Loop",0)
while (true){
//timers["get_trial_dm"].start();
ii = manager.get_dm_trial_idx();
//timers["get_trial_dm"].stop();
if (ii==-1)
break;
trials.get_idx(ii,tim);
if (args.verbose)
std::cout << "Copying DM trial to device (DM: " << tim.get_dm() << ")"<< std::endl;
d_tim.copy_from_host(tim);
//timers["rednoise"].start()
if (padding){
padding_mean = stats::mean<float>(d_tim.get_data(),trials.get_nsamps());
d_tim.fill(trials.get_nsamps(),d_tim.get_nsamps(),padding_mean);
}
if (args.verbose)
std::cout << "Generating acceleration list" << std::endl;
acc_plan.generate_accel_list(tim.get_dm(),acc_list);
if (args.verbose)
std::cout << "Searching "<< acc_list.size()<< " acceleration trials for DM "<< tim.get_dm() << std::endl;
if (args.verbose)
std::cout << "Executing forward FFT" << std::endl;
r2cfft.execute(d_tim.get_data(),d_fseries.get_data());
if (args.verbose)
std::cout << "Forming power spectrum" << std::endl;
former.form(d_fseries,pspec);
if (args.verbose)
std::cout << "Finding running median" << std::endl;
rednoise.calculate_median(pspec);
if (args.verbose)
std::cout << "Dereddening Fourier series" << std::endl;
rednoise.deredden(d_fseries);
if (args.zapfilename!=""){
if (args.verbose)
std::cout << "Zapping birdies" << std::endl;
bzap->zap(d_fseries);
}
if (args.verbose)
std::cout << "Forming interpolated power spectrum" << std::endl;
former.form_interpolated(d_fseries,pspec);
if (args.verbose)
std::cout << "Finding statistics" << std::endl;
stats::stats<float>(pspec.get_data(),size/2+1,&mean,&rms,&std);
if (args.verbose)
std::cout << "Executing inverse FFT" << std::endl;
c2rfft.execute(d_fseries.get_data(),d_tim.get_data());
CandidateCollection accel_trial_cands;
PUSH_NVTX_RANGE("Acceleration-Loop",1)
for (int jj=0;jj<acc_list.size();jj++){
//if (args.verbose)
// std::cout << "Resampling to "<< acc_list[jj] << " m/s/s" << std::endl;
resampler.resample(d_tim,d_tim_r,size,acc_list[jj]);
//if (args.verbose)
// std::cout << "Execute forward FFT" << std::endl;
r2cfft.execute(d_tim_r.get_data(),d_fseries.get_data());
//if (args.verbose)
// std::cout << "Form interpolated power spectrum" << std::endl;
former.form_interpolated(d_fseries,pspec);
//if (args.verbose)
// std::cout << "Normalise power spectrum" << std::endl;
stats::normalise(pspec.get_data(),mean*size,std*size,size/2+1);
//if (args.verbose)
// std::cout << "Harmonic summing" << std::endl;
harm_folder.fold(pspec);
//if (args.verbose)
// std::cout << "Finding peaks" << std::endl;
SpectrumCandidates trial_cands(tim.get_dm(),ii,acc_list[jj]);
cand_finder.find_candidates(pspec,trial_cands);
cand_finder.find_candidates(sums,trial_cands);
//if (args.verbose)
// std::cout << "Distilling harmonics" << std::endl;
accel_trial_cands.append(harm_finder.distill(trial_cands.cands));
}
POP_NVTX_RANGE
if (args.verbose)
std::cout << "Distilling accelerations" << std::endl;
dm_trial_cands.append(acc_still.distill(accel_trial_cands.cands));
}
POP_NVTX_RANGE
if (args.zapfilename!="")
delete bzap;
if (args.verbose)
std::cout << "DM processing took " << pass_timer.getTime() << " seconds"<< std::endl;
}
};
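// pthread entry point: cast the argument back to a Worker and run its search loop.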
void* launch_worker_thread(void* ptr){
reinterpret_cast<Worker*>(ptr)->start();
return NULL;
}
int main(int argc, char* argv[])
{
std::map<std::string,Stopwatch> timers;
timers["reading"] = Stopwatch();
timers["dedispersion"] = Stopwatch();
timers["searching"] = Stopwatch();
timers["folding"] = Stopwatch();
timers["total"] = Stopwatch();
timers["pulsar"] = Stopwatch();
timers["single_pulse"] = Stopwatch();
timers["total"].start();
cout << "##########################################" << endl;
cout << "THIS IS A DEVELOPMENT VERSION!!" << endl;
cout << "DO NOT USE AS A PART OF REGULAR PIPELINE!!" << endl;
cout << "##########################################" << endl << endl;
CmdLineOptions args;
if (!read_cmdline_options(args,argc,argv))
ErrorChecker::throw_error("Failed to parse command line arguments.");
int device_count;
if( cudaSuccess != cudaGetDeviceCount(&device_count))
// exits if there are no devices detected
ErrorChecker::throw_error("There are no available CUDA-capable devices");
cout << "There are " << device_count << " available devices" << endl;
cudaDeviceProp properties;
for(int i=0; i < device_count; i++)
{
cudaGetDeviceProperties(&properties, i);
cout << "Device " << i << ": " << properties.name << endl;
}
cout << "Number of devices requested: " << args.max_num_threads << endl;
if (device_count < args.max_num_threads)
ErrorChecker::throw_error("The number of requested devices has to be lower or equal to the number of available devices");
int nthreads = args.max_num_threads;
if (!args.gpu_ids.empty())
{
if (args.gpu_ids.size() != nthreads)
{
cout << "The number of GPUs used must be the same as the number of IDs provided (if any)" << std::endl;
cout << "Will now terminate!" << endl;
return 1;
}
} else
{
    // will always start with ID 0
for (int current_id = 0; current_id < nthreads; current_id++)
{
args.gpu_ids.push_back(current_id);
}
}
args.verbose = true; // for testing purposes
std::vector<int>::iterator ids_iterator;
cout << endl;
cout << "Devices that will be used: " << endl;
for (ids_iterator = args.gpu_ids.begin(); ids_iterator < args.gpu_ids.end(); ++ids_iterator)
{
cudaGetDeviceProperties(&properties, *ids_iterator);
cout << "Device " << *ids_iterator << ": " << properties.name << endl;
}
if (args.verbose)
cout << "Using file: " << args.infilename << endl;
std::string filename(args.infilename);
if (args.progress_bar)
cout << "Reading data from " << args.infilename.c_str() << endl;
timers["reading"].start();
unsigned int disp_diff = 10; // so I don't have to make the whole thing again
bool smooth = true; // mean and stdev smoothing on/off
cudaSetDevice(args.gpu_ids[0]);
SigprocFilterbank filobj(filename, disp_diff, smooth);
timers["reading"].stop();
if (args.progress_bar)
{
cout << "Complete (read time: " << timers["reading"].getTime() << "s)" << endl;
}
std::cout << "Starting dedispersion phase, common for both pulsar and single pulse detection" << std::endl;
Dedisperser dedisperser(filobj,args.gpu_ids,nthreads); // dedisp_create_plan_multi invoked here
if (args.killfilename!="")
{
if (args.verbose)
std::cout << "Using killfile: " << args.killfilename << std::endl;
dedisperser.set_killmask(args.killfilename);
}
std::cout << "Generating DM list" << std::endl;
dedisperser.generate_dm_list(args.dm_start,args.dm_end,args.dm_pulse_width,args.dm_tol);
std::vector<float> dm_list = dedisperser.get_dm_list();
if (args.verbose)
{
std::cout << dm_list.size() << " DM trials" << std::endl;
for (int ii=0;ii<dm_list.size();ii++)
std::cout << dm_list[ii] << std::endl; // print out a list of DM trials
}
std::cout << "Executing dedispersion" << std::endl;
if (args.progress_bar)
std::cout << "Starting dedispersion...\n";
timers["dedispersion"].start();
PUSH_NVTX_RANGE("Dedisperse",3)
DispersionTrials<unsigned char> trials = dedisperser.dedisperse();
POP_NVTX_RANGE
size_t output_samps = trials.get_nsamps();
size_t dm_size = trials.get_dm_list_size();
size_t output_size = output_samps * dm_size;
unsigned char *timeseries_data_ptr = trials.get_data();
// print out first and last 262144 time samples
  // will amount to a total of around 64 seconds of GHRSS data
// REMEMBER - data is DM-major
/*
std::string file_out;
std::ostringstream oss;
for (size_t dm_try = 0; dm_try < dm_size; dm_try++) {
oss.str("");
oss << dm_try;
size_t dm_start = dm_try * output_samps;
file_out = "DM" + oss.str() + ".dat";
std::ofstream to_save(file_out.c_str());
for (size_t sample = 0; sample < 262144; sample++)
to_save << (unsigned int)timeseries_data_ptr[dm_start + sample] << endl;
for (size_t sample = output_samps - 262144; sample < output_samps; sample++)
to_save << (unsigned int)timeseries_data_ptr[dm_start + sample] << endl;
to_save.close();
}
*/
dedisp_plan original_plan = dedisperser.get_dedispersion_plan();
timers["dedispersion"].stop();
if (args.progress_bar)
std::cout << "Dedispersion execution time: " << timers["dedispersion"].getTime() << "s\n";
timers["pulsar"].start();
if( args.pulsar_search || args.both_search)
{
std::cout << "Pulsar searching starts here\n";
unsigned int size;
if (args.size==0)
size = Utils::prev_power_of_two(filobj.get_nsamps());
else
size = args.size;
if (args.verbose)
std::cout << "Setting transform length to " << size << " points" << std::endl;
AccelerationPlan acc_plan(args.acc_start, args.acc_end, args.acc_tol,
args.acc_pulse_width, size, filobj.get_tsamp(),
filobj.get_cfreq(), filobj.get_foff());
//Multithreading commands
timers["searching"].start();
std::vector<Worker*> workers(nthreads);
std::vector<pthread_t> threads(nthreads);
DMDispenser dispenser(trials);
if (args.progress_bar)
dispenser.enable_progress_bar();
cout << "Nthreads: " << nthreads << endl;
for (int ii=0;ii<nthreads;ii++){
workers[ii] = (new Worker(trials,dispenser,acc_plan,args,size,args.gpu_ids[ii]));
pthread_create(&threads[ii], NULL, launch_worker_thread, (void*) workers[ii]);
}
DMDistiller dm_still(args.freq_tol,true);
HarmonicDistiller harm_still(args.freq_tol,args.max_harm,true,false);
CandidateCollection dm_cands;
for (int ii=0; ii<nthreads; ii++){
pthread_join(threads[ii],NULL);
dm_cands.append(workers[ii]->dm_trial_cands.cands);
}
timers["searching"].stop();
cout << "Distilling DMs" << endl;
dm_cands.cands = dm_still.distill(dm_cands.cands);
dm_cands.cands = harm_still.distill(dm_cands.cands);
cout << "Running candidate scorer" << endl;
CandidateScorer cand_scorer(filobj.get_tsamp(),filobj.get_cfreq(), filobj.get_foff(),
fabs(filobj.get_foff())*filobj.get_nchans());
cand_scorer.score_all(dm_cands.cands);
if (args.verbose)
std::cout << "Setting up time series folder" << std::endl;
MultiFolder folder(dm_cands.cands,trials);
timers["folding"].start();
if (args.progress_bar)
folder.enable_progress_bar();
if (args.npdmp > 0){
if (args.verbose)
std::cout << "Folding top "<< args.npdmp <<" cands" << std::endl;
// fold_n checks if npdmp is smaller than the number of candidates
folder.fold_n(args.npdmp);
}
timers["folding"].stop();
if (args.verbose)
std::cout << "Writing output files" << std::endl;
int new_size = std::min(args.limit,(int) dm_cands.cands.size());
dm_cands.cands.resize(new_size);
CandidateFileWriter cand_files(args.outdir);
cand_files.write_binary(dm_cands.cands,"pulsar_candidates.peasoup");
OutputFileWriter stats;
stats.add_misc_info();
stats.add_header(filename);
stats.add_search_parameters(args);
stats.add_dm_list(dm_list);
std::vector<float> acc_list;
acc_plan.generate_accel_list(0.0,acc_list);
stats.add_acc_list(acc_list);
stats.add_gpu_info(args.gpu_ids);
stats.add_candidates(dm_cands.cands,cand_files.byte_mapping);
stats.add_timing_info(timers);
std::stringstream xml_filepath;
xml_filepath << args.outdir << "/" << "pulsar_search_overview.xml";
stats.to_file(xml_filepath.str());
cout << "Finished pulsar searching\n";
if(POST_PROC) {
cout << "Removing pulsar search lock" << endl;
rmdir("pulsar_lock");
}
}
timers["pulsar"].stop();
timers["single_pulse"].start();
for(int ii = 0; ii < nthreads; ii++)
cout << args.gpu_ids[ii];
for(int ii = 1; ii < nthreads; ii++) {
cout << ii << endl;
cudaSetDevice(args.gpu_ids[ii]);
cout << ii << endl;
cudaDeviceReset();
cout << ii << endl;
}
if( args.single_pulse_search || args.both_search )
{
cout << "Made it here" << endl;
cudaSetDevice(args.gpu_ids[0]);
cudaDeviceReset();
std::cout << "Single pulse searching starts here\n";
std::cout << "Heimdall, open the Bifrost!!\n";
// because Bifrost opening Heimdall sounds wrong
// create Heimdall pipeline object - use results from pre-peasoup dedispersion
    // we don't really need all of hd_create_pipeline here, as it only performs the
    // dedisp setup steps that precede the actual dedispersion, such as creating the DM list
hd_params params;
hd_set_default_params(¶ms);
params.utc_start = filobj.get_utc_start();
params.output_dir = args.outdir;
params.verbosity = 3; // set the maximum verbosity level, for testing purposes
params.sigproc_file = args.infilename;
params.dm_min = args.dm_start;
params.dm_max = args.dm_end;
params.dm_tol = args.dm_tol;
params.dm_pulse_width = args.dm_pulse_width; // expected intrinsic pulse width
params.dm_nbits = 8; // number of bits per dedispersed sample
params.use_scrunching = false;
params.gpu_id = args.gpu_ids[0]; // need to work on this to enable multi-GPU support
params.detect_thresh = 6.0;
params.f0 = filobj.get_fch1();
params.df = filobj.get_foff();
params.dt = filobj.get_tsamp();
params.nchans = filobj.get_nchans();
//params.utc_start = filobj_get_utc_start(); // leave for now
params.spectra_per_second = (double) 1.0/(double)params.dt;
params.max_giant_rate = args.max_rate;
// round nsamps_gulp to a nearest higher power of 2
size_t power_two_gulp = 1 << (unsigned int)ceil(log2((double)args.gulp_size));
params.nsamps_gulp = power_two_gulp;
size_t nsamps_gulp = params.nsamps_gulp;
float start_time = args.start_time;
float read_time = args.read_time;
    // just in case someone puts in a negative time - clamp to zero before converting to samples
    size_t start_time_samp = max((long long)0, (long long)ceil(start_time / params.dt));
size_t read_time_samp = (size_t)ceil(read_time / params.dt);
cout << start_time_samp << endl;
// default behaviour - read everything
if (read_time_samp == 0)
read_time_samp = output_samps;
cout << read_time_samp << endl;
// make sure we process at least one full gulp
// need to adjust start time
read_time_samp = max((unsigned long long)nsamps_gulp, (unsigned long long)read_time_samp);
start_time_samp = min((long long)start_time_samp, (long long)output_samps - (long long)nsamps_gulp);
cout << "Will process " << read_time_samp << " starting at sample " << start_time_samp << endl;
// check that we are not trying to read beyond what is available
size_t end_time_samp = (size_t)min((unsigned long long)output_samps, (unsigned long long)start_time_samp + read_time_samp);
size_t nbits = filobj.get_nbits();
size_t stride = (params.nchans * nbits) / (8 * sizeof(char));
size_t original_samples = output_samps;
hd_pipeline pipeline;
hd_error error;
cout << "Will process " << read_time_samp << " samples, starting at sample " << start_time_samp << endl;
// dedisp_plan original_plan = dedisperser.get_dedispersion_plan();
cout << "dt: " << original_plan->dt << endl;
//pipeline->set_dedispersion_plan(&original_plan);
error = hd_create_pipeline(&pipeline, original_plan, params);
if ( error != HD_NO_ERROR)
{
std::cerr << "ERROR: pipeline creation failed!!" << std::endl;
return 1;
}
std::cout << "Pipeline created successfully!!" << std::endl;
// hd_byte is unsigned char
// used to store the total number of samples processed so far
size_t total_nsamps = 0;
// move the starting point
total_nsamps = start_time_samp;
size_t overlap = 0;
bool stop_requested = false;
// will stop execution when the number of samples is larger
// or equal to output_samps
size_t nsamps_read = nsamps_gulp;
while( nsamps_read && !stop_requested )
{
if ( params.verbosity >= 1 )
{
cout << "Executing pipeline on new gulp of " << nsamps_gulp
<< " samples..." << endl;
}
hd_size nsamps_processed = 0;
error = hd_execute(pipeline, nsamps_read+overlap, nbits,
total_nsamps, &nsamps_processed, timeseries_data_ptr, original_samples, args.both_search);
if (error == HD_NO_ERROR)
{
if (params.verbosity >= 1)
cout << "Processed " << nsamps_processed << " samples." << endl;
}
else if (error == HD_TOO_MANY_EVENTS)
{
if (params.verbosity >= 1)
cerr << "WARNING: hd_execute produces too many events, some data skipped" << endl;
}
else
{
cerr << "ERROR: Pipeline execution failed" << endl;
cerr << " " << hd_get_error_string(error) << endl;
hd_destroy_pipeline(pipeline);
return -1;
}
//pipeline_timer.stop();
//cout << "pipeline time: " << pipeline_timer.getTime() << " of " << (nsamps_read+overlap) * tsamp << endl;
//pipeline_timer.reset();
total_nsamps += nsamps_processed;
cout << "Samples processed so far: " << total_nsamps << endl;
overlap += nsamps_read - nsamps_processed;
if (total_nsamps + nsamps_processed > end_time_samp)
stop_requested = 1;
}
if( params.verbosity >= 1 )
{
cout << "Successfully processed a total of " << total_nsamps
<< " samples." << endl;
}
if( params.verbosity >= 1 )
{
cout << "Shutting down..." << endl;
}
hd_destroy_pipeline(pipeline);
if( params.verbosity >= 1 )
{
cout << "All done." << endl;
}
if(POST_PROC) {
cout << "Removing single pulse search lock" << endl;
rmdir("single_lock");
}
} // end of the single pulse search if-statement
timers["single_pulse"].stop();
timers["total"].stop();
cout << "Finished the program execution" << endl;
// REMEMBER!! timers is a map!!
cout << "Timing:" << endl
<< "\t * reading the file " << timers["reading"].getTime() << endl
<< "\t * dedispersion: " << timers["dedispersion"].getTime() << endl;
if( args.pulsar_search || args.both_search)
cout << "\t * pulsar search: " << timers["pulsar"].getTime() << endl;
if( args.single_pulse_search || args.both_search )
cout << "\t * single pulse search: " << timers["single_pulse"].getTime() << endl;
return 0;
}
|
fa7fc5734d678da845ed01cd7b3bf2d6385359fb.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#define M 500
#define N 500
#define NUM_ELEMENTS M * N
#define DIM_GRID 256
#define DIM_BLOCK 1024
#define HANDLE_ERROR(err) (HandleError(err, __FILE__, __LINE__))
static void HandleError(hipError_t err, const char *file, int line)
{
if (err != hipSuccess)
{
fprintf(stderr, "%s in %s at line %d\n", hipGetErrorString(err), file, line);
exit(EXIT_FAILURE);
}
}
#define CHECK_CUDA_ERROR(msg) (checkCUDAError(msg, __FILE__, __LINE__))
static void checkCUDAError(const char *msg, const char *file, int line)
{
hipError_t err = hipGetLastError();
if (err != hipSuccess)
{
fprintf(stderr, "Cuda error: %s: %s. In %s at line %d\n", msg, hipGetErrorString(err), file, line);
exit(EXIT_FAILURE);
}
}
__global__ void copy_grid(double *d_w, double *d_u)
{
int x = threadIdx.x + blockDim.x * blockIdx.x;
int y = threadIdx.y + blockDim.y * blockIdx.y;
if (x < M && y < N)
d_u[x + y * N] = d_w[x + y * N];
__syncthreads();
return;
}
__device__ double d_epsilon;
__device__ double d_epsilon_reduction[NUM_ELEMENTS];
__device__ double d_epsilon_reduction_results[DIM_BLOCK];
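// Stage 1 of the convergence test: each block takes the maximum of |w[i] - u[i]|
// over its elements and stores it in d_epsilon_reduction_results[blockIdx.x].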
__global__ void epsilon_reduction(double *d_w, double *d_u)
{
__shared__ double local_reduction[DIM_BLOCK];
int stride = blockDim.x * gridDim.x;
int index = threadIdx.x + blockDim.x * blockIdx.x;
int local_index = threadIdx.x;
    // guard: DIM_GRID * DIM_BLOCK (262144) exceeds NUM_ELEMENTS (250000), so bound-check the read
    local_reduction[local_index] = 0.0;
    if (index < NUM_ELEMENTS)
        local_reduction[local_index] = fabs(d_w[index] - d_u[index]);
    if ((index + stride) < NUM_ELEMENTS && local_reduction[local_index] < fabs(d_w[index + stride] - d_u[index + stride]))
        local_reduction[local_index] = fabs(d_w[index + stride] - d_u[index + stride]);
__syncthreads();
for (int i = blockDim.x>>1; i>0; i>>=1)
{
if (local_index < i && local_reduction[local_index] < local_reduction[local_index + i])
local_reduction[local_index] = local_reduction[local_index + i];
__syncthreads();
}
if(local_index == 0)
d_epsilon_reduction_results[blockIdx.x] = local_reduction[local_index];
return;
}
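// Stage 2 of the convergence test: block 0 reduces the per-block maxima and
// writes the global maximum change to d_epsilon.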
__global__ void epsilon_reduction_results()
{
__shared__ double local_reduction[DIM_BLOCK];
int index = threadIdx.x + blockDim.x * blockIdx.x;
if (index < blockDim.x)
{
local_reduction[index] = 0;
__syncthreads();
local_reduction[index] = d_epsilon_reduction_results[index];
__syncthreads();
for (int i = blockDim.x>>1; i>0; i>>=1)
{
if (index < i && local_reduction[index] < local_reduction[index + i])
local_reduction[index] = local_reduction[index + i];
__syncthreads();
}
if (index == 0)
d_epsilon = local_reduction[index];
__threadfence();
}
return;
}
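// Jacobi update: every interior point of w becomes the average of its four
// neighbours in u; boundary points are left untouched.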
__global__ void calculate_solution(double *d_w, double *d_u)
{
int x = threadIdx.x + blockDim.x * blockIdx.x;
int y = threadIdx.y + blockDim.y * blockIdx.y;
if (x > 0 && y > 0 && x < M - 1 && y < N - 1)
{
int index = x + y * N;
int west = (x - 1) + y * N;
int east = (x + 1) + y * N;
int north = x + (y - 1) * N;
int south = x + (y + 1) * N;
d_w[index] = (d_u[north] + d_u[south] + d_u[east] + d_u[west]) / 4.0;
}
__syncthreads();
return;
}
void calculate_solution_kernel(double w[M][N], double epsilon)
{
double diff;
int iterations;
int iterations_print;
float ElapsedTime;
hipEvent_t cudaStart, cudaStop;
hipEventCreate(&cudaStart);
hipEventCreate(&cudaStop);
const unsigned int matrix_mem_size = sizeof(double) * M * N;
    // device buffer pointers; allocated below with hipMalloc (a host malloc here would only leak)
    double *d_w = NULL;
    double *d_u = NULL;
HANDLE_ERROR(hipMalloc((void **)&d_w, matrix_mem_size));
HANDLE_ERROR(hipMalloc((void **)&d_u, matrix_mem_size));
HANDLE_ERROR(hipMemcpy(d_w, w, matrix_mem_size, hipMemcpyHostToDevice));
dim3 dimGrid(16, 16); // 256 blocks
dim3 dimBlock(32, 32); // 1024 threads
diff = epsilon;
iterations = 0;
iterations_print = 1;
printf("\n");
printf(" Iteration Change\n");
printf("\n");
hipEventRecord(cudaStart, 0);
while (epsilon <= diff)
{
hipLaunchKernelGGL(( copy_grid), dim3(dimGrid), dim3(dimBlock), 0, 0, d_w, d_u);
hipLaunchKernelGGL(( calculate_solution), dim3(dimGrid), dim3(dimBlock), 0, 0, d_w, d_u);
hipLaunchKernelGGL(( epsilon_reduction), dim3(DIM_GRID), dim3(DIM_BLOCK), 0, 0, d_w, d_u);
hipLaunchKernelGGL(( epsilon_reduction_results), dim3(DIM_GRID), dim3(DIM_BLOCK), 0, 0, );
hipDeviceSynchronize();
HANDLE_ERROR(hipMemcpyFromSymbol(&diff, d_epsilon, sizeof(double), 0, hipMemcpyDeviceToHost));
iterations++;
if (iterations == iterations_print)
{
printf(" %8d %lg\n", iterations, diff);
iterations_print = 2 * iterations_print;
}
}
CHECK_CUDA_ERROR("Kernel invocation");
hipEventRecord(cudaStop, 0);
hipEventSynchronize(cudaStop);
hipEventElapsedTime(&ElapsedTime, cudaStart, cudaStop);
printf("\n");
printf(" %8d %lg\n", iterations, diff);
printf("\n");
printf(" Error tolerance achieved.\n");
printf(" GPU time = %f\n", ElapsedTime / 1000);
HANDLE_ERROR(hipMemcpy(w, d_w, matrix_mem_size, hipMemcpyDeviceToHost));
hipFree(d_w);
hipFree(d_u);
hipEventDestroy(cudaStart);
hipEventDestroy(cudaStop);
}
#undef M
#undef N
#undef NUM_ELEMENTS
#undef DIM_GRID
#undef DIM_BLOCK | fa7fc5734d678da845ed01cd7b3bf2d6385359fb.cu | #define M 500
#define N 500
#define NUM_ELEMENTS M * N
#define DIM_GRID 256
#define DIM_BLOCK 1024
#define HANDLE_ERROR(err) (HandleError(err, __FILE__, __LINE__))
static void HandleError(cudaError_t err, const char *file, int line)
{
if (err != cudaSuccess)
{
fprintf(stderr, "%s in %s at line %d\n", cudaGetErrorString(err), file, line);
exit(EXIT_FAILURE);
}
}
#define CHECK_CUDA_ERROR(msg) (checkCUDAError(msg, __FILE__, __LINE__))
static void checkCUDAError(const char *msg, const char *file, int line)
{
cudaError_t err = cudaGetLastError();
if (err != cudaSuccess)
{
fprintf(stderr, "Cuda error: %s: %s. In %s at line %d\n", msg, cudaGetErrorString(err), file, line);
exit(EXIT_FAILURE);
}
}
__global__ void copy_grid(double *d_w, double *d_u)
{
int x = threadIdx.x + blockDim.x * blockIdx.x;
int y = threadIdx.y + blockDim.y * blockIdx.y;
if (x < M && y < N)
d_u[x + y * N] = d_w[x + y * N];
__syncthreads();
return;
}
__device__ double d_epsilon;
__device__ double d_epsilon_reduction[NUM_ELEMENTS];
__device__ double d_epsilon_reduction_results[DIM_BLOCK];
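// Stage 1 of the convergence test: each block takes the maximum of |w[i] - u[i]|
// over its elements and stores it in d_epsilon_reduction_results[blockIdx.x].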
__global__ void epsilon_reduction(double *d_w, double *d_u)
{
__shared__ double local_reduction[DIM_BLOCK];
int stride = blockDim.x * gridDim.x;
int index = threadIdx.x + blockDim.x * blockIdx.x;
int local_index = threadIdx.x;
    // guard: DIM_GRID * DIM_BLOCK (262144) exceeds NUM_ELEMENTS (250000), so bound-check the read
    local_reduction[local_index] = 0.0;
    if (index < NUM_ELEMENTS)
        local_reduction[local_index] = fabs(d_w[index] - d_u[index]);
    if ((index + stride) < NUM_ELEMENTS && local_reduction[local_index] < fabs(d_w[index + stride] - d_u[index + stride]))
        local_reduction[local_index] = fabs(d_w[index + stride] - d_u[index + stride]);
__syncthreads();
for (int i = blockDim.x>>1; i>0; i>>=1)
{
if (local_index < i && local_reduction[local_index] < local_reduction[local_index + i])
local_reduction[local_index] = local_reduction[local_index + i];
__syncthreads();
}
if(local_index == 0)
d_epsilon_reduction_results[blockIdx.x] = local_reduction[local_index];
return;
}
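// Stage 2 of the convergence test: block 0 reduces the per-block maxima and
// writes the global maximum change to d_epsilon.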
__global__ void epsilon_reduction_results()
{
__shared__ double local_reduction[DIM_BLOCK];
int index = threadIdx.x + blockDim.x * blockIdx.x;
if (index < blockDim.x)
{
local_reduction[index] = 0;
__syncthreads();
local_reduction[index] = d_epsilon_reduction_results[index];
__syncthreads();
for (int i = blockDim.x>>1; i>0; i>>=1)
{
if (index < i && local_reduction[index] < local_reduction[index + i])
local_reduction[index] = local_reduction[index + i];
__syncthreads();
}
if (index == 0)
d_epsilon = local_reduction[index];
__threadfence();
}
return;
}
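// Jacobi update: every interior point of w becomes the average of its four
// neighbours in u; boundary points are left untouched.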
__global__ void calculate_solution(double *d_w, double *d_u)
{
int x = threadIdx.x + blockDim.x * blockIdx.x;
int y = threadIdx.y + blockDim.y * blockIdx.y;
if (x > 0 && y > 0 && x < M - 1 && y < N - 1)
{
int index = x + y * N;
int west = (x - 1) + y * N;
int east = (x + 1) + y * N;
int north = x + (y - 1) * N;
int south = x + (y + 1) * N;
d_w[index] = (d_u[north] + d_u[south] + d_u[east] + d_u[west]) / 4.0;
}
__syncthreads();
return;
}
void calculate_solution_kernel(double w[M][N], double epsilon)
{
double diff;
int iterations;
int iterations_print;
float ElapsedTime;
cudaEvent_t cudaStart, cudaStop;
cudaEventCreate(&cudaStart);
cudaEventCreate(&cudaStop);
const unsigned int matrix_mem_size = sizeof(double) * M * N;
    // device buffer pointers; allocated below with cudaMalloc (a host malloc here would only leak)
    double *d_w = NULL;
    double *d_u = NULL;
HANDLE_ERROR(cudaMalloc((void **)&d_w, matrix_mem_size));
HANDLE_ERROR(cudaMalloc((void **)&d_u, matrix_mem_size));
HANDLE_ERROR(cudaMemcpy(d_w, w, matrix_mem_size, cudaMemcpyHostToDevice));
dim3 dimGrid(16, 16); // 256 blocks
dim3 dimBlock(32, 32); // 1024 threads
diff = epsilon;
iterations = 0;
iterations_print = 1;
printf("\n");
printf(" Iteration Change\n");
printf("\n");
cudaEventRecord(cudaStart, 0);
while (epsilon <= diff)
{
copy_grid<<<dimGrid, dimBlock>>>(d_w, d_u);
calculate_solution<<<dimGrid, dimBlock>>>(d_w, d_u);
epsilon_reduction<<<DIM_GRID, DIM_BLOCK>>>(d_w, d_u);
epsilon_reduction_results<<<DIM_GRID, DIM_BLOCK>>>();
cudaDeviceSynchronize();
HANDLE_ERROR(cudaMemcpyFromSymbol(&diff, d_epsilon, sizeof(double), 0, cudaMemcpyDeviceToHost));
iterations++;
if (iterations == iterations_print)
{
printf(" %8d %lg\n", iterations, diff);
iterations_print = 2 * iterations_print;
}
}
CHECK_CUDA_ERROR("Kernel invocation");
cudaEventRecord(cudaStop, 0);
cudaEventSynchronize(cudaStop);
cudaEventElapsedTime(&ElapsedTime, cudaStart, cudaStop);
printf("\n");
printf(" %8d %lg\n", iterations, diff);
printf("\n");
printf(" Error tolerance achieved.\n");
printf(" GPU time = %f\n", ElapsedTime / 1000);
HANDLE_ERROR(cudaMemcpy(w, d_w, matrix_mem_size, cudaMemcpyDeviceToHost));
cudaFree(d_w);
cudaFree(d_u);
cudaEventDestroy(cudaStart);
cudaEventDestroy(cudaStop);
}
#undef M
#undef N
#undef NUM_ELEMENTS
#undef DIM_GRID
#undef DIM_BLOCK |
e9f4bdf9ac3c0c022a098750c678c77fadbaf2e7.hip | // !!! This is a file automatically generated by hipify!!!
/*
* Copyright (c) 2020-2021, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <cugraph/experimental/detail/graph_utils.cuh>
#include <cugraph/experimental/graph.hpp>
#include <cugraph/experimental/graph_functions.hpp>
#include <cugraph/experimental/graph_view.hpp>
#include <cugraph/patterns/copy_to_adj_matrix_row_col.cuh>
#include <cugraph/utilities/error.hpp>
#include <cugraph/utilities/shuffle_comm.cuh>
#include <rmm/thrust_rmm_allocator.h>
#include <cuco/static_map.cuh>
#include <raft/handle.hpp>
#include <rmm/device_uvector.hpp>
#include <rmm/mr/device/per_device_resource.hpp>
#include <rmm/mr/device/polymorphic_allocator.hpp>
#include <thrust/copy.h>
#include <thrust/iterator/discard_iterator.h>
#include <thrust/sort.h>
#include <thrust/tuple.h>
#include <algorithm>
#include <iterator>
#include <numeric>
#include <tuple>
#include <utility>
namespace cugraph {
namespace experimental {
// FIXME: think about requiring old_new_label_pairs to be pre-shuffled
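// relabel: overwrite each entry of `labels` with its new value from old_new_label_pairs
// using a cuco::static_map. In the multi-GPU case the pairs and the unique query labels
// are first shuffled to the GPU owning each vertex and the results shuffled back.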
template <typename vertex_t, bool multi_gpu>
void relabel(raft::handle_t const& handle,
std::tuple<vertex_t const*, vertex_t const*> old_new_label_pairs,
vertex_t num_label_pairs,
vertex_t* labels /* [INOUT] */,
vertex_t num_labels,
bool skip_missing_labels,
bool do_expensive_check)
{
double constexpr load_factor = 0.7;
if (multi_gpu) {
auto& comm = handle.get_comms();
auto const comm_size = comm.get_size();
auto key_func = detail::compute_gpu_id_from_vertex_t<vertex_t>{comm_size};
// find unique old labels (to be relabeled)
rmm::device_uvector<vertex_t> unique_old_labels(num_labels, handle.get_stream());
thrust::copy(rmm::exec_policy(handle.get_stream())->on(handle.get_stream()),
labels,
labels + num_labels,
unique_old_labels.data());
thrust::sort(rmm::exec_policy(handle.get_stream())->on(handle.get_stream()),
unique_old_labels.begin(),
unique_old_labels.end());
unique_old_labels.resize(
thrust::distance(
unique_old_labels.begin(),
thrust::unique(rmm::exec_policy(handle.get_stream())->on(handle.get_stream()),
unique_old_labels.begin(),
unique_old_labels.end())),
handle.get_stream());
unique_old_labels.shrink_to_fit(handle.get_stream());
// collect new labels for the unique old labels
rmm::device_uvector<vertex_t> new_labels_for_unique_old_labels(0, handle.get_stream());
{
// shuffle the old_new_label_pairs based on applying the compute_gpu_id_from_vertex_t functor
// to the old labels
rmm::device_uvector<vertex_t> rx_label_pair_old_labels(0, handle.get_stream());
rmm::device_uvector<vertex_t> rx_label_pair_new_labels(0, handle.get_stream());
{
rmm::device_uvector<vertex_t> label_pair_old_labels(num_label_pairs, handle.get_stream());
rmm::device_uvector<vertex_t> label_pair_new_labels(num_label_pairs, handle.get_stream());
thrust::copy(rmm::exec_policy(handle.get_stream())->on(handle.get_stream()),
std::get<0>(old_new_label_pairs),
std::get<0>(old_new_label_pairs) + num_label_pairs,
label_pair_old_labels.begin());
thrust::copy(rmm::exec_policy(handle.get_stream())->on(handle.get_stream()),
std::get<1>(old_new_label_pairs),
std::get<1>(old_new_label_pairs) + num_label_pairs,
label_pair_new_labels.begin());
auto pair_first = thrust::make_zip_iterator(
thrust::make_tuple(label_pair_old_labels.begin(), label_pair_new_labels.begin()));
std::forward_as_tuple(std::tie(rx_label_pair_old_labels, rx_label_pair_new_labels),
std::ignore) =
groupby_gpuid_and_shuffle_values(
handle.get_comms(),
pair_first,
pair_first + num_label_pairs,
[key_func] __device__(auto val) { return key_func(thrust::get<0>(val)); },
handle.get_stream());
}
// update intermediate relabel map
CUDA_TRY(hipStreamSynchronize(
handle.get_stream())); // cuco::static_map currently does not take stream
auto poly_alloc =
rmm::mr::polymorphic_allocator<char>(rmm::mr::get_current_device_resource());
auto stream_adapter =
rmm::mr::make_stream_allocator_adaptor(poly_alloc, hipStream_t{nullptr});
cuco::static_map<vertex_t, vertex_t, cuda::thread_scope_device, decltype(stream_adapter)>
relabel_map{// cuco::static_map requires at least one empty slot
                  std::max(static_cast<size_t>(
static_cast<double>(rx_label_pair_old_labels.size()) / load_factor),
rx_label_pair_old_labels.size() + 1),
invalid_vertex_id<vertex_t>::value,
invalid_vertex_id<vertex_t>::value,
stream_adapter};
auto pair_first = thrust::make_zip_iterator(
thrust::make_tuple(rx_label_pair_old_labels.begin(), rx_label_pair_new_labels.begin()));
relabel_map.insert(pair_first, pair_first + rx_label_pair_old_labels.size());
rx_label_pair_old_labels.resize(0, handle.get_stream());
rx_label_pair_new_labels.resize(0, handle.get_stream());
rx_label_pair_old_labels.shrink_to_fit(handle.get_stream());
rx_label_pair_new_labels.shrink_to_fit(handle.get_stream());
// shuffle unique_old_labels, relabel using the intermediate relabel map, and shuffle back
{
rmm::device_uvector<vertex_t> rx_unique_old_labels(0, handle.get_stream());
std::vector<size_t> rx_value_counts{};
std::tie(rx_unique_old_labels, rx_value_counts) = groupby_gpuid_and_shuffle_values(
handle.get_comms(),
unique_old_labels.begin(),
unique_old_labels.end(),
[key_func] __device__(auto val) { return key_func(val); },
handle.get_stream());
CUDA_TRY(hipStreamSynchronize(
handle.get_stream())); // cuco::static_map currently does not take stream
if (skip_missing_labels) {
thrust::transform(rmm::exec_policy(handle.get_stream())->on(handle.get_stream()),
rx_unique_old_labels.begin(),
rx_unique_old_labels.end(),
rx_unique_old_labels.begin(),
[view = relabel_map.get_device_view()] __device__(auto old_label) {
auto found = view.find(old_label);
return found != view.end() ? view.find(old_label)->second.load(
cuda::std::memory_order_relaxed)
: old_label;
});
} else {
relabel_map.find(
rx_unique_old_labels.begin(),
rx_unique_old_labels.end(),
          rx_unique_old_labels.begin()); // now rx_unique_old_labels holds the new labels for the
// corresponding old labels
}
std::tie(new_labels_for_unique_old_labels, std::ignore) = shuffle_values(
handle.get_comms(), rx_unique_old_labels.begin(), rx_value_counts, handle.get_stream());
}
}
handle.get_stream_view().synchronize(); // cuco::static_map currently does not take stream
{
auto poly_alloc =
rmm::mr::polymorphic_allocator<char>(rmm::mr::get_current_device_resource());
auto stream_adapter =
rmm::mr::make_stream_allocator_adaptor(poly_alloc, hipStream_t{nullptr});
cuco::static_map<vertex_t, vertex_t, cuda::thread_scope_device, decltype(stream_adapter)>
relabel_map{
// cuco::static_map requires at least one empty slot
        std::max(static_cast<size_t>(static_cast<double>(unique_old_labels.size()) / load_factor),
unique_old_labels.size() + 1),
invalid_vertex_id<vertex_t>::value,
invalid_vertex_id<vertex_t>::value,
stream_adapter};
auto pair_first = thrust::make_zip_iterator(
thrust::make_tuple(unique_old_labels.begin(), new_labels_for_unique_old_labels.begin()));
relabel_map.insert(pair_first, pair_first + unique_old_labels.size());
relabel_map.find(labels, labels + num_labels, labels);
}
} else {
cuco::static_map<vertex_t, vertex_t> relabel_map(
// cuco::static_map requires at least one empty slot
      std::max(static_cast<size_t>(static_cast<double>(num_label_pairs) / load_factor),
static_cast<size_t>(num_label_pairs) + 1),
invalid_vertex_id<vertex_t>::value,
invalid_vertex_id<vertex_t>::value);
auto pair_first = thrust::make_zip_iterator(
thrust::make_tuple(std::get<0>(old_new_label_pairs), std::get<1>(old_new_label_pairs)));
relabel_map.insert(pair_first, pair_first + num_label_pairs);
if (skip_missing_labels) {
thrust::transform(rmm::exec_policy(handle.get_stream())->on(handle.get_stream()),
labels,
labels + num_labels,
labels,
[view = relabel_map.get_device_view()] __device__(auto old_label) {
auto found = view.find(old_label);
return found != view.end() ? view.find(old_label)->second.load(
cuda::std::memory_order_relaxed)
: old_label;
});
} else {
relabel_map.find(labels, labels + num_labels, labels);
}
}
if (do_expensive_check && !skip_missing_labels) {
CUGRAPH_EXPECTS(
thrust::count(rmm::exec_policy(handle.get_stream())->on(handle.get_stream()),
labels,
labels + num_labels,
invalid_vertex_id<vertex_t>::value) == 0,
"Invalid input argument: labels include old label values missing in old_new_label_pairs.");
}
return;
}
// explicit instantiation
template void relabel<int32_t, true>(raft::handle_t const& handle,
std::tuple<int32_t const*, int32_t const*> old_new_label_pairs,
int32_t num_label_pairs,
int32_t* labels,
int32_t num_labels,
bool skip_missing_labels,
bool do_expensive_check);
template void relabel<int32_t, false>(
raft::handle_t const& handle,
std::tuple<int32_t const*, int32_t const*> old_new_label_pairs,
int32_t num_label_pairs,
int32_t* labels,
int32_t num_labels,
bool skip_missing_labels,
bool do_expensive_check);
template void relabel<int64_t, true>(raft::handle_t const& handle,
std::tuple<int64_t const*, int64_t const*> old_new_label_pairs,
int64_t num_label_pairs,
int64_t* labels,
int64_t num_labels,
bool skip_missing_labels,
bool do_expensive_check);
template void relabel<int64_t, false>(
raft::handle_t const& handle,
std::tuple<int64_t const*, int64_t const*> old_new_label_pairs,
int64_t num_label_pairs,
int64_t* labels,
int64_t num_labels,
bool skip_missing_labels,
bool do_expensive_check);
} // namespace experimental
} // namespace cugraph
| e9f4bdf9ac3c0c022a098750c678c77fadbaf2e7.cu | /*
* Copyright (c) 2020-2021, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <cugraph/experimental/detail/graph_utils.cuh>
#include <cugraph/experimental/graph.hpp>
#include <cugraph/experimental/graph_functions.hpp>
#include <cugraph/experimental/graph_view.hpp>
#include <cugraph/patterns/copy_to_adj_matrix_row_col.cuh>
#include <cugraph/utilities/error.hpp>
#include <cugraph/utilities/shuffle_comm.cuh>
#include <rmm/thrust_rmm_allocator.h>
#include <cuco/static_map.cuh>
#include <raft/handle.hpp>
#include <rmm/device_uvector.hpp>
#include <rmm/mr/device/per_device_resource.hpp>
#include <rmm/mr/device/polymorphic_allocator.hpp>
#include <thrust/copy.h>
#include <thrust/iterator/discard_iterator.h>
#include <thrust/sort.h>
#include <thrust/tuple.h>
#include <algorithm>
#include <iterator>
#include <numeric>
#include <tuple>
#include <utility>
namespace cugraph {
namespace experimental {
// FIXME: think about requiring old_new_label_pairs to be pre-shuffled
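// relabel: overwrite each entry of `labels` with its new value from old_new_label_pairs
// using a cuco::static_map. In the multi-GPU case the pairs and the unique query labels
// are first shuffled to the GPU owning each vertex and the results shuffled back.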
template <typename vertex_t, bool multi_gpu>
void relabel(raft::handle_t const& handle,
std::tuple<vertex_t const*, vertex_t const*> old_new_label_pairs,
vertex_t num_label_pairs,
vertex_t* labels /* [INOUT] */,
vertex_t num_labels,
bool skip_missing_labels,
bool do_expensive_check)
{
double constexpr load_factor = 0.7;
if (multi_gpu) {
auto& comm = handle.get_comms();
auto const comm_size = comm.get_size();
auto key_func = detail::compute_gpu_id_from_vertex_t<vertex_t>{comm_size};
// find unique old labels (to be relabeled)
rmm::device_uvector<vertex_t> unique_old_labels(num_labels, handle.get_stream());
thrust::copy(rmm::exec_policy(handle.get_stream())->on(handle.get_stream()),
labels,
labels + num_labels,
unique_old_labels.data());
thrust::sort(rmm::exec_policy(handle.get_stream())->on(handle.get_stream()),
unique_old_labels.begin(),
unique_old_labels.end());
unique_old_labels.resize(
thrust::distance(
unique_old_labels.begin(),
thrust::unique(rmm::exec_policy(handle.get_stream())->on(handle.get_stream()),
unique_old_labels.begin(),
unique_old_labels.end())),
handle.get_stream());
unique_old_labels.shrink_to_fit(handle.get_stream());
// collect new labels for the unique old labels
rmm::device_uvector<vertex_t> new_labels_for_unique_old_labels(0, handle.get_stream());
{
// shuffle the old_new_label_pairs based on applying the compute_gpu_id_from_vertex_t functor
// to the old labels
rmm::device_uvector<vertex_t> rx_label_pair_old_labels(0, handle.get_stream());
rmm::device_uvector<vertex_t> rx_label_pair_new_labels(0, handle.get_stream());
{
rmm::device_uvector<vertex_t> label_pair_old_labels(num_label_pairs, handle.get_stream());
rmm::device_uvector<vertex_t> label_pair_new_labels(num_label_pairs, handle.get_stream());
thrust::copy(rmm::exec_policy(handle.get_stream())->on(handle.get_stream()),
std::get<0>(old_new_label_pairs),
std::get<0>(old_new_label_pairs) + num_label_pairs,
label_pair_old_labels.begin());
thrust::copy(rmm::exec_policy(handle.get_stream())->on(handle.get_stream()),
std::get<1>(old_new_label_pairs),
std::get<1>(old_new_label_pairs) + num_label_pairs,
label_pair_new_labels.begin());
auto pair_first = thrust::make_zip_iterator(
thrust::make_tuple(label_pair_old_labels.begin(), label_pair_new_labels.begin()));
std::forward_as_tuple(std::tie(rx_label_pair_old_labels, rx_label_pair_new_labels),
std::ignore) =
groupby_gpuid_and_shuffle_values(
handle.get_comms(),
pair_first,
pair_first + num_label_pairs,
[key_func] __device__(auto val) { return key_func(thrust::get<0>(val)); },
handle.get_stream());
}
// update intermediate relabel map
CUDA_TRY(cudaStreamSynchronize(
handle.get_stream())); // cuco::static_map currently does not take stream
auto poly_alloc =
rmm::mr::polymorphic_allocator<char>(rmm::mr::get_current_device_resource());
auto stream_adapter =
rmm::mr::make_stream_allocator_adaptor(poly_alloc, cudaStream_t{nullptr});
cuco::static_map<vertex_t, vertex_t, cuda::thread_scope_device, decltype(stream_adapter)>
relabel_map{// cuco::static_map requires at least one empty slot
std::max(static_cast<size_t>(
static_cast<double>(rx_label_pair_old_labels.size()) / load_factor),
rx_label_pair_old_labels.size() + 1),
invalid_vertex_id<vertex_t>::value,
invalid_vertex_id<vertex_t>::value,
stream_adapter};
auto pair_first = thrust::make_zip_iterator(
thrust::make_tuple(rx_label_pair_old_labels.begin(), rx_label_pair_new_labels.begin()));
relabel_map.insert(pair_first, pair_first + rx_label_pair_old_labels.size());
rx_label_pair_old_labels.resize(0, handle.get_stream());
rx_label_pair_new_labels.resize(0, handle.get_stream());
rx_label_pair_old_labels.shrink_to_fit(handle.get_stream());
rx_label_pair_new_labels.shrink_to_fit(handle.get_stream());
// shuffle unique_old_labels, relabel using the intermediate relabel map, and shuffle back
{
rmm::device_uvector<vertex_t> rx_unique_old_labels(0, handle.get_stream());
std::vector<size_t> rx_value_counts{};
std::tie(rx_unique_old_labels, rx_value_counts) = groupby_gpuid_and_shuffle_values(
handle.get_comms(),
unique_old_labels.begin(),
unique_old_labels.end(),
[key_func] __device__(auto val) { return key_func(val); },
handle.get_stream());
CUDA_TRY(cudaStreamSynchronize(
handle.get_stream())); // cuco::static_map currently does not take stream
if (skip_missing_labels) {
thrust::transform(rmm::exec_policy(handle.get_stream())->on(handle.get_stream()),
rx_unique_old_labels.begin(),
rx_unique_old_labels.end(),
rx_unique_old_labels.begin(),
[view = relabel_map.get_device_view()] __device__(auto old_label) {
auto found = view.find(old_label);
return found != view.end() ? view.find(old_label)->second.load(
cuda::std::memory_order_relaxed)
: old_label;
});
} else {
relabel_map.find(
rx_unique_old_labels.begin(),
rx_unique_old_labels.end(),
          rx_unique_old_labels.begin()); // now rx_unique_old_labels holds the new labels for the
// corresponding old labels
}
std::tie(new_labels_for_unique_old_labels, std::ignore) = shuffle_values(
handle.get_comms(), rx_unique_old_labels.begin(), rx_value_counts, handle.get_stream());
}
}
handle.get_stream_view().synchronize(); // cuco::static_map currently does not take stream
{
auto poly_alloc =
rmm::mr::polymorphic_allocator<char>(rmm::mr::get_current_device_resource());
auto stream_adapter =
rmm::mr::make_stream_allocator_adaptor(poly_alloc, cudaStream_t{nullptr});
cuco::static_map<vertex_t, vertex_t, cuda::thread_scope_device, decltype(stream_adapter)>
relabel_map{
// cuco::static_map requires at least one empty slot
std::max(static_cast<size_t>(static_cast<double>(unique_old_labels.size()) / load_factor),
unique_old_labels.size() + 1),
invalid_vertex_id<vertex_t>::value,
invalid_vertex_id<vertex_t>::value,
stream_adapter};
auto pair_first = thrust::make_zip_iterator(
thrust::make_tuple(unique_old_labels.begin(), new_labels_for_unique_old_labels.begin()));
relabel_map.insert(pair_first, pair_first + unique_old_labels.size());
relabel_map.find(labels, labels + num_labels, labels);
}
} else {
cuco::static_map<vertex_t, vertex_t> relabel_map(
// cuco::static_map requires at least one empty slot
std::max(static_cast<size_t>(static_cast<double>(num_label_pairs) / load_factor),
static_cast<size_t>(num_label_pairs) + 1),
invalid_vertex_id<vertex_t>::value,
invalid_vertex_id<vertex_t>::value);
auto pair_first = thrust::make_zip_iterator(
thrust::make_tuple(std::get<0>(old_new_label_pairs), std::get<1>(old_new_label_pairs)));
relabel_map.insert(pair_first, pair_first + num_label_pairs);
if (skip_missing_labels) {
thrust::transform(rmm::exec_policy(handle.get_stream())->on(handle.get_stream()),
labels,
labels + num_labels,
labels,
[view = relabel_map.get_device_view()] __device__(auto old_label) {
auto found = view.find(old_label);
return found != view.end() ? view.find(old_label)->second.load(
cuda::std::memory_order_relaxed)
: old_label;
});
} else {
relabel_map.find(labels, labels + num_labels, labels);
}
}
if (do_expensive_check && !skip_missing_labels) {
CUGRAPH_EXPECTS(
thrust::count(rmm::exec_policy(handle.get_stream())->on(handle.get_stream()),
labels,
labels + num_labels,
invalid_vertex_id<vertex_t>::value) == 0,
"Invalid input argument: labels include old label values missing in old_new_label_pairs.");
}
return;
}
// explicit instantiation
template void relabel<int32_t, true>(raft::handle_t const& handle,
std::tuple<int32_t const*, int32_t const*> old_new_label_pairs,
int32_t num_label_pairs,
int32_t* labels,
int32_t num_labels,
bool skip_missing_labels,
bool do_expensive_check);
template void relabel<int32_t, false>(
raft::handle_t const& handle,
std::tuple<int32_t const*, int32_t const*> old_new_label_pairs,
int32_t num_label_pairs,
int32_t* labels,
int32_t num_labels,
bool skip_missing_labels,
bool do_expensive_check);
template void relabel<int64_t, true>(raft::handle_t const& handle,
std::tuple<int64_t const*, int64_t const*> old_new_label_pairs,
int64_t num_label_pairs,
int64_t* labels,
int64_t num_labels,
bool skip_missing_labels,
bool do_expensive_check);
template void relabel<int64_t, false>(
raft::handle_t const& handle,
std::tuple<int64_t const*, int64_t const*> old_new_label_pairs,
int64_t num_label_pairs,
int64_t* labels,
int64_t num_labels,
bool skip_missing_labels,
bool do_expensive_check);
} // namespace experimental
} // namespace cugraph
|
eb052fff4bd228589aaaf9cabc869640ba2811e2.hip | // !!! This is a file automatically generated by hipify!!!
#define BIN_NUM 7
#define PWARP 4
#define IMB_PWMIN 32
#define B_PWMIN 16
#define IMB_MIN 512
#define B_MIN 256
#define IMB_PW_SH_SIZE 2048
#define B_PW_SH_SIZE 1024
#define IMB_SH_SIZE 1024
#define B_SH_SIZE 512
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#include <hip/hip_runtime.h>
#include <helper_cuda.h>
#include <cusparse_v2.h>
#include <thrust/sort.h>
#include <thrust/device_vector.h>
#include <thrust/functional.h>
#include <thrust/execution_policy.h>
#include <thrust/scan.h>
#include <nsparse.h>
#include <nsparse_asm.h>
/* SpGEMM Specific Parameters */
#define HASH_SCAL 107 // hash multiplier; chosen to be coprime to the shared-memory table sizes
#define ONSTREAM
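// init_bin: allocate the per-row nnz counters, row permutation and per-bin
// size/offset arrays (host and device), plus one stream per bin.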
void init_bin(sfBIN *bin, int M)
{
int i;
bin->stream = (hipStream_t *)malloc(sizeof(hipStream_t) * BIN_NUM);
for (i = 0; i < BIN_NUM; i++) {
hipStreamCreate(&(bin->stream[i]));
}
bin->bin_size = (int *)malloc(sizeof(int) * BIN_NUM);
bin->bin_offset = (int *)malloc(sizeof(int) * BIN_NUM);
checkCudaErrors(hipMalloc((void **)&(bin->d_row_perm), sizeof(int) * M));
checkCudaErrors(hipMalloc((void **)&(bin->d_row_nz), sizeof(int) * (M + 1)));
checkCudaErrors(hipMalloc((void **)&(bin->d_max), sizeof(int)));
checkCudaErrors(hipMalloc((void **)&(bin->d_bin_size), sizeof(int) * BIN_NUM));
checkCudaErrors(hipMalloc((void **)&(bin->d_bin_offset), sizeof(int) * BIN_NUM));
i = 0;
bin->max_intprod = 0;
bin->max_nz = 0;
}
void release_bin(sfBIN bin)
{
int i;
hipFree(bin.d_row_nz);
hipFree(bin.d_row_perm);
hipFree(bin.d_max);
hipFree(bin.d_bin_size);
hipFree(bin.d_bin_offset);
free(bin.bin_size);
free(bin.bin_offset);
for (i = 0; i < BIN_NUM; i++) {
hipStreamDestroy(bin.stream[i]);
}
free(bin.stream);
}
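// For each row i of A, count its intermediate products (the sum of the lengths of the
// B rows selected by A's column indices) and atomically track the global maximum.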
__global__ void set_intprod_num(int *d_arpt, int *d_acol,
const int* __restrict__ d_brpt,
int *d_row_intprod, int *d_max_intprod,
int M)
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i >= M) {
return;
}
int nz_per_row = 0;
int j;
for (j = d_arpt[i]; j < d_arpt[i + 1]; j++) {
nz_per_row += d_brpt[d_acol[j] + 1] - d_brpt[d_acol[j]];
}
d_row_intprod[i] = nz_per_row;
atomicMax(d_max_intprod, nz_per_row);
}
__global__ void set_bin(int *d_row_nz, int *d_bin_size, int *d_max,
int M, int min, int mmin)
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i >= M) {
return;
}
int nz_per_row = d_row_nz[i];
atomicMax(d_max, nz_per_row);
int j = 0;
for (j = 0; j < BIN_NUM - 2; j++) {
if (nz_per_row <= (min << j)) {
if (nz_per_row <= (mmin)) {
atomicAdd(d_bin_size + j, 1);
}
else {
atomicAdd(d_bin_size + j + 1, 1);
}
return;
}
}
atomicAdd(d_bin_size + BIN_NUM - 1, 1);
}
__global__ void init_row_perm(int *d_permutation, int M)
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i >= M) {
return;
}
d_permutation[i] = i;
}
__global__ void set_row_perm(int *d_bin_size, int *d_bin_offset,
int *d_max_row_nz, int *d_row_perm,
int M, int min, int mmin)
{
int i = blockDim.x * blockIdx.x + threadIdx.x;
if (i >= M) {
return;
}
int nz_per_row = d_max_row_nz[i];
int dest;
int j = 0;
for (j = 0; j < BIN_NUM - 2; j++) {
if (nz_per_row <= (min << j)) {
if (nz_per_row <= mmin) {
dest = atomicAdd(d_bin_size + j, 1);
d_row_perm[d_bin_offset[j] + dest] = i;
}
else {
dest = atomicAdd(d_bin_size + j + 1, 1);
d_row_perm[d_bin_offset[j + 1] + dest] = i;
}
return;
}
}
dest = atomicAdd(d_bin_size + BIN_NUM - 1, 1);
d_row_perm[d_bin_offset[BIN_NUM - 1] + dest] = i;
}
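// Symbolic-phase driver: count intermediate products per row, then bin the rows by
// size and build the row permutation used to launch the per-bin kernels.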
void set_max_bin(int *d_arpt, int *d_acol, int *d_brpt, sfBIN *bin, int M)
{
int i;
int GS, BS;
for (i = 0; i < BIN_NUM; i++) {
bin->bin_size[i] = 0;
bin->bin_offset[i] = 0;
}
checkCudaErrors(hipMemcpy(bin->d_bin_size, bin->bin_size, sizeof(int) * BIN_NUM, hipMemcpyHostToDevice));
checkCudaErrors(hipMemcpy(bin->d_max, &(bin->max_intprod), sizeof(int), hipMemcpyHostToDevice));
//checkCudaErrors( hipMemset(bin->d_bin_size, 0, sizeof(int) * BIN_NUM) );
//checkCudaErrors(hipMemcpy(bin->d_max, &(bin->max_intprod), sizeof(int), hipMemcpyHostToDevice));
BS = 1024;
GS = div_round_up(M, BS);
hipLaunchKernelGGL(( set_intprod_num), dim3(GS), dim3(BS), 0, 0, d_arpt, d_acol, d_brpt, bin->d_row_nz, bin->d_max, M);
checkCudaErrors(hipMemcpy(&(bin->max_intprod), bin->d_max, sizeof(int), hipMemcpyDeviceToHost));
if (bin->max_intprod > IMB_PWMIN) {
hipLaunchKernelGGL(( set_bin), dim3(GS), dim3(BS), 0, 0, bin->d_row_nz, bin->d_bin_size, bin->d_max, M, IMB_MIN, IMB_PWMIN);
checkCudaErrors(hipMemcpy(bin->bin_size, bin->d_bin_size, sizeof(int) * BIN_NUM, hipMemcpyDeviceToHost));
checkCudaErrors(hipMemcpy(bin->d_bin_size, bin->bin_offset, sizeof(int) * BIN_NUM, hipMemcpyHostToDevice));
//checkCudaErrors( hipMemset(bin->d_bin_size, 0, sizeof(int) * BIN_NUM) );
for (i = 0; i < BIN_NUM - 1; i++) {
bin->bin_offset[i + 1] = bin->bin_offset[i] + bin->bin_size[i];
}
checkCudaErrors(hipMemcpy(bin->d_bin_offset, bin->bin_offset, sizeof(int) * BIN_NUM, hipMemcpyHostToDevice));
hipLaunchKernelGGL(( set_row_perm), dim3(GS), dim3(BS), 0, 0, bin->d_bin_size, bin->d_bin_offset, bin->d_row_nz, bin->d_row_perm, M, IMB_MIN, IMB_PWMIN);
}else{
bin->bin_size[0] = M;
for (i = 1; i < BIN_NUM; i++) {
bin->bin_size[i] = 0;
}
bin->bin_offset[0] = 0;
for (i = 1; i < BIN_NUM; i++) {
bin->bin_offset[i] = M;
}
hipLaunchKernelGGL(( init_row_perm), dim3(GS), dim3(BS), 0, 0, bin->d_row_perm, M);
}
}
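// Numeric-phase driver: re-bin the rows using the now-known nnz per row of C.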
void set_min_bin(sfBIN *bin, int M)
{
int i;
int GS, BS;
for (i = 0; i < BIN_NUM; i++) {
bin->bin_size[i] = 0;
bin->bin_offset[i] = 0;
}
hipMemcpy(bin->d_bin_size, bin->bin_size, sizeof(int) * BIN_NUM, hipMemcpyHostToDevice);
hipMemcpy(bin->d_max, &(bin->max_nz), sizeof(int), hipMemcpyHostToDevice);
BS = 1024;
GS = div_round_up(M, BS);
hipLaunchKernelGGL(( set_bin), dim3(GS), dim3(BS), 0, 0, bin->d_row_nz, bin->d_bin_size,
bin->d_max,
M, B_MIN, B_PWMIN);
hipMemcpy(&(bin->max_nz), bin->d_max, sizeof(int), hipMemcpyDeviceToHost);
if (bin->max_nz > B_PWMIN) {
hipMemcpy(bin->bin_size, bin->d_bin_size, sizeof(int) * BIN_NUM, hipMemcpyDeviceToHost);
hipMemcpy(bin->d_bin_size, bin->bin_offset, sizeof(int) * BIN_NUM, hipMemcpyHostToDevice);
for (i = 0; i < BIN_NUM - 1; i++) {
bin->bin_offset[i + 1] = bin->bin_offset[i] + bin->bin_size[i];
}
hipMemcpy(bin->d_bin_offset, bin->bin_offset, sizeof(int) * BIN_NUM, hipMemcpyHostToDevice);
hipLaunchKernelGGL(( set_row_perm), dim3(GS), dim3(BS), 0, 0, bin->d_bin_size, bin->d_bin_offset, bin->d_row_nz, bin->d_row_perm, M, B_MIN, B_PWMIN);
}
else {
bin->bin_size[0] = M;
for (i = 1; i < BIN_NUM; i++) {
bin->bin_size[i] = 0;
}
bin->bin_offset[0] = 0;
for (i = 1; i < BIN_NUM; i++) {
bin->bin_offset[i] = M;
}
BS = 1024;
GS = div_round_up(M, BS);
hipLaunchKernelGGL(( init_row_perm), dim3(GS), dim3(BS), 0, 0, bin->d_row_perm, M);
}
}
__global__ void init_value(real *d_val, int nz)
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i >= nz) {
return;
}
d_val[i] = 0;
}
__global__ void init_check(int *d_check, int nz)
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i >= nz) {
return;
}
d_check[i] = -1;
}
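// Symbolic phase, smallest rows: PWARP (4) threads per row hash B's column indices
// into a small shared-memory table to count the distinct columns of each C row.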
__global__ void set_row_nz_bin_pwarp(const int *d_arpt, const int *d_acol,
const int* __restrict__ d_brpt,
const int* __restrict__ d_bcol,
const int *d_row_perm,
int *d_row_nz,
int bin_offset, int M) {
int i = blockIdx.x * blockDim.x + threadIdx.x;
int rid = i / PWARP;
int tid = i % PWARP;
int local_rid = rid % (blockDim.x / PWARP);
int j, k;
int soffset;
int acol, bcol, key, hash, adr, nz, old;
__shared__ int check[IMB_PW_SH_SIZE];
soffset = local_rid * IMB_PWMIN;
for (j = tid; j < IMB_PWMIN; j += PWARP) {
check[soffset + j] = -1;
}
if (rid >= M) {
return;
}
rid = d_row_perm[rid + bin_offset];
nz = 0;
for (j = d_arpt[rid] + tid; j < d_arpt[rid + 1]; j += PWARP) {
acol = ld_gbl_int32(d_acol + j);
for (k = d_brpt[acol]; k < d_brpt[acol + 1]; k++) {
bcol = d_bcol[k];
key = bcol;
hash = (bcol * HASH_SCAL) & (IMB_PWMIN - 1);
adr = soffset + hash;
while (1) {
if (check[adr] == key) {
break;
}
else if (check[adr] == -1) {
old = atomicCAS(check + adr, -1, key);
if (old == -1) {
nz++;
break;
}
}
else {
hash = (hash + 1) & (IMB_PWMIN - 1);
adr = soffset + hash;
}
}
}
}
for (j = PWARP / 2; j >= 1; j /= 2) {
nz += __shfl_xor(nz, j);
}
if (tid == 0) {
d_row_nz[rid] = nz;
}
}
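// Symbolic phase, one warp per row with an SH_ROW-entry shared-memory hash table.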
template <int SH_ROW>
__global__ void set_row_nz_bin_each(const int *d_arpt, const int *d_acol,
const int* __restrict__ d_brpt,
const int* __restrict__ d_bcol,
const int *d_row_perm,
int *d_row_nz, int bin_offset, int M)
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
int rid = i / WARP;
int tid = i % WARP;
int wid = rid % (blockDim.x / WARP);
int j, k, l;
int bcol, key, hash, old;
int nz, adr;
int acol, ccol;
int soffset;
soffset = wid * SH_ROW;
__shared__ int check[IMB_SH_SIZE];
for (j = tid; j < SH_ROW; j += WARP) {
check[soffset + j] = -1;
}
if (rid >= M) {
return;
}
acol = 0;
nz = 0;
rid = d_row_perm[rid + bin_offset];
for (j = d_arpt[rid]; j < d_arpt[rid + 1]; j += WARP) {
if (j + tid < d_arpt[rid + 1]) acol = ld_gbl_int32(d_acol + j + tid);
for (l = 0; l < WARP && j + l < d_arpt[rid + 1]; l++) {
ccol = __shfl(acol, l);
for (k = d_brpt[ccol] + tid; k < d_brpt[ccol + 1]; k += WARP) {
bcol = d_bcol[k];
key = bcol;
hash = (bcol * HASH_SCAL) & (SH_ROW - 1);
adr = soffset + hash;
while (1) {
if (check[adr] == key) {
break;
}
else if (check[adr] == -1) {
old = atomicCAS(check + adr, -1, key);
if (old == -1) {
nz++;
break;
}
}
else {
hash = (hash + 1) & (SH_ROW - 1);
adr = soffset + hash;
}
}
}
}
}
for (j = WARP / 2; j >= 1; j /= 2) {
nz += __shfl_xor(nz, j);
}
if (tid == 0) {
d_row_nz[rid] = nz;
}
}
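// Symbolic phase, one thread block per row; warp-local counts are reduced with
// shuffles and then accumulated atomically in shared memory.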
template <int SH_ROW>
__global__ void set_row_nz_bin_each_tb(const int *d_arpt, const int *d_acol,
const int* __restrict__ d_brpt,
const int* __restrict__ d_bcol,
int *d_row_perm, int *d_row_nz,
int bin_offset, int M)
{
int rid = blockIdx.x;
int tid = threadIdx.x & (WARP - 1);
int wid = threadIdx.x / WARP;
int wnum = blockDim.x / WARP;
int j, k;
int bcol, key, hash, old;
int nz, adr;
int acol;
__shared__ int check[SH_ROW];
for (j = threadIdx.x; j < SH_ROW; j += blockDim.x) {
check[j] = -1;
}
if (rid >= M) {
return;
}
__syncthreads();
nz = 0;
rid = d_row_perm[rid + bin_offset];
for (j = d_arpt[rid] + wid; j < d_arpt[rid + 1]; j += wnum) {
acol = ld_gbl_int32(d_acol + j);
for (k = d_brpt[acol] + tid; k < d_brpt[acol + 1]; k += WARP) {
bcol = d_bcol[k];
key = bcol;
hash = (bcol * HASH_SCAL) & (SH_ROW - 1);
adr = hash;
while (1) {
if (check[adr] == key) {
break;
}
else if (check[adr] == -1) {
old = atomicCAS(check + adr, -1, key);
if (old == -1) {
nz++;
break;
}
}
else {
hash = (hash + 1) & (SH_ROW - 1);
adr = hash;
}
}
}
}
for (j = WARP / 2; j >= 1; j /= 2) {
nz += __shfl_xor(nz, j);
}
__syncthreads();
if (threadIdx.x == 0) {
check[0] = 0;
}
__syncthreads();
if (tid == 0) {
atomicAdd(check, nz);
}
__syncthreads();
if (threadIdx.x == 0) {
d_row_nz[rid] = check[0];
}
}
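// Symbolic phase for long rows: gives up once the shared-memory table is about half
// full and records the row in d_fail_perm for the global-memory fallback kernel.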
template <int SH_ROW>
__global__ void set_row_nz_bin_each_tb_large(const int *d_arpt, const int *d_acol,
const int* __restrict__ d_brpt,
const int* __restrict__ d_bcol,
int *d_row_perm, int *d_row_nz,
int *d_fail_count, int *d_fail_perm,
int bin_offset, int M)
{
int rid = blockIdx.x;
int tid = threadIdx.x & (WARP - 1);
int wid = threadIdx.x / WARP;
int wnum = blockDim.x / WARP;
int j, k;
int bcol, key, hash, old;
int adr;
int acol;
__shared__ int check[SH_ROW];
__shared__ int snz[1];
for (j = threadIdx.x; j < SH_ROW; j += blockDim.x) {
check[j] = -1;
}
if (threadIdx.x == 0) {
snz[0] = 0;
}
if (rid >= M) {
return;
}
__syncthreads();
rid = d_row_perm[rid + bin_offset];
int count = 0;
int border = SH_ROW >> 1;
for (j = d_arpt[rid] + wid; j < d_arpt[rid + 1]; j += wnum) {
acol = ld_gbl_int32(d_acol + j);
for (k = d_brpt[acol] + tid; k < d_brpt[acol + 1]; k += WARP) {
bcol = d_bcol[k];
key = bcol;
hash = (bcol * HASH_SCAL) & (SH_ROW - 1);
adr = hash;
while (count < border && snz[0] < border) {
if (check[adr] == key) {
break;
}
else if (check[adr] == -1) {
old = atomicCAS(check + adr, -1, key);
if (old == -1) {
atomicAdd(snz, 1);
break;
}
}
else {
hash = (hash + 1) & (SH_ROW - 1);
adr = hash;
count++;
}
}
if (count >= border || snz[0] >= border) {
break;
}
}
if (count >= border || snz[0] >= border) {
break;
}
}
__syncthreads();
if (count >= border || snz[0] >= border) {
if (threadIdx.x == 0) {
int d = atomicAdd(d_fail_count, 1);
d_fail_perm[d] = rid;
}
}
else {
if (threadIdx.x == 0) {
d_row_nz[rid] = snz[0];
}
}
}
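/* Fallback for rows that overflow shared memory: the hash table lives in global memory
   (max_row_nz slots per row) and is probed modulo max_row_nz instead of with a
   power-of-two mask. */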
__global__ void set_row_nz_bin_each_gl(const int *d_arpt, const int *d_acol,
const int* __restrict__ d_brpt,
const int* __restrict__ d_bcol,
const int *d_row_perm,
int *d_row_nz, int *d_check,
int max_row_nz, int bin_offset, int M)
{
int rid = blockIdx.x;
int tid = threadIdx.x & (WARP - 1);
int wid = threadIdx.x / WARP;
int wnum = blockDim.x / WARP;
int j, k;
int bcol, key, hash, old;
int nz, adr;
int acol;
int offset = rid * max_row_nz;
__shared__ int snz[1];
if (threadIdx.x == 0) {
snz[0] = 0;
}
__syncthreads();
if (rid >= M) {
return;
}
nz = 0;
rid = d_row_perm[rid + bin_offset];
for (j = d_arpt[rid] + wid; j < d_arpt[rid + 1]; j += wnum) {
acol = ld_gbl_int32(d_acol + j);
for (k = d_brpt[acol] + tid; k < d_brpt[acol + 1]; k += WARP) {
bcol = d_bcol[k];
key = bcol;
hash = (bcol * HASH_SCAL) % max_row_nz;
adr = offset + hash;
while (1) {
if (d_check[adr] == key) {
break;
}
else if (d_check[adr] == -1) {
old = atomicCAS(d_check + adr, -1, key);
if (old == -1) {
nz++;
break;
}
}
else {
hash = (hash + 1) % max_row_nz;
adr = offset + hash;
}
}
}
}
for (j = WARP / 2; j >= 1; j /= 2) {
nz += __shfl_xor(nz, j);
}
if (tid == 0) {
atomicAdd(snz, nz);
}
__syncthreads();
if (threadIdx.x == 0) {
d_row_nz[rid] = snz[0];
}
}
void set_row_nnz(int *d_arpt, int *d_acol,
int *d_brpt, int *d_bcol,
int *d_crpt,
sfBIN *bin,
int M, int *nnz);
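/* Numeric phase, PWARP threads per row: each product aval*bval is accumulated into a
   shared-memory check/value hash table keyed by B's column index, then the surviving
   entries are compacted and written to C in column-sorted order. */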
__global__ void calculate_value_col_bin_pwarp(const int *d_arpt,
const int *d_acol,
const real *d_aval,
const int* __restrict__ d_brpt,
const int* __restrict__ d_bcol,
const real* __restrict__ d_bval,
int *d_crpt,
int *d_ccol,
real *d_cval,
const int *d_row_perm,
int *d_nz,
int bin_offset,
int bin_size) {
int i = blockIdx.x * blockDim.x + threadIdx.x;
int rid = i / PWARP;
int tid = i % PWARP;
int local_rid = rid % (blockDim.x / PWARP);
int j;
__shared__ int shared_check[B_PW_SH_SIZE];
__shared__ real shared_value[B_PW_SH_SIZE];
int soffset = local_rid * (B_PWMIN);
for (j = tid; j < (B_PWMIN); j += PWARP) {
shared_check[soffset + j] = -1;
shared_value[soffset + j] = 0;
}
if (rid >= bin_size) {
return;
}
rid = d_row_perm[rid + bin_offset];
if (tid == 0) {
d_nz[rid] = 0;
}
int k;
int acol, bcol, hash, key, adr;
int offset = d_crpt[rid];
int old, index;
real aval, bval;
for (j = d_arpt[rid] + tid; j < d_arpt[rid + 1]; j += PWARP) {
acol = ld_gbl_int32(d_acol + j);
aval = ld_gbl_real(d_aval + j);
for (k = d_brpt[acol]; k < d_brpt[acol + 1]; k++) {
bcol = d_bcol[k];
bval = d_bval[k];
key = bcol;
hash = (bcol * HASH_SCAL) & ((B_PWMIN) - 1);
adr = soffset + hash;
while (1) {
if (shared_check[adr] == key) {
atomic_fadd(shared_value + adr, aval * bval);
break;
}
else if (shared_check[adr] == -1) {
old = atomicCAS(shared_check + adr, -1, key);
if (old == -1) {
atomic_fadd(shared_value + adr, aval * bval);
break;
}
}
else {
hash = (hash + 1) & ((B_PWMIN) - 1);
adr = soffset + hash;
}
}
}
}
for (j = tid; j < (B_PWMIN); j += PWARP) {
if (shared_check[soffset + j] != -1) {
index = atomicAdd(d_nz + rid, 1);
shared_check[soffset + index] = shared_check[soffset + j];
shared_value[soffset + index] = shared_value[soffset + j];
}
}
int nz = d_nz[rid];
// Sorting for shared data
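// The rank of an entry is the number of kept entries with a smaller column index
// (the sign bit of the subtraction below), which is also its position in the sorted row.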
int count, target;
for (j = tid; j < nz; j += PWARP) {
target = shared_check[soffset + j];
count = 0;
for (k = 0; k < nz; k++) {
count += (unsigned int)(shared_check[soffset + k] - target) >> 31;
}
d_ccol[offset + count] = shared_check[soffset + j];
d_cval[offset + count] = shared_value[soffset + j];
}
}
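/* Numeric phase, one warp per row: entries of A's row are loaded WARP at a time and
   broadcast with __shfl so the whole warp accumulates each referenced row of B into
   the per-warp hash table. */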
template <int SH_ROW>
__global__ void calculate_value_col_bin_each(const int *d_arpt,
const int *d_acol,
const real *d_aval,
const int* __restrict__ d_brpt,
const int* __restrict__ d_bcol,
const real* __restrict__ d_bval,
int *d_crpt,
int *d_ccol,
real *d_cval,
const int *d_row_perm,
int *d_nz,
int bin_offset,
int bin_size)
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
int rid = i / WARP;
int tid = i % WARP;
int wid = rid % (blockDim.x / WARP);
int j;
__shared__ int shared_check[B_SH_SIZE];
__shared__ real shared_value[B_SH_SIZE];
int soffset = wid * SH_ROW;
for (j = tid; j < SH_ROW; j += WARP) {
shared_check[soffset + j] = -1;
shared_value[soffset + j] = 0;
}
if (rid >= bin_size) {
return;
}
rid = d_row_perm[rid + bin_offset];
if (tid == 0) {
d_nz[rid] = 0;
}
int lacol, acol;
int k, l;
int bcol, hash, key, adr;
int offset = d_crpt[rid];
int old, index;
real laval, aval, bval;
lacol = 0;
for (j = d_arpt[rid]; j < d_arpt[rid + 1]; j += WARP) {
if (j + tid < d_arpt[rid + 1]) {
lacol = ld_gbl_int32(d_acol + j + tid);
laval = ld_gbl_real(d_aval + j + tid);
}
for (l = 0; l < WARP && j + l < d_arpt[rid + 1]; l++) {
acol = __shfl(lacol, l);
aval = __shfl(laval, l);
for (k = d_brpt[acol] + tid; k < d_brpt[acol + 1]; k += WARP) {
bcol = d_bcol[k];
bval = d_bval[k];
key = bcol;
hash = (bcol * HASH_SCAL) & (SH_ROW - 1);
adr = soffset + hash;
while (1) {
if (shared_check[adr] == key) {
atomic_fadd(shared_value + adr, aval * bval);
break;
}
else if (shared_check[adr] == -1) {
old = atomicCAS(shared_check + adr, -1, key);
if (old == -1) {
atomic_fadd(shared_value + adr, aval * bval);
break;
}
}
else {
hash = (hash + 1) & (SH_ROW - 1);
adr = soffset + hash;
}
}
}
}
}
for (j = tid; j < SH_ROW; j += WARP) {
if (shared_check[soffset + j] != -1) {
index = atomicAdd(d_nz + rid, 1);
shared_check[soffset + index] = shared_check[soffset + j];
shared_value[soffset + index] = shared_value[soffset + j];
}
}
int nz = d_nz[rid];
/* Sorting for shared data */
int count, target;
for (j = tid; j < nz; j += WARP) {
target = shared_check[soffset + j];
count = 0;
for (k = 0; k < nz; k++) {
count += (unsigned int)(shared_check[soffset + k] - target) >> 31;
}
d_ccol[offset + count] = shared_check[soffset + j];
d_cval[offset + count] = shared_value[soffset + j];
}
}
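/* Numeric phase, one thread block per row: a single SH_ROW-entry check/value table is
   shared by all warps of the block; one warp compacts it and the whole block writes
   the sorted result. */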
template <int SH_ROW>
__global__ void calculate_value_col_bin_each_tb(const int *d_arpt,
const int *d_acol,
const real *d_aval,
const int* __restrict__ d_brpt,
const int* __restrict__ d_bcol,
const real* __restrict__ d_bval,
int *d_crpt,
int *d_ccol,
real *d_cval,
const int *d_row_perm,
int *d_nz,
int bin_offset,
int bin_size)
{
int rid = blockIdx.x;
int tid = threadIdx.x & (WARP - 1);
int wid = threadIdx.x / WARP;
int wnum = blockDim.x / WARP;
int j;
__shared__ int shared_check[SH_ROW];
__shared__ real shared_value[SH_ROW];
for (j = threadIdx.x; j < SH_ROW; j += blockDim.x) {
shared_check[j] = -1;
shared_value[j] = 0;
}
if (rid >= bin_size) {
return;
}
rid = d_row_perm[rid + bin_offset];
if (threadIdx.x == 0) {
d_nz[rid] = 0;
}
__syncthreads();
int acol;
int k;
int bcol, hash, key;
int offset = d_crpt[rid];
int old, index;
real aval, bval;
for (j = d_arpt[rid] + wid; j < d_arpt[rid + 1]; j += wnum) {
acol = ld_gbl_int32(d_acol + j);
aval = ld_gbl_real(d_aval + j);
for (k = d_brpt[acol] + tid; k < d_brpt[acol + 1]; k += WARP) {
bcol = d_bcol[k];
bval = d_bval[k];
key = bcol;
hash = (bcol * HASH_SCAL) & (SH_ROW - 1);
while (1) {
if (shared_check[hash] == key) {
atomic_fadd(shared_value + hash, aval * bval);
break;
}
else if (shared_check[hash] == -1) {
old = atomicCAS(shared_check + hash, -1, key);
if (old == -1) {
atomic_fadd(shared_value + hash, aval * bval);
break;
}
}
else {
hash = (hash + 1) & (SH_ROW - 1);
}
}
}
}
__syncthreads();
if (threadIdx.x < WARP) {
for (j = tid; j < SH_ROW; j += WARP) {
if (shared_check[j] != -1) {
index = atomicAdd(d_nz + rid, 1);
shared_check[index] = shared_check[j];
shared_value[index] = shared_value[j];
}
}
}
__syncthreads();
int nz = d_nz[rid];
/* Sorting for shared data */
int count, target;
for (j = threadIdx.x; j < nz; j += blockDim.x) {
target = shared_check[j];
count = 0;
for (k = 0; k < nz; k++) {
count += (unsigned int)(shared_check[k] - target) >> 31;
}
d_ccol[offset + count] = shared_check[j];
d_cval[offset + count] = shared_value[j];
}
}
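/* Numeric-phase fallback for the largest rows: the check/value tables are kept in
   global memory (max_row_nz slots per row of the bin); otherwise the structure matches
   the block-per-row kernel above. */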
__global__ void calculate_value_col_bin_each_gl(const int *d_arpt,
const int *d_acol,
const real *d_aval,
const int* __restrict__ d_brpt,
const int* __restrict__ d_bcol,
const real* __restrict__ d_bval,
int *d_crpt,
int *d_ccol,
real *d_cval,
const int *d_row_perm,
int *d_nz,
int *d_check,
real *d_value,
int max_row_nz,
int bin_offset,
int M)
{
int rid = blockIdx.x;
int tid = threadIdx.x & (WARP - 1);
int wid = threadIdx.x / WARP;
int wnum = blockDim.x / WARP;
int j;
if (rid >= M) {
return;
}
int doffset = rid * max_row_nz;
rid = d_row_perm[rid + bin_offset];
if (threadIdx.x == 0) {
d_nz[rid] = 0;
}
__syncthreads();
int acol;
int k;
int bcol, hash, key, adr;
int offset = d_crpt[rid];
int old, index;
real aval, bval;
for (j = d_arpt[rid] + wid; j < d_arpt[rid + 1]; j += wnum) {
acol = ld_gbl_int32(d_acol + j);
aval = ld_gbl_real(d_aval + j);
for (k = d_brpt[acol] + tid; k < d_brpt[acol + 1]; k += WARP) {
bcol = d_bcol[k];
bval = d_bval[k];
key = bcol;
hash = (bcol * HASH_SCAL) % max_row_nz;
adr = doffset + hash;
while (1) {
if (d_check[adr] == key) {
atomic_fadd(d_value + adr, aval * bval);
break;
}
else if (d_check[adr] == -1) {
old = atomicCAS(d_check + adr, -1, key);
if (old == -1) {
atomic_fadd(d_value + adr, aval * bval);
break;
}
}
else {
hash = (hash + 1) % max_row_nz;
adr = doffset + hash;
}
}
}
}
__syncthreads();
if (threadIdx.x < WARP) {
for (j = tid; j < max_row_nz; j += WARP) {
if (d_check[doffset + j] != -1) {
index = atomicAdd(d_nz + rid, 1);
d_check[doffset + index] = d_check[doffset + j];
d_value[doffset + index] = d_value[doffset + j];
}
}
}
__syncthreads();
int nz = d_nz[rid];
/* Sorting for shared data */
int count, target;
for (j = threadIdx.x; j < nz; j += blockDim.x) {
target = d_check[doffset + j];
count = 0;
for (k = 0; k < nz; k++) {
count += (unsigned int)(d_check[doffset + k] - target) >> 31;
}
d_ccol[offset + count] = d_check[doffset + j];
d_cval[offset + count] = d_value[doffset + j];
}
}
void calculate_value_col_bin(int *d_arpt, int *d_acol, real *d_aval,
int *d_brpt, int *d_bcol, real *d_bval,
int *d_crpt, int *d_ccol, real *d_cval,
sfBIN *bin,
int M);
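/* Hash SpGEMM driver: (1) bin the rows of A by intermediate-product count, (2) count
   the exact nnz of every row of C and build c->d_rpt with an exclusive scan, (3) re-bin
   by the exact counts and fill in column indices and values. */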
void spgemm_kernel_hash(sfCSR *a, sfCSR *b, sfCSR *c)
{
int M;
sfBIN bin;
M = a->M;
c->M = M;
c->N = b->N;
/* Initialize bin */
init_bin(&bin, M);
checkCudaErrors(hipDeviceSynchronize());
/* Set max bin */
set_max_bin(a->d_rpt, a->d_col, b->d_rpt, &bin, M);
checkCudaErrors(hipDeviceSynchronize());
checkCudaErrors(hipMalloc((void **)&(c->d_rpt), sizeof(int) * (M + 1)));
/* Count nz of C */
set_row_nnz(a->d_rpt, a->d_col,
b->d_rpt, b->d_col,
c->d_rpt,
&bin,
M,
&(c->nnz));
/* Set bin */
set_min_bin(&bin, M);
checkCudaErrors(hipMalloc((void **)&(c->d_col), sizeof(int) * c->nnz));
checkCudaErrors(hipMalloc((void **)&(c->d_val), sizeof(real) * c->nnz));
/* Calculating value of C */
calculate_value_col_bin(a->d_rpt, a->d_col, a->d_val,
b->d_rpt, b->d_col, b->d_val,
c->d_rpt, c->d_col, c->d_val,
&bin,
M);
release_bin(bin);
}
void set_row_nnz(int *d_arpt, int *d_acol,
int *d_brpt, int *d_bcol,
int *d_crpt,
sfBIN *bin,
int M,
int *nnz)
{
int i;
int GS, BS;
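/* Launch one counting kernel per non-empty bin, largest bins first, each on its own
   stream; the kernel / template size is chosen to fit that bin's maximum row size. */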
for (i = BIN_NUM - 1; i >= 0; i--) {
if (bin->bin_size[i] > 0) {
switch (i) {
case 0:
BS = 256;
GS = div_round_up(bin->bin_size[i] * PWARP, BS);
hipLaunchKernelGGL(( set_row_nz_bin_pwarp), dim3(GS), dim3(BS), 0, bin->stream[i],
d_arpt, d_acol,
d_brpt, d_bcol,
bin->d_row_perm,
bin->d_row_nz,
bin->bin_offset[i],
bin->bin_size[i]);
break;
case 1 :
BS = 64;
GS = bin->bin_size[i];
hipLaunchKernelGGL(( set_row_nz_bin_each_tb<512>), dim3(GS), dim3(BS), 0, bin->stream[i],
d_arpt, d_acol, d_brpt, d_bcol,
bin->d_row_perm, bin->d_row_nz,
bin->bin_offset[i], bin->bin_size[i]);
break;
case 2 :
BS = 128;
GS = bin->bin_size[i];
hipLaunchKernelGGL(( set_row_nz_bin_each_tb<1024>), dim3(GS), dim3(BS), 0, bin->stream[i],
d_arpt, d_acol, d_brpt, d_bcol,
bin->d_row_perm, bin->d_row_nz,
bin->bin_offset[i], bin->bin_size[i]);
break;
case 3 :
BS = 256;
GS = bin->bin_size[i];
hipLaunchKernelGGL(( set_row_nz_bin_each_tb<2048>), dim3(GS), dim3(BS), 0, bin->stream[i],
d_arpt, d_acol, d_brpt, d_bcol,
bin->d_row_perm, bin->d_row_nz,
bin->bin_offset[i], bin->bin_size[i]);
break;
case 4 :
BS = 512;
GS = bin->bin_size[i];
hipLaunchKernelGGL(( set_row_nz_bin_each_tb<4096>), dim3(GS), dim3(BS), 0, bin->stream[i],
d_arpt, d_acol, d_brpt, d_bcol,
bin->d_row_perm, bin->d_row_nz,
bin->bin_offset[i], bin->bin_size[i]);
break;
case 5 :
BS = 1024;
GS = bin->bin_size[i];
hipLaunchKernelGGL(( set_row_nz_bin_each_tb<8192>), dim3(GS), dim3(BS), 0, bin->stream[i],
d_arpt, d_acol, d_brpt, d_bcol,
bin->d_row_perm, bin->d_row_nz,
bin->bin_offset[i], bin->bin_size[i]);
break;
case 6 :
{
int fail_count;
int *d_fail_count, *d_fail_perm;
fail_count = 0;
checkCudaErrors(hipMalloc((void **)&d_fail_count, sizeof(int)));
checkCudaErrors(hipMalloc((void **)&d_fail_perm, sizeof(int) * bin->bin_size[i]));
hipMemcpy(d_fail_count, &fail_count, sizeof(int), hipMemcpyHostToDevice);
BS = 1024;
GS = bin->bin_size[i];
hipLaunchKernelGGL(( set_row_nz_bin_each_tb_large<8192>), dim3(GS), dim3(BS), 0, bin->stream[i],
d_arpt, d_acol, d_brpt, d_bcol,
bin->d_row_perm, bin->d_row_nz,
d_fail_count, d_fail_perm,
bin->bin_offset[i], bin->bin_size[i]);
hipMemcpy(&fail_count, d_fail_count, sizeof(int), hipMemcpyDeviceToHost);
if (fail_count > 0) {
int max_row_nz = bin->max_intprod;
size_t table_size = (size_t)max_row_nz * fail_count;
int *d_check;
checkCudaErrors(hipMalloc((void **)&(d_check), sizeof(int) * table_size));
BS = 1024;
GS = div_round_up(table_size, BS);
hipLaunchKernelGGL(( init_check), dim3(GS), dim3(BS), 0, bin->stream[i], d_check, table_size);
GS = bin->bin_size[i];
hipLaunchKernelGGL(( set_row_nz_bin_each_gl), dim3(GS), dim3(BS), 0, bin->stream[i],
d_arpt, d_acol, d_brpt, d_bcol,
d_fail_perm, bin->d_row_nz, d_check,
max_row_nz, 0, fail_count);
hipFree(d_check);
}
hipFree(d_fail_count);
hipFree(d_fail_perm);
}
break;
default :
exit(0);
}
}
}
hipDeviceSynchronize();
/* Set row pointer of matrix C */
thrust::exclusive_scan(thrust::device, bin->d_row_nz, bin->d_row_nz + (M + 1), d_crpt, 0);
hipMemcpy(nnz, d_crpt + M, sizeof(int), hipMemcpyDeviceToHost);
}
void calculate_value_col_bin(int *d_arpt, int *d_acol, real *d_aval,
int *d_brpt, int *d_bcol, real *d_bval,
int *d_crpt, int *d_ccol, real *d_cval,
sfBIN *bin,
int M)
{
int i;
int GS, BS;
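/* Same per-bin dispatch as the symbolic phase; the table sizes are halved because the
   shared memory now holds values as well as column indices. */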
for (i = BIN_NUM - 1; i >= 0; i--) {
if (bin->bin_size[i] > 0) {
switch (i) {
case 0:
BS = 256;
GS = div_round_up(bin->bin_size[i] * PWARP, BS);
hipLaunchKernelGGL(( calculate_value_col_bin_pwarp), dim3(GS), dim3(BS), 0, bin->stream[i],
d_arpt, d_acol, d_aval,
d_brpt, d_bcol, d_bval,
d_crpt, d_ccol, d_cval,
bin->d_row_perm, bin->d_row_nz,
bin->bin_offset[i], bin->bin_size[i]);
break;
case 1:
BS = 64;
GS = bin->bin_size[i];
hipLaunchKernelGGL(( calculate_value_col_bin_each_tb<256>), dim3(GS), dim3(BS), 0, bin->stream[i],
d_arpt, d_acol, d_aval,
d_brpt, d_bcol, d_bval,
d_crpt, d_ccol, d_cval,
bin->d_row_perm, bin->d_row_nz,
bin->bin_offset[i], bin->bin_size[i]);
break;
case 2:
BS = 128;
GS = bin->bin_size[i];
hipLaunchKernelGGL(( calculate_value_col_bin_each_tb<512>), dim3(GS), dim3(BS), 0, bin->stream[i],
d_arpt, d_acol, d_aval,
d_brpt, d_bcol, d_bval,
d_crpt, d_ccol, d_cval,
bin->d_row_perm, bin->d_row_nz,
bin->bin_offset[i], bin->bin_size[i]);
break;
case 3:
BS = 256;
GS = bin->bin_size[i];
hipLaunchKernelGGL(( calculate_value_col_bin_each_tb<1024>), dim3(GS), dim3(BS), 0, bin->stream[i],
d_arpt, d_acol, d_aval,
d_brpt, d_bcol, d_bval,
d_crpt, d_ccol, d_cval,
bin->d_row_perm, bin->d_row_nz,
bin->bin_offset[i], bin->bin_size[i]);
break;
case 4:
BS = 512;
GS = bin->bin_size[i];
hipLaunchKernelGGL(( calculate_value_col_bin_each_tb<2048>), dim3(GS), dim3(BS), 0, bin->stream[i],
d_arpt, d_acol, d_aval,
d_brpt, d_bcol, d_bval,
d_crpt, d_ccol, d_cval,
bin->d_row_perm, bin->d_row_nz,
bin->bin_offset[i], bin->bin_size[i]);
break;
case 5:
BS = 1024;
GS = bin->bin_size[i];
hipLaunchKernelGGL(( calculate_value_col_bin_each_tb<4096>), dim3(GS), dim3(BS), 0, bin->stream[i],
d_arpt, d_acol, d_aval,
d_brpt, d_bcol, d_bval,
d_crpt, d_ccol, d_cval,
bin->d_row_perm, bin->d_row_nz,
bin->bin_offset[i], bin->bin_size[i]);
break;
case 6 :
{
int max_row_nz = bin->max_nz * 2;
size_t table_size = (size_t)max_row_nz * bin->bin_size[i]; /* widened as in set_row_nnz to avoid int overflow */
int *d_check;
real *d_value;
checkCudaErrors(hipMalloc((void **)&(d_check), sizeof(int) * table_size));
checkCudaErrors(hipMalloc((void **)&(d_value), sizeof(real) * table_size));
BS = 1024;
GS = div_round_up(table_size, BS);
hipLaunchKernelGGL(( init_check), dim3(GS), dim3(BS), 0, bin->stream[i], d_check, table_size);
hipLaunchKernelGGL(( init_value), dim3(GS), dim3(BS), 0, bin->stream[i], d_value, table_size);
GS = bin->bin_size[i];
hipLaunchKernelGGL(( calculate_value_col_bin_each_gl), dim3(GS), dim3(BS), 0, bin->stream[i],
d_arpt, d_acol, d_aval,
d_brpt, d_bcol, d_bval,
d_crpt, d_ccol, d_cval,
bin->d_row_perm, bin->d_row_nz,
d_check, d_value, max_row_nz,
bin->bin_offset[i], bin->bin_size[i]);
hipFree(d_check);
hipFree(d_value);
}
break;
default :
exit(0);
}
}
}
hipDeviceSynchronize();
}
| eb052fff4bd228589aaaf9cabc869640ba2811e2.cu | #define BIN_NUM 7
#define PWARP 4
#define IMB_PWMIN 32
#define B_PWMIN 16
#define IMB_MIN 512
#define B_MIN 256
#define IMB_PW_SH_SIZE 2048
#define B_PW_SH_SIZE 1024
#define IMB_SH_SIZE 1024
#define B_SH_SIZE 512
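/* Hash-table sizing: the IMB_* constants are used by the symbolic (nnz-counting)
   kernels and the B_* constants by the numeric kernels; *_PWMIN is the per-row table
   width in the sub-warp (PWARP) kernels and *_SH_SIZE the length of the matching
   shared-memory array. */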
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#include <cuda.h>
#include <helper_cuda.h>
#include <cusparse_v2.h>
#include <thrust/sort.h>
#include <thrust/device_vector.h>
#include <thrust/functional.h>
#include <thrust/execution_policy.h>
#include <thrust/scan.h>
#include <nsparse.h>
#include <nsparse_asm.h>
/* SpGEMM Specific Parameters */
#define HASH_SCAL 107 // Hash multiplier; chosen coprime to the power-of-two table sizes
#define ONSTREAM
void init_bin(sfBIN *bin, int M)
{
int i;
bin->stream = (cudaStream_t *)malloc(sizeof(cudaStream_t) * BIN_NUM);
for (i = 0; i < BIN_NUM; i++) {
cudaStreamCreate(&(bin->stream[i]));
}
bin->bin_size = (int *)malloc(sizeof(int) * BIN_NUM);
bin->bin_offset = (int *)malloc(sizeof(int) * BIN_NUM);
checkCudaErrors(cudaMalloc((void **)&(bin->d_row_perm), sizeof(int) * M));
checkCudaErrors(cudaMalloc((void **)&(bin->d_row_nz), sizeof(int) * (M + 1)));
checkCudaErrors(cudaMalloc((void **)&(bin->d_max), sizeof(int)));
checkCudaErrors(cudaMalloc((void **)&(bin->d_bin_size), sizeof(int) * BIN_NUM));
checkCudaErrors(cudaMalloc((void **)&(bin->d_bin_offset), sizeof(int) * BIN_NUM));
i = 0;
bin->max_intprod = 0;
bin->max_nz = 0;
}
void release_bin(sfBIN bin)
{
int i;
cudaFree(bin.d_row_nz);
cudaFree(bin.d_row_perm);
cudaFree(bin.d_max);
cudaFree(bin.d_bin_size);
cudaFree(bin.d_bin_offset);
free(bin.bin_size);
free(bin.bin_offset);
for (i = 0; i < BIN_NUM; i++) {
cudaStreamDestroy(bin.stream[i]);
}
free(bin.stream);
}
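/* One thread per row of A: the number of intermediate products of row i (the sum of
   the lengths of the B rows referenced by A's row i) is an upper bound on C's row nnz
   and drives the first binning; the global maximum is tracked as well. */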
__global__ void set_intprod_num(int *d_arpt, int *d_acol,
const int* __restrict__ d_brpt,
int *d_row_intprod, int *d_max_intprod,
int M)
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i >= M) {
return;
}
int nz_per_row = 0;
int j;
for (j = d_arpt[i]; j < d_arpt[i + 1]; j++) {
nz_per_row += d_brpt[d_acol[j] + 1] - d_brpt[d_acol[j]];
}
d_row_intprod[i] = nz_per_row;
atomicMax(d_max_intprod, nz_per_row);
}
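/* Bucket rows by nnz: rows with nnz <= mmin go to bin 0, otherwise to bin j+1 for the
   smallest j with nnz <= min*2^j, and anything larger than min*2^(BIN_NUM-3) falls
   into the last bin. */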
__global__ void set_bin(int *d_row_nz, int *d_bin_size, int *d_max,
int M, int min, int mmin)
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i >= M) {
return;
}
int nz_per_row = d_row_nz[i];
atomicMax(d_max, nz_per_row);
int j = 0;
for (j = 0; j < BIN_NUM - 2; j++) {
if (nz_per_row <= (min << j)) {
if (nz_per_row <= (mmin)) {
atomicAdd(d_bin_size + j, 1);
}
else {
atomicAdd(d_bin_size + j + 1, 1);
}
return;
}
}
atomicAdd(d_bin_size + BIN_NUM - 1, 1);
}
__global__ void init_row_perm(int *d_permutation, int M)
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i >= M) {
return;
}
d_permutation[i] = i;
}
__global__ void set_row_perm(int *d_bin_size, int *d_bin_offset,
int *d_max_row_nz, int *d_row_perm,
int M, int min, int mmin)
{
int i = blockDim.x * blockIdx.x + threadIdx.x;
if (i >= M) {
return;
}
int nz_per_row = d_max_row_nz[i];
int dest;
int j = 0;
for (j = 0; j < BIN_NUM - 2; j++) {
if (nz_per_row <= (min << j)) {
if (nz_per_row <= mmin) {
dest = atomicAdd(d_bin_size + j, 1);
d_row_perm[d_bin_offset[j] + dest] = i;
}
else {
dest = atomicAdd(d_bin_size + j + 1, 1);
d_row_perm[d_bin_offset[j + 1] + dest] = i;
}
return;
}
}
dest = atomicAdd(d_bin_size + BIN_NUM - 1, 1);
d_row_perm[d_bin_offset[BIN_NUM - 1] + dest] = i;
}
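/* Symbolic-phase binning: count intermediate products per row and, if any row exceeds
   the smallest table (IMB_PWMIN), bucket the rows and build d_row_perm so rows of the
   same bin are contiguous; otherwise use a single bin with the identity permutation. */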
void set_max_bin(int *d_arpt, int *d_acol, int *d_brpt, sfBIN *bin, int M)
{
int i;
int GS, BS;
for (i = 0; i < BIN_NUM; i++) {
bin->bin_size[i] = 0;
bin->bin_offset[i] = 0;
}
checkCudaErrors(cudaMemcpy(bin->d_bin_size, bin->bin_size, sizeof(int) * BIN_NUM, cudaMemcpyHostToDevice));
checkCudaErrors(cudaMemcpy(bin->d_max, &(bin->max_intprod), sizeof(int), cudaMemcpyHostToDevice));
//checkCudaErrors( cudaMemset(bin->d_bin_size, 0, sizeof(int) * BIN_NUM) );
//checkCudaErrors(cudaMemcpy(bin->d_max, &(bin->max_intprod), sizeof(int), cudaMemcpyHostToDevice));
BS = 1024;
GS = div_round_up(M, BS);
set_intprod_num<<<GS, BS>>>(d_arpt, d_acol, d_brpt, bin->d_row_nz, bin->d_max, M);
checkCudaErrors(cudaMemcpy(&(bin->max_intprod), bin->d_max, sizeof(int), cudaMemcpyDeviceToHost));
if (bin->max_intprod > IMB_PWMIN) {
set_bin<<<GS, BS>>>(bin->d_row_nz, bin->d_bin_size, bin->d_max, M, IMB_MIN, IMB_PWMIN);
checkCudaErrors(cudaMemcpy(bin->bin_size, bin->d_bin_size, sizeof(int) * BIN_NUM, cudaMemcpyDeviceToHost));
checkCudaErrors(cudaMemcpy(bin->d_bin_size, bin->bin_offset, sizeof(int) * BIN_NUM, cudaMemcpyHostToDevice));
//checkCudaErrors( cudaMemset(bin->d_bin_size, 0, sizeof(int) * BIN_NUM) );
for (i = 0; i < BIN_NUM - 1; i++) {
bin->bin_offset[i + 1] = bin->bin_offset[i] + bin->bin_size[i];
}
checkCudaErrors(cudaMemcpy(bin->d_bin_offset, bin->bin_offset, sizeof(int) * BIN_NUM, cudaMemcpyHostToDevice));
set_row_perm<<<GS, BS>>>(bin->d_bin_size, bin->d_bin_offset, bin->d_row_nz, bin->d_row_perm, M, IMB_MIN, IMB_PWMIN);
}else{
bin->bin_size[0] = M;
for (i = 1; i < BIN_NUM; i++) {
bin->bin_size[i] = 0;
}
bin->bin_offset[0] = 0;
for (i = 1; i < BIN_NUM; i++) {
bin->bin_offset[i] = M;
}
init_row_perm<<<GS, BS>>>(bin->d_row_perm, M);
}
}
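/* Numeric-phase binning: the same bucketing, driven by the exact per-row nnz of C
   produced by the symbolic phase (thresholds B_MIN / B_PWMIN). */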
void set_min_bin(sfBIN *bin, int M)
{
int i;
int GS, BS;
for (i = 0; i < BIN_NUM; i++) {
bin->bin_size[i] = 0;
bin->bin_offset[i] = 0;
}
cudaMemcpy(bin->d_bin_size, bin->bin_size, sizeof(int) * BIN_NUM, cudaMemcpyHostToDevice);
cudaMemcpy(bin->d_max, &(bin->max_nz), sizeof(int), cudaMemcpyHostToDevice);
BS = 1024;
GS = div_round_up(M, BS);
set_bin<<<GS, BS>>>(bin->d_row_nz, bin->d_bin_size,
bin->d_max,
M, B_MIN, B_PWMIN);
cudaMemcpy(&(bin->max_nz), bin->d_max, sizeof(int), cudaMemcpyDeviceToHost);
if (bin->max_nz > B_PWMIN) {
cudaMemcpy(bin->bin_size, bin->d_bin_size, sizeof(int) * BIN_NUM, cudaMemcpyDeviceToHost);
cudaMemcpy(bin->d_bin_size, bin->bin_offset, sizeof(int) * BIN_NUM, cudaMemcpyHostToDevice);
for (i = 0; i < BIN_NUM - 1; i++) {
bin->bin_offset[i + 1] = bin->bin_offset[i] + bin->bin_size[i];
}
cudaMemcpy(bin->d_bin_offset, bin->bin_offset, sizeof(int) * BIN_NUM, cudaMemcpyHostToDevice);
set_row_perm<<<GS, BS>>>(bin->d_bin_size, bin->d_bin_offset, bin->d_row_nz, bin->d_row_perm, M, B_MIN, B_PWMIN);
}
else {
bin->bin_size[0] = M;
for (i = 1; i < BIN_NUM; i++) {
bin->bin_size[i] = 0;
}
bin->bin_offset[0] = 0;
for (i = 1; i < BIN_NUM; i++) {
bin->bin_offset[i] = M;
}
BS = 1024;
GS = div_round_up(M, BS);
init_row_perm<<<GS, BS>>>(bin->d_row_perm, M);
}
}
__global__ void init_value(real *d_val, int nz)
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i >= nz) {
return;
}
d_val[i] = 0;
}
__global__ void init_check(int *d_check, int nz)
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i >= nz) {
return;
}
d_check[i] = -1;
}
__global__ void set_row_nz_bin_pwarp(const int *d_arpt, const int *d_acol,
const int* __restrict__ d_brpt,
const int* __restrict__ d_bcol,
const int *d_row_perm,
int *d_row_nz,
int bin_offset, int M) {
int i = blockIdx.x * blockDim.x + threadIdx.x;
int rid = i / PWARP;
int tid = i % PWARP;
int local_rid = rid % (blockDim.x / PWARP);
int j, k;
int soffset;
int acol, bcol, key, hash, adr, nz, old;
__shared__ int check[IMB_PW_SH_SIZE];
soffset = local_rid * IMB_PWMIN;
for (j = tid; j < IMB_PWMIN; j += PWARP) {
check[soffset + j] = -1;
}
if (rid >= M) {
return;
}
rid = d_row_perm[rid + bin_offset];
nz = 0;
for (j = d_arpt[rid] + tid; j < d_arpt[rid + 1]; j += PWARP) {
acol = ld_gbl_int32(d_acol + j);
for (k = d_brpt[acol]; k < d_brpt[acol + 1]; k++) {
bcol = d_bcol[k];
key = bcol;
hash = (bcol * HASH_SCAL) & (IMB_PWMIN - 1);
adr = soffset + hash;
while (1) {
if (check[adr] == key) {
break;
}
else if (check[adr] == -1) {
old = atomicCAS(check + adr, -1, key);
if (old == -1) {
nz++;
break;
}
}
else {
hash = (hash + 1) & (IMB_PWMIN - 1);
adr = soffset + hash;
}
}
}
}
for (j = PWARP / 2; j >= 1; j /= 2) {
nz += __shfl_xor(nz, j);
}
if (tid == 0) {
d_row_nz[rid] = nz;
}
}
template <int SH_ROW>
__global__ void set_row_nz_bin_each(const int *d_arpt, const int *d_acol,
const int* __restrict__ d_brpt,
const int* __restrict__ d_bcol,
const int *d_row_perm,
int *d_row_nz, int bin_offset, int M)
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
int rid = i / WARP;
int tid = i % WARP;
int wid = rid % (blockDim.x / WARP);
int j, k, l;
int bcol, key, hash, old;
int nz, adr;
int acol, ccol;
int soffset;
soffset = wid * SH_ROW;
__shared__ int check[IMB_SH_SIZE];
for (j = tid; j < SH_ROW; j += WARP) {
check[soffset + j] = -1;
}
if (rid >= M) {
return;
}
acol = 0;
nz = 0;
rid = d_row_perm[rid + bin_offset];
for (j = d_arpt[rid]; j < d_arpt[rid + 1]; j += WARP) {
if (j + tid < d_arpt[rid + 1]) acol = ld_gbl_int32(d_acol + j + tid);
for (l = 0; l < WARP && j + l < d_arpt[rid + 1]; l++) {
ccol = __shfl(acol, l);
for (k = d_brpt[ccol] + tid; k < d_brpt[ccol + 1]; k += WARP) {
bcol = d_bcol[k];
key = bcol;
hash = (bcol * HASH_SCAL) & (SH_ROW - 1);
adr = soffset + hash;
while (1) {
if (check[adr] == key) {
break;
}
else if (check[adr] == -1) {
old = atomicCAS(check + adr, -1, key);
if (old == -1) {
nz++;
break;
}
}
else {
hash = (hash + 1) & (SH_ROW - 1);
adr = soffset + hash;
}
}
}
}
}
for (j = WARP / 2; j >= 1; j /= 2) {
nz += __shfl_xor(nz, j);
}
if (tid == 0) {
d_row_nz[rid] = nz;
}
}
template <int SH_ROW>
__global__ void set_row_nz_bin_each_tb(const int *d_arpt, const int *d_acol,
const int* __restrict__ d_brpt,
const int* __restrict__ d_bcol,
int *d_row_perm, int *d_row_nz,
int bin_offset, int M)
{
int rid = blockIdx.x;
int tid = threadIdx.x & (WARP - 1);
int wid = threadIdx.x / WARP;
int wnum = blockDim.x / WARP;
int j, k;
int bcol, key, hash, old;
int nz, adr;
int acol;
__shared__ int check[SH_ROW];
for (j = threadIdx.x; j < SH_ROW; j += blockDim.x) {
check[j] = -1;
}
if (rid >= M) {
return;
}
__syncthreads();
nz = 0;
rid = d_row_perm[rid + bin_offset];
for (j = d_arpt[rid] + wid; j < d_arpt[rid + 1]; j += wnum) {
acol = ld_gbl_int32(d_acol + j);
for (k = d_brpt[acol] + tid; k < d_brpt[acol + 1]; k += WARP) {
bcol = d_bcol[k];
key = bcol;
hash = (bcol * HASH_SCAL) & (SH_ROW - 1);
adr = hash;
while (1) {
if (check[adr] == key) {
break;
}
else if (check[adr] == -1) {
old = atomicCAS(check + adr, -1, key);
if (old == -1) {
nz++;
break;
}
}
else {
hash = (hash + 1) & (SH_ROW - 1);
adr = hash;
}
}
}
}
for (j = WARP / 2; j >= 1; j /= 2) {
nz += __shfl_xor(nz, j);
}
__syncthreads();
if (threadIdx.x == 0) {
check[0] = 0;
}
__syncthreads();
if (tid == 0) {
atomicAdd(check, nz);
}
__syncthreads();
if (threadIdx.x == 0) {
d_row_nz[rid] = check[0];
}
}
template <int SH_ROW>
__global__ void set_row_nz_bin_each_tb_large(const int *d_arpt, const int *d_acol,
const int* __restrict__ d_brpt,
const int* __restrict__ d_bcol,
int *d_row_perm, int *d_row_nz,
int *d_fail_count, int *d_fail_perm,
int bin_offset, int M)
{
int rid = blockIdx.x;
int tid = threadIdx.x & (WARP - 1);
int wid = threadIdx.x / WARP;
int wnum = blockDim.x / WARP;
int j, k;
int bcol, key, hash, old;
int adr;
int acol;
__shared__ int check[SH_ROW];
__shared__ int snz[1];
for (j = threadIdx.x; j < SH_ROW; j += blockDim.x) {
check[j] = -1;
}
if (threadIdx.x == 0) {
snz[0] = 0;
}
if (rid >= M) {
return;
}
__syncthreads();
rid = d_row_perm[rid + bin_offset];
int count = 0;
int border = SH_ROW >> 1;
for (j = d_arpt[rid] + wid; j < d_arpt[rid + 1]; j += wnum) {
acol = ld_gbl_int32(d_acol + j);
for (k = d_brpt[acol] + tid; k < d_brpt[acol + 1]; k += WARP) {
bcol = d_bcol[k];
key = bcol;
hash = (bcol * HASH_SCAL) & (SH_ROW - 1);
adr = hash;
while (count < border && snz[0] < border) {
if (check[adr] == key) {
break;
}
else if (check[adr] == -1) {
old = atomicCAS(check + adr, -1, key);
if (old == -1) {
atomicAdd(snz, 1);
break;
}
}
else {
hash = (hash + 1) & (SH_ROW - 1);
adr = hash;
count++;
}
}
if (count >= border || snz[0] >= border) {
break;
}
}
if (count >= border || snz[0] >= border) {
break;
}
}
__syncthreads();
if (count >= border || snz[0] >= border) {
if (threadIdx.x == 0) {
int d = atomicAdd(d_fail_count, 1);
d_fail_perm[d] = rid;
}
}
else {
if (threadIdx.x == 0) {
d_row_nz[rid] = snz[0];
}
}
}
__global__ void set_row_nz_bin_each_gl(const int *d_arpt, const int *d_acol,
const int* __restrict__ d_brpt,
const int* __restrict__ d_bcol,
const int *d_row_perm,
int *d_row_nz, int *d_check,
int max_row_nz, int bin_offset, int M)
{
int rid = blockIdx.x;
int tid = threadIdx.x & (WARP - 1);
int wid = threadIdx.x / WARP;
int wnum = blockDim.x / WARP;
int j, k;
int bcol, key, hash, old;
int nz, adr;
int acol;
int offset = rid * max_row_nz;
__shared__ int snz[1];
if (threadIdx.x == 0) {
snz[0] = 0;
}
__syncthreads();
if (rid >= M) {
return;
}
nz = 0;
rid = d_row_perm[rid + bin_offset];
for (j = d_arpt[rid] + wid; j < d_arpt[rid + 1]; j += wnum) {
acol = ld_gbl_int32(d_acol + j);
for (k = d_brpt[acol] + tid; k < d_brpt[acol + 1]; k += WARP) {
bcol = d_bcol[k];
key = bcol;
hash = (bcol * HASH_SCAL) % max_row_nz;
adr = offset + hash;
while (1) {
if (d_check[adr] == key) {
break;
}
else if (d_check[adr] == -1) {
old = atomicCAS(d_check + adr, -1, key);
if (old == -1) {
nz++;
break;
}
}
else {
hash = (hash + 1) % max_row_nz;
adr = offset + hash;
}
}
}
}
for (j = WARP / 2; j >= 1; j /= 2) {
nz += __shfl_xor(nz, j);
}
if (tid == 0) {
atomicAdd(snz, nz);
}
__syncthreads();
if (threadIdx.x == 0) {
d_row_nz[rid] = snz[0];
}
}
void set_row_nnz(int *d_arpt, int *d_acol,
int *d_brpt, int *d_bcol,
int *d_crpt,
sfBIN *bin,
int M, int *nnz);
__global__ void calculate_value_col_bin_pwarp(const int *d_arpt,
const int *d_acol,
const real *d_aval,
const int* __restrict__ d_brpt,
const int* __restrict__ d_bcol,
const real* __restrict__ d_bval,
int *d_crpt,
int *d_ccol,
real *d_cval,
const int *d_row_perm,
int *d_nz,
int bin_offset,
int bin_size) {
int i = blockIdx.x * blockDim.x + threadIdx.x;
int rid = i / PWARP;
int tid = i % PWARP;
int local_rid = rid % (blockDim.x / PWARP);
int j;
__shared__ int shared_check[B_PW_SH_SIZE];
__shared__ real shared_value[B_PW_SH_SIZE];
int soffset = local_rid * (B_PWMIN);
for (j = tid; j < (B_PWMIN); j += PWARP) {
shared_check[soffset + j] = -1;
shared_value[soffset + j] = 0;
}
if (rid >= bin_size) {
return;
}
rid = d_row_perm[rid + bin_offset];
if (tid == 0) {
d_nz[rid] = 0;
}
int k;
int acol, bcol, hash, key, adr;
int offset = d_crpt[rid];
int old, index;
real aval, bval;
for (j = d_arpt[rid] + tid; j < d_arpt[rid + 1]; j += PWARP) {
acol = ld_gbl_int32(d_acol + j);
aval = ld_gbl_real(d_aval + j);
for (k = d_brpt[acol]; k < d_brpt[acol + 1]; k++) {
bcol = d_bcol[k];
bval = d_bval[k];
key = bcol;
hash = (bcol * HASH_SCAL) & ((B_PWMIN) - 1);
adr = soffset + hash;
while (1) {
if (shared_check[adr] == key) {
atomic_fadd(shared_value + adr, aval * bval);
break;
}
else if (shared_check[adr] == -1) {
old = atomicCAS(shared_check + adr, -1, key);
if (old == -1) {
atomic_fadd(shared_value + adr, aval * bval);
break;
}
}
else {
hash = (hash + 1) & ((B_PWMIN) - 1);
adr = soffset + hash;
}
}
}
}
for (j = tid; j < (B_PWMIN); j += PWARP) {
if (shared_check[soffset + j] != -1) {
index = atomicAdd(d_nz + rid, 1);
shared_check[soffset + index] = shared_check[soffset + j];
shared_value[soffset + index] = shared_value[soffset + j];
}
}
int nz = d_nz[rid];
// Sorting for shared data
int count, target;
for (j = tid; j < nz; j += PWARP) {
target = shared_check[soffset + j];
count = 0;
for (k = 0; k < nz; k++) {
count += (unsigned int)(shared_check[soffset + k] - target) >> 31;
}
d_ccol[offset + count] = shared_check[soffset + j];
d_cval[offset + count] = shared_value[soffset + j];
}
}
template <int SH_ROW>
__global__ void calculate_value_col_bin_each(const int *d_arpt,
const int *d_acol,
const real *d_aval,
const int* __restrict__ d_brpt,
const int* __restrict__ d_bcol,
const real* __restrict__ d_bval,
int *d_crpt,
int *d_ccol,
real *d_cval,
const int *d_row_perm,
int *d_nz,
int bin_offset,
int bin_size)
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
int rid = i / WARP;
int tid = i % WARP;
int wid = rid % (blockDim.x / WARP);
int j;
__shared__ int shared_check[B_SH_SIZE];
__shared__ real shared_value[B_SH_SIZE];
int soffset = wid * SH_ROW;
for (j = tid; j < SH_ROW; j += WARP) {
shared_check[soffset + j] = -1;
shared_value[soffset + j] = 0;
}
if (rid >= bin_size) {
return;
}
rid = d_row_perm[rid + bin_offset];
if (tid == 0) {
d_nz[rid] = 0;
}
int lacol, acol;
int k, l;
int bcol, hash, key, adr;
int offset = d_crpt[rid];
int old, index;
real laval, aval, bval;
lacol = 0;
for (j = d_arpt[rid]; j < d_arpt[rid + 1]; j += WARP) {
if (j + tid < d_arpt[rid + 1]) {
lacol = ld_gbl_int32(d_acol + j + tid);
laval = ld_gbl_real(d_aval + j + tid);
}
for (l = 0; l < WARP && j + l < d_arpt[rid + 1]; l++) {
acol = __shfl(lacol, l);
aval = __shfl(laval, l);
for (k = d_brpt[acol] + tid; k < d_brpt[acol + 1]; k += WARP) {
bcol = d_bcol[k];
bval = d_bval[k];
key = bcol;
hash = (bcol * HASH_SCAL) & (SH_ROW - 1);
adr = soffset + hash;
while (1) {
if (shared_check[adr] == key) {
atomic_fadd(shared_value + adr, aval * bval);
break;
}
else if (shared_check[adr] == -1) {
old = atomicCAS(shared_check + adr, -1, key);
if (old == -1) {
atomic_fadd(shared_value + adr, aval * bval);
break;
}
}
else {
hash = (hash + 1) & (SH_ROW - 1);
adr = soffset + hash;
}
}
}
}
}
for (j = tid; j < SH_ROW; j += WARP) {
if (shared_check[soffset + j] != -1) {
index = atomicAdd(d_nz + rid, 1);
shared_check[soffset + index] = shared_check[soffset + j];
shared_value[soffset + index] = shared_value[soffset + j];
}
}
int nz = d_nz[rid];
/* Sorting for shared data */
int count, target;
for (j = tid; j < nz; j += WARP) {
target = shared_check[soffset + j];
count = 0;
for (k = 0; k < nz; k++) {
count += (unsigned int)(shared_check[soffset + k] - target) >> 31;
}
d_ccol[offset + count] = shared_check[soffset + j];
d_cval[offset + count] = shared_value[soffset + j];
}
}
template <int SH_ROW>
__global__ void calculate_value_col_bin_each_tb(const int *d_arpt,
const int *d_acol,
const real *d_aval,
const int* __restrict__ d_brpt,
const int* __restrict__ d_bcol,
const real* __restrict__ d_bval,
int *d_crpt,
int *d_ccol,
real *d_cval,
const int *d_row_perm,
int *d_nz,
int bin_offset,
int bin_size)
{
int rid = blockIdx.x;
int tid = threadIdx.x & (WARP - 1);
int wid = threadIdx.x / WARP;
int wnum = blockDim.x / WARP;
int j;
__shared__ int shared_check[SH_ROW];
__shared__ real shared_value[SH_ROW];
for (j = threadIdx.x; j < SH_ROW; j += blockDim.x) {
shared_check[j] = -1;
shared_value[j] = 0;
}
if (rid >= bin_size) {
return;
}
rid = d_row_perm[rid + bin_offset];
if (threadIdx.x == 0) {
d_nz[rid] = 0;
}
__syncthreads();
int acol;
int k;
int bcol, hash, key;
int offset = d_crpt[rid];
int old, index;
real aval, bval;
for (j = d_arpt[rid] + wid; j < d_arpt[rid + 1]; j += wnum) {
acol = ld_gbl_int32(d_acol + j);
aval = ld_gbl_real(d_aval + j);
for (k = d_brpt[acol] + tid; k < d_brpt[acol + 1]; k += WARP) {
bcol = d_bcol[k];
bval = d_bval[k];
key = bcol;
hash = (bcol * HASH_SCAL) & (SH_ROW - 1);
while (1) {
if (shared_check[hash] == key) {
atomic_fadd(shared_value + hash, aval * bval);
break;
}
else if (shared_check[hash] == -1) {
old = atomicCAS(shared_check + hash, -1, key);
if (old == -1) {
atomic_fadd(shared_value + hash, aval * bval);
break;
}
}
else {
hash = (hash + 1) & (SH_ROW - 1);
}
}
}
}
__syncthreads();
if (threadIdx.x < WARP) {
for (j = tid; j < SH_ROW; j += WARP) {
if (shared_check[j] != -1) {
index = atomicAdd(d_nz + rid, 1);
shared_check[index] = shared_check[j];
shared_value[index] = shared_value[j];
}
}
}
__syncthreads();
int nz = d_nz[rid];
/* Sorting for shared data */
int count, target;
for (j = threadIdx.x; j < nz; j += blockDim.x) {
target = shared_check[j];
count = 0;
for (k = 0; k < nz; k++) {
count += (unsigned int)(shared_check[k] - target) >> 31;
}
d_ccol[offset + count] = shared_check[j];
d_cval[offset + count] = shared_value[j];
}
}
__global__ void calculate_value_col_bin_each_gl(const int *d_arpt,
const int *d_acol,
const real *d_aval,
const int* __restrict__ d_brpt,
const int* __restrict__ d_bcol,
const real* __restrict__ d_bval,
int *d_crpt,
int *d_ccol,
real *d_cval,
const int *d_row_perm,
int *d_nz,
int *d_check,
real *d_value,
int max_row_nz,
int bin_offset,
int M)
{
int rid = blockIdx.x;
int tid = threadIdx.x & (WARP - 1);
int wid = threadIdx.x / WARP;
int wnum = blockDim.x / WARP;
int j;
if (rid >= M) {
return;
}
int doffset = rid * max_row_nz;
rid = d_row_perm[rid + bin_offset];
if (threadIdx.x == 0) {
d_nz[rid] = 0;
}
__syncthreads();
int acol;
int k;
int bcol, hash, key, adr;
int offset = d_crpt[rid];
int old, index;
real aval, bval;
for (j = d_arpt[rid] + wid; j < d_arpt[rid + 1]; j += wnum) {
acol = ld_gbl_int32(d_acol + j);
aval = ld_gbl_real(d_aval + j);
for (k = d_brpt[acol] + tid; k < d_brpt[acol + 1]; k += WARP) {
bcol = d_bcol[k];
bval = d_bval[k];
key = bcol;
hash = (bcol * HASH_SCAL) % max_row_nz;
adr = doffset + hash;
while (1) {
if (d_check[adr] == key) {
atomic_fadd(d_value + adr, aval * bval);
break;
}
else if (d_check[adr] == -1) {
old = atomicCAS(d_check + adr, -1, key);
if (old == -1) {
atomic_fadd(d_value + adr, aval * bval);
break;
}
}
else {
hash = (hash + 1) % max_row_nz;
adr = doffset + hash;
}
}
}
}
__syncthreads();
if (threadIdx.x < WARP) {
for (j = tid; j < max_row_nz; j += WARP) {
if (d_check[doffset + j] != -1) {
index = atomicAdd(d_nz + rid, 1);
d_check[doffset + index] = d_check[doffset + j];
d_value[doffset + index] = d_value[doffset + j];
}
}
}
__syncthreads();
int nz = d_nz[rid];
/* Sorting for shared data */
int count, target;
for (j = threadIdx.x; j < nz; j += blockDim.x) {
target = d_check[doffset + j];
count = 0;
for (k = 0; k < nz; k++) {
count += (unsigned int)(d_check[doffset + k] - target) >> 31;
}
d_ccol[offset + count] = d_check[doffset + j];
d_cval[offset + count] = d_value[doffset + j];
}
}
void calculate_value_col_bin(int *d_arpt, int *d_acol, real *d_aval,
int *d_brpt, int *d_bcol, real *d_bval,
int *d_crpt, int *d_ccol, real *d_cval,
sfBIN *bin,
int M);
void spgemm_kernel_hash(sfCSR *a, sfCSR *b, sfCSR *c)
{
int M;
sfBIN bin;
M = a->M;
c->M = M;
c->N = b->N;
/* Initialize bin */
init_bin(&bin, M);
checkCudaErrors(cudaDeviceSynchronize());
/* Set max bin */
set_max_bin(a->d_rpt, a->d_col, b->d_rpt, &bin, M);
checkCudaErrors(cudaDeviceSynchronize());
checkCudaErrors(cudaMalloc((void **)&(c->d_rpt), sizeof(int) * (M + 1)));
/* Count nz of C */
set_row_nnz(a->d_rpt, a->d_col,
b->d_rpt, b->d_col,
c->d_rpt,
&bin,
M,
&(c->nnz));
/* Set bin */
set_min_bin(&bin, M);
checkCudaErrors(cudaMalloc((void **)&(c->d_col), sizeof(int) * c->nnz));
checkCudaErrors(cudaMalloc((void **)&(c->d_val), sizeof(real) * c->nnz));
/* Calculating value of C */
calculate_value_col_bin(a->d_rpt, a->d_col, a->d_val,
b->d_rpt, b->d_col, b->d_val,
c->d_rpt, c->d_col, c->d_val,
&bin,
M);
release_bin(bin);
}
void set_row_nnz(int *d_arpt, int *d_acol,
int *d_brpt, int *d_bcol,
int *d_crpt,
sfBIN *bin,
int M,
int *nnz)
{
int i;
int GS, BS;
for (i = BIN_NUM - 1; i >= 0; i--) {
if (bin->bin_size[i] > 0) {
switch (i) {
case 0:
BS = 256;
GS = div_round_up(bin->bin_size[i] * PWARP, BS);
set_row_nz_bin_pwarp<<<GS, BS, 0, bin->stream[i]>>>
(d_arpt, d_acol,
d_brpt, d_bcol,
bin->d_row_perm,
bin->d_row_nz,
bin->bin_offset[i],
bin->bin_size[i]);
break;
case 1 :
BS = 64;
GS = bin->bin_size[i];
set_row_nz_bin_each_tb<512><<<GS, BS, 0, bin->stream[i]>>>
(d_arpt, d_acol, d_brpt, d_bcol,
bin->d_row_perm, bin->d_row_nz,
bin->bin_offset[i], bin->bin_size[i]);
break;
case 2 :
BS = 128;
GS = bin->bin_size[i];
set_row_nz_bin_each_tb<1024><<<GS, BS, 0, bin->stream[i]>>>
(d_arpt, d_acol, d_brpt, d_bcol,
bin->d_row_perm, bin->d_row_nz,
bin->bin_offset[i], bin->bin_size[i]);
break;
case 3 :
BS = 256;
GS = bin->bin_size[i];
set_row_nz_bin_each_tb<2048><<<GS, BS, 0, bin->stream[i]>>>
(d_arpt, d_acol, d_brpt, d_bcol,
bin->d_row_perm, bin->d_row_nz,
bin->bin_offset[i], bin->bin_size[i]);
break;
case 4 :
BS = 512;
GS = bin->bin_size[i];
set_row_nz_bin_each_tb<4096><<<GS, BS, 0, bin->stream[i]>>>
(d_arpt, d_acol, d_brpt, d_bcol,
bin->d_row_perm, bin->d_row_nz,
bin->bin_offset[i], bin->bin_size[i]);
break;
case 5 :
BS = 1024;
GS = bin->bin_size[i];
set_row_nz_bin_each_tb<8192><<<GS, BS, 0, bin->stream[i]>>>
(d_arpt, d_acol, d_brpt, d_bcol,
bin->d_row_perm, bin->d_row_nz,
bin->bin_offset[i], bin->bin_size[i]);
break;
case 6 :
{
int fail_count;
int *d_fail_count, *d_fail_perm;
fail_count = 0;
checkCudaErrors(cudaMalloc((void **)&d_fail_count, sizeof(int)));
checkCudaErrors(cudaMalloc((void **)&d_fail_perm, sizeof(int) * bin->bin_size[i]));
cudaMemcpy(d_fail_count, &fail_count, sizeof(int), cudaMemcpyHostToDevice);
BS = 1024;
GS = bin->bin_size[i];
set_row_nz_bin_each_tb_large<8192><<<GS, BS, 0, bin->stream[i]>>>
(d_arpt, d_acol, d_brpt, d_bcol,
bin->d_row_perm, bin->d_row_nz,
d_fail_count, d_fail_perm,
bin->bin_offset[i], bin->bin_size[i]);
cudaMemcpy(&fail_count, d_fail_count, sizeof(int), cudaMemcpyDeviceToHost);
if (fail_count > 0) {
int max_row_nz = bin->max_intprod;
size_t table_size = (size_t)max_row_nz * fail_count;
int *d_check;
checkCudaErrors(cudaMalloc((void **)&(d_check), sizeof(int) * table_size));
BS = 1024;
GS = div_round_up(table_size, BS);
init_check<<<GS, BS, 0, bin->stream[i]>>>(d_check, table_size);
GS = bin->bin_size[i];
set_row_nz_bin_each_gl<<<GS, BS, 0, bin->stream[i]>>>
(d_arpt, d_acol, d_brpt, d_bcol,
d_fail_perm, bin->d_row_nz, d_check,
max_row_nz, 0, fail_count);
cudaFree(d_check);
}
cudaFree(d_fail_count);
cudaFree(d_fail_perm);
}
break;
default :
exit(0);
}
}
}
cudaThreadSynchronize();
/* Set row pointer of matrix C */
thrust::exclusive_scan(thrust::device, bin->d_row_nz, bin->d_row_nz + (M + 1), d_crpt, 0);
cudaMemcpy(nnz, d_crpt + M, sizeof(int), cudaMemcpyDeviceToHost);
}
void calculate_value_col_bin(int *d_arpt, int *d_acol, real *d_aval,
int *d_brpt, int *d_bcol, real *d_bval,
int *d_crpt, int *d_ccol, real *d_cval,
sfBIN *bin,
int M)
{
int i;
int GS, BS;
for (i = BIN_NUM - 1; i >= 0; i--) {
if (bin->bin_size[i] > 0) {
switch (i) {
case 0:
BS = 256;
GS = div_round_up(bin->bin_size[i] * PWARP, BS);
calculate_value_col_bin_pwarp<<<GS, BS, 0, bin->stream[i]>>>
(d_arpt, d_acol, d_aval,
d_brpt, d_bcol, d_bval,
d_crpt, d_ccol, d_cval,
bin->d_row_perm, bin->d_row_nz,
bin->bin_offset[i], bin->bin_size[i]);
break;
case 1:
BS = 64;
GS = bin->bin_size[i];
calculate_value_col_bin_each_tb<256><<<GS, BS, 0, bin->stream[i]>>>
(d_arpt, d_acol, d_aval,
d_brpt, d_bcol, d_bval,
d_crpt, d_ccol, d_cval,
bin->d_row_perm, bin->d_row_nz,
bin->bin_offset[i], bin->bin_size[i]);
break;
case 2:
BS = 128;
GS = bin->bin_size[i];
calculate_value_col_bin_each_tb<512><<<GS, BS, 0, bin->stream[i]>>>
(d_arpt, d_acol, d_aval,
d_brpt, d_bcol, d_bval,
d_crpt, d_ccol, d_cval,
bin->d_row_perm, bin->d_row_nz,
bin->bin_offset[i], bin->bin_size[i]);
break;
case 3:
BS = 256;
GS = bin->bin_size[i];
calculate_value_col_bin_each_tb<1024><<<GS, BS, 0, bin->stream[i]>>>
(d_arpt, d_acol, d_aval,
d_brpt, d_bcol, d_bval,
d_crpt, d_ccol, d_cval,
bin->d_row_perm, bin->d_row_nz,
bin->bin_offset[i], bin->bin_size[i]);
break;
case 4:
BS = 512;
GS = bin->bin_size[i];
calculate_value_col_bin_each_tb<2048><<<GS, BS, 0, bin->stream[i]>>>
(d_arpt, d_acol, d_aval,
d_brpt, d_bcol, d_bval,
d_crpt, d_ccol, d_cval,
bin->d_row_perm, bin->d_row_nz,
bin->bin_offset[i], bin->bin_size[i]);
break;
case 5:
BS = 1024;
GS = bin->bin_size[i];
calculate_value_col_bin_each_tb<4096><<<GS, BS, 0, bin->stream[i]>>>
(d_arpt, d_acol, d_aval,
d_brpt, d_bcol, d_bval,
d_crpt, d_ccol, d_cval,
bin->d_row_perm, bin->d_row_nz,
bin->bin_offset[i], bin->bin_size[i]);
break;
case 6 :
{
int max_row_nz = bin->max_nz * 2;
size_t table_size = (size_t)max_row_nz * bin->bin_size[i]; /* widened as in set_row_nnz to avoid int overflow */
int *d_check;
real *d_value;
checkCudaErrors(cudaMalloc((void **)&(d_check), sizeof(int) * table_size));
checkCudaErrors(cudaMalloc((void **)&(d_value), sizeof(real) * table_size));
BS = 1024;
GS = div_round_up(table_size, BS);
init_check<<<GS, BS, 0, bin->stream[i]>>>(d_check, table_size);
init_value<<<GS, BS, 0, bin->stream[i]>>>(d_value, table_size);
GS = bin->bin_size[i];
calculate_value_col_bin_each_gl<<<GS, BS, 0, bin->stream[i]>>>
(d_arpt, d_acol, d_aval,
d_brpt, d_bcol, d_bval,
d_crpt, d_ccol, d_cval,
bin->d_row_perm, bin->d_row_nz,
d_check, d_value, max_row_nz,
bin->bin_offset[i], bin->bin_size[i]);
cudaFree(d_check);
cudaFree(d_value);
}
break;
default :
exit(0);
}
}
}
cudaThreadSynchronize();
}
|
2eb4bc4eb74ec56ede1f0a2b89554b0ae6c54fdf.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
__global__ void gradientLayersKernel( float *d_Dst, float *d_Src, int imageW, int imageH, int imageD )
{
__shared__ float s_Data[LAYERS_GRAD_BLOCKDIM_X][LAYERS_GRAD_BLOCKDIM_Y][(LAYERS_GRAD_RESULT_STEPS + 2 * LAYERS_GRAD_HALO_STEPS) * LAYERS_GRAD_BLOCKDIM_Z + 1];
//Offset to the upper halo edge
const int baseX = blockIdx.x * LAYERS_GRAD_BLOCKDIM_X + threadIdx.x;
const int baseY = blockIdx.y * LAYERS_GRAD_BLOCKDIM_Y + threadIdx.y;
const int baseZ = (blockIdx.z * LAYERS_GRAD_RESULT_STEPS - LAYERS_GRAD_HALO_STEPS) * LAYERS_GRAD_BLOCKDIM_Z + threadIdx.z;
d_Src += (baseZ * imageH + baseY) * imageW + baseX;
d_Dst += (baseZ * imageH + baseY) * imageW + baseX;
const int pitch = imageW*imageH;
//Main data
#pragma unroll
for (int i = LAYERS_GRAD_HALO_STEPS; i < LAYERS_GRAD_HALO_STEPS + LAYERS_GRAD_RESULT_STEPS; i++) {
s_Data[threadIdx.x][threadIdx.y][threadIdx.z + i * LAYERS_GRAD_BLOCKDIM_Z] = d_Src[i * LAYERS_GRAD_BLOCKDIM_Z * pitch];
}
//Upper halo
#pragma unroll
for (int i = 0; i < LAYERS_GRAD_HALO_STEPS; i++) {
s_Data[threadIdx.x][threadIdx.y][threadIdx.z + i * LAYERS_GRAD_BLOCKDIM_Z] = (baseZ + i * LAYERS_GRAD_BLOCKDIM_Z >= 0) ? d_Src[i * LAYERS_GRAD_BLOCKDIM_Z * pitch] : 0;
}
//Lower halo
#pragma unroll
for (int i = LAYERS_GRAD_HALO_STEPS + LAYERS_GRAD_RESULT_STEPS; i < LAYERS_GRAD_HALO_STEPS + LAYERS_GRAD_RESULT_STEPS + LAYERS_GRAD_HALO_STEPS; i++) {
s_Data[threadIdx.x][threadIdx.y][threadIdx.z + i * LAYERS_GRAD_BLOCKDIM_Z]= (baseZ + i * LAYERS_GRAD_BLOCKDIM_Z < imageD) ? d_Src[i * LAYERS_GRAD_BLOCKDIM_Z * pitch] : 0;
}
//Compute and store results
__syncthreads();
#pragma unroll
for (int i = LAYERS_GRAD_HALO_STEPS; i < LAYERS_GRAD_HALO_STEPS + LAYERS_GRAD_RESULT_STEPS; i++) {
float sum = 0;
sum += s_Data[threadIdx.x][threadIdx.y][threadIdx.z + i * LAYERS_GRAD_BLOCKDIM_Z + 1];
sum -= s_Data[threadIdx.x][threadIdx.y][threadIdx.z + i * LAYERS_GRAD_BLOCKDIM_Z - 1];
sum *= 0.5f;
d_Dst[i * LAYERS_GRAD_BLOCKDIM_Z * pitch] = sum;
}
} | 2eb4bc4eb74ec56ede1f0a2b89554b0ae6c54fdf.cu | #include "includes.h"
__global__ void gradientLayersKernel( float *d_Dst, float *d_Src, int imageW, int imageH, int imageD )
{
__shared__ float s_Data[LAYERS_GRAD_BLOCKDIM_X][LAYERS_GRAD_BLOCKDIM_Y][(LAYERS_GRAD_RESULT_STEPS + 2 * LAYERS_GRAD_HALO_STEPS) * LAYERS_GRAD_BLOCKDIM_Z + 1];
//Offset to the upper halo edge
const int baseX = blockIdx.x * LAYERS_GRAD_BLOCKDIM_X + threadIdx.x;
const int baseY = blockIdx.y * LAYERS_GRAD_BLOCKDIM_Y + threadIdx.y;
const int baseZ = (blockIdx.z * LAYERS_GRAD_RESULT_STEPS - LAYERS_GRAD_HALO_STEPS) * LAYERS_GRAD_BLOCKDIM_Z + threadIdx.z;
d_Src += (baseZ * imageH + baseY) * imageW + baseX;
d_Dst += (baseZ * imageH + baseY) * imageW + baseX;
const int pitch = imageW*imageH;
//Main data
#pragma unroll
for (int i = LAYERS_GRAD_HALO_STEPS; i < LAYERS_GRAD_HALO_STEPS + LAYERS_GRAD_RESULT_STEPS; i++) {
s_Data[threadIdx.x][threadIdx.y][threadIdx.z + i * LAYERS_GRAD_BLOCKDIM_Z] = d_Src[i * LAYERS_GRAD_BLOCKDIM_Z * pitch];
}
//Upper halo
#pragma unroll
for (int i = 0; i < LAYERS_GRAD_HALO_STEPS; i++) {
s_Data[threadIdx.x][threadIdx.y][threadIdx.z + i * LAYERS_GRAD_BLOCKDIM_Z] = (baseZ + i * LAYERS_GRAD_BLOCKDIM_Z >= 0) ? d_Src[i * LAYERS_GRAD_BLOCKDIM_Z * pitch] : 0;
}
//Lower halo
#pragma unroll
for (int i = LAYERS_GRAD_HALO_STEPS + LAYERS_GRAD_RESULT_STEPS; i < LAYERS_GRAD_HALO_STEPS + LAYERS_GRAD_RESULT_STEPS + LAYERS_GRAD_HALO_STEPS; i++) {
s_Data[threadIdx.x][threadIdx.y][threadIdx.z + i * LAYERS_GRAD_BLOCKDIM_Z]= (baseZ + i * LAYERS_GRAD_BLOCKDIM_Z < imageD) ? d_Src[i * LAYERS_GRAD_BLOCKDIM_Z * pitch] : 0;
}
//Compute and store results
__syncthreads();
#pragma unroll
for (int i = LAYERS_GRAD_HALO_STEPS; i < LAYERS_GRAD_HALO_STEPS + LAYERS_GRAD_RESULT_STEPS; i++) {
float sum = 0;
sum += s_Data[threadIdx.x][threadIdx.y][threadIdx.z + i * LAYERS_GRAD_BLOCKDIM_Z + 1];
sum -= s_Data[threadIdx.x][threadIdx.y][threadIdx.z + i * LAYERS_GRAD_BLOCKDIM_Z - 1];
sum *= 0.5f;
d_Dst[i * LAYERS_GRAD_BLOCKDIM_Z * pitch] = sum;
}
} |
33e80f613fd5225828ead257b06613c3f97484d3.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
//
// auto-generated by ops.py
//
__constant__ int xdim0_initialise_chunk_kernel_zero;
int xdim0_initialise_chunk_kernel_zero_h = -1;
int ydim0_initialise_chunk_kernel_zero_h = -1;
#undef OPS_ACC0
#define OPS_ACC0(x,y) (x+xdim0_initialise_chunk_kernel_zero*(y))
//user function
__device__
void initialise_chunk_kernel_zero_gpu(double *var) {
*var = 0.0;
}
#undef OPS_ACC0
__global__ void ops_initialise_chunk_kernel_zero(
double* __restrict arg0,
int size0,
int size1 ){
int idx_y = blockDim.y * blockIdx.y + threadIdx.y;
int idx_x = blockDim.x * blockIdx.x + threadIdx.x;
arg0 += idx_x * 1*1 + idx_y * 1*1 * xdim0_initialise_chunk_kernel_zero;
if (idx_x < size0 && idx_y < size1) {
initialise_chunk_kernel_zero_gpu(arg0);
}
}
// host stub function
void ops_par_loop_initialise_chunk_kernel_zero(char const *name, ops_block block, int dim, int* range,
ops_arg arg0) {
//Timing
double t1,t2,c1,c2;
ops_arg args[1] = { arg0};
#ifdef CHECKPOINTING
if (!ops_checkpointing_before(args,1,range,5)) return;
#endif
if (OPS_diags > 1) {
ops_timing_realloc(5,"initialise_chunk_kernel_zero");
OPS_kernels[5].count++;
ops_timers_core(&c1,&t1);
}
//compute locally allocated range for the sub-block
int start[2];
int end[2];
#ifdef OPS_MPI
sub_block_list sb = OPS_sub_block_list[block->index];
if (!sb->owned) return;
for ( int n=0; n<2; n++ ){
start[n] = sb->decomp_disp[n];end[n] = sb->decomp_disp[n]+sb->decomp_size[n];
if (start[n] >= range[2*n]) {
start[n] = 0;
}
else {
start[n] = range[2*n] - start[n];
}
if (sb->id_m[n]==MPI_PROC_NULL && range[2*n] < 0) start[n] = range[2*n];
if (end[n] >= range[2*n+1]) {
end[n] = range[2*n+1] - sb->decomp_disp[n];
}
else {
end[n] = sb->decomp_size[n];
}
if (sb->id_p[n]==MPI_PROC_NULL && (range[2*n+1] > sb->decomp_disp[n]+sb->decomp_size[n]))
end[n] += (range[2*n+1]-sb->decomp_disp[n]-sb->decomp_size[n]);
}
#else
for ( int n=0; n<2; n++ ){
start[n] = range[2*n];end[n] = range[2*n+1];
}
#endif
int x_size = MAX(0,end[0]-start[0]);
int y_size = MAX(0,end[1]-start[1]);
int xdim0 = args[0].dat->size[0];
if (xdim0 != xdim0_initialise_chunk_kernel_zero_h) {
hipMemcpyToSymbol( xdim0_initialise_chunk_kernel_zero, &xdim0, sizeof(int) );
xdim0_initialise_chunk_kernel_zero_h = xdim0;
}
dim3 grid( (x_size-1)/OPS_block_size_x+ 1, (y_size-1)/OPS_block_size_y + 1, 1);
dim3 tblock(OPS_block_size_x,OPS_block_size_y,1);
int dat0 = args[0].dat->elem_size;
char *p_a[1];
//set up initial pointers
int d_m[OPS_MAX_DIM];
#ifdef OPS_MPI
for (int d = 0; d < dim; d++) d_m[d] = args[0].dat->d_m[d] + OPS_sub_dat_list[args[0].dat->index]->d_im[d];
#else
for (int d = 0; d < dim; d++) d_m[d] = args[0].dat->d_m[d];
#endif
int base0 = dat0 * 1 *
(start[0] * args[0].stencil->stride[0] - args[0].dat->base[0] - d_m[0]);
base0 = base0+ dat0 *
args[0].dat->size[0] *
(start[1] * args[0].stencil->stride[1] - args[0].dat->base[1] - d_m[1]);
p_a[0] = (char *)args[0].data_d + base0;
ops_H_D_exchanges_device(args, 1);
ops_halo_exchanges(args,1,range);
if (OPS_diags > 1) {
ops_timers_core(&c2,&t2);
OPS_kernels[5].mpi_time += t2-t1;
}
//call kernel wrapper function, passing in pointers to data
hipLaunchKernelGGL(( ops_initialise_chunk_kernel_zero), dim3(grid), dim3(tblock) , 0, 0, (double *)p_a[0],x_size, y_size);
if (OPS_diags>1) {
cutilSafeCall(hipDeviceSynchronize());
ops_timers_core(&c1,&t1);
OPS_kernels[5].time += t1-t2;
}
ops_set_dirtybit_device(args, 1);
ops_set_halo_dirtybit3(&args[0],range);
if (OPS_diags > 1) {
//Update kernel record
ops_timers_core(&c2,&t2);
OPS_kernels[5].mpi_time += t2-t1;
OPS_kernels[5].transfer += ops_compute_transfer(dim, start, end, &arg0);
}
}
| 33e80f613fd5225828ead257b06613c3f97484d3.cu | //
// auto-generated by ops.py
//
__constant__ int xdim0_initialise_chunk_kernel_zero;
int xdim0_initialise_chunk_kernel_zero_h = -1;
int ydim0_initialise_chunk_kernel_zero_h = -1;
#undef OPS_ACC0
#define OPS_ACC0(x,y) (x+xdim0_initialise_chunk_kernel_zero*(y))
//user function
__device__
void initialise_chunk_kernel_zero_gpu(double *var) {
*var = 0.0;
}
#undef OPS_ACC0
__global__ void ops_initialise_chunk_kernel_zero(
double* __restrict arg0,
int size0,
int size1 ){
int idx_y = blockDim.y * blockIdx.y + threadIdx.y;
int idx_x = blockDim.x * blockIdx.x + threadIdx.x;
arg0 += idx_x * 1*1 + idx_y * 1*1 * xdim0_initialise_chunk_kernel_zero;
if (idx_x < size0 && idx_y < size1) {
initialise_chunk_kernel_zero_gpu(arg0);
}
}
// host stub function
void ops_par_loop_initialise_chunk_kernel_zero(char const *name, ops_block block, int dim, int* range,
ops_arg arg0) {
//Timing
double t1,t2,c1,c2;
ops_arg args[1] = { arg0};
#ifdef CHECKPOINTING
if (!ops_checkpointing_before(args,1,range,5)) return;
#endif
if (OPS_diags > 1) {
ops_timing_realloc(5,"initialise_chunk_kernel_zero");
OPS_kernels[5].count++;
ops_timers_core(&c1,&t1);
}
//compute locally allocated range for the sub-block
int start[2];
int end[2];
#ifdef OPS_MPI
sub_block_list sb = OPS_sub_block_list[block->index];
if (!sb->owned) return;
for ( int n=0; n<2; n++ ){
start[n] = sb->decomp_disp[n];end[n] = sb->decomp_disp[n]+sb->decomp_size[n];
if (start[n] >= range[2*n]) {
start[n] = 0;
}
else {
start[n] = range[2*n] - start[n];
}
if (sb->id_m[n]==MPI_PROC_NULL && range[2*n] < 0) start[n] = range[2*n];
if (end[n] >= range[2*n+1]) {
end[n] = range[2*n+1] - sb->decomp_disp[n];
}
else {
end[n] = sb->decomp_size[n];
}
if (sb->id_p[n]==MPI_PROC_NULL && (range[2*n+1] > sb->decomp_disp[n]+sb->decomp_size[n]))
end[n] += (range[2*n+1]-sb->decomp_disp[n]-sb->decomp_size[n]);
}
#else
for ( int n=0; n<2; n++ ){
start[n] = range[2*n];end[n] = range[2*n+1];
}
#endif
int x_size = MAX(0,end[0]-start[0]);
int y_size = MAX(0,end[1]-start[1]);
int xdim0 = args[0].dat->size[0];
if (xdim0 != xdim0_initialise_chunk_kernel_zero_h) {
cudaMemcpyToSymbol( xdim0_initialise_chunk_kernel_zero, &xdim0, sizeof(int) );
xdim0_initialise_chunk_kernel_zero_h = xdim0;
}
dim3 grid( (x_size-1)/OPS_block_size_x+ 1, (y_size-1)/OPS_block_size_y + 1, 1);
dim3 tblock(OPS_block_size_x,OPS_block_size_y,1);
int dat0 = args[0].dat->elem_size;
char *p_a[1];
//set up initial pointers
int d_m[OPS_MAX_DIM];
#ifdef OPS_MPI
for (int d = 0; d < dim; d++) d_m[d] = args[0].dat->d_m[d] + OPS_sub_dat_list[args[0].dat->index]->d_im[d];
#else
for (int d = 0; d < dim; d++) d_m[d] = args[0].dat->d_m[d];
#endif
int base0 = dat0 * 1 *
(start[0] * args[0].stencil->stride[0] - args[0].dat->base[0] - d_m[0]);
base0 = base0+ dat0 *
args[0].dat->size[0] *
(start[1] * args[0].stencil->stride[1] - args[0].dat->base[1] - d_m[1]);
p_a[0] = (char *)args[0].data_d + base0;
ops_H_D_exchanges_device(args, 1);
ops_halo_exchanges(args,1,range);
if (OPS_diags > 1) {
ops_timers_core(&c2,&t2);
OPS_kernels[5].mpi_time += t2-t1;
}
//call kernel wrapper function, passing in pointers to data
ops_initialise_chunk_kernel_zero<<<grid, tblock >>> ( (double *)p_a[0],x_size, y_size);
if (OPS_diags>1) {
cutilSafeCall(cudaDeviceSynchronize());
ops_timers_core(&c1,&t1);
OPS_kernels[5].time += t1-t2;
}
ops_set_dirtybit_device(args, 1);
ops_set_halo_dirtybit3(&args[0],range);
if (OPS_diags > 1) {
//Update kernel record
ops_timers_core(&c2,&t2);
OPS_kernels[5].mpi_time += t2-t1;
OPS_kernels[5].transfer += ops_compute_transfer(dim, start, end, &arg0);
}
}
|
e8d4503b9328e1cc38e7bbfcae35c4c3380fa4fa.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
__global__ void transpose_smem_pad(int * in, int* out, int nx, int ny)
{
__shared__ int tile[BDIMY][BDIMX + IPAD];
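	// Note: the extra IPAD columns pad each shared-memory row so that the
	// column-order reads of tile[i_col][i_row] below hit different banks,
	// avoiding shared-memory bank conflicts (assuming IPAD is chosen for that purpose).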
//input index
int ix, iy, in_index;
//output index
int i_row, i_col, _1d_index, out_ix, out_iy, out_index;
//ix and iy calculation for input index
ix = blockDim.x * blockIdx.x + threadIdx.x;
iy = blockDim.y * blockIdx.y + threadIdx.y;
//input index
in_index = iy * nx + ix;
	//1D index calculation for shared memory
_1d_index = threadIdx.y * blockDim.x + threadIdx.x;
	//col major row and col index calculation
i_row = _1d_index / blockDim.y;
i_col = _1d_index % blockDim.y;
//coordinate for transpose matrix
out_ix = blockIdx.y * blockDim.y + i_col;
out_iy = blockIdx.x * blockDim.x + i_row;
//output array access in row major format
out_index = out_iy * ny + out_ix;
if (ix < nx && iy < ny)
{
//load from in array in row major and store to shared memory in row major
tile[threadIdx.y][threadIdx.x] = in[in_index];
		//wait until all the threads load the values
__syncthreads();
out[out_index] = tile[i_col][i_row];
}
} | e8d4503b9328e1cc38e7bbfcae35c4c3380fa4fa.cu | #include "includes.h"
__global__ void transpose_smem_pad(int * in, int* out, int nx, int ny)
{
__shared__ int tile[BDIMY][BDIMX + IPAD];
//input index
int ix, iy, in_index;
//output index
int i_row, i_col, _1d_index, out_ix, out_iy, out_index;
//ix and iy calculation for input index
ix = blockDim.x * blockIdx.x + threadIdx.x;
iy = blockDim.y * blockIdx.y + threadIdx.y;
//input index
in_index = iy * nx + ix;
//1D index calculation fro shared memory
_1d_index = threadIdx.y * blockDim.x + threadIdx.x;
//col major row and col index calcuation
i_row = _1d_index / blockDim.y;
i_col = _1d_index % blockDim.y;
//coordinate for transpose matrix
out_ix = blockIdx.y * blockDim.y + i_col;
out_iy = blockIdx.x * blockDim.x + i_row;
//output array access in row major format
out_index = out_iy * ny + out_ix;
if (ix < nx && iy < ny)
{
//load from in array in row major and store to shared memory in row major
tile[threadIdx.y][threadIdx.x] = in[in_index];
//wait untill all the threads load the values
__syncthreads();
out[out_index] = tile[i_col][i_row];
}
} |
7fbac226cdb44881d8b176ab48ebd8c4e2f8906f.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
#include "oneflow/core/framework/framework.h"
#include <hipcub/hipcub.hpp>
#include "oneflow/core/kernel/new_kernel_util.h"
#include "oneflow/core/kernel/cuda_graph_support.h"
#include "oneflow/core/ep/cuda/cuda_stream.h"
namespace oneflow {
namespace {
template<typename T, int32_t N>
struct Param {
const T* x[N];
int64_t x_elem_cnt[N];
int64_t* y;
int64_t num_x;
};
using CuInt64T = unsigned long long int;
__device__ __inline__ int64_t AtomicAdd(int64_t* address, int64_t val) {
static_assert(sizeof(int64_t) == sizeof(CuInt64T), "size error");
return static_cast<int64_t>(
atomicAdd(reinterpret_cast<CuInt64T*>(address), static_cast<CuInt64T>(val)));
}
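// Note: CUDA/HIP expose 64-bit atomicAdd only for unsigned long long, so the
// int64_t counter is reinterpreted here; two's-complement addition yields the
// same bit pattern, so the signed result remains correct.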
template<typename T>
__global__ void CountNotFiniteGpu(const int64_t n, const T* x, int64_t* y) {
typedef hipcub::BlockReduce<int64_t, kCudaThreadsNumPerBlock> BlockReduce;
__shared__ typename BlockReduce::TempStorage cub_reduce_tmp_storage;
int64_t thread_count = 0;
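  // Each thread counts non-finite values over its grid-stride range, the counts
  // are reduced across the block, and one thread publishes the block total with
  // a single atomic add.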
CUDA_1D_KERNEL_LOOP(i, n) {
if (!isfinite(x[i])) { thread_count += 1; }
}
__syncthreads();
int64_t block_count_sum = BlockReduce(cub_reduce_tmp_storage).Reduce(thread_count, hipcub::Sum());
if (threadIdx.x == 0) { AtomicAdd(y, block_count_sum); }
}
template<typename T, int32_t N>
__global__ void MultiCountNotFiniteGpu(Param<T, N> param) {
typedef hipcub::BlockReduce<int64_t, kCudaThreadsNumPerBlock> BlockReduce;
__shared__ typename BlockReduce::TempStorage cub_reduce_tmp_storage;
int64_t thread_count = 0;
for (int32_t k = 0; k < param.num_x; ++k) {
CUDA_1D_KERNEL_LOOP(i, param.x_elem_cnt[k]) {
if (!isfinite(param.x[k][i])) { thread_count += 1; }
}
}
__syncthreads();
int64_t block_count_sum = BlockReduce(cub_reduce_tmp_storage).Reduce(thread_count, hipcub::Sum());
if (threadIdx.x == 0) { AtomicAdd(param.y, block_count_sum); }
}
constexpr int64_t kCountNotFiniteNumBlocks = 512;
int GetCountNotFiniteNumBlocks(const int64_t elem_cnt) {
return ::min((elem_cnt + kCudaThreadsNumPerBlock - 1) / kCudaThreadsNumPerBlock,
kCountNotFiniteNumBlocks);
}
} // namespace
template<typename T>
class CountNotFiniteGpuKernel final : public user_op::OpKernel, public user_op::CudaGraphSupport {
public:
CountNotFiniteGpuKernel() = default;
~CountNotFiniteGpuKernel() override = default;
private:
using user_op::OpKernel::Compute;
void Compute(user_op::KernelComputeContext* ctx) const override {
const user_op::Tensor* x = ctx->Tensor4ArgNameAndIndex("x", 0);
user_op::Tensor* y = ctx->Tensor4ArgNameAndIndex("y", 0);
const int64_t elem_cnt = x->shape().elem_cnt();
Memset<DeviceType::kGPU>(ctx->stream(), y->mut_dptr<int64_t>(), 0,
y->shape().elem_cnt() * sizeof(int64_t));
hipLaunchKernelGGL(( CountNotFiniteGpu<T>), dim3(GetCountNotFiniteNumBlocks(elem_cnt)), dim3(kCudaThreadsNumPerBlock), 0,
ctx->stream()->As<ep::CudaStream>()->cuda_stream(),
elem_cnt, x->dptr<T>(), y->mut_dptr<int64_t>());
}
bool AlwaysComputeWhenAllOutputsEmpty() const override { return false; }
};
#define REGISTER_COUNT_NOT_FINITE_GPU_KERNEL(dtype) \
REGISTER_USER_KERNEL("count_not_finite") \
.SetCreateFn<CountNotFiniteGpuKernel<dtype>>() \
.SetIsMatchedHob((user_op::HobDeviceType() == DeviceType::kGPU) \
&& (user_op::HobDataType("x", 0) == GetDataType<dtype>::value));
REGISTER_COUNT_NOT_FINITE_GPU_KERNEL(float)
REGISTER_COUNT_NOT_FINITE_GPU_KERNEL(double)
template<typename T>
class MultiCountNotFiniteGpuKernel final : public user_op::OpKernel,
public user_op::CudaGraphSupport {
public:
MultiCountNotFiniteGpuKernel() = default;
~MultiCountNotFiniteGpuKernel() override = default;
private:
using user_op::OpKernel::Compute;
void Compute(user_op::KernelComputeContext* ctx) const override {
user_op::Tensor* y = ctx->Tensor4ArgNameAndIndex("y", 0);
Param<T, 128> para;
Memset<DeviceType::kGPU>(ctx->stream(), y->mut_dptr<int64_t>(), 0,
y->shape().elem_cnt() * sizeof(int64_t));
para.y = y->mut_dptr<int64_t>();
int64_t remain_size = ctx->inputs().size();
int64_t input_id = 0;
while (remain_size > 0) {
if (remain_size > 128) {
remain_size -= 128;
para.num_x = 128;
} else {
para.num_x = remain_size;
remain_size = 0;
}
int64_t max_elem_cnt = 0;
for (int32_t i = 0; i < para.num_x; ++i) {
const user_op::Tensor* x = ctx->Tensor4ArgNameAndIndex("x", input_id);
input_id++;
para.x[i] = x->dptr<T>();
para.x_elem_cnt[i] = x->shape().elem_cnt();
max_elem_cnt = ::max(max_elem_cnt, x->shape().elem_cnt());
}
hipLaunchKernelGGL(( MultiCountNotFiniteGpu<T, 128>)
, dim3(GetCountNotFiniteNumBlocks(max_elem_cnt)), dim3(kCudaThreadsNumPerBlock), 0,
ctx->stream()->As<ep::CudaStream>()->cuda_stream(), para);
}
}
bool AlwaysComputeWhenAllOutputsEmpty() const override { return false; }
};
#define REGISTER_MULTI_COUNT_NOT_FINITE_GPU_KERNEL(dtype) \
REGISTER_USER_KERNEL("multi_count_not_finite") \
.SetCreateFn<MultiCountNotFiniteGpuKernel<dtype>>() \
.SetIsMatchedHob((user_op::HobDeviceType() == DeviceType::kGPU) \
&& (user_op::HobDataType("x", 0) == GetDataType<dtype>::value));
REGISTER_MULTI_COUNT_NOT_FINITE_GPU_KERNEL(float)
REGISTER_MULTI_COUNT_NOT_FINITE_GPU_KERNEL(double)
} // namespace oneflow
| 7fbac226cdb44881d8b176ab48ebd8c4e2f8906f.cu | /*
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
#include "oneflow/core/framework/framework.h"
#include <cub/cub.cuh>
#include "oneflow/core/kernel/new_kernel_util.h"
#include "oneflow/core/kernel/cuda_graph_support.h"
#include "oneflow/core/ep/cuda/cuda_stream.h"
namespace oneflow {
namespace {
template<typename T, int32_t N>
struct Param {
const T* x[N];
int64_t x_elem_cnt[N];
int64_t* y;
int64_t num_x;
};
using CuInt64T = unsigned long long int;
__device__ __inline__ int64_t AtomicAdd(int64_t* address, int64_t val) {
static_assert(sizeof(int64_t) == sizeof(CuInt64T), "size error");
return static_cast<int64_t>(
atomicAdd(reinterpret_cast<CuInt64T*>(address), static_cast<CuInt64T>(val)));
}
template<typename T>
__global__ void CountNotFiniteGpu(const int64_t n, const T* x, int64_t* y) {
typedef cub::BlockReduce<int64_t, kCudaThreadsNumPerBlock> BlockReduce;
__shared__ typename BlockReduce::TempStorage cub_reduce_tmp_storage;
int64_t thread_count = 0;
CUDA_1D_KERNEL_LOOP(i, n) {
if (!isfinite(x[i])) { thread_count += 1; }
}
__syncthreads();
int64_t block_count_sum = BlockReduce(cub_reduce_tmp_storage).Reduce(thread_count, cub::Sum());
if (threadIdx.x == 0) { AtomicAdd(y, block_count_sum); }
}
template<typename T, int32_t N>
__global__ void MultiCountNotFiniteGpu(Param<T, N> param) {
typedef cub::BlockReduce<int64_t, kCudaThreadsNumPerBlock> BlockReduce;
__shared__ typename BlockReduce::TempStorage cub_reduce_tmp_storage;
int64_t thread_count = 0;
for (int32_t k = 0; k < param.num_x; ++k) {
CUDA_1D_KERNEL_LOOP(i, param.x_elem_cnt[k]) {
if (!isfinite(param.x[k][i])) { thread_count += 1; }
}
}
__syncthreads();
int64_t block_count_sum = BlockReduce(cub_reduce_tmp_storage).Reduce(thread_count, cub::Sum());
if (threadIdx.x == 0) { AtomicAdd(param.y, block_count_sum); }
}
constexpr int64_t kCountNotFiniteNumBlocks = 512;
int GetCountNotFiniteNumBlocks(const int64_t elem_cnt) {
return std::min((elem_cnt + kCudaThreadsNumPerBlock - 1) / kCudaThreadsNumPerBlock,
kCountNotFiniteNumBlocks);
}
} // namespace
template<typename T>
class CountNotFiniteGpuKernel final : public user_op::OpKernel, public user_op::CudaGraphSupport {
public:
CountNotFiniteGpuKernel() = default;
~CountNotFiniteGpuKernel() override = default;
private:
using user_op::OpKernel::Compute;
void Compute(user_op::KernelComputeContext* ctx) const override {
const user_op::Tensor* x = ctx->Tensor4ArgNameAndIndex("x", 0);
user_op::Tensor* y = ctx->Tensor4ArgNameAndIndex("y", 0);
const int64_t elem_cnt = x->shape().elem_cnt();
Memset<DeviceType::kGPU>(ctx->stream(), y->mut_dptr<int64_t>(), 0,
y->shape().elem_cnt() * sizeof(int64_t));
CountNotFiniteGpu<T><<<GetCountNotFiniteNumBlocks(elem_cnt), kCudaThreadsNumPerBlock, 0,
ctx->stream()->As<ep::CudaStream>()->cuda_stream()>>>(
elem_cnt, x->dptr<T>(), y->mut_dptr<int64_t>());
}
bool AlwaysComputeWhenAllOutputsEmpty() const override { return false; }
};
#define REGISTER_COUNT_NOT_FINITE_GPU_KERNEL(dtype) \
REGISTER_USER_KERNEL("count_not_finite") \
.SetCreateFn<CountNotFiniteGpuKernel<dtype>>() \
.SetIsMatchedHob((user_op::HobDeviceType() == DeviceType::kGPU) \
&& (user_op::HobDataType("x", 0) == GetDataType<dtype>::value));
REGISTER_COUNT_NOT_FINITE_GPU_KERNEL(float)
REGISTER_COUNT_NOT_FINITE_GPU_KERNEL(double)
template<typename T>
class MultiCountNotFiniteGpuKernel final : public user_op::OpKernel,
public user_op::CudaGraphSupport {
public:
MultiCountNotFiniteGpuKernel() = default;
~MultiCountNotFiniteGpuKernel() override = default;
private:
using user_op::OpKernel::Compute;
void Compute(user_op::KernelComputeContext* ctx) const override {
user_op::Tensor* y = ctx->Tensor4ArgNameAndIndex("y", 0);
Param<T, 128> para;
Memset<DeviceType::kGPU>(ctx->stream(), y->mut_dptr<int64_t>(), 0,
y->shape().elem_cnt() * sizeof(int64_t));
para.y = y->mut_dptr<int64_t>();
int64_t remain_size = ctx->inputs().size();
int64_t input_id = 0;
while (remain_size > 0) {
if (remain_size > 128) {
remain_size -= 128;
para.num_x = 128;
} else {
para.num_x = remain_size;
remain_size = 0;
}
int64_t max_elem_cnt = 0;
for (int32_t i = 0; i < para.num_x; ++i) {
const user_op::Tensor* x = ctx->Tensor4ArgNameAndIndex("x", input_id);
input_id++;
para.x[i] = x->dptr<T>();
para.x_elem_cnt[i] = x->shape().elem_cnt();
max_elem_cnt = std::max(max_elem_cnt, x->shape().elem_cnt());
}
MultiCountNotFiniteGpu<T, 128>
<<<GetCountNotFiniteNumBlocks(max_elem_cnt), kCudaThreadsNumPerBlock, 0,
ctx->stream()->As<ep::CudaStream>()->cuda_stream()>>>(para);
}
}
bool AlwaysComputeWhenAllOutputsEmpty() const override { return false; }
};
#define REGISTER_MULTI_COUNT_NOT_FINITE_GPU_KERNEL(dtype) \
REGISTER_USER_KERNEL("multi_count_not_finite") \
.SetCreateFn<MultiCountNotFiniteGpuKernel<dtype>>() \
.SetIsMatchedHob((user_op::HobDeviceType() == DeviceType::kGPU) \
&& (user_op::HobDataType("x", 0) == GetDataType<dtype>::value));
REGISTER_MULTI_COUNT_NOT_FINITE_GPU_KERNEL(float)
REGISTER_MULTI_COUNT_NOT_FINITE_GPU_KERNEL(double)
} // namespace oneflow
|
9f98f9eebd2245a08e61a91f994228b30c4addb5.hip | // !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
#include "convertToRGB.h"
__global__ static void convertToRGBKernel(const uint16_t *pV210, uint16_t *tt, int nSrcWidth,
int nDstWidth, int nDstHeight) {
int tid = blockIdx.x * blockDim.x + threadIdx.x;
int tidd = blockIdx.y * blockDim.y + threadIdx.y;
uint32_t v0, y0, u0, y2, u1, y1, u2, y3, v1, y5, v2, y4;
uint4 pF;
if (tid < (nSrcWidth / 8) && tidd < nDstHeight) {
int j = tidd * nSrcWidth;
int k = tid * 8;
pF.x = (uint32_t)pV210[j + k + 0] + ((uint32_t)pV210[j + k + 1] << 16);
pF.y = (uint32_t)pV210[j + k + 2] + ((uint32_t)pV210[j + k + 3] << 16);
pF.z = (uint32_t)pV210[j + k + 4] + ((uint32_t)pV210[j + k + 5] << 16);
pF.w = (uint32_t)pV210[j + k + 6] + ((uint32_t)pV210[j + k + 7] << 16);
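		// Note: each 32-bit V210 word packs three 10-bit components (bits 0-9, 10-19,
		// 20-29); the masks and shifts below unpack them. Luma samples are scaled by
		// 1000 so the colour conversion can be done in integer arithmetic.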
v0 = (uint32_t)((pF.x & 0x3FF00000) >> 20);
y0 = (uint32_t)((pF.x & 0x000FFC00) >> 10) * 1000;
u0 = (uint32_t)(pF.x & 0x000003FF);
y2 = (uint32_t)((pF.y & 0x3FF00000) >> 20) * 1000;
u1 = (uint32_t)((pF.y & 0x000FFC00) >> 10);
y1 = (uint32_t)(pF.y & 0x000003FF) * 1000;
u2 = (uint32_t)((pF.z & 0x3FF00000) >> 20);
y3 = (uint32_t)((pF.z & 0x000FFC00) >> 10) * 1000;
v1 = (uint32_t)(pF.z & 0x000003FF);
y5 = (uint32_t)((pF.w & 0x3FF00000) >> 20) * 1000;
v2 = (uint32_t)((pF.w & 0x000FFC00) >> 10);
y4 = (uint32_t)(pF.w & 0x000003FF) * 1000;
k = tid * 18;
j *= 9;
j /= 4;
int r = 1407 * v0 - 720384, g = 716 * v0 + 345 * u0 - 543232, b = 1779 * u0 - 910848;
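		// Note: these constants look like BT.601-style YCbCr->RGB coefficients scaled
		// by 1000, with 512 as the 10-bit chroma midpoint (e.g. 720384 == 1407 * 512);
		// that interpretation is an assumption, not documented in this source.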
tt[j + k + 0] = (y0 + r) / 1000;
tt[j + k + 1] = (y0 - g) / 1000;
tt[j + k + 2] = (y0 + b) / 1000;
tt[j + k + 3] = (y1 + r) / 1000;
tt[j + k + 4] = (y1 - g) / 1000;
tt[j + k + 5] = (y1 + b) / 1000;
r = 1407 * v1 - 720384, g = 716 * v1 + 345 * u1 - 543232, b = 1779 * u1 - 910848;
tt[j + k + 6] = (y2 + r) / 1000;
tt[j + k + 7] = (y2 - g) / 1000;
tt[j + k + 8] = (y2 + b) / 1000;
tt[j + k + 9] = (y3 + r) / 1000;
tt[j + k + 10] = (y3 - g) / 1000;
tt[j + k + 11] = (y3 + b) / 1000;
r = 1407 * v2 - 720384, g = 716 * v2 + 345 * u2 - 543232, b = 1779 * u2 - 910848;
tt[j + k + 12] = (y4 + r) / 1000;
tt[j + k + 13] = (y4 - g) / 1000;
tt[j + k + 14] = (y4 + b) / 1000;
tt[j + k + 15] = (y5 + r) / 1000;
tt[j + k + 16] = (y5 - g) / 1000;
tt[j + k + 17] = (y5 + b) / 1000;
}
}
void convertToRGB(uint16_t *dpSrc, uint16_t *dpDst, int nSrcWidth, int nDstWidth, int nDstHeight, hipStream_t stream) {
dim3 blocks(32, 16, 1);
dim3 grids((nSrcWidth + blocks.x - 1) / blocks.x, (nDstHeight + blocks.y - 1) / blocks.y, 1);
convertToRGBKernel << <grids, blocks, 0, stream >> > (dpSrc, dpDst, nSrcWidth, nDstWidth, nDstHeight);
}
__global__ static void convertToRGBTestKernel(const uint16_t *pV210, uint8_t *tt, int nSrcWidth,
int nDstWidth, int nDstHeight) {
int tid = blockIdx.x * blockDim.x + threadIdx.x;
int tidd = blockIdx.y * blockDim.y + threadIdx.y;
uint32_t v0, y0, u0, y2, u1, y1, u2, y3, v1, y5, v2, y4;
uint4 pF;
if (tid < (nSrcWidth / 8) && tidd < nDstHeight) {
int j = tidd * nSrcWidth;
int k = tid * 8;
pF.x = (uint32_t)pV210[j + k + 0] + ((uint32_t)pV210[j + k + 1] << 16);
pF.y = (uint32_t)pV210[j + k + 2] + ((uint32_t)pV210[j + k + 3] << 16);
pF.z = (uint32_t)pV210[j + k + 4] + ((uint32_t)pV210[j + k + 5] << 16);
pF.w = (uint32_t)pV210[j + k + 6] + ((uint32_t)pV210[j + k + 7] << 16);
v0 = (uint32_t)((pF.x & 0x3FF00000) >> 20);
y0 = (uint32_t)((pF.x & 0x000FFC00) >> 10) * 1000;
u0 = (uint32_t)(pF.x & 0x000003FF);
y2 = (uint32_t)((pF.y & 0x3FF00000) >> 20) * 1000;
u1 = (uint32_t)((pF.y & 0x000FFC00) >> 10);
y1 = (uint32_t)(pF.y & 0x000003FF) * 1000;
u2 = (uint32_t)((pF.z & 0x3FF00000) >> 20);
y3 = (uint32_t)((pF.z & 0x000FFC00) >> 10) * 1000;
v1 = (uint32_t)(pF.z & 0x000003FF);
y5 = (uint32_t)((pF.w & 0x3FF00000) >> 20) * 1000;
v2 = (uint32_t)((pF.w & 0x000FFC00) >> 10);
y4 = (uint32_t)(pF.w & 0x000003FF) * 1000;
k = tid * 18;
j *= 9;
j /= 4;
int r = 1407 * v0 - 720384, g = 716 * v0 + 345 * u0 - 543232, b = 1779 * u0 - 910848;
tt[j + k + 0] = (y0 + r) * 0.249 / 1000;
tt[j + k + 1] = (y0 - g) * 0.249 / 1000;
tt[j + k + 2] = (y0 + b) * 0.249 / 1000;
tt[j + k + 3] = (y1 + r) * 0.249 / 1000;
tt[j + k + 4] = (y1 - g) * 0.249 / 1000;
tt[j + k + 5] = (y1 + b) * 0.249 / 1000;
r = 1407 * v1 - 720384, g = 716 * v1 + 345 * u1 - 543232, b = 1779 * u1 - 910848;
tt[j + k + 6] = (y2 + r) * 0.249 / 1000;
tt[j + k + 7] = (y2 - g) * 0.249 / 1000;
tt[j + k + 8] = (y2 + b) * 0.249 / 1000;
tt[j + k + 9] = (y3 + r) * 0.249 / 1000;
tt[j + k + 10] = (y3 - g) * 0.249 / 1000;
tt[j + k + 11] = (y3 + b) * 0.249 / 1000;
r = 1407 * v2 - 720384, g = 716 * v2 + 345 * u2 - 543232, b = 1779 * u2 - 910848;
tt[j + k + 12] = (y4 + r) * 0.249 / 1000;
tt[j + k + 13] = (y4 - g) * 0.249 / 1000;
tt[j + k + 14] = (y4 + b) * 0.249 / 1000;
tt[j + k + 15] = (y5 + r) * 0.249 / 1000;
tt[j + k + 16] = (y5 - g) * 0.249 / 1000;
tt[j + k + 17] = (y5 + b) * 0.249 / 1000;
}
}
void convertToRGBTest(uint16_t *dpSrc, uint8_t *dpDst, int nSrcWidth, int nDstWidth, int nDstHeight,
hipStream_t stream) {
dim3 blocks(32, 16, 1);
dim3 grids((nSrcWidth + blocks.x - 1) / blocks.x, (nDstHeight + blocks.y - 1) / blocks.y, 1);
convertToRGBTestKernel << <grids, blocks, 0, stream >> > (dpSrc, dpDst, nSrcWidth, nDstWidth, nDstHeight);
}
__global__ static void convertVToRGBKernel(const uint16_t *pV210, uint8_t *tt1, int nSrcWidth,
int nDstWidth, int nDstHeight, int *lookupTable) {
int tid = blockIdx.x * blockDim.x + threadIdx.x;
int tidd = blockIdx.y * blockDim.y + threadIdx.y;
uint32_t v0, y0, u0, y2, u1, y1, u2, y3, v1, y5, v2, y4;
uint16_t tt[6];
uint4 pF;
int nDstH = nDstHeight;
int nDstW = nSrcWidth / 8;
if (tid < nDstW && tidd < nDstH) {
int j = tidd * nSrcWidth;
int k = tid * 8;
pF.x = (uint32_t)pV210[j + k + 0] + ((uint32_t)pV210[j + k + 1] << 16);
pF.y = (uint32_t)pV210[j + k + 2] + ((uint32_t)pV210[j + k + 3] << 16);
pF.z = (uint32_t)pV210[j + k + 4] + ((uint32_t)pV210[j + k + 5] << 16);
pF.w = (uint32_t)pV210[j + k + 6] + ((uint32_t)pV210[j + k + 7] << 16);
v0 = (uint32_t)((pF.x & 0x3FF00000) >> 20);
y0 = (uint32_t)((pF.x & 0x000FFC00) >> 10) * 1000;
u0 = (uint32_t)(pF.x & 0x000003FF);
y2 = (uint32_t)((pF.y & 0x3FF00000) >> 20) * 1000;
u1 = (uint32_t)((pF.y & 0x000FFC00) >> 10);
y1 = (uint32_t)(pF.y & 0x000003FF) * 1000;
u2 = (uint32_t)((pF.z & 0x3FF00000) >> 20);
y3 = (uint32_t)((pF.z & 0x000FFC00) >> 10) * 1000;
v1 = (uint32_t)(pF.z & 0x000003FF);
y5 = (uint32_t)((pF.w & 0x3FF00000) >> 20) * 1000;
v2 = (uint32_t)((pF.w & 0x000FFC00) >> 10);
y4 = (uint32_t)(pF.w & 0x000003FF) * 1000;
k = tid * 18;
j *= 9;
j /= 4;
int r = 1407 * v0 - 720384, g = 716 * v0 + 345 * u0 - 543232, b = 1779 * u0 - 910848;
tt[0] = (y0 + r) / 1000;
tt[1] = (y0 - g) / 1000;
tt[2] = (y0 + b) / 1000;
tt[3] = (y1 + r) / 1000;
tt[4] = (y1 - g) / 1000;
tt[5] = (y1 + b) / 1000;
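		// Note: lookupTable is built by the caller; it presumably maps these 10-bit
		// intermediate values down to 8-bit output (likely with clamping), but its
		// exact contents are an assumption here.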
tt1[j + k + 0] = lookupTable[tt[0]];
tt1[j + k + 1] = lookupTable[tt[1]];
tt1[j + k + 2] = lookupTable[tt[2]];
tt1[j + k + 3] = lookupTable[tt[3]];
tt1[j + k + 4] = lookupTable[tt[4]];
tt1[j + k + 5] = lookupTable[tt[5]];
r = 1407 * v1 - 720384, g = 716 * v1 + 345 * u1 - 543232, b = 1779 * u1 - 910848;
tt[0] = (y2 + r) / 1000;
tt[1] = (y2 - g) / 1000;
tt[2] = (y2 + b) / 1000;
tt[3] = (y3 + r) / 1000;
tt[4] = (y3 - g) / 1000;
tt[5] = (y3 + b) / 1000;
tt1[j + k + 6] = lookupTable[tt[0]];
tt1[j + k + 7] = lookupTable[tt[1]];
tt1[j + k + 8] = lookupTable[tt[2]];
tt1[j + k + 9] = lookupTable[tt[3]];
tt1[j + k + 10] = lookupTable[tt[4]];
tt1[j + k + 11] = lookupTable[tt[5]];
r = 1407 * v2 - 720384, g = 716 * v2 + 345 * u2 - 543232, b = 1779 * u2 - 910848;
tt[0] = (y4 + r) / 1000;
tt[1] = (y4 - g) / 1000;
tt[2] = (y4 + b) / 1000;
tt[3] = (y5 + r) / 1000;
tt[4] = (y5 - g) / 1000;
tt[5] = (y5 + b) / 1000;
tt1[j + k + 12] = lookupTable[tt[0]];
tt1[j + k + 13] = lookupTable[tt[1]];
tt1[j + k + 14] = lookupTable[tt[2]];
tt1[j + k + 15] = lookupTable[tt[3]];
tt1[j + k + 16] = lookupTable[tt[4]];
tt1[j + k + 17] = lookupTable[tt[5]];
}
}
__global__ static void convertPToRGBKernel(const uint16_t *dpSrc, uint8_t *tt1, int nSrcWidth,
int nDstWidth, int nDstHeight, int *lookupTable) {
int tid = blockIdx.x * blockDim.x + threadIdx.x;
int tidd = blockIdx.y * blockDim.y + threadIdx.y;
uint32_t v0, y0, u0, y1;
uint16_t tt[6];
int nDstH = nDstHeight;
int nDstW = nSrcWidth / 2;
if (tid < nDstW && tidd < nDstH) {
int k = tid * 2;
int j = tidd * nSrcWidth;
y0 = (uint32_t)dpSrc[j + k + 0] * 1000;
y1 = (uint32_t)dpSrc[j + k + 1] * 1000;
k = tid;
j = tidd * nSrcWidth / 2 + nDstHeight * nSrcWidth;
u0 = (uint32_t)dpSrc[j + k + 0];
j = tidd * nSrcWidth / 2 + nDstHeight * nSrcWidth * 3 / 2;
v0 = (uint32_t)dpSrc[j + k + 0];
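		// Note: the plane offsets above imply a planar layout with a full-resolution Y
		// plane followed by half-width, full-height U and V planes (4:2:2-style); this
		// is inferred from the index arithmetic rather than documented.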
k = tid * 6;
j = tidd * nDstWidth * 3;
int r = 1407 * v0 - 720384, g = 716 * v0 + 345 * u0 - 543232, b = 1779 * u0 - 910848;
tt[0] = (y0 + r) / 1000;
tt[1] = (y0 - g) / 1000;
tt[2] = (y0 + b) / 1000;
tt[3] = (y1 + r) / 1000;
tt[4] = (y1 - g) / 1000;
tt[5] = (y1 + b) / 1000;
tt1[j + k + 0] = lookupTable[tt[0]];
tt1[j + k + 1] = lookupTable[tt[1]];
tt1[j + k + 2] = lookupTable[tt[2]];
tt1[j + k + 3] = lookupTable[tt[3]];
tt1[j + k + 4] = lookupTable[tt[4]];
tt1[j + k + 5] = lookupTable[tt[5]];
}
}
void convertToRGB(uint16_t *dpSrc, uint8_t *dpDst, int nSrcWidth, int nDstWidth, int nDstHeight,
int *lookupTable, yuv_format yuvFormat, hipStream_t stream) {
if (yuvFormat == PACKED) {
dim3 blocks(32, 16, 1);
dim3 grids((nSrcWidth + blocks.x - 1) / blocks.x, (nDstHeight + blocks.y - 1) / blocks.y, 1);
convertVToRGBKernel << <grids, blocks, 0, stream >> > (dpSrc, dpDst, nSrcWidth, nDstWidth, nDstHeight, lookupTable);
}
else if (yuvFormat == PLANAR) {
dim3 blocks(32, 32, 1);
dim3 grids((nSrcWidth + blocks.x - 1) / blocks.x, (((nDstHeight * 2) + blocks.y) - 1) / blocks.y, 1);
convertPToRGBKernel << <grids, blocks, 0, stream >> > (dpSrc, dpDst, nSrcWidth, nDstWidth, nDstHeight, lookupTable);
}
}
__global__ static void convertToNppiKernel(uint16_t *dSrc, uint8_t *dDst,
int nSrcWidth, int nDstWidth, int nDstHeight, int *lookupTable) {
int tid = blockIdx.x * blockDim.x + threadIdx.x;
int tidd = blockIdx.y * blockDim.y + threadIdx.y;
uint32_t v0, y0, u0, y2, u1, y1, u2, y3, v1, y5, v2, y4;
uint16_t tt[6];
uint4 pF;
int nDstH = nDstHeight;
int nDstW = nSrcWidth / 8;
if (tid < nDstW && tidd < nDstH) {
int j = tidd * nSrcWidth;
int k = tid * 8;
pF.x = (uint32_t)dSrc[j + k + 0] + ((uint32_t)dSrc[j + k + 1] << 16);
pF.y = (uint32_t)dSrc[j + k + 2] + ((uint32_t)dSrc[j + k + 3] << 16);
pF.z = (uint32_t)dSrc[j + k + 4] + ((uint32_t)dSrc[j + k + 5] << 16);
pF.w = (uint32_t)dSrc[j + k + 6] + ((uint32_t)dSrc[j + k + 7] << 16);
v0 = (uint32_t)((pF.x & 0x3FF00000) >> 20);
y0 = (uint32_t)((pF.x & 0x000FFC00) >> 10) * 1000;
u0 = (uint32_t)(pF.x & 0x000003FF);
y2 = (uint32_t)((pF.y & 0x3FF00000) >> 20) * 1000;
u1 = (uint32_t)((pF.y & 0x000FFC00) >> 10);
y1 = (uint32_t)(pF.y & 0x000003FF) * 1000;
u2 = (uint32_t)((pF.z & 0x3FF00000) >> 20);
y3 = (uint32_t)((pF.z & 0x000FFC00) >> 10) * 1000;
v1 = (uint32_t)(pF.z & 0x000003FF);
y5 = (uint32_t)((pF.w & 0x3FF00000) >> 20) * 1000;
v2 = (uint32_t)((pF.w & 0x000FFC00) >> 10);
y4 = (uint32_t)(pF.w & 0x000003FF) * 1000;
k = tid * 18;
j *= 9;
j /= 4;
int r = 1407 * v0 - 720384, g = 716 * v0 + 345 * u0 - 543232, b = 1779 * u0 - 910848;
tt[0] = (y0 + r) / 1000;
tt[1] = (y0 - g) / 1000;
tt[2] = (y0 + b) / 1000;
tt[3] = (y1 + r) / 1000;
tt[4] = (y1 - g) / 1000;
tt[5] = (y1 + b) / 1000;
dDst[j + k + 0] = lookupTable[tt[0]];
dDst[j + k + 1] = lookupTable[tt[1]];
dDst[j + k + 2] = lookupTable[tt[2]];
dDst[j + k + 3] = lookupTable[tt[3]];
dDst[j + k + 4] = lookupTable[tt[4]];
dDst[j + k + 5] = lookupTable[tt[5]];
r = 1407 * v1 - 720384, g = 716 * v1 + 345 * u1 - 543232, b = 1779 * u1 - 910848;
tt[0] = (y2 + r) / 1000;
tt[1] = (y2 - g) / 1000;
tt[2] = (y2 + b) / 1000;
tt[3] = (y3 + r) / 1000;
tt[4] = (y3 - g) / 1000;
tt[5] = (y3 + b) / 1000;
dDst[j + k + 6] = lookupTable[tt[0]];
dDst[j + k + 7] = lookupTable[tt[1]];
dDst[j + k + 8] = lookupTable[tt[2]];
dDst[j + k + 9] = lookupTable[tt[3]];
dDst[j + k + 10] = lookupTable[tt[4]];
dDst[j + k + 11] = lookupTable[tt[5]];
r = 1407 * v2 - 720384, g = 716 * v2 + 345 * u2 - 543232, b = 1779 * u2 - 910848;
tt[0] = (y4 + r) / 1000;
tt[1] = (y4 - g) / 1000;
tt[2] = (y4 + b) / 1000;
tt[3] = (y5 + r) / 1000;
tt[4] = (y5 - g) / 1000;
tt[5] = (y5 + b) / 1000;
dDst[j + k + 12] = lookupTable[tt[0]];
dDst[j + k + 13] = lookupTable[tt[1]];
dDst[j + k + 14] = lookupTable[tt[2]];
dDst[j + k + 15] = lookupTable[tt[3]];
dDst[j + k + 16] = lookupTable[tt[4]];
dDst[j + k + 17] = lookupTable[tt[5]];
}
}
void convertToRGBNpp(uint16_t *dSrc, uint8_t *dDst, int nSrcW, int nDstW, int nDstH,
int *lookupTable, hipStream_t stream) {
dim3 blocks(32, 16, 1);
dim3 grids((nSrcW + blocks.x - 1) / blocks.x, (nDstH + blocks.y - 1) / blocks.y, 1);
convertToNppiKernel <<<grids, blocks, 0, stream >> > (dSrc, dDst, nSrcW, nDstW, nDstH, lookupTable);
} | 9f98f9eebd2245a08e61a91f994228b30c4addb5.cu | #include <cuda.h>
#include <cuda_runtime.h>
#include "convertToRGB.h"
__global__ static void convertToRGBKernel(const uint16_t *pV210, uint16_t *tt, int nSrcWidth,
int nDstWidth, int nDstHeight) {
int tid = blockIdx.x * blockDim.x + threadIdx.x;
int tidd = blockIdx.y * blockDim.y + threadIdx.y;
uint32_t v0, y0, u0, y2, u1, y1, u2, y3, v1, y5, v2, y4;
uint4 pF;
if (tid < (nSrcWidth / 8) && tidd < nDstHeight) {
int j = tidd * nSrcWidth;
int k = tid * 8;
pF.x = (uint32_t)pV210[j + k + 0] + ((uint32_t)pV210[j + k + 1] << 16);
pF.y = (uint32_t)pV210[j + k + 2] + ((uint32_t)pV210[j + k + 3] << 16);
pF.z = (uint32_t)pV210[j + k + 4] + ((uint32_t)pV210[j + k + 5] << 16);
pF.w = (uint32_t)pV210[j + k + 6] + ((uint32_t)pV210[j + k + 7] << 16);
v0 = (uint32_t)((pF.x & 0x3FF00000) >> 20);
y0 = (uint32_t)((pF.x & 0x000FFC00) >> 10) * 1000;
u0 = (uint32_t)(pF.x & 0x000003FF);
y2 = (uint32_t)((pF.y & 0x3FF00000) >> 20) * 1000;
u1 = (uint32_t)((pF.y & 0x000FFC00) >> 10);
y1 = (uint32_t)(pF.y & 0x000003FF) * 1000;
u2 = (uint32_t)((pF.z & 0x3FF00000) >> 20);
y3 = (uint32_t)((pF.z & 0x000FFC00) >> 10) * 1000;
v1 = (uint32_t)(pF.z & 0x000003FF);
y5 = (uint32_t)((pF.w & 0x3FF00000) >> 20) * 1000;
v2 = (uint32_t)((pF.w & 0x000FFC00) >> 10);
y4 = (uint32_t)(pF.w & 0x000003FF) * 1000;
k = tid * 18;
j *= 9;
j /= 4;
int r = 1407 * v0 - 720384, g = 716 * v0 + 345 * u0 - 543232, b = 1779 * u0 - 910848;
tt[j + k + 0] = (y0 + r) / 1000;
tt[j + k + 1] = (y0 - g) / 1000;
tt[j + k + 2] = (y0 + b) / 1000;
tt[j + k + 3] = (y1 + r) / 1000;
tt[j + k + 4] = (y1 - g) / 1000;
tt[j + k + 5] = (y1 + b) / 1000;
r = 1407 * v1 - 720384, g = 716 * v1 + 345 * u1 - 543232, b = 1779 * u1 - 910848;
tt[j + k + 6] = (y2 + r) / 1000;
tt[j + k + 7] = (y2 - g) / 1000;
tt[j + k + 8] = (y2 + b) / 1000;
tt[j + k + 9] = (y3 + r) / 1000;
tt[j + k + 10] = (y3 - g) / 1000;
tt[j + k + 11] = (y3 + b) / 1000;
r = 1407 * v2 - 720384, g = 716 * v2 + 345 * u2 - 543232, b = 1779 * u2 - 910848;
tt[j + k + 12] = (y4 + r) / 1000;
tt[j + k + 13] = (y4 - g) / 1000;
tt[j + k + 14] = (y4 + b) / 1000;
tt[j + k + 15] = (y5 + r) / 1000;
tt[j + k + 16] = (y5 - g) / 1000;
tt[j + k + 17] = (y5 + b) / 1000;
}
}
void convertToRGB(uint16_t *dpSrc, uint16_t *dpDst, int nSrcWidth, int nDstWidth, int nDstHeight, cudaStream_t stream) {
dim3 blocks(32, 16, 1);
dim3 grids((nSrcWidth + blocks.x - 1) / blocks.x, (nDstHeight + blocks.y - 1) / blocks.y, 1);
convertToRGBKernel << <grids, blocks, 0, stream >> > (dpSrc, dpDst, nSrcWidth, nDstWidth, nDstHeight);
}
__global__ static void convertToRGBTestKernel(const uint16_t *pV210, uint8_t *tt, int nSrcWidth,
int nDstWidth, int nDstHeight) {
int tid = blockIdx.x * blockDim.x + threadIdx.x;
int tidd = blockIdx.y * blockDim.y + threadIdx.y;
uint32_t v0, y0, u0, y2, u1, y1, u2, y3, v1, y5, v2, y4;
uint4 pF;
if (tid < (nSrcWidth / 8) && tidd < nDstHeight) {
int j = tidd * nSrcWidth;
int k = tid * 8;
pF.x = (uint32_t)pV210[j + k + 0] + ((uint32_t)pV210[j + k + 1] << 16);
pF.y = (uint32_t)pV210[j + k + 2] + ((uint32_t)pV210[j + k + 3] << 16);
pF.z = (uint32_t)pV210[j + k + 4] + ((uint32_t)pV210[j + k + 5] << 16);
pF.w = (uint32_t)pV210[j + k + 6] + ((uint32_t)pV210[j + k + 7] << 16);
v0 = (uint32_t)((pF.x & 0x3FF00000) >> 20);
y0 = (uint32_t)((pF.x & 0x000FFC00) >> 10) * 1000;
u0 = (uint32_t)(pF.x & 0x000003FF);
y2 = (uint32_t)((pF.y & 0x3FF00000) >> 20) * 1000;
u1 = (uint32_t)((pF.y & 0x000FFC00) >> 10);
y1 = (uint32_t)(pF.y & 0x000003FF) * 1000;
u2 = (uint32_t)((pF.z & 0x3FF00000) >> 20);
y3 = (uint32_t)((pF.z & 0x000FFC00) >> 10) * 1000;
v1 = (uint32_t)(pF.z & 0x000003FF);
y5 = (uint32_t)((pF.w & 0x3FF00000) >> 20) * 1000;
v2 = (uint32_t)((pF.w & 0x000FFC00) >> 10);
y4 = (uint32_t)(pF.w & 0x000003FF) * 1000;
k = tid * 18;
j *= 9;
j /= 4;
int r = 1407 * v0 - 720384, g = 716 * v0 + 345 * u0 - 543232, b = 1779 * u0 - 910848;
tt[j + k + 0] = (y0 + r) * 0.249 / 1000;
tt[j + k + 1] = (y0 - g) * 0.249 / 1000;
tt[j + k + 2] = (y0 + b) * 0.249 / 1000;
tt[j + k + 3] = (y1 + r) * 0.249 / 1000;
tt[j + k + 4] = (y1 - g) * 0.249 / 1000;
tt[j + k + 5] = (y1 + b) * 0.249 / 1000;
r = 1407 * v1 - 720384, g = 716 * v1 + 345 * u1 - 543232, b = 1779 * u1 - 910848;
tt[j + k + 6] = (y2 + r) * 0.249 / 1000;
tt[j + k + 7] = (y2 - g) * 0.249 / 1000;
tt[j + k + 8] = (y2 + b) * 0.249 / 1000;
tt[j + k + 9] = (y3 + r) * 0.249 / 1000;
tt[j + k + 10] = (y3 - g) * 0.249 / 1000;
tt[j + k + 11] = (y3 + b) * 0.249 / 1000;
r = 1407 * v2 - 720384, g = 716 * v2 + 345 * u2 - 543232, b = 1779 * u2 - 910848;
tt[j + k + 12] = (y4 + r) * 0.249 / 1000;
tt[j + k + 13] = (y4 - g) * 0.249 / 1000;
tt[j + k + 14] = (y4 + b) * 0.249 / 1000;
tt[j + k + 15] = (y5 + r) * 0.249 / 1000;
tt[j + k + 16] = (y5 - g) * 0.249 / 1000;
tt[j + k + 17] = (y5 + b) * 0.249 / 1000;
}
}
void convertToRGBTest(uint16_t *dpSrc, uint8_t *dpDst, int nSrcWidth, int nDstWidth, int nDstHeight,
cudaStream_t stream) {
dim3 blocks(32, 16, 1);
dim3 grids((nSrcWidth + blocks.x - 1) / blocks.x, (nDstHeight + blocks.y - 1) / blocks.y, 1);
convertToRGBTestKernel << <grids, blocks, 0, stream >> > (dpSrc, dpDst, nSrcWidth, nDstWidth, nDstHeight);
}
__global__ static void convertVToRGBKernel(const uint16_t *pV210, uint8_t *tt1, int nSrcWidth,
int nDstWidth, int nDstHeight, int *lookupTable) {
int tid = blockIdx.x * blockDim.x + threadIdx.x;
int tidd = blockIdx.y * blockDim.y + threadIdx.y;
uint32_t v0, y0, u0, y2, u1, y1, u2, y3, v1, y5, v2, y4;
uint16_t tt[6];
uint4 pF;
int nDstH = nDstHeight;
int nDstW = nSrcWidth / 8;
if (tid < nDstW && tidd < nDstH) {
int j = tidd * nSrcWidth;
int k = tid * 8;
pF.x = (uint32_t)pV210[j + k + 0] + ((uint32_t)pV210[j + k + 1] << 16);
pF.y = (uint32_t)pV210[j + k + 2] + ((uint32_t)pV210[j + k + 3] << 16);
pF.z = (uint32_t)pV210[j + k + 4] + ((uint32_t)pV210[j + k + 5] << 16);
pF.w = (uint32_t)pV210[j + k + 6] + ((uint32_t)pV210[j + k + 7] << 16);
v0 = (uint32_t)((pF.x & 0x3FF00000) >> 20);
y0 = (uint32_t)((pF.x & 0x000FFC00) >> 10) * 1000;
u0 = (uint32_t)(pF.x & 0x000003FF);
y2 = (uint32_t)((pF.y & 0x3FF00000) >> 20) * 1000;
u1 = (uint32_t)((pF.y & 0x000FFC00) >> 10);
y1 = (uint32_t)(pF.y & 0x000003FF) * 1000;
u2 = (uint32_t)((pF.z & 0x3FF00000) >> 20);
y3 = (uint32_t)((pF.z & 0x000FFC00) >> 10) * 1000;
v1 = (uint32_t)(pF.z & 0x000003FF);
y5 = (uint32_t)((pF.w & 0x3FF00000) >> 20) * 1000;
v2 = (uint32_t)((pF.w & 0x000FFC00) >> 10);
y4 = (uint32_t)(pF.w & 0x000003FF) * 1000;
k = tid * 18;
j *= 9;
j /= 4;
int r = 1407 * v0 - 720384, g = 716 * v0 + 345 * u0 - 543232, b = 1779 * u0 - 910848;
tt[0] = (y0 + r) / 1000;
tt[1] = (y0 - g) / 1000;
tt[2] = (y0 + b) / 1000;
tt[3] = (y1 + r) / 1000;
tt[4] = (y1 - g) / 1000;
tt[5] = (y1 + b) / 1000;
tt1[j + k + 0] = lookupTable[tt[0]];
tt1[j + k + 1] = lookupTable[tt[1]];
tt1[j + k + 2] = lookupTable[tt[2]];
tt1[j + k + 3] = lookupTable[tt[3]];
tt1[j + k + 4] = lookupTable[tt[4]];
tt1[j + k + 5] = lookupTable[tt[5]];
r = 1407 * v1 - 720384, g = 716 * v1 + 345 * u1 - 543232, b = 1779 * u1 - 910848;
tt[0] = (y2 + r) / 1000;
tt[1] = (y2 - g) / 1000;
tt[2] = (y2 + b) / 1000;
tt[3] = (y3 + r) / 1000;
tt[4] = (y3 - g) / 1000;
tt[5] = (y3 + b) / 1000;
tt1[j + k + 6] = lookupTable[tt[0]];
tt1[j + k + 7] = lookupTable[tt[1]];
tt1[j + k + 8] = lookupTable[tt[2]];
tt1[j + k + 9] = lookupTable[tt[3]];
tt1[j + k + 10] = lookupTable[tt[4]];
tt1[j + k + 11] = lookupTable[tt[5]];
r = 1407 * v2 - 720384, g = 716 * v2 + 345 * u2 - 543232, b = 1779 * u2 - 910848;
tt[0] = (y4 + r) / 1000;
tt[1] = (y4 - g) / 1000;
tt[2] = (y4 + b) / 1000;
tt[3] = (y5 + r) / 1000;
tt[4] = (y5 - g) / 1000;
tt[5] = (y5 + b) / 1000;
tt1[j + k + 12] = lookupTable[tt[0]];
tt1[j + k + 13] = lookupTable[tt[1]];
tt1[j + k + 14] = lookupTable[tt[2]];
tt1[j + k + 15] = lookupTable[tt[3]];
tt1[j + k + 16] = lookupTable[tt[4]];
tt1[j + k + 17] = lookupTable[tt[5]];
}
}
__global__ static void convertPToRGBKernel(const uint16_t *dpSrc, uint8_t *tt1, int nSrcWidth,
int nDstWidth, int nDstHeight, int *lookupTable) {
int tid = blockIdx.x * blockDim.x + threadIdx.x;
int tidd = blockIdx.y * blockDim.y + threadIdx.y;
uint32_t v0, y0, u0, y1;
uint16_t tt[6];
int nDstH = nDstHeight;
int nDstW = nSrcWidth / 2;
if (tid < nDstW && tidd < nDstH) {
int k = tid * 2;
int j = tidd * nSrcWidth;
y0 = (uint32_t)dpSrc[j + k + 0] * 1000;
y1 = (uint32_t)dpSrc[j + k + 1] * 1000;
k = tid;
j = tidd * nSrcWidth / 2 + nDstHeight * nSrcWidth;
u0 = (uint32_t)dpSrc[j + k + 0];
j = tidd * nSrcWidth / 2 + nDstHeight * nSrcWidth * 3 / 2;
v0 = (uint32_t)dpSrc[j + k + 0];
k = tid * 6;
j = tidd * nDstWidth * 3;
int r = 1407 * v0 - 720384, g = 716 * v0 + 345 * u0 - 543232, b = 1779 * u0 - 910848;
tt[0] = (y0 + r) / 1000;
tt[1] = (y0 - g) / 1000;
tt[2] = (y0 + b) / 1000;
tt[3] = (y1 + r) / 1000;
tt[4] = (y1 - g) / 1000;
tt[5] = (y1 + b) / 1000;
tt1[j + k + 0] = lookupTable[tt[0]];
tt1[j + k + 1] = lookupTable[tt[1]];
tt1[j + k + 2] = lookupTable[tt[2]];
tt1[j + k + 3] = lookupTable[tt[3]];
tt1[j + k + 4] = lookupTable[tt[4]];
tt1[j + k + 5] = lookupTable[tt[5]];
}
}
void convertToRGB(uint16_t *dpSrc, uint8_t *dpDst, int nSrcWidth, int nDstWidth, int nDstHeight,
int *lookupTable, yuv_format yuvFormat, cudaStream_t stream) {
if (yuvFormat == PACKED) {
dim3 blocks(32, 16, 1);
dim3 grids((nSrcWidth + blocks.x - 1) / blocks.x, (nDstHeight + blocks.y - 1) / blocks.y, 1);
convertVToRGBKernel << <grids, blocks, 0, stream >> > (dpSrc, dpDst, nSrcWidth, nDstWidth, nDstHeight, lookupTable);
}
else if (yuvFormat == PLANAR) {
dim3 blocks(32, 32, 1);
dim3 grids((nSrcWidth + blocks.x - 1) / blocks.x, (((nDstHeight * 2) + blocks.y) - 1) / blocks.y, 1);
convertPToRGBKernel << <grids, blocks, 0, stream >> > (dpSrc, dpDst, nSrcWidth, nDstWidth, nDstHeight, lookupTable);
}
}
__global__ static void convertToNppiKernel(uint16_t *dSrc, uint8_t *dDst,
int nSrcWidth, int nDstWidth, int nDstHeight, int *lookupTable) {
int tid = blockIdx.x * blockDim.x + threadIdx.x;
int tidd = blockIdx.y * blockDim.y + threadIdx.y;
uint32_t v0, y0, u0, y2, u1, y1, u2, y3, v1, y5, v2, y4;
uint16_t tt[6];
uint4 pF;
int nDstH = nDstHeight;
int nDstW = nSrcWidth / 8;
if (tid < nDstW && tidd < nDstH) {
int j = tidd * nSrcWidth;
int k = tid * 8;
pF.x = (uint32_t)dSrc[j + k + 0] + ((uint32_t)dSrc[j + k + 1] << 16);
pF.y = (uint32_t)dSrc[j + k + 2] + ((uint32_t)dSrc[j + k + 3] << 16);
pF.z = (uint32_t)dSrc[j + k + 4] + ((uint32_t)dSrc[j + k + 5] << 16);
pF.w = (uint32_t)dSrc[j + k + 6] + ((uint32_t)dSrc[j + k + 7] << 16);
v0 = (uint32_t)((pF.x & 0x3FF00000) >> 20);
y0 = (uint32_t)((pF.x & 0x000FFC00) >> 10) * 1000;
u0 = (uint32_t)(pF.x & 0x000003FF);
y2 = (uint32_t)((pF.y & 0x3FF00000) >> 20) * 1000;
u1 = (uint32_t)((pF.y & 0x000FFC00) >> 10);
y1 = (uint32_t)(pF.y & 0x000003FF) * 1000;
u2 = (uint32_t)((pF.z & 0x3FF00000) >> 20);
y3 = (uint32_t)((pF.z & 0x000FFC00) >> 10) * 1000;
v1 = (uint32_t)(pF.z & 0x000003FF);
y5 = (uint32_t)((pF.w & 0x3FF00000) >> 20) * 1000;
v2 = (uint32_t)((pF.w & 0x000FFC00) >> 10);
y4 = (uint32_t)(pF.w & 0x000003FF) * 1000;
k = tid * 18;
j *= 9;
j /= 4;
int r = 1407 * v0 - 720384, g = 716 * v0 + 345 * u0 - 543232, b = 1779 * u0 - 910848;
tt[0] = (y0 + r) / 1000;
tt[1] = (y0 - g) / 1000;
tt[2] = (y0 + b) / 1000;
tt[3] = (y1 + r) / 1000;
tt[4] = (y1 - g) / 1000;
tt[5] = (y1 + b) / 1000;
dDst[j + k + 0] = lookupTable[tt[0]];
dDst[j + k + 1] = lookupTable[tt[1]];
dDst[j + k + 2] = lookupTable[tt[2]];
dDst[j + k + 3] = lookupTable[tt[3]];
dDst[j + k + 4] = lookupTable[tt[4]];
dDst[j + k + 5] = lookupTable[tt[5]];
r = 1407 * v1 - 720384, g = 716 * v1 + 345 * u1 - 543232, b = 1779 * u1 - 910848;
tt[0] = (y2 + r) / 1000;
tt[1] = (y2 - g) / 1000;
tt[2] = (y2 + b) / 1000;
tt[3] = (y3 + r) / 1000;
tt[4] = (y3 - g) / 1000;
tt[5] = (y3 + b) / 1000;
dDst[j + k + 6] = lookupTable[tt[0]];
dDst[j + k + 7] = lookupTable[tt[1]];
dDst[j + k + 8] = lookupTable[tt[2]];
dDst[j + k + 9] = lookupTable[tt[3]];
dDst[j + k + 10] = lookupTable[tt[4]];
dDst[j + k + 11] = lookupTable[tt[5]];
r = 1407 * v2 - 720384, g = 716 * v2 + 345 * u2 - 543232, b = 1779 * u2 - 910848;
tt[0] = (y4 + r) / 1000;
tt[1] = (y4 - g) / 1000;
tt[2] = (y4 + b) / 1000;
tt[3] = (y5 + r) / 1000;
tt[4] = (y5 - g) / 1000;
tt[5] = (y5 + b) / 1000;
dDst[j + k + 12] = lookupTable[tt[0]];
dDst[j + k + 13] = lookupTable[tt[1]];
dDst[j + k + 14] = lookupTable[tt[2]];
dDst[j + k + 15] = lookupTable[tt[3]];
dDst[j + k + 16] = lookupTable[tt[4]];
dDst[j + k + 17] = lookupTable[tt[5]];
}
}
void convertToRGBNpp(uint16_t *dSrc, uint8_t *dDst, int nSrcW, int nDstW, int nDstH,
int *lookupTable, cudaStream_t stream) {
dim3 blocks(32, 16, 1);
dim3 grids((nSrcW + blocks.x - 1) / blocks.x, (nDstH + blocks.y - 1) / blocks.y, 1);
convertToNppiKernel <<<grids, blocks, 0, stream >> > (dSrc, dDst, nSrcW, nDstW, nDstH, lookupTable);
} |
22dbbb67a1ffe59e10ef676277beafd3b424ea2b.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*!
* \brief A helper class for {@link MultiStageMeanfieldLayer} class, which is the Caffe layer that implements the
* CRF-RNN described in the paper: Conditional Random Fields as Recurrent Neural Networks. IEEE ICCV 2015.
*
* This class itself is not a proper Caffe layer although it behaves like one to some degree.
*
* \authors Sadeep Jayasumana, Bernardino Romera-Paredes, Shuai Zheng, Zhizhong Su.
* \version 1.0
* \date 2015
* \copyright Torr Vision Group, University of Oxford.
* \details If you use this code, please consider citing the paper:
* Shuai Zheng, Sadeep Jayasumana, Bernardino Romera-Paredes, Vibhav Vineet, Zhizhong Su, Dalong Du,
* Chang Huang, Philip H. S. Torr. Conditional Random Fields as Recurrent Neural Networks. IEEE ICCV 2015.
*
* For more information about CRF-RNN, please visit the project website http://crfasrnn.torr.vision.
*/
#include <vector>
#include <math.h>
#include "caffe/filler.hpp"
#include "caffe/layer.hpp"
#include "caffe/layers/loss_layer.hpp"
#include "caffe/layers/softmax_layer.hpp"
#include "caffe/crf_layers/message_passing_layer.hpp"
#include "caffe/crf_layers/pixel_access.hpp"
#include "pixel_access.cu"
namespace caffe {
template <typename Dtype>
__global__ void conv_kernel(const int nthreads, const Dtype* bottom, const Dtype* kernel, const Dtype* mask_data,Dtype* top, int N, int C, int H, int W, int neighN, bool user_interaction_constrain) {
CUDA_KERNEL_LOOP(index, nthreads){
const int w = index % W;
const int h = (index/W) % H;
const int c = (index / W / H) % C;
const int n = index / W / H / C;
int kernel_size = sqrt(double(neighN+1));
int kr = (kernel_size -1)/2;
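    // Note: neighN is assumed to be kernel_size*kernel_size - 1, i.e. every
    // neighbour in the square window except the centre pixel, which the loop
    // below skips explicitly.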
int neighIdx=0;
Dtype sum_value=0.0;
bool interaction_exist =false;
if(user_interaction_constrain){
interaction_exist = get_gpu_pixel(mask_data, N, 1, H, W, n, 0, h, w)>0.0;
}
if(!(user_interaction_constrain && interaction_exist)){
for(int i = -kr; i <= kr; i++)
{
for(int j = -kr; j <= kr; j++)
{
if(i==0 && j==0) continue;
Dtype value = get_gpu_pixel(bottom, N, C, H, W, n, c, i+h, j+w);
          Dtype weight = get_gpu_pixel(kernel, N, neighN, H, W, n, neighIdx, h, w);
          sum_value += value*weight;
neighIdx++;
}
}
}
set_gpu_pixel(top, N, C, H, W, n, c, h, w, sum_value);
}
}
template <typename Dtype>
__global__ void conv_gradient_to_input_kernel(const int nthreads, const Dtype* top_diff, const Dtype* kernel_data, const Dtype * mask_data, Dtype* bottom_diff, int N, int C, int H, int W, int neighN, bool user_interaction_constrain) {
CUDA_KERNEL_LOOP(index, nthreads){
const int w = index % W;
const int h = (index/W) % H;
const int c = (index / W / H) % C;
const int n = index / W / H / C;
int kernel_size = sqrt(double(neighN+1));
int kr = (kernel_size -1)/2;
int q_index=0;
Dtype value_diff = 0.0;
for(int i = -kr; i <= kr; i++)
{
for(int j = -kr; j <= kr; j++)
{
if(i==0 && j==0) continue;
int nq_index = neighN-1 -q_index;
Dtype weight_nq = get_gpu_pixel(kernel_data, N, neighN, H, W, n, nq_index, i+h, j+w);
if(user_interaction_constrain &&
get_gpu_pixel(mask_data, N, 1, H, W, n, 0, i+h, j+w)){
weight_nq = 0;
}
Dtype t_diff_nq = get_gpu_pixel(top_diff, N, C, H, W, n, c, i+h, j+w);
value_diff += weight_nq* t_diff_nq;
q_index++;
}
}
set_gpu_pixel(bottom_diff, N, C, H, W, n, c, h, w, value_diff);
}
}
template <typename Dtype>
__global__ void conv_gradient_to_weight_kernel(const int nthreads, const Dtype* top_diff, const Dtype* bottom_data, const Dtype * mask_data, Dtype* kernel_diff, int N, int C, int H, int W, int neighN, bool user_interaction_constrain) {
CUDA_KERNEL_LOOP(index, nthreads){
const int w = index % W;
const int h = (index/W) % H;
const int c = (index / W / H) % neighN;
const int n = index / W / H / neighN;
if(user_interaction_constrain &&
get_gpu_pixel(mask_data, N, 1, H, W, n, 0, h, w)){
return;
}
int kernel_size = sqrt(double(neighN+1));
int kr = (kernel_size -1)/2;
int cN = (c >= kr*kernel_size + kr)? c+1 : c;
int j = cN % kernel_size - kr;
int i = cN / kernel_size - kr;
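    // Note: cN re-inserts the skipped centre position into the index so that
    // (i, j) recover this weight's spatial offset within the kernel window.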
Dtype k_diff = 0;
for(int cIdx = 0; cIdx<C; cIdx++)
{
Dtype t_diff = get_gpu_pixel(top_diff, N, C, H, W, n, cIdx, h, w);
Dtype value = get_gpu_pixel(bottom_data, N, C, H, W, n, cIdx, h+i, w+j);
k_diff += value * t_diff;
}
//k_diff = k_diff / C;
set_gpu_pixel(kernel_diff, N, neighN, H, W, n, c, h, w, k_diff);
}
}
template <typename Dtype>
void MessagePassingLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
const vector<Blob<Dtype>*>& top)
{
const Dtype * input_data = bottom[0]->gpu_data();
const Dtype * kernel_data = bottom[1]->gpu_data();
const Dtype * mask_data = (user_interaction_constrain_)? bottom[2]->gpu_data(): NULL;
Dtype * output_data=top[0]->mutable_gpu_data();
CHECK_EQ(bottom[0]->count(), top[0]->count())<<
("input image and output image shoud have the same size");
CHECK(bottom[0]->height() == bottom[1]->height() && bottom[0]->width() == bottom[1]->width())<<
("input image and kernel shoud have the pixel number");
int count = top[0]->count();
hipLaunchKernelGGL(( conv_kernel<Dtype>), dim3(CAFFE_GET_BLOCKS(count)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
count, input_data, kernel_data, mask_data, output_data,
bottom[0]->num(), bottom[0]->channels(), bottom[0]->height(), bottom[0]->width(),
bottom[1]->channels(), user_interaction_constrain_);
}
template <typename Dtype>
void MessagePassingLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
const vector<bool>& propagate_down,
const vector<Blob<Dtype>*>& bottom)
{
  //LOG(INFO) << ("message passing backward_gpu start.");
const Dtype * top_diff = top[0]->gpu_diff();
const Dtype * bottom_data = bottom[0]->gpu_data();
Dtype * bottom_diff = bottom[0]->mutable_gpu_diff();
const Dtype * kernel_data = bottom[1]->gpu_data();
Dtype * kernel_diff = bottom[1]->mutable_gpu_diff();
const Dtype * mask_data = (user_interaction_constrain_)? bottom[2]->gpu_data(): NULL;
int bottom_count = bottom[0]->count();
hipLaunchKernelGGL(( conv_gradient_to_input_kernel<Dtype>), dim3(CAFFE_GET_BLOCKS(bottom_count)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
bottom_count, top_diff, kernel_data, mask_data, bottom_diff,
bottom[0]->num(), bottom[0]->channels(), bottom[0]->height(), bottom[0]->width(),
bottom[1]->channels(), user_interaction_constrain_);
int kernel_count = bottom[1]->count();
hipLaunchKernelGGL(( conv_gradient_to_weight_kernel<Dtype>), dim3(CAFFE_GET_BLOCKS(kernel_count)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
kernel_count, top_diff, bottom_data, mask_data, kernel_diff,
bottom[0]->num(), bottom[0]->channels(), bottom[0]->height(), bottom[0]->width(),
bottom[1]->channels(), user_interaction_constrain_);
}
INSTANTIATE_LAYER_GPU_FUNCS(MessagePassingLayer);
} // namespace caffe
| 22dbbb67a1ffe59e10ef676277beafd3b424ea2b.cu | /*!
* \brief A helper class for {@link MultiStageMeanfieldLayer} class, which is the Caffe layer that implements the
* CRF-RNN described in the paper: Conditional Random Fields as Recurrent Neural Networks. IEEE ICCV 2015.
*
* This class itself is not a proper Caffe layer although it behaves like one to some degree.
*
* \authors Sadeep Jayasumana, Bernardino Romera-Paredes, Shuai Zheng, Zhizhong Su.
* \version 1.0
* \date 2015
* \copyright Torr Vision Group, University of Oxford.
* \details If you use this code, please consider citing the paper:
* Shuai Zheng, Sadeep Jayasumana, Bernardino Romera-Paredes, Vibhav Vineet, Zhizhong Su, Dalong Du,
* Chang Huang, Philip H. S. Torr. Conditional Random Fields as Recurrent Neural Networks. IEEE ICCV 2015.
*
* For more information about CRF-RNN, please visit the project website http://crfasrnn.torr.vision.
*/
#include <vector>
#include <math.h>
#include "caffe/filler.hpp"
#include "caffe/layer.hpp"
#include "caffe/layers/loss_layer.hpp"
#include "caffe/layers/softmax_layer.hpp"
#include "caffe/crf_layers/message_passing_layer.hpp"
#include "caffe/crf_layers/pixel_access.hpp"
#include "pixel_access.cu"
namespace caffe {
template <typename Dtype>
__global__ void conv_kernel(const int nthreads, const Dtype* bottom, const Dtype* kernel, const Dtype* mask_data,Dtype* top, int N, int C, int H, int W, int neighN, bool user_interaction_constrain) {
CUDA_KERNEL_LOOP(index, nthreads){
const int w = index % W;
const int h = (index/W) % H;
const int c = (index / W / H) % C;
const int n = index / W / H / C;
int kernel_size = sqrt(double(neighN+1));
int kr = (kernel_size -1)/2;
int neighIdx=0;
Dtype sum_value=0.0;
bool interaction_exist =false;
if(user_interaction_constrain){
interaction_exist = get_gpu_pixel(mask_data, N, 1, H, W, n, 0, h, w)>0.0;
}
if(!(user_interaction_constrain && interaction_exist)){
for(int i = -kr; i <= kr; i++)
{
for(int j = -kr; j <= kr; j++)
{
if(i==0 && j==0) continue;
Dtype value = get_gpu_pixel(bottom, N, C, H, W, n, c, i+h, j+w);
          Dtype weight = get_gpu_pixel(kernel, N, neighN, H, W, n, neighIdx, h, w);
          sum_value += value*weight;
neighIdx++;
}
}
}
set_gpu_pixel(top, N, C, H, W, n, c, h, w, sum_value);
}
}
template <typename Dtype>
__global__ void conv_gradient_to_input_kernel(const int nthreads, const Dtype* top_diff, const Dtype* kernel_data, const Dtype * mask_data, Dtype* bottom_diff, int N, int C, int H, int W, int neighN, bool user_interaction_constrain) {
CUDA_KERNEL_LOOP(index, nthreads){
const int w = index % W;
const int h = (index/W) % H;
const int c = (index / W / H) % C;
const int n = index / W / H / C;
int kernel_size = sqrt(double(neighN+1));
int kr = (kernel_size -1)/2;
int q_index=0;
Dtype value_diff = 0.0;
for(int i = -kr; i <= kr; i++)
{
for(int j = -kr; j <= kr; j++)
{
if(i==0 && j==0) continue;
int nq_index = neighN-1 -q_index;
Dtype weight_nq = get_gpu_pixel(kernel_data, N, neighN, H, W, n, nq_index, i+h, j+w);
if(user_interaction_constrain &&
get_gpu_pixel(mask_data, N, 1, H, W, n, 0, i+h, j+w)){
weight_nq = 0;
}
Dtype t_diff_nq = get_gpu_pixel(top_diff, N, C, H, W, n, c, i+h, j+w);
value_diff += weight_nq* t_diff_nq;
q_index++;
}
}
set_gpu_pixel(bottom_diff, N, C, H, W, n, c, h, w, value_diff);
}
}
template <typename Dtype>
__global__ void conv_gradient_to_weight_kernel(const int nthreads, const Dtype* top_diff, const Dtype* bottom_data, const Dtype * mask_data, Dtype* kernel_diff, int N, int C, int H, int W, int neighN, bool user_interaction_constrain) {
CUDA_KERNEL_LOOP(index, nthreads){
const int w = index % W;
const int h = (index/W) % H;
const int c = (index / W / H) % neighN;
const int n = index / W / H / neighN;
if(user_interaction_constrain &&
get_gpu_pixel(mask_data, N, 1, H, W, n, 0, h, w)){
return;
}
int kernel_size = sqrt(double(neighN+1));
int kr = (kernel_size -1)/2;
int cN = (c >= kr*kernel_size + kr)? c+1 : c;
int j = cN % kernel_size - kr;
int i = cN / kernel_size - kr;
Dtype k_diff = 0;
for(int cIdx = 0; cIdx<C; cIdx++)
{
Dtype t_diff = get_gpu_pixel(top_diff, N, C, H, W, n, cIdx, h, w);
Dtype value = get_gpu_pixel(bottom_data, N, C, H, W, n, cIdx, h+i, w+j);
k_diff += value * t_diff;
}
//k_diff = k_diff / C;
set_gpu_pixel(kernel_diff, N, neighN, H, W, n, c, h, w, k_diff);
}
}
template <typename Dtype>
void MessagePassingLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
const vector<Blob<Dtype>*>& top)
{
const Dtype * input_data = bottom[0]->gpu_data();
const Dtype * kernel_data = bottom[1]->gpu_data();
const Dtype * mask_data = (user_interaction_constrain_)? bottom[2]->gpu_data(): NULL;
Dtype * output_data=top[0]->mutable_gpu_data();
CHECK_EQ(bottom[0]->count(), top[0]->count())<<
("input image and output image shoud have the same size");
CHECK(bottom[0]->height() == bottom[1]->height() && bottom[0]->width() == bottom[1]->width())<<
("input image and kernel shoud have the pixel number");
int count = top[0]->count();
conv_kernel<Dtype><<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>
(count, input_data, kernel_data, mask_data, output_data,
bottom[0]->num(), bottom[0]->channels(), bottom[0]->height(), bottom[0]->width(),
bottom[1]->channels(), user_interaction_constrain_);
}
template <typename Dtype>
void MessagePassingLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
const vector<bool>& propagate_down,
const vector<Blob<Dtype>*>& bottom)
{
  //LOG(INFO) << ("message passing backward_gpu start.");
const Dtype * top_diff = top[0]->gpu_diff();
const Dtype * bottom_data = bottom[0]->gpu_data();
Dtype * bottom_diff = bottom[0]->mutable_gpu_diff();
const Dtype * kernel_data = bottom[1]->gpu_data();
Dtype * kernel_diff = bottom[1]->mutable_gpu_diff();
const Dtype * mask_data = (user_interaction_constrain_)? bottom[2]->gpu_data(): NULL;
int bottom_count = bottom[0]->count();
conv_gradient_to_input_kernel<Dtype><<<CAFFE_GET_BLOCKS(bottom_count), CAFFE_CUDA_NUM_THREADS>>>
(bottom_count, top_diff, kernel_data, mask_data, bottom_diff,
bottom[0]->num(), bottom[0]->channels(), bottom[0]->height(), bottom[0]->width(),
bottom[1]->channels(), user_interaction_constrain_);
int kernel_count = bottom[1]->count();
conv_gradient_to_weight_kernel<Dtype><<<CAFFE_GET_BLOCKS(kernel_count), CAFFE_CUDA_NUM_THREADS>>>
(kernel_count, top_diff, bottom_data, mask_data, kernel_diff,
bottom[0]->num(), bottom[0]->channels(), bottom[0]->height(), bottom[0]->width(),
bottom[1]->channels(), user_interaction_constrain_);
}
INSTANTIATE_LAYER_GPU_FUNCS(MessagePassingLayer);
} // namespace caffe
|
58ed34e343ae8216eb07b07526be649251cd5701.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "core/providers/cuda/cu_inc/common.cuh"
#include "core/providers/cuda/tensor/resize_impl.h"
namespace onnxruntime {
namespace cuda {
using onnxruntime::ResizeCoordinateTransformationMode;
using onnxruntime::ResizeNearestMode;
using onnxruntime::UpsampleMode;
__device__ int NearestPixel_SIMPLE(float x_original, bool is_down_sampling) {
if (is_down_sampling) {
return static_cast<int>(ceil(x_original));
} else {
return static_cast<int>(x_original);
}
}
__device__ int NearestPixel_ROUND_PREFER_FLOOR(float x_original, bool) {
if (x_original == static_cast<int>(x_original) + 0.5f) {
return static_cast<int>(floor(x_original));
}
return static_cast<int>(round(x_original));
}
__device__ int NearestPixel_ROUND_PREFER_CEIL(float x_original, bool) {
return static_cast<int>(round(x_original));
}
__device__ int NearestPixel_FLOOR(float x_original, bool) {
return static_cast<int>(floor(x_original));
}
__device__ int NearestPixel_CEIL(float x_original, bool) {
return static_cast<int>(ceil(x_original));
}
using CudaFunctionNearestPixel = int (*)(float, bool);
__device__ CudaFunctionNearestPixel func_NearestPixel_SIMPLE = NearestPixel_SIMPLE;
__device__ CudaFunctionNearestPixel func_NearestPixel_ROUND_PREFER_FLOOR = NearestPixel_ROUND_PREFER_FLOOR;
__device__ CudaFunctionNearestPixel func_NearestPixel_ROUND_PREFER_CEIL = NearestPixel_ROUND_PREFER_CEIL;
__device__ CudaFunctionNearestPixel func_NearestPixel_FLOOR = NearestPixel_FLOOR;
__device__ CudaFunctionNearestPixel func_NearestPixel_CEIL = NearestPixel_CEIL;
CudaFunctionNearestPixel GetDeviceNearstPixelFunction(ResizeNearestMode nearest_mode) {
static bool already_copied = false;
static std::mutex s_mutext;
static CudaFunctionNearestPixel s_nearest_pixel[ResizeNearestMode::NearestModeCount];
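  // Note: the address of a __device__ function cannot be taken from host code, so
  // the device-side pointers are copied back once via hipMemcpyFromSymbol and
  // cached in s_nearest_pixel.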
if (!already_copied) {
std::lock_guard<std::mutex> lock(s_mutext);
if (!already_copied) {
CUDA_CALL(hipMemcpyFromSymbol(&s_nearest_pixel[ResizeNearestMode::SIMPLE],
func_NearestPixel_SIMPLE, sizeof(CudaFunctionNearestPixel)));
CUDA_CALL(hipMemcpyFromSymbol(&s_nearest_pixel[ResizeNearestMode::ROUND_PREFER_FLOOR],
func_NearestPixel_ROUND_PREFER_FLOOR, sizeof(CudaFunctionNearestPixel)));
CUDA_CALL(hipMemcpyFromSymbol(&s_nearest_pixel[ResizeNearestMode::ROUND_PREFER_CEIL],
func_NearestPixel_ROUND_PREFER_CEIL, sizeof(CudaFunctionNearestPixel)));
CUDA_CALL(hipMemcpyFromSymbol(&s_nearest_pixel[ResizeNearestMode::FLOOR],
func_NearestPixel_FLOOR, sizeof(CudaFunctionNearestPixel)));
CUDA_CALL(hipMemcpyFromSymbol(&s_nearest_pixel[ResizeNearestMode::CEIL],
func_NearestPixel_CEIL, sizeof(CudaFunctionNearestPixel)));
already_copied = true;
}
}
return s_nearest_pixel[nearest_mode];
}
__device__ float TransformCoordinate_ASYMMETRIC(float x_resized, float x_scale, float, float, float, float) {
return x_resized / x_scale;
}
__device__ float TransformCoordinate_HALF_PIXEL(float x_resized, float x_scale, float, float, float, float) {
return ((x_resized + 0.5f) / x_scale) - 0.5f;
}
__device__ float TransformCoordinate_PYTORCH_HALF_PIXEL(
float x_resized, float x_scale, float length_resized, float, float, float) {
return length_resized > 1 ? (x_resized + 0.5f) / x_scale - 0.5f : 0.0f;
}
__device__ float TransformCoordinate_TF_HALF_PIXEL_FOR_NN(
float x_resized, float x_scale, float, float, float, float) {
return (x_resized + 0.5f) / x_scale;
}
__device__ float TransformCoordinate_ALIGN_CORNERS(
float x_resized, float, float length_resized, float length_original, float, float) {
return length_resized == 1 ? 0 : x_resized * (length_original - 1) / (length_resized - 1);
}
__device__ float TransformCoordinate_TF_CROP_AND_RESIZE(
float x_resized, float, float length_resized, float length_original, float roi_start, float roi_end) {
auto orig = length_resized > 1
? roi_start * (length_original - 1) + (x_resized * (roi_end - roi_start) * (length_original - 1)) / (length_resized - 1)
: 0.5 * (roi_start + roi_end) * (length_original - 1);
return static_cast<float>(orig);
}
using CudaFunctionOriginalCoordinate = float (*)(float, float, float, float, float, float);
__device__ CudaFunctionOriginalCoordinate func_TransformCoordinate_ASYMMETRIC = TransformCoordinate_ASYMMETRIC;
__device__ CudaFunctionOriginalCoordinate func_TransformCoordinate_HALF_PIXEL = TransformCoordinate_HALF_PIXEL;
__device__ CudaFunctionOriginalCoordinate func_TransformCoordinate_PYTORCH_HALF_PIXEL = TransformCoordinate_PYTORCH_HALF_PIXEL;
__device__ CudaFunctionOriginalCoordinate func_TransformCoordinate_ALIGN_CORNERS = TransformCoordinate_ALIGN_CORNERS;
__device__ CudaFunctionOriginalCoordinate func_TransformCoordinate_TF_HALF_PIXEL_FOR_NN = TransformCoordinate_TF_HALF_PIXEL_FOR_NN;
__device__ CudaFunctionOriginalCoordinate func_TransformCoordinate_TF_CROP_AND_RESIZE = TransformCoordinate_TF_CROP_AND_RESIZE;
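// Same pattern as above: build a host-side table of coordinate-transformation device
// function pointers, indexed by ResizeCoordinateTransformationMode.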
CudaFunctionOriginalCoordinate GetDeviceOriginalCoordinateFunc(ResizeCoordinateTransformationMode coordinate_transform_mode) {
static bool already_copied = false;
static std::mutex s_mutext;
static CudaFunctionOriginalCoordinate s_coordinate_tranforms[ResizeCoordinateTransformationMode::CoordinateTransformationModeCount];
if (!already_copied) {
std::lock_guard<std::mutex> lock(s_mutext);
if (!already_copied) {
CUDA_CALL(hipMemcpyFromSymbol(&s_coordinate_tranforms[ResizeCoordinateTransformationMode::HALF_PIXEL],
func_TransformCoordinate_HALF_PIXEL, sizeof(CudaFunctionOriginalCoordinate)));
CUDA_CALL(hipMemcpyFromSymbol(&s_coordinate_tranforms[ResizeCoordinateTransformationMode::ASYMMETRIC],
func_TransformCoordinate_ASYMMETRIC, sizeof(CudaFunctionOriginalCoordinate)));
CUDA_CALL(hipMemcpyFromSymbol(&s_coordinate_tranforms[ResizeCoordinateTransformationMode::PYTORCH_HALF_PIXEL],
func_TransformCoordinate_PYTORCH_HALF_PIXEL, sizeof(CudaFunctionOriginalCoordinate)));
CUDA_CALL(hipMemcpyFromSymbol(&s_coordinate_tranforms[ResizeCoordinateTransformationMode::ALIGN_CORNERS],
func_TransformCoordinate_ALIGN_CORNERS, sizeof(CudaFunctionOriginalCoordinate)));
CUDA_CALL(hipMemcpyFromSymbol(&s_coordinate_tranforms[ResizeCoordinateTransformationMode::TF_HALF_PIXEL_FOR_NN],
func_TransformCoordinate_TF_HALF_PIXEL_FOR_NN, sizeof(CudaFunctionOriginalCoordinate)));
CUDA_CALL(hipMemcpyFromSymbol(&s_coordinate_tranforms[ResizeCoordinateTransformationMode::TF_CROP_AND_RESIZE],
func_TransformCoordinate_TF_CROP_AND_RESIZE, sizeof(CudaFunctionOriginalCoordinate)));
already_copied = true;
}
}
return s_coordinate_tranforms[coordinate_transform_mode];
}
struct NearestMappingInfo {
int origin_;
int extrapolate_;
};
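// Precomputes, for every output row and column, the nearest source index and an
// extrapolation flag. The first output_height entries of dims_mapping cover H,
// the following output_width entries cover W.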
template <typename T>
__global__ void _ResizeNearestMappingKernel2D(
const int input_height, const int input_width,
const int output_height, const int output_width,
const float scales_height, const float scales_width,
const float roi_start_height, const float roi_end_height,
const float roi_start_width, const float roi_end_width,
const bool extrapolation_enabled,
CudaFunctionOriginalCoordinate transform_coordinate,
CudaFunctionNearestPixel calc_nearest_pixel,
NearestMappingInfo* dims_mapping) {
CALCULATE_ELEMENTWISE_INDEX_OR_EXIT(id, output_height + output_width);
if (id >= 0 && id < output_height) { // for Height
int dim = id;
// only apply co-ordinate transformation if scale != 1.0
if (scales_height == 1.0f) {
dims_mapping[id].extrapolate_ = 0;
} else {
float orig_coord = transform_coordinate(static_cast<float>(dim), scales_height, static_cast<float>(output_height),
static_cast<float>(input_height), roi_start_height, roi_end_height);
dims_mapping[id].extrapolate_ = static_cast<int>(
extrapolation_enabled && (orig_coord < 0.f || orig_coord > static_cast<float>(input_height - 1)));
dim = calc_nearest_pixel(orig_coord, scales_height < 1);
if (dim >= input_height) dim = input_height - 1;
if (dim < 0) dim = 0;
}
dims_mapping[id].origin_ = dim;
} else {
int dim = id - output_height;
// only apply co-ordinate transformation if scale != 1.0
if (scales_width == 1.0f) {
dims_mapping[id].extrapolate_ = 0;
} else {
float orig_coord = transform_coordinate(static_cast<float>(dim), scales_width, static_cast<float>(output_width),
static_cast<float>(input_width), roi_start_width, roi_end_width);
dims_mapping[id].extrapolate_ = static_cast<int>(
extrapolation_enabled && (orig_coord < 0.f || orig_coord > static_cast<float>(input_width - 1)));
dim = calc_nearest_pixel(orig_coord, scales_width < 1);
if (dim >= input_width) dim = input_width - 1;
if (dim < 0) dim = 0;
}
dims_mapping[id].origin_ = dim;
return;
}
}
template <typename T>
__global__ void _ResizeNearestMappingKernel(
const size_t rank,
const TArray<int64_t> input_shape,
const TArray<int64_t> output_shape,
const TArray<float> scales,
const TArray<float> roi,
const size_t total_dim_sum,
bool extrapolation_enabled,
CudaFunctionOriginalCoordinate transform_coordinate,
CudaFunctionNearestPixel calc_nearest_pixel,
int64_t* prefix_dim_sum,
NearestMappingInfo* dims_mapping) {
CALCULATE_ELEMENTWISE_INDEX_OR_EXIT(id, total_dim_sum);
int64_t dim_sum = 0;
for (int axis = 0; axis < rank; ++axis) {
if (id == dim_sum) {
prefix_dim_sum[axis] = dim_sum;
}
if (id >= dim_sum && id < dim_sum + output_shape[axis]) {
int dim = id - dim_sum;
// only apply co-ordinate transformation if scale != 1.0
if (scales[axis] == 1.0f) {
dims_mapping[id].extrapolate_ = 0;
} else {
float orig_coord = transform_coordinate(static_cast<float>(dim), scales[axis], static_cast<float>(output_shape[axis]),
static_cast<float>(input_shape[axis]), roi[axis], roi[axis + rank]);
dims_mapping[id].extrapolate_ = static_cast<int>(extrapolation_enabled && (orig_coord < 0.f || orig_coord > static_cast<float>(input_shape[axis] - 1)));
dim = calc_nearest_pixel(orig_coord, scales[axis] < 1);
if (dim >= input_shape[axis]) dim = input_shape[axis] - 1;
if (dim < 0) dim = 0;
}
dims_mapping[id].origin_ = dim;
return;
}
dim_sum += output_shape[axis];
}
}
template <typename T, bool UseExtrapolation>
__global__ void _ResizeNearestKernel2D(
const int64_t output_height, const int64_t output_width,
const int64_t input_stride_image, const int input_stride_row,
const fast_divmod output_stride_image, const fast_divmod output_stride_row,
const T* input_data, T* output_data, const size_t N,
const T extrapolation_value, const NearestMappingInfo* dims_mapping) {
CALCULATE_ELEMENTWISE_INDEX_OR_EXIT(id, N);
int imageid, h, w, output_index;
output_stride_image.divmod(static_cast<int>(id), imageid, output_index);
output_stride_row.divmod(output_index, h, w);
if (UseExtrapolation) {
if (dims_mapping[h].extrapolate_ + dims_mapping[output_height + w].extrapolate_) {
output_data[id] = extrapolation_value;
return;
}
}
int input_index = input_stride_image * imageid +
input_stride_row * dims_mapping[h].origin_ +
dims_mapping[output_height + w].origin_;
output_data[id] = input_data[input_index];
}
template <typename T>
__global__ void _ResizeNearestKernel(
const int rank,
const TArray<int64_t> input_strides,
const TArray<fast_divmod> output_div_pitches,
const T* input_data,
T* output_data,
const size_t N,
const T extrapolation_value,
const int64_t* prefix_dim_sum,
const NearestMappingInfo* dims_mapping) {
CALCULATE_ELEMENTWISE_INDEX_OR_EXIT(id, N);
int output_index = static_cast<int>(id);
int input_index = 0;
int extrapolation_occured = 0;
for (int axis = 0; axis < rank; ++axis) {
int dim = 0;
output_div_pitches[axis].divmod(output_index, dim, output_index);
const NearestMappingInfo& mi = dims_mapping[prefix_dim_sum[axis] + dim];
extrapolation_occured += mi.extrapolate_;
input_index += input_strides[axis] * mi.origin_;
}
output_data[id] = extrapolation_occured ? extrapolation_value : input_data[input_index];
}
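// Bilinear resize runs in two stages: a mapping kernel precomputes the source origin,
// fractional weight and extrapolation flag per output row/column, and the resize kernel
// then blends the 2x2 neighborhood using those tables.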
struct BilinearMappingInfo {
int origin_;
float weight_;
int extrapolate_;
};
template <typename T>
__global__ void _ResizeBilinearCoordinateMapping(
int64_t input_height, int64_t input_width,
int64_t output_height, int64_t output_width,
float scale_height, float scale_width,
float roi_height_start, float roi_height_end,
float roi_width_start, float roi_width_end,
const size_t SumHW, bool extrapolation_enabled,
CudaFunctionOriginalCoordinate transform_coordinate,
BilinearMappingInfo* dims_mapping) {
CALCULATE_ELEMENTWISE_INDEX_OR_EXIT(id, SumHW);
if (id < output_height) { // y = id
float input_y = scale_height == 1 ? static_cast<float>(id) :
transform_coordinate(static_cast<float>(id), scale_height,
static_cast<float>(output_height), static_cast<float>(input_height),
roi_height_start, roi_height_end);
dims_mapping[id].extrapolate_ = (int)(extrapolation_enabled && (input_y < 0 || input_y > static_cast<float>(input_height - 1)));
input_y = max(0.0f, min(input_y, static_cast<float>(input_height - 1)));
int y_int = static_cast<int>(input_y);
dims_mapping[id].origin_ = y_int;
dims_mapping[id].weight_ = (y_int >= input_height - 1) ? 0.5f : input_y - y_int;
} else { //x = id - output_height
float input_x = scale_width == 1 ? static_cast<float>(id - output_height) :
transform_coordinate(static_cast<float>(id - output_height), scale_width,
static_cast<float>(output_width), static_cast<float>(input_width),
roi_width_start, roi_width_end);
dims_mapping[id].extrapolate_ = (int)(extrapolation_enabled && (input_x < 0 || input_x > static_cast<float>(input_width - 1)));
input_x = max(0.0f, min(input_x, static_cast<float>(input_width - 1)));
int x_int = static_cast<int>(input_x);
dims_mapping[id].origin_ = x_int;
dims_mapping[id].weight_ = (x_int >= input_width - 1) ? 0.5f : input_x - x_int;
}
}
// The following method supports an N-D input in 'Linear mode'. The last two dimensions are [H, W].
// The scale values for all dimensions except the last two are 1.
template <typename T>
__global__ void _ResizeBilinearKernel(
int64_t input_height, int64_t input_width,
int64_t output_height, int64_t output_width,
fast_divmod div_output_width, fast_divmod div_output_image,
const T* input_data, T* output_data, const size_t N,
const T extrapolation_value,
BilinearMappingInfo* dims_mapping) {
CALCULATE_ELEMENTWISE_INDEX_OR_EXIT(id, N);
int bxc, output_image_index;
div_output_image.divmod(id, bxc, output_image_index);
CUDA_LONG input_index = bxc * input_height * input_width;
int output_y, output_x;
div_output_width.divmod(output_image_index, output_y, output_x);
if (dims_mapping[output_y].extrapolate_ || dims_mapping[output_x + output_height].extrapolate_) {
output_data[id] = extrapolation_value;
return;
}
float y_offset_0 = dims_mapping[output_y].weight_;
int y_int = dims_mapping[output_y].origin_;
float x_offset_0 = dims_mapping[output_x + output_height].weight_;
int x_int = dims_mapping[output_x + output_height].origin_;
input_index += y_int * input_width + x_int;
T x00 = input_data[input_index];
bool end_of_h = (y_int >= input_height - 1);
bool end_of_w = (x_int >= input_width - 1);
T x10 = end_of_w ? x00 : input_data[input_index + 1];
T x01 = end_of_h ? x00 : input_data[input_index + input_width];
T x11 = end_of_w ? x01 : (end_of_h ? x10 : input_data[input_index + input_width + 1]);
float y_offset_1 = 1.0f - y_offset_0;
float x_offset_1 = 1.0f - x_offset_0;
output_data[id] =
x00 * static_cast<T>(y_offset_1 * x_offset_1) +
x01 * static_cast<T>(y_offset_0 * x_offset_1) +
x10 * static_cast<T>(y_offset_1 * x_offset_0) +
x11 * static_cast<T>(y_offset_0 * x_offset_0);
}
template <typename T>
__device__ __forceinline__ float CubicInterpolationRowwise(
const T* image, int x, int y, int input_height, int input_width,
float coeff0, float coeff1, float coeff2, float coeff3) {
int row_index = max(0, min(y, input_height - 1)) * input_width;
return coeff0 * static_cast<float>(image[row_index + max(0, min(x - 1, input_width - 1))]) +
coeff1 * static_cast<float>(image[row_index + max(0, min(x, input_width - 1))]) +
coeff2 * static_cast<float>(image[row_index + max(0, min(x + 1, input_width - 1))]) +
coeff3 * static_cast<float>(image[row_index + max(0, min(x + 2, input_width - 1))]);
}
struct CubicMappingInfo {
int origin_;
int extrapolate_;
float coeff0_;
float coeff1_;
float coeff2_;
float coeff3_;
};
template <typename T>
__global__ void _ResizeCubicCoordinateMapping(
int64_t input_height, int64_t input_width,
int64_t output_height, int64_t output_width,
float scale_height, float scale_width,
float roi_height_start, float roi_height_end,
float roi_width_start, float roi_width_end,
const size_t SumHW, bool extrapolation_enabled,
float cubic_coeff_a, bool exclude_outside,
CudaFunctionOriginalCoordinate transform_coordinate,
CubicMappingInfo* dims_mapping) {
CALCULATE_ELEMENTWISE_INDEX_OR_EXIT(id, SumHW);
auto& dm = dims_mapping[id];
bool is_y_axis = (id < output_height);
int max_input_coord = static_cast<int>(is_y_axis ? input_height : input_width);
float scale = is_y_axis ? scale_height : scale_width;
float input_coordinat = scale == 1 ? (is_y_axis ? id : id - output_height) :
transform_coordinate(
static_cast<float>(is_y_axis ? id : id - output_height),
scale,
static_cast<float>(is_y_axis ? output_height : output_width),
static_cast<float>(max_input_coord),
(is_y_axis ? roi_height_start : roi_width_start),
(is_y_axis ? roi_height_end : roi_width_end));
int coord_int = static_cast<int>(floor(input_coordinat));
float s_coord = abs(input_coordinat - coord_int);
float coeff_sum = 1.0f;
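  // Keys cubic-convolution weights for the four taps around coord_int; cubic_coeff_a is
  // the kernel's 'a' parameter (ONNX Resize defaults to -0.75). With exclude_outside,
  // taps that fall outside the input are zeroed and the remaining weights renormalized.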
float coeff_0 = static_cast<float>(((cubic_coeff_a * (s_coord + 1) - 5 * cubic_coeff_a) * (s_coord + 1) + 8 * cubic_coeff_a) * (s_coord + 1) - 4 * cubic_coeff_a);
float coeff_1 = static_cast<float>(((cubic_coeff_a + 2) * s_coord - (cubic_coeff_a + 3)) * s_coord * s_coord + 1);
float coeff_2 = static_cast<float>(((cubic_coeff_a + 2) * (1 - s_coord) - (cubic_coeff_a + 3)) * (1 - s_coord) * (1 - s_coord) + 1);
float coeff_3 = static_cast<float>(((cubic_coeff_a * (2 - s_coord) - 5 * cubic_coeff_a) * (2 - s_coord) + 8 * cubic_coeff_a) * (2 - s_coord) - 4 * cubic_coeff_a);
if (exclude_outside) {
coeff_0 = (coord_int - 1 < 0 || coord_int - 1 >= max_input_coord) ? 0.0 : coeff_0;
coeff_1 = (coord_int + 0 < 0 || coord_int + 0 >= max_input_coord) ? 0.0 : coeff_1;
coeff_2 = (coord_int + 1 < 0 || coord_int + 1 >= max_input_coord) ? 0.0 : coeff_2;
coeff_3 = (coord_int + 2 < 0 || coord_int + 2 >= max_input_coord) ? 0.0 : coeff_3;
coeff_sum = coeff_0 + coeff_1 + coeff_2 + coeff_3;
}
dm.origin_ = coord_int;
dm.coeff0_ = coeff_0 / coeff_sum;
dm.coeff1_ = coeff_1 / coeff_sum;
dm.coeff2_ = coeff_2 / coeff_sum;
dm.coeff3_ = coeff_3 / coeff_sum;
dm.extrapolate_ = (int)(extrapolation_enabled && (input_coordinat < 0 || input_coordinat > static_cast<float>(max_input_coord - 1)));
}
template <typename T>
__global__ void _ResizeBiCubicKernel(
int64_t input_height, int64_t input_width, int64_t output_height, int64_t output_width,
fast_divmod div_output_width, fast_divmod div_output_image,
const T* input_data, T* output_data, const size_t N, const T extrapolation_value,
CubicMappingInfo* dims_mapping) {
CALCULATE_ELEMENTWISE_INDEX_OR_EXIT(id, N);
int bxc, output_image_index, output_x, output_y;
div_output_image.divmod(id, bxc, output_image_index);
CUDA_LONG input_index = bxc * input_height * input_width;
div_output_width.divmod(output_image_index, output_y, output_x);
CubicMappingInfo& y_info = dims_mapping[output_y];
CubicMappingInfo& x_info = dims_mapping[output_x + output_height];
if (y_info.extrapolate_ || x_info.extrapolate_) {
output_data[id] = extrapolation_value;
return;
}
float w0 = x_info.coeff0_;
float w1 = x_info.coeff1_;
float w2 = x_info.coeff2_;
float w3 = x_info.coeff3_;
int x_int = x_info.origin_;
int y_int = y_info.origin_;
const T* image = input_data + input_index;
output_data[id] = y_info.coeff0_ * CubicInterpolationRowwise(image, x_int, y_int - 1, input_height, input_width, w0, w1, w2, w3) +
y_info.coeff1_ * CubicInterpolationRowwise(image, x_int, y_int, input_height, input_width, w0, w1, w2, w3) +
y_info.coeff2_ * CubicInterpolationRowwise(image, x_int, y_int + 1, input_height, input_width, w0, w1, w2, w3) +
y_info.coeff3_ * CubicInterpolationRowwise(image, x_int, y_int + 2, input_height, input_width, w0, w1, w2, w3);
}
size_t CalcResizeBufferSize(const onnxruntime::UpsampleMode upsample_mode,
const std::vector<int64_t>& output_dims) {
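  // NN mode needs one int64_t prefix sum per axis plus one mapping entry per output
  // coordinate across all axes; LINEAR and CUBIC only map the last two (H, W) dims.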
switch (upsample_mode) {
case UpsampleMode::NN:
return sizeof(int64_t) * output_dims.size() + sizeof(NearestMappingInfo) * std::accumulate(output_dims.begin(), output_dims.end(), 0);
case UpsampleMode::LINEAR:
return sizeof(BilinearMappingInfo) * std::accumulate(output_dims.rbegin(), output_dims.rbegin() + 2, 0);
case UpsampleMode::CUBIC:
return sizeof(CubicMappingInfo) * std::accumulate(output_dims.rbegin(), output_dims.rbegin() + 2, 0);
}
return 0;
}
template <typename T>
void ResizeNearestImpl(
const int rank,
TArray<int64_t>& input_shape,
TArray<int64_t>& output_shape,
TArray<int64_t>& input_strides,
TArray<fast_divmod>& output_div_pitches,
TArray<float>& scales_vals,
TArray<float>& roi_vals,
const T* input_data,
T* output_data,
const size_t N,
bool extrapolation_enabled,
const T extrapolation_value,
float cubic_coeff_a,
CudaFunctionOriginalCoordinate transform_coordinate,
CudaFunctionNearestPixel calc_nearest_pixel,
int64_t* /* prefix_dim_sum */,
NearestMappingInfo* dims_mapping) {
int blocksPerGrid = static_cast<int>(ceil(static_cast<float>(N) / GridDim::maxThreadsPerBlock));
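  // Fast 2D path: only the innermost two dims are resized (all outer scales are 1)
  // and the transform is not TF_CROP_AND_RESIZE, so the specialized 2D kernels apply.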
bool could2d = rank >= 2 &&
transform_coordinate != GetDeviceOriginalCoordinateFunc(ResizeCoordinateTransformationMode::TF_CROP_AND_RESIZE) &&
std::all_of(scales_vals.Data(), scales_vals.Data() + (rank - 2), [](float v) { return v == 1.0; });
if (could2d) {
int64_t output_height = output_shape[rank - 2];
int64_t output_width = output_shape[rank - 1];
fast_divmod div_output_image = (rank > 2) ? output_div_pitches[rank - 3] : fast_divmod(output_height * output_width);
int blocksPerDimsMappingGrid = static_cast<int>(ceil((output_height + output_width) / 32.0));
hipLaunchKernelGGL(( _ResizeNearestMappingKernel2D<T>), dim3(blocksPerDimsMappingGrid), dim3(32), 0, 0,
input_shape[rank - 2], input_shape[rank - 1],
output_height, output_width,
scales_vals[rank - 2], scales_vals[rank - 1],
roi_vals[rank - 2], roi_vals[rank - 2 + rank],
roi_vals[rank - 1], roi_vals[rank - 1 + rank],
extrapolation_enabled, transform_coordinate, calc_nearest_pixel,
dims_mapping);
if (extrapolation_enabled) {
hipLaunchKernelGGL(( _ResizeNearestKernel2D<T, true>), dim3(blocksPerGrid), dim3(GridDim::maxThreadsPerBlock), 0, 0,
output_height, output_width,
input_shape[rank - 2] * input_shape[rank - 1], input_shape[rank - 1],
div_output_image, output_div_pitches[rank - 2],
input_data, output_data, N,
extrapolation_value,
dims_mapping);
} else {
hipLaunchKernelGGL(( _ResizeNearestKernel2D<T, false>), dim3(blocksPerGrid), dim3(GridDim::maxThreadsPerBlock), 0, 0,
output_height, output_width,
input_shape[rank - 2] * input_shape[rank - 1], input_shape[rank - 1],
div_output_image, output_div_pitches[rank - 2],
input_data, output_data, N,
extrapolation_value,
dims_mapping);
}
return;
}
int64_t total_dim_sum = std::accumulate(output_shape.Data(), output_shape.Data() + rank, 0);
int blocksPerDimsMappingGrid = (int)(ceil(static_cast<double>(total_dim_sum) / 32));
hipLaunchKernelGGL(( _ResizeNearestMappingKernel<T>), dim3(blocksPerDimsMappingGrid), dim3(32), 0, 0,
rank, input_shape, output_shape,
scales_vals, roi_vals,
total_dim_sum, extrapolation_enabled,
transform_coordinate, calc_nearest_pixel,
reinterpret_cast<int64_t*>(dims_mapping),
reinterpret_cast<NearestMappingInfo*>(reinterpret_cast<int64_t*>(dims_mapping) + rank));
hipLaunchKernelGGL(( _ResizeNearestKernel<T>), dim3(blocksPerGrid), dim3(GridDim::maxThreadsPerBlock), 0, 0,
rank, input_strides, output_div_pitches,
input_data, output_data, N,
extrapolation_value,
reinterpret_cast<const int64_t*>(dims_mapping),
reinterpret_cast<const NearestMappingInfo*>(reinterpret_cast<int64_t*>(dims_mapping) + rank));
return;
}
template <typename T>
void ResizeImpl(
const UpsampleMode upsample_mode,
const int rank,
TArray<int64_t>& input_shape,
TArray<int64_t>& output_shape,
TArray<int64_t>& input_strides,
TArray<fast_divmod>& output_div_pitches,
TArray<float>& scales_vals,
TArray<float>& roi_vals,
const T* input_data,
T* output_data,
const size_t N,
bool extrapolation_enabled,
const T extrapolation_value,
float cubic_coeff_a,
bool exclude_outside,
ResizeCoordinateTransformationMode coordinate_transform_mode,
ResizeNearestMode nearest_mode,
void* dims_mapping) {
bool isSame = std::all_of(scales_vals.Data(), scales_vals.Data() + rank, [](float v) { return v == 1.0f; }) &&
(coordinate_transform_mode != ResizeCoordinateTransformationMode::TF_CROP_AND_RESIZE);
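  // All scales are 1 and no cropping is requested, so the resize is an identity copy.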
if (isSame) {
hipMemcpyAsync(output_data, input_data, N * sizeof(T), hipMemcpyDeviceToDevice);
return;
}
CudaFunctionOriginalCoordinate transform_coordinate = GetDeviceOriginalCoordinateFunc(coordinate_transform_mode);
CudaFunctionNearestPixel calc_nearest_pixel = GetDeviceNearstPixelFunction(nearest_mode);
if (upsample_mode == UpsampleMode::NN) {
ResizeNearestImpl(
rank, input_shape, output_shape, input_strides, output_div_pitches,
scales_vals, roi_vals, input_data, output_data, N,
extrapolation_enabled, extrapolation_value, cubic_coeff_a,
transform_coordinate, calc_nearest_pixel,
reinterpret_cast<int64_t*>(dims_mapping),
reinterpret_cast<NearestMappingInfo*>(reinterpret_cast<int64_t*>(dims_mapping) + rank));
return;
}
int blocksPerGrid = (int)(ceil(static_cast<float>(N) / GridDim::maxThreadsPerBlock));
fast_divmod div_output_image = (rank > 2) ? output_div_pitches[rank - 3] : fast_divmod(gsl::narrow_cast<int>(N));
int64_t output_height = output_shape[rank - 2];
int64_t output_width = output_shape[rank - 1];
int blocksPerDimsMappingGrid = (int)(ceil((output_height + output_width) / 32.0));
switch (upsample_mode) {
case UpsampleMode::LINEAR:
hipLaunchKernelGGL(( _ResizeBilinearCoordinateMapping<T>), dim3(blocksPerDimsMappingGrid), dim3(32), 0, 0,
input_shape[rank - 2], input_shape[rank - 1],
output_height, output_width,
scales_vals[rank - 2], scales_vals[rank - 1],
roi_vals[rank - 2], roi_vals[rank - 2 + rank],
roi_vals[rank - 1], roi_vals[rank - 1 + rank],
output_height + output_width, extrapolation_enabled, transform_coordinate,
reinterpret_cast<BilinearMappingInfo*>(dims_mapping));
hipLaunchKernelGGL(( _ResizeBilinearKernel<T>), dim3(blocksPerGrid), dim3(GridDim::maxThreadsPerBlock), 0, 0,
input_shape[rank - 2], input_shape[rank - 1],
output_height, output_width,
output_div_pitches[rank - 2], div_output_image,
input_data, output_data, N, extrapolation_value,
reinterpret_cast<BilinearMappingInfo*>(dims_mapping));
return;
case UpsampleMode::CUBIC:
hipLaunchKernelGGL(( _ResizeCubicCoordinateMapping<T>), dim3(blocksPerDimsMappingGrid), dim3(32), 0, 0,
input_shape[rank - 2], input_shape[rank - 1],
output_height, output_width,
scales_vals[rank - 2], scales_vals[rank - 1],
roi_vals[rank - 2], roi_vals[rank - 2 + rank],
roi_vals[rank - 1], roi_vals[rank - 1 + rank],
output_height + output_width, extrapolation_enabled,
cubic_coeff_a, exclude_outside, transform_coordinate,
reinterpret_cast<CubicMappingInfo*>(dims_mapping));
hipLaunchKernelGGL(( _ResizeBiCubicKernel<T>), dim3(blocksPerGrid), dim3(GridDim::maxThreadsPerBlock), 0, 0,
input_shape[rank - 2], input_shape[rank - 1],
output_height, output_width,
output_div_pitches[rank - 2], div_output_image,
input_data, output_data, N, extrapolation_value,
reinterpret_cast<CubicMappingInfo*>(dims_mapping));
return;
}
}
#define SPECIALIZED_IMPL(T) \
template void ResizeImpl<T>( \
const UpsampleMode upsample_mode, \
const int rank, \
TArray<int64_t>& input_shape, \
TArray<int64_t>& output_shape, \
TArray<int64_t>& input_strides, \
TArray<fast_divmod>& output_div_pitches, \
TArray<float>& scales_vals, \
TArray<float>& roi_vals, \
const T* input_data, \
T* output_data, \
const size_t N, \
bool extrapolation_enabled, \
const T extrapolation_value, \
float cubic_coeff_a, \
bool exclude_outside, \
ResizeCoordinateTransformationMode coordinate_transform_mode, \
ResizeNearestMode nearest_mode, \
void* dims_mapping);
SPECIALIZED_IMPL(float)
SPECIALIZED_IMPL(double)
SPECIALIZED_IMPL(half)
SPECIALIZED_IMPL(int32_t)
SPECIALIZED_IMPL(uint8_t)
} // namespace cuda
} // namespace onnxruntime
| 58ed34e343ae8216eb07b07526be649251cd5701.cu | #include "core/providers/cuda/cu_inc/common.cuh"
#include "core/providers/cuda/tensor/resize_impl.h"
namespace onnxruntime {
namespace cuda {
using onnxruntime::ResizeCoordinateTransformationMode;
using onnxruntime::ResizeNearestMode;
using onnxruntime::UpsampleMode;
__device__ int NearestPixel_SIMPLE(float x_original, bool is_down_sampling) {
if (is_down_sampling) {
return static_cast<int>(ceil(x_original));
} else {
return static_cast<int>(x_original);
}
}
__device__ int NearestPixel_ROUND_PREFER_FLOOR(float x_original, bool) {
if (x_original == static_cast<int>(x_original) + 0.5f) {
return static_cast<int>(floor(x_original));
}
return static_cast<int>(round(x_original));
}
__device__ int NearestPixel_ROUND_PREFER_CEIL(float x_original, bool) {
return static_cast<int>(round(x_original));
}
__device__ int NearestPixel_FLOOR(float x_original, bool) {
return static_cast<int>(floor(x_original));
}
__device__ int NearestPixel_CEIL(float x_original, bool) {
return static_cast<int>(ceil(x_original));
}
using CudaFunctionNearestPixel = int (*)(float, bool);
__device__ CudaFunctionNearestPixel func_NearestPixel_SIMPLE = NearestPixel_SIMPLE;
__device__ CudaFunctionNearestPixel func_NearestPixel_ROUND_PREFER_FLOOR = NearestPixel_ROUND_PREFER_FLOOR;
__device__ CudaFunctionNearestPixel func_NearestPixel_ROUND_PREFER_CEIL = NearestPixel_ROUND_PREFER_CEIL;
__device__ CudaFunctionNearestPixel func_NearestPixel_FLOOR = NearestPixel_FLOOR;
__device__ CudaFunctionNearestPixel func_NearestPixel_CEIL = NearestPixel_CEIL;
CudaFunctionNearestPixel GetDeviceNearstPixelFunction(ResizeNearestMode nearest_mode) {
static bool already_copied = false;
static std::mutex s_mutext;
static CudaFunctionNearestPixel s_nearest_pixel[ResizeNearestMode::NearestModeCount];
if (!already_copied) {
std::lock_guard<std::mutex> lock(s_mutext);
if (!already_copied) {
CUDA_CALL(cudaMemcpyFromSymbol(&s_nearest_pixel[ResizeNearestMode::SIMPLE],
func_NearestPixel_SIMPLE, sizeof(CudaFunctionNearestPixel)));
CUDA_CALL(cudaMemcpyFromSymbol(&s_nearest_pixel[ResizeNearestMode::ROUND_PREFER_FLOOR],
func_NearestPixel_ROUND_PREFER_FLOOR, sizeof(CudaFunctionNearestPixel)));
CUDA_CALL(cudaMemcpyFromSymbol(&s_nearest_pixel[ResizeNearestMode::ROUND_PREFER_CEIL],
func_NearestPixel_ROUND_PREFER_CEIL, sizeof(CudaFunctionNearestPixel)));
CUDA_CALL(cudaMemcpyFromSymbol(&s_nearest_pixel[ResizeNearestMode::FLOOR],
func_NearestPixel_FLOOR, sizeof(CudaFunctionNearestPixel)));
CUDA_CALL(cudaMemcpyFromSymbol(&s_nearest_pixel[ResizeNearestMode::CEIL],
func_NearestPixel_CEIL, sizeof(CudaFunctionNearestPixel)));
already_copied = true;
}
}
return s_nearest_pixel[nearest_mode];
}
__device__ float TransformCoordinate_ASYMMETRIC(float x_resized, float x_scale, float, float, float, float) {
return x_resized / x_scale;
}
__device__ float TransformCoordinate_HALF_PIXEL(float x_resized, float x_scale, float, float, float, float) {
return ((x_resized + 0.5f) / x_scale) - 0.5f;
}
__device__ float TransformCoordinate_PYTORCH_HALF_PIXEL(
float x_resized, float x_scale, float length_resized, float, float, float) {
return length_resized > 1 ? (x_resized + 0.5f) / x_scale - 0.5f : 0.0f;
}
__device__ float TransformCoordinate_TF_HALF_PIXEL_FOR_NN(
float x_resized, float x_scale, float, float, float, float) {
return (x_resized + 0.5f) / x_scale;
}
__device__ float TransformCoordinate_ALIGN_CORNERS(
float x_resized, float, float length_resized, float length_original, float, float) {
return length_resized == 1 ? 0 : x_resized * (length_original - 1) / (length_resized - 1);
}
__device__ float TransformCoordinate_TF_CROP_AND_RESIZE(
float x_resized, float, float length_resized, float length_original, float roi_start, float roi_end) {
auto orig = length_resized > 1
? roi_start * (length_original - 1) + (x_resized * (roi_end - roi_start) * (length_original - 1)) / (length_resized - 1)
: 0.5 * (roi_start + roi_end) * (length_original - 1);
return static_cast<float>(orig);
}
using CudaFunctionOriginalCoordinate = float (*)(float, float, float, float, float, float);
__device__ CudaFunctionOriginalCoordinate func_TransformCoordinate_ASYMMETRIC = TransformCoordinate_ASYMMETRIC;
__device__ CudaFunctionOriginalCoordinate func_TransformCoordinate_HALF_PIXEL = TransformCoordinate_HALF_PIXEL;
__device__ CudaFunctionOriginalCoordinate func_TransformCoordinate_PYTORCH_HALF_PIXEL = TransformCoordinate_PYTORCH_HALF_PIXEL;
__device__ CudaFunctionOriginalCoordinate func_TransformCoordinate_ALIGN_CORNERS = TransformCoordinate_ALIGN_CORNERS;
__device__ CudaFunctionOriginalCoordinate func_TransformCoordinate_TF_HALF_PIXEL_FOR_NN = TransformCoordinate_TF_HALF_PIXEL_FOR_NN;
__device__ CudaFunctionOriginalCoordinate func_TransformCoordinate_TF_CROP_AND_RESIZE = TransformCoordinate_TF_CROP_AND_RESIZE;
CudaFunctionOriginalCoordinate GetDeviceOriginalCoordinateFunc(ResizeCoordinateTransformationMode coordinate_transform_mode) {
static bool already_copied = false;
static std::mutex s_mutext;
static CudaFunctionOriginalCoordinate s_coordinate_tranforms[ResizeCoordinateTransformationMode::CoordinateTransformationModeCount];
if (!already_copied) {
std::lock_guard<std::mutex> lock(s_mutext);
if (!already_copied) {
CUDA_CALL(cudaMemcpyFromSymbol(&s_coordinate_tranforms[ResizeCoordinateTransformationMode::HALF_PIXEL],
func_TransformCoordinate_HALF_PIXEL, sizeof(CudaFunctionOriginalCoordinate)));
CUDA_CALL(cudaMemcpyFromSymbol(&s_coordinate_tranforms[ResizeCoordinateTransformationMode::ASYMMETRIC],
func_TransformCoordinate_ASYMMETRIC, sizeof(CudaFunctionOriginalCoordinate)));
CUDA_CALL(cudaMemcpyFromSymbol(&s_coordinate_tranforms[ResizeCoordinateTransformationMode::PYTORCH_HALF_PIXEL],
func_TransformCoordinate_PYTORCH_HALF_PIXEL, sizeof(CudaFunctionOriginalCoordinate)));
CUDA_CALL(cudaMemcpyFromSymbol(&s_coordinate_tranforms[ResizeCoordinateTransformationMode::ALIGN_CORNERS],
func_TransformCoordinate_ALIGN_CORNERS, sizeof(CudaFunctionOriginalCoordinate)));
CUDA_CALL(cudaMemcpyFromSymbol(&s_coordinate_tranforms[ResizeCoordinateTransformationMode::TF_HALF_PIXEL_FOR_NN],
func_TransformCoordinate_TF_HALF_PIXEL_FOR_NN, sizeof(CudaFunctionOriginalCoordinate)));
CUDA_CALL(cudaMemcpyFromSymbol(&s_coordinate_tranforms[ResizeCoordinateTransformationMode::TF_CROP_AND_RESIZE],
func_TransformCoordinate_TF_CROP_AND_RESIZE, sizeof(CudaFunctionOriginalCoordinate)));
already_copied = true;
}
}
return s_coordinate_tranforms[coordinate_transform_mode];
}
struct NearestMappingInfo {
int origin_;
int extrapolate_;
};
template <typename T>
__global__ void _ResizeNearestMappingKernel2D(
const int input_height, const int input_width,
const int output_height, const int output_width,
const float scales_height, const float scales_width,
const float roi_start_height, const float roi_end_height,
const float roi_start_width, const float roi_end_width,
const bool extrapolation_enabled,
CudaFunctionOriginalCoordinate transform_coordinate,
CudaFunctionNearestPixel calc_nearest_pixel,
NearestMappingInfo* dims_mapping) {
CALCULATE_ELEMENTWISE_INDEX_OR_EXIT(id, output_height + output_width);
if (id >= 0 && id < output_height) { // for Height
int dim = id;
// only apply co-ordinate transformation if scale != 1.0
if (scales_height == 1.0f) {
dims_mapping[id].extrapolate_ = 0;
} else {
float orig_coord = transform_coordinate(static_cast<float>(dim), scales_height, static_cast<float>(output_height),
static_cast<float>(input_height), roi_start_height, roi_end_height);
dims_mapping[id].extrapolate_ = static_cast<int>(
extrapolation_enabled && (orig_coord < 0.f || orig_coord > static_cast<float>(input_height - 1)));
dim = calc_nearest_pixel(orig_coord, scales_height < 1);
if (dim >= input_height) dim = input_height - 1;
if (dim < 0) dim = 0;
}
dims_mapping[id].origin_ = dim;
} else {
int dim = id - output_height;
// only apply co-ordinate transformation if scale != 1.0
if (scales_width == 1.0f) {
dims_mapping[id].extrapolate_ = 0;
} else {
float orig_coord = transform_coordinate(static_cast<float>(dim), scales_width, static_cast<float>(output_width),
static_cast<float>(input_width), roi_start_width, roi_end_width);
dims_mapping[id].extrapolate_ = static_cast<int>(
extrapolation_enabled && (orig_coord < 0.f || orig_coord > static_cast<float>(input_width - 1)));
dim = calc_nearest_pixel(orig_coord, scales_width < 1);
if (dim >= input_width) dim = input_width - 1;
if (dim < 0) dim = 0;
}
dims_mapping[id].origin_ = dim;
return;
}
}
template <typename T>
__global__ void _ResizeNearestMappingKernel(
const size_t rank,
const TArray<int64_t> input_shape,
const TArray<int64_t> output_shape,
const TArray<float> scales,
const TArray<float> roi,
const size_t total_dim_sum,
bool extrapolation_enabled,
CudaFunctionOriginalCoordinate transform_coordinate,
CudaFunctionNearestPixel calc_nearest_pixel,
int64_t* prefix_dim_sum,
NearestMappingInfo* dims_mapping) {
CALCULATE_ELEMENTWISE_INDEX_OR_EXIT(id, total_dim_sum);
int64_t dim_sum = 0;
for (int axis = 0; axis < rank; ++axis) {
if (id == dim_sum) {
prefix_dim_sum[axis] = dim_sum;
}
if (id >= dim_sum && id < dim_sum + output_shape[axis]) {
int dim = id - dim_sum;
// only apply co-ordinate transformation if scale != 1.0
if (scales[axis] == 1.0f) {
dims_mapping[id].extrapolate_ = 0;
} else {
float orig_coord = transform_coordinate(static_cast<float>(dim), scales[axis], static_cast<float>(output_shape[axis]),
static_cast<float>(input_shape[axis]), roi[axis], roi[axis + rank]);
dims_mapping[id].extrapolate_ = static_cast<int>(extrapolation_enabled && (orig_coord < 0.f || orig_coord > static_cast<float>(input_shape[axis] - 1)));
dim = calc_nearest_pixel(orig_coord, scales[axis] < 1);
if (dim >= input_shape[axis]) dim = input_shape[axis] - 1;
if (dim < 0) dim = 0;
}
dims_mapping[id].origin_ = dim;
return;
}
dim_sum += output_shape[axis];
}
}
template <typename T, bool UseExtrapolation>
__global__ void _ResizeNearestKernel2D(
const int64_t output_height, const int64_t output_width,
const int64_t input_stride_image, const int input_stride_row,
const fast_divmod output_stride_image, const fast_divmod output_stride_row,
const T* input_data, T* output_data, const size_t N,
const T extrapolation_value, const NearestMappingInfo* dims_mapping) {
CALCULATE_ELEMENTWISE_INDEX_OR_EXIT(id, N);
int imageid, h, w, output_index;
output_stride_image.divmod(static_cast<int>(id), imageid, output_index);
output_stride_row.divmod(output_index, h, w);
if (UseExtrapolation) {
if (dims_mapping[h].extrapolate_ + dims_mapping[output_height + w].extrapolate_) {
output_data[id] = extrapolation_value;
return;
}
}
int input_index = input_stride_image * imageid +
input_stride_row * dims_mapping[h].origin_ +
dims_mapping[output_height + w].origin_;
output_data[id] = input_data[input_index];
}
template <typename T>
__global__ void _ResizeNearestKernel(
const int rank,
const TArray<int64_t> input_strides,
const TArray<fast_divmod> output_div_pitches,
const T* input_data,
T* output_data,
const size_t N,
const T extrapolation_value,
const int64_t* prefix_dim_sum,
const NearestMappingInfo* dims_mapping) {
CALCULATE_ELEMENTWISE_INDEX_OR_EXIT(id, N);
int output_index = static_cast<int>(id);
int input_index = 0;
int extrapolation_occured = 0;
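  // Decompose the flat output index axis by axis with fast_divmod and accumulate the
  // input offset from the per-axis mapping tables.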
for (int axis = 0; axis < rank; ++axis) {
int dim = 0;
output_div_pitches[axis].divmod(output_index, dim, output_index);
const NearestMappingInfo& mi = dims_mapping[prefix_dim_sum[axis] + dim];
extrapolation_occured += mi.extrapolate_;
input_index += input_strides[axis] * mi.origin_;
}
output_data[id] = extrapolation_occured ? extrapolation_value : input_data[input_index];
}
struct BilinearMappingInfo {
int origin_;
float weight_;
int extrapolate_;
};
template <typename T>
__global__ void _ResizeBilinearCoordinateMapping(
int64_t input_height, int64_t input_width,
int64_t output_height, int64_t output_width,
float scale_height, float scale_width,
float roi_height_start, float roi_height_end,
float roi_width_start, float roi_width_end,
const size_t SumHW, bool extrapolation_enabled,
CudaFunctionOriginalCoordinate transform_coordinate,
BilinearMappingInfo* dims_mapping) {
CALCULATE_ELEMENTWISE_INDEX_OR_EXIT(id, SumHW);
if (id < output_height) { // y = id
float input_y = scale_height == 1 ? static_cast<float>(id) :
transform_coordinate(static_cast<float>(id), scale_height,
static_cast<float>(output_height), static_cast<float>(input_height),
roi_height_start, roi_height_end);
dims_mapping[id].extrapolate_ = (int)(extrapolation_enabled && (input_y < 0 || input_y > static_cast<float>(input_height - 1)));
input_y = max(0.0f, min(input_y, static_cast<float>(input_height - 1)));
int y_int = static_cast<int>(input_y);
dims_mapping[id].origin_ = y_int;
dims_mapping[id].weight_ = (y_int >= input_height - 1) ? 0.5f : input_y - y_int;
} else { //x = id - output_height
float input_x = scale_width == 1 ? static_cast<float>(id - output_height) :
transform_coordinate(static_cast<float>(id - output_height), scale_width,
static_cast<float>(output_width), static_cast<float>(input_width),
roi_width_start, roi_width_end);
dims_mapping[id].extrapolate_ = (int)(extrapolation_enabled && (input_x < 0 || input_x > static_cast<float>(input_width - 1)));
input_x = max(0.0f, min(input_x, static_cast<float>(input_width - 1)));
int x_int = static_cast<int>(input_x);
dims_mapping[id].origin_ = x_int;
dims_mapping[id].weight_ = (x_int >= input_width - 1) ? 0.5f : input_x - x_int;
}
}
// The following method supports an N-D input in 'Linear mode'. The last two dimensions are [H, W].
// The scale values for all dimensions except the last two are 1.
template <typename T>
__global__ void _ResizeBilinearKernel(
int64_t input_height, int64_t input_width,
int64_t output_height, int64_t output_width,
fast_divmod div_output_width, fast_divmod div_output_image,
const T* input_data, T* output_data, const size_t N,
const T extrapolation_value,
BilinearMappingInfo* dims_mapping) {
CALCULATE_ELEMENTWISE_INDEX_OR_EXIT(id, N);
int bxc, output_image_index;
div_output_image.divmod(id, bxc, output_image_index);
CUDA_LONG input_index = bxc * input_height * input_width;
int output_y, output_x;
div_output_width.divmod(output_image_index, output_y, output_x);
if (dims_mapping[output_y].extrapolate_ || dims_mapping[output_x + output_height].extrapolate_) {
output_data[id] = extrapolation_value;
return;
}
float y_offset_0 = dims_mapping[output_y].weight_;
int y_int = dims_mapping[output_y].origin_;
float x_offset_0 = dims_mapping[output_x + output_height].weight_;
int x_int = dims_mapping[output_x + output_height].origin_;
input_index += y_int * input_width + x_int;
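  // Gather the 2x2 neighborhood; at the bottom/right borders the nearest valid sample is reused.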
T x00 = input_data[input_index];
bool end_of_h = (y_int >= input_height - 1);
bool end_of_w = (x_int >= input_width - 1);
T x10 = end_of_w ? x00 : input_data[input_index + 1];
T x01 = end_of_h ? x00 : input_data[input_index + input_width];
T x11 = end_of_w ? x01 : (end_of_h ? x10 : input_data[input_index + input_width + 1]);
float y_offset_1 = 1.0f - y_offset_0;
float x_offset_1 = 1.0f - x_offset_0;
output_data[id] =
x00 * static_cast<T>(y_offset_1 * x_offset_1) +
x01 * static_cast<T>(y_offset_0 * x_offset_1) +
x10 * static_cast<T>(y_offset_1 * x_offset_0) +
x11 * static_cast<T>(y_offset_0 * x_offset_0);
}
template <typename T>
__device__ __forceinline__ float CubicInterpolationRowwise(
const T* image, int x, int y, int input_height, int input_width,
float coeff0, float coeff1, float coeff2, float coeff3) {
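  // Weights four horizontally adjacent pixels (x-1..x+2) on row y, clamping both the
  // row and column indices to the image borders.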
int row_index = max(0, min(y, input_height - 1)) * input_width;
return coeff0 * static_cast<float>(image[row_index + max(0, min(x - 1, input_width - 1))]) +
coeff1 * static_cast<float>(image[row_index + max(0, min(x, input_width - 1))]) +
coeff2 * static_cast<float>(image[row_index + max(0, min(x + 1, input_width - 1))]) +
coeff3 * static_cast<float>(image[row_index + max(0, min(x + 2, input_width - 1))]);
}
struct CubicMappingInfo {
int origin_;
int extrapolate_;
float coeff0_;
float coeff1_;
float coeff2_;
float coeff3_;
};
template <typename T>
__global__ void _ResizeCubicCoordinateMapping(
int64_t input_height, int64_t input_width,
int64_t output_height, int64_t output_width,
float scale_height, float scale_width,
float roi_height_start, float roi_height_end,
float roi_width_start, float roi_width_end,
const size_t SumHW, bool extrapolation_enabled,
float cubic_coeff_a, bool exclude_outside,
CudaFunctionOriginalCoordinate transform_coordinate,
CubicMappingInfo* dims_mapping) {
CALCULATE_ELEMENTWISE_INDEX_OR_EXIT(id, SumHW);
auto& dm = dims_mapping[id];
bool is_y_axis = (id < output_height);
int max_input_coord = static_cast<int>(is_y_axis ? input_height : input_width);
float scale = is_y_axis ? scale_height : scale_width;
float input_coordinat = scale == 1 ? (is_y_axis ? id : id - output_height) :
transform_coordinate(
static_cast<float>(is_y_axis ? id : id - output_height),
scale,
static_cast<float>(is_y_axis ? output_height : output_width),
static_cast<float>(max_input_coord),
(is_y_axis ? roi_height_start : roi_width_start),
(is_y_axis ? roi_height_end : roi_width_end));
int coord_int = static_cast<int>(floor(input_coordinat));
float s_coord = abs(input_coordinat - coord_int);
float coeff_sum = 1.0f;
float coeff_0 = static_cast<float>(((cubic_coeff_a * (s_coord + 1) - 5 * cubic_coeff_a) * (s_coord + 1) + 8 * cubic_coeff_a) * (s_coord + 1) - 4 * cubic_coeff_a);
float coeff_1 = static_cast<float>(((cubic_coeff_a + 2) * s_coord - (cubic_coeff_a + 3)) * s_coord * s_coord + 1);
float coeff_2 = static_cast<float>(((cubic_coeff_a + 2) * (1 - s_coord) - (cubic_coeff_a + 3)) * (1 - s_coord) * (1 - s_coord) + 1);
float coeff_3 = static_cast<float>(((cubic_coeff_a * (2 - s_coord) - 5 * cubic_coeff_a) * (2 - s_coord) + 8 * cubic_coeff_a) * (2 - s_coord) - 4 * cubic_coeff_a);
if (exclude_outside) {
coeff_0 = (coord_int - 1 < 0 || coord_int - 1 >= max_input_coord) ? 0.0 : coeff_0;
coeff_1 = (coord_int + 0 < 0 || coord_int + 0 >= max_input_coord) ? 0.0 : coeff_1;
coeff_2 = (coord_int + 1 < 0 || coord_int + 1 >= max_input_coord) ? 0.0 : coeff_2;
coeff_3 = (coord_int + 2 < 0 || coord_int + 2 >= max_input_coord) ? 0.0 : coeff_3;
coeff_sum = coeff_0 + coeff_1 + coeff_2 + coeff_3;
}
dm.origin_ = coord_int;
dm.coeff0_ = coeff_0 / coeff_sum;
dm.coeff1_ = coeff_1 / coeff_sum;
dm.coeff2_ = coeff_2 / coeff_sum;
dm.coeff3_ = coeff_3 / coeff_sum;
dm.extrapolate_ = (int)(extrapolation_enabled && (input_coordinat < 0 || input_coordinat > static_cast<float>(max_input_coord - 1)));
}
template <typename T>
__global__ void _ResizeBiCubicKernel(
int64_t input_height, int64_t input_width, int64_t output_height, int64_t output_width,
fast_divmod div_output_width, fast_divmod div_output_image,
const T* input_data, T* output_data, const size_t N, const T extrapolation_value,
CubicMappingInfo* dims_mapping) {
CALCULATE_ELEMENTWISE_INDEX_OR_EXIT(id, N);
int bxc, output_image_index, output_x, output_y;
div_output_image.divmod(id, bxc, output_image_index);
CUDA_LONG input_index = bxc * input_height * input_width;
div_output_width.divmod(output_image_index, output_y, output_x);
CubicMappingInfo& y_info = dims_mapping[output_y];
CubicMappingInfo& x_info = dims_mapping[output_x + output_height];
if (y_info.extrapolate_ || x_info.extrapolate_) {
output_data[id] = extrapolation_value;
return;
}
float w0 = x_info.coeff0_;
float w1 = x_info.coeff1_;
float w2 = x_info.coeff2_;
float w3 = x_info.coeff3_;
int x_int = x_info.origin_;
int y_int = y_info.origin_;
const T* image = input_data + input_index;
output_data[id] = y_info.coeff0_ * CubicInterpolationRowwise(image, x_int, y_int - 1, input_height, input_width, w0, w1, w2, w3) +
y_info.coeff1_ * CubicInterpolationRowwise(image, x_int, y_int, input_height, input_width, w0, w1, w2, w3) +
y_info.coeff2_ * CubicInterpolationRowwise(image, x_int, y_int + 1, input_height, input_width, w0, w1, w2, w3) +
y_info.coeff3_ * CubicInterpolationRowwise(image, x_int, y_int + 2, input_height, input_width, w0, w1, w2, w3);
}
size_t CalcResizeBufferSize(const onnxruntime::UpsampleMode upsample_mode,
const std::vector<int64_t>& output_dims) {
switch (upsample_mode) {
case UpsampleMode::NN:
return sizeof(int64_t) * output_dims.size() + sizeof(NearestMappingInfo) * std::accumulate(output_dims.begin(), output_dims.end(), 0);
case UpsampleMode::LINEAR:
return sizeof(BilinearMappingInfo) * std::accumulate(output_dims.rbegin(), output_dims.rbegin() + 2, 0);
case UpsampleMode::CUBIC:
return sizeof(CubicMappingInfo) * std::accumulate(output_dims.rbegin(), output_dims.rbegin() + 2, 0);
}
return 0;
}
template <typename T>
void ResizeNearestImpl(
const int rank,
TArray<int64_t>& input_shape,
TArray<int64_t>& output_shape,
TArray<int64_t>& input_strides,
TArray<fast_divmod>& output_div_pitches,
TArray<float>& scales_vals,
TArray<float>& roi_vals,
const T* input_data,
T* output_data,
const size_t N,
bool extrapolation_enabled,
const T extrapolation_value,
float cubic_coeff_a,
CudaFunctionOriginalCoordinate transform_coordinate,
CudaFunctionNearestPixel calc_nearest_pixel,
int64_t* /* prefix_dim_sum */,
NearestMappingInfo* dims_mapping) {
int blocksPerGrid = static_cast<int>(ceil(static_cast<float>(N) / GridDim::maxThreadsPerBlock));
bool could2d = rank >= 2 &&
transform_coordinate != GetDeviceOriginalCoordinateFunc(ResizeCoordinateTransformationMode::TF_CROP_AND_RESIZE) &&
std::all_of(scales_vals.Data(), scales_vals.Data() + (rank - 2), [](float v) { return v == 1.0; });
if (could2d) {
int64_t output_height = output_shape[rank - 2];
int64_t output_width = output_shape[rank - 1];
fast_divmod div_output_image = (rank > 2) ? output_div_pitches[rank - 3] : fast_divmod(output_height * output_width);
int blocksPerDimsMappingGrid = static_cast<int>(ceil((output_height + output_width) / 32.0));
_ResizeNearestMappingKernel2D<T><<<blocksPerDimsMappingGrid, 32, 0>>>(
input_shape[rank - 2], input_shape[rank - 1],
output_height, output_width,
scales_vals[rank - 2], scales_vals[rank - 1],
roi_vals[rank - 2], roi_vals[rank - 2 + rank],
roi_vals[rank - 1], roi_vals[rank - 1 + rank],
extrapolation_enabled, transform_coordinate, calc_nearest_pixel,
dims_mapping);
if (extrapolation_enabled) {
_ResizeNearestKernel2D<T, true><<<blocksPerGrid, GridDim::maxThreadsPerBlock, 0>>>(
output_height, output_width,
input_shape[rank - 2] * input_shape[rank - 1], input_shape[rank - 1],
div_output_image, output_div_pitches[rank - 2],
input_data, output_data, N,
extrapolation_value,
dims_mapping);
} else {
_ResizeNearestKernel2D<T, false><<<blocksPerGrid, GridDim::maxThreadsPerBlock, 0>>>(
output_height, output_width,
input_shape[rank - 2] * input_shape[rank - 1], input_shape[rank - 1],
div_output_image, output_div_pitches[rank - 2],
input_data, output_data, N,
extrapolation_value,
dims_mapping);
}
return;
}
int64_t total_dim_sum = std::accumulate(output_shape.Data(), output_shape.Data() + rank, 0);
int blocksPerDimsMappingGrid = (int)(ceil(static_cast<double>(total_dim_sum) / 32));
_ResizeNearestMappingKernel<T><<<blocksPerDimsMappingGrid, 32, 0>>>(
rank, input_shape, output_shape,
scales_vals, roi_vals,
total_dim_sum, extrapolation_enabled,
transform_coordinate, calc_nearest_pixel,
reinterpret_cast<int64_t*>(dims_mapping),
reinterpret_cast<NearestMappingInfo*>(reinterpret_cast<int64_t*>(dims_mapping) + rank));
_ResizeNearestKernel<T><<<blocksPerGrid, GridDim::maxThreadsPerBlock, 0>>>(
rank, input_strides, output_div_pitches,
input_data, output_data, N,
extrapolation_value,
reinterpret_cast<const int64_t*>(dims_mapping),
reinterpret_cast<const NearestMappingInfo*>(reinterpret_cast<int64_t*>(dims_mapping) + rank));
return;
}
template <typename T>
void ResizeImpl(
const UpsampleMode upsample_mode,
const int rank,
TArray<int64_t>& input_shape,
TArray<int64_t>& output_shape,
TArray<int64_t>& input_strides,
TArray<fast_divmod>& output_div_pitches,
TArray<float>& scales_vals,
TArray<float>& roi_vals,
const T* input_data,
T* output_data,
const size_t N,
bool extrapolation_enabled,
const T extrapolation_value,
float cubic_coeff_a,
bool exclude_outside,
ResizeCoordinateTransformationMode coordinate_transform_mode,
ResizeNearestMode nearest_mode,
void* dims_mapping) {
bool isSame = std::all_of(scales_vals.Data(), scales_vals.Data() + rank, [](float v) { return v == 1.0f; }) &&
(coordinate_transform_mode != ResizeCoordinateTransformationMode::TF_CROP_AND_RESIZE);
if (isSame) {
cudaMemcpyAsync(output_data, input_data, N * sizeof(T), cudaMemcpyDeviceToDevice);
return;
}
CudaFunctionOriginalCoordinate transform_coordinate = GetDeviceOriginalCoordinateFunc(coordinate_transform_mode);
CudaFunctionNearestPixel calc_nearest_pixel = GetDeviceNearstPixelFunction(nearest_mode);
if (upsample_mode == UpsampleMode::NN) {
ResizeNearestImpl(
rank, input_shape, output_shape, input_strides, output_div_pitches,
scales_vals, roi_vals, input_data, output_data, N,
extrapolation_enabled, extrapolation_value, cubic_coeff_a,
transform_coordinate, calc_nearest_pixel,
reinterpret_cast<int64_t*>(dims_mapping),
reinterpret_cast<NearestMappingInfo*>(reinterpret_cast<int64_t*>(dims_mapping) + rank));
return;
}
int blocksPerGrid = (int)(ceil(static_cast<float>(N) / GridDim::maxThreadsPerBlock));
fast_divmod div_output_image = (rank > 2) ? output_div_pitches[rank - 3] : fast_divmod(gsl::narrow_cast<int>(N));
int64_t output_height = output_shape[rank - 2];
int64_t output_width = output_shape[rank - 1];
int blocksPerDimsMappingGrid = (int)(ceil((output_height + output_width) / 32.0));
switch (upsample_mode) {
case UpsampleMode::LINEAR:
_ResizeBilinearCoordinateMapping<T><<<blocksPerDimsMappingGrid, 32, 0>>>(
input_shape[rank - 2], input_shape[rank - 1],
output_height, output_width,
scales_vals[rank - 2], scales_vals[rank - 1],
roi_vals[rank - 2], roi_vals[rank - 2 + rank],
roi_vals[rank - 1], roi_vals[rank - 1 + rank],
output_height + output_width, extrapolation_enabled, transform_coordinate,
reinterpret_cast<BilinearMappingInfo*>(dims_mapping));
_ResizeBilinearKernel<T><<<blocksPerGrid, GridDim::maxThreadsPerBlock, 0>>>(
input_shape[rank - 2], input_shape[rank - 1],
output_height, output_width,
output_div_pitches[rank - 2], div_output_image,
input_data, output_data, N, extrapolation_value,
reinterpret_cast<BilinearMappingInfo*>(dims_mapping));
return;
case UpsampleMode::CUBIC:
_ResizeCubicCoordinateMapping<T><<<blocksPerDimsMappingGrid, 32, 0>>>(
input_shape[rank - 2], input_shape[rank - 1],
output_height, output_width,
scales_vals[rank - 2], scales_vals[rank - 1],
roi_vals[rank - 2], roi_vals[rank - 2 + rank],
roi_vals[rank - 1], roi_vals[rank - 1 + rank],
output_height + output_width, extrapolation_enabled,
cubic_coeff_a, exclude_outside, transform_coordinate,
reinterpret_cast<CubicMappingInfo*>(dims_mapping));
_ResizeBiCubicKernel<T><<<blocksPerGrid, GridDim::maxThreadsPerBlock, 0>>>(
input_shape[rank - 2], input_shape[rank - 1],
output_height, output_width,
output_div_pitches[rank - 2], div_output_image,
input_data, output_data, N, extrapolation_value,
reinterpret_cast<CubicMappingInfo*>(dims_mapping));
return;
}
}
#define SPECIALIZED_IMPL(T) \
template void ResizeImpl<T>( \
const UpsampleMode upsample_mode, \
const int rank, \
TArray<int64_t>& input_shape, \
TArray<int64_t>& output_shape, \
TArray<int64_t>& input_strides, \
TArray<fast_divmod>& output_div_pitches, \
TArray<float>& scales_vals, \
TArray<float>& roi_vals, \
const T* input_data, \
T* output_data, \
const size_t N, \
bool extrapolation_enabled, \
const T extrapolation_value, \
float cubic_coeff_a, \
bool exclude_outside, \
ResizeCoordinateTransformationMode coordinate_transform_mode, \
ResizeNearestMode nearest_mode, \
void* dims_mapping);
SPECIALIZED_IMPL(float)
SPECIALIZED_IMPL(double)
SPECIALIZED_IMPL(half)
SPECIALIZED_IMPL(int32_t)
SPECIALIZED_IMPL(uint8_t)
} // namespace cuda
} // namespace onnxruntime
|
180e4d6732d6c7decc69a54fc2edc04aa80c62cb.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <vector_functions.h>
#include "device.hpp"
#include "texture_binder.hpp"
#include "../internal.hpp"
#include "math_constants.h"
using namespace kfusion::device;
////////////////////////////////////////////////////////////////////////////////////////////////////////////////
/// Volume initialization
namespace kfusion
{
namespace device
{
__global__ void clear_volume_kernel(TsdfVolume tsdf)
{
int x = threadIdx.x + blockIdx.x * blockDim.x;
int y = threadIdx.y + blockIdx.y * blockDim.y;
if (x < tsdf.dims.x && y < tsdf.dims.y)
{
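                // Each (x, y) thread clears its entire z-column of the volume.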
ushort2 *beg = tsdf.beg(x, y);
ushort2 *end = beg + tsdf.dims.x * tsdf.dims.y * tsdf.dims.z;
for(ushort2* pos = beg; pos != end; pos = tsdf.zstep(pos))
*pos = pack_tsdf (0.f, 0);
}
}
}
}
void kfusion::device::clear_volume(TsdfVolume volume)
{
dim3 block (32, 8);
dim3 grid (1, 1, 1);
grid.x = divUp (volume.dims.x, block.x);
grid.y = divUp (volume.dims.y, block.y);
hipLaunchKernelGGL(( clear_volume_kernel), dim3(grid), dim3(block), 0, 0, volume);
cudaSafeCall ( hipGetLastError () );
}
////////////////////////////////////////////////////////////////////////////////////////////////////////////////
/// Volume integration
float3 test;
namespace kfusion
{
namespace device
{
texture<float, 2> dists_tex(0, hipFilterModePoint, hipAddressModeBorder, cudaCreateChannelDescHalf());
struct TsdfIntegrator
{
Aff3f vol2cam;
Projector proj;
int2 dists_size;
float tranc_dist_inv;
__kf_device__
void operator()(TsdfVolume& volume) const
{
int x = blockIdx.x * blockDim.x + threadIdx.x;
int y = blockIdx.y * blockDim.y + threadIdx.y;
if (x >= volume.dims.x || y >= volume.dims.y)
return;
//float3 zstep = vol2cam.R * make_float3(0.f, 0.f, volume.voxel_size.z);
float3 zstep = make_float3(vol2cam.R.data[0].z, vol2cam.R.data[1].z, vol2cam.R.data[2].z) * volume.voxel_size.z;
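                // zstep is the camera-space displacement of one voxel step along volume z
                // (third column of R scaled by the voxel size), so vc is advanced incrementally.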
float3 vx = make_float3(x * volume.voxel_size.x, y * volume.voxel_size.y, 0);
                float3 vc = vol2cam * vx; // transform from the volume coordinate frame to the camera frame
TsdfVolume::elem_type* vptr = volume.beg(x, y);
for(int i = 0; i < volume.dims.z; ++i, vc += zstep, vptr = volume.zstep(vptr))
{
float2 coo = proj(vc);
//#if defined __CUDA_ARCH__ && __CUDA_ARCH__ >= 300
                    // This is actually a workaround for Kepler: it doesn't return 0.f for texture
                    // fetches at out-of-border coordinates, even with cudaAddressModeBorder.
if (coo.x < 0 || coo.y < 0 || coo.x >= dists_size.x || coo.y >= dists_size.y)
continue;
//#endif
float Dp = tex2D(dists_tex, coo.x, coo.y);
if(Dp == 0 || vc.z <= 0)
continue;
float sdf = Dp - __fsqrt_rn(dot(vc, vc)); //Dp - norm(v)
if (sdf >= -volume.trunc_dist)
{
float tsdf = fmin(1.f, sdf * tranc_dist_inv);
//read and unpack
int weight_prev;
float tsdf_prev = unpack_tsdf (gmem::LdCs(vptr), weight_prev);
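                        // Weighted running average of the truncated SDF; the voxel weight saturates at max_weight.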
float tsdf_new = __fdividef(__fmaf_rn(tsdf_prev, weight_prev, tsdf), weight_prev + 1);
int weight_new = min (weight_prev + 1, volume.max_weight);
//pack and write
gmem::StCs(pack_tsdf (tsdf_new, weight_new), vptr);
}
} // for(;;)
}
};
__global__ void integrate_kernel( const TsdfIntegrator integrator, TsdfVolume volume) { integrator(volume); };
__global__
void project_kernel(const Projector proj, PtrStep<Point> points, PtrStepSz<ushort> depth, int rows, int cols)
{
int x = threadIdx.x + blockIdx.x * blockDim.x;
int y = threadIdx.y + blockIdx.y * blockDim.y;
float qnan = numeric_limits<float>::quiet_NaN ();
            if (x < cols && y < rows) {
Point pt = points(y, x);
if(isnan(pt.x) || isnan(pt.y) || isnan(pt.z))
return;
float3 point = make_float3(pt.x, pt.y, pt.z);
float2 coo = proj(point);
if (coo.x < 0 || coo.y < 0 || coo.y >= rows || coo.x >= cols)
{
points(y, x) = make_float4(qnan, qnan, qnan, 0.f);
return;
}
float Dp = tex2D(dists_tex, coo.x, coo.y);
depth(coo.y, coo.x) = 0;
points(y, x) = make_float4(coo.x * Dp, coo.y * Dp, Dp, 0.f);
}
}
}
}
void kfusion::device::integrate(const PtrStepSz<ushort>& dists, TsdfVolume& volume, const Aff3f& aff, const Projector& proj)
{
TsdfIntegrator ti;
ti.dists_size = make_int2(dists.cols, dists.rows);
ti.vol2cam = aff;
ti.proj = proj;
ti.tranc_dist_inv = 1.f/volume.trunc_dist;
dists_tex.filterMode = hipFilterModePoint;
dists_tex.addressMode[0] = hipAddressModeBorder;
dists_tex.addressMode[1] = hipAddressModeBorder;
dists_tex.addressMode[2] = hipAddressModeBorder;
TextureBinder binder(dists, dists_tex, cudaCreateChannelDescHalf()); (void)binder;
dim3 block(32, 8);
dim3 grid(divUp(volume.dims.x, block.x), divUp(volume.dims.y, block.y));
hipLaunchKernelGGL(( integrate_kernel), dim3(grid), dim3(block), 0, 0, ti, volume);
cudaSafeCall ( hipGetLastError () );
cudaSafeCall ( hipDeviceSynchronize() );
}
//TODO: rename as now projecting + removing from depth
void kfusion::device::project_and_remove(const PtrStepSz<ushort>& dists, Points &vertices, const Projector &proj)
{
dists_tex.filterMode = hipFilterModePoint;
dists_tex.addressMode[0] = hipAddressModeBorder;
dists_tex.addressMode[1] = hipAddressModeBorder;
dists_tex.addressMode[2] = hipAddressModeBorder;
TextureBinder binder(dists, dists_tex, cudaCreateChannelDescHalf()); (void)binder;
dim3 block(32, 8);
dim3 grid(divUp(vertices.cols(), block.x), divUp(vertices.rows(), block.y));
hipLaunchKernelGGL(( project_kernel) , dim3(grid), dim3(block), 0, 0, proj, vertices, dists, dists.rows, dists.cols);
cudaSafeCall ( hipGetLastError () );
}
//TODO: rename as now projecting + removing from depth
void kfusion::device::project(const PtrStepSz<ushort> &dists, Points &vertices, const Projector &proj)
{
dists_tex.filterMode = hipFilterModePoint;
dists_tex.addressMode[0] = hipAddressModeBorder;
dists_tex.addressMode[1] = hipAddressModeBorder;
dists_tex.addressMode[2] = hipAddressModeBorder;
TextureBinder binder(dists, dists_tex, cudaCreateChannelDescHalf()); (void)binder;
dim3 block(32, 8);
dim3 grid(divUp(vertices.cols(), block.x), divUp(vertices.rows(), block.y));
hipLaunchKernelGGL(( project_kernel) , dim3(grid), dim3(block), 0, 0, proj, vertices, dists, dists.rows, dists.cols);
cudaSafeCall ( hipGetLastError () );
}
////////////////////////////////////////////////////////////////////////////////////////////////////////////////
/// Volume ray casting
namespace kfusion
{
namespace device
{
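// Ray / axis-aligned box intersection via the slab method: computes the entry (tnear) and exit (tfar)
// distances along the ray, with the box minimum fixed at the origin.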
__kf_device__ void intersect(float3 ray_org, float3 ray_dir, /*float3 box_min,*/ float3 box_max, float &tnear, float &tfar)
{
const float3 box_min = make_float3(0.f, 0.f, 0.f);
// compute intersection of ray with all six bbox planes
float3 invR = make_float3(1.f/ray_dir.x, 1.f/ray_dir.y, 1.f/ray_dir.z);
float3 tbot = invR * (box_min - ray_org);
float3 ttop = invR * (box_max - ray_org);
// re-order intersections to find smallest and largest on each axis
float3 tmin = make_float3(fminf(ttop.x, tbot.x), fminf(ttop.y, tbot.y), fminf(ttop.z, tbot.z));
float3 tmax = make_float3(fmaxf(ttop.x, tbot.x), fmaxf(ttop.y, tbot.y), fmaxf(ttop.z, tbot.z));
// find the largest tmin and the smallest tmax
tnear = fmaxf(fmaxf(tmin.x, tmin.y), fmaxf(tmin.x, tmin.z));
tfar = fminf(fminf(tmax.x, tmax.y), fminf(tmax.x, tmax.z));
}
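// Trilinear interpolation of the packed TSDF at a fractional voxel coordinate; returns NaN outside the volume.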
template<typename Vol>
__kf_device__ float interpolate(const Vol& volume, const float3& p_voxels)
{
float3 cf = p_voxels;
//rounding to negative infinity
int3 g = make_int3(__float2int_rd (cf.x), __float2int_rd (cf.y), __float2int_rd (cf.z));
if (g.x < 0 || g.x >= volume.dims.x - 1 || g.y < 0 || g.y >= volume.dims.y - 1 || g.z < 0 || g.z >= volume.dims.z - 1)
return numeric_limits<float>::quiet_NaN();
float a = cf.x - g.x;
float b = cf.y - g.y;
float c = cf.z - g.z;
float tsdf = 0.f;
tsdf += unpack_tsdf(*volume(g.x + 0, g.y + 0, g.z + 0)) * (1 - a) * (1 - b) * (1 - c);
tsdf += unpack_tsdf(*volume(g.x + 0, g.y + 0, g.z + 1)) * (1 - a) * (1 - b) * c;
tsdf += unpack_tsdf(*volume(g.x + 0, g.y + 1, g.z + 0)) * (1 - a) * b * (1 - c);
tsdf += unpack_tsdf(*volume(g.x + 0, g.y + 1, g.z + 1)) * (1 - a) * b * c;
tsdf += unpack_tsdf(*volume(g.x + 1, g.y + 0, g.z + 0)) * a * (1 - b) * (1 - c);
tsdf += unpack_tsdf(*volume(g.x + 1, g.y + 0, g.z + 1)) * a * (1 - b) * c;
tsdf += unpack_tsdf(*volume(g.x + 1, g.y + 1, g.z + 0)) * a * b * (1 - c);
tsdf += unpack_tsdf(*volume(g.x + 1, g.y + 1, g.z + 1)) * a * b * c;
return tsdf;
}
struct TsdfRaycaster
{
TsdfVolume volume;
Aff3f aff;
Mat3f Rinv;
Vec3f volume_size;
Reprojector reproj;
float time_step;
float3 gradient_delta;
float3 voxel_size_inv;
TsdfRaycaster(const TsdfVolume& volume, const Aff3f& aff, const Mat3f& Rinv, const Reprojector& _reproj);
__kf_device__
float fetch_tsdf(const float3& p) const
{
//rounding to nearest even
int x = __float2int_rn (p.x * voxel_size_inv.x);
int y = __float2int_rn (p.y * voxel_size_inv.y);
int z = __float2int_rn (p.z * voxel_size_inv.z);
return unpack_tsdf(*volume(x, y, z));
}
__kf_device__
void operator()(PtrStepSz<ushort> depth, PtrStep<Normal> normals) const
{
int x = blockIdx.x * blockDim.x + threadIdx.x;
int y = blockIdx.y * blockDim.y + threadIdx.y;
if (x >= depth.cols || y >= depth.rows)
return;
const float qnan = numeric_limits<float>::quiet_NaN();
depth(y, x) = 0;
normals(y, x) = make_float4(qnan, qnan, qnan, qnan);
float3 ray_org = aff.t;
float3 ray_dir = normalized( aff.R * reproj(x, y, 1.f) );
// We do subtract voxel size to minimize checks after
// Note: origin of volume coordinate is placed
// in the center of voxel (0,0,0), not in the corner of the voxel!
float3 box_max = volume_size - volume.voxel_size;
float tmin, tmax;
intersect(ray_org, ray_dir, box_max, tmin, tmax);
const float min_dist = 0.f;
tmin = fmax(min_dist, tmin);
if (tmin >= tmax)
return;
tmax -= time_step;
float3 vstep = ray_dir * time_step;
float3 next = ray_org + ray_dir * tmin;
float tsdf_next = fetch_tsdf(next);
for (float tcurr = tmin; tcurr < tmax; tcurr += time_step)
{
float tsdf_curr = tsdf_next;
float3 curr = next;
next += vstep;
tsdf_next = fetch_tsdf(next);
if (tsdf_curr < 0.f && tsdf_next > 0.f)
break;
if (tsdf_curr > 0.f && tsdf_next < 0.f)
{
float Ft = interpolate(volume, curr * voxel_size_inv);
float Ftdt = interpolate(volume, next * voxel_size_inv);
float Ts = tcurr - __fdividef(time_step * Ft, Ftdt - Ft);
float3 vertex = ray_org + ray_dir * Ts;
float3 normal = compute_normal(vertex);
if (!isnan(normal.x * normal.y * normal.z))
{
normal = Rinv * normal;
vertex = Rinv * (vertex - aff.t);
normals(y, x) = make_float4(normal.x, normal.y, normal.z, 0);
depth(y, x) = static_cast<ushort>(vertex.z * 1000);
}
break;
}
} /* for (;;) */
}
__kf_device__
void operator()(PtrStepSz<Point> points, PtrStep<Normal> normals) const
{
int x = blockIdx.x * blockDim.x + threadIdx.x;
int y = blockIdx.y * blockDim.y + threadIdx.y;
if (x >= points.cols || y >= points.rows)
return;
const float qnan = numeric_limits<float>::quiet_NaN();
points(y, x) = normals(y, x) = make_float4(qnan, qnan, qnan, qnan);
float3 ray_org = aff.t;
float3 ray_dir = normalized( aff.R * reproj(x, y, 1.f) );
// We do subtract voxel size to minimize checks after
// Note: origin of volume coordinate is placed
// in the center of voxel (0,0,0), not in the corner of the voxel!
float3 box_max = volume_size - volume.voxel_size;
float tmin, tmax;
intersect(ray_org, ray_dir, box_max, tmin, tmax);
const float min_dist = 0.f;
tmin = fmax(min_dist, tmin);
if (tmin >= tmax)
return;
tmax -= time_step;
float3 vstep = ray_dir * time_step;
float3 next = ray_org + ray_dir * tmin;
float tsdf_next = fetch_tsdf(next);
for (float tcurr = tmin; tcurr < tmax; tcurr += time_step)
{
float tsdf_curr = tsdf_next;
float3 curr = next;
next += vstep;
tsdf_next = fetch_tsdf(next);
if (tsdf_curr < 0.f && tsdf_next > 0.f)
break;
if (tsdf_curr > 0.f && tsdf_next < 0.f)
{
float Ft = interpolate(volume, curr * voxel_size_inv);
float Ftdt = interpolate(volume, next * voxel_size_inv);
float Ts = tcurr - __fdividef(time_step * Ft, Ftdt - Ft);
float3 vertex = ray_org + ray_dir * Ts;
float3 normal = compute_normal(vertex);
if (!isnan(normal.x * normal.y * normal.z))
{
normal = Rinv * normal;
vertex = Rinv * (vertex - aff.t);
normals(y, x) = make_float4(normal.x, normal.y, normal.z, 0.f);
points(y, x) = make_float4(vertex.x, vertex.y, vertex.z, 0.f);
}
break;
}
} /* for (;;) */
}
__kf_device__
float3 compute_normal(const float3& p) const
{
float3 n;
float Fx1 = interpolate(volume, make_float3(p.x + gradient_delta.x, p.y, p.z) * voxel_size_inv);
float Fx2 = interpolate(volume, make_float3(p.x - gradient_delta.x, p.y, p.z) * voxel_size_inv);
n.x = __fdividef(Fx1 - Fx2, gradient_delta.x);
float Fy1 = interpolate(volume, make_float3(p.x, p.y + gradient_delta.y, p.z) * voxel_size_inv);
float Fy2 = interpolate(volume, make_float3(p.x, p.y - gradient_delta.y, p.z) * voxel_size_inv);
n.y = __fdividef(Fy1 - Fy2, gradient_delta.y);
float Fz1 = interpolate(volume, make_float3(p.x, p.y, p.z + gradient_delta.z) * voxel_size_inv);
float Fz2 = interpolate(volume, make_float3(p.x, p.y, p.z - gradient_delta.z) * voxel_size_inv);
n.z = __fdividef(Fz1 - Fz2, gradient_delta.z);
return normalized (n);
}
};
inline TsdfRaycaster::TsdfRaycaster(const TsdfVolume& _volume, const Aff3f& _aff, const Mat3f& _Rinv, const Reprojector& _reproj)
: volume(_volume), aff(_aff), Rinv(_Rinv), reproj(_reproj) {}
__global__ void raycast_kernel(const TsdfRaycaster raycaster, PtrStepSz<ushort> depth, PtrStep<Normal> normals)
{ raycaster(depth, normals); };
__global__ void raycast_kernel(const TsdfRaycaster raycaster, PtrStepSz<Point> points, PtrStep<Normal> normals)
{ raycaster(points, normals); };
}
}
void kfusion::device::raycast(const TsdfVolume& volume, const Aff3f& aff, const Mat3f& Rinv, const Reprojector& reproj,
Depth& depth, Normals& normals, float raycaster_step_factor, float gradient_delta_factor)
{
TsdfRaycaster rc(volume, aff, Rinv, reproj);
rc.volume_size = volume.voxel_size * volume.dims;
rc.time_step = volume.trunc_dist * raycaster_step_factor;
rc.gradient_delta = volume.voxel_size * gradient_delta_factor;
rc.voxel_size_inv = 1.f/volume.voxel_size;
dim3 block(32, 8);
dim3 grid (divUp (depth.cols(), block.x), divUp (depth.rows(), block.y));
hipLaunchKernelGGL(( raycast_kernel), dim3(grid), dim3(block), 0, 0, rc, (PtrStepSz<ushort>)depth, normals);
cudaSafeCall (hipGetLastError ());
}
void kfusion::device::raycast(const TsdfVolume& volume, const Aff3f& aff, const Mat3f& Rinv, const Reprojector& reproj,
Points& points, Normals& normals, float raycaster_step_factor, float gradient_delta_factor)
{
TsdfRaycaster rc(volume, aff, Rinv, reproj);
rc.volume_size = volume.voxel_size * volume.dims;
rc.time_step = volume.trunc_dist * raycaster_step_factor;
rc.gradient_delta = volume.voxel_size * gradient_delta_factor;
rc.voxel_size_inv = 1.f/volume.voxel_size;
dim3 block(32, 8);
dim3 grid (divUp (points.cols(), block.x), divUp (points.rows(), block.y));
hipLaunchKernelGGL(( raycast_kernel), dim3(grid), dim3(block), 0, 0, rc, (PtrStepSz<Point>)points, normals);
cudaSafeCall (hipGetLastError ());
}
////////////////////////////////////////////////////////////////////////////////////////
/// Volume cloud extraction
namespace kfusion
{
namespace device
{
////////////////////////////////////////////////////////////////////////////////////////
///// Prefix Scan utility
enum ScanKind { exclusive, inclusive };
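// Warp-level Hillis-Steele prefix scan over 32 consecutive shared-memory entries;
// relies on warp-synchronous execution (hence the volatile pointer) and returns the inclusive or exclusive result.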
template<ScanKind Kind, class T>
__kf_device__ T scan_warp ( volatile T *ptr, const unsigned int idx = threadIdx.x )
{
const unsigned int lane = idx & 31; // index of thread in warp (0..31)
if (lane >= 1) ptr[idx] = ptr[idx - 1] + ptr[idx];
if (lane >= 2) ptr[idx] = ptr[idx - 2] + ptr[idx];
if (lane >= 4) ptr[idx] = ptr[idx - 4] + ptr[idx];
if (lane >= 8) ptr[idx] = ptr[idx - 8] + ptr[idx];
if (lane >= 16) ptr[idx] = ptr[idx - 16] + ptr[idx];
if (Kind == inclusive)
return ptr[idx];
else
return (lane > 0) ? ptr[idx - 1] : 0;
}
__device__ int global_count = 0;
__device__ int output_count;
__device__ unsigned int blocks_done = 0;
struct FullScan6
{
enum
{
CTA_SIZE_X = 32,
CTA_SIZE_Y = 6,
CTA_SIZE = CTA_SIZE_X * CTA_SIZE_Y,
MAX_LOCAL_POINTS = 3
};
TsdfVolume volume;
Aff3f aff;
FullScan6(const TsdfVolume& vol) : volume(vol) {}
__kf_device__ float fetch(int x, int y, int z, int& weight) const
{
return unpack_tsdf(*volume(x, y, z), weight);
}
__kf_device__ void operator () (PtrSz<Point> output) const
{
int x = threadIdx.x + blockIdx.x * CTA_SIZE_X;
int y = threadIdx.y + blockIdx.y * CTA_SIZE_Y;
#if __CUDA_ARCH__ < 200
__shared__ int cta_buffer[CTA_SIZE];
#endif
#if __CUDA_ARCH__ >= 120
if (__all (x >= volume.dims.x) || __all (y >= volume.dims.y))
return;
#else
if (Emulation::All(x >= volume.dims.x, cta_buffer) || Emulation::All(y >= volume.dims.y, cta_buffer))
return;
#endif
float3 V;
V.x = (x + 0.5f) * volume.voxel_size.x;
V.y = (y + 0.5f) * volume.voxel_size.y;
int ftid = Block::flattenedThreadId ();
for (int z = 0; z < volume.dims.z - 1; ++z)
{
float3 points[MAX_LOCAL_POINTS];
int local_count = 0;
if (x < volume.dims.x && y < volume.dims.y)
{
int W;
float F = fetch(x, y, z, W);
if (W != 0 && F != 1.f)
{
V.z = (z + 0.5f) * volume.voxel_size.z;
//process dx
if (x + 1 < volume.dims.x)
{
int Wn;
float Fn = fetch(x + 1, y, z, Wn);
if (Wn != 0 && Fn != 1.f)
if ((F > 0 && Fn < 0) || (F < 0 && Fn > 0))
{
float3 p;
p.y = V.y;
p.z = V.z;
float Vnx = V.x + volume.voxel_size.x;
float d_inv = 1.f / (fabs (F) + fabs (Fn));
p.x = (V.x * fabs (Fn) + Vnx * fabs (F)) * d_inv;
points[local_count++] = aff * p;
}
} /* if (x + 1 < volume.dims.x) */
//process dy
if (y + 1 < volume.dims.y)
{
int Wn;
float Fn = fetch (x, y + 1, z, Wn);
if (Wn != 0 && Fn != 1.f)
if ((F > 0 && Fn < 0) || (F < 0 && Fn > 0))
{
float3 p;
p.x = V.x;
p.z = V.z;
float Vny = V.y + volume.voxel_size.y;
float d_inv = 1.f / (fabs (F) + fabs (Fn));
p.y = (V.y * fabs (Fn) + Vny * fabs (F)) * d_inv;
points[local_count++] = aff * p;
}
} /* if (y + 1 < volume.dims.y) */
//process dz
//if (z + 1 < volume.dims.z) // guaranteed by loop
{
int Wn;
float Fn = fetch (x, y, z + 1, Wn);
if (Wn != 0 && Fn != 1.f)
if ((F > 0 && Fn < 0) || (F < 0 && Fn > 0))
{
float3 p;
p.x = V.x;
p.y = V.y;
float Vnz = V.z + volume.voxel_size.z;
float d_inv = 1.f / (fabs (F) + fabs (Fn));
p.z = (V.z * fabs (Fn) + Vnz * fabs (F)) * d_inv;
points[local_count++] = aff * p;
}
} /* if (z + 1 < volume.dims.z) */
} /* if (W != 0 && F != 1.f) */
} /* if (x < volume.dims.x && y < volume.dims.y) */
#if __CUDA_ARCH__ >= 200
/// now we have filled the points array at the current iteration
int total_warp = __popc (__ballot (local_count > 0)) + __popc (__ballot (local_count > 1)) + __popc (__ballot (local_count > 2));
#else
int tid = Block::flattenedThreadId();
cta_buffer[tid] = local_count;
int total_warp = Emulation::warp_reduce(cta_buffer, tid);
#endif
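// Output compaction: each warp reserves space in the global output with a single atomicAdd,
// stages its points in shared memory, then writes them out with coalesced accesses.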
__shared__ float storage_X[CTA_SIZE * MAX_LOCAL_POINTS];
__shared__ float storage_Y[CTA_SIZE * MAX_LOCAL_POINTS];
__shared__ float storage_Z[CTA_SIZE * MAX_LOCAL_POINTS];
if (total_warp > 0)
{
int lane = Warp::laneId ();
int storage_index = (ftid >> Warp::LOG_WARP_SIZE) * Warp::WARP_SIZE * MAX_LOCAL_POINTS;
volatile int* cta_buffer = (int*)(storage_X + storage_index);
cta_buffer[lane] = local_count;
int offset = scan_warp<exclusive>(cta_buffer, lane);
if (lane == 0)
{
int old_global_count = atomicAdd (&global_count, total_warp);
cta_buffer[0] = old_global_count;
}
int old_global_count = cta_buffer[0];
for (int l = 0; l < local_count; ++l)
{
storage_X[storage_index + offset + l] = points[l].x;
storage_Y[storage_index + offset + l] = points[l].y;
storage_Z[storage_index + offset + l] = points[l].z;
}
Point *pos = output.data + old_global_count + lane;
for (int idx = lane; idx < total_warp; idx += Warp::STRIDE, pos += Warp::STRIDE)
{
float x = storage_X[storage_index + idx];
float y = storage_Y[storage_index + idx];
float z = storage_Z[storage_index + idx];
*pos = make_float4(x, y, z, 0.f);
}
bool full = (old_global_count + total_warp) >= output.size;
if (full)
break;
}
} /* for(int z = 0; z < volume.dims.z - 1; ++z) */
///////////////////////////
// prepare for future scans
if (ftid == 0)
{
unsigned int total_blocks = gridDim.x * gridDim.y * gridDim.z;
unsigned int value = atomicInc (&blocks_done, total_blocks);
//last block
if (value == total_blocks - 1)
{
output_count = min ((int)output.size, global_count);
blocks_done = 0;
global_count = 0;
}
}
}
};
__global__ void extract_kernel(const FullScan6 fs, PtrSz<Point> output) { fs(output); }
struct ExtractNormals
{
typedef float8 float8;
TsdfVolume volume;
PtrSz<Point> points;
float3 voxel_size_inv;
float3 gradient_delta;
Aff3f aff;
Mat3f Rinv;
ExtractNormals(const TsdfVolume& vol) : volume(vol)
{
voxel_size_inv.x = 1.f/volume.voxel_size.x;
voxel_size_inv.y = 1.f/volume.voxel_size.y;
voxel_size_inv.z = 1.f/volume.voxel_size.z;
}
__kf_device__ int3 getVoxel (const float3& p) const
{
//rounding to nearest even
int x = __float2int_rn (p.x * voxel_size_inv.x);
int y = __float2int_rn (p.y * voxel_size_inv.y);
int z = __float2int_rn (p.z * voxel_size_inv.z);
return make_int3 (x, y, z);
}
__kf_device__ void operator () (float4* output) const
{
int idx = threadIdx.x + blockIdx.x * blockDim.x;
if (idx >= points.size)
return;
const float qnan = numeric_limits<float>::quiet_NaN ();
float3 n = make_float3 (qnan, qnan, qnan);
float3 point = Rinv * (tr(points.data[idx]) - aff.t);
int3 g = getVoxel (point);
if (g.x > 1 && g.y > 1 && g.z > 1 && g.x < volume.dims.x - 2 && g.y < volume.dims.y - 2 && g.z < volume.dims.z - 2)
{
float3 t;
t = point;
t.x += gradient_delta.x;;
float Fx1 = interpolate(volume, t * voxel_size_inv);
t = point;
t.x -= gradient_delta.x;
float Fx2 = interpolate(volume, t * voxel_size_inv);
n.x = __fdividef(Fx1 - Fx2, gradient_delta.x);
t = point;
t.y += gradient_delta.y;
float Fy1 = interpolate(volume, t * voxel_size_inv);
t = point;
t.y -= gradient_delta.y;
float Fy2 = interpolate(volume, t * voxel_size_inv);
n.y = __fdividef(Fy1 - Fy2, gradient_delta.y);
t = point;
t.z += gradient_delta.z;
float Fz1 = interpolate(volume, t * voxel_size_inv);
t = point;
t.z -= gradient_delta.z;
float Fz2 = interpolate(volume, t * voxel_size_inv);
n.z = __fdividef(Fz1 - Fz2, gradient_delta.z);
n = normalized (aff.R * n);
}
output[idx] = make_float4(n.x, n.y, n.z, 0);
}
};
__global__ void extract_normals_kernel (const ExtractNormals en, float4* output) { en(output); }
}
}
size_t kfusion::device::extractCloud (const TsdfVolume& volume, const Aff3f& aff, PtrSz<Point> output)
{
typedef FullScan6 FS;
FS fs(volume);
fs.aff = aff;
dim3 block (FS::CTA_SIZE_X, FS::CTA_SIZE_Y);
dim3 grid (divUp (volume.dims.x, block.x), divUp (volume.dims.y, block.y));
hipLaunchKernelGGL(( extract_kernel), dim3(grid), dim3(block), 0, 0, fs, output);
cudaSafeCall ( hipGetLastError () );
cudaSafeCall (hipDeviceSynchronize ());
int size;
cudaSafeCall ( hipMemcpyFromSymbol (&size, output_count, sizeof(size)) );
return (size_t)size;
}
void kfusion::device::extractNormals (const TsdfVolume& volume, const PtrSz<Point>& points, const Aff3f& aff, const Mat3f& Rinv, float gradient_delta_factor, float4* output)
{
ExtractNormals en(volume);
en.points = points;
en.gradient_delta = volume.voxel_size * gradient_delta_factor;
en.aff = aff;
en.Rinv = Rinv;
dim3 block (32, 8);
dim3 grid (divUp ((int)points.size, block.x));
hipLaunchKernelGGL(( extract_normals_kernel), dim3(grid), dim3(block), 0, 0, en, output);
cudaSafeCall ( hipGetLastError () );
cudaSafeCall (hipDeviceSynchronize ());
}
| 180e4d6732d6c7decc69a54fc2edc04aa80c62cb.cu | #include <vector_functions.h>
#include "device.hpp"
#include "texture_binder.hpp"
#include "../internal.hpp"
#include "math_constants.h"
using namespace kfusion::device;
////////////////////////////////////////////////////////////////////////////////////////////////////////////////
/// Volume initialization
namespace kfusion
{
namespace device
{
__global__ void clear_volume_kernel(TsdfVolume tsdf)
{
int x = threadIdx.x + blockIdx.x * blockDim.x;
int y = threadIdx.y + blockIdx.y * blockDim.y;
if (x < tsdf.dims.x && y < tsdf.dims.y)
{
ushort2 *beg = tsdf.beg(x, y);
ushort2 *end = beg + tsdf.dims.x * tsdf.dims.y * tsdf.dims.z;
for(ushort2* pos = beg; pos != end; pos = tsdf.zstep(pos))
*pos = pack_tsdf (0.f, 0);
}
}
}
}
void kfusion::device::clear_volume(TsdfVolume volume)
{
dim3 block (32, 8);
dim3 grid (1, 1, 1);
grid.x = divUp (volume.dims.x, block.x);
grid.y = divUp (volume.dims.y, block.y);
clear_volume_kernel<<<grid, block>>>(volume);
cudaSafeCall ( cudaGetLastError () );
}
////////////////////////////////////////////////////////////////////////////////////////////////////////////////
/// Volume integration
float3 test;
namespace kfusion
{
namespace device
{
texture<float, 2> dists_tex(0, cudaFilterModePoint, cudaAddressModeBorder, cudaCreateChannelDescHalf());
struct TsdfIntegrator
{
Aff3f vol2cam;
Projector proj;
int2 dists_size;
float tranc_dist_inv;
__kf_device__
void operator()(TsdfVolume& volume) const
{
int x = blockIdx.x * blockDim.x + threadIdx.x;
int y = blockIdx.y * blockDim.y + threadIdx.y;
if (x >= volume.dims.x || y >= volume.dims.y)
return;
//float3 zstep = vol2cam.R * make_float3(0.f, 0.f, volume.voxel_size.z);
float3 zstep = make_float3(vol2cam.R.data[0].z, vol2cam.R.data[1].z, vol2cam.R.data[2].z) * volume.voxel_size.z;
float3 vx = make_float3(x * volume.voxel_size.x, y * volume.voxel_size.y, 0);
float3 vc = vol2cam * vx; //transform from volume coordinate frame to the camera one
TsdfVolume::elem_type* vptr = volume.beg(x, y);
for(int i = 0; i < volume.dims.z; ++i, vc += zstep, vptr = volume.zstep(vptr))
{
float2 coo = proj(vc);
//#if defined __CUDA_ARCH__ && __CUDA_ARCH__ >= 300
// this is actually workaround for kepler. it doesn't return 0.f for texture
// fetches for out-of-border coordinates even for cudaaddressmodeborder mode
if (coo.x < 0 || coo.y < 0 || coo.x >= dists_size.x || coo.y >= dists_size.y)
continue;
//#endif
float Dp = tex2D(dists_tex, coo.x, coo.y);
if(Dp == 0 || vc.z <= 0)
continue;
float sdf = Dp - __fsqrt_rn(dot(vc, vc)); //Dp - norm(v)
if (sdf >= -volume.trunc_dist)
{
float tsdf = fmin(1.f, sdf * tranc_dist_inv);
//read and unpack
int weight_prev;
float tsdf_prev = unpack_tsdf (gmem::LdCs(vptr), weight_prev);
float tsdf_new = __fdividef(__fmaf_rn(tsdf_prev, weight_prev, tsdf), weight_prev + 1);
int weight_new = min (weight_prev + 1, volume.max_weight);
//pack and write
gmem::StCs(pack_tsdf (tsdf_new, weight_new), vptr);
}
} // for(;;)
}
};
__global__ void integrate_kernel( const TsdfIntegrator integrator, TsdfVolume volume) { integrator(volume); };
__global__
void project_kernel(const Projector proj, PtrStep<Point> points, PtrStepSz<ushort> depth, int rows, int cols)
{
int x = threadIdx.x + blockIdx.x * blockDim.x;
int y = threadIdx.y + blockIdx.y * blockDim.y;
float qnan = numeric_limits<float>::quiet_NaN ();
if (x < cols && y < rows) {
Point pt = points(y, x);
if(isnan(pt.x) || isnan(pt.y) || isnan(pt.z))
return;
float3 point = make_float3(pt.x, pt.y, pt.z);
float2 coo = proj(point);
if (coo.x < 0 || coo.y < 0 || coo.y >= rows || coo.x >= cols)
{
points(y, x) = make_float4(qnan, qnan, qnan, 0.f);
return;
}
float Dp = tex2D(dists_tex, coo.x, coo.y);
depth(coo.y, coo.x) = 0;
points(y, x) = make_float4(coo.x * Dp, coo.y * Dp, Dp, 0.f);
}
}
}
}
void kfusion::device::integrate(const PtrStepSz<ushort>& dists, TsdfVolume& volume, const Aff3f& aff, const Projector& proj)
{
TsdfIntegrator ti;
ti.dists_size = make_int2(dists.cols, dists.rows);
ti.vol2cam = aff;
ti.proj = proj;
ti.tranc_dist_inv = 1.f/volume.trunc_dist;
dists_tex.filterMode = cudaFilterModePoint;
dists_tex.addressMode[0] = cudaAddressModeBorder;
dists_tex.addressMode[1] = cudaAddressModeBorder;
dists_tex.addressMode[2] = cudaAddressModeBorder;
TextureBinder binder(dists, dists_tex, cudaCreateChannelDescHalf()); (void)binder;
dim3 block(32, 8);
dim3 grid(divUp(volume.dims.x, block.x), divUp(volume.dims.y, block.y));
integrate_kernel<<<grid, block>>>(ti, volume);
cudaSafeCall ( cudaGetLastError () );
cudaSafeCall ( cudaDeviceSynchronize() );
}
//TODO: rename as now projecting + removing from depth
void kfusion::device::project_and_remove(const PtrStepSz<ushort>& dists, Points &vertices, const Projector &proj)
{
dists_tex.filterMode = cudaFilterModePoint;
dists_tex.addressMode[0] = cudaAddressModeBorder;
dists_tex.addressMode[1] = cudaAddressModeBorder;
dists_tex.addressMode[2] = cudaAddressModeBorder;
TextureBinder binder(dists, dists_tex, cudaCreateChannelDescHalf()); (void)binder;
dim3 block(32, 8);
dim3 grid(divUp(vertices.cols(), block.x), divUp(vertices.rows(), block.y));
project_kernel <<<grid, block>>>(proj, vertices, dists, dists.rows, dists.cols);
cudaSafeCall ( cudaGetLastError () );
}
//TODO: rename as now projecting + removing from depth
void kfusion::device::project(const PtrStepSz<ushort> &dists, Points &vertices, const Projector &proj)
{
dists_tex.filterMode = cudaFilterModePoint;
dists_tex.addressMode[0] = cudaAddressModeBorder;
dists_tex.addressMode[1] = cudaAddressModeBorder;
dists_tex.addressMode[2] = cudaAddressModeBorder;
TextureBinder binder(dists, dists_tex, cudaCreateChannelDescHalf()); (void)binder;
dim3 block(32, 8);
dim3 grid(divUp(vertices.cols(), block.x), divUp(vertices.rows(), block.y));
project_kernel <<<grid, block>>>(proj, vertices, dists, dists.rows, dists.cols);
cudaSafeCall ( cudaGetLastError () );
}
////////////////////////////////////////////////////////////////////////////////////////////////////////////////
/// Volume ray casting
namespace kfusion
{
namespace device
{
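// Ray / axis-aligned box intersection via the slab method: computes the entry (tnear) and exit (tfar)
// distances along the ray, with the box minimum fixed at the origin.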
__kf_device__ void intersect(float3 ray_org, float3 ray_dir, /*float3 box_min,*/ float3 box_max, float &tnear, float &tfar)
{
const float3 box_min = make_float3(0.f, 0.f, 0.f);
// compute intersection of ray with all six bbox planes
float3 invR = make_float3(1.f/ray_dir.x, 1.f/ray_dir.y, 1.f/ray_dir.z);
float3 tbot = invR * (box_min - ray_org);
float3 ttop = invR * (box_max - ray_org);
// re-order intersections to find smallest and largest on each axis
float3 tmin = make_float3(fminf(ttop.x, tbot.x), fminf(ttop.y, tbot.y), fminf(ttop.z, tbot.z));
float3 tmax = make_float3(fmaxf(ttop.x, tbot.x), fmaxf(ttop.y, tbot.y), fmaxf(ttop.z, tbot.z));
// find the largest tmin and the smallest tmax
tnear = fmaxf(fmaxf(tmin.x, tmin.y), fmaxf(tmin.x, tmin.z));
tfar = fminf(fminf(tmax.x, tmax.y), fminf(tmax.x, tmax.z));
}
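// Trilinear interpolation of the packed TSDF at a fractional voxel coordinate; returns NaN outside the volume.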
template<typename Vol>
__kf_device__ float interpolate(const Vol& volume, const float3& p_voxels)
{
float3 cf = p_voxels;
//rounding to negative infinity
int3 g = make_int3(__float2int_rd (cf.x), __float2int_rd (cf.y), __float2int_rd (cf.z));
if (g.x < 0 || g.x >= volume.dims.x - 1 || g.y < 0 || g.y >= volume.dims.y - 1 || g.z < 0 || g.z >= volume.dims.z - 1)
return numeric_limits<float>::quiet_NaN();
float a = cf.x - g.x;
float b = cf.y - g.y;
float c = cf.z - g.z;
float tsdf = 0.f;
tsdf += unpack_tsdf(*volume(g.x + 0, g.y + 0, g.z + 0)) * (1 - a) * (1 - b) * (1 - c);
tsdf += unpack_tsdf(*volume(g.x + 0, g.y + 0, g.z + 1)) * (1 - a) * (1 - b) * c;
tsdf += unpack_tsdf(*volume(g.x + 0, g.y + 1, g.z + 0)) * (1 - a) * b * (1 - c);
tsdf += unpack_tsdf(*volume(g.x + 0, g.y + 1, g.z + 1)) * (1 - a) * b * c;
tsdf += unpack_tsdf(*volume(g.x + 1, g.y + 0, g.z + 0)) * a * (1 - b) * (1 - c);
tsdf += unpack_tsdf(*volume(g.x + 1, g.y + 0, g.z + 1)) * a * (1 - b) * c;
tsdf += unpack_tsdf(*volume(g.x + 1, g.y + 1, g.z + 0)) * a * b * (1 - c);
tsdf += unpack_tsdf(*volume(g.x + 1, g.y + 1, g.z + 1)) * a * b * c;
return tsdf;
}
struct TsdfRaycaster
{
TsdfVolume volume;
Aff3f aff;
Mat3f Rinv;
Vec3f volume_size;
Reprojector reproj;
float time_step;
float3 gradient_delta;
float3 voxel_size_inv;
TsdfRaycaster(const TsdfVolume& volume, const Aff3f& aff, const Mat3f& Rinv, const Reprojector& _reproj);
__kf_device__
float fetch_tsdf(const float3& p) const
{
//rounding to nearest even
int x = __float2int_rn (p.x * voxel_size_inv.x);
int y = __float2int_rn (p.y * voxel_size_inv.y);
int z = __float2int_rn (p.z * voxel_size_inv.z);
return unpack_tsdf(*volume(x, y, z));
}
__kf_device__
void operator()(PtrStepSz<ushort> depth, PtrStep<Normal> normals) const
{
int x = blockIdx.x * blockDim.x + threadIdx.x;
int y = blockIdx.y * blockDim.y + threadIdx.y;
if (x >= depth.cols || y >= depth.rows)
return;
const float qnan = numeric_limits<float>::quiet_NaN();
depth(y, x) = 0;
normals(y, x) = make_float4(qnan, qnan, qnan, qnan);
float3 ray_org = aff.t;
float3 ray_dir = normalized( aff.R * reproj(x, y, 1.f) );
// We do subtract voxel size to minimize checks after
// Note: origin of volume coordinate is placed
// in the center of voxel (0,0,0), not in the corner of the voxel!
float3 box_max = volume_size - volume.voxel_size;
float tmin, tmax;
intersect(ray_org, ray_dir, box_max, tmin, tmax);
const float min_dist = 0.f;
tmin = fmax(min_dist, tmin);
if (tmin >= tmax)
return;
tmax -= time_step;
float3 vstep = ray_dir * time_step;
float3 next = ray_org + ray_dir * tmin;
float tsdf_next = fetch_tsdf(next);
for (float tcurr = tmin; tcurr < tmax; tcurr += time_step)
{
float tsdf_curr = tsdf_next;
float3 curr = next;
next += vstep;
tsdf_next = fetch_tsdf(next);
if (tsdf_curr < 0.f && tsdf_next > 0.f)
break;
if (tsdf_curr > 0.f && tsdf_next < 0.f)
{
float Ft = interpolate(volume, curr * voxel_size_inv);
float Ftdt = interpolate(volume, next * voxel_size_inv);
float Ts = tcurr - __fdividef(time_step * Ft, Ftdt - Ft);
float3 vertex = ray_org + ray_dir * Ts;
float3 normal = compute_normal(vertex);
if (!isnan(normal.x * normal.y * normal.z))
{
normal = Rinv * normal;
vertex = Rinv * (vertex - aff.t);
normals(y, x) = make_float4(normal.x, normal.y, normal.z, 0);
depth(y, x) = static_cast<ushort>(vertex.z * 1000);
}
break;
}
} /* for (;;) */
}
__kf_device__
void operator()(PtrStepSz<Point> points, PtrStep<Normal> normals) const
{
int x = blockIdx.x * blockDim.x + threadIdx.x;
int y = blockIdx.y * blockDim.y + threadIdx.y;
if (x >= points.cols || y >= points.rows)
return;
const float qnan = numeric_limits<float>::quiet_NaN();
points(y, x) = normals(y, x) = make_float4(qnan, qnan, qnan, qnan);
float3 ray_org = aff.t;
float3 ray_dir = normalized( aff.R * reproj(x, y, 1.f) );
// We do subtract voxel size to minimize checks after
// Note: origin of volume coordinate is placed
// in the center of voxel (0,0,0), not in the corner of the voxel!
float3 box_max = volume_size - volume.voxel_size;
float tmin, tmax;
intersect(ray_org, ray_dir, box_max, tmin, tmax);
const float min_dist = 0.f;
tmin = fmax(min_dist, tmin);
if (tmin >= tmax)
return;
tmax -= time_step;
float3 vstep = ray_dir * time_step;
float3 next = ray_org + ray_dir * tmin;
float tsdf_next = fetch_tsdf(next);
for (float tcurr = tmin; tcurr < tmax; tcurr += time_step)
{
float tsdf_curr = tsdf_next;
float3 curr = next;
next += vstep;
tsdf_next = fetch_tsdf(next);
if (tsdf_curr < 0.f && tsdf_next > 0.f)
break;
if (tsdf_curr > 0.f && tsdf_next < 0.f)
{
float Ft = interpolate(volume, curr * voxel_size_inv);
float Ftdt = interpolate(volume, next * voxel_size_inv);
float Ts = tcurr - __fdividef(time_step * Ft, Ftdt - Ft);
float3 vertex = ray_org + ray_dir * Ts;
float3 normal = compute_normal(vertex);
if (!isnan(normal.x * normal.y * normal.z))
{
normal = Rinv * normal;
vertex = Rinv * (vertex - aff.t);
normals(y, x) = make_float4(normal.x, normal.y, normal.z, 0.f);
points(y, x) = make_float4(vertex.x, vertex.y, vertex.z, 0.f);
}
break;
}
} /* for (;;) */
}
__kf_device__
float3 compute_normal(const float3& p) const
{
float3 n;
float Fx1 = interpolate(volume, make_float3(p.x + gradient_delta.x, p.y, p.z) * voxel_size_inv);
float Fx2 = interpolate(volume, make_float3(p.x - gradient_delta.x, p.y, p.z) * voxel_size_inv);
n.x = __fdividef(Fx1 - Fx2, gradient_delta.x);
float Fy1 = interpolate(volume, make_float3(p.x, p.y + gradient_delta.y, p.z) * voxel_size_inv);
float Fy2 = interpolate(volume, make_float3(p.x, p.y - gradient_delta.y, p.z) * voxel_size_inv);
n.y = __fdividef(Fy1 - Fy2, gradient_delta.y);
float Fz1 = interpolate(volume, make_float3(p.x, p.y, p.z + gradient_delta.z) * voxel_size_inv);
float Fz2 = interpolate(volume, make_float3(p.x, p.y, p.z - gradient_delta.z) * voxel_size_inv);
n.z = __fdividef(Fz1 - Fz2, gradient_delta.z);
return normalized (n);
}
};
inline TsdfRaycaster::TsdfRaycaster(const TsdfVolume& _volume, const Aff3f& _aff, const Mat3f& _Rinv, const Reprojector& _reproj)
: volume(_volume), aff(_aff), Rinv(_Rinv), reproj(_reproj) {}
__global__ void raycast_kernel(const TsdfRaycaster raycaster, PtrStepSz<ushort> depth, PtrStep<Normal> normals)
{ raycaster(depth, normals); };
__global__ void raycast_kernel(const TsdfRaycaster raycaster, PtrStepSz<Point> points, PtrStep<Normal> normals)
{ raycaster(points, normals); };
}
}
void kfusion::device::raycast(const TsdfVolume& volume, const Aff3f& aff, const Mat3f& Rinv, const Reprojector& reproj,
Depth& depth, Normals& normals, float raycaster_step_factor, float gradient_delta_factor)
{
TsdfRaycaster rc(volume, aff, Rinv, reproj);
rc.volume_size = volume.voxel_size * volume.dims;
rc.time_step = volume.trunc_dist * raycaster_step_factor;
rc.gradient_delta = volume.voxel_size * gradient_delta_factor;
rc.voxel_size_inv = 1.f/volume.voxel_size;
dim3 block(32, 8);
dim3 grid (divUp (depth.cols(), block.x), divUp (depth.rows(), block.y));
raycast_kernel<<<grid, block>>>(rc, (PtrStepSz<ushort>)depth, normals);
cudaSafeCall (cudaGetLastError ());
}
void kfusion::device::raycast(const TsdfVolume& volume, const Aff3f& aff, const Mat3f& Rinv, const Reprojector& reproj,
Points& points, Normals& normals, float raycaster_step_factor, float gradient_delta_factor)
{
TsdfRaycaster rc(volume, aff, Rinv, reproj);
rc.volume_size = volume.voxel_size * volume.dims;
rc.time_step = volume.trunc_dist * raycaster_step_factor;
rc.gradient_delta = volume.voxel_size * gradient_delta_factor;
rc.voxel_size_inv = 1.f/volume.voxel_size;
dim3 block(32, 8);
dim3 grid (divUp (points.cols(), block.x), divUp (points.rows(), block.y));
raycast_kernel<<<grid, block>>>(rc, (PtrStepSz<Point>)points, normals);
cudaSafeCall (cudaGetLastError ());
}
////////////////////////////////////////////////////////////////////////////////////////
/// Volume cloud extraction
namespace kfusion
{
namespace device
{
////////////////////////////////////////////////////////////////////////////////////////
///// Prefix Scan utility
enum ScanKind { exclusive, inclusive };
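// Warp-level Hillis-Steele prefix scan over 32 consecutive shared-memory entries;
// relies on warp-synchronous execution (hence the volatile pointer) and returns the inclusive or exclusive result.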
template<ScanKind Kind, class T>
__kf_device__ T scan_warp ( volatile T *ptr, const unsigned int idx = threadIdx.x )
{
const unsigned int lane = idx & 31; // index of thread in warp (0..31)
if (lane >= 1) ptr[idx] = ptr[idx - 1] + ptr[idx];
if (lane >= 2) ptr[idx] = ptr[idx - 2] + ptr[idx];
if (lane >= 4) ptr[idx] = ptr[idx - 4] + ptr[idx];
if (lane >= 8) ptr[idx] = ptr[idx - 8] + ptr[idx];
if (lane >= 16) ptr[idx] = ptr[idx - 16] + ptr[idx];
if (Kind == inclusive)
return ptr[idx];
else
return (lane > 0) ? ptr[idx - 1] : 0;
}
__device__ int global_count = 0;
__device__ int output_count;
__device__ unsigned int blocks_done = 0;
struct FullScan6
{
enum
{
CTA_SIZE_X = 32,
CTA_SIZE_Y = 6,
CTA_SIZE = CTA_SIZE_X * CTA_SIZE_Y,
MAX_LOCAL_POINTS = 3
};
TsdfVolume volume;
Aff3f aff;
FullScan6(const TsdfVolume& vol) : volume(vol) {}
__kf_device__ float fetch(int x, int y, int z, int& weight) const
{
return unpack_tsdf(*volume(x, y, z), weight);
}
__kf_device__ void operator () (PtrSz<Point> output) const
{
int x = threadIdx.x + blockIdx.x * CTA_SIZE_X;
int y = threadIdx.y + blockIdx.y * CTA_SIZE_Y;
#if __CUDA_ARCH__ < 200
__shared__ int cta_buffer[CTA_SIZE];
#endif
#if __CUDA_ARCH__ >= 120
if (__all (x >= volume.dims.x) || __all (y >= volume.dims.y))
return;
#else
if (Emulation::All(x >= volume.dims.x, cta_buffer) || Emulation::All(y >= volume.dims.y, cta_buffer))
return;
#endif
float3 V;
V.x = (x + 0.5f) * volume.voxel_size.x;
V.y = (y + 0.5f) * volume.voxel_size.y;
int ftid = Block::flattenedThreadId ();
for (int z = 0; z < volume.dims.z - 1; ++z)
{
float3 points[MAX_LOCAL_POINTS];
int local_count = 0;
if (x < volume.dims.x && y < volume.dims.y)
{
int W;
float F = fetch(x, y, z, W);
if (W != 0 && F != 1.f)
{
V.z = (z + 0.5f) * volume.voxel_size.z;
//process dx
if (x + 1 < volume.dims.x)
{
int Wn;
float Fn = fetch(x + 1, y, z, Wn);
if (Wn != 0 && Fn != 1.f)
if ((F > 0 && Fn < 0) || (F < 0 && Fn > 0))
{
float3 p;
p.y = V.y;
p.z = V.z;
float Vnx = V.x + volume.voxel_size.x;
float d_inv = 1.f / (fabs (F) + fabs (Fn));
p.x = (V.x * fabs (Fn) + Vnx * fabs (F)) * d_inv;
points[local_count++] = aff * p;
}
} /* if (x + 1 < volume.dims.x) */
//process dy
if (y + 1 < volume.dims.y)
{
int Wn;
float Fn = fetch (x, y + 1, z, Wn);
if (Wn != 0 && Fn != 1.f)
if ((F > 0 && Fn < 0) || (F < 0 && Fn > 0))
{
float3 p;
p.x = V.x;
p.z = V.z;
float Vny = V.y + volume.voxel_size.y;
float d_inv = 1.f / (fabs (F) + fabs (Fn));
p.y = (V.y * fabs (Fn) + Vny * fabs (F)) * d_inv;
points[local_count++] = aff * p;
}
} /* if (y + 1 < volume.dims.y) */
//process dz
//if (z + 1 < volume.dims.z) // guaranteed by loop
{
int Wn;
float Fn = fetch (x, y, z + 1, Wn);
if (Wn != 0 && Fn != 1.f)
if ((F > 0 && Fn < 0) || (F < 0 && Fn > 0))
{
float3 p;
p.x = V.x;
p.y = V.y;
float Vnz = V.z + volume.voxel_size.z;
float d_inv = 1.f / (fabs (F) + fabs (Fn));
p.z = (V.z * fabs (Fn) + Vnz * fabs (F)) * d_inv;
points[local_count++] = aff * p;
}
} /* if (z + 1 < volume.dims.z) */
} /* if (W != 0 && F != 1.f) */
} /* if (x < volume.dims.x && y < volume.dims.y) */
#if __CUDA_ARCH__ >= 200
/// now we have filled the points array at the current iteration
int total_warp = __popc (__ballot (local_count > 0)) + __popc (__ballot (local_count > 1)) + __popc (__ballot (local_count > 2));
#else
int tid = Block::flattenedThreadId();
cta_buffer[tid] = local_count;
int total_warp = Emulation::warp_reduce(cta_buffer, tid);
#endif
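// Output compaction: each warp reserves space in the global output with a single atomicAdd,
// stages its points in shared memory, then writes them out with coalesced accesses.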
__shared__ float storage_X[CTA_SIZE * MAX_LOCAL_POINTS];
__shared__ float storage_Y[CTA_SIZE * MAX_LOCAL_POINTS];
__shared__ float storage_Z[CTA_SIZE * MAX_LOCAL_POINTS];
if (total_warp > 0)
{
int lane = Warp::laneId ();
int storage_index = (ftid >> Warp::LOG_WARP_SIZE) * Warp::WARP_SIZE * MAX_LOCAL_POINTS;
volatile int* cta_buffer = (int*)(storage_X + storage_index);
cta_buffer[lane] = local_count;
int offset = scan_warp<exclusive>(cta_buffer, lane);
if (lane == 0)
{
int old_global_count = atomicAdd (&global_count, total_warp);
cta_buffer[0] = old_global_count;
}
int old_global_count = cta_buffer[0];
for (int l = 0; l < local_count; ++l)
{
storage_X[storage_index + offset + l] = points[l].x;
storage_Y[storage_index + offset + l] = points[l].y;
storage_Z[storage_index + offset + l] = points[l].z;
}
Point *pos = output.data + old_global_count + lane;
for (int idx = lane; idx < total_warp; idx += Warp::STRIDE, pos += Warp::STRIDE)
{
float x = storage_X[storage_index + idx];
float y = storage_Y[storage_index + idx];
float z = storage_Z[storage_index + idx];
*pos = make_float4(x, y, z, 0.f);
}
bool full = (old_global_count + total_warp) >= output.size;
if (full)
break;
}
} /* for(int z = 0; z < volume.dims.z - 1; ++z) */
///////////////////////////
// prepare for future scans
if (ftid == 0)
{
unsigned int total_blocks = gridDim.x * gridDim.y * gridDim.z;
unsigned int value = atomicInc (&blocks_done, total_blocks);
//last block
if (value == total_blocks - 1)
{
output_count = min ((int)output.size, global_count);
blocks_done = 0;
global_count = 0;
}
}
}
};
__global__ void extract_kernel(const FullScan6 fs, PtrSz<Point> output) { fs(output); }
struct ExtractNormals
{
typedef float8 float8;
TsdfVolume volume;
PtrSz<Point> points;
float3 voxel_size_inv;
float3 gradient_delta;
Aff3f aff;
Mat3f Rinv;
ExtractNormals(const TsdfVolume& vol) : volume(vol)
{
voxel_size_inv.x = 1.f/volume.voxel_size.x;
voxel_size_inv.y = 1.f/volume.voxel_size.y;
voxel_size_inv.z = 1.f/volume.voxel_size.z;
}
__kf_device__ int3 getVoxel (const float3& p) const
{
//rounding to nearest even
int x = __float2int_rn (p.x * voxel_size_inv.x);
int y = __float2int_rn (p.y * voxel_size_inv.y);
int z = __float2int_rn (p.z * voxel_size_inv.z);
return make_int3 (x, y, z);
}
__kf_device__ void operator () (float4* output) const
{
int idx = threadIdx.x + blockIdx.x * blockDim.x;
if (idx >= points.size)
return;
const float qnan = numeric_limits<float>::quiet_NaN ();
float3 n = make_float3 (qnan, qnan, qnan);
float3 point = Rinv * (tr(points.data[idx]) - aff.t);
int3 g = getVoxel (point);
if (g.x > 1 && g.y > 1 && g.z > 1 && g.x < volume.dims.x - 2 && g.y < volume.dims.y - 2 && g.z < volume.dims.z - 2)
{
float3 t;
t = point;
t.x += gradient_delta.x;;
float Fx1 = interpolate(volume, t * voxel_size_inv);
t = point;
t.x -= gradient_delta.x;
float Fx2 = interpolate(volume, t * voxel_size_inv);
n.x = __fdividef(Fx1 - Fx2, gradient_delta.x);
t = point;
t.y += gradient_delta.y;
float Fy1 = interpolate(volume, t * voxel_size_inv);
t = point;
t.y -= gradient_delta.y;
float Fy2 = interpolate(volume, t * voxel_size_inv);
n.y = __fdividef(Fy1 - Fy2, gradient_delta.y);
t = point;
t.z += gradient_delta.z;
float Fz1 = interpolate(volume, t * voxel_size_inv);
t = point;
t.z -= gradient_delta.z;
float Fz2 = interpolate(volume, t * voxel_size_inv);
n.z = __fdividef(Fz1 - Fz2, gradient_delta.z);
n = normalized (aff.R * n);
}
output[idx] = make_float4(n.x, n.y, n.z, 0);
}
};
__global__ void extract_normals_kernel (const ExtractNormals en, float4* output) { en(output); }
}
}
size_t kfusion::device::extractCloud (const TsdfVolume& volume, const Aff3f& aff, PtrSz<Point> output)
{
typedef FullScan6 FS;
FS fs(volume);
fs.aff = aff;
dim3 block (FS::CTA_SIZE_X, FS::CTA_SIZE_Y);
dim3 grid (divUp (volume.dims.x, block.x), divUp (volume.dims.y, block.y));
extract_kernel<<<grid, block>>>(fs, output);
cudaSafeCall ( cudaGetLastError () );
cudaSafeCall (cudaDeviceSynchronize ());
int size;
cudaSafeCall ( cudaMemcpyFromSymbol (&size, output_count, sizeof(size)) );
return (size_t)size;
}
void kfusion::device::extractNormals (const TsdfVolume& volume, const PtrSz<Point>& points, const Aff3f& aff, const Mat3f& Rinv, float gradient_delta_factor, float4* output)
{
ExtractNormals en(volume);
en.points = points;
en.gradient_delta = volume.voxel_size * gradient_delta_factor;
en.aff = aff;
en.Rinv = Rinv;
dim3 block (32, 8);
dim3 grid (divUp ((int)points.size, block.x));
extract_normals_kernel<<<grid, block>>>(en, output);
cudaSafeCall ( cudaGetLastError () );
cudaSafeCall (cudaDeviceSynchronize ());
}
|
ecea9863355b88316bf420d65a53d4bb91060fe0.hip | // !!! This is a file automatically generated by hipify!!!
/*
* Copyright 2008-2009 NVIDIA Corporation
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "hip/hip_runtime.h"
#include <cstdio>
#include "ocuutil/defines.h"
#include "ocuutil/timing_pool.h"
#include "ocuutil/kernel_wrapper.h"
namespace ocu {
KernelWrapper::KernelWrapper()
{
#ifdef OCU_ENABLE_GPU_TIMING_BY_DEFAULT
_timing_mode = TM_GPU;
#else
_timing_mode = TM_CPU;
#endif
}
void KernelWrapper::PreKernel()
{
if (_timing_mode & TM_CPU) {
_cpu_timer.start();
}
if (_timing_mode & TM_GPU) {
_gpu_timer.start();
}
}
bool KernelWrapper::PostKernel(const char *kernel_name, int resolution)
{
char buff[4096];
sprintf(buff, "%s(%d)", kernel_name, resolution);
return PostKernel(buff);
}
bool KernelWrapper::PostKernel(const char *kernel_name)
{
if (_timing_mode & TM_GPU) {
_gpu_timer.stop();
global_timer_add_timing(kernel_name, _gpu_timer.elapsed_ms());
}
if (_timing_mode & TM_CPU) {
_cpu_timer.stop();
char buff[4096];
sprintf(buff, "%sCPU", kernel_name);
//global_timer_add_timing(buff, _cpu_timer.elapsed_ms());
}
hipError_t er = hipGetLastError();
if (er != (unsigned int)hipSuccess) {
printf("[ERROR] %s - CUDA error \"%s\"\n", kernel_name, hipGetErrorString(er));
return false;
}
return true;
}
} // end namespace
| ecea9863355b88316bf420d65a53d4bb91060fe0.cu | /*
* Copyright 2008-2009 NVIDIA Corporation
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "cuda.h"
#include <cstdio>
#include "ocuutil/defines.h"
#include "ocuutil/timing_pool.h"
#include "ocuutil/kernel_wrapper.h"
namespace ocu {
KernelWrapper::KernelWrapper()
{
#ifdef OCU_ENABLE_GPU_TIMING_BY_DEFAULT
_timing_mode = TM_GPU;
#else
_timing_mode = TM_CPU;
#endif
}
void KernelWrapper::PreKernel()
{
if (_timing_mode & TM_CPU) {
_cpu_timer.start();
}
if (_timing_mode & TM_GPU) {
_gpu_timer.start();
}
}
bool KernelWrapper::PostKernel(const char *kernel_name, int resolution)
{
char buff[4096];
sprintf(buff, "%s(%d)", kernel_name, resolution);
return PostKernel(buff);
}
bool KernelWrapper::PostKernel(const char *kernel_name)
{
if (_timing_mode & TM_GPU) {
_gpu_timer.stop();
global_timer_add_timing(kernel_name, _gpu_timer.elapsed_ms());
}
if (_timing_mode & TM_CPU) {
_cpu_timer.stop();
char buff[4096];
sprintf(buff, "%sCPU", kernel_name);
//global_timer_add_timing(buff, _cpu_timer.elapsed_ms());
}
cudaError_t er = cudaGetLastError();
if (er != (unsigned int)CUDA_SUCCESS) {
printf("[ERROR] %s - CUDA error \"%s\"\n", kernel_name, cudaGetErrorString(er));
return false;
}
return true;
}
} // end namespace
|
d9f9c8e0ee8312a08d2ee1656767ff0a95430b99.hip | // !!! This is a file automatically generated by hipify!!!
/*
Finds: TLB misses
For the Tesla V100-SXM2-16GB's TLB
Soure code based on paper https://arxiv.org/pdf/1509.02308.pdf
*/
#include <stdio.h>
#include <stdint.h>
#include "hip/hip_runtime.h"
#define LEN 256
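// Pointer-chasing kernel: each array element stores the index of the next element to load,
// so every access depends on the previous one and its latency can be timed with clock().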
__global__ void global_latency(unsigned int* my_array, int N, int iterations, unsigned int* duration, unsigned int* index) {
// data access latencies array
__shared__ unsigned int s_tvalue[LEN];
// accessed data indices array
__shared__ unsigned int s_index[LEN];
// initialize arrays
for (int k = 0; k < LEN; k++) {
s_index[k] = 0;
s_tvalue[k] = 0;
}
// warm up the TLB
unsigned int j = 0;
for (int k = 0; k < LEN*iterations; k++)
j = my_array[j];
// ready to begin benchmarking
unsigned int start_time, end_time;
for (int k = 0; k < iterations*LEN; k++) {
start_time = clock();
// traverse array with elements initialized as indices of next memory access
j = my_array[j];
// handles ILP with this data dependency
s_index[k]= j;
end_time = clock();
s_tvalue[k] = end_time - start_time;
}
my_array[N] = j;
my_array[N+1] = my_array[j];
for(int k = 0; k < LEN; k++){
index[k] = s_index[k];
duration[k] = s_tvalue[k];
}
}
void parametric_measure_global(int N, int iterations, int stride) {
// destroy context
hipDeviceReset();
hipError_t error_id;
// host (CPU) array
unsigned int * h_a;
h_a = (unsigned int*) malloc((N+2) * sizeof(unsigned int));
for (int i = 0; i < N; i++) {
h_a[i] = (i+stride) % N;
}
h_a[N] = 0;
h_a[N+1] = 0;
// device (GPU) array
unsigned int * d_a;
error_id = hipMalloc((void **) &d_a, (N+2) * sizeof(unsigned int));
if (error_id != hipSuccess) {
printf("Error from allocating device array is %s\n", hipGetErrorString(error_id));
}
error_id = hipMemcpy(d_a, h_a, sizeof(unsigned int) * N, hipMemcpyHostToDevice);
if (error_id != hipSuccess) {
printf("Error from copying over host array is %s\n", hipGetErrorString(error_id));
}
// accessed data indices array on host (CPU)
unsigned int *h_index = (unsigned int*) malloc(LEN*sizeof(unsigned int));
// accessed data indices array on device (GPU)
unsigned int *d_index;
error_id = hipMalloc((void **) &d_index, sizeof(unsigned int)*LEN );
if (error_id != hipSuccess) {
printf("Error from allocating indices array is %s\n", hipGetErrorString(error_id));
}
// data access latencies array on host (CPU)
unsigned int *h_duration = (unsigned int*) malloc(LEN*sizeof(unsigned int));
// data access latencies array on device (GPU)
unsigned int *d_duration;
error_id = hipMalloc ((void**) &d_duration, LEN*sizeof(unsigned int));
if (error_id != hipSuccess) {
printf("Error from allocating latencies array is %s\n", hipGetErrorString(error_id));
}
// blocks until the device has completed all preceding requested tasks
hipDeviceSynchronize();
// 1 x 1 block of threads
dim3 Db = dim3(1);
// 1 x 1 x 1 block of threads
dim3 Dg = dim3(1,1,1);
// launch kernel
hipLaunchKernelGGL(( global_latency), dim3(Dg), dim3(Db), 0, 0, d_a, N, iterations, d_duration, d_index);
hipDeviceSynchronize();
error_id = hipGetLastError();
if (error_id != hipSuccess) {
printf("Error from kernel is %s\n", hipGetErrorString(error_id));
}
hipDeviceSynchronize();
// copy results from GPU to CPU
error_id = hipMemcpy((void*) h_duration, (void*) d_duration, LEN*sizeof(unsigned int), hipMemcpyDeviceToHost);
if (error_id != hipSuccess) {
printf("Error 2.0 is %s\n", hipGetErrorString(error_id));
}
error_id = hipMemcpy((void*) h_index, (void*) d_index, LEN*sizeof(unsigned int), hipMemcpyDeviceToHost);
if (error_id != hipSuccess) {
printf("Error 2.1 is %s\n", hipGetErrorString(error_id));
}
hipDeviceSynchronize();
for(int i = 0; i < LEN; i++) {
printf("%d\t %d\n", h_index[i], h_duration[i]);
}
// free memory on GPU
hipFree(d_a);
hipFree(d_index);
hipFree(d_duration);
// free memory on CPU
free(h_a);
free(h_index);
free(h_duration);
// destroy context
hipDeviceReset();
}
void measure_global() {
int iterations = 1;
// 2 MB stride
int stride = 2*1024*1024/sizeof(unsigned int);
//1. The L1 TLB has 16 entries. Test with N_min=28 *1024*256, N_max>32*1024*256
//2. The L2 TLB has 65 entries. Test with N_min=128*1024*256, N_max=160*1024*256
for (int N = 28*1024*256; N <= 46*1024*256; N+=stride) {
printf("\n=====%3.1f MB array, warm TLB, read 256 element====\n", sizeof(unsigned int)*(float)N/1024/1024);
printf("Stride = %d element, %d MB\n", stride, stride * sizeof(unsigned int)/1024/1024);
parametric_measure_global(N, iterations, stride);
printf("===============================================\n\n");
}
}
int main() {
// current device
hipSetDevice(0);
measure_global();
// destroy context
hipDeviceReset();
return 0;
}
| d9f9c8e0ee8312a08d2ee1656767ff0a95430b99.cu | /*
Finds: TLB misses
For the Tesla V100-SXM2-16GB's TLB
Source code based on the paper https://arxiv.org/pdf/1509.02308.pdf
*/
#include <stdio.h>
#include <stdint.h>
#include "cuda_runtime.h"
#define LEN 256
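// Pointer-chasing kernel: each array element stores the index of the next element to load,
// so every access depends on the previous one and its latency can be timed with clock().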
__global__ void global_latency(unsigned int* my_array, int N, int iterations, unsigned int* duration, unsigned int* index) {
// data access latencies array
__shared__ unsigned int s_tvalue[LEN];
// accessed data indices array
__shared__ unsigned int s_index[LEN];
// initialize arrays
for (int k = 0; k < LEN; k++) {
s_index[k] = 0;
s_tvalue[k] = 0;
}
// warm up the TLB
unsigned int j = 0;
for (int k = 0; k < LEN*iterations; k++)
j = my_array[j];
// ready to begin benchmarking
unsigned int start_time, end_time;
for (int k = 0; k < iterations*LEN; k++) {
start_time = clock();
// traverse array with elements initialized as indices of next memory access
j = my_array[j];
// handles ILP with this data dependency
s_index[k]= j;
end_time = clock();
s_tvalue[k] = end_time - start_time;
}
my_array[N] = j;
my_array[N+1] = my_array[j];
for(int k = 0; k < LEN; k++){
index[k] = s_index[k];
duration[k] = s_tvalue[k];
}
}
void parametric_measure_global(int N, int iterations, int stride) {
// destroy context
cudaDeviceReset();
cudaError_t error_id;
// host (CPU) array
unsigned int * h_a;
h_a = (unsigned int*) malloc((N+2) * sizeof(unsigned int));
for (int i = 0; i < N; i++) {
h_a[i] = (i+stride) % N;
}
h_a[N] = 0;
h_a[N+1] = 0;
// device (GPU) array
unsigned int * d_a;
error_id = cudaMalloc((void **) &d_a, (N+2) * sizeof(unsigned int));
if (error_id != cudaSuccess) {
printf("Error from allocating device array is %s\n", cudaGetErrorString(error_id));
}
error_id = cudaMemcpy(d_a, h_a, sizeof(unsigned int) * N, cudaMemcpyHostToDevice);
if (error_id != cudaSuccess) {
printf("Error from copying over host array is %s\n", cudaGetErrorString(error_id));
}
// accessed data indices array on host (CPU)
unsigned int *h_index = (unsigned int*) malloc(LEN*sizeof(unsigned int));
// accessed data indices array on device (GPU)
unsigned int *d_index;
error_id = cudaMalloc((void **) &d_index, sizeof(unsigned int)*LEN );
if (error_id != cudaSuccess) {
printf("Error from allocating indices array is %s\n", cudaGetErrorString(error_id));
}
// data access latencies array on host (CPU)
unsigned int *h_duration = (unsigned int*) malloc(LEN*sizeof(unsigned int));
// data access latencies array on device (GPU)
unsigned int *d_duration;
error_id = cudaMalloc ((void**) &d_duration, LEN*sizeof(unsigned int));
if (error_id != cudaSuccess) {
printf("Error from allocating latencies array is %s\n", cudaGetErrorString(error_id));
}
// blocks until the device has completed all preceding requested tasks
cudaThreadSynchronize();
// 1 x 1 block of threads
dim3 Db = dim3(1);
// 1 x 1 x 1 block of threads
dim3 Dg = dim3(1,1,1);
// launch kernel
global_latency<<<Dg, Db>>>(d_a, N, iterations, d_duration, d_index);
cudaThreadSynchronize();
error_id = cudaGetLastError();
if (error_id != cudaSuccess) {
printf("Error from kernel is %s\n", cudaGetErrorString(error_id));
}
cudaThreadSynchronize();
// copy results from GPU to CPU
error_id = cudaMemcpy((void*) h_duration, (void*) d_duration, LEN*sizeof(unsigned int), cudaMemcpyDeviceToHost);
if (error_id != cudaSuccess) {
printf("Error 2.0 is %s\n", cudaGetErrorString(error_id));
}
error_id = cudaMemcpy((void*) h_index, (void*) d_index, LEN*sizeof(unsigned int), cudaMemcpyDeviceToHost);
if (error_id != cudaSuccess) {
printf("Error 2.1 is %s\n", cudaGetErrorString(error_id));
}
cudaThreadSynchronize();
for(int i = 0; i < LEN; i++) {
printf("%d\t %d\n", h_index[i], h_duration[i]);
}
// free memory on GPU
cudaFree(d_a);
cudaFree(d_index);
cudaFree(d_duration);
// free memory on CPU
free(h_a);
free(h_index);
free(h_duration);
// destroy context
cudaDeviceReset();
}
void measure_global() {
int iterations = 1;
// 2 MB stride
int stride = 2*1024*1024/sizeof(unsigned int);
//1. The L1 TLB has 16 entries. Test with N_min=28 *1024*256, N_max>32*1024*256
//2. The L2 TLB has 65 entries. Test with N_min=128*1024*256, N_max=160*1024*256
for (int N = 28*1024*256; N <= 46*1024*256; N+=stride) {
printf("\n=====%3.1f MB array, warm TLB, read 256 element====\n", sizeof(unsigned int)*(float)N/1024/1024);
printf("Stride = %d element, %d MB\n", stride, stride * sizeof(unsigned int)/1024/1024);
parametric_measure_global(N, iterations, stride);
printf("===============================================\n\n");
}
}
int main() {
// current device
cudaSetDevice(0);
measure_global();
// destroy context
cudaDeviceReset();
return 0;
}
|
463f442faf9341510815980ae3a0900f3b36f275.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include<stdio.h>
#include<stdlib.h>
#include <math.h>
#include <Windows.h>
#include <time.h>
#include <assert.h>
#define CUDA_CALL(x) { const hipError_t a = (x); if(a != hipSuccess) { printf("\nCuda Error: %s (err_num=%d) at line:%d\n", hipGetErrorString(a), a, __LINE__); hipDeviceReset(); assert(0);}}
typedef float TIMER_T;
#define USE_CPU_TIMER 1
#define USE_GPU_TIMER 1
#define IN
#define OUT
#define INOUT
#if USE_CPU_TIMER == 1
__int64 start, freq, end;
#define CHECK_TIME_START { QueryPerformanceFrequency((LARGE_INTEGER*)&freq); QueryPerformanceCounter((LARGE_INTEGER*)&start); }
#define CHECK_TIME_END(a) { QueryPerformanceCounter((LARGE_INTEGER*)&end); a = (float)((float)(end - start) / (freq / 1000.0f)); }
#else
#define CHECK_TIME_START
#define CHECK_TIME_END(a)
#endif
#if USE_GPU_TIMER == 1
hipEvent_t cuda_timer_start, cuda_timer_stop;
#define CUDA_STREAM_0 (0)
void create_device_timer()
{
CUDA_CALL(hipEventCreate(&cuda_timer_start));
CUDA_CALL(hipEventCreate(&cuda_timer_stop));
}
void destroy_device_timer()
{
CUDA_CALL(hipEventDestroy(cuda_timer_start));
CUDA_CALL(hipEventDestroy(cuda_timer_stop));
}
inline void start_device_timer()
{
hipEventRecord(cuda_timer_start, CUDA_STREAM_0);
}
inline TIMER_T stop_device_timer()
{
TIMER_T ms;
hipEventRecord(cuda_timer_stop, CUDA_STREAM_0);
hipEventSynchronize(cuda_timer_stop);
hipEventElapsedTime(&ms, cuda_timer_start, cuda_timer_stop);
return ms;
}
#define CHECK_TIME_INIT_GPU() { create_device_timer(); }
#define CHECK_TIME_START_GPU() { start_device_timer(); }
#define CHECK_TIME_END_GPU(a) { a = stop_device_timer(); }
#define CHECK_TIME_DEST_GPU() { destroy_device_timer(); }
#else
#define CHECK_TIME_INIT_GPU()
#define CHECK_TIME_START_GPU()
#define CHECK_TIME_END_GPU(a)
#define CHECK_TIME_DEST_GPU()
#endif
#define N_SIZE (1 << 26) // number of array elements
#define NF_SIZE (1 << 6) // Nf: half-width of the summation window
#define NO_SHARED 0 // shared memory flag
#define SHARED 1 // shared memory flag
#define BLOCK_SIZE (1 << 6) // CUDA thread block size
#define BLOCK_WIDTH (1 << 3)
#define BLOCK_HEIGHT (BLOCK_SIZE / BLOCK_WIDTH)
#define N_ITERATION (1 << 0) // number of timing iterations
TIMER_T compute_time = 0;
TIMER_T device_time = 0;
int N;
int Nf;
int *h_ArrayElements;
int *h_SumOfArrayElements_CPU;
int *h_SumOfArrayElements_GPU_No_Shared;
int *h_SumOfArrayElements_GPU_Shared;
hipError_t Sum_n_elements_GPU(IN int *p_ArrayElements, OUT int *p_SumOfElements_GPU, int Nf, int Shared_flag);
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
//
// Kernel that computes the sum of the array elements from index - Nf to index + Nf
// This kernel does not use shared memory.
//
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
__global__ void Sum_n_elements_Kernel_No_shared(IN int *d_ArrayElements, OUT int *d_SumOfArrayElements, int N, int Nf) {
const unsigned block_id = blockIdx.y * gridDim.x + blockIdx.x;
const unsigned thread_id = threadIdx.y * blockDim.x + threadIdx.x;
const unsigned id = block_id * BLOCK_SIZE + thread_id;
int sum = 0;
for (int i = -Nf; i <= Nf; i++) {
if (id + i >= N || id + i < 0) continue;
sum += d_ArrayElements[id + i];
}
d_SumOfArrayElements[id] = sum;
}
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
//
// Kernel that computes the sum of the array elements from index - Nf to index + Nf
// This kernel uses shared memory.
//
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
__global__ void Sum_n_elements_Kernel_shared(IN int *d_ArrayElements, OUT int *d_SumOfArrayElements, int N, int Nf) {
const unsigned block_id = blockIdx.y * gridDim.x + blockIdx.x;
const unsigned thread_id = threadIdx.y * blockDim.x + threadIdx.x;
const unsigned id = block_id * BLOCK_SIZE + thread_id;
/*Todo*/
}
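////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
//
// Illustrative sketch only (not part of the original lab skeleton): one possible way to fill in the /*Todo*/ above.
// Each block stages its BLOCK_SIZE elements plus an Nf-wide halo on each side in shared memory, then every thread
// sums 2*Nf + 1 consecutive staged values. Assumes Nf <= NF_SIZE so the static buffer is large enough, and that
// N is a multiple of BLOCK_SIZE (as generated by init_bin_file).
//
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
__global__ void Sum_n_elements_Kernel_shared_sketch(IN int *d_ArrayElements, OUT int *d_SumOfArrayElements, int N, int Nf) {
const unsigned block_id = blockIdx.y * gridDim.x + blockIdx.x;
const unsigned thread_id = threadIdx.y * blockDim.x + threadIdx.x;
const unsigned id = block_id * BLOCK_SIZE + thread_id;
__shared__ int s_data[BLOCK_SIZE + 2 * NF_SIZE];
const int block_start = block_id * BLOCK_SIZE;
// cooperatively load the block's elements plus the halo, padding out-of-range positions with 0
for (int s = thread_id; s < BLOCK_SIZE + 2 * Nf; s += BLOCK_SIZE) {
int g = block_start - Nf + s;
s_data[s] = (g >= 0 && g < N) ? d_ArrayElements[g] : 0;
}
__syncthreads();
// s_data[thread_id + Nf] corresponds to d_ArrayElements[id], so the window below covers id - Nf .. id + Nf
int sum = 0;
for (int i = 0; i <= 2 * Nf; i++)
sum += s_data[thread_id + i];
d_SumOfArrayElements[id] = sum;
}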
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
//
// C code that computes the sum of the array elements from index - Nf to index + Nf
// Used as reference data to check whether the GPU kernels computed the correct result
//
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
void Sum_n_elements_CPU(IN int *p_ArrayElements, OUT int *p_SumOfElements_CPU, int Nf) {
int i, j, sum;
for (i = 0; i < N; i++) {
sum = 0;
for (j = -Nf; j <= Nf; j++) {
if (i + j >= N || i + j < 0) continue;
sum += p_ArrayElements[i + j];
}
p_SumOfElements_CPU[i] = sum;
}
}
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
//
// Reads the given bin file
// The first 4 bytes hold the total number of elements, the next 4 bytes hold Nf, followed by N int values
// The data are integers in the range -100 to 100
//
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
void read_bin_file() {
printf("***Binary File Read Start!!\n");
FILE *fp = fopen("gen.bin", "rb");
fread(&N, sizeof(int), 1, fp);
fread(&Nf, sizeof(int), 1, fp);
h_ArrayElements = (int *)malloc(N * sizeof(int));
h_SumOfArrayElements_CPU = (int *)malloc(N * sizeof(int));
h_SumOfArrayElements_GPU_No_Shared = (int *)malloc(N * sizeof(int));
h_SumOfArrayElements_GPU_Shared = (int *)malloc(N * sizeof(int));
fread(h_ArrayElements, sizeof(int), N, fp);
fclose(fp);
printf("***Binary File Read End!!\n\n");
}
void init_bin_file(IN int n, IN int nf) {
printf("***Binary File Create Start!!\n");
srand((unsigned)time(NULL));
FILE *fp = fopen("gen.bin", "wb");
fwrite(&n, sizeof(int), 1, fp);
fwrite(&nf, sizeof(int), 1, fp);
int i, input;
for (i = 0; i < n; i++) {
input = (int)((float)rand() / RAND_MAX * 200 - 100);
fwrite(&input, sizeof(int), 1, fp);
}
fclose(fp);
printf("***Binary File Create End!!\n\n");
}
int main()
{
int i;
init_bin_file(N_SIZE, NF_SIZE);
read_bin_file();
TIMER_T CPU_time = 0.0f, GPU_time_NO_SHARED = 0.0f, GPU_time_SHARED = 0.0f;
for (i = 0; i < N_ITERATION; i++) {
CHECK_TIME_START;
Sum_n_elements_CPU(h_ArrayElements, h_SumOfArrayElements_CPU, Nf);
CHECK_TIME_END(compute_time);
CPU_time += compute_time;
Sum_n_elements_GPU(h_ArrayElements, h_SumOfArrayElements_GPU_No_Shared, Nf, NO_SHARED);
GPU_time_NO_SHARED += device_time;
Sum_n_elements_GPU(h_ArrayElements, h_SumOfArrayElements_GPU_Shared, Nf, SHARED);
GPU_time_SHARED += device_time;
}
for (i = 0; i < N; i++) {
if (h_SumOfArrayElements_CPU[i] != h_SumOfArrayElements_GPU_No_Shared[i] || h_SumOfArrayElements_CPU[i] != h_SumOfArrayElements_GPU_Shared[i]) {
printf("%d : CPU : %d,\tGPU no shared : %d,\tGPU shared : %d\n", i, h_SumOfArrayElements_CPU[i], h_SumOfArrayElements_GPU_No_Shared[i], h_SumOfArrayElements_GPU_Shared[i]);
break;
}
}
if (i == N)
printf("***Kernel execution Success!!\n\n");
printf("***CPU compute time : %.3f ms\n", CPU_time / N_ITERATION);
printf("***GPU NO SHARED compute time : %.3f ms\n", GPU_time_NO_SHARED / N_ITERATION);
printf("***GPU SHARED compute time : %.3f ms\n", GPU_time_SHARED / N_ITERATION);
free(h_ArrayElements);
free(h_SumOfArrayElements_CPU);
free(h_SumOfArrayElements_GPU_No_Shared);
free(h_SumOfArrayElements_GPU_Shared);
return 0;
}
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
//
// Prepares the data needed before launching a kernel and selects the device to run it on
// Pass either the NO_SHARED or SHARED macro as Shared_flag
// and the kernel matching that flag value is executed
//
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
hipError_t Sum_n_elements_GPU(IN int *p_ArrayElements, OUT int *p_SumOfElements_GPU, int Nf, int Shared_flag) {
hipError_t cudaStatus;
CUDA_CALL(hipSetDevice(0));
int *d_ArrayElements, *d_SumOfElements;
size_t mem_size;
mem_size = N * sizeof(int);
CUDA_CALL(hipMalloc(&d_ArrayElements, mem_size));
CUDA_CALL(hipMalloc(&d_SumOfElements, mem_size));
CUDA_CALL(hipMemcpy(d_ArrayElements, p_ArrayElements, mem_size, hipMemcpyHostToDevice));
dim3 blockDIm(BLOCK_WIDTH, BLOCK_HEIGHT);
dim3 gridDim(N / BLOCK_SIZE);
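// 8 x 8 = 64 threads per block (BLOCK_SIZE) and a 1-D grid of N / BLOCK_SIZE blocks,
// so each thread produces exactly one output element (N as generated here is a power of two, so it divides evenly).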
CHECK_TIME_INIT_GPU();
CHECK_TIME_START_GPU();
switch (Shared_flag)
{
case NO_SHARED:
Sum_n_elements_Kernel_No_shared << <gridDim, blockDIm >> > (d_ArrayElements, d_SumOfElements, N, Nf);
break;
case SHARED:
break;
}
CUDA_CALL(cudaStatus = hipDeviceSynchronize());
CHECK_TIME_END_GPU(device_time);
CHECK_TIME_DEST_GPU();
CUDA_CALL(hipMemcpy(p_SumOfElements_GPU, d_SumOfElements, mem_size, hipMemcpyDeviceToHost));
hipFree(d_ArrayElements);
hipFree(d_SumOfElements);
return cudaStatus;
}
| 463f442faf9341510815980ae3a0900f3b36f275.cu | #include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include<stdio.h>
#include<stdlib.h>
#include <math.h>
#include <Windows.h>
#include <time.h>
#include <assert.h>
#define CUDA_CALL(x) { const cudaError_t a = (x); if(a != cudaSuccess) { printf("\nCuda Error: %s (err_num=%d) at line:%d\n", cudaGetErrorString(a), a, __LINE__); cudaDeviceReset(); assert(0);}}
typedef float TIMER_T;
#define USE_CPU_TIMER 1
#define USE_GPU_TIMER 1
#define IN
#define OUT
#define INOUT
#if USE_CPU_TIMER == 1
__int64 start, freq, end;
#define CHECK_TIME_START { QueryPerformanceFrequency((LARGE_INTEGER*)&freq); QueryPerformanceCounter((LARGE_INTEGER*)&start); }
#define CHECK_TIME_END(a) { QueryPerformanceCounter((LARGE_INTEGER*)&end); a = (float)((float)(end - start) / (freq / 1000.0f)); }
#else
#define CHECK_TIME_START
#define CHECK_TIME_END(a)
#endif
#if USE_GPU_TIMER == 1
cudaEvent_t cuda_timer_start, cuda_timer_stop;
#define CUDA_STREAM_0 (0)
void create_device_timer()
{
CUDA_CALL(cudaEventCreate(&cuda_timer_start));
CUDA_CALL(cudaEventCreate(&cuda_timer_stop));
}
void destroy_device_timer()
{
CUDA_CALL(cudaEventDestroy(cuda_timer_start));
CUDA_CALL(cudaEventDestroy(cuda_timer_stop));
}
inline void start_device_timer()
{
cudaEventRecord(cuda_timer_start, CUDA_STREAM_0);
}
inline TIMER_T stop_device_timer()
{
TIMER_T ms;
cudaEventRecord(cuda_timer_stop, CUDA_STREAM_0);
cudaEventSynchronize(cuda_timer_stop);
cudaEventElapsedTime(&ms, cuda_timer_start, cuda_timer_stop);
return ms;
}
#define CHECK_TIME_INIT_GPU() { create_device_timer(); }
#define CHECK_TIME_START_GPU() { start_device_timer(); }
#define CHECK_TIME_END_GPU(a) { a = stop_device_timer(); }
#define CHECK_TIME_DEST_GPU() { destroy_device_timer(); }
#else
#define CHECK_TIME_INIT_GPU()
#define CHECK_TIME_START_GPU()
#define CHECK_TIME_END_GPU(a)
#define CHECK_TIME_DEST_GPU()
#endif
#define N_SIZE (1 << 26) // total data size
#define NF_SIZE (1 << 6) // size of Nf
#define NO_SHARED 0 // flag: run the kernel that does not use shared memory
#define SHARED 1 // flag: run the kernel that uses shared memory
#define BLOCK_SIZE (1 << 6) // CUDA kernel thread block size
#define BLOCK_WIDTH (1 << 3)
#define BLOCK_HEIGHT (BLOCK_SIZE / BLOCK_WIDTH)
#define N_ITERATION (1 << 0) // number of experiment iterations
TIMER_T compute_time = 0;
TIMER_T device_time = 0;
int N;
int Nf;
int *h_ArrayElements;
int *h_SumOfArrayElements_CPU;
int *h_SumOfArrayElements_GPU_No_Shared;
int *h_SumOfArrayElements_GPU_Shared;
cudaError_t Sum_n_elements_GPU(IN int *p_ArrayElements, OUT int *p_SumOfElements_GPU, int Nf, int Shared_flag);
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
//
// Kernel that computes the sum of the array elements from index - Nf to index + Nf
// This kernel does not use shared memory.
//
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
__global__ void Sum_n_elements_Kernel_No_shared(IN int *d_ArrayElements, OUT int *d_SumOfArrayElements, int N, int Nf) {
const unsigned block_id = blockIdx.y * gridDim.x + blockIdx.x;
const unsigned thread_id = threadIdx.y * blockDim.x + threadIdx.x;
const unsigned id = block_id * BLOCK_SIZE + thread_id;
int sum = 0;
for (int i = -Nf; i <= Nf; i++) {
if (id + i >= N || id + i < 0) continue;
sum += d_ArrayElements[id + i];
}
d_SumOfArrayElements[id] = sum;
}
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
//
// Kernel that computes the sum of the array elements from index - Nf to index + Nf
// This kernel uses shared memory.
//
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
__global__ void Sum_n_elements_Kernel_shared(IN int *d_ArrayElements, OUT int *d_SumOfArrayElements, int N, int Nf) {
const unsigned block_id = blockIdx.y * gridDim.x + blockIdx.x;
const unsigned thread_id = threadIdx.y * blockDim.x + threadIdx.x;
const unsigned id = block_id * BLOCK_SIZE + thread_id;
/*Todo*/
}
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
//
// C code that computes the sum of the array elements from index - Nf to index + Nf
// Used as reference data to check whether the GPU kernels computed the correct result
//
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
void Sum_n_elements_CPU(IN int *p_ArrayElements, OUT int *p_SumOfElements_CPU, int Nf) {
int i, j, sum;
for (i = 0; i < N; i++) {
sum = 0;
for (j = -Nf; j <= Nf; j++) {
if (i + j >= N || i + j < 0) continue;
sum += p_ArrayElements[i + j];
}
p_SumOfElements_CPU[i] = sum;
}
}
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
//
// Reads the given bin file
// The first 4 bytes hold the total number of elements, the next 4 bytes hold Nf, followed by N int values
// The data are integers in the range -100 to 100
//
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
void read_bin_file() {
printf("***Binary File Read Start!!\n");
FILE *fp = fopen("gen.bin", "rb");
fread(&N, sizeof(int), 1, fp);
fread(&Nf, sizeof(int), 1, fp);
h_ArrayElements = (int *)malloc(N * sizeof(int));
h_SumOfArrayElements_CPU = (int *)malloc(N * sizeof(int));
h_SumOfArrayElements_GPU_No_Shared = (int *)malloc(N * sizeof(int));
h_SumOfArrayElements_GPU_Shared = (int *)malloc(N * sizeof(int));
fread(h_ArrayElements, sizeof(int), N, fp);
fclose(fp);
printf("***Binary File Read End!!\n\n");
}
void init_bin_file(IN int n, IN int nf) {
printf("***Binary File Create Start!!\n");
srand((unsigned)time(NULL));
FILE *fp = fopen("gen.bin", "wb");
fwrite(&n, sizeof(int), 1, fp);
fwrite(&nf, sizeof(int), 1, fp);
int i, input;
for (i = 0; i < n; i++) {
input = (int)((float)rand() / RAND_MAX * 200 - 100);
fwrite(&input, sizeof(int), 1, fp);
}
fclose(fp);
printf("***Binary File Create End!!\n\n");
}
int main()
{
int i;
init_bin_file(N_SIZE, NF_SIZE);
read_bin_file();
TIMER_T CPU_time = 0.0f, GPU_time_NO_SHARED = 0.0f, GPU_time_SHARED = 0.0f;
for (i = 0; i < N_ITERATION; i++) {
CHECK_TIME_START;
Sum_n_elements_CPU(h_ArrayElements, h_SumOfArrayElements_CPU, Nf);
CHECK_TIME_END(compute_time);
CPU_time += compute_time;
Sum_n_elements_GPU(h_ArrayElements, h_SumOfArrayElements_GPU_No_Shared, Nf, NO_SHARED);
GPU_time_NO_SHARED += device_time;
Sum_n_elements_GPU(h_ArrayElements, h_SumOfArrayElements_GPU_Shared, Nf, SHARED);
GPU_time_SHARED += device_time;
}
for (i = 0; i < N; i++) {
if (h_SumOfArrayElements_CPU[i] != h_SumOfArrayElements_GPU_No_Shared[i] || h_SumOfArrayElements_CPU[i] != h_SumOfArrayElements_GPU_Shared[i]) {
printf("%d : CPU : %d,\tGPU no shared : %d,\tGPU shared : %d\n", i, h_SumOfArrayElements_CPU[i], h_SumOfArrayElements_GPU_No_Shared[i], h_SumOfArrayElements_GPU_Shared[i]);
break;
}
}
if (i == N)
printf("***Kernel execution Success!!\n\n");
printf("***CPU compute time : %.3f ms\n", CPU_time / N_ITERATION);
printf("***GPU NO SHARED compute time : %.3f ms\n", GPU_time_NO_SHARED / N_ITERATION);
printf("***GPU SHARED compute time : %.3f ms\n", GPU_time_SHARED / N_ITERATION);
free(h_ArrayElements);
free(h_SumOfArrayElements_CPU);
free(h_SumOfArrayElements_GPU_No_Shared);
free(h_SumOfArrayElements_GPU_Shared);
return 0;
}
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
//
// Prepares the data needed before launching a kernel and selects the device to run it on
// Pass either the NO_SHARED or SHARED macro as Shared_flag
// and the kernel matching that flag value is executed
//
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
cudaError_t Sum_n_elements_GPU(IN int *p_ArrayElements, OUT int *p_SumOfElements_GPU, int Nf, int Shared_flag) {
cudaError_t cudaStatus;
CUDA_CALL(cudaSetDevice(0));
int *d_ArrayElements, *d_SumOfElements;
size_t mem_size;
mem_size = N * sizeof(int);
CUDA_CALL(cudaMalloc(&d_ArrayElements, mem_size));
CUDA_CALL(cudaMalloc(&d_SumOfElements, mem_size));
CUDA_CALL(cudaMemcpy(d_ArrayElements, p_ArrayElements, mem_size, cudaMemcpyHostToDevice));
dim3 blockDIm(BLOCK_WIDTH, BLOCK_HEIGHT);
dim3 gridDim(N / BLOCK_SIZE);
CHECK_TIME_INIT_GPU();
CHECK_TIME_START_GPU();
switch (Shared_flag)
{
case NO_SHARED:
Sum_n_elements_Kernel_No_shared << <gridDim, blockDIm >> > (d_ArrayElements, d_SumOfElements, N, Nf);
break;
case SHARED:
break;
}
CUDA_CALL(cudaStatus = cudaDeviceSynchronize());
CHECK_TIME_END_GPU(device_time);
CHECK_TIME_DEST_GPU();
CUDA_CALL(cudaMemcpy(p_SumOfElements_GPU, d_SumOfElements, mem_size, cudaMemcpyDeviceToHost));
cudaFree(d_ArrayElements);
cudaFree(d_SumOfElements);
return cudaStatus;
}
|
9c99265a49ec48321b9e6c2c1fe13997ebad5dca.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
__global__ void check_collisions( float x1_robot, float y1_robot, float x2_robot, float y2_robot, float *x1_obs, float *y1_obs, float *x2_obs, float *y2_obs, bool *collisions, int *indexes)
{
int obstacleId = threadIdx.x;
bool xcol = ((x1_obs[obstacleId] <= x1_robot && x1_robot <= x2_obs[obstacleId])
|| (x1_obs[obstacleId] <= x2_robot && x2_robot <= x2_obs[obstacleId]))
|| ( x1_robot <= x1_obs[obstacleId] && x2_robot >= x2_obs[obstacleId]);
bool ycol = ((y1_obs[obstacleId] <= y1_robot && y1_robot <= y2_obs[obstacleId])
|| (y1_obs[obstacleId] <= y2_robot && y2_robot <= y2_obs[obstacleId]))
|| ( y1_robot <= y1_obs[obstacleId] && y2_robot >= y2_obs[obstacleId]);
collisions[obstacleId] = (xcol && ycol);
} | 9c99265a49ec48321b9e6c2c1fe13997ebad5dca.cu | #include "includes.h"
__global__ void check_collisions( float x1_robot, float y1_robot, float x2_robot, float y2_robot, float *x1_obs, float *y1_obs, float *x2_obs, float *y2_obs, bool *collisions, int *indexes)
{
int obstacleId = threadIdx.x;
bool xcol = ((x1_obs[obstacleId] <= x1_robot && x1_robot <= x2_obs[obstacleId])
|| (x1_obs[obstacleId] <= x2_robot && x2_robot <= x2_obs[obstacleId]))
|| ( x1_robot <= x1_obs[obstacleId] && x2_robot >= x2_obs[obstacleId]);
bool ycol = ((y1_obs[obstacleId] <= y1_robot && y1_robot <= y2_obs[obstacleId])
|| (y1_obs[obstacleId] <= y2_robot && y2_robot <= y2_obs[obstacleId]))
|| ( y1_robot <= y1_obs[obstacleId] && y2_robot >= y2_obs[obstacleId]);
collisions[obstacleId] = (xcol && ycol);
} |
scan_largearray.hip | // !!! This is a file automatically generated by hipify!!!
/*
* Copyright 1993-2006 NVIDIA Corporation. All rights reserved.
*
* NOTICE TO USER:
*
* This source code is subject to NVIDIA ownership rights under U.S. and
* international Copyright laws.
*
* This software and the information contained herein is PROPRIETARY and
* CONFIDENTIAL to NVIDIA and is being provided under the terms and
* conditions of a Non-Disclosure Agreement. Any reproduction or
* disclosure to any third party without the express written consent of
* NVIDIA is prohibited.
*
* NVIDIA MAKES NO REPRESENTATION ABOUT THE SUITABILITY OF THIS SOURCE
* CODE FOR ANY PURPOSE. IT IS PROVIDED "AS IS" WITHOUT EXPRESS OR
* IMPLIED WARRANTY OF ANY KIND. NVIDIA DISCLAIMS ALL WARRANTIES WITH
* REGARD TO THIS SOURCE CODE, INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY, NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE.
* IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY SPECIAL, INDIRECT, INCIDENTAL,
* OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS
* OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
* OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE
* OR PERFORMANCE OF THIS SOURCE CODE.
*
* U.S. Government End Users. This source code is a "commercial item" as
* that term is defined at 48 C.F.R. 2.101 (OCT 1995), consisting of
* "commercial computer software" and "commercial computer software
* documentation" as such terms are used in 48 C.F.R. 12.212 (SEPT 1995)
* and is provided to the U.S. Government only as a commercial end item.
* Consistent with 48 C.F.R.12.212 and 48 C.F.R. 227.7202-1 through
* 227.7202-4 (JUNE 1995), all U.S. Government End Users acquire the
* source code with only those rights set forth herein.
*/
#ifdef _WIN32
# define NOMINMAX
#endif
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>
#include <cutil.h>
// includes, kernels
#include <scan_largearray_kernel.cu>
#define DEFAULT_NUM_ELEMENTS 16777216
#define MAX_RAND 3
////////////////////////////////////////////////////////////////////////////////
// declaration, forward
void runTest( int argc, char** argv);
int ReadFile(float*, char* file_name, int size);
void WriteFile(float*, char* file_name, int size);
extern "C"
unsigned int compare( const float* reference, const float* data,
const unsigned int len);
extern "C"
void computeGold( float* reference, float* idata, const unsigned int len);
////////////////////////////////////////////////////////////////////////////////
// Program main
////////////////////////////////////////////////////////////////////////////////
int
main( int argc, char** argv)
{
runTest( argc, argv);
return EXIT_SUCCESS;
}
////////////////////////////////////////////////////////////////////////////////
//! Run a scan test for CUDA
////////////////////////////////////////////////////////////////////////////////
void
runTest( int argc, char** argv)
{
int errorM = 0;
float device_time;
float host_time;
int* size = NULL; //(int*)malloc(1 * sizeof(int));
unsigned int data2read = 1;
int num_elements = 0; // Must support large, non-power-of-2 arrays
// allocate host memory to store the input data
unsigned int mem_size = sizeof( float) * num_elements;
float* h_data = (float*) malloc( mem_size);
// * No arguments: Randomly generate input data and compare against the
// host's result.
// * One argument: Randomly generate input data and write the result to
// file name specified by first argument
// * Two arguments: Read the first argument which indicate the size of the array,
// randomly generate input data and write the input data
// to the second argument. (for generating random input data)
// * Three arguments: Read the first file which indicate the size of the array,
// then input data from the file name specified by 2nd argument and write the
// SCAN output to file name specified by the 3rd argument.
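// Example invocations (file names are placeholders):
//   scan_largearray                              -> random input, compare against host result
//   scan_largearray result.txt                   -> random input, write scan result to result.txt
//   scan_largearray size.txt input.txt           -> generate random input of the given size into input.txt
//   scan_largearray size.txt input.txt result.txt -> scan input.txt, write the result to result.txt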
switch(argc-1)
{
case 2:
// Determine size of array
cutReadFilei(argv[1], &size, &data2read, true);
if(data2read != 1){
printf("Error reading parameter file\n");
exit(1);
}
num_elements = size[0];
// allocate host memory to store the input data
mem_size = sizeof( float) * num_elements;
h_data = (float*) malloc( mem_size);
for( unsigned int i = 0; i < num_elements; ++i)
{
h_data[i] = (int)(rand() % MAX_RAND);
}
WriteFile(h_data, argv[2], num_elements);
break;
case 3: // Three Arguments
cutReadFilei(argv[1], &size, &data2read, true);
if(data2read != 1){
printf("Error reading parameter file\n");
exit(1);
}
num_elements = size[0];
// allocate host memory to store the input data
mem_size = sizeof( float) * num_elements;
h_data = (float*) malloc( mem_size);
errorM = ReadFile(h_data, argv[2], size[0]);
if(errorM != 1)
{
printf("Error reading input file!\n");
exit(1);
}
break;
default: // No Arguments or one argument
// initialize the input data on the host to be integer values
// between 0 and 1000
// Use DEFAULT_NUM_ELEMENTS num_elements
num_elements = DEFAULT_NUM_ELEMENTS;
// allocate host memory to store the input data
mem_size = sizeof( float) * num_elements;
h_data = (float*) malloc( mem_size);
// initialize the input data on the host
for( unsigned int i = 0; i < num_elements; ++i)
{
// h_data[i] = 1.0f;
h_data[i] = (int)(rand() % MAX_RAND);
}
break;
}
unsigned int timer;
CUT_SAFE_CALL(cutCreateTimer(&timer));
// compute reference solution
float* reference = (float*) malloc( mem_size);
cutStartTimer(timer);
computeGold( reference, h_data, num_elements);
cutStopTimer(timer);
printf("\n\n**===-------------------------------------------------===**\n");
printf("Processing %d elements...\n", num_elements);
printf("Host CPU Processing time: %f (ms)\n", cutGetTimerValue(timer));
host_time = cutGetTimerValue(timer);
CUT_SAFE_CALL(cutDeleteTimer(timer));
// **===-------- Lab4: Allocate data structure here -----------===**
// allocate device memory input and output arrays
float* d_idata = NULL;
float* d_odata = NULL;
CUDA_SAFE_CALL( hipMalloc( (void**) &d_idata, mem_size));
CUDA_SAFE_CALL( hipMalloc( (void**) &d_odata, mem_size));
// copy host memory to device input array
CUDA_SAFE_CALL( hipMemcpy( d_idata, h_data, mem_size, hipMemcpyHostToDevice) );
// initialize all the other device arrays to be safe
CUDA_SAFE_CALL( hipMemcpy( d_odata, h_data, mem_size, hipMemcpyHostToDevice) );
// **===-----------------------------------------------------------===**
// Run just once to remove startup overhead for more accurate performance
// measurement
prescanArray(d_odata, d_idata, 16);
// Run the prescan
CUT_SAFE_CALL(cutCreateTimer(&timer));
cutStartTimer(timer);
// **===-------- Lab4: Modify the body of this function -----------===**
preOperation(num_elements);
prescanArray(d_odata, d_idata, num_elements);
prevOperation();
// **===-----------------------------------------------------------===**
CUDA_SAFE_CALL( hipDeviceSynchronize() );
cutStopTimer(timer);
printf("CUDA Processing time: %f (ms)\n", cutGetTimerValue(timer));
device_time = cutGetTimerValue(timer);
printf("Speedup: %fX\n", host_time/device_time);
// **===-------- Lab4: Deallocate data structure here -----------===**
// prevOperation();
// **===-----------------------------------------------------------===**
// copy result from device to host
CUDA_SAFE_CALL(hipMemcpy( h_data, d_odata, sizeof(float) * num_elements,
hipMemcpyDeviceToHost));
if ((argc - 1) == 3) // Three Arguments, write result to file
{
WriteFile(h_data, argv[3], num_elements);
}
else if ((argc - 1) == 1) // One Argument, write result to file
{
WriteFile(h_data, argv[1], num_elements);
}
// Check if the result is equivalent to the expected solution
unsigned int result_regtest = cutComparef( reference, h_data, num_elements);
printf( "Test %s\n", (1 == result_regtest) ? "PASSED" : "FAILED");
// cleanup memory
cutDeleteTimer(timer);
free( h_data);
free( reference);
hipFree( d_odata);
hipFree( d_idata);
}
int ReadFile(float* M, char* file_name, int size)
{
unsigned int elements_read = size;
if (cutReadFilef(file_name, &M, &elements_read, true))
return 1;
else
return 0;
}
void WriteFile(float* M, char* file_name, int size)
{
cutWriteFilef(file_name, M, size, 0.0001f);
}
| scan_largearray.cu | /*
* Copyright 1993-2006 NVIDIA Corporation. All rights reserved.
*
* NOTICE TO USER:
*
* This source code is subject to NVIDIA ownership rights under U.S. and
* international Copyright laws.
*
* This software and the information contained herein is PROPRIETARY and
* CONFIDENTIAL to NVIDIA and is being provided under the terms and
* conditions of a Non-Disclosure Agreement. Any reproduction or
* disclosure to any third party without the express written consent of
* NVIDIA is prohibited.
*
* NVIDIA MAKES NO REPRESENTATION ABOUT THE SUITABILITY OF THIS SOURCE
* CODE FOR ANY PURPOSE. IT IS PROVIDED "AS IS" WITHOUT EXPRESS OR
* IMPLIED WARRANTY OF ANY KIND. NVIDIA DISCLAIMS ALL WARRANTIES WITH
* REGARD TO THIS SOURCE CODE, INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY, NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE.
* IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY SPECIAL, INDIRECT, INCIDENTAL,
* OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS
* OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
* OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE
* OR PERFORMANCE OF THIS SOURCE CODE.
*
* U.S. Government End Users. This source code is a "commercial item" as
* that term is defined at 48 C.F.R. 2.101 (OCT 1995), consisting of
* "commercial computer software" and "commercial computer software
* documentation" as such terms are used in 48 C.F.R. 12.212 (SEPT 1995)
* and is provided to the U.S. Government only as a commercial end item.
* Consistent with 48 C.F.R.12.212 and 48 C.F.R. 227.7202-1 through
* 227.7202-4 (JUNE 1995), all U.S. Government End Users acquire the
* source code with only those rights set forth herein.
*/
#ifdef _WIN32
# define NOMINMAX
#endif
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>
#include <cutil.h>
// includes, kernels
#include <scan_largearray_kernel.cu>
#define DEFAULT_NUM_ELEMENTS 16777216
#define MAX_RAND 3
////////////////////////////////////////////////////////////////////////////////
// declaration, forward
void runTest( int argc, char** argv);
int ReadFile(float*, char* file_name, int size);
void WriteFile(float*, char* file_name, int size);
extern "C"
unsigned int compare( const float* reference, const float* data,
const unsigned int len);
extern "C"
void computeGold( float* reference, float* idata, const unsigned int len);
////////////////////////////////////////////////////////////////////////////////
// Program main
////////////////////////////////////////////////////////////////////////////////
int
main( int argc, char** argv)
{
runTest( argc, argv);
return EXIT_SUCCESS;
}
////////////////////////////////////////////////////////////////////////////////
//! Run a scan test for CUDA
////////////////////////////////////////////////////////////////////////////////
void
runTest( int argc, char** argv)
{
int errorM = 0;
float device_time;
float host_time;
int* size = NULL; //(int*)malloc(1 * sizeof(int));
unsigned int data2read = 1;
int num_elements = 0; // Must support large, non-power-of-2 arrays
// allocate host memory to store the input data
unsigned int mem_size = sizeof( float) * num_elements;
float* h_data = (float*) malloc( mem_size);
// * No arguments: Randomly generate input data and compare against the
// host's result.
// * One argument: Randomly generate input data and write the result to
// file name specified by first argument
// * Two arguments: Read the first argument which indicate the size of the array,
// randomly generate input data and write the input data
// to the second argument. (for generating random input data)
// * Three arguments: Read the first file which indicate the size of the array,
// then input data from the file name specified by 2nd argument and write the
// SCAN output to file name specified by the 3rd argument.
switch(argc-1)
{
case 2:
// Determine size of array
cutReadFilei(argv[1], &size, &data2read, true);
if(data2read != 1){
printf("Error reading parameter file\n");
exit(1);
}
num_elements = size[0];
// allocate host memory to store the input data
mem_size = sizeof( float) * num_elements;
h_data = (float*) malloc( mem_size);
for( unsigned int i = 0; i < num_elements; ++i)
{
h_data[i] = (int)(rand() % MAX_RAND);
}
WriteFile(h_data, argv[2], num_elements);
break;
case 3: // Three Arguments
cutReadFilei(argv[1], &size, &data2read, true);
if(data2read != 1){
printf("Error reading parameter file\n");
exit(1);
}
num_elements = size[0];
// allocate host memory to store the input data
mem_size = sizeof( float) * num_elements;
h_data = (float*) malloc( mem_size);
errorM = ReadFile(h_data, argv[2], size[0]);
if(errorM != 1)
{
printf("Error reading input file!\n");
exit(1);
}
break;
default: // No Arguments or one argument
// initialize the input data on the host to be integer values
// between 0 and 1000
// Use DEFAULT_NUM_ELEMENTS num_elements
num_elements = DEFAULT_NUM_ELEMENTS;
// allocate host memory to store the input data
mem_size = sizeof( float) * num_elements;
h_data = (float*) malloc( mem_size);
// initialize the input data on the host
for( unsigned int i = 0; i < num_elements; ++i)
{
// h_data[i] = 1.0f;
h_data[i] = (int)(rand() % MAX_RAND);
}
break;
}
unsigned int timer;
CUT_SAFE_CALL(cutCreateTimer(&timer));
// compute reference solution
float* reference = (float*) malloc( mem_size);
cutStartTimer(timer);
computeGold( reference, h_data, num_elements);
cutStopTimer(timer);
printf("\n\n**===-------------------------------------------------===**\n");
printf("Processing %d elements...\n", num_elements);
printf("Host CPU Processing time: %f (ms)\n", cutGetTimerValue(timer));
host_time = cutGetTimerValue(timer);
CUT_SAFE_CALL(cutDeleteTimer(timer));
// **===-------- Lab4: Allocate data structure here -----------===**
// allocate device memory input and output arrays
float* d_idata = NULL;
float* d_odata = NULL;
CUDA_SAFE_CALL( cudaMalloc( (void**) &d_idata, mem_size));
CUDA_SAFE_CALL( cudaMalloc( (void**) &d_odata, mem_size));
// copy host memory to device input array
CUDA_SAFE_CALL( cudaMemcpy( d_idata, h_data, mem_size, cudaMemcpyHostToDevice) );
// initialize all the other device arrays to be safe
CUDA_SAFE_CALL( cudaMemcpy( d_odata, h_data, mem_size, cudaMemcpyHostToDevice) );
// **===-----------------------------------------------------------===**
// Run just once to remove startup overhead for more accurate performance
// measurement
prescanArray(d_odata, d_idata, 16);
// Run the prescan
CUT_SAFE_CALL(cutCreateTimer(&timer));
cutStartTimer(timer);
// **===-------- Lab4: Modify the body of this function -----------===**
preOperation(num_elements);
prescanArray(d_odata, d_idata, num_elements);
prevOperation();
// **===-----------------------------------------------------------===**
CUDA_SAFE_CALL( cudaThreadSynchronize() );
cutStopTimer(timer);
printf("CUDA Processing time: %f (ms)\n", cutGetTimerValue(timer));
device_time = cutGetTimerValue(timer);
printf("Speedup: %fX\n", host_time/device_time);
// **===-------- Lab4: Deallocate data structure here -----------===**
// prevOperation();
// **===-----------------------------------------------------------===**
// copy result from device to host
CUDA_SAFE_CALL(cudaMemcpy( h_data, d_odata, sizeof(float) * num_elements,
cudaMemcpyDeviceToHost));
if ((argc - 1) == 3) // Three Arguments, write result to file
{
WriteFile(h_data, argv[3], num_elements);
}
else if ((argc - 1) == 1) // One Argument, write result to file
{
WriteFile(h_data, argv[1], num_elements);
}
// Check if the result is equivalent to the expected solution
unsigned int result_regtest = cutComparef( reference, h_data, num_elements);
printf( "Test %s\n", (1 == result_regtest) ? "PASSED" : "FAILED");
// cleanup memory
cutDeleteTimer(timer);
free( h_data);
free( reference);
cudaFree( d_odata);
cudaFree( d_idata);
}
int ReadFile(float* M, char* file_name, int size)
{
unsigned int elements_read = size;
if (cutReadFilef(file_name, &M, &elements_read, true))
return 1;
else
return 0;
}
void WriteFile(float* M, char* file_name, int size)
{
cutWriteFilef(file_name, M, size, 0.0001f);
}
|
cd6ecc4efb919e0904d190635c42020aace8698f.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
__global__ void sobel(unsigned char *output, unsigned char *input, int width, int height)
{
int x = blockIdx.x * blockDim.x + threadIdx.x;
int y = blockIdx.y * blockDim.y + threadIdx.y;
if (y >= height || x >= width)
return;
const int BLOCK_SIZE = 16;
// Where does our data start
int blockStartIndexX = blockIdx.x * blockDim.x - 1;
int blockStartIndexY = blockIdx.y * blockDim.y - 1;
// Clamp to edge
if (blockStartIndexX < 0)
blockStartIndexX = 0;
if (blockStartIndexX >= width)
blockStartIndexX = blockDim.x - 1;
if (blockStartIndexY < 0)
blockStartIndexY = 0;
if (blockStartIndexY >= height)
blockStartIndexY = blockDim.y - 1;
// Shared Data
__shared__ unsigned char pixels[BLOCK_SIZE + 2][BLOCK_SIZE + 2];
// Where is our data
unsigned char* cacheInput = input + (blockStartIndexX + blockStartIndexY * width);
// Linear index (16x16 -> 0..255)
int threadIndex = threadIdx.x + threadIdx.y * blockDim.x;
int maxLoadSizeBytes = (BLOCK_SIZE + 2) * (BLOCK_SIZE + 2); // 18x18 Block -> 324 Bytes
int maxIndexBytes = maxLoadSizeBytes / sizeof(short); // 18x18 Block -> Index 162
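// Only the first 162 of the 256 threads take part in the staging step: each copies a
// 2-byte (short-sized) chunk, 162 * 2 = 324 bytes, i.e. the full 18x18 tile (16x16 block
// plus a one-pixel apron on every side).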
if (threadIndex < maxIndexBytes)
{
// Calculate offset
int offsetInBytes = threadIndex * sizeof(short);
int block_half = (BLOCK_SIZE + 2) / 2;
int byteRow = offsetInBytes / (BLOCK_SIZE + 2);
int byteCol = threadIndex % block_half * 2;
int offset = byteCol + byteRow * width;
//int offsetBuffer = byteCol + byteRow * (BLOCK_SIZE + 2);
// Copy Data
unsigned char* toLoad = cacheInput + offset;
/**(&pixels[0][0] + offsetBuffer) = *toLoad;
*(&pixels[0][0] + offsetBuffer + 1) = *(toLoad + 1);*/
pixels[byteRow][byteCol] = *toLoad;
pixels[byteRow][byteCol + 1] = *(toLoad + 1);
}
__syncthreads();
// Sobel weights
float weightsX[9] = { -1, -2, -1,
0, 0, 0,
1, 2, 1 };
float weightsY[9] = { -1, 0, 1,
-2, 0, 2,
-1, 0, 1 };
int offsetY[9] = { -1, -1, -1,
0, 0, 0,
1, 1, 1 };
int offsetX[9] = { -1, 0, 1,
-1, 0, 1,
-1, 0, 1 };
float pointX = 0.f;
float pointY = 0.f;
#pragma unroll
for (int i = 0; i < 9; i++)
{
int indexX = threadIdx.x + 1 + offsetX[i];
int indexY = threadIdx.y + 1 + offsetY[i];
unsigned char pixel = pixels[indexY][indexX];
pointX += pixel * weightsX[i];
pointY += pixel * weightsY[i];
}
// Do Sobel here!
int index = x + y * width;
unsigned char * outputData = output + index;
outputData[0] = sqrtf(pointX * pointX + pointY * pointY);
} | cd6ecc4efb919e0904d190635c42020aace8698f.cu | #include "includes.h"
__global__ void sobel(unsigned char *output, unsigned char *input, int width, int height)
{
int x = blockIdx.x * blockDim.x + threadIdx.x;
int y = blockIdx.y * blockDim.y + threadIdx.y;
if (y >= height || x >= width)
return;
const int BLOCK_SIZE = 16;
// Where does our data start
int blockStartIndexX = blockIdx.x * blockDim.x - 1;
int blockStartIndexY = blockIdx.y * blockDim.y - 1;
// Clamp to edge
if (blockStartIndexX < 0)
blockStartIndexX = 0;
if (blockStartIndexX >= width)
blockStartIndexX = blockDim.x - 1;
if (blockStartIndexY < 0)
blockStartIndexY = 0;
if (blockStartIndexY >= height)
blockStartIndexY = blockDim.y - 1;
// Shared Data
__shared__ unsigned char pixels[BLOCK_SIZE + 2][BLOCK_SIZE + 2];
// Where is our data
unsigned char* cacheInput = input + (blockStartIndexX + blockStartIndexY * width);
// Linear index (16x16 -> 0..255)
int threadIndex = threadIdx.x + threadIdx.y * blockDim.x;
int maxLoadSizeBytes = (BLOCK_SIZE + 2) * (BLOCK_SIZE + 2); // 18x18 Block -> 324 Bytes
int maxIndexBytes = maxLoadSizeBytes / sizeof(short); // 18x18 Block -> Index 162
if (threadIndex < maxIndexBytes)
{
// Calculate offset
int offsetInBytes = threadIndex * sizeof(short);
int block_half = (BLOCK_SIZE + 2) / 2;
int byteRow = offsetInBytes / (BLOCK_SIZE + 2);
int byteCol = threadIndex % block_half * 2;
int offset = byteCol + byteRow * width;
//int offsetBuffer = byteCol + byteRow * (BLOCK_SIZE + 2);
// Copy Data
unsigned char* toLoad = cacheInput + offset;
/**(&pixels[0][0] + offsetBuffer) = *toLoad;
*(&pixels[0][0] + offsetBuffer + 1) = *(toLoad + 1);*/
pixels[byteRow][byteCol] = *toLoad;
pixels[byteRow][byteCol + 1] = *(toLoad + 1);
}
__syncthreads();
// Sobel weights
float weightsX[9] = { -1, -2, -1,
0, 0, 0,
1, 2, 1 };
float weightsY[9] = { -1, 0, 1,
-2, 0, 2,
-1, 0, 1 };
int offsetY[9] = { -1, -1, -1,
0, 0, 0,
1, 1, 1 };
int offsetX[9] = { -1, 0, 1,
-1, 0, 1,
-1, 0, 1 };
float pointX = 0.f;
float pointY = 0.f;
#pragma unroll
for (int i = 0; i < 9; i++)
{
int indexX = threadIdx.x + 1 + offsetX[i];
int indexY = threadIdx.y + 1 + offsetY[i];
unsigned char pixel = pixels[indexY][indexX];
pointX += pixel * weightsX[i];
pointY += pixel * weightsY[i];
}
// Do Sobel here!
int index = x + y * width;
unsigned char * outputData = output + index;
outputData[0] = sqrtf(pointX * pointX + pointY * pointY);
} |
ea737e11016bccb24328c9d5e6b30b8afc72be8a.hip | // !!! This is a file automatically generated by hipify!!!
/*
* test_mc.c
*
* Created on: 02-Feb-2009
* Author: alee
*/
#include <stdio.h>
#include "rng.h"
//#include <cutil.h>
#include "reduce.h"
#include "mc_gauss.h"
#include "mc_mix_gauss.h"
#include "mc_mix_gauss_mu.h"
#include "gauss.h"
#include "test_functions.h"
#include "mc_gauss_mv.h"
#include "mix_gauss.h"
#include "matrix.h"
#include "order.h"
void test_mcgaussmv_nolog(int N, int D, float* h_args_p, float* h_args_q, float* props, int nb, int nt) {
// unsigned int hTimer;
// double htime, gtime;
// cutCreateTimer(&hTimer);
float* warray = (float*) malloc(N * sizeof(float));
float sum[2];
double sumd[2];
float sumw = 0;
double sumwd = 0;
for (int j = 0; j < D; j++) {
sum[j] = 0;
sumd[j] = 0;
}
// cutResetTimer(hTimer);
// cutStartTimer(hTimer);
is_ref_nn_mv(N, D, props, warray, h_args_p, h_args_q, 0);
for (int i = 0; i < N; i++) {
for (int j = 0; j < D; j++) {
sumd[j] += warray[i] * vector_get(props, D, i)[j];
}
sumwd += warray[i];
}
// cutStopTimer(hTimer);
// htime = cutGetTimerValue(hTimer);
// printf("Time = %f\n", htime);
printf("HOST RESULT = (%f, %f)\n", sumd[0] / sumwd, sumd[1] / sumwd);
free(warray);
float* d_array;
hipMalloc((void **) &d_array, N * D * sizeof(float));
float* d_warray;
hipMalloc((void **) &d_warray, N * sizeof(float));
hipMemcpy(d_array, props, N * D * sizeof(float), hipMemcpyHostToDevice);
// cutResetTimer(hTimer);
// cutStartTimer(hTimer);
is_nn_mv(N, D, d_array, d_warray, h_args_p, h_args_q, 0, nb, nt);
hipDeviceSynchronize();
multiply(N, D, d_array, d_array, d_warray, nb, nt);
reduce(N, d_warray, sumw, nb, nt);
reduce(N, D, d_array, sum, nb, nt);
hipDeviceSynchronize();
// cutStopTimer(hTimer);
// gtime = cutGetTimerValue(hTimer);
// printf("Time = %f\n", gtime);
printf("RESULT = (%f,%f)\n", sum[0] / sumw, sum[1] / sumw);
// printf("speedup = %f\n", htime / gtime);
hipFree(d_array);
hipFree(d_warray);
}
void test_mcgaussmv_log(int N, int D, float* h_args_p, float* h_args_q, float* props, int nb, int nt) {
// unsigned int hTimer;
// double htime,gtime;
// cutCreateTimer(&hTimer);
float* warray = (float*) malloc(N * sizeof(float));
float sum[2];
double sumd[2];
float sumw = 0;
double sumwd = 0;
for (int j = 0; j < D; j++) {
sum[j] = 0;
sumd[j] = 0;
}
// cutResetTimer(hTimer);
// cutStartTimer(hTimer);
is_ref_nn_mv(N, D, props, warray, h_args_p, h_args_q, 1);
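// The log-weights are shifted by their maximum before exponentiating (log-sum-exp style
// rescaling) so that exp() cannot overflow; the common factor exp(maxlw) cancels in the
// self-normalised ratio sum(w*x) / sum(w) computed below.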
float maxlw = warray[0];
for (int i = 1; i < N; i++) {
maxlw = max(maxlw, warray[i]);
}
for (int i = 0; i < N; i++) {
warray[i] -= maxlw;
warray[i] = exp(warray[i]);
for (int j = 0; j < D; j++) {
sumd[j] += warray[i] * vector_get(props, D, i)[j];
}
sumwd += warray[i];
}
// cutStopTimer(hTimer);
// htime = cutGetTimerValue(hTimer);
// printf("Time = %f\n", htime);
printf("HOST RESULT = (%f, %f)\n", sumd[0] / sumwd, sumd[1] / sumwd);
free(warray);
float* d_array;
hipMalloc((void **) &d_array, N * D * sizeof(float));
float* d_warray;
hipMalloc((void **) &d_warray, N * sizeof(float));
hipMemcpy(d_array, props, N * D * sizeof(float), hipMemcpyHostToDevice);
// cutResetTimer(hTimer);
// cutStartTimer(hTimer);
is_nn_mv(N, D, d_array, d_warray, h_args_p, h_args_q, 1, nb, nt);
hipDeviceSynchronize();
maximum(N, d_warray, maxlw, nb, nt);
add(N, d_warray, d_warray, -maxlw, nb, nt);
exp(N, d_warray, d_warray, nb, nt);
multiply(N, D, d_array, d_array, d_warray, nb, nt);
reduce(N, d_warray, sumw, nb, nt);
reduce(N, D, d_array, sum, nb, nt);
hipDeviceSynchronize();
// cutStopTimer(hTimer);
// gtime = cutGetTimerValue(hTimer);
// printf("Time = %f\n", htime);
printf("RESULT = (%f,%f)\n", sum[0] / sumw, sum[1] / sumw);
// printf("speedup = %f\n", htime / gtime);
hipFree(d_array);
hipFree(d_warray);
}
// importance sampling with multivariate Gaussian proposal and target distributions
void test_mcgauss_mv(int N, int nb, int nt) {
const int D = 2;
printf("\nIS: Gaussian-Gaussian 2D\n");
float h_args_p[1 + D * D + D];
float cov_p[D * D];
matrix_set(cov_p, D, D, 0, 0, 1.0f);
matrix_set(cov_p, D, D, 0, 1, 0.5f);
matrix_set(cov_p, D, D, 1, 0, 0.5f);
matrix_set(cov_p, D, D, 1, 1, 2.0f);
compute_c1_c2(cov_p, D, h_args_p[0], h_args_p + 1);
h_args_p[5] = 1;
h_args_p[6] = 1;
float h_args_q[1 + D * D + D];
float cov_q[D * D];
matrix_set(cov_q, D, D, 0, 0, 1.0f);
matrix_set(cov_q, D, D, 0, 1, 0.0f);
matrix_set(cov_q, D, D, 1, 0, 0.0f);
matrix_set(cov_q, D, D, 1, 1, 1.0f);
compute_c1_c2(cov_q, D, h_args_q[0], h_args_q + 1);
h_args_q[5] = 0;
h_args_q[6] = 0;
float* array = (float*) malloc(N * D * sizeof(float));
populate_randn(array, N * D);
hipDeviceSynchronize();
test_mcgaussmv_nolog(N, D, h_args_p, h_args_q, array,nb,nt);
test_mcgaussmv_log(N, D, h_args_p, h_args_q, array,nb,nt);
free(array);
}
// importance sampling with univariate Gaussian proposal and target distributions
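// Estimator computed below (sketch): draw x_i ~ q, weight w_i = p(x_i)/q(x_i), and
// approximate E_p[x] by (1/N) * sum_i w_i * x_i. (Both densities appear to be normalised
// here, so no division by sum_i w_i is needed.)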
void test_mcgauss(int N, int nb, int nt) {
// unsigned int hTimer;
// double ctime, gtime;
// cutCreateTimer(&hTimer);
printf("\nIS: Gaussian-Gaussian 1D\n");
float h_args_p[3];
float h_args_q[3];
// p is N(2,0.25), q is N(0,1)
compute_c1_c2(0.5f, h_args_p[0], h_args_p[1]);
compute_c1_c2(1.0f, h_args_q[0], h_args_q[1]);
h_args_p[2] = 2;
h_args_q[2] = 0;
float* array = (float*) malloc(N * sizeof(float));
float* warray = (float*) malloc(N * sizeof(float));
populate_randn(array, N);
float h_sum = 0;
// cutResetTimer(hTimer);
// cutStartTimer(hTimer);
is_ref_nn(N, array, warray, h_args_p, h_args_q);
for (int i = 0; i < N; i++) {
h_sum += array[i] * warray[i];
}
// cutStopTimer(hTimer);
// ctime = cutGetTimerValue(hTimer);
// printf("Time = %f\n", ctime);
printf("HOST RESULT = %f\n", h_sum / N);
free(array);
free(warray);
float* d_array;
hipMalloc((void **) &d_array, N * sizeof(float));
float* d_array2;
hipMalloc((void **) &d_array2, N * sizeof(float));
float* d_warray;
hipMalloc((void **) &d_warray, N * sizeof(float));
populate_randn_d(d_array, N);
// cutResetTimer(hTimer);
// cutStartTimer(hTimer);
is_nn(N, d_array, d_warray, h_args_p, h_args_q, nb, nt);
hipDeviceSynchronize();
multiply(N, d_array, d_array2, d_warray, nb, nt);
hipDeviceSynchronize();
reduce(N, d_array2, h_sum, nb, nt);
hipDeviceSynchronize();
// cutStopTimer(hTimer);
// gtime = cutGetTimerValue(hTimer);
// printf("Time = %f\n", gtime);
printf("RESULT = %f\n", h_sum / N);
// printf("speedup = %f\n", ctime / gtime);
hipFree(d_array);
hipFree(d_array2);
hipFree(d_warray);
}
// importance sampling with target distribution being a mixture of univariate Gaussians and
// proposal distribution being Gaussian
void test_mixgauss(int N, int nb, int nt) {
// unsigned int hTimer;
// double htime, gtime;
// cutCreateTimer(&hTimer);
printf("\nIS: Mixture of Gaussians 1D\n");
const int k = 2;
float h_args_p[1 + 3 * k];
float h_args_q[3];
// p is an equal-weight two-component Gaussian mixture with means 0 and 3 (sigma 0.5); q is N(0,1)
h_args_p[0] = k;
h_args_p[1] = 0;
h_args_p[2] = 3;
compute_ci1_ci2(0.5f, 0.5f, h_args_p[3], h_args_p[5]);
compute_ci1_ci2(0.5f, 0.5f, h_args_p[4], h_args_p[6]);
compute_c1_c2(1.0f, h_args_q[0], h_args_q[1]);
h_args_q[2] = 0;
float* array = (float*) malloc(N * sizeof(float));
float* warray = (float*) malloc(N * sizeof(float));
populate_randn(array, N);
hipDeviceSynchronize();
float h_sum = 0;
// cutResetTimer(hTimer);
// cutStartTimer(hTimer);
is_ref_nmni(N, array, warray, h_args_p, h_args_q);
for (int i = 0; i < N; i++) {
h_sum += array[i] * warray[i];
}
// cutStopTimer(hTimer);
// htime = cutGetTimerValue(hTimer);
// printf("Time = %f\n", htime);
printf("HOST RESULT = %f\n", h_sum / N);
free(array);
free(warray);
float* d_array;
hipMalloc((void **) &d_array, N * sizeof(float));
float* d_array2;
hipMalloc((void **) &d_array2, N * sizeof(float));
float* d_warray;
hipMalloc((void **) &d_warray, N * sizeof(float));
populate_randn_d(d_array, N);
// cutResetTimer(hTimer);
// cutStartTimer(hTimer);
is_nmni(N, d_array, d_warray, h_args_p, h_args_q, nb, nt);
hipDeviceSynchronize();
multiply(N, d_array, d_array2, d_warray, nb, nt);
hipDeviceSynchronize();
reduce(N, d_array2, h_sum, nb, nt);
hipDeviceSynchronize();
// cutStopTimer(hTimer);
// gtime = cutGetTimerValue(hTimer);
// printf("Time = %f\n", gtime);
printf("RESULT = %f\n", h_sum / N);
// printf("speedup = %f\n", htime / gtime);
hipFree(d_array);
hipFree(d_array2);
hipFree(d_warray);
}
// importance sampling with target distribution being the posterior distribution of the means of a
// Gaussian mixture model given 100 observations with known and shared variance, equal weights with
// uniform prior on (-10,10)^4. actual means are -3, 0, 3, and 6.
// proposal distribution is uniform (-10,10)^4.
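// Up to a constant, the target evaluated on the device is assumed to be
//   log p(mu | y) = sum_{l=1..100} log( (1/4) * sum_{d=1..4} N(y_l | mu_d, sigma^2) )
// restricted to (-10,10)^4; with a uniform proposal the IS weights are proportional to exp of this.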
void test_mix(int N, int nb, int nt) {
const int D = 4;
const int L = 100;
float sigma = 0.55f;
float mus[4];
mus[0] = -3;
mus[1] = 0;
mus[2] = 3;
mus[3] = 6;
float data_array[L];
generate_mix_data(D, sigma, mus, data_array, L);
float c1, c2;
compute_ci1_ci2(sigma, 1.0f / D, c1, c2);
float h_args_p[L + 5];
h_args_p[0] = L;
for (int i = 0; i < L; i++) {
h_args_p[i + 1] = data_array[i];
}
h_args_p[L + 1] = c1;
h_args_p[L + 2] = c2;
h_args_p[L + 3] = -10;
h_args_p[L + 4] = 10;
float h_args_q[2];
h_args_q[0] = -10;
h_args_q[1] = 10;
// unsigned int hTimer;
// double time1, time2;
// cutCreateTimer(&hTimer);
printf("\nIS: Mixture of Gaussians: Mean Inference\n");
float* d_array;
hipMalloc((void **) &d_array, N * D * sizeof(float));
populate_rand_d(d_array, N * D);
multiply(N * D, d_array, d_array, 20, nb, nt);
hipDeviceSynchronize();
add(N * D, d_array, d_array, -10, nb, nt);
hipDeviceSynchronize();
float* array = (float*) malloc(N * D * sizeof(float));
float* warray = (float*) malloc(N * sizeof(float));
float sum[D];
double sumd[D];
float sumw = 0;
double sumwd = 0;
for (int j = 0; j < D; j++) {
sum[j] = 0;
sumd[j] = 0;
}
hipMemcpy(array, d_array, N * D * sizeof(float), hipMemcpyDeviceToHost);
// cutResetTimer(hTimer);
// cutStartTimer(hTimer);
is_ref_mgmu_mv(N, D, array, warray, h_args_p, h_args_q, 1);
// cutStopTimer(hTimer);
// time1 = cutGetTimerValue(hTimer);
// printf("Time = %f\n", time1);
float maxlw = warray[0];
for (int i = 1; i < N; i++) {
maxlw = max(maxlw, warray[i]);
}
for (int i = 0; i < N; i++) {
warray[i] -= maxlw;
warray[i] = exp(warray[i]);
for (int j = 0; j < D; j++) {
sumd[j] += warray[i] * vector_get(array, D, i)[j];
}
sumwd += warray[i];
}
// cutStopTimer(hTimer);
// time1 = cutGetTimerValue(hTimer);
// printf("Time = %f\n", time1);
printf("HOST RESULT = (%f, %f, %f, %f)\n", sumd[0] / sumwd, sumd[1] / sumwd, sumd[2] / sumwd,
sumd[3] / sumwd);
free(warray);
float* d_warray;
hipMalloc((void **) &d_warray, N * sizeof(float));
// cutResetTimer(hTimer);
// cutStartTimer(hTimer);
is_mgmu_mv(N, D, d_array, d_warray, h_args_p, h_args_q, 1, nb, nt);
hipDeviceSynchronize();
// cutStopTimer(hTimer);
// time2 = cutGetTimerValue(hTimer);
// printf("Time = %f\n", time2);
maximum(N, d_warray, maxlw, nb, nt);
add(N, d_warray, d_warray, -maxlw, nb, nt);
exp(N, d_warray, d_warray, nb, nt);
multiply(N, D, d_array, d_array, d_warray, nb, nt);
reduce(N, d_warray, sumw, nb, nt);
reduce(N, D, d_array, sum, nb, nt);
hipDeviceSynchronize();
// cutStopTimer(hTimer);
// time2 = cutGetTimerValue(hTimer);
// printf("Time = %f\n", time2);
printf("RESULT = (%f, %f, %f, %f)\n", sum[0] / sumw, sum[1] / sumw, sum[2] / sumw, sum[3]
/ sumw);
hipFree(d_array);
hipFree(d_warray);
// printf("speedup = %f\n", time1 / time2);
}
int main(int argc, char **argv) {
seed_rng();
// int N = 1048576;
// int N = 131072;
// int N = 65536;
// int N = 16777216;
// int N = 4194304;
int N = 8388608;
int nb = 256;
int nt = 256;
test_mcgauss(N,nb,nt);
test_mcgauss_mv(N,nb,nt);
test_mixgauss(N,nb,nt);
// test_mix(N/32,nb,nt);
kill_rng();
}
| ea737e11016bccb24328c9d5e6b30b8afc72be8a.cu | /*
* test_mc.c
*
* Created on: 02-Feb-2009
* Author: alee
*/
#include <stdio.h>
#include "rng.h"
//#include <cutil.h>
#include "reduce.h"
#include "mc_gauss.h"
#include "mc_mix_gauss.h"
#include "mc_mix_gauss_mu.h"
#include "gauss.h"
#include "test_functions.h"
#include "mc_gauss_mv.h"
#include "mix_gauss.h"
#include "matrix.h"
#include "order.h"
void test_mcgaussmv_nolog(int N, int D, float* h_args_p, float* h_args_q, float* props, int nb, int nt) {
// unsigned int hTimer;
// double htime, gtime;
// cutCreateTimer(&hTimer);
float* warray = (float*) malloc(N * sizeof(float));
float sum[2];
double sumd[2];
float sumw = 0;
double sumwd = 0;
for (int j = 0; j < D; j++) {
sum[j] = 0;
sumd[j] = 0;
}
// cutResetTimer(hTimer);
// cutStartTimer(hTimer);
is_ref_nn_mv(N, D, props, warray, h_args_p, h_args_q, 0);
for (int i = 0; i < N; i++) {
for (int j = 0; j < D; j++) {
sumd[j] += warray[i] * vector_get(props, D, i)[j];
}
sumwd += warray[i];
}
// cutStopTimer(hTimer);
// htime = cutGetTimerValue(hTimer);
// printf("Time = %f\n", htime);
printf("HOST RESULT = (%f, %f)\n", sumd[0] / sumwd, sumd[1] / sumwd);
free(warray);
float* d_array;
cudaMalloc((void **) &d_array, N * D * sizeof(float));
float* d_warray;
cudaMalloc((void **) &d_warray, N * sizeof(float));
cudaMemcpy(d_array, props, N * D * sizeof(float), cudaMemcpyHostToDevice);
// cutResetTimer(hTimer);
// cutStartTimer(hTimer);
is_nn_mv(N, D, d_array, d_warray, h_args_p, h_args_q, 0, nb, nt);
cudaThreadSynchronize();
multiply(N, D, d_array, d_array, d_warray, nb, nt);
reduce(N, d_warray, sumw, nb, nt);
reduce(N, D, d_array, sum, nb, nt);
cudaThreadSynchronize();
// cutStopTimer(hTimer);
// gtime = cutGetTimerValue(hTimer);
// printf("Time = %f\n", gtime);
printf("RESULT = (%f,%f)\n", sum[0] / sumw, sum[1] / sumw);
// printf("speedup = %f\n", htime / gtime);
cudaFree(d_array);
cudaFree(d_warray);
}
void test_mcgaussmv_log(int N, int D, float* h_args_p, float* h_args_q, float* props, int nb, int nt) {
// unsigned int hTimer;
// double htime,gtime;
// cutCreateTimer(&hTimer);
float* warray = (float*) malloc(N * sizeof(float));
float sum[2];
double sumd[2];
float sumw = 0;
double sumwd = 0;
for (int j = 0; j < D; j++) {
sum[j] = 0;
sumd[j] = 0;
}
// cutResetTimer(hTimer);
// cutStartTimer(hTimer);
is_ref_nn_mv(N, D, props, warray, h_args_p, h_args_q, 1);
float maxlw = warray[0];
for (int i = 1; i < N; i++) {
maxlw = max(maxlw, warray[i]);
}
for (int i = 0; i < N; i++) {
warray[i] -= maxlw;
warray[i] = exp(warray[i]);
for (int j = 0; j < D; j++) {
sumd[j] += warray[i] * vector_get(props, D, i)[j];
}
sumwd += warray[i];
}
// cutStopTimer(hTimer);
// htime = cutGetTimerValue(hTimer);
// printf("Time = %f\n", htime);
printf("HOST RESULT = (%f, %f)\n", sumd[0] / sumwd, sumd[1] / sumwd);
free(warray);
float* d_array;
cudaMalloc((void **) &d_array, N * D * sizeof(float));
float* d_warray;
cudaMalloc((void **) &d_warray, N * sizeof(float));
cudaMemcpy(d_array, props, N * D * sizeof(float), cudaMemcpyHostToDevice);
// cutResetTimer(hTimer);
// cutStartTimer(hTimer);
is_nn_mv(N, D, d_array, d_warray, h_args_p, h_args_q, 1, nb, nt);
cudaThreadSynchronize();
maximum(N, d_warray, maxlw, nb, nt);
add(N, d_warray, d_warray, -maxlw, nb, nt);
exp(N, d_warray, d_warray, nb, nt);
multiply(N, D, d_array, d_array, d_warray, nb, nt);
reduce(N, d_warray, sumw, nb, nt);
reduce(N, D, d_array, sum, nb, nt);
cudaThreadSynchronize();
// cutStopTimer(hTimer);
// gtime = cutGetTimerValue(hTimer);
// printf("Time = %f\n", htime);
printf("RESULT = (%f,%f)\n", sum[0] / sumw, sum[1] / sumw);
// printf("speedup = %f\n", htime / gtime);
cudaFree(d_array);
cudaFree(d_warray);
}
// importance sampling with multivariate Gaussian proposal and target distributions
void test_mcgauss_mv(int N, int nb, int nt) {
const int D = 2;
printf("\nIS: Gaussian-Gaussian 2D\n");
float h_args_p[1 + D * D + D];
float cov_p[D * D];
matrix_set(cov_p, D, D, 0, 0, 1.0f);
matrix_set(cov_p, D, D, 0, 1, 0.5f);
matrix_set(cov_p, D, D, 1, 0, 0.5f);
matrix_set(cov_p, D, D, 1, 1, 2.0f);
compute_c1_c2(cov_p, D, h_args_p[0], h_args_p + 1);
h_args_p[5] = 1;
h_args_p[6] = 1;
float h_args_q[1 + D * D + D];
float cov_q[D * D];
matrix_set(cov_q, D, D, 0, 0, 1.0f);
matrix_set(cov_q, D, D, 0, 1, 0.0f);
matrix_set(cov_q, D, D, 1, 0, 0.0f);
matrix_set(cov_q, D, D, 1, 1, 1.0f);
compute_c1_c2(cov_q, D, h_args_q[0], h_args_q + 1);
h_args_q[5] = 0;
h_args_q[6] = 0;
float* array = (float*) malloc(N * D * sizeof(float));
populate_randn(array, N * D);
cudaThreadSynchronize();
test_mcgaussmv_nolog(N, D, h_args_p, h_args_q, array,nb,nt);
test_mcgaussmv_log(N, D, h_args_p, h_args_q, array,nb,nt);
free(array);
}
// importance sampling with univariate Gaussian proposal and target distributions
void test_mcgauss(int N, int nb, int nt) {
// unsigned int hTimer;
// double ctime, gtime;
// cutCreateTimer(&hTimer);
printf("\nIS: Gaussian-Gaussian 1D\n");
float h_args_p[3];
float h_args_q[3];
// p is N(2,0.25), q is N(0,1)
compute_c1_c2(0.5f, h_args_p[0], h_args_p[1]);
compute_c1_c2(1.0f, h_args_q[0], h_args_q[1]);
h_args_p[2] = 2;
h_args_q[2] = 0;
float* array = (float*) malloc(N * sizeof(float));
float* warray = (float*) malloc(N * sizeof(float));
populate_randn(array, N);
float h_sum = 0;
// cutResetTimer(hTimer);
// cutStartTimer(hTimer);
is_ref_nn(N, array, warray, h_args_p, h_args_q);
for (int i = 0; i < N; i++) {
h_sum += array[i] * warray[i];
}
// cutStopTimer(hTimer);
// ctime = cutGetTimerValue(hTimer);
// printf("Time = %f\n", ctime);
printf("HOST RESULT = %f\n", h_sum / N);
free(array);
free(warray);
float* d_array;
cudaMalloc((void **) &d_array, N * sizeof(float));
float* d_array2;
cudaMalloc((void **) &d_array2, N * sizeof(float));
float* d_warray;
cudaMalloc((void **) &d_warray, N * sizeof(float));
populate_randn_d(d_array, N);
// cutResetTimer(hTimer);
// cutStartTimer(hTimer);
is_nn(N, d_array, d_warray, h_args_p, h_args_q, nb, nt);
cudaThreadSynchronize();
multiply(N, d_array, d_array2, d_warray, nb, nt);
cudaThreadSynchronize();
reduce(N, d_array2, h_sum, nb, nt);
cudaThreadSynchronize();
// cutStopTimer(hTimer);
// gtime = cutGetTimerValue(hTimer);
// printf("Time = %f\n", gtime);
printf("RESULT = %f\n", h_sum / N);
// printf("speedup = %f\n", ctime / gtime);
cudaFree(d_array);
cudaFree(d_array2);
cudaFree(d_warray);
}
// importance sampling with target distribution being a mixture of univariate Gaussians and
// proposal distribution being Gaussian
void test_mixgauss(int N, int nb, int nt) {
// unsigned int hTimer;
// double htime, gtime;
// cutCreateTimer(&hTimer);
printf("\nIS: Mixture of Gaussians 1D\n");
const int k = 2;
float h_args_p[1 + 3 * k];
float h_args_q[3];
// p is an equal-weight two-component Gaussian mixture with means 0 and 3 (sigma 0.5); q is N(0,1)
h_args_p[0] = k;
h_args_p[1] = 0;
h_args_p[2] = 3;
compute_ci1_ci2(0.5f, 0.5f, h_args_p[3], h_args_p[5]);
compute_ci1_ci2(0.5f, 0.5f, h_args_p[4], h_args_p[6]);
compute_c1_c2(1.0f, h_args_q[0], h_args_q[1]);
h_args_q[2] = 0;
float* array = (float*) malloc(N * sizeof(float));
float* warray = (float*) malloc(N * sizeof(float));
populate_randn(array, N);
cudaThreadSynchronize();
float h_sum = 0;
// cutResetTimer(hTimer);
// cutStartTimer(hTimer);
is_ref_nmni(N, array, warray, h_args_p, h_args_q);
for (int i = 0; i < N; i++) {
h_sum += array[i] * warray[i];
}
// cutStopTimer(hTimer);
// htime = cutGetTimerValue(hTimer);
// printf("Time = %f\n", htime);
printf("HOST RESULT = %f\n", h_sum / N);
free(array);
free(warray);
float* d_array;
cudaMalloc((void **) &d_array, N * sizeof(float));
float* d_array2;
cudaMalloc((void **) &d_array2, N * sizeof(float));
float* d_warray;
cudaMalloc((void **) &d_warray, N * sizeof(float));
populate_randn_d(d_array, N);
// cutResetTimer(hTimer);
// cutStartTimer(hTimer);
is_nmni(N, d_array, d_warray, h_args_p, h_args_q, nb, nt);
cudaThreadSynchronize();
multiply(N, d_array, d_array2, d_warray, nb, nt);
cudaThreadSynchronize();
reduce(N, d_array2, h_sum, nb, nt);
cudaThreadSynchronize();
// cutStopTimer(hTimer);
// gtime = cutGetTimerValue(hTimer);
// printf("Time = %f\n", gtime);
printf("RESULT = %f\n", h_sum / N);
// printf("speedup = %f\n", htime / gtime);
cudaFree(d_array);
cudaFree(d_array2);
cudaFree(d_warray);
}
// importance sampling with target distribution being the posterior distribution of the means of a
// Gaussian mixture model given 100 observations with known and shared variance, equal weights with
// uniform prior on (-10,10)^4. actual means are -3, 0, 3, and 6.
// proposal distribution is uniform (-10,10)^4.
void test_mix(int N, int nb, int nt) {
const int D = 4;
const int L = 100;
float sigma = 0.55f;
float mus[4];
mus[0] = -3;
mus[1] = 0;
mus[2] = 3;
mus[3] = 6;
float data_array[L];
generate_mix_data(D, sigma, mus, data_array, L);
float c1, c2;
compute_ci1_ci2(sigma, 1.0f / D, c1, c2);
float h_args_p[L + 5];
h_args_p[0] = L;
for (int i = 0; i < L; i++) {
h_args_p[i + 1] = data_array[i];
}
h_args_p[L + 1] = c1;
h_args_p[L + 2] = c2;
h_args_p[L + 3] = -10;
h_args_p[L + 4] = 10;
float h_args_q[2];
h_args_q[0] = -10;
h_args_q[1] = 10;
// unsigned int hTimer;
// double time1, time2;
// cutCreateTimer(&hTimer);
printf("\nIS: Mixture of Gaussians: Mean Inference\n");
float* d_array;
cudaMalloc((void **) &d_array, N * D * sizeof(float));
populate_rand_d(d_array, N * D);
multiply(N * D, d_array, d_array, 20, nb, nt);
cudaThreadSynchronize();
add(N * D, d_array, d_array, -10, nb, nt);
cudaThreadSynchronize();
float* array = (float*) malloc(N * D * sizeof(float));
float* warray = (float*) malloc(N * sizeof(float));
float sum[D];
double sumd[D];
float sumw = 0;
double sumwd = 0;
for (int j = 0; j < D; j++) {
sum[j] = 0;
sumd[j] = 0;
}
cudaMemcpy(array, d_array, N * D * sizeof(float), cudaMemcpyDeviceToHost);
// cutResetTimer(hTimer);
// cutStartTimer(hTimer);
is_ref_mgmu_mv(N, D, array, warray, h_args_p, h_args_q, 1);
// cutStopTimer(hTimer);
// time1 = cutGetTimerValue(hTimer);
// printf("Time = %f\n", time1);
float maxlw = warray[0];
for (int i = 1; i < N; i++) {
maxlw = max(maxlw, warray[i]);
}
for (int i = 0; i < N; i++) {
warray[i] -= maxlw;
warray[i] = exp(warray[i]);
for (int j = 0; j < D; j++) {
sumd[j] += warray[i] * vector_get(array, D, i)[j];
}
sumwd += warray[i];
}
// cutStopTimer(hTimer);
// time1 = cutGetTimerValue(hTimer);
// printf("Time = %f\n", time1);
printf("HOST RESULT = (%f, %f, %f, %f)\n", sumd[0] / sumwd, sumd[1] / sumwd, sumd[2] / sumwd,
sumd[3] / sumwd);
free(warray);
float* d_warray;
cudaMalloc((void **) &d_warray, N * sizeof(float));
// cutResetTimer(hTimer);
// cutStartTimer(hTimer);
is_mgmu_mv(N, D, d_array, d_warray, h_args_p, h_args_q, 1, nb, nt);
cudaThreadSynchronize();
// cutStopTimer(hTimer);
// time2 = cutGetTimerValue(hTimer);
// printf("Time = %f\n", time2);
maximum(N, d_warray, maxlw, nb, nt);
add(N, d_warray, d_warray, -maxlw, nb, nt);
exp(N, d_warray, d_warray, nb, nt);
multiply(N, D, d_array, d_array, d_warray, nb, nt);
reduce(N, d_warray, sumw, nb, nt);
reduce(N, D, d_array, sum, nb, nt);
cudaThreadSynchronize();
// cutStopTimer(hTimer);
// time2 = cutGetTimerValue(hTimer);
// printf("Time = %f\n", time2);
printf("RESULT = (%f, %f, %f, %f)\n", sum[0] / sumw, sum[1] / sumw, sum[2] / sumw, sum[3]
/ sumw);
cudaFree(d_array);
cudaFree(d_warray);
// printf("speedup = %f\n", time1 / time2);
}
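// test_mix works in log-space and subtracts the maximum log-weight before exponentiating,
// which keeps exp() from overflowing and yields the self-normalised estimator
// sum_i exp(lw_i - max_lw) * x_i / sum_i exp(lw_i - max_lw).
// Minimal host-only sketch of that stabilisation step (illustrative; the code above does the
// same thing with the maximum/add/exp/multiply/reduce device helpers):
static void normalize_log_weights_sketch(float* lw, int N) {
    float max_lw = lw[0];
    for (int i = 1; i < N; i++) max_lw = max(max_lw, lw[i]);
    float sum = 0;
    for (int i = 0; i < N; i++) { lw[i] = expf(lw[i] - max_lw); sum += lw[i]; }
    for (int i = 0; i < N; i++) lw[i] /= sum; // weights now sum to one
}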
int main(int argc, char **argv) {
seed_rng();
// int N = 1048576;
// int N = 131072;
// int N = 65536;
// int N = 16777216;
// int N = 4194304;
int N = 8388608;
int nb = 256;
int nt = 256;
test_mcgauss(N,nb,nt);
test_mcgauss_mv(N,nb,nt);
test_mixgauss(N,nb,nt);
// test_mix(N/32,nb,nt);
kill_rng();
}
|
9ae6c8526f27d8863e858d4a0429419f706c4602.hip | // !!! This is a file automatically generated by hipify!!!
// tests hipEventCreate
#include <iostream>
#include <memory>
using namespace std;
#include <hip/hip_runtime.h>
__global__ void longKernel(float *data, int N, float value) {
for(int i = 0; i < N; i++) {
data[i] += value;
}
}
void test1() {
const int N = 102400;
hipStream_t stream;
hipStreamCreate(&stream);
cout << "got stream" << endl;
float *hostFloats;
hipHostMalloc((void **)&hostFloats, N * sizeof(float), hipHostMallocPortable);
hipDeviceptr_t deviceFloats;
hipMalloc((void **)&deviceFloats, N * sizeof(float));
hipLaunchKernelGGL(( longKernel), dim3(dim3(102400 / 32, 1, 1)), dim3(dim3(32, 1, 1)), 0, stream, (float *)deviceFloats, N, 3.0f);
cout << "queued kernel 1" << endl;
hipEvent_t event;
hipEventCreateWithFlags(&event, hipEventDisableTiming);
hipEventRecord(event, stream);
hipStreamWaitEvent(stream, event, 0);
hipLaunchKernelGGL(( longKernel), dim3(dim3(102400 / 32, 1, 1)), dim3(dim3(32, 1, 1)), 0, stream, (float *)deviceFloats, N, 3.0f);
cout << "queued kernel 2" << endl;
// hipCtxSynchronize();
hipStreamSynchronize(stream);
cout << "finished" << endl;
hipEventDestroy(event);
hipHostFree(hostFloats);
hipFree(deviceFloats);
hipStreamDestroy(stream );
}
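// test1 creates its event with hipEventDisableTiming because the event is only used to order
// work within the stream. Events created without that flag can also time a kernel; a minimal
// sketch follows (illustrative only, assumes N is a multiple of 32 and deviceFloats is a valid
// device allocation of N floats).
void timeKernelSketch(hipStream_t stream, float *deviceFloats, int N) {
    hipEvent_t start, stop;
    hipEventCreate(&start);
    hipEventCreate(&stop);
    hipEventRecord(start, stream);
    hipLaunchKernelGGL(longKernel, dim3(N / 32, 1, 1), dim3(32, 1, 1), 0, stream, deviceFloats, N, 3.0f);
    hipEventRecord(stop, stream);
    hipEventSynchronize(stop); // blocks until the kernel and the stop event have completed
    float ms = 0.0f;
    hipEventElapsedTime(&ms, start, stop);
    cout << "kernel took " << ms << " ms" << endl;
    hipEventDestroy(start);
    hipEventDestroy(stop);
}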
void dump(float *M, int N) {
for(int row=0; row < N; row++) {
cout << " " << M[row];
}
cout << endl;
}
void fill(float *M, int N, float val) {
for(int row=0; row < N; row++) {
M[row] = val;
}
}
void test2() {
// use a long running kernel, queue an async copy back from device
// => returned values should, in theory, be correct...
const int N = 102400;
hipStream_t stream;
hipStreamCreate(&stream);
float hostFloats[N];
hipDeviceptr_t deviceFloats;
cout << "call cumemalloc" << endl;
hipMalloc((void **)&deviceFloats, N * sizeof(float));
cout << "cumemalloc done" << endl;
fill(hostFloats, 10, 123);
dump(hostFloats, 10);
cout << "calling hipMemcpyHtoDAsync" << endl;
hipMemcpyHtoDAsync(deviceFloats, hostFloats, N * sizeof(float), stream);
cout << "hipMemcpyHtoDAsync done" << endl;
hipLaunchKernelGGL(( longKernel), dim3(dim3(102400 / 32, 1, 1)), dim3(dim3(32, 1, 1)), 0, stream, (float *)deviceFloats, N, 3.0f);
cout << "queued kernel" << endl;
hipMemcpyDtoHAsync(hostFloats, deviceFloats, N * sizeof(float), stream);
cout << "queued async copy" << endl;
hipStreamSynchronize(stream);
dump(hostFloats, 10);
hipFree(deviceFloats);
hipStreamDestroy(stream);
}
int main(int argc, char *argv[]) {
cout << "test1" << endl;
test1();
cout << "test2" << endl;
test2();
return 0;
}
| 9ae6c8526f27d8863e858d4a0429419f706c4602.cu | // tests cuEventCreate
#include <iostream>
#include <memory>
using namespace std;
#include <cuda.h>
__global__ void longKernel(float *data, int N, float value) {
for(int i = 0; i < N; i++) {
data[i] += value;
}
}
void test1() {
const int N = 102400;
CUstream stream;
cuStreamCreate(&stream, 0);
cout << "got stream" << endl;
float *hostFloats;
cuMemHostAlloc((void **)&hostFloats, N * sizeof(float), CU_MEMHOSTALLOC_PORTABLE);
CUdeviceptr deviceFloats;
cuMemAlloc(&deviceFloats, N * sizeof(float));
longKernel<<<dim3(102400 / 32, 1, 1), dim3(32, 1, 1), 0, stream>>>((float *)deviceFloats, N, 3.0f);
cout << "queued kernel 1" << endl;
CUevent event;
cuEventCreate(&event, CU_EVENT_DISABLE_TIMING);
cuEventRecord(event, stream);
cuStreamWaitEvent(stream, event, 0);
longKernel<<<dim3(102400 / 32, 1, 1), dim3(32, 1, 1), 0, stream>>>((float *)deviceFloats, N, 3.0f);
cout << "queued kernel 2" << endl;
// cuCtxSynchronize();
cuStreamSynchronize(stream);
cout << "finished" << endl;
cuEventDestroy(event);
cuMemFreeHost(hostFloats);
cuMemFree(deviceFloats);
cuStreamDestroy(stream );
}
void dump(float *M, int N) {
for(int row=0; row < N; row++) {
cout << " " << M[row];
}
cout << endl;
}
void fill(float *M, int N, float val) {
for(int row=0; row < N; row++) {
M[row] = val;
}
}
void test2() {
// use a long running kernel, queue an async copy back from device
// => returned values should, in theory, be correct...
const int N = 102400;
CUstream stream;
cuStreamCreate(&stream, 0);
float hostFloats[N];
CUdeviceptr deviceFloats;
cout << "call cumemalloc" << endl;
cuMemAlloc(&deviceFloats, N * sizeof(float));
cout << "cumemalloc done" << endl;
fill(hostFloats, 10, 123);
dump(hostFloats, 10);
cout << "calling cuMemcpyHtoDAsync" << endl;
cuMemcpyHtoDAsync((CUdeviceptr)(((float *)deviceFloats)), hostFloats, N * sizeof(float), stream);
cout << "cuMemcpyHtoDAsync done" << endl;
longKernel<<<dim3(102400 / 32, 1, 1), dim3(32, 1, 1), 0, stream>>>((float *)deviceFloats, N, 3.0f);
cout << "queued kernel" << endl;
cuMemcpyDtoHAsync(hostFloats, (CUdeviceptr)((float *)deviceFloats), N * sizeof(float), stream);
cout << "queued async copy" << endl;
cuStreamSynchronize(stream);
dump(hostFloats, 10);
cuMemFree(deviceFloats);
cuStreamDestroy(stream);
}
int main(int argc, char *argv[]) {
cout << "test1" << endl;
test1();
cout << "test2" << endl;
test2();
return 0;
}
|
0344fe99abc9203bfb546c9168f72bbf6d94992a.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
//Matrix multiplication using shared and non shared kernal
/*
nvcc -arch=sm_60 -O2 test1.cu -o ./test1.x
nvprof ./test1.x
*/
#include <stdio.h>
#include <math.h>
#include "matrix_lib.h"
#define TILE_WIDTH 2
#define ROWN 6
//Need a convert function here from 2D to 1D for the host version so the CPU and GPU results can be compared (eventually this belongs in the lib); a sketch of such helpers follows MatrixMulSh below.
//Serial
void cpuMatMul(float *x, float *y, float *ans, const int N)
{
for(int i=0;i<N;i++) //row
{
for(int j=0;j<N;j++) //col
{
for(int k=0;k<N;k++) //inner (dot-product) index
{
ans[i*N + j] += (x[i*N+k] * y[k*N+j]);
}
}
}
}
//non shared
__global__ void
MatrixMul( float *x , float *y , float *ans , const int N )
{
unsigned int col = TILE_WIDTH*blockIdx.x + threadIdx.x ;
unsigned int row = TILE_WIDTH*blockIdx.y + threadIdx.y ;
float sum = 0.0f ;
for (int k = 0 ; k<N ; k++ )
{
sum += x[row * N + k ] * y[ k * N + col] ;
}
ans[row*N + col] = sum ; // write once instead of accumulating into uninitialised device memory
}
// shared
__global__ void
MatrixMulSh( float *x , float *y , float *ans , const int N )
{
__shared__ float xs [TILE_WIDTH][TILE_WIDTH] ;
__shared__ float ys [TILE_WIDTH][TILE_WIDTH] ;
unsigned int col = TILE_WIDTH*blockIdx.x + threadIdx.x ;
unsigned int row = TILE_WIDTH*blockIdx.y + threadIdx.y ;
float sum = 0.0f ;
for (int m = 0 ; m < N/TILE_WIDTH ; m++ ) // m is the tile (phase) index
{
xs[threadIdx.y][threadIdx.x] = x[row*N + (m*TILE_WIDTH + threadIdx.x)] ;
ys[threadIdx.y][threadIdx.x] = y[ (m*TILE_WIDTH + threadIdx.y) * N + col] ;
__syncthreads() ; // wait until the whole tile is loaded
// accumulate the partial dot product for this tile
for ( int k = 0; k<TILE_WIDTH ; k++ )
sum += xs[threadIdx.y][k] * ys[k][threadIdx.x] ;
__syncthreads() ; // wait before the tile is overwritten in the next phase
}
ans[row*N + col] = sum ; // write once instead of accumulating into uninitialised device memory
}
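// The TODO near the top asks for a 2D-to-1D conversion so the cpuMatMul result can be compared
// with the buffer copied back from the device. Minimal sketch of such helpers (illustrative
// only; they are not part of matrix_lib.h):
void flatten2D(float in[ROWN][ROWN], float *out, int N)
{
    for (int i = 0; i < N; i++)
        for (int j = 0; j < N; j++)
            out[i * N + j] = in[i][j]; // row-major, matching the kernels above
}
int compareResults(const float *a, const float *b, int N, float tol)
{
    int mismatches = 0;
    for (int i = 0; i < N * N; i++)
        if (fabsf(a[i] - b[i]) > tol)
            mismatches++;
    return mismatches; // 0 means host and device agree within tol
}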
// main routine
int main ()
{
const int WIDTH = ROWN ;
float array1_h[WIDTH][WIDTH] ,
array2_h[WIDTH][WIDTH],
result_array_h[WIDTH][WIDTH] ,
M_result_array_h[WIDTH][WIDTH] ;
float *h_array1, *h_array2, *h_result_array;
float *array1_d , *array2_d ,*result_array_d ,*M_result_array_d ; // device array
int i , j ;
//input in host array
for ( i = 0 ; i<WIDTH ; i++ )
{
for (j = 0 ; j<WIDTH ; j++ )
{
array1_h[i][j] = 1 ;
array2_h[i][j] = 2 ;
}
}
//create device array hipMalloc ( (void **)&array_name, sizeofmatrixinbytes) ;
hipMalloc((void **) &array1_d , WIDTH*WIDTH*sizeof (int) ) ;
hipMalloc((void **) &array2_d , WIDTH*WIDTH*sizeof (int) ) ;
//copy host array to device array; hipMemcpy ( dest , source , WIDTH , direction )
hipMemcpy ( array1_d , array1_h , WIDTH*WIDTH*sizeof (int) , hipMemcpyHostToDevice ) ;
hipMemcpy ( array2_d , array2_h , WIDTH*WIDTH*sizeof (int) , hipMemcpyHostToDevice ) ;
//allocating memory for resultent device array
hipMalloc((void **) &result_array_d , WIDTH*WIDTH*sizeof (int) );
hipMalloc((void **) &M_result_array_d , WIDTH*WIDTH*sizeof (int) );
//calling kernal
dim3 dimGrid ( WIDTH/TILE_WIDTH , WIDTH/TILE_WIDTH ,1 ) ;
dim3 dimBlock( TILE_WIDTH, TILE_WIDTH, 1 ) ;
// Change if 0 to if 1 for running non shared code and make if 0 for shared memory code
#if 1
hipLaunchKernelGGL(( MatrixMul) , dim3(dimGrid),dim3(dimBlock), 0, 0, array1_d , array2_d ,M_result_array_d , WIDTH) ;
#endif
#if 0
hipLaunchKernelGGL(( MatrixMulSh), dim3(dimGrid),dim3(dimBlock), 0, 0, array1_d , array2_d ,M_result_array_d , WIDTH) ;
#endif
//cpuMatMul(h_array1, h_array2, h_result_array , WIDTH) ; //compare host and device versions
//Need some convention here
// all gpu function blocked till kernel is working
//copy back result_array_d to result_array_h
hipMemcpy(M_result_array_h , M_result_array_d , WIDTH*WIDTH*sizeof(int) ,
hipMemcpyDeviceToHost) ;
//printf the result array
for ( i = 0 ; i<WIDTH ; i++ )
{
for ( j = 0 ; j < WIDTH ; j++ )
{
printf ("%f ",M_result_array_h[i][j] ) ;
}
printf ("\n") ;
}
//system("pause") ;
}
| 0344fe99abc9203bfb546c9168f72bbf6d94992a.cu | //Matrix multiplication using shared and non shared kernal
/*
nvcc -arch=sm_60 -O2 test1.cu -o ./test1.x
nvprof ./test1.x
*/
#include <stdio.h>
#include <math.h>
#include "matrix_lib.h"
#define TILE_WIDTH 2
#define ROWN 6
//Need a convert function here from 2D to 1D for the host version, then we can compare, make it in the lib function
//Serial
void cpuMatMul(float *x, float *y, float *ans, const int N)
{
for(int i=0;i<N;i++) //row
{
for(int j=0;j<N;j++) //row
{
for(int k=0;k<N;k++) //col
{
ans[i*N + j] += (x[i*N+k] * y[k*N+j]);
}
}
}
}
//non shared
__global__ void
MatrixMul( float *x , float *y , float *ans , const int N )
{
unsigned int col = TILE_WIDTH*blockIdx.x + threadIdx.x ;
unsigned int row = TILE_WIDTH*blockIdx.y + threadIdx.y ;
float sum = 0.0f ;
for (int k = 0 ; k<N ; k++ )
{
sum += x[row * N + k ] * y[ k * N + col] ;
}
ans[row*N + col] = sum ; // write once instead of accumulating into uninitialised device memory
}
// shared
__global__ void
MatrixMulSh( float *x , float *y , float *ans , const int N )
{
__shared__ float xs [TILE_WIDTH][TILE_WIDTH] ;
__shared__ float ys [TILE_WIDTH][TILE_WIDTH] ;
unsigned int col = TILE_WIDTH*blockIdx.x + threadIdx.x ;
unsigned int row = TILE_WIDTH*blockIdx.y + threadIdx.y ;
float sum = 0.0f ;
for (int m = 0 ; m < N/TILE_WIDTH ; m++ ) // m is the tile (phase) index
{
xs[threadIdx.y][threadIdx.x] = x[row*N + (m*TILE_WIDTH + threadIdx.x)] ;
ys[threadIdx.y][threadIdx.x] = y[ (m*TILE_WIDTH + threadIdx.y) * N + col] ;
__syncthreads() ; // wait until the whole tile is loaded
// accumulate the partial dot product for this tile
for ( int k = 0; k<TILE_WIDTH ; k++ )
sum += xs[threadIdx.y][k] * ys[k][threadIdx.x] ;
__syncthreads() ; // wait before the tile is overwritten in the next phase
}
ans[row*N + col] = sum ; // write once instead of accumulating into uninitialised device memory
}
// main routine
int main ()
{
const int WIDTH = ROWN ;
float array1_h[WIDTH][WIDTH] ,
array2_h[WIDTH][WIDTH],
result_array_h[WIDTH][WIDTH] ,
M_result_array_h[WIDTH][WIDTH] ;
float *h_array1, *h_array2, *h_result_array;
float *array1_d , *array2_d ,*result_array_d ,*M_result_array_d ; // device array
int i , j ;
//input in host array
for ( i = 0 ; i<WIDTH ; i++ )
{
for (j = 0 ; j<WIDTH ; j++ )
{
array1_h[i][j] = 1 ;
array2_h[i][j] = 2 ;
}
}
//create device array cudaMalloc ( (void **)&array_name, sizeofmatrixinbytes) ;
cudaMalloc((void **) &array1_d , WIDTH*WIDTH*sizeof (int) ) ;
cudaMalloc((void **) &array2_d , WIDTH*WIDTH*sizeof (int) ) ;
//copy host array to device array; cudaMemcpy ( dest , source , WIDTH , direction )
cudaMemcpy ( array1_d , array1_h , WIDTH*WIDTH*sizeof (int) , cudaMemcpyHostToDevice ) ;
cudaMemcpy ( array2_d , array2_h , WIDTH*WIDTH*sizeof (int) , cudaMemcpyHostToDevice ) ;
//allocating memory for resultent device array
cudaMalloc((void **) &result_array_d , WIDTH*WIDTH*sizeof (int) );
cudaMalloc((void **) &M_result_array_d , WIDTH*WIDTH*sizeof (int) );
//calling kernal
dim3 dimGrid ( WIDTH/TILE_WIDTH , WIDTH/TILE_WIDTH ,1 ) ;
dim3 dimBlock( TILE_WIDTH, TILE_WIDTH, 1 ) ;
// Change if 0 to if 1 for running non shared code and make if 0 for shared memory code
#if 1
MatrixMul <<<dimGrid,dimBlock>>> ( array1_d , array2_d ,M_result_array_d , WIDTH) ;
#endif
#if 0
MatrixMulSh<<<dimGrid,dimBlock>>> ( array1_d , array2_d ,M_result_array_d , WIDTH) ;
#endif
//cpuMatMul(h_array1, h_array2, h_result_array , WIDTH) ; //compare host and device versions
//Need some convention here
// all gpu function blocked till kernel is working
//copy back result_array_d to result_array_h
cudaMemcpy(M_result_array_h , M_result_array_d , WIDTH*WIDTH*sizeof(int) ,
cudaMemcpyDeviceToHost) ;
//printf the result array
for ( i = 0 ; i<WIDTH ; i++ )
{
for ( j = 0 ; j < WIDTH ; j++ )
{
printf ("%f ",M_result_array_h[i][j] ) ;
}
printf ("\n") ;
}
//system("pause") ;
}
|
ab179437de18031648339824ca9c3cec9504dd64.hip | // !!! This is a file automatically generated by hipify!!!
// ----------------------------------------------------------------------------------------------------
// Copyrighted by Marko Rakita.
// Author: Marko Rakita
// File contains: Tests for max pool layer.
// Created: 02/07/2016.
// ----------------------------------------------------------------------------------------------------
#include "include/testmaxpoollayer.cuh"
TestMaxPoolLayer::TestMaxPoolLayer(string outputFolder)
{
m_outputFolder = outputFolder;
// Registering tests.
m_maxPoolLayerTests["doforwardprop"] = &TestMaxPoolLayer::TestDoForwardProp;
m_maxPoolLayerTests["dobackwardprop"] = &TestMaxPoolLayer::TestDoBackwardProp;
}
bool TestMaxPoolLayer::HasTest(string testName)
{
auto test = m_maxPoolLayerTests.find(testName);
return test != m_maxPoolLayerTests.end();
}
void TestMaxPoolLayer::RunTest(string testName)
{
auto test = m_maxPoolLayerTests.find(testName);
TestingAssert(test != m_maxPoolLayerTests.end(), "Test not found!");
((*this).*(test->second))();
}
void TestMaxPoolLayer::RunAllTests()
{
for (auto test = m_maxPoolLayerTests.begin(); test != m_maxPoolLayerTests.end(); ++test)
{
((*this).*(test->second))();
s_consoleHelper.SetConsoleForeground(ConsoleForeground::GREEN);
cout << "Test " << test->first << " passed!" << endl << endl;
s_consoleHelper.RevertConsoleForeground();
}
}
//******************************************************************************************************
// Helper functions
//******************************************************************************************************
void TestMaxPoolLayer::TestDoForwardProp(uint inputNumChannels, uint inputDataWidth, uint inputDataHeight, uint inputDataCount, uint unitWidth,
uint unitHeight, int paddingLeft, int paddingTop, uint unitStride)
{
// Creating layers.
MockInputLayer mockInputLayer(inputNumChannels, inputDataWidth, inputDataHeight, inputDataCount);
MockMaxPoolLayer mockMaxPoolLayer(inputNumChannels, inputDataWidth, inputDataHeight, inputDataCount, unitWidth, unitHeight, paddingLeft, paddingTop, unitStride);
mockMaxPoolLayer.AddPrevLayer(&mockInputLayer);
MaxPoolLayer maxPoolLayer(ParallelismMode::Data, 0, 0, 0, 1, inputNumChannels, inputDataWidth, inputDataHeight, inputDataCount, false, unitWidth, unitHeight,
paddingLeft, paddingTop, unitStride, false);
maxPoolLayer.AddPrevLayer(&mockInputLayer);
// Doing forward prop.
PropagationMode propagationMode = PropagationMode::Train;
mockInputLayer.LoadInputs();
mockInputLayer.DoForwardProp(propagationMode);
mockMaxPoolLayer.LoadInputs();
maxPoolLayer.LoadInputs();
maxPoolLayer.DoForwardProp(propagationMode);
mockMaxPoolLayer.DoForwardProp(propagationMode);
CudaAssert(hipDeviceSynchronize());
// Transferring results to host.
size_t activationsBufferSize = mockMaxPoolLayer.GetActivationBufferSize();
float* maxPoolLayerActivationBuffer;
CudaAssert(hipHostMalloc<float>(&maxPoolLayerActivationBuffer, activationsBufferSize));
CudaAssert(hipMemcpy(maxPoolLayerActivationBuffer, maxPoolLayer.GetActivationDataBuffer(), activationsBufferSize, hipMemcpyDeviceToHost));
// Checking correctness.
bool correctResult = true;
size_t numDifferences = 0;
float firstDifference = 0.f;
float firstDifferentMock = 0.f;
float firstDifferentReg = 0.f;
bool foundDifferentFromZeroMock = false;
bool foundDifferentFromZeroReg = false;
size_t activationsBufferLength = activationsBufferSize / sizeof(float);
const float* mockMaxPoolLayerActivationBuffer = mockMaxPoolLayer.GetActivationDataBuffer();
const float maxDiff = 0.0001f;
const float maxDiffPercentage = 0.001f;
const float maxDiffPercentageThreshold = 0.000001f;
CompareBuffers(maxPoolLayerActivationBuffer, mockMaxPoolLayerActivationBuffer, activationsBufferLength, maxDiff, maxDiffPercentage, maxDiffPercentageThreshold,
correctResult, numDifferences, firstDifference, firstDifferentMock, firstDifferentReg, foundDifferentFromZeroMock, foundDifferentFromZeroReg);
CudaAssert(hipHostFree(maxPoolLayerActivationBuffer));
TestingAssert(foundDifferentFromZeroMock, "All mock max pool activations are zeros! Input num channels: " + to_string(inputNumChannels) + "; Input data count: " +
to_string(inputDataCount) + "; Unit width: " + to_string(unitWidth) + "; Padding left: " + to_string(paddingLeft) + "; Unit stride: " + to_string(unitStride));
TestingAssert(foundDifferentFromZeroReg, "All max pool activations are zeros! Input num channels: " + to_string(inputNumChannels) + "; Input data count: " +
to_string(inputDataCount) + "; Unit width: " + to_string(unitWidth) + "; Padding left: " + to_string(paddingLeft) + "; Unit stride: " + to_string(unitStride));
TestingAssert(correctResult, "Incorrect forward prop! Num differences: " + to_string(numDifferences) + "; First difference: " + to_string(firstDifference) +
"; First different mock activation: " + to_string(firstDifferentMock) + "; First different regular activation: " + to_string(firstDifferentReg) +
"; Input num channels: " + to_string(inputNumChannels) + "; Input data count: " + to_string(inputDataCount) + "; Unit width: " + to_string(unitWidth) +
"; Padding left: " + to_string(paddingLeft) + "; Unit stride: " + to_string(unitStride));
cout << "Forward prop passed. Input num channels: " << inputNumChannels << "; Input data count: " << inputDataCount << "; Unit width: " << unitWidth <<
"; Padding left: " << paddingLeft << "; Unit stride: " << unitStride << endl;
}
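// CompareBuffers is implemented elsewhere in the project. The sketch below is an illustrative
// assumption about what such an element-wise check typically looks like (absolute tolerance
// combined with a relative-difference threshold); it is not the project's actual implementation.
static bool CompareBuffersSketch(const float* a, const float* b, size_t length, float maxDiff, float maxRelDiff)
{
	for (size_t i = 0; i < length; ++i)
	{
		float diff = a[i] > b[i] ? a[i] - b[i] : b[i] - a[i];
		float mag = a[i] > 0.f ? a[i] : -a[i];
		if (diff > maxDiff && (mag == 0.f || diff / mag > maxRelDiff))
			return false; // first element outside both tolerances fails the whole comparison
	}
	return true;
}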
void TestMaxPoolLayer::TestDoBackwardProp(uint inputNumChannels, uint inputDataWidth, uint inputDataHeight, uint inputDataCount, uint unitWidth,
uint unitHeight, int paddingLeft, int paddingTop, uint unitStride)
{
// Creating layers.
MockInputLayer mockInputLayer(inputNumChannels, inputDataWidth, inputDataHeight, inputDataCount);
MockMaxPoolLayer mockMaxPoolLayer(inputNumChannels, inputDataWidth, inputDataHeight, inputDataCount, unitWidth, unitHeight, paddingLeft, paddingTop, unitStride);
mockMaxPoolLayer.AddPrevLayer(&mockInputLayer);
MaxPoolLayer maxPoolLayer(ParallelismMode::Data, 0, 0, 0, 1, inputNumChannels, inputDataWidth, inputDataHeight, inputDataCount, false, unitWidth, unitHeight,
paddingLeft, paddingTop, unitStride, false);
maxPoolLayer.AddPrevLayer(&mockInputLayer);
MockOutputLayer mockOutputLayer(maxPoolLayer.GetActivationDataSize() * maxPoolLayer.GetActivationNumChannels(), inputDataCount, LossFunctionType::LogisticRegression, false, 0, true);
mockMaxPoolLayer.AddNextLayer(&mockOutputLayer);
maxPoolLayer.AddNextLayer(&mockOutputLayer);
// Doing forward and backward prop.
PropagationMode propagationMode = PropagationMode::Train;
mockInputLayer.LoadInputs();
mockInputLayer.DoForwardProp(propagationMode);
mockMaxPoolLayer.LoadInputs();
maxPoolLayer.LoadInputs();
maxPoolLayer.DoForwardProp(propagationMode);
mockMaxPoolLayer.DoForwardProp(propagationMode);
mockOutputLayer.DoBackwardProp();
maxPoolLayer.LoadActivationGradients();
maxPoolLayer.DoBackwardProp();
mockMaxPoolLayer.LoadActivationGradients();
mockMaxPoolLayer.DoBackwardProp();
CudaAssert(hipDeviceSynchronize());
// Transferring results to host.
size_t inputGradientsBufferSize = mockInputLayer.GetActivationBufferSize();
float* maxPoolLayerInputGradientsBuffer;
CudaAssert(hipHostMalloc<float>(&maxPoolLayerInputGradientsBuffer, inputGradientsBufferSize));
CudaAssert(hipMemcpy(maxPoolLayerInputGradientsBuffer, maxPoolLayer.GetInputGradientsBuffer(), inputGradientsBufferSize, hipMemcpyDeviceToHost));
// Checking correctness.
bool correctResult = true;
size_t numDifferences = 0;
float firstDifference = 0.f;
float firstDifferentMock = 0.f;
float firstDifferentReg = 0.f;
bool foundDifferentFromZeroMock = false;
bool foundDifferentFromZeroReg = false;
size_t inputGradientsBufferLength = inputGradientsBufferSize / sizeof(float);
const float* mockMaxPoolLayerInputGradientsBuffer = mockMaxPoolLayer.GetInputGradientsBuffer();
const float maxDiff = 0.0001f;
const float maxDiffPercentage = 0.001f;
const float maxDiffPercentageThreshold = 0.000001f;
CompareBuffers(maxPoolLayerInputGradientsBuffer, mockMaxPoolLayerInputGradientsBuffer, inputGradientsBufferLength, maxDiff, maxDiffPercentage, maxDiffPercentageThreshold,
correctResult, numDifferences, firstDifference, firstDifferentMock, firstDifferentReg, foundDifferentFromZeroMock, foundDifferentFromZeroReg);
for (size_t i = 0; i < inputGradientsBufferLength; ++i)
{
if (abs(maxPoolLayerInputGradientsBuffer[i] - mockMaxPoolLayerInputGradientsBuffer[i]) > 0.00001f)
{
cout << "regular: " << maxPoolLayerInputGradientsBuffer[i] << " , " << "mock: " << mockMaxPoolLayerInputGradientsBuffer[i] << endl;
cout << i << endl;
}
}
CudaAssert(hipHostFree(maxPoolLayerInputGradientsBuffer));
TestingAssert(foundDifferentFromZeroMock, "All mock max pool input gradients are zeros! Input num channels: " + to_string(inputNumChannels) +
"; Input data count: " + to_string(inputDataCount) + "; Unit width: " + to_string(unitWidth) + "; Padding left: " + to_string(paddingLeft) +
"; Unit stride: " + to_string(unitStride));
TestingAssert(foundDifferentFromZeroReg, "All max pool input gradients are zeros! Input num channels: " + to_string(inputNumChannels) + "; Input data count: " +
to_string(inputDataCount) + "; Unit width: " + to_string(unitWidth) + "; Padding left: " + to_string(paddingLeft) + "; Unit stride: " + to_string(unitStride));
TestingAssert(correctResult, "Incorrect backward prop! Num differences: " + to_string(numDifferences) + "; First difference: " + to_string(firstDifference) +
"; First different mock input gradient: " + to_string(firstDifferentMock) + "; First different regular input gradient: " + to_string(firstDifferentReg) +
"; Input num channels: " + to_string(inputNumChannels) + "; Input data count: " + to_string(inputDataCount) + "; Unit width: " + to_string(unitWidth) +
"; Padding left: " + to_string(paddingLeft) + "; Unit stride: " + to_string(unitStride));
cout << "Backward prop passed. Input num channels: " << inputNumChannels << "; Input data count: " << inputDataCount << "; Unit width: " << unitWidth <<
"; Padding left: " << paddingLeft << "; Unit stride: " << unitStride << endl;
}
//******************************************************************************************************
// Tests
//******************************************************************************************************
void TestMaxPoolLayer::TestDoForwardProp()
{
// lastBatch == true
// m_inputNumChannels % 16 == 0
TestDoForwardProp(64 /*inputNumChannels*/, 55 /*inputDataWidth*/, 55 /*inputDataHeight*/, 33 /*inputDataCount*/, 3 /*unitWidth*/, 3 /*unitHeight*/,
0 /*paddingLeft*/, 0 /*paddingTop*/, 2 /*unitStride*/);
TestDoForwardProp(128 /*inputNumChannels*/, 27 /*inputDataWidth*/, 27 /*inputDataHeight*/, 33 /*inputDataCount*/, 4 /*unitWidth*/, 4 /*unitHeight*/,
1 /*paddingLeft*/, 1 /*paddingTop*/, 2 /*unitStride*/);
// m_inputNumChannels % 16 != 0
TestDoForwardProp(3 /*inputNumChannels*/, 224 /*inputDataWidth*/, 224 /*inputDataHeight*/, 57 /*inputDataCount*/, 3 /*unitWidth*/, 3 /*unitHeight*/,
0 /*paddingLeft*/, 0 /*paddingTop*/, 2 /*unitStride*/);
TestDoForwardProp(3 /*inputNumChannels*/, 224 /*inputDataWidth*/, 224 /*inputDataHeight*/, 17 /*inputDataCount*/, 3 /*unitWidth*/, 3 /*unitHeight*/,
1 /*paddingLeft*/, 1 /*paddingTop*/, 1 /*unitStride*/);
// lastBatch == false
// m_inputDataCount % 128 == 0
// m_inputNumChannels % 16 == 0
TestDoForwardProp(64 /*inputNumChannels*/, 55 /*inputDataWidth*/, 55 /*inputDataHeight*/, 128 /*inputDataCount*/, 3 /*unitWidth*/, 3 /*unitHeight*/,
0 /*paddingLeft*/, 0 /*paddingTop*/, 2 /*unitStride*/);
TestDoForwardProp(128 /*inputNumChannels*/, 27 /*inputDataWidth*/, 27 /*inputDataHeight*/, 128 /*inputDataCount*/, 4 /*unitWidth*/, 4 /*unitHeight*/,
1 /*paddingLeft*/, 1 /*paddingTop*/, 2 /*unitStride*/);
// m_inputNumChannels % 16 != 0
TestDoForwardProp(3 /*inputNumChannels*/, 77 /*inputDataWidth*/, 77 /*inputDataHeight*/, 128 /*inputDataCount*/, 3 /*unitWidth*/, 3 /*unitHeight*/,
0 /*paddingLeft*/, 0 /*paddingTop*/, 2 /*unitStride*/);
TestDoForwardProp(3 /*inputNumChannels*/, 77 /*inputDataWidth*/, 77 /*inputDataHeight*/, 128 /*inputDataCount*/, 3 /*unitWidth*/, 3 /*unitHeight*/,
1 /*paddingLeft*/, 1 /*paddingTop*/, 1 /*unitStride*/);
// m_inputDataCount % 64 == 0
// m_inputNumChannels % 16 == 0
TestDoForwardProp(128 /*inputNumChannels*/, 55 /*inputDataWidth*/, 55 /*inputDataHeight*/, 64 /*inputDataCount*/, 3 /*unitWidth*/, 3 /*unitHeight*/,
0 /*paddingLeft*/, 0 /*paddingTop*/, 2 /*unitStride*/);
TestDoForwardProp(256 /*inputNumChannels*/, 27 /*inputDataWidth*/, 27 /*inputDataHeight*/, 64 /*inputDataCount*/, 4 /*unitWidth*/, 4 /*unitHeight*/,
1 /*paddingLeft*/, 1 /*paddingTop*/, 2 /*unitStride*/);
// m_inputNumChannels % 16 != 0
TestDoForwardProp(3 /*inputNumChannels*/, 150 /*inputDataWidth*/, 150 /*inputDataHeight*/, 64 /*inputDataCount*/, 3 /*unitWidth*/, 3 /*unitHeight*/,
0 /*paddingLeft*/, 0 /*paddingTop*/, 2 /*unitStride*/);
TestDoForwardProp(3 /*inputNumChannels*/, 150 /*inputDataWidth*/, 150 /*inputDataHeight*/, 64 /*inputDataCount*/, 3 /*unitWidth*/, 3 /*unitHeight*/,
1 /*paddingLeft*/, 1 /*paddingTop*/, 1 /*unitStride*/);
// m_inputDataCount % 32 == 0
// m_inputNumChannels % 16 == 0
TestDoForwardProp(128 /*inputNumChannels*/, 55 /*inputDataWidth*/, 55 /*inputDataHeight*/, 32 /*inputDataCount*/, 3 /*unitWidth*/, 3 /*unitHeight*/,
0 /*paddingLeft*/, 0 /*paddingTop*/, 2 /*unitStride*/);
TestDoForwardProp(256 /*inputNumChannels*/, 27 /*inputDataWidth*/, 27 /*inputDataHeight*/, 32 /*inputDataCount*/, 4 /*unitWidth*/, 4 /*unitHeight*/,
1 /*paddingLeft*/, 1 /*paddingTop*/, 2 /*unitStride*/);
// m_inputNumChannels % 16 != 0
TestDoForwardProp(3 /*inputNumChannels*/, 201 /*inputDataWidth*/, 201 /*inputDataHeight*/, 32 /*inputDataCount*/, 3 /*unitWidth*/, 3 /*unitHeight*/,
0 /*paddingLeft*/, 0 /*paddingTop*/, 2 /*unitStride*/);
TestDoForwardProp(3 /*inputNumChannels*/, 201 /*inputDataWidth*/, 201 /*inputDataHeight*/, 32 /*inputDataCount*/, 3 /*unitWidth*/, 3 /*unitHeight*/,
1 /*paddingLeft*/, 1 /*paddingTop*/, 1 /*unitStride*/);
}
void TestMaxPoolLayer::TestDoBackwardProp()
{
// lastBatch == true
// m_inputNumChannels % 16 == 0
TestDoBackwardProp(64 /*inputNumChannels*/, 55 /*inputDataWidth*/, 55 /*inputDataHeight*/, 33 /*inputDataCount*/, 3 /*unitWidth*/, 3 /*unitHeight*/,
0 /*paddingLeft*/, 0 /*paddingTop*/, 2 /*unitStride*/);
TestDoBackwardProp(128 /*inputNumChannels*/, 27 /*inputDataWidth*/, 27 /*inputDataHeight*/, 33 /*inputDataCount*/, 4 /*unitWidth*/, 4 /*unitHeight*/,
1 /*paddingLeft*/, 1 /*paddingTop*/, 2 /*unitStride*/);
// m_inputNumChannels % 16 != 0
// TODO: Currently unsupported, uncomment here and below if you support this one day.
//TestDoBackwardProp(3 /*inputNumChannels*/, 224 /*inputDataWidth*/, 224 /*inputDataHeight*/, 57 /*inputDataCount*/, 3 /*unitWidth*/, 3 /*unitHeight*/,
// 0 /*paddingLeft*/, 0 /*paddingTop*/, 2 /*unitStride*/);
//TestDoBackwardProp(3 /*inputNumChannels*/, 224 /*inputDataWidth*/, 224 /*inputDataHeight*/, 17 /*inputDataCount*/, 3 /*unitWidth*/, 3 /*unitHeight*/,
// 1 /*paddingLeft*/, 1 /*paddingTop*/, 1 /*unitStride*/);
// lastBatch == false
// m_inputDataCount % 128 == 0
// m_inputNumChannels % 16 == 0
TestDoBackwardProp(64 /*inputNumChannels*/, 55 /*inputDataWidth*/, 55 /*inputDataHeight*/, 128 /*inputDataCount*/, 3 /*unitWidth*/, 3 /*unitHeight*/,
0 /*paddingLeft*/, 0 /*paddingTop*/, 2 /*unitStride*/);
TestDoBackwardProp(128 /*inputNumChannels*/, 27 /*inputDataWidth*/, 27 /*inputDataHeight*/, 128 /*inputDataCount*/, 4 /*unitWidth*/, 4 /*unitHeight*/,
1 /*paddingLeft*/, 1 /*paddingTop*/, 2 /*unitStride*/);
// m_inputNumChannels % 16 != 0
//TestDoBackwardProp(3 /*inputNumChannels*/, 77 /*inputDataWidth*/, 77 /*inputDataHeight*/, 128 /*inputDataCount*/, 3 /*unitWidth*/, 3 /*unitHeight*/,
// 0 /*paddingLeft*/, 0 /*paddingTop*/, 2 /*unitStride*/);
//TestDoBackwardProp(3 /*inputNumChannels*/, 77 /*inputDataWidth*/, 77 /*inputDataHeight*/, 128 /*inputDataCount*/, 3 /*unitWidth*/, 3 /*unitHeight*/,
// 1 /*paddingLeft*/, 1 /*paddingTop*/, 1 /*unitStride*/);
// m_inputDataCount % 64 == 0
// m_inputNumChannels % 16 == 0
TestDoBackwardProp(128 /*inputNumChannels*/, 55 /*inputDataWidth*/, 55 /*inputDataHeight*/, 64 /*inputDataCount*/, 3 /*unitWidth*/, 3 /*unitHeight*/,
0 /*paddingLeft*/, 0 /*paddingTop*/, 2 /*unitStride*/);
TestDoBackwardProp(256 /*inputNumChannels*/, 27 /*inputDataWidth*/, 27 /*inputDataHeight*/, 64 /*inputDataCount*/, 4 /*unitWidth*/, 4 /*unitHeight*/,
1 /*paddingLeft*/, 1 /*paddingTop*/, 2 /*unitStride*/);
// m_inputNumChannels % 16 != 0
//TestDoBackwardProp(3 /*inputNumChannels*/, 150 /*inputDataWidth*/, 150 /*inputDataHeight*/, 64 /*inputDataCount*/, 3 /*unitWidth*/, 3 /*unitHeight*/,
// 0 /*paddingLeft*/, 0 /*paddingTop*/, 2 /*unitStride*/);
//TestDoBackwardProp(3 /*inputNumChannels*/, 150 /*inputDataWidth*/, 150 /*inputDataHeight*/, 64 /*inputDataCount*/, 3 /*unitWidth*/, 3 /*unitHeight*/,
// 1 /*paddingLeft*/, 1 /*paddingTop*/, 1 /*unitStride*/);
// m_inputDataCount % 32 == 0
// m_inputNumChannels % 16 == 0
TestDoBackwardProp(128 /*inputNumChannels*/, 55 /*inputDataWidth*/, 55 /*inputDataHeight*/, 32 /*inputDataCount*/, 3 /*unitWidth*/, 3 /*unitHeight*/,
0 /*paddingLeft*/, 0 /*paddingTop*/, 2 /*unitStride*/);
TestDoBackwardProp(256 /*inputNumChannels*/, 27 /*inputDataWidth*/, 27 /*inputDataHeight*/, 32 /*inputDataCount*/, 4 /*unitWidth*/, 4 /*unitHeight*/,
1 /*paddingLeft*/, 1 /*paddingTop*/, 2 /*unitStride*/);
// m_inputNumChannels % 16 != 0
//TestDoBackwardProp(3 /*inputNumChannels*/, 201 /*inputDataWidth*/, 201 /*inputDataHeight*/, 32 /*inputDataCount*/, 3 /*unitWidth*/, 3 /*unitHeight*/,
// 0 /*paddingLeft*/, 0 /*paddingTop*/, 2 /*unitStride*/);
//TestDoBackwardProp(3 /*inputNumChannels*/, 201 /*inputDataWidth*/, 201 /*inputDataHeight*/, 32 /*inputDataCount*/, 3 /*unitWidth*/, 3 /*unitHeight*/,
// 1 /*paddingLeft*/, 1 /*paddingTop*/, 1 /*unitStride*/);
} | ab179437de18031648339824ca9c3cec9504dd64.cu | // ----------------------------------------------------------------------------------------------------
// Copyrighted by Marko Rakita.
// Author: Marko Rakita
// File contains: Tests for max pool layer.
// Created: 02/07/2016.
// ----------------------------------------------------------------------------------------------------
#include "include/testmaxpoollayer.cuh"
TestMaxPoolLayer::TestMaxPoolLayer(string outputFolder)
{
m_outputFolder = outputFolder;
// Registering tests.
m_maxPoolLayerTests["doforwardprop"] = &TestMaxPoolLayer::TestDoForwardProp;
m_maxPoolLayerTests["dobackwardprop"] = &TestMaxPoolLayer::TestDoBackwardProp;
}
bool TestMaxPoolLayer::HasTest(string testName)
{
auto test = m_maxPoolLayerTests.find(testName);
return test != m_maxPoolLayerTests.end();
}
void TestMaxPoolLayer::RunTest(string testName)
{
auto test = m_maxPoolLayerTests.find(testName);
TestingAssert(test != m_maxPoolLayerTests.end(), "Test not found!");
((*this).*(test->second))();
}
void TestMaxPoolLayer::RunAllTests()
{
for (auto test = m_maxPoolLayerTests.begin(); test != m_maxPoolLayerTests.end(); ++test)
{
((*this).*(test->second))();
s_consoleHelper.SetConsoleForeground(ConsoleForeground::GREEN);
cout << "Test " << test->first << " passed!" << endl << endl;
s_consoleHelper.RevertConsoleForeground();
}
}
//******************************************************************************************************
// Helper functions
//******************************************************************************************************
void TestMaxPoolLayer::TestDoForwardProp(uint inputNumChannels, uint inputDataWidth, uint inputDataHeight, uint inputDataCount, uint unitWidth,
uint unitHeight, int paddingLeft, int paddingTop, uint unitStride)
{
// Creating layers.
MockInputLayer mockInputLayer(inputNumChannels, inputDataWidth, inputDataHeight, inputDataCount);
MockMaxPoolLayer mockMaxPoolLayer(inputNumChannels, inputDataWidth, inputDataHeight, inputDataCount, unitWidth, unitHeight, paddingLeft, paddingTop, unitStride);
mockMaxPoolLayer.AddPrevLayer(&mockInputLayer);
MaxPoolLayer maxPoolLayer(ParallelismMode::Data, 0, 0, 0, 1, inputNumChannels, inputDataWidth, inputDataHeight, inputDataCount, false, unitWidth, unitHeight,
paddingLeft, paddingTop, unitStride, false);
maxPoolLayer.AddPrevLayer(&mockInputLayer);
// Doing forward prop.
PropagationMode propagationMode = PropagationMode::Train;
mockInputLayer.LoadInputs();
mockInputLayer.DoForwardProp(propagationMode);
mockMaxPoolLayer.LoadInputs();
maxPoolLayer.LoadInputs();
maxPoolLayer.DoForwardProp(propagationMode);
mockMaxPoolLayer.DoForwardProp(propagationMode);
CudaAssert(cudaDeviceSynchronize());
// Transferring results to host.
size_t activationsBufferSize = mockMaxPoolLayer.GetActivationBufferSize();
float* maxPoolLayerActivationBuffer;
CudaAssert(cudaMallocHost<float>(&maxPoolLayerActivationBuffer, activationsBufferSize));
CudaAssert(cudaMemcpy(maxPoolLayerActivationBuffer, maxPoolLayer.GetActivationDataBuffer(), activationsBufferSize, cudaMemcpyDeviceToHost));
// Checking correctness.
bool correctResult = true;
size_t numDifferences = 0;
float firstDifference = 0.f;
float firstDifferentMock = 0.f;
float firstDifferentReg = 0.f;
bool foundDifferentFromZeroMock = false;
bool foundDifferentFromZeroReg = false;
size_t activationsBufferLength = activationsBufferSize / sizeof(float);
const float* mockMaxPoolLayerActivationBuffer = mockMaxPoolLayer.GetActivationDataBuffer();
const float maxDiff = 0.0001f;
const float maxDiffPercentage = 0.001f;
const float maxDiffPercentageThreshold = 0.000001f;
CompareBuffers(maxPoolLayerActivationBuffer, mockMaxPoolLayerActivationBuffer, activationsBufferLength, maxDiff, maxDiffPercentage, maxDiffPercentageThreshold,
correctResult, numDifferences, firstDifference, firstDifferentMock, firstDifferentReg, foundDifferentFromZeroMock, foundDifferentFromZeroReg);
CudaAssert(cudaFreeHost(maxPoolLayerActivationBuffer));
TestingAssert(foundDifferentFromZeroMock, "All mock max pool activations are zeros! Input num channels: " + to_string(inputNumChannels) + "; Input data count: " +
to_string(inputDataCount) + "; Unit width: " + to_string(unitWidth) + "; Padding left: " + to_string(paddingLeft) + "; Unit stride: " + to_string(unitStride));
TestingAssert(foundDifferentFromZeroReg, "All max pool activations are zeros! Input num channels: " + to_string(inputNumChannels) + "; Input data count: " +
to_string(inputDataCount) + "; Unit width: " + to_string(unitWidth) + "; Padding left: " + to_string(paddingLeft) + "; Unit stride: " + to_string(unitStride));
TestingAssert(correctResult, "Incorrect forward prop! Num differences: " + to_string(numDifferences) + "; First difference: " + to_string(firstDifference) +
"; First different mock activation: " + to_string(firstDifferentMock) + "; First different regular activation: " + to_string(firstDifferentReg) +
"; Input num channels: " + to_string(inputNumChannels) + "; Input data count: " + to_string(inputDataCount) + "; Unit width: " + to_string(unitWidth) +
"; Padding left: " + to_string(paddingLeft) + "; Unit stride: " + to_string(unitStride));
cout << "Forward prop passed. Input num channels: " << inputNumChannels << "; Input data count: " << inputDataCount << "; Unit width: " << unitWidth <<
"; Padding left: " << paddingLeft << "; Unit stride: " << unitStride << endl;
}
void TestMaxPoolLayer::TestDoBackwardProp(uint inputNumChannels, uint inputDataWidth, uint inputDataHeight, uint inputDataCount, uint unitWidth,
uint unitHeight, int paddingLeft, int paddingTop, uint unitStride)
{
// Creating layers.
MockInputLayer mockInputLayer(inputNumChannels, inputDataWidth, inputDataHeight, inputDataCount);
MockMaxPoolLayer mockMaxPoolLayer(inputNumChannels, inputDataWidth, inputDataHeight, inputDataCount, unitWidth, unitHeight, paddingLeft, paddingTop, unitStride);
mockMaxPoolLayer.AddPrevLayer(&mockInputLayer);
MaxPoolLayer maxPoolLayer(ParallelismMode::Data, 0, 0, 0, 1, inputNumChannels, inputDataWidth, inputDataHeight, inputDataCount, false, unitWidth, unitHeight,
paddingLeft, paddingTop, unitStride, false);
maxPoolLayer.AddPrevLayer(&mockInputLayer);
MockOutputLayer mockOutputLayer(maxPoolLayer.GetActivationDataSize() * maxPoolLayer.GetActivationNumChannels(), inputDataCount, LossFunctionType::LogisticRegression, false, 0, true);
mockMaxPoolLayer.AddNextLayer(&mockOutputLayer);
maxPoolLayer.AddNextLayer(&mockOutputLayer);
// Doing forward and backward prop.
PropagationMode propagationMode = PropagationMode::Train;
mockInputLayer.LoadInputs();
mockInputLayer.DoForwardProp(propagationMode);
mockMaxPoolLayer.LoadInputs();
maxPoolLayer.LoadInputs();
maxPoolLayer.DoForwardProp(propagationMode);
mockMaxPoolLayer.DoForwardProp(propagationMode);
mockOutputLayer.DoBackwardProp();
maxPoolLayer.LoadActivationGradients();
maxPoolLayer.DoBackwardProp();
mockMaxPoolLayer.LoadActivationGradients();
mockMaxPoolLayer.DoBackwardProp();
CudaAssert(cudaDeviceSynchronize());
// Transferring results to host.
size_t inputGradientsBufferSize = mockInputLayer.GetActivationBufferSize();
float* maxPoolLayerInputGradientsBuffer;
CudaAssert(cudaMallocHost<float>(&maxPoolLayerInputGradientsBuffer, inputGradientsBufferSize));
CudaAssert(cudaMemcpy(maxPoolLayerInputGradientsBuffer, maxPoolLayer.GetInputGradientsBuffer(), inputGradientsBufferSize, cudaMemcpyDeviceToHost));
// Checking correctness.
bool correctResult = true;
size_t numDifferences = 0;
float firstDifference = 0.f;
float firstDifferentMock = 0.f;
float firstDifferentReg = 0.f;
bool foundDifferentFromZeroMock = false;
bool foundDifferentFromZeroReg = false;
size_t inputGradientsBufferLength = inputGradientsBufferSize / sizeof(float);
const float* mockMaxPoolLayerInputGradientsBuffer = mockMaxPoolLayer.GetInputGradientsBuffer();
const float maxDiff = 0.0001f;
const float maxDiffPercentage = 0.001f;
const float maxDiffPercentageThreshold = 0.000001f;
CompareBuffers(maxPoolLayerInputGradientsBuffer, mockMaxPoolLayerInputGradientsBuffer, inputGradientsBufferLength, maxDiff, maxDiffPercentage, maxDiffPercentageThreshold,
correctResult, numDifferences, firstDifference, firstDifferentMock, firstDifferentReg, foundDifferentFromZeroMock, foundDifferentFromZeroReg);
for (size_t i = 0; i < inputGradientsBufferLength; ++i)
{
if (abs(maxPoolLayerInputGradientsBuffer[i] - mockMaxPoolLayerInputGradientsBuffer[i]) > 0.00001f)
{
cout << "regular: " << maxPoolLayerInputGradientsBuffer[i] << " , " << "mock: " << mockMaxPoolLayerInputGradientsBuffer[i] << endl;
cout << i << endl;
}
}
CudaAssert(cudaFreeHost(maxPoolLayerInputGradientsBuffer));
TestingAssert(foundDifferentFromZeroMock, "All mock max pool input gradients are zeros! Input num channels: " + to_string(inputNumChannels) +
"; Input data count: " + to_string(inputDataCount) + "; Unit width: " + to_string(unitWidth) + "; Padding left: " + to_string(paddingLeft) +
"; Unit stride: " + to_string(unitStride));
TestingAssert(foundDifferentFromZeroReg, "All max pool input gradients are zeros! Input num channels: " + to_string(inputNumChannels) + "; Input data count: " +
to_string(inputDataCount) + "; Unit width: " + to_string(unitWidth) + "; Padding left: " + to_string(paddingLeft) + "; Unit stride: " + to_string(unitStride));
TestingAssert(correctResult, "Incorrect backward prop! Num differences: " + to_string(numDifferences) + "; First difference: " + to_string(firstDifference) +
"; First different mock input gradient: " + to_string(firstDifferentMock) + "; First different regular input gradient: " + to_string(firstDifferentReg) +
"; Input num channels: " + to_string(inputNumChannels) + "; Input data count: " + to_string(inputDataCount) + "; Unit width: " + to_string(unitWidth) +
"; Padding left: " + to_string(paddingLeft) + "; Unit stride: " + to_string(unitStride));
cout << "Backward prop passed. Input num channels: " << inputNumChannels << "; Input data count: " << inputDataCount << "; Unit width: " << unitWidth <<
"; Padding left: " << paddingLeft << "; Unit stride: " << unitStride << endl;
}
//******************************************************************************************************
// Tests
//******************************************************************************************************
void TestMaxPoolLayer::TestDoForwardProp()
{
// lastBatch == true
// m_inputNumChannels % 16 == 0
TestDoForwardProp(64 /*inputNumChannels*/, 55 /*inputDataWidth*/, 55 /*inputDataHeight*/, 33 /*inputDataCount*/, 3 /*unitWidth*/, 3 /*unitHeight*/,
0 /*paddingLeft*/, 0 /*paddingTop*/, 2 /*unitStride*/);
TestDoForwardProp(128 /*inputNumChannels*/, 27 /*inputDataWidth*/, 27 /*inputDataHeight*/, 33 /*inputDataCount*/, 4 /*unitWidth*/, 4 /*unitHeight*/,
1 /*paddingLeft*/, 1 /*paddingTop*/, 2 /*unitStride*/);
// m_inputNumChannels % 16 != 0
TestDoForwardProp(3 /*inputNumChannels*/, 224 /*inputDataWidth*/, 224 /*inputDataHeight*/, 57 /*inputDataCount*/, 3 /*unitWidth*/, 3 /*unitHeight*/,
0 /*paddingLeft*/, 0 /*paddingTop*/, 2 /*unitStride*/);
TestDoForwardProp(3 /*inputNumChannels*/, 224 /*inputDataWidth*/, 224 /*inputDataHeight*/, 17 /*inputDataCount*/, 3 /*unitWidth*/, 3 /*unitHeight*/,
1 /*paddingLeft*/, 1 /*paddingTop*/, 1 /*unitStride*/);
// lastBatch == false
// m_inputDataCount % 128 == 0
// m_inputNumChannels % 16 == 0
TestDoForwardProp(64 /*inputNumChannels*/, 55 /*inputDataWidth*/, 55 /*inputDataHeight*/, 128 /*inputDataCount*/, 3 /*unitWidth*/, 3 /*unitHeight*/,
0 /*paddingLeft*/, 0 /*paddingTop*/, 2 /*unitStride*/);
TestDoForwardProp(128 /*inputNumChannels*/, 27 /*inputDataWidth*/, 27 /*inputDataHeight*/, 128 /*inputDataCount*/, 4 /*unitWidth*/, 4 /*unitHeight*/,
1 /*paddingLeft*/, 1 /*paddingTop*/, 2 /*unitStride*/);
// m_inputNumChannels % 16 != 0
TestDoForwardProp(3 /*inputNumChannels*/, 77 /*inputDataWidth*/, 77 /*inputDataHeight*/, 128 /*inputDataCount*/, 3 /*unitWidth*/, 3 /*unitHeight*/,
0 /*paddingLeft*/, 0 /*paddingTop*/, 2 /*unitStride*/);
TestDoForwardProp(3 /*inputNumChannels*/, 77 /*inputDataWidth*/, 77 /*inputDataHeight*/, 128 /*inputDataCount*/, 3 /*unitWidth*/, 3 /*unitHeight*/,
1 /*paddingLeft*/, 1 /*paddingTop*/, 1 /*unitStride*/);
// m_inputDataCount % 64 == 0
// m_inputNumChannels % 16 == 0
TestDoForwardProp(128 /*inputNumChannels*/, 55 /*inputDataWidth*/, 55 /*inputDataHeight*/, 64 /*inputDataCount*/, 3 /*unitWidth*/, 3 /*unitHeight*/,
0 /*paddingLeft*/, 0 /*paddingTop*/, 2 /*unitStride*/);
TestDoForwardProp(256 /*inputNumChannels*/, 27 /*inputDataWidth*/, 27 /*inputDataHeight*/, 64 /*inputDataCount*/, 4 /*unitWidth*/, 4 /*unitHeight*/,
1 /*paddingLeft*/, 1 /*paddingTop*/, 2 /*unitStride*/);
// m_inputNumChannels % 16 != 0
TestDoForwardProp(3 /*inputNumChannels*/, 150 /*inputDataWidth*/, 150 /*inputDataHeight*/, 64 /*inputDataCount*/, 3 /*unitWidth*/, 3 /*unitHeight*/,
0 /*paddingLeft*/, 0 /*paddingTop*/, 2 /*unitStride*/);
TestDoForwardProp(3 /*inputNumChannels*/, 150 /*inputDataWidth*/, 150 /*inputDataHeight*/, 64 /*inputDataCount*/, 3 /*unitWidth*/, 3 /*unitHeight*/,
1 /*paddingLeft*/, 1 /*paddingTop*/, 1 /*unitStride*/);
// m_inputDataCount % 32 == 0
// m_inputNumChannels % 16 == 0
TestDoForwardProp(128 /*inputNumChannels*/, 55 /*inputDataWidth*/, 55 /*inputDataHeight*/, 32 /*inputDataCount*/, 3 /*unitWidth*/, 3 /*unitHeight*/,
0 /*paddingLeft*/, 0 /*paddingTop*/, 2 /*unitStride*/);
TestDoForwardProp(256 /*inputNumChannels*/, 27 /*inputDataWidth*/, 27 /*inputDataHeight*/, 32 /*inputDataCount*/, 4 /*unitWidth*/, 4 /*unitHeight*/,
1 /*paddingLeft*/, 1 /*paddingTop*/, 2 /*unitStride*/);
// m_inputNumChannels % 16 != 0
TestDoForwardProp(3 /*inputNumChannels*/, 201 /*inputDataWidth*/, 201 /*inputDataHeight*/, 32 /*inputDataCount*/, 3 /*unitWidth*/, 3 /*unitHeight*/,
0 /*paddingLeft*/, 0 /*paddingTop*/, 2 /*unitStride*/);
TestDoForwardProp(3 /*inputNumChannels*/, 201 /*inputDataWidth*/, 201 /*inputDataHeight*/, 32 /*inputDataCount*/, 3 /*unitWidth*/, 3 /*unitHeight*/,
1 /*paddingLeft*/, 1 /*paddingTop*/, 1 /*unitStride*/);
}
void TestMaxPoolLayer::TestDoBackwardProp()
{
// lastBatch == true
// m_inputNumChannels % 16 == 0
TestDoBackwardProp(64 /*inputNumChannels*/, 55 /*inputDataWidth*/, 55 /*inputDataHeight*/, 33 /*inputDataCount*/, 3 /*unitWidth*/, 3 /*unitHeight*/,
0 /*paddingLeft*/, 0 /*paddingTop*/, 2 /*unitStride*/);
TestDoBackwardProp(128 /*inputNumChannels*/, 27 /*inputDataWidth*/, 27 /*inputDataHeight*/, 33 /*inputDataCount*/, 4 /*unitWidth*/, 4 /*unitHeight*/,
1 /*paddingLeft*/, 1 /*paddingTop*/, 2 /*unitStride*/);
// m_inputNumChannels % 16 != 0
// TODO: Currently unsupported, uncomment here and below if you support this one day.
//TestDoBackwardProp(3 /*inputNumChannels*/, 224 /*inputDataWidth*/, 224 /*inputDataHeight*/, 57 /*inputDataCount*/, 3 /*unitWidth*/, 3 /*unitHeight*/,
// 0 /*paddingLeft*/, 0 /*paddingTop*/, 2 /*unitStride*/);
//TestDoBackwardProp(3 /*inputNumChannels*/, 224 /*inputDataWidth*/, 224 /*inputDataHeight*/, 17 /*inputDataCount*/, 3 /*unitWidth*/, 3 /*unitHeight*/,
// 1 /*paddingLeft*/, 1 /*paddingTop*/, 1 /*unitStride*/);
// lastBatch == false
// m_inputDataCount % 128 == 0
// m_inputNumChannels % 16 == 0
TestDoBackwardProp(64 /*inputNumChannels*/, 55 /*inputDataWidth*/, 55 /*inputDataHeight*/, 128 /*inputDataCount*/, 3 /*unitWidth*/, 3 /*unitHeight*/,
0 /*paddingLeft*/, 0 /*paddingTop*/, 2 /*unitStride*/);
TestDoBackwardProp(128 /*inputNumChannels*/, 27 /*inputDataWidth*/, 27 /*inputDataHeight*/, 128 /*inputDataCount*/, 4 /*unitWidth*/, 4 /*unitHeight*/,
1 /*paddingLeft*/, 1 /*paddingTop*/, 2 /*unitStride*/);
// m_inputNumChannels % 16 != 0
//TestDoBackwardProp(3 /*inputNumChannels*/, 77 /*inputDataWidth*/, 77 /*inputDataHeight*/, 128 /*inputDataCount*/, 3 /*unitWidth*/, 3 /*unitHeight*/,
// 0 /*paddingLeft*/, 0 /*paddingTop*/, 2 /*unitStride*/);
//TestDoBackwardProp(3 /*inputNumChannels*/, 77 /*inputDataWidth*/, 77 /*inputDataHeight*/, 128 /*inputDataCount*/, 3 /*unitWidth*/, 3 /*unitHeight*/,
// 1 /*paddingLeft*/, 1 /*paddingTop*/, 1 /*unitStride*/);
// m_inputDataCount % 64 == 0
// m_inputNumChannels % 16 == 0
TestDoBackwardProp(128 /*inputNumChannels*/, 55 /*inputDataWidth*/, 55 /*inputDataHeight*/, 64 /*inputDataCount*/, 3 /*unitWidth*/, 3 /*unitHeight*/,
0 /*paddingLeft*/, 0 /*paddingTop*/, 2 /*unitStride*/);
TestDoBackwardProp(256 /*inputNumChannels*/, 27 /*inputDataWidth*/, 27 /*inputDataHeight*/, 64 /*inputDataCount*/, 4 /*unitWidth*/, 4 /*unitHeight*/,
1 /*paddingLeft*/, 1 /*paddingTop*/, 2 /*unitStride*/);
// m_inputNumChannels % 16 != 0
//TestDoBackwardProp(3 /*inputNumChannels*/, 150 /*inputDataWidth*/, 150 /*inputDataHeight*/, 64 /*inputDataCount*/, 3 /*unitWidth*/, 3 /*unitHeight*/,
// 0 /*paddingLeft*/, 0 /*paddingTop*/, 2 /*unitStride*/);
//TestDoBackwardProp(3 /*inputNumChannels*/, 150 /*inputDataWidth*/, 150 /*inputDataHeight*/, 64 /*inputDataCount*/, 3 /*unitWidth*/, 3 /*unitHeight*/,
// 1 /*paddingLeft*/, 1 /*paddingTop*/, 1 /*unitStride*/);
// m_inputDataCount % 32 == 0
// m_inputNumChannels % 16 == 0
TestDoBackwardProp(128 /*inputNumChannels*/, 55 /*inputDataWidth*/, 55 /*inputDataHeight*/, 32 /*inputDataCount*/, 3 /*unitWidth*/, 3 /*unitHeight*/,
0 /*paddingLeft*/, 0 /*paddingTop*/, 2 /*unitStride*/);
TestDoBackwardProp(256 /*inputNumChannels*/, 27 /*inputDataWidth*/, 27 /*inputDataHeight*/, 32 /*inputDataCount*/, 4 /*unitWidth*/, 4 /*unitHeight*/,
1 /*paddingLeft*/, 1 /*paddingTop*/, 2 /*unitStride*/);
// m_inputNumChannels % 16 != 0
//TestDoBackwardProp(3 /*inputNumChannels*/, 201 /*inputDataWidth*/, 201 /*inputDataHeight*/, 32 /*inputDataCount*/, 3 /*unitWidth*/, 3 /*unitHeight*/,
// 0 /*paddingLeft*/, 0 /*paddingTop*/, 2 /*unitStride*/);
//TestDoBackwardProp(3 /*inputNumChannels*/, 201 /*inputDataWidth*/, 201 /*inputDataHeight*/, 32 /*inputDataCount*/, 3 /*unitWidth*/, 3 /*unitHeight*/,
// 1 /*paddingLeft*/, 1 /*paddingTop*/, 1 /*unitStride*/);
} |
eb154b22428e44cfe66f1c2916e94197c3951996.hip | // !!! This is a file automatically generated by hipify!!!
// ******************************************
// implicit time stepping implementation of 2D diffusion problem
// Ben Cumming, CSCS
// *****************************************
// A small benchmark app that solves the 2D fisher equation using second-order
// finite differences.
// Syntax: ./main nx ny nt t
#include <algorithm>
#include <iostream>
#include <sstream>
#include <fstream>
#include <cstdio>
#include <cmath>
#include <cstdlib>
#include <cstring>
#include <omp.h>
#include "data.h"
#include "linalg.h"
#include "operators.h"
#include "stats.h"
using namespace data;
using namespace linalg;
using namespace operators;
using namespace stats;
// read command line arguments
static void readcmdline(Discretization& options, int argc, char* argv[])
{
if (argc<5 || argc>6 ) {
std::cerr << "Usage: main nx ny nt t\n";
std::cerr << " nx number of gridpoints in x-direction\n";
std::cerr << " ny number of gridpoints in y-direction\n";
std::cerr << " nt number of timesteps\n";
std::cerr << " t total time\n";
std::cerr << " v [optional] turn on verbose output\n";
exit(1);
}
// read nx
options.nx = atoi(argv[1]);
if (options.nx < 1) {
std::cerr << "nx must be positive integer\n";
exit(-1);
}
// read ny
options.ny = atoi(argv[2]);
if (options.ny < 1) {
std::cerr << "ny must be positive integer\n";
exit(-1);
}
options.N = options.nx*options.ny;
// read nt
options.nt = atoi(argv[3]);
if (options.nt < 1) {
std::cerr << "nt must be positive integer\n";
exit(-1);
}
// read total time
double t = atof(argv[4]);
if (t < 0) {
std::cerr << "t must be positive real value\n";
exit(-1);
}
verbose_output = false;
if( argc==6 ) {
verbose_output = true;
}
// compute timestep size
options.dt = t / options.nt;
// compute the distance between grid points
// assume that x dimension has length 1.0
options.dx = 1. / (options.nx - 1);
// set alpha, assume diffusion coefficient D is 1
options.alpha = (options.dx * options.dx) / (1. * options.dt);
}
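// Worked example of the derived quantities above (illustrative only): for
// ./main 128 128 100 0.01 the code computes
// dt = t / nt = 0.01 / 100 = 1e-4
// dx = 1 / (nx - 1) = 1 / 127 ~ 7.874e-3
// alpha = dx*dx / (D * dt) = 6.2e-5 / 1e-4 ~ 0.62 (with diffusion coefficient D = 1)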
// ==============================================================================
int main(int argc, char* argv[])
{
// read command line arguments
readcmdline(options, argc, argv);
int nx = options.nx;
int ny = options.ny;
int nt = options.nt;
// initialize cuda
int device_count;
cuda_check_status( hipGetDeviceCount(&device_count) );
if(device_count < 1) {
std::cerr << "error: there should be at least one device per node" << std::endl;
exit(-1);
}
cuda_check_status( hipSetDevice(0) );
// get the cublas handle to force cublas initialization outside the main time
// stepping loop, to ensure that the timing doesn't count initialization costs
auto handle = cublas_handle();
// set iteration parameters
int max_cg_iters = 200;
int max_newton_iters = 50;
double tolerance = 1.e-6;
std::cout << "========================================================================" << std::endl;
std::cout << " Welcome to mini-stencil!" << std::endl;
std::cout << "version :: C++ with CUDA" << std::endl;
std::cout << "mesh :: " << options.nx << " * " << options.ny << " dx = " << options.dx << std::endl;
std::cout << "time :: " << nt << " time steps from 0 .. " << options.nt*options.dt << std::endl;
std::cout << "iteration :: " << "CG " << max_cg_iters
<< ", Newton " << max_newton_iters
<< ", tolerance " << tolerance << std::endl;
std::cout << "========================================================================" << std::endl;
// allocate global fields
x_new.init(nx,ny);
x_old.init(nx,ny);
bndN.init(nx,1);
bndS.init(nx,1);
bndE.init(ny,1);
bndW.init(ny,1);
Field b(nx,ny);
Field deltax(nx,ny);
// set dirichlet boundary conditions to 0 all around
ss_fill(bndN, 0.);
ss_fill(bndS, 0.);
ss_fill(bndE, 0.);
ss_fill(bndW, 0.);
// set the initial condition
// a circle of concentration 0.1 centred at (xdim/4, ydim/4) with radius
// no larger than 1/8 of both xdim and ydim
ss_fill(x_new, 0.);
double xc = 1.0 / 4.0;
double yc = (ny - 1) * options.dx / 4;
double radius = fmin(xc, yc) / 2.0;
for (int j = 0; j < ny; j++)
{
double y = (j - 1) * options.dx;
for (int i = 0; i < nx; i++)
{
double x = (i - 1) * options.dx;
if ((x - xc) * (x - xc) + (y - yc) * (y - yc) < radius * radius)
x_new[i+nx*j] = 0.1;
}
}
// update initial conditions on the device
x_new.update_device();
flops_bc = 0;
flops_diff = 0;
flops_blas1 = 0;
iters_cg = 0;
iters_newton = 0;
// start timer
double timespent = -omp_get_wtime();
// main timeloop
for (int timestep = 1; timestep <= nt; timestep++)
{
// set x_new and x_old to be the solution
ss_copy(x_old, x_new);
double residual;
bool converged = false;
int it;
for (it=0; it<max_newton_iters; it++)
{
// compute residual : requires both x_new and x_old
diffusion(x_new, b);
residual = ss_norm2(b);
// check for convergence
if (residual < tolerance)
{
converged = true;
break;
}
// solve linear system to get -deltax
bool cg_converged = false;
ss_cg(deltax, b, max_cg_iters, tolerance, cg_converged);
// check that the CG solver converged
if (!cg_converged) break;
// update solution
ss_axpy(x_new, -1.0, deltax);
}
iters_newton += it+1;
// output some statistics
if (converged && verbose_output) {
std::cout << "step " << timestep
<< " required " << it
<< " iterations for residual " << residual
<< std::endl;
}
if (!converged) {
std::cerr << "step " << timestep
<< " ERROR : nonlinear iterations failed to converge" << std::endl;;
break;
}
}
// get times
timespent += omp_get_wtime();
////////////////////////////////////////////////////////////////////
// write final solution to BOV file for visualization
////////////////////////////////////////////////////////////////////
// binary data
FILE* output = fopen("output.bin", "w");
x_new.update_host();
fwrite(x_new.host_data(), sizeof(double), nx * ny, output);
fclose(output);
// meta data
std::ofstream fid("output.bov");
fid << "TIME: 0.0" << std::endl;
fid << "DATA_FILE: output.bin" << std::endl;
fid << "DATA_SIZE: " << options.nx << ", " << options.ny << ", 1" << std::endl;;
fid << "DATA_FORMAT: DOUBLE" << std::endl;
fid << "VARIABLE: phi" << std::endl;
fid << "DATA_ENDIAN: LITTLE" << std::endl;
fid << "CENTERING: nodal" << std::endl;
fid << "BRICK_SIZE: 1.0 " << (options.ny-1)*options.dx << " 1.0" << std::endl;
// print table summarizing results
std::cout << "--------------------------------------------------------------------------------"
<< std::endl;
std::cout << "simulation took " << timespent << " seconds" << std::endl;
std::cout << int(iters_cg) << " conjugate gradient iterations, at rate of "
<< float(iters_cg)/timespent << " iters/second" << std::endl;
std::cout << iters_newton << " newton iterations" << std::endl;
std::cout << "--------------------------------------------------------------------------------"
<< std::endl;
std::cout << "Goodbye!" << std::endl;
return 0;
}
| eb154b22428e44cfe66f1c2916e94197c3951996.cu | // ******************************************
// implicit time stepping implementation of 2D diffusion problem
// Ben Cumming, CSCS
// *****************************************
// A small benchmark app that solves the 2D fisher equation using second-order
// finite differences.
// Syntax: ./main nx ny nt t
#include <algorithm>
#include <iostream>
#include <sstream>
#include <fstream>
#include <cstdio>
#include <cmath>
#include <cstdlib>
#include <cstring>
#include <omp.h>
#include "data.h"
#include "linalg.h"
#include "operators.h"
#include "stats.h"
using namespace data;
using namespace linalg;
using namespace operators;
using namespace stats;
// read command line arguments
static void readcmdline(Discretization& options, int argc, char* argv[])
{
if (argc<5 || argc>6 ) {
std::cerr << "Usage: main nx ny nt t\n";
std::cerr << " nx number of gridpoints in x-direction\n";
std::cerr << " ny number of gridpoints in y-direction\n";
std::cerr << " nt number of timesteps\n";
std::cerr << " t total time\n";
std::cerr << " v [optional] turn on verbose output\n";
exit(1);
}
// read nx
options.nx = atoi(argv[1]);
if (options.nx < 1) {
std::cerr << "nx must be positive integer\n";
exit(-1);
}
// read ny
options.ny = atoi(argv[2]);
if (options.ny < 1) {
std::cerr << "ny must be positive integer\n";
exit(-1);
}
options.N = options.nx*options.ny;
// read nt
options.nt = atoi(argv[3]);
if (options.nt < 1) {
std::cerr << "nt must be positive integer\n";
exit(-1);
}
// read total time
double t = atof(argv[4]);
if (t < 0) {
std::cerr << "t must be positive real value\n";
exit(-1);
}
verbose_output = false;
if( argc==6 ) {
verbose_output = true;
}
// compute timestep size
options.dt = t / options.nt;
// compute the distance between grid points
// assume that x dimension has length 1.0
options.dx = 1. / (options.nx - 1);
// set alpha, assume diffusion coefficient D is 1
options.alpha = (options.dx * options.dx) / (1. * options.dt);
}
// ==============================================================================
int main(int argc, char* argv[])
{
// read command line arguments
readcmdline(options, argc, argv);
int nx = options.nx;
int ny = options.ny;
int nt = options.nt;
// initialize cuda
int device_count;
cuda_check_status( cudaGetDeviceCount(&device_count) );
if(device_count < 1) {
std::cerr << "error: there should be at least one device per node" << std::endl;
exit(-1);
}
cuda_check_status( cudaSetDevice(0) );
// get the cublas handle to force cublas initialization outside the main time
// stepping loop, to ensure that the timing doesn't count initialization costs
auto handle = cublas_handle();
// set iteration parameters
int max_cg_iters = 200;
int max_newton_iters = 50;
double tolerance = 1.e-6;
std::cout << "========================================================================" << std::endl;
std::cout << " Welcome to mini-stencil!" << std::endl;
std::cout << "version :: C++ with CUDA" << std::endl;
std::cout << "mesh :: " << options.nx << " * " << options.ny << " dx = " << options.dx << std::endl;
std::cout << "time :: " << nt << " time steps from 0 .. " << options.nt*options.dt << std::endl;;
std::cout << "iteration :: " << "CG " << max_cg_iters
<< ", Newton " << max_newton_iters
<< ", tolerance " << tolerance << std::endl;;
std::cout << "========================================================================" << std::endl;
// allocate global fields
x_new.init(nx,ny);
x_old.init(nx,ny);
bndN.init(nx,1);
bndS.init(nx,1);
bndE.init(ny,1);
bndW.init(ny,1);
Field b(nx,ny);
Field deltax(nx,ny);
// set dirichlet boundary conditions to 0 all around
ss_fill(bndN, 0.);
ss_fill(bndS, 0.);
ss_fill(bndE, 0.);
ss_fill(bndW, 0.);
// set the initial condition
// a circle of concentration 0.1 centred at (xdim/4, ydim/4) with radius
// no larger than 1/8 of both xdim and ydim
ss_fill(x_new, 0.);
double xc = 1.0 / 4.0;
double yc = (ny - 1) * options.dx / 4;
double radius = fmin(xc, yc) / 2.0;
for (int j = 0; j < ny; j++)
{
double y = (j - 1) * options.dx;
for (int i = 0; i < nx; i++)
{
double x = (i - 1) * options.dx;
if ((x - xc) * (x - xc) + (y - yc) * (y - yc) < radius * radius)
x_new[i+nx*j] = 0.1;
}
}
// update initial conditions on the device
x_new.update_device();
flops_bc = 0;
flops_diff = 0;
flops_blas1 = 0;
iters_cg = 0;
iters_newton = 0;
// start timer
double timespent = -omp_get_wtime();
// main timeloop
for (int timestep = 1; timestep <= nt; timestep++)
{
// set x_new and x_old to be the solution
ss_copy(x_old, x_new);
double residual;
bool converged = false;
int it;
for (it=0; it<max_newton_iters; it++)
{
// compute residual : requires both x_new and x_old
diffusion(x_new, b);
residual = ss_norm2(b);
// check for convergence
if (residual < tolerance)
{
converged = true;
break;
}
// solve linear system to get -deltax
bool cg_converged = false;
ss_cg(deltax, b, max_cg_iters, tolerance, cg_converged);
// check that the CG solver converged
if (!cg_converged) break;
// update solution
ss_axpy(x_new, -1.0, deltax);
}
iters_newton += it+1;
// output some statistics
if (converged && verbose_output) {
std::cout << "step " << timestep
<< " required " << it
<< " iterations for residual " << residual
<< std::endl;
}
if (!converged) {
std::cerr << "step " << timestep
<< " ERROR : nonlinear iterations failed to converge" << std::endl;;
break;
}
}
// get times
timespent += omp_get_wtime();
////////////////////////////////////////////////////////////////////
// write final solution to BOV file for visualization
////////////////////////////////////////////////////////////////////
// binary data
FILE* output = fopen("output.bin", "w");
x_new.update_host();
fwrite(x_new.host_data(), sizeof(double), nx * ny, output);
fclose(output);
// meta data
std::ofstream fid("output.bov");
fid << "TIME: 0.0" << std::endl;
fid << "DATA_FILE: output.bin" << std::endl;
fid << "DATA_SIZE: " << options.nx << ", " << options.ny << ", 1" << std::endl;;
fid << "DATA_FORMAT: DOUBLE" << std::endl;
fid << "VARIABLE: phi" << std::endl;
fid << "DATA_ENDIAN: LITTLE" << std::endl;
fid << "CENTERING: nodal" << std::endl;
fid << "BRICK_SIZE: 1.0 " << (options.ny-1)*options.dx << " 1.0" << std::endl;
// print table summarizing results
std::cout << "--------------------------------------------------------------------------------"
<< std::endl;
std::cout << "simulation took " << timespent << " seconds" << std::endl;
std::cout << int(iters_cg) << " conjugate gradient iterations, at rate of "
<< float(iters_cg)/timespent << " iters/second" << std::endl;
std::cout << iters_newton << " newton iterations" << std::endl;
std::cout << "--------------------------------------------------------------------------------"
<< std::endl;
std::cout << "Goodbye!" << std::endl;
return 0;
}
|
042d7de1eadef4f607177fb16d6d20af6aa491b1.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
============================================================================
Name : 3D_tundish.cu
Author : Jan Bohacek
Version :
Copyright :
Description : laminar flow in three-dimensional tundish in continuous casting
============================================================================
*/
#include <iostream>
//#include <stdio.h>
//#include <algorithm>
//#include <numeric>
#include <fstream>
#include <sstream>
#include <cstring>
//#include <ctime>
#include <math.h>
#include <iomanip>
#define DEFL
#define DEFLDIR // if commented solveICCG()
#define MIC0 // with DEFLDIR, if commented IC0 (incomplete Cholesky zero fill)
using namespace std;
typedef double T; // precision of calculation
typedef struct {
int Nx; // x-coordinate
int Ny; // y
int Nz; // z
T dx; // dx = dy = dz
} Dimensions; // dimensions of geometry
typedef struct {
int steps; // number of timesteps (-)
int maxIterSIMPLE; // maximum number of SIMPLE iterations
T CFL; // Courant number
T dt; // timestep size
T UZ; // inlet velocity
T ac; // volume of cell divided by timestep
T blocks; // for dot product
T blockSize; // -||-
T maxResU; // stopping criterion for velocity calculation
T maxResP; // pressure
T maxResSIMPLE; // SIMPLE
T urfU; // under-relaxation factor U
T urfP; // P
} Parameters; // simulation settings
typedef struct { // deflation
unsigned int NxZ;
unsigned int NyZ;
unsigned int nDV; // number of deflation vectors
unsigned int nRowsZ; // number of rows/columns for one deflation vector
T maxresZ;
} ParametersZ;
typedef struct {
T nu; // kinematic viscosity (m2/s)
T rho; // density
T cp; // specific heat
T k; // thermal conductivity
T alpha; // thermal diffusivity (m2/s)
T beta; // thermal expansion coefficient
} MaterialProperties;
// declare CPU fields
Dimensions dims;
Parameters params;
ParametersZ paramsZ;
MaterialProperties liquid;
// cache constant CUDA fields
__constant__ Dimensions d_dims;
__constant__ Parameters d_params;
__constant__ ParametersZ d_paramsZ;
__constant__ MaterialProperties d_liquid;
#include "cpuFunctions.h"
#include "cudaFunctions.h"
#include "cpuFunctionsDeflation.h"
#include "cudaFunctionsDeflation.h"
int main()
{
cout << "--flow in 3D tundish---" << endl;
// geometry
dims.Nx = 256;
dims.Ny = 64;
dims.Nz = 64;
dims.dx = 0.001;
// parameters deflation
paramsZ.nRowsZ = 16;
paramsZ.NxZ = dims.Nx/paramsZ.nRowsZ; // number of coarse cells in X
paramsZ.NyZ = dims.Ny/paramsZ.nRowsZ; // number of coarse cells in Y
paramsZ.nDV = paramsZ.NxZ * paramsZ.NyZ * dims.Nz/paramsZ.nRowsZ; // size of coarse system
paramsZ.maxresZ = 1e-8;
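// with nRowsZ = 16 each deflation vector spans a 16x16x16 block of cells, so the
// coarse (deflated) pressure system has NxZ*NyZ*(Nz/nRowsZ) = 16*4*4 = 256 unknowns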
// parameters
params.steps = 10000;
params.CFL = 1.0;
params.UZ = -0.5;
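// timestep from the CFL condition |UZ|*dt/dx = CFL, i.e. dt = CFL*dx/|UZ|
// (here 1.0*0.001/0.5 = 2e-3 s)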
params.dt = params.CFL * dims.dx / fabs(params.UZ);
params.ac = dims.dx*dims.dx/params.dt;
params.blocks = 256;
params.blockSize = 128;
params.maxResU = 1e-5;
params.maxResP = 1e-5;
params.maxResSIMPLE = 1e-5;
params.maxIterSIMPLE = 1;
params.urfU = 0.7;
params.urfP = 0.3;
params.maxIterSIMPLE = 20;
// material properties
liquid.nu = 0.000001; // water 1e-6 m2/s
liquid.rho = 1000;
cout << "For Courant number of " << params.CFL << " the timestep size is " << params.dt << endl;
// CPU fields
T *ux; // ux-component of velocity
T *uy; // uy
T *uz; // uz
T *p; // pressure
T *m; // mass balance
T *hrh,*hsg; // dot products
T rhNew, rhOld, sg, ap, bt;
T endIter, endIterP, rhNewSIMPLE;
int iter, iterSIMPLE;
#ifdef DEFL
// CPU fields deflation
T *pc,*pf,*ps,*pw;
T *pzc, *pzf, *pzw, *pzs;
T *ec, *ef, *es, *ew;
T *hrZ, *hyZ, *hqZ, *hpZ, *hsZ;
T *L;
T *lc,*lf,*ls,*lw;
// GPU fields deflation
T *dpzc, *dpzf, *dpzw, *dpzs;
T *dec, *def, *des, *dew;
T *drZ, *dyZ, *dqZ, *dpZ, *dsZ;
T *drhs;
#endif
// GPU fields
T *dux , *duy , *duz; // velocity components
T *duxo, *duyo, *duzo; // old values
T *dp, *dpo; // pressure and old value
T *dm; // mass balance
T *duxtemp, *duytemp, *duztemp; // pointers for swapping fields
T *duxc,*duxf,*duxs,*duxw,*dkuxc,*dkuxf,*dkuxs,*dkuxw; // Aux
T *drx,*dqx,*dzx,*dpx; // Aux
T *duyc,*duyf,*duys,*duyw, *dkuyc,*dkuyf,*dkuys,*dkuyw; // Auy
T *dry,*dqy,*dzy,*dpy; // Auy
T *duzc,*duzf,*duzs,*duzw, *dkuzc,*dkuzf,*dkuzs,*dkuzw; // Auz
T *drz,*dqz,*dzz,*dpz; // Auz
T *dpc,*dpf,*dps,*dpw,*dkpc,*dkpf,*dkps,*dkpw; // Ap
T *drp,*dqp,*dzp,*dpp; // Ap
T *drh,*dsg; // dot products
// GPU parameters
int THREADS_PER_BLOCK = 1024;
int BLOCKS = ((dims.Nx+2)*(dims.Ny+2)*(dims.Nz+2)+THREADS_PER_BLOCK-1) / THREADS_PER_BLOCK; // larger in order to have BLOCKS*THREADS_PER_BLOCK > Nx*Ny*Nz
dim3 dimBlockZ(paramsZ.nRowsZ,paramsZ.nRowsZ,1);
// taken from CUDA by example
// initialize fields
cpuInit(ux, uy, uz, p, m, hrh, hsg);
cudaInit(dux, duy, duz, dp, dm, duxo, duyo, duzo, dpo,
duxc, duxf, duxs, duxw, dkuxc, dkuxf, dkuxs, dkuxw, // Aux
drx, dqx, dzx, dpx, // Aux
duyc, duyf, duys, duyw, dkuyc, dkuyf, dkuys, dkuyw, // Auy
dry, dqy, dzy, dpy, // Auy
duzc, duzf, duzs, duzw, dkuzc, dkuzf, dkuzs, dkuzw, // Auz
drz, dqz, dzz, dpz, // Auz
dpc, dpf, dps, dpw, dkpc, dkpf, dkps, dkpw, // Ap
drp, dqp, dzp, dpp, // Ap
drh, dsg);
// patch anything to dux
//patchDux<<<BLOCKS,THREADS_PER_BLOCK>>>(dux);
// patch anything to duy
//patchDuy<<<BLOCKS,THREADS_PER_BLOCK>>>(duy);
// patch anything to duz
//patchDuz<<<BLOCKS,THREADS_PER_BLOCK>>>(duz);
/*// copy back to host and save
hipMemcpy(ux, dux, sizeof(T)*(dims.Nx+2)*(dims.Ny+2)*(dims.Nz+2), hipMemcpyDeviceToHost);
hipMemcpy(uy, duy, sizeof(T)*(dims.Nx+2)*(dims.Ny+2)*(dims.Nz+2), hipMemcpyDeviceToHost);
hipMemcpy(uz, duz, sizeof(T)*(dims.Nx+2)*(dims.Ny+2)*(dims.Nz+2), hipMemcpyDeviceToHost);
hipMemcpy(p, dp, sizeof(T)*dims.Nx * dims.Ny *(dims.Nz+2), hipMemcpyDeviceToHost);
saveDataInTime(ux, uy, uz, p, m, (T)0, "testTundish");*/
// Aux (x-component of velocity)
hipLaunchKernelGGL(( Aux), dim3(BLOCKS),dim3(THREADS_PER_BLOCK), 0, 0, duxc, duxf, duxs, duxw);
// AuxInlet not necessary, velocity inlet condition ux=0 is the same as no slip condition at wall
hipLaunchKernelGGL(( AuxOutlet), dim3(1),dim3(100), 0, 0, duxc,200,15);
hipLaunchKernelGGL(( makeTNS1), dim3(BLOCKS),dim3(THREADS_PER_BLOCK), 0, 0, dkuxc,dkuxf,dkuxs,dkuxw,duxc,duxf,duxs,duxw,dims.Nx-1,dims.Ny,dims.Nz);
// Auy (y-component of velocity)
hipLaunchKernelGGL(( Auy), dim3(BLOCKS),dim3(THREADS_PER_BLOCK), 0, 0, duyc, duyf, duys, duyw);
//AuyInlet not necessary
hipLaunchKernelGGL(( AuyOutlet), dim3(1),dim3(100), 0, 0, duyc,200,15);
hipLaunchKernelGGL(( makeTNS1), dim3(BLOCKS),dim3(THREADS_PER_BLOCK), 0, 0, dkuyc,dkuyf,dkuys,dkuyw,duyc,duyf,duys,duyw,dims.Nx,dims.Ny-1,dims.Nz);
// Auz (z-component of velocity)
hipLaunchKernelGGL(( Auz), dim3(BLOCKS),dim3(THREADS_PER_BLOCK), 0, 0, duzc, duzf, duzs, duzw);
//AuzInlet not necessary
hipLaunchKernelGGL(( AuzOutlet), dim3(1),dim3(100), 0, 0, duzc,200,15);
hipLaunchKernelGGL(( makeTNS1), dim3(BLOCKS),dim3(THREADS_PER_BLOCK), 0, 0, dkuzc,dkuzf,dkuzs,dkuzw,duzc,duzf,duzs,duzw,dims.Nx,dims.Ny,dims.Nz-1);
// Ap (pressure)
hipLaunchKernelGGL(( Ap), dim3(BLOCKS),dim3(THREADS_PER_BLOCK), 0, 0, dpc, dpf, dps, dpw);
hipLaunchKernelGGL(( ApOutlet), dim3(1),dim3(100), 0, 0, dpc,200,15); // Dirichlet, p=0
hipLaunchKernelGGL(( makeTNS1), dim3(BLOCKS),dim3(THREADS_PER_BLOCK), 0, 0, dkpc,dkpf,dkps,dkpw,dpc,dpf,dps,dpw,dims.Nx,dims.Ny,dims.Nz);
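// note: the dk** coefficient arrays built by makeTNS1 are applied as the
// preconditioner M^(-1) via SpMV in the CG loops below; judging by the name this
// is likely a truncated Neumann series approximation of the inverse, but the
// exact form is defined in cudaFunctions.h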
#ifdef DEFL
cpuInitDeflation(pzc, pzf, pzs, pzw,
ec, ef, es, ew,
pc, pf, ps, pw,
lc, lf, ls, lw,
hrZ,hyZ,hqZ,hpZ,hsZ,
L);
hipMemcpy(pc,dpc,sizeof(T)*(dims.Nx*dims.Ny*dims.Nz+2*dims.Nx*dims.Ny),hipMemcpyDeviceToHost);
hipMemcpy(pf,dpf,sizeof(T)*(dims.Nx*dims.Ny*dims.Nz+dims.Nx*dims.Ny ),hipMemcpyDeviceToHost);
hipMemcpy(ps,dps,sizeof(T)*(dims.Nx*dims.Ny*dims.Nz+dims.Nx ),hipMemcpyDeviceToHost);
hipMemcpy(pw,dpw,sizeof(T)*(dims.Nx*dims.Ny*dims.Nz+1 ),hipMemcpyDeviceToHost);
initAZ(pzc,pzf,pzs,pzw,pc,pf,ps,pw);
initE(ec,ef,es,ew,pc,pf,ps,pw);
cudaInitDeflation(dpzc,dpzf,dpzs,dpzw,
dec,def,des,dew,
drZ,dyZ,dqZ,dpZ,
drhs,
ec,ef,es,ew,
pzc,pzf,pzs,pzw);
#ifdef DEFLDIR
Chol(L,ec,ef,es,ew); // Cholesky factorization
#else
IChol(lc,lf,ls,lw,ec,ef,es,ew); // incomplete Cholesky factorization with zero fill (IC(0) or MIC(0))
#endif
#endif
/*for (int i=0; i<paramsZ.nDV;i++){
//cout << ec[i+paramsZ.NxZ*paramsZ.NyZ] << endl;
cout << ew[i] << endl;
}
*/
hipEvent_t start, stop;
float elapsedTime;
hipEventCreate(&start);
hipEventCreate(&stop);
hipEventRecord(start,0);
for (int miter=0; miter<params.steps; miter++) {
// boundary conditions
hipLaunchKernelGGL(( bcVelWallNoslip), dim3(BLOCKS),dim3(THREADS_PER_BLOCK), 0, 0, dux, duy, duz); // no slip at walls
hipLaunchKernelGGL(( bcVelInlet), dim3(1),dim3(100), 0, 0, duz, params.UZ, 50, 45); // bcVelInlet<<<1,inletWidth>>>(dux, duy, duz, velocity, first index in x, first index in y);
hipLaunchKernelGGL(( bcVelOutlet), dim3(1),dim3(100), 0, 0, dux, duy, duz, 200, 15); // bcVelOutlet<<<1,outletwidth>>>(dux, duy, duz, first index in x, first index in y);
//swap old and new arrays for next timestep
duxtemp = duxo; duxo = dux; dux = duxtemp;
duytemp = duyo; duyo = duy; duy = duytemp;
duztemp = duzo; duzo = duz; duz = duztemp;
// advect horizontal and vertical velocity components
hipLaunchKernelGGL(( advectUx), dim3(BLOCKS),dim3(THREADS_PER_BLOCK), 0, 0, dux, duxo, duyo, duzo);
hipLaunchKernelGGL(( advectUy), dim3(BLOCKS),dim3(THREADS_PER_BLOCK), 0, 0, duy, duxo, duyo, duzo);
hipLaunchKernelGGL(( advectUz), dim3(BLOCKS),dim3(THREADS_PER_BLOCK), 0, 0, duz, duxo, duyo, duzo);
hipLaunchKernelGGL(( bcVelWallNoslip), dim3(BLOCKS),dim3(THREADS_PER_BLOCK), 0, 0, dux, duy, duz); // no slip at walls
hipLaunchKernelGGL(( bcVelInlet), dim3(1),dim3(100), 0, 0, duz, params.UZ, 50, 45); // bcVelInlet<<<1,inletWidth>>>(dux, duy, duz, velocity, first index in x, first index in y);
hipLaunchKernelGGL(( bcVelOutlet), dim3(1),dim3(100), 0, 0, dux, duy, duz, 200, 15); // bcVelOutlet<<<1,outletwidth>>>(dux, duy, duz, first index in x, first index in y);
hipMemcpy(duxo, dux, sizeof(T)*(dims.Nx+2)*(dims.Ny+2)*(dims.Nz+2), hipMemcpyDeviceToDevice);
hipMemcpy(duyo, duy, sizeof(T)*(dims.Nx+2)*(dims.Ny+2)*(dims.Nz+2), hipMemcpyDeviceToDevice);
hipMemcpy(duzo, duz, sizeof(T)*(dims.Nx+2)*(dims.Ny+2)*(dims.Nz+2), hipMemcpyDeviceToDevice);
// ************ BEGIN SIMPLE **********
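// one SIMPLE outer iteration below consists of: (1) momentum predictor for
// ux, uy, uz with the old pressure dpo in the rhs, (2) a pressure-correction
// Poisson solve driven by the continuity defect of the predicted velocities,
// (3) velocity correction and under-relaxed pressure update; this repeats
// until the mass-imbalance norm rhNewSIMPLE falls below maxResSIMPLE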
iterSIMPLE = 0;
rhNewSIMPLE = 1;
/*// copy back to host and save
hipMemcpy(ux, dux, sizeof(T)*(dims.Nx+2)*(dims.Ny+2)*(dims.Nz+2), hipMemcpyDeviceToHost);
hipMemcpy(uy, duy, sizeof(T)*(dims.Nx+2)*(dims.Ny+2)*(dims.Nz+2), hipMemcpyDeviceToHost);
hipMemcpy(uz, duz, sizeof(T)*(dims.Nx+2)*(dims.Ny+2)*(dims.Nz+2), hipMemcpyDeviceToHost);
hipMemcpy(p, dp, sizeof(T)*dims.Nx * dims.Ny *(dims.Nz+2), hipMemcpyDeviceToHost);
saveDataInTime(ux, uy, uz, p, m, (T)0, "testTundish");*/
while (rhNewSIMPLE > params.maxResSIMPLE) { //(iterSIMPLE < params.maxIterSIMPLE) {
iterSIMPLE++;
// ********** BEGIN solve UX **********
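// preconditioned CG for Aux*ux = bx: r = b - A*x, z = M^(-1)*r, then iterate
// q = A*p, alpha = (r,z)/(p,q), x += alpha*p, r -= alpha*q, z = M^(-1)*r,
// beta = (r,z)_new/(r,z)_old, p = z + beta*p; the uy and uz solves below
// follow the same pattern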
hipLaunchKernelGGL(( duToDr), dim3(BLOCKS),dim3(THREADS_PER_BLOCK), 0, 0, drx, dux,dims.Nx-1,dims.Ny,dims.Nz); // drx := dux
hipLaunchKernelGGL(( SpMV), dim3(BLOCKS),dim3(THREADS_PER_BLOCK), 0, 0, dqx,duxc,duxf,duxs,duxw,drx,dims.Nx-1,dims.Ny,dims.Nz); // q := Aux ux
hipLaunchKernelGGL(( duToDr), dim3(BLOCKS),dim3(THREADS_PER_BLOCK), 0, 0, drx, duxo,dims.Nx-1,dims.Ny,dims.Nz);
hipLaunchKernelGGL(( b), dim3(BLOCKS),dim3(THREADS_PER_BLOCK), 0, 0, drx,dims.Nx-1,dims.Ny,dims.Nz); // drx := bx
// bxInlet not necessary as ux=0 there
hipLaunchKernelGGL(( bpx), dim3(BLOCKS),dim3(THREADS_PER_BLOCK), 0, 0, drx,dpo,dims.Nx-1,dims.Ny,dims.Nz); // add grad(p) to rhs of Ax=b
hipLaunchKernelGGL(( AXPY), dim3(BLOCKS),dim3(THREADS_PER_BLOCK), 0, 0, drx,dqx,(T)-1.,(T)1.,dims.Nx-1,dims.Ny, dims.Nz); // r = r - q
hipLaunchKernelGGL(( SpMV), dim3(BLOCKS),dim3(THREADS_PER_BLOCK), 0, 0, dzx,dkuxc,dkuxf,dkuxs,dkuxw,drx,dims.Nx-1,dims.Ny,dims.Nz); // z = M^(-1)r
hipLaunchKernelGGL(( DOTGPU<T,128>), dim3(params.blocks),dim3(params.blockSize),params.blockSize*sizeof(T), 0, drh,drx,dzx,dims.Nx-1,dims.Ny,dims.Nz);
hipMemcpy(hrh, drh, params.blocks*sizeof(T), hipMemcpyDeviceToHost);
rhNew = dot(hrh,params.blocks);
//cout << "Ux residual at start: " << rhNew << endl;
endIter = rhNew * params.maxResU * params.maxResU;
iter = 0;
while (rhNew > endIter) {
iter++;
if (iter==1) {
hipMemcpy(dpx, dzx, sizeof(T)*(dims.Nx-1)*dims.Ny*(dims.Nz+2),hipMemcpyDeviceToDevice);
}
else {
bt = rhNew/rhOld;
hipLaunchKernelGGL(( AXPY), dim3(BLOCKS),dim3(THREADS_PER_BLOCK), 0, 0, dpx,dzx,(T)1.,bt,dims.Nx-1,dims.Ny,dims.Nz); // p = z + beta*p
}
hipLaunchKernelGGL(( SpMV), dim3(BLOCKS),dim3(THREADS_PER_BLOCK), 0, 0, dqx,duxc,duxf,duxs,duxw,dpx,dims.Nx-1,dims.Ny,dims.Nz); // q := Aux p
hipLaunchKernelGGL(( DOTGPU<T,128>), dim3(params.blocks),dim3(params.blockSize),params.blockSize*sizeof(T), 0, dsg, dpx, dqx, dims.Nx-1, dims.Ny,dims.Nz);
hipMemcpy(hsg, dsg, params.blocks*sizeof(T), hipMemcpyDeviceToHost);
sg = dot(hsg,params.blocks);
ap = rhNew/sg; // alpha = rhoNew / sigma
hipLaunchKernelGGL(( AXPY), dim3(BLOCKS),dim3(THREADS_PER_BLOCK), 0, 0, drx,dqx,-ap,(T)1.,dims.Nx-1,dims.Ny,dims.Nz); // r = r - alpha*q
hipLaunchKernelGGL(( AXPY2), dim3(BLOCKS),dim3(THREADS_PER_BLOCK), 0, 0, dux,dpx, ap,(T)1.,dims.Nx-1,dims.Ny,dims.Nz); // x = x + alpha*p; Note: sizeof(dux) != sizeof(dpx)
hipLaunchKernelGGL(( SpMV), dim3(BLOCKS),dim3(THREADS_PER_BLOCK), 0, 0, dzx,dkuxc,dkuxf,dkuxs,dkuxw,drx,dims.Nx-1,dims.Ny,dims.Nz); // z = M^(-1)r
rhOld = rhNew;
hipLaunchKernelGGL(( DOTGPU<T,128>), dim3(params.blocks),dim3(params.blockSize),params.blockSize*sizeof(T), 0, drh, drx, dzx, dims.Nx-1, dims.Ny,dims.Nz);
hipMemcpy(hrh, drh, params.blocks*sizeof(T), hipMemcpyDeviceToHost);
rhNew = dot(hrh,params.blocks);
}
//cout << "Ux iter number: " << iter << endl;
// ********** END solve UX ************
// ********** BEGIN solve UY **********
hipLaunchKernelGGL(( duToDr), dim3(BLOCKS),dim3(THREADS_PER_BLOCK), 0, 0, dry, duy,dims.Nx,dims.Ny-1,dims.Nz); // dry := duy
hipLaunchKernelGGL(( SpMV), dim3(BLOCKS),dim3(THREADS_PER_BLOCK), 0, 0, dqy,duyc,duyf,duys,duyw,dry,dims.Nx,dims.Ny-1,dims.Nz); // q := Auy uy
hipLaunchKernelGGL(( duToDr), dim3(BLOCKS),dim3(THREADS_PER_BLOCK), 0, 0, dry, duyo,dims.Nx,dims.Ny-1,dims.Nz);
hipLaunchKernelGGL(( b), dim3(BLOCKS),dim3(THREADS_PER_BLOCK), 0, 0, dry,dims.Nx,dims.Ny-1,dims.Nz);
//byOutlet not necessary due to zero gradient condition
hipLaunchKernelGGL(( bpy), dim3(BLOCKS),dim3(THREADS_PER_BLOCK), 0, 0, dry,dpo,dims.Nx,dims.Ny-1,dims.Nz); // add grad(p) to rhs of Ax=b
hipLaunchKernelGGL(( AXPY), dim3(BLOCKS),dim3(THREADS_PER_BLOCK), 0, 0, dry,dqy,(T)-1.,(T)1.,dims.Nx,dims.Ny-1,dims.Nz); // r = r - q
hipLaunchKernelGGL(( SpMV), dim3(BLOCKS),dim3(THREADS_PER_BLOCK), 0, 0, dzy,dkuyc,dkuyf,dkuys,dkuyw,dry,dims.Nx,dims.Ny-1,dims.Nz); // z = M^(-1)r
hipLaunchKernelGGL(( DOTGPU<T,128>), dim3(params.blocks),dim3(params.blockSize),params.blockSize*sizeof(T), 0, drh, dry, dzy, dims.Nx, dims.Ny-1,dims.Nz);
hipMemcpy(hrh, drh, params.blocks*sizeof(T), hipMemcpyDeviceToHost);
rhNew = dot(hrh,params.blocks);
//cout << "Uy residual at start: " << rhNew << endl;
endIter = rhNew * params.maxResU * params.maxResU;
iter = 0;
while (rhNew > endIter) {
iter++;
if (iter==1) {
hipMemcpy(dpy, dzy, sizeof(T)*dims.Nx*(dims.Ny-1)*(dims.Nz+2),hipMemcpyDeviceToDevice);
}
else {
bt = rhNew/rhOld;
hipLaunchKernelGGL(( AXPY), dim3(BLOCKS),dim3(THREADS_PER_BLOCK), 0, 0, dpy,dzy,(T)1.,bt,dims.Nx,dims.Ny-1,dims.Nz); // p = z + beta*p
}
hipLaunchKernelGGL(( SpMV), dim3(BLOCKS),dim3(THREADS_PER_BLOCK), 0, 0, dqy,duyc,duyf,duys,duyw,dpy,dims.Nx,dims.Ny-1,dims.Nz); // q := Auy p
hipLaunchKernelGGL(( DOTGPU<T,128>), dim3(params.blocks),dim3(params.blockSize),params.blockSize*sizeof(T), 0, dsg,dpy,dqy,dims.Nx,dims.Ny-1,dims.Nz);
hipMemcpy(hsg, dsg, params.blocks*sizeof(T), hipMemcpyDeviceToHost);
sg = dot(hsg,params.blocks);
ap = rhNew/sg; // alpha = rhoNew / sigma
hipLaunchKernelGGL(( AXPY), dim3(BLOCKS),dim3(THREADS_PER_BLOCK), 0, 0, dry,dqy,-ap,(T)1.,dims.Nx,dims.Ny-1,dims.Nz); // r = r - alpha*q
hipLaunchKernelGGL(( AXPY2), dim3(BLOCKS),dim3(THREADS_PER_BLOCK), 0, 0, duy,dpy, ap,(T)1.,dims.Nx,dims.Ny-1,dims.Nz); // x = x + alpha*p; Note: sizeof(duy) != sizeof(dpy)
hipLaunchKernelGGL(( SpMV), dim3(BLOCKS),dim3(THREADS_PER_BLOCK), 0, 0, dzy,dkuyc,dkuyf,dkuys,dkuyw,dry,dims.Nx,dims.Ny-1,dims.Nz); // z = M^(-1)r
rhOld = rhNew;
hipLaunchKernelGGL(( DOTGPU<T,128>), dim3(params.blocks),dim3(params.blockSize),params.blockSize*sizeof(T), 0, drh, dry, dzy, dims.Nx, dims.Ny-1,dims.Nz);
hipMemcpy(hrh, drh, params.blocks*sizeof(T), hipMemcpyDeviceToHost);
rhNew = dot(hrh,params.blocks);
}
//cout << "Uy iter number: " << iter << endl;
// ********** END solve UY ************
// ********** BEGIN solve UZ **********
hipLaunchKernelGGL(( duToDr), dim3(BLOCKS),dim3(THREADS_PER_BLOCK), 0, 0, drz, duz,dims.Nx,dims.Ny,dims.Nz-1); // drz := duz
hipLaunchKernelGGL(( SpMV), dim3(BLOCKS),dim3(THREADS_PER_BLOCK), 0, 0, dqz,duzc,duzf,duzs,duzw,drz,dims.Nx,dims.Ny,dims.Nz-1); // q := Auz uz
hipLaunchKernelGGL(( duToDr), dim3(BLOCKS),dim3(THREADS_PER_BLOCK), 0, 0, drz, duzo,dims.Nx,dims.Ny,dims.Nz-1);
hipLaunchKernelGGL(( b), dim3(BLOCKS),dim3(THREADS_PER_BLOCK), 0, 0, drz,dims.Nx,dims.Ny,dims.Nz-1);
hipLaunchKernelGGL(( bzInlet), dim3(1),dim3(100), 0, 0, drz, duz, 50, 45);
//bzOutlet not necessary due to zero gradient condition
hipLaunchKernelGGL(( bpz), dim3(BLOCKS),dim3(THREADS_PER_BLOCK), 0, 0, drz,dpo,dims.Nx,dims.Ny,dims.Nz-1); // add grad(p) to rhs of Ax=b
hipLaunchKernelGGL(( AXPY), dim3(BLOCKS),dim3(THREADS_PER_BLOCK), 0, 0, drz,dqz,(T)-1.,(T)1.,dims.Nx,dims.Ny,dims.Nz-1); // r = r - q
hipLaunchKernelGGL(( SpMV), dim3(BLOCKS),dim3(THREADS_PER_BLOCK), 0, 0, dzz,dkuzc,dkuzf,dkuzs,dkuzw,drz,dims.Nx,dims.Ny,dims.Nz-1); // z = M^(-1)r
hipLaunchKernelGGL(( DOTGPU<T,128>), dim3(params.blocks),dim3(params.blockSize),params.blockSize*sizeof(T), 0, drh, drz, dzz, dims.Nx, dims.Ny,dims.Nz-1);
hipMemcpy(hrh, drh, params.blocks*sizeof(T), hipMemcpyDeviceToHost);
rhNew = dot(hrh,params.blocks);
//cout << "Uz residual at start: " << rhNew << endl;
endIter = rhNew * params.maxResU * params.maxResU;
iter = 0;
while (rhNew > endIter) {
iter++;
if (iter==1) {
hipMemcpy(dpz, dzz, sizeof(T)*dims.Nx*dims.Ny*(dims.Nz+1),hipMemcpyDeviceToDevice);
}
else {
bt = rhNew/rhOld;
hipLaunchKernelGGL(( AXPY), dim3(BLOCKS),dim3(THREADS_PER_BLOCK), 0, 0, dpz,dzz,(T)1.,bt,dims.Nx,dims.Ny,dims.Nz-1); // p = z + beta*p
}
hipLaunchKernelGGL(( SpMV), dim3(BLOCKS),dim3(THREADS_PER_BLOCK), 0, 0, dqz,duzc,duzf,duzs,duzw,dpz,dims.Nx,dims.Ny,dims.Nz-1); // q := Auz p
hipLaunchKernelGGL(( DOTGPU<T,128>), dim3(params.blocks),dim3(params.blockSize),params.blockSize*sizeof(T), 0, dsg,dpz,dqz,dims.Nx,dims.Ny,dims.Nz-1);
hipMemcpy(hsg, dsg, params.blocks*sizeof(T), hipMemcpyDeviceToHost);
sg = dot(hsg,params.blocks);
ap = rhNew/sg; // alpha = rhoNew / sigma
hipLaunchKernelGGL(( AXPY), dim3(BLOCKS),dim3(THREADS_PER_BLOCK), 0, 0, drz,dqz,-ap,(T)1.,dims.Nx,dims.Ny,dims.Nz-1); // r = r - alpha*q
hipLaunchKernelGGL(( AXPY2), dim3(BLOCKS),dim3(THREADS_PER_BLOCK), 0, 0, duz,dpz, ap,(T)1.,dims.Nx,dims.Ny,dims.Nz-1); // x = x + alpha*p; Note: sizeof(duz) != sizeof(dpz)
hipLaunchKernelGGL(( SpMV), dim3(BLOCKS),dim3(THREADS_PER_BLOCK), 0, 0, dzz,dkuzc,dkuzf,dkuzs,dkuzw,drz,dims.Nx,dims.Ny,dims.Nz-1); // z = M^(-1)r
rhOld = rhNew;
hipLaunchKernelGGL(( DOTGPU<T,128>), dim3(params.blocks),dim3(params.blockSize),params.blockSize*sizeof(T), 0, drh, drz, dzz, dims.Nx, dims.Ny,dims.Nz-1);
hipMemcpy(hrh, drh, params.blocks*sizeof(T), hipMemcpyDeviceToHost);
rhNew = dot(hrh,params.blocks);
}
//cout << "Uz iter number: " << iter << endl;
// ********** END solve UZ ************
// update velocity at boundary
hipLaunchKernelGGL(( bcVelOutlet), dim3(1),dim3(100), 0, 0, dux, duy, duz, 200, 15);
// ********** BEGIN solve P ***********
// The finite volume method in computational fluid dynamics, F. Moukalled, L. Mangani, M. Darwish
// Patankar's SIMPLE
hipMemcpy(drp,dp,sizeof(T)*dims.Nx*dims.Ny*(dims.Nz+2),hipMemcpyDeviceToDevice);
hipLaunchKernelGGL(( SpMV), dim3(BLOCKS),dim3(THREADS_PER_BLOCK), 0, 0, dqp,dpc,dpf,dps,dpw,drp,dims.Nx,dims.Ny,dims.Nz); // q := Ap p
hipLaunchKernelGGL(( bp), dim3(BLOCKS),dim3(THREADS_PER_BLOCK), 0, 0, drp,dux,duy,duz,dims.Nx,dims.Ny,dims.Nz); // should become at convergence == zero correction field
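// drp now holds the continuity defect div(u*) of the predicted velocities; its
// squared norm is used as the SIMPLE convergence measure rhNewSIMPLE below, and
// r = b - Ap*p seeds the CG solve for the pressure correction dp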
#ifdef DEFL // store rhs (b) for correction
hipMemcpy(drhs,drp,sizeof(T)*dims.Nx*dims.Ny*(dims.Nz+2),hipMemcpyDeviceToDevice);
#endif
hipLaunchKernelGGL(( DOTGPU<T,128>), dim3(params.blocks),dim3(params.blockSize),params.blockSize*sizeof(T), 0, drh,drp,drp,dims.Nx,dims.Ny,dims.Nz);
hipMemcpy(hrh, drh, params.blocks*sizeof(T), hipMemcpyDeviceToHost);
rhNewSIMPLE = dot(hrh,params.blocks);
hipLaunchKernelGGL(( AXPY), dim3(BLOCKS),dim3(THREADS_PER_BLOCK), 0, 0, drp,dqp,(T)-1.,(T)1.,dims.Nx,dims.Ny,dims.Nz); // r = r - q
#ifdef DEFL
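// deflation step: project the residual, r := P*r with P = I - A*Z*E^(-1)*Z^T;
// localDOTGPU restricts r to the coarse space (Z^T*r), the coarse system
// E*y = Z^T*r is solved on the host (direct Cholesky or ICCG), and the
// prolonged correction A*Z*y is subtracted on the device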
hipLaunchKernelGGL(( localDOTGPU<T,256>), dim3(256),dim3(dimBlockZ),256*sizeof(T), 0, drZ,drp); // equivalent to ZTransXYDeflation
hipMemcpy(hrZ,drZ,(paramsZ.nDV+2*paramsZ.NxZ*paramsZ.NyZ)*sizeof(T),hipMemcpyDeviceToHost); // copy drZ to hrZ
#ifdef DEFLDIR
solveDC(hyZ,hrZ,L);
#else
solveICCG(hsZ,hrZ,hyZ,hpZ,hqZ,
ec,ef,es,ew,
lc,lf,ls,lw);
#endif
hipMemcpy(dyZ,hyZ,(paramsZ.nDV+2*paramsZ.NxZ*paramsZ.NyZ)*sizeof(T),hipMemcpyHostToDevice); //copy hyZ to dyZ
hipLaunchKernelGGL(( YMinusAzXYDeflation), dim3(BLOCKS),dim3(THREADS_PER_BLOCK), 0, 0, drp,dyZ,dpzc,dpzf,dpzs,dpzw); // r = P*r
//cout << "stopped" << endl;
//break;
#endif
hipLaunchKernelGGL(( SpMV), dim3(BLOCKS),dim3(THREADS_PER_BLOCK), 0, 0, dzp,dkpc,dkpf,dkps,dkpw,drp,dims.Nx,dims.Ny,dims.Nz); // z = M^(-1)r
hipLaunchKernelGGL(( DOTGPU<T,128>), dim3(params.blocks),dim3(params.blockSize),params.blockSize*sizeof(T), 0, drh,drp,dzp,dims.Nx,dims.Ny,dims.Nz);
hipMemcpy(hrh, drh, params.blocks*sizeof(T), hipMemcpyDeviceToHost);
rhNew = dot(hrh,params.blocks);
//cout << "P residual at start: " << rhNew << endl;
if (iterSIMPLE==1) endIterP = rhNew * params.maxResP * params.maxResP;
iter = 0;
while (rhNew > endIterP) { //(iter<8) {
iter++;
//cout << "iteration:" << iter << ", residual: " << setprecision(11) << rhNew << endl;
if (iter==1) {
hipMemcpy(dpp, dzp, sizeof(T)*dims.Nx*dims.Ny*(dims.Nz+2),hipMemcpyDeviceToDevice);
}
else {
bt = rhNew/rhOld;
hipLaunchKernelGGL(( AXPY), dim3(BLOCKS),dim3(THREADS_PER_BLOCK), 0, 0, dpp,dzp,(T)1.,bt,dims.Nx,dims.Ny,dims.Nz); // p = z + beta*p
}
hipLaunchKernelGGL(( SpMV), dim3(BLOCKS),dim3(THREADS_PER_BLOCK), 0, 0, dqp,dpc,dpf,dps,dpw,dpp,dims.Nx,dims.Ny,dims.Nz); // q := Ap p
#ifdef DEFL
hipLaunchKernelGGL(( localDOTGPU<T,256>), dim3(256),dim3(dimBlockZ),256*sizeof(T), 0, drZ,dqp); // equivalent to ZTransXYDeflation
hipMemcpy(hrZ,drZ,(paramsZ.nDV+2*paramsZ.NxZ*paramsZ.NyZ)*sizeof(T),hipMemcpyDeviceToHost); // copy drZ to hrZ
#ifdef DEFLDIR
solveDC(hyZ,hrZ,L);
#else
solveICCG(hsZ,hrZ,hyZ,hpZ,hqZ,
ec,ef,es,ew,
lc,lf,ls,lw);
#endif
hipMemcpy(dyZ,hyZ,(paramsZ.nDV+2*paramsZ.NxZ*paramsZ.NyZ)*sizeof(T),hipMemcpyHostToDevice); //copy hyZ to dyZ
hipLaunchKernelGGL(( YMinusAzXYDeflation), dim3(BLOCKS),dim3(THREADS_PER_BLOCK), 0, 0, dqp,dyZ,dpzc,dpzf,dpzs,dpzw); // r = P*r
#endif
hipLaunchKernelGGL(( DOTGPU<T,128>), dim3(params.blocks),dim3(params.blockSize),params.blockSize*sizeof(T), 0, dsg,dpp,dqp,dims.Nx,dims.Ny,dims.Nz);
hipMemcpy(hsg, dsg, params.blocks*sizeof(T), hipMemcpyDeviceToHost);
sg = dot(hsg,params.blocks);
ap = rhNew/sg; // alpha = rhoNew / sigma
hipLaunchKernelGGL(( AXPY), dim3(BLOCKS),dim3(THREADS_PER_BLOCK), 0, 0, drp,dqp,-ap,(T)1.,dims.Nx,dims.Ny,dims.Nz); // r = r - alpha*q
hipLaunchKernelGGL(( AXPY), dim3(BLOCKS),dim3(THREADS_PER_BLOCK), 0, 0, dp ,dpp, ap,(T)1.,dims.Nx,dims.Ny,dims.Nz); // x = x + alpha*p
hipLaunchKernelGGL(( SpMV), dim3(BLOCKS),dim3(THREADS_PER_BLOCK), 0, 0, dzp,dkpc,dkpf,dkps,dkpw,drp,dims.Nx,dims.Ny,dims.Nz); // z = M^(-1)r
rhOld = rhNew;
hipLaunchKernelGGL(( DOTGPU<T,128>), dim3(params.blocks),dim3(params.blockSize),params.blockSize*sizeof(T), 0, drh,drp,dzp,dims.Nx,dims.Ny,dims.Nz);
hipMemcpy(hrh, drh, params.blocks*sizeof(T), hipMemcpyDeviceToHost);
rhNew = dot(hrh,params.blocks);
}
#ifdef DEFL // y:= Q*b + P^T*y
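// recover the true pressure correction from the deflated solution:
// x := Q*b + P^T*x with Q = Z*E^(-1)*Z^T, i.e. first x -= Z*E^(-1)*Z^T*(Ap*x),
// then x += Z*E^(-1)*Z^T*b (b was saved in drhs before the projection)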
// P^T * y
hipLaunchKernelGGL(( SpMV), dim3(BLOCKS),dim3(THREADS_PER_BLOCK), 0, 0, dqp,dpc,dpf,dps,dpw,dp,dims.Nx,dims.Ny,dims.Nz);
hipLaunchKernelGGL(( localDOTGPU<T,256>), dim3(256),dim3(dimBlockZ),256*sizeof(T), 0, drZ,dqp); // equivalent to ZTransXYDeflation
hipMemcpy(hrZ,drZ,(paramsZ.nDV+2*paramsZ.NxZ*paramsZ.NyZ)*sizeof(T),hipMemcpyDeviceToHost); // copy drZ to hrZ
#ifdef DEFLDIR
solveDC(hyZ,hrZ,L);
#else
solveICCG(hsZ,hrZ,hyZ,hpZ,hqZ,
ec,ef,es,ew,
lc,lf,ls,lw);
#endif
hipMemcpy(dyZ,hyZ,(paramsZ.nDV+2*paramsZ.NxZ*paramsZ.NyZ)*sizeof(T),hipMemcpyHostToDevice); //copy hyZ to dyZ (= y2)
hipLaunchKernelGGL(( YMinusZXYDeflation), dim3(BLOCKS),dim3(THREADS_PER_BLOCK), 0, 0, dp,dyZ); // P^T*y := y -Z*y2
hipLaunchKernelGGL(( localDOTGPU<T,256>), dim3(256),dim3(dimBlockZ),256*sizeof(T), 0, drZ,drhs); // equivalent to ZTransXYDeflation
hipMemcpy(hrZ,drZ,(paramsZ.nDV+2*paramsZ.NxZ*paramsZ.NyZ)*sizeof(T),hipMemcpyDeviceToHost); // copy drZ to hrZ
#ifdef DEFLDIR
solveDC(hyZ,hrZ,L);
#else
solveICCG(hsZ,hrZ,hyZ,hpZ,hqZ,
ec,ef,es,ew,
lc,lf,ls,lw);
#endif
hipMemcpy(dyZ,hyZ,(paramsZ.nDV+2*paramsZ.NxZ*paramsZ.NyZ)*sizeof(T),hipMemcpyHostToDevice); // copy hyZ to dyZ (= y2)
hipLaunchKernelGGL(( YPlusZXYDeflation), dim3(BLOCKS),dim3(THREADS_PER_BLOCK), 0, 0, dp,dyZ); // P^T*y + Q*b := y + Z*y2
#endif
//cout << "P iter number: " << iter << endl;
//cout << "P residual at end: " << rhNew << endl;
// ********** END solve P ************
// ***** BEGIN correct P, UX, UY fields ******
hipLaunchKernelGGL(( correctUX), dim3(BLOCKS),dim3(THREADS_PER_BLOCK), 0, 0, dux,dp,dims.Nx-1,dims.Ny,dims.Nz); // ux = -dt/rho*dp/dx
hipLaunchKernelGGL(( correctUY), dim3(BLOCKS),dim3(THREADS_PER_BLOCK), 0, 0, duy,dp,dims.Nx,dims.Ny-1,dims.Nz); // uy = -dt/rho*dp/dy
hipLaunchKernelGGL(( correctUZ), dim3(BLOCKS),dim3(THREADS_PER_BLOCK), 0, 0, duz,dp,dims.Nx,dims.Ny,dims.Nz-1); // uz = -dt/rho*dp/dz
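// velocities are corrected with the pressure-correction gradient, u += -dt/rho*grad(dp);
// the accumulated pressure is under-relaxed, dpo := dpo + urfP*dp, and dp is
// reset to zero for the next outer iteration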
hipLaunchKernelGGL(( AXPY), dim3(BLOCKS),dim3(THREADS_PER_BLOCK), 0, 0, dp,dpo,(T)1.,params.urfP,dims.Nx,dims.Ny,dims.Nz); // p = urfP*p + pold
hipMemcpy(dpo, dp, sizeof(T)*dims.Nx*dims.Ny*(dims.Nz+2),hipMemcpyDeviceToDevice); // pold = p
hipMemset(dp , 0, sizeof(T)*dims.Nx*dims.Ny*(dims.Nz+2));
hipLaunchKernelGGL(( bcVelOutlet), dim3(1),dim3(100), 0, 0, dux, duy, duz, 200, 15);
// ****** END correct P, UX, UY fields *******
// ***** BEGIN check mass conservation *****
hipLaunchKernelGGL(( bp), dim3(BLOCKS),dim3(THREADS_PER_BLOCK), 0, 0, dm,dux,duy,duz,dims.Nx,dims.Ny,dims.Nz);
hipMemcpy(m, dm, sizeof(T)*dims.Nx*dims.Ny*(dims.Nz+2), hipMemcpyDeviceToHost);
// ****** END check mass conservation *******/
/*// copy back to host and save
hipMemcpy(ux, dux, sizeof(T)*(dims.Nx+2)*(dims.Ny+2)*(dims.Nz+2), hipMemcpyDeviceToHost);
hipMemcpy(uy, duy, sizeof(T)*(dims.Nx+2)*(dims.Ny+2)*(dims.Nz+2), hipMemcpyDeviceToHost);
hipMemcpy(uz, duz, sizeof(T)*(dims.Nx+2)*(dims.Ny+2)*(dims.Nz+2), hipMemcpyDeviceToHost);
hipMemcpy(p, dpo, sizeof(T)*dims.Nx * dims.Ny *(dims.Nz+2), hipMemcpyDeviceToHost);
saveDataInTime(ux, uy, uz, p, m, (T)iterSIMPLE, "testTundish");*/
}
// ************** END SIMPLE *****************
if (miter%20 == 0) {
// copy back to host and save
hipMemcpy(ux, dux, sizeof(T)*(dims.Nx+2)*(dims.Ny+2)*(dims.Nz+2), hipMemcpyDeviceToHost);
hipMemcpy(uy, duy, sizeof(T)*(dims.Nx+2)*(dims.Ny+2)*(dims.Nz+2), hipMemcpyDeviceToHost);
hipMemcpy(uz, duz, sizeof(T)*(dims.Nx+2)*(dims.Ny+2)*(dims.Nz+2), hipMemcpyDeviceToHost);
hipMemcpy(p, dpo, sizeof(T)*dims.Nx * dims.Ny *(dims.Nz+2), hipMemcpyDeviceToHost);
saveDataInTime(ux, uy, uz, p, m, (T)miter, "3Dtundish_accurate");
}
cout << "SIMPLE iter number: " << iterSIMPLE << endl;
}
cout << "simulation finished." << endl;
/*
//hipMemcpy(p, drp, sizeof(T)*(dims.Nx)*(dims.Ny+2), hipMemcpyDeviceToHost);
ofstream File;
File.open("ckeck_pw");
for (int i=0;i<dims.Nx*dims.Ny*dims.Nz;i++) {
File << pw[i] << endl;
}
File.close();*/
hipEventRecord(stop,0);
hipEventSynchronize(stop);
hipEventElapsedTime(&elapsedTime, start, stop);
hipEventDestroy(start);
hipEventDestroy(stop);
cout<< "ellapsed time (cuda): " << elapsedTime << " miliseconds" << endl;
/*// copy back to host and save
hipMemcpy(ux, dux, sizeof(T)*(dims.Nx+2)*(dims.Ny+2)*(dims.Nz+2), hipMemcpyDeviceToHost);
hipMemcpy(uy, duy, sizeof(T)*(dims.Nx+2)*(dims.Ny+2)*(dims.Nz+2), hipMemcpyDeviceToHost);
hipMemcpy(uz, duz, sizeof(T)*(dims.Nx+2)*(dims.Ny+2)*(dims.Nz+2), hipMemcpyDeviceToHost);
hipMemcpy(p, dpo, sizeof(T)*dims.Nx * dims.Ny *(dims.Nz+2), hipMemcpyDeviceToHost);
saveDataInTime(ux, uy, uz, p, m, (T)100, "testTundish-max20-all1e-2");*/
cpuFinalize(ux, uy, uz, p, m, hrh, hsg);
cudaFinalize(dux, duy, duz, dp, dm, duxo, duyo, duzo, dpo,
duxc, duxf, duxs, duxw, dkuxc, dkuxf, dkuxs, dkuxw, // Aux
drx, dqx, dzx, dpx, // Aux
duyc, duyf, duys, duyw, dkuyc, dkuyf, dkuys, dkuyw, // Auy
dry, dqy, dzy, dpy, // Auy
duzc, duzf, duzs, duzw, dkuzc, dkuzf, dkuzs, dkuzw, // Auz
drz, dqz, dzz, dpz, // Auz
dpc, dpf, dps, dpw, dkpc, dkpf, dkps, dkpw, // Ap
drp, dqp, dzp, dpp, // Ap
drh, dsg);
#ifdef DEFL
cpuFinalizeDeflation(pzc, pzf, pzs, pzw,
ec, ef, es, ew,
pc, pf, ps, pw,
lc, lf, ls, lw,
hrZ,hyZ,hqZ,hpZ,hsZ,
L);
cudaFinalizeDeflation(dpzc,dpzf,dpzs,dpzw,
dec,def,des,dew,
drZ,dyZ,dqZ,dpZ,
drhs);
#endif
return 0;
}
| 042d7de1eadef4f607177fb16d6d20af6aa491b1.cu | /*
============================================================================
Name : 3D_tundish.cu
Author : Jan Bohacek
Version :
Copyright :
Description : laminar flow in three-dimensional tundish in continuous casting
============================================================================
*/
#include <iostream>
//#include <stdio.h>
//#include <algorithm>
//#include <numeric>
#include <fstream>
#include <sstream>
#include <cstring>
//#include <ctime>
#include <math.h>
#include <iomanip>
#define DEFL
#define DEFLDIR // if commented solveICCG()
#define MIC0 // with DEFLDIR, if commented IC0 (incomplete Cholesky zero fill)
using namespace std;
typedef double T; // precision of calculation
typedef struct {
int Nx; // x-coordinate
int Ny; // y
int Nz; // z
T dx; // dx = dy = dz
} Dimensions; // dimensions of geometry
typedef struct {
int steps; // number of timesteps (-)
int maxIterSIMPLE; // maximum number of SIMPLE iterations
T CFL; // Courant number
T dt; // timestep size
T UZ; // inlet velocity
T ac; // volume of cell divided by timestep
T blocks; // for dot product
T blockSize; // -||-
T maxResU; // stopping criterion for velocity calculation
T maxResP; // pressure
T maxResSIMPLE; // SIMPLE
T urfU; // under-relaxation factor U
T urfP; // P
} Parameters; // simulation settings
typedef struct { // deflation
unsigned int NxZ;
unsigned int NyZ;
unsigned int nDV; // number of deflation vectors
unsigned int nRowsZ; // number of rows/columns for one deflation vector
T maxresZ;
} ParametersZ;
typedef struct {
T nu; // kinematic viscosity (m2/s)
T rho; // density
T cp; // specific heat
T k; // thermal conductivity
T alpha; // thermal diffusivity (m2/s)
T beta; // thermal expansion coefficient
} MaterialProperties;
// declare CPU fields
Dimensions dims;
Parameters params;
ParametersZ paramsZ;
MaterialProperties liquid;
// cache constant CUDA fields
__constant__ Dimensions d_dims;
__constant__ Parameters d_params;
__constant__ ParametersZ d_paramsZ;
__constant__ MaterialProperties d_liquid;
#include "cpuFunctions.h"
#include "cudaFunctions.h"
#include "cpuFunctionsDeflation.h"
#include "cudaFunctionsDeflation.h"
int main()
{
cout << "--flow in 3D tundish---" << endl;
// geometry
dims.Nx = 256;
dims.Ny = 64;
dims.Nz = 64;
dims.dx = 0.001;
// parameters deflation
paramsZ.nRowsZ = 16;
paramsZ.NxZ = dims.Nx/paramsZ.nRowsZ; // number of coarse cells in X
paramsZ.NyZ = dims.Ny/paramsZ.nRowsZ; // number of coarse cells in Y
paramsZ.nDV = paramsZ.NxZ * paramsZ.NyZ * dims.Nz/paramsZ.nRowsZ; // size of coarse system
paramsZ.maxresZ = 1e-8;
// parameters
params.steps = 10000;
params.CFL = 1.0;
params.UZ = -0.5;
params.dt = params.CFL * dims.dx / fabs(params.UZ);
params.ac = dims.dx*dims.dx/params.dt;
params.blocks = 256;
params.blockSize = 128;
params.maxResU = 1e-5;
params.maxResP = 1e-5;
params.maxResSIMPLE = 1e-5;
params.maxIterSIMPLE = 1;
params.urfU = 0.7;
params.urfP = 0.3;
params.maxIterSIMPLE = 20;
// material properties
liquid.nu = 0.000001; // water 1e-6 m2/s
liquid.rho = 1000;
cout << "For Courant number of " << params.CFL << " the timestep size is " << params.dt << endl;
// CPU fields
T *ux; // ux-component of velocity
T *uy; // uy
T *uz; // uz
T *p; // pressure
T *m; // mass balance
T *hrh,*hsg; // dot products
T rhNew, rhOld, sg, ap, bt;
T endIter, endIterP, rhNewSIMPLE;
int iter, iterSIMPLE;
#ifdef DEFL
// CPU fields deflation
T *pc,*pf,*ps,*pw;
T *pzc, *pzf, *pzw, *pzs;
T *ec, *ef, *es, *ew;
T *hrZ, *hyZ, *hqZ, *hpZ, *hsZ;
T *L;
T *lc,*lf,*ls,*lw;
// GPU fields deflation
T *dpzc, *dpzf, *dpzw, *dpzs;
T *dec, *def, *des, *dew;
T *drZ, *dyZ, *dqZ, *dpZ, *dsZ;
T *drhs;
#endif
// GPU fields
T *dux , *duy , *duz; // velocity components
T *duxo, *duyo, *duzo; // old values
T *dp, *dpo; // pressure and old value
T *dm; // mass balance
T *duxtemp, *duytemp, *duztemp; // pointers for swapping fields
T *duxc,*duxf,*duxs,*duxw,*dkuxc,*dkuxf,*dkuxs,*dkuxw; // Aux
T *drx,*dqx,*dzx,*dpx; // Aux
T *duyc,*duyf,*duys,*duyw, *dkuyc,*dkuyf,*dkuys,*dkuyw; // Auy
T *dry,*dqy,*dzy,*dpy; // Auy
T *duzc,*duzf,*duzs,*duzw, *dkuzc,*dkuzf,*dkuzs,*dkuzw; // Auz
T *drz,*dqz,*dzz,*dpz; // Auz
T *dpc,*dpf,*dps,*dpw,*dkpc,*dkpf,*dkps,*dkpw; // Ap
T *drp,*dqp,*dzp,*dpp; // Ap
T *drh,*dsg; // dot products
// GPU parameters
int THREADS_PER_BLOCK = 1024;
int BLOCKS = ((dims.Nx+2)*(dims.Ny+2)*(dims.Nz+2)+THREADS_PER_BLOCK-1) / THREADS_PER_BLOCK; // larger in order to have BLOCKS*THREADS_PER_BLOCK > Nx*Ny*Nz
dim3 dimBlockZ(paramsZ.nRowsZ,paramsZ.nRowsZ,1);
// taken from CUDA by example
// initialize fields
cpuInit(ux, uy, uz, p, m, hrh, hsg);
cudaInit(dux, duy, duz, dp, dm, duxo, duyo, duzo, dpo,
duxc, duxf, duxs, duxw, dkuxc, dkuxf, dkuxs, dkuxw, // Aux
drx, dqx, dzx, dpx, // Aux
duyc, duyf, duys, duyw, dkuyc, dkuyf, dkuys, dkuyw, // Auy
dry, dqy, dzy, dpy, // Auy
duzc, duzf, duzs, duzw, dkuzc, dkuzf, dkuzs, dkuzw, // Auz
drz, dqz, dzz, dpz, // Auz
dpc, dpf, dps, dpw, dkpc, dkpf, dkps, dkpw, // Ap
drp, dqp, dzp, dpp, // Ap
drh, dsg);
// patch anything to dux
//patchDux<<<BLOCKS,THREADS_PER_BLOCK>>>(dux);
// patch anything to duy
//patchDuy<<<BLOCKS,THREADS_PER_BLOCK>>>(duy);
// patch anything to duz
//patchDuz<<<BLOCKS,THREADS_PER_BLOCK>>>(duz);
/*// copy back to host and save
cudaMemcpy(ux, dux, sizeof(T)*(dims.Nx+2)*(dims.Ny+2)*(dims.Nz+2), cudaMemcpyDeviceToHost);
cudaMemcpy(uy, duy, sizeof(T)*(dims.Nx+2)*(dims.Ny+2)*(dims.Nz+2), cudaMemcpyDeviceToHost);
cudaMemcpy(uz, duz, sizeof(T)*(dims.Nx+2)*(dims.Ny+2)*(dims.Nz+2), cudaMemcpyDeviceToHost);
cudaMemcpy(p, dp, sizeof(T)*dims.Nx * dims.Ny *(dims.Nz+2), cudaMemcpyDeviceToHost);
saveDataInTime(ux, uy, uz, p, m, (T)0, "testTundish");*/
// Aux (x-component of velocity)
Aux<<<BLOCKS,THREADS_PER_BLOCK>>>(duxc, duxf, duxs, duxw);
// AuxInlet not necessary, velocity inlet condition ux=0 is the same as no slip condition at wall
AuxOutlet<<<1,100>>>(duxc,200,15);
makeTNS1<<<BLOCKS,THREADS_PER_BLOCK>>>(dkuxc,dkuxf,dkuxs,dkuxw,duxc,duxf,duxs,duxw,dims.Nx-1,dims.Ny,dims.Nz);
// Auy (y-component of velocity)
Auy<<<BLOCKS,THREADS_PER_BLOCK>>>(duyc, duyf, duys, duyw);
//AuyInlet not necessary
AuyOutlet<<<1,100>>>(duyc,200,15);
makeTNS1<<<BLOCKS,THREADS_PER_BLOCK>>>(dkuyc,dkuyf,dkuys,dkuyw,duyc,duyf,duys,duyw,dims.Nx,dims.Ny-1,dims.Nz);
// Auz (z-component of velocity)
Auz<<<BLOCKS,THREADS_PER_BLOCK>>>(duzc, duzf, duzs, duzw);
//AuzInlet not necessary
AuzOutlet<<<1,100>>>(duzc,200,15);
makeTNS1<<<BLOCKS,THREADS_PER_BLOCK>>>(dkuzc,dkuzf,dkuzs,dkuzw,duzc,duzf,duzs,duzw,dims.Nx,dims.Ny,dims.Nz-1);
// Ap (pressure)
Ap<<<BLOCKS,THREADS_PER_BLOCK>>>(dpc, dpf, dps, dpw);
ApOutlet<<<1,100>>>(dpc,200,15); // Dirichlet, p=0
makeTNS1<<<BLOCKS,THREADS_PER_BLOCK>>>(dkpc,dkpf,dkps,dkpw,dpc,dpf,dps,dpw,dims.Nx,dims.Ny,dims.Nz);
#ifdef DEFL
cpuInitDeflation(pzc, pzf, pzs, pzw,
ec, ef, es, ew,
pc, pf, ps, pw,
lc, lf, ls, lw,
hrZ,hyZ,hqZ,hpZ,hsZ,
L);
cudaMemcpy(pc,dpc,sizeof(T)*(dims.Nx*dims.Ny*dims.Nz+2*dims.Nx*dims.Ny),cudaMemcpyDeviceToHost);
cudaMemcpy(pf,dpf,sizeof(T)*(dims.Nx*dims.Ny*dims.Nz+dims.Nx*dims.Ny ),cudaMemcpyDeviceToHost);
cudaMemcpy(ps,dps,sizeof(T)*(dims.Nx*dims.Ny*dims.Nz+dims.Nx ),cudaMemcpyDeviceToHost);
cudaMemcpy(pw,dpw,sizeof(T)*(dims.Nx*dims.Ny*dims.Nz+1 ),cudaMemcpyDeviceToHost);
initAZ(pzc,pzf,pzs,pzw,pc,pf,ps,pw);
initE(ec,ef,es,ew,pc,pf,ps,pw);
cudaInitDeflation(dpzc,dpzf,dpzs,dpzw,
dec,def,des,dew,
drZ,dyZ,dqZ,dpZ,
drhs,
ec,ef,es,ew,
pzc,pzf,pzs,pzw);
#ifdef DEFLDIR
Chol(L,ec,ef,es,ew); // Cholesky factorization
#else
IChol(lc,lf,ls,lw,ec,ef,es,ew); // incomplete Cholesky factorization with zero fill (IC(0) or MIC(0))
#endif
#endif
/*for (int i=0; i<paramsZ.nDV;i++){
//cout << ec[i+paramsZ.NxZ*paramsZ.NyZ] << endl;
cout << ew[i] << endl;
}
*/
cudaEvent_t start, stop;
float elapsedTime;
cudaEventCreate(&start);
cudaEventCreate(&stop);
cudaEventRecord(start,0);
for (int miter=0; miter<params.steps; miter++) {
// boundary conditions
bcVelWallNoslip<<<BLOCKS,THREADS_PER_BLOCK>>>(dux, duy, duz); // no slip at walls
bcVelInlet<<<1,100>>>(duz, params.UZ, 50, 45); // bcVelInlet<<<1,inletWidth>>>(dux, duy, duz, velocity, first index in x, first index in y);
bcVelOutlet<<<1,100>>>(dux, duy, duz, 200, 15); // bcVelOutlet<<<1,outletwidth>>>(dux, duy, duz, first index in x, first index in y);
//swap old and new arrays for next timestep
duxtemp = duxo; duxo = dux; dux = duxtemp;
duytemp = duyo; duyo = duy; duy = duytemp;
duztemp = duzo; duzo = duz; duz = duztemp;
// advect horizontal and vertical velocity components
advectUx<<<BLOCKS,THREADS_PER_BLOCK>>>(dux, duxo, duyo, duzo);
advectUy<<<BLOCKS,THREADS_PER_BLOCK>>>(duy, duxo, duyo, duzo);
advectUz<<<BLOCKS,THREADS_PER_BLOCK>>>(duz, duxo, duyo, duzo);
bcVelWallNoslip<<<BLOCKS,THREADS_PER_BLOCK>>>(dux, duy, duz); // no slip at walls
bcVelInlet<<<1,100>>>(duz, params.UZ, 50, 45); // bcVelInlet<<<1,inletWidth>>>(dux, duy, duz, velocity, first index in x, first index in y);
bcVelOutlet<<<1,100>>>(dux, duy, duz, 200, 15); // bcVelOutlet<<<1,outletwidth>>>(dux, duy, duz, first index in x, first index in y);
cudaMemcpy(duxo, dux, sizeof(T)*(dims.Nx+2)*(dims.Ny+2)*(dims.Nz+2), cudaMemcpyDeviceToDevice);
cudaMemcpy(duyo, duy, sizeof(T)*(dims.Nx+2)*(dims.Ny+2)*(dims.Nz+2), cudaMemcpyDeviceToDevice);
cudaMemcpy(duzo, duz, sizeof(T)*(dims.Nx+2)*(dims.Ny+2)*(dims.Nz+2), cudaMemcpyDeviceToDevice);
// ************ BEGIN SIMPLE **********
iterSIMPLE = 0;
rhNewSIMPLE = 1;
/*// copy back to host and save
cudaMemcpy(ux, dux, sizeof(T)*(dims.Nx+2)*(dims.Ny+2)*(dims.Nz+2), cudaMemcpyDeviceToHost);
cudaMemcpy(uy, duy, sizeof(T)*(dims.Nx+2)*(dims.Ny+2)*(dims.Nz+2), cudaMemcpyDeviceToHost);
cudaMemcpy(uz, duz, sizeof(T)*(dims.Nx+2)*(dims.Ny+2)*(dims.Nz+2), cudaMemcpyDeviceToHost);
cudaMemcpy(p, dp, sizeof(T)*dims.Nx * dims.Ny *(dims.Nz+2), cudaMemcpyDeviceToHost);
saveDataInTime(ux, uy, uz, p, m, (T)0, "testTundish");*/
while (rhNewSIMPLE > params.maxResSIMPLE) { //(iterSIMPLE < params.maxIterSIMPLE) {
iterSIMPLE++;
// ********** BEGIN solve UX **********
duToDr<<<BLOCKS,THREADS_PER_BLOCK>>>(drx, dux,dims.Nx-1,dims.Ny,dims.Nz); // drx := dux
SpMV<<<BLOCKS,THREADS_PER_BLOCK>>>(dqx,duxc,duxf,duxs,duxw,drx,dims.Nx-1,dims.Ny,dims.Nz); // q := Aux ux
duToDr<<<BLOCKS,THREADS_PER_BLOCK>>>(drx, duxo,dims.Nx-1,dims.Ny,dims.Nz);
b<<<BLOCKS,THREADS_PER_BLOCK>>>(drx,dims.Nx-1,dims.Ny,dims.Nz); // drx := bx
// bxInlet not necessary as ux=0 there
bpx<<<BLOCKS,THREADS_PER_BLOCK>>>(drx,dpo,dims.Nx-1,dims.Ny,dims.Nz); // add grad(p) to rhs of Ax=b
AXPY<<<BLOCKS,THREADS_PER_BLOCK>>>(drx,dqx,(T)-1.,(T)1.,dims.Nx-1,dims.Ny, dims.Nz); // r = r - q
SpMV<<<BLOCKS,THREADS_PER_BLOCK>>>(dzx,dkuxc,dkuxf,dkuxs,dkuxw,drx,dims.Nx-1,dims.Ny,dims.Nz); // z = M^(-1)r
DOTGPU<T,128><<<params.blocks,params.blockSize,params.blockSize*sizeof(T)>>>(drh,drx,dzx,dims.Nx-1,dims.Ny,dims.Nz);
cudaMemcpy(hrh, drh, params.blocks*sizeof(T), cudaMemcpyDeviceToHost);
rhNew = dot(hrh,params.blocks);
//cout << "Ux residual at start: " << rhNew << endl;
endIter = rhNew * params.maxResU * params.maxResU;
iter = 0;
while (rhNew > endIter) {
iter++;
if (iter==1) {
cudaMemcpy(dpx, dzx, sizeof(T)*(dims.Nx-1)*dims.Ny*(dims.Nz+2),cudaMemcpyDeviceToDevice);
}
else {
bt = rhNew/rhOld;
AXPY<<<BLOCKS,THREADS_PER_BLOCK>>>(dpx,dzx,(T)1.,bt,dims.Nx-1,dims.Ny,dims.Nz); // p = z + beta*p
}
SpMV<<<BLOCKS,THREADS_PER_BLOCK>>>(dqx,duxc,duxf,duxs,duxw,dpx,dims.Nx-1,dims.Ny,dims.Nz); // q := Aux p
DOTGPU<T,128><<<params.blocks,params.blockSize,params.blockSize*sizeof(T)>>>(dsg, dpx, dqx, dims.Nx-1, dims.Ny,dims.Nz);
cudaMemcpy(hsg, dsg, params.blocks*sizeof(T), cudaMemcpyDeviceToHost);
sg = dot(hsg,params.blocks);
ap = rhNew/sg; // alpha = rhoNew / sigma
AXPY<<<BLOCKS,THREADS_PER_BLOCK>>>(drx,dqx,-ap,(T)1.,dims.Nx-1,dims.Ny,dims.Nz); // r = r - alpha*q
AXPY2<<<BLOCKS,THREADS_PER_BLOCK>>>(dux,dpx, ap,(T)1.,dims.Nx-1,dims.Ny,dims.Nz); // x = x + alpha*p; Note: sizeof(dux) != sizeof(dpx)
SpMV<<<BLOCKS,THREADS_PER_BLOCK>>>(dzx,dkuxc,dkuxf,dkuxs,dkuxw,drx,dims.Nx-1,dims.Ny,dims.Nz); // z = M^(-1)r
rhOld = rhNew;
DOTGPU<T,128><<<params.blocks,params.blockSize,params.blockSize*sizeof(T)>>>(drh, drx, dzx, dims.Nx-1, dims.Ny,dims.Nz);
cudaMemcpy(hrh, drh, params.blocks*sizeof(T), cudaMemcpyDeviceToHost);
rhNew = dot(hrh,params.blocks);
}
//cout << "Ux iter number: " << iter << endl;
// ********** END solve UX ************
// ********** BEGIN solve UY **********
duToDr<<<BLOCKS,THREADS_PER_BLOCK>>>(dry, duy,dims.Nx,dims.Ny-1,dims.Nz); // dry := duy
SpMV<<<BLOCKS,THREADS_PER_BLOCK>>>(dqy,duyc,duyf,duys,duyw,dry,dims.Nx,dims.Ny-1,dims.Nz); // q := Auy uy
duToDr<<<BLOCKS,THREADS_PER_BLOCK>>>(dry, duyo,dims.Nx,dims.Ny-1,dims.Nz);
b<<<BLOCKS,THREADS_PER_BLOCK>>>(dry,dims.Nx,dims.Ny-1,dims.Nz);
//byOutlet not necessary due to zero gradient condition
bpy<<<BLOCKS,THREADS_PER_BLOCK>>>(dry,dpo,dims.Nx,dims.Ny-1,dims.Nz); // add grad(p) to rhs of Ax=b
AXPY<<<BLOCKS,THREADS_PER_BLOCK>>>(dry,dqy,(T)-1.,(T)1.,dims.Nx,dims.Ny-1,dims.Nz); // r = r - q
SpMV<<<BLOCKS,THREADS_PER_BLOCK>>>(dzy,dkuyc,dkuyf,dkuys,dkuyw,dry,dims.Nx,dims.Ny-1,dims.Nz); // z = M^(-1)r
DOTGPU<T,128><<<params.blocks,params.blockSize,params.blockSize*sizeof(T)>>>(drh, dry, dzy, dims.Nx, dims.Ny-1,dims.Nz);
cudaMemcpy(hrh, drh, params.blocks*sizeof(T), cudaMemcpyDeviceToHost);
rhNew = dot(hrh,params.blocks);
//cout << "Uy residual at start: " << rhNew << endl;
endIter = rhNew * params.maxResU * params.maxResU;
iter = 0;
while (rhNew > endIter) {
iter++;
if (iter==1) {
cudaMemcpy(dpy, dzy, sizeof(T)*dims.Nx*(dims.Ny-1)*(dims.Nz+2),cudaMemcpyDeviceToDevice);
}
else {
bt = rhNew/rhOld;
AXPY<<<BLOCKS,THREADS_PER_BLOCK>>>(dpy,dzy,(T)1.,bt,dims.Nx,dims.Ny-1,dims.Nz); // p = z + beta*p
}
SpMV<<<BLOCKS,THREADS_PER_BLOCK>>>(dqy,duyc,duyf,duys,duyw,dpy,dims.Nx,dims.Ny-1,dims.Nz); // q := Auy p
DOTGPU<T,128><<<params.blocks,params.blockSize,params.blockSize*sizeof(T)>>>(dsg,dpy,dqy,dims.Nx,dims.Ny-1,dims.Nz);
cudaMemcpy(hsg, dsg, params.blocks*sizeof(T), cudaMemcpyDeviceToHost);
sg = dot(hsg,params.blocks);
ap = rhNew/sg; // alpha = rhoNew / sigma
AXPY<<<BLOCKS,THREADS_PER_BLOCK>>>(dry,dqy,-ap,(T)1.,dims.Nx,dims.Ny-1,dims.Nz); // r = r - alpha*q
AXPY2<<<BLOCKS,THREADS_PER_BLOCK>>>(duy,dpy, ap,(T)1.,dims.Nx,dims.Ny-1,dims.Nz); // x = x + alpha*p; Note: sizeof(duy) != sizeof(dpy)
SpMV<<<BLOCKS,THREADS_PER_BLOCK>>>(dzy,dkuyc,dkuyf,dkuys,dkuyw,dry,dims.Nx,dims.Ny-1,dims.Nz); // z = M^(-1)r
rhOld = rhNew;
DOTGPU<T,128><<<params.blocks,params.blockSize,params.blockSize*sizeof(T)>>>(drh, dry, dzy, dims.Nx, dims.Ny-1,dims.Nz);
cudaMemcpy(hrh, drh, params.blocks*sizeof(T), cudaMemcpyDeviceToHost);
rhNew = dot(hrh,params.blocks);
}
//cout << "Uy iter number: " << iter << endl;
// ********** END solve UY ************
// ********** BEGIN solve UZ **********
duToDr<<<BLOCKS,THREADS_PER_BLOCK>>>(drz, duz,dims.Nx,dims.Ny,dims.Nz-1); // drz := duz
SpMV<<<BLOCKS,THREADS_PER_BLOCK>>>(dqz,duzc,duzf,duzs,duzw,drz,dims.Nx,dims.Ny,dims.Nz-1); // q := Auz uz
duToDr<<<BLOCKS,THREADS_PER_BLOCK>>>(drz, duzo,dims.Nx,dims.Ny,dims.Nz-1);
b<<<BLOCKS,THREADS_PER_BLOCK>>>(drz,dims.Nx,dims.Ny,dims.Nz-1);
bzInlet<<<1,100>>>(drz, duz, 50, 45);
//bzOutlet not necessary due to zero gradient condition
bpz<<<BLOCKS,THREADS_PER_BLOCK>>>(drz,dpo,dims.Nx,dims.Ny,dims.Nz-1); // add grad(p) to rhs of Ax=b
AXPY<<<BLOCKS,THREADS_PER_BLOCK>>>(drz,dqz,(T)-1.,(T)1.,dims.Nx,dims.Ny,dims.Nz-1); // r = r - q
SpMV<<<BLOCKS,THREADS_PER_BLOCK>>>(dzz,dkuzc,dkuzf,dkuzs,dkuzw,drz,dims.Nx,dims.Ny,dims.Nz-1); // z = M^(-1)r
DOTGPU<T,128><<<params.blocks,params.blockSize,params.blockSize*sizeof(T)>>>(drh, drz, dzz, dims.Nx, dims.Ny,dims.Nz-1);
cudaMemcpy(hrh, drh, params.blocks*sizeof(T), cudaMemcpyDeviceToHost);
rhNew = dot(hrh,params.blocks);
//cout << "Uz residual at start: " << rhNew << endl;
endIter = rhNew * params.maxResU * params.maxResU;
iter = 0;
while (rhNew > endIter) {
iter++;
if (iter==1) {
cudaMemcpy(dpz, dzz, sizeof(T)*dims.Nx*dims.Ny*(dims.Nz+1),cudaMemcpyDeviceToDevice);
}
else {
bt = rhNew/rhOld;
AXPY<<<BLOCKS,THREADS_PER_BLOCK>>>(dpz,dzz,(T)1.,bt,dims.Nx,dims.Ny,dims.Nz-1); // p = z + beta*p
}
SpMV<<<BLOCKS,THREADS_PER_BLOCK>>>(dqz,duzc,duzf,duzs,duzw,dpz,dims.Nx,dims.Ny,dims.Nz-1); // q := Auz p
DOTGPU<T,128><<<params.blocks,params.blockSize,params.blockSize*sizeof(T)>>>(dsg,dpz,dqz,dims.Nx,dims.Ny,dims.Nz-1);
cudaMemcpy(hsg, dsg, params.blocks*sizeof(T), cudaMemcpyDeviceToHost);
sg = dot(hsg,params.blocks);
ap = rhNew/sg; // alpha = rhoNew / sigma
AXPY<<<BLOCKS,THREADS_PER_BLOCK>>>(drz,dqz,-ap,(T)1.,dims.Nx,dims.Ny,dims.Nz-1); // r = r - alpha*q
AXPY2<<<BLOCKS,THREADS_PER_BLOCK>>>(duz,dpz, ap,(T)1.,dims.Nx,dims.Ny,dims.Nz-1); // x = x + alpha*p; Note: sizeof(duz) != sizeof(dpz)
SpMV<<<BLOCKS,THREADS_PER_BLOCK>>>(dzz,dkuzc,dkuzf,dkuzs,dkuzw,drz,dims.Nx,dims.Ny,dims.Nz-1); // z = M^(-1)r
rhOld = rhNew;
DOTGPU<T,128><<<params.blocks,params.blockSize,params.blockSize*sizeof(T)>>>(drh, drz, dzz, dims.Nx, dims.Ny,dims.Nz-1);
cudaMemcpy(hrh, drh, params.blocks*sizeof(T), cudaMemcpyDeviceToHost);
rhNew = dot(hrh,params.blocks);
}
//cout << "Uz iter number: " << iter << endl;
// ********** END solve UZ ************
// update velocity at boundary
bcVelOutlet<<<1,100>>>(dux, duy, duz, 200, 15);
// ********** BEGIN solve P ***********
// The finite volume method in computational fluid dynamics, F. Moukalled, L. Mangani, M. Darwish
// Patankar's SIMPLE
cudaMemcpy(drp,dp,sizeof(T)*dims.Nx*dims.Ny*(dims.Nz+2),cudaMemcpyDeviceToDevice);
SpMV<<<BLOCKS,THREADS_PER_BLOCK>>>(dqp,dpc,dpf,dps,dpw,drp,dims.Nx,dims.Ny,dims.Nz); // q := Ap p
bp<<<BLOCKS,THREADS_PER_BLOCK>>>(drp,dux,duy,duz,dims.Nx,dims.Ny,dims.Nz); // should become at convergence == zero correction field
#ifdef DEFL // store rhs (b) for correction
cudaMemcpy(drhs,drp,sizeof(T)*dims.Nx*dims.Ny*(dims.Nz+2),cudaMemcpyDeviceToDevice);
#endif
DOTGPU<T,128><<<params.blocks,params.blockSize,params.blockSize*sizeof(T)>>>(drh,drp,drp,dims.Nx,dims.Ny,dims.Nz);
cudaMemcpy(hrh, drh, params.blocks*sizeof(T), cudaMemcpyDeviceToHost);
rhNewSIMPLE = dot(hrh,params.blocks);
AXPY<<<BLOCKS,THREADS_PER_BLOCK>>>(drp,dqp,(T)-1.,(T)1.,dims.Nx,dims.Ny,dims.Nz); // r = r - q
#ifdef DEFL
localDOTGPU<T,256><<<256,dimBlockZ,256*sizeof(T)>>>(drZ,drp); // equivalent to ZTransXYDeflation
cudaMemcpy(hrZ,drZ,(paramsZ.nDV+2*paramsZ.NxZ*paramsZ.NyZ)*sizeof(T),cudaMemcpyDeviceToHost); // copy drZ to hrZ
#ifdef DEFLDIR
solveDC(hyZ,hrZ,L);
#else
solveICCG(hsZ,hrZ,hyZ,hpZ,hqZ,
ec,ef,es,ew,
lc,lf,ls,lw);
#endif
cudaMemcpy(dyZ,hyZ,(paramsZ.nDV+2*paramsZ.NxZ*paramsZ.NyZ)*sizeof(T),cudaMemcpyHostToDevice); //copy hyZ to dyZ
YMinusAzXYDeflation<<<BLOCKS,THREADS_PER_BLOCK>>>(drp,dyZ,dpzc,dpzf,dpzs,dpzw); // r = P*r
//cout << "stopped" << endl;
//break;
#endif
SpMV<<<BLOCKS,THREADS_PER_BLOCK>>>(dzp,dkpc,dkpf,dkps,dkpw,drp,dims.Nx,dims.Ny,dims.Nz); // z = M^(-1)r
DOTGPU<T,128><<<params.blocks,params.blockSize,params.blockSize*sizeof(T)>>>(drh,drp,dzp,dims.Nx,dims.Ny,dims.Nz);
cudaMemcpy(hrh, drh, params.blocks*sizeof(T), cudaMemcpyDeviceToHost);
rhNew = dot(hrh,params.blocks);
//cout << "P residual at start: " << rhNew << endl;
if (iterSIMPLE==1) endIterP = rhNew * params.maxResP * params.maxResP;
iter = 0;
while (rhNew > endIterP) { //(iter<8) {
iter++;
//cout << "iteration:" << iter << ", residual: " << setprecision(11) << rhNew << endl;
if (iter==1) {
cudaMemcpy(dpp, dzp, sizeof(T)*dims.Nx*dims.Ny*(dims.Nz+2),cudaMemcpyDeviceToDevice);
}
else {
bt = rhNew/rhOld;
AXPY<<<BLOCKS,THREADS_PER_BLOCK>>>(dpp,dzp,(T)1.,bt,dims.Nx,dims.Ny,dims.Nz); // p = z + beta*p
}
SpMV<<<BLOCKS,THREADS_PER_BLOCK>>>(dqp,dpc,dpf,dps,dpw,dpp,dims.Nx,dims.Ny,dims.Nz); // q := Ap p
#ifdef DEFL
localDOTGPU<T,256><<<256,dimBlockZ,256*sizeof(T)>>>(drZ,dqp); // equivalent to ZTransXYDeflation
cudaMemcpy(hrZ,drZ,(paramsZ.nDV+2*paramsZ.NxZ*paramsZ.NyZ)*sizeof(T),cudaMemcpyDeviceToHost); // copy drZ to hrZ
#ifdef DEFLDIR
solveDC(hyZ,hrZ,L);
#else
solveICCG(hsZ,hrZ,hyZ,hpZ,hqZ,
ec,ef,es,ew,
lc,lf,ls,lw);
#endif
cudaMemcpy(dyZ,hyZ,(paramsZ.nDV+2*paramsZ.NxZ*paramsZ.NyZ)*sizeof(T),cudaMemcpyHostToDevice); //copy hyZ to dyZ
YMinusAzXYDeflation<<<BLOCKS,THREADS_PER_BLOCK>>>(dqp,dyZ,dpzc,dpzf,dpzs,dpzw); // r = P*r
#endif
DOTGPU<T,128><<<params.blocks,params.blockSize,params.blockSize*sizeof(T)>>>(dsg,dpp,dqp,dims.Nx,dims.Ny,dims.Nz);
cudaMemcpy(hsg, dsg, params.blocks*sizeof(T), cudaMemcpyDeviceToHost);
sg = dot(hsg,params.blocks);
ap = rhNew/sg; // alpha = rhoNew / sigma
AXPY<<<BLOCKS,THREADS_PER_BLOCK>>>(drp,dqp,-ap,(T)1.,dims.Nx,dims.Ny,dims.Nz); // r = r - alpha*q
AXPY<<<BLOCKS,THREADS_PER_BLOCK>>>(dp ,dpp, ap,(T)1.,dims.Nx,dims.Ny,dims.Nz); // x = x + alpha*p
SpMV<<<BLOCKS,THREADS_PER_BLOCK>>>(dzp,dkpc,dkpf,dkps,dkpw,drp,dims.Nx,dims.Ny,dims.Nz); // z = M^(-1)r
rhOld = rhNew;
DOTGPU<T,128><<<params.blocks,params.blockSize,params.blockSize*sizeof(T)>>>(drh,drp,dzp,dims.Nx,dims.Ny,dims.Nz);
cudaMemcpy(hrh, drh, params.blocks*sizeof(T), cudaMemcpyDeviceToHost);
rhNew = dot(hrh,params.blocks);
}
#ifdef DEFL // y:= Q*b + P^T*y
// P^T * y
SpMV<<<BLOCKS,THREADS_PER_BLOCK>>>(dqp,dpc,dpf,dps,dpw,dp,dims.Nx,dims.Ny,dims.Nz);
localDOTGPU<T,256><<<256,dimBlockZ,256*sizeof(T)>>>(drZ,dqp); // equivalent to ZTransXYDeflation
cudaMemcpy(hrZ,drZ,(paramsZ.nDV+2*paramsZ.NxZ*paramsZ.NyZ)*sizeof(T),cudaMemcpyDeviceToHost); // copy drZ to hrZ
#ifdef DEFLDIR
solveDC(hyZ,hrZ,L);
#else
solveICCG(hsZ,hrZ,hyZ,hpZ,hqZ,
ec,ef,es,ew,
lc,lf,ls,lw);
#endif
cudaMemcpy(dyZ,hyZ,(paramsZ.nDV+2*paramsZ.NxZ*paramsZ.NyZ)*sizeof(T),cudaMemcpyHostToDevice); //copy hyZ to dyZ (= y2)
YMinusZXYDeflation<<<BLOCKS,THREADS_PER_BLOCK>>>(dp,dyZ); // P^T*y := y -Z*y2
localDOTGPU<T,256><<<256,dimBlockZ,256*sizeof(T)>>>(drZ,drhs); // equivalent to ZTransXYDeflation
cudaMemcpy(hrZ,drZ,(paramsZ.nDV+2*paramsZ.NxZ*paramsZ.NyZ)*sizeof(T),cudaMemcpyDeviceToHost); // copy drZ to hrZ
#ifdef DEFLDIR
solveDC(hyZ,hrZ,L);
#else
solveICCG(hsZ,hrZ,hyZ,hpZ,hqZ,
ec,ef,es,ew,
lc,lf,ls,lw);
#endif
cudaMemcpy(dyZ,hyZ,(paramsZ.nDV+2*paramsZ.NxZ*paramsZ.NyZ)*sizeof(T),cudaMemcpyHostToDevice); // copy hyZ to dyZ (= y2)
YPlusZXYDeflation<<<BLOCKS,THREADS_PER_BLOCK>>>(dp,dyZ); // P^T*y + Q*b := y + Z*y2
#endif
//cout << "P iter number: " << iter << endl;
//cout << "P residual at end: " << rhNew << endl;
// ********** END solve P ************
// ***** BEGIN correct P, UX, UY fields ******
correctUX<<<BLOCKS,THREADS_PER_BLOCK>>>(dux,dp,dims.Nx-1,dims.Ny,dims.Nz); // ux = -dt/rho*dp/dx
correctUY<<<BLOCKS,THREADS_PER_BLOCK>>>(duy,dp,dims.Nx,dims.Ny-1,dims.Nz); // uy = -dt/rho*dp/dy
correctUZ<<<BLOCKS,THREADS_PER_BLOCK>>>(duz,dp,dims.Nx,dims.Ny,dims.Nz-1); // uz = -dt/rho*dp/dz
AXPY<<<BLOCKS,THREADS_PER_BLOCK>>>(dp,dpo,(T)1.,params.urfP,dims.Nx,dims.Ny,dims.Nz); // p = urfP*p + pold
cudaMemcpy(dpo, dp, sizeof(T)*dims.Nx*dims.Ny*(dims.Nz+2),cudaMemcpyDeviceToDevice); // pold = p
cudaMemset(dp , 0, sizeof(T)*dims.Nx*dims.Ny*(dims.Nz+2));
bcVelOutlet<<<1,100>>>(dux, duy, duz, 200, 15);
// ****** END correct P, UX, UY fields *******
// ***** BEGIN check mass conservation *****
bp<<<BLOCKS,THREADS_PER_BLOCK>>>(dm,dux,duy,duz,dims.Nx,dims.Ny,dims.Nz);
cudaMemcpy(m, dm, sizeof(T)*dims.Nx*dims.Ny*(dims.Nz+2), cudaMemcpyDeviceToHost);
	// ****** END check mass conservation *******
/*// copy back to host and save
cudaMemcpy(ux, dux, sizeof(T)*(dims.Nx+2)*(dims.Ny+2)*(dims.Nz+2), cudaMemcpyDeviceToHost);
cudaMemcpy(uy, duy, sizeof(T)*(dims.Nx+2)*(dims.Ny+2)*(dims.Nz+2), cudaMemcpyDeviceToHost);
cudaMemcpy(uz, duz, sizeof(T)*(dims.Nx+2)*(dims.Ny+2)*(dims.Nz+2), cudaMemcpyDeviceToHost);
cudaMemcpy(p, dpo, sizeof(T)*dims.Nx * dims.Ny *(dims.Nz+2), cudaMemcpyDeviceToHost);
saveDataInTime(ux, uy, uz, p, m, (T)iterSIMPLE, "testTundish");*/
}
// ************** END SIMPLE *****************
if (miter%20 == 0) {
// copy back to host and save
cudaMemcpy(ux, dux, sizeof(T)*(dims.Nx+2)*(dims.Ny+2)*(dims.Nz+2), cudaMemcpyDeviceToHost);
cudaMemcpy(uy, duy, sizeof(T)*(dims.Nx+2)*(dims.Ny+2)*(dims.Nz+2), cudaMemcpyDeviceToHost);
cudaMemcpy(uz, duz, sizeof(T)*(dims.Nx+2)*(dims.Ny+2)*(dims.Nz+2), cudaMemcpyDeviceToHost);
cudaMemcpy(p, dpo, sizeof(T)*dims.Nx * dims.Ny *(dims.Nz+2), cudaMemcpyDeviceToHost);
saveDataInTime(ux, uy, uz, p, m, (T)miter, "3Dtundish_accurate");
}
cout << "SIMPLE iter number: " << iterSIMPLE << endl;
}
cout << "simulation finished." << endl;
/*
//cudaMemcpy(p, drp, sizeof(T)*(dims.Nx)*(dims.Ny+2), cudaMemcpyDeviceToHost);
ofstream File;
File.open("ckeck_pw");
for (int i=0;i<dims.Nx*dims.Ny*dims.Nz;i++) {
File << pw[i] << endl;
}
File.close();*/
cudaEventRecord(stop,0);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&elapsedTime, start, stop);
cudaEventDestroy(start);
cudaEventDestroy(stop);
cout<< "ellapsed time (cuda): " << elapsedTime << " miliseconds" << endl;
/*// copy back to host and save
cudaMemcpy(ux, dux, sizeof(T)*(dims.Nx+2)*(dims.Ny+2)*(dims.Nz+2), cudaMemcpyDeviceToHost);
cudaMemcpy(uy, duy, sizeof(T)*(dims.Nx+2)*(dims.Ny+2)*(dims.Nz+2), cudaMemcpyDeviceToHost);
cudaMemcpy(uz, duz, sizeof(T)*(dims.Nx+2)*(dims.Ny+2)*(dims.Nz+2), cudaMemcpyDeviceToHost);
cudaMemcpy(p, dpo, sizeof(T)*dims.Nx * dims.Ny *(dims.Nz+2), cudaMemcpyDeviceToHost);
saveDataInTime(ux, uy, uz, p, m, (T)100, "testTundish-max20-all1e-2");*/
cpuFinalize(ux, uy, uz, p, m, hrh, hsg);
cudaFinalize(dux, duy, duz, dp, dm, duxo, duyo, duzo, dpo,
duxc, duxf, duxs, duxw, dkuxc, dkuxf, dkuxs, dkuxw, // Aux
drx, dqx, dzx, dpx, // Aux
duyc, duyf, duys, duyw, dkuyc, dkuyf, dkuys, dkuyw, // Auy
dry, dqy, dzy, dpy, // Auy
duzc, duzf, duzs, duzw, dkuzc, dkuzf, dkuzs, dkuzw, // Auz
drz, dqz, dzz, dpz, // Auz
dpc, dpf, dps, dpw, dkpc, dkpf, dkps, dkpw, // Ap
drp, dqp, dzp, dpp, // Ap
drh, dsg);
#ifdef DEFL
cpuFinalizeDeflation(pzc, pzf, pzs, pzw,
ec, ef, es, ew,
pc, pf, ps, pw,
lc, lf, ls, lw,
hrZ,hyZ,hqZ,hpZ,hsZ,
L);
cudaFinalizeDeflation(dpzc,dpzf,dpzs,dpzw,
dec,def,des,dew,
drZ,dyZ,dqZ,dpZ,
drhs);
#endif
return 0;
}
|
f5760c183bb871440ae564aed5e1b1f7bce4f93e.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include "gputimer.h"
const int N= 1024; // matrix size is NxN
const int K= 32; // tile size is KxK
// Utility functions: compare, print, and fill matrices
#define checkCudaErrors(val) check( (val), #val, __FILE__, __LINE__)
template<typename T>
void check(T err, const char* const func, const char* const file, const int line)
{
if (err != hipSuccess) {
fprintf(stderr, "CUDA error at: %s : %d\n", file,line);
fprintf(stderr, "%s %s\n", hipGetErrorString(err), func);;
exit(1);
}
}
int compare_matrices(float *gpu, float *ref)
{
int result = 0;
for(int j=0; j < N; j++)
for(int i=0; i < N; i++)
if (ref[i + j*N] != gpu[i + j*N])
{
			// printf("reference(%d,%d) = %f but gpu(%d,%d) = %f\n",
			//        i,j,ref[i+j*N],i,j,gpu[i+j*N]);
result = 1;
}
return result;
}
void print_matrix(float *mat)
{
for(int j=0; j < N; j++)
{
for(int i=0; i < N; i++) { printf("%4.4g ", mat[i + j*N]); }
printf("\n");
}
}
// fill a matrix with sequential numbers in the range 0..N-1
void fill_matrix(float *mat)
{
for(int j=0; j < N * N; j++)
mat[j] = (float) j;
}
void
transpose_CPU(float in[], float out[])
{
for(int j=0; j < N; j++)
for(int i=0; i < N; i++)
out[j + i*N] = in[i + j*N]; // out(j,i) = in(i,j)
}
// to be launched on a single thread
__global__ void
transpose_serial(float in[], float out[])
{
for(int j=0; j < N; j++)
for(int i=0; i < N; i++)
out[j + i*N] = in[i + j*N]; // out(j,i) = in(i,j)
}
// to be launched with one thread per row of output matrix
__global__ void
transpose_parallel_per_row(float in[], float out[])
{
int i = threadIdx.x;
for(int j=0; j < N; j++)
out[j + i*N] = in[i + j*N]; // out(j,i) = in(i,j)
}
// to be launched with one thread per element, in KxK threadblocks
// thread (x,y) in grid writes element (i,j) of output matrix
__global__ void
transpose_parallel_per_element(float in[], float out[])
{
int i = blockIdx.x * K + threadIdx.x;
int j = blockIdx.y * K + threadIdx.y;
out[j + i*N] = in[i + j*N]; // out(j,i) = in(i,j)
}
// to be launched with one thread per element, in (tilesize)x(tilesize) threadblocks
// thread blocks read & write tiles, in coalesced fashion
// adjacent threads read adjacent input elements, write adjacent output elmts
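// Illustrative example (assuming K = 32): the block at (blockIdx.x, blockIdx.y) = (1, 2) reads the
// input tile whose (i, j) corner is (32, 64) and writes the output tile whose corner is (64, 32);
// in both the read and the write, threadIdx.x walks the contiguous index, so accesses stay coalesced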
__global__ void
transpose_parallel_per_element_tiled(float in[], float out[])
{
// (i,j) locations of the tile corners for input & output matrices:
int in_corner_i = blockIdx.x * K, in_corner_j = blockIdx.y * K;
int out_corner_i = blockIdx.y * K, out_corner_j = blockIdx.x * K;
int x = threadIdx.x, y = threadIdx.y;
__shared__ float tile[K][K];
// coalesced read from global mem, TRANSPOSED write into shared mem:
tile[y][x] = in[(in_corner_i + x) + (in_corner_j + y)*N];
__syncthreads();
// read from shared mem, coalesced write to global mem:
out[(out_corner_i + x) + (out_corner_j + y)*N] = tile[x][y];
}
// to be launched with one thread per element, in (tilesize)x(tilesize) threadblocks
// thread blocks read & write tiles, in coalesced fashion
// adjacent threads read adjacent input elements, write adjacent output elmts
__global__ void
transpose_parallel_per_element_tiled16(float in[], float out[])
{
// (i,j) locations of the tile corners for input & output matrices:
int in_corner_i = blockIdx.x * 16, in_corner_j = blockIdx.y * 16;
int out_corner_i = blockIdx.y * 16, out_corner_j = blockIdx.x * 16;
int x = threadIdx.x, y = threadIdx.y;
__shared__ float tile[16][16];
// coalesced read from global mem, TRANSPOSED write into shared mem:
tile[y][x] = in[(in_corner_i + x) + (in_corner_j + y)*N];
__syncthreads();
// read from shared mem, coalesced write to global mem:
out[(out_corner_i + x) + (out_corner_j + y)*N] = tile[x][y];
}
// to be launched with one thread per element, in KxK threadblocks
// thread blocks read & write tiles, in coalesced fashion
// shared memory array padded to avoid bank conflicts
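// Why the +1 pad matters (illustrative, assuming 32 four-byte shared-memory banks): with tile[K][K]
// and K = 32, the read tile[x][y] below makes consecutive threads touch words 32 apart, i.e. the
// same bank; a row pitch of K+1 shifts successive rows by one bank, so the column read is conflict-free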
__global__ void
transpose_parallel_per_element_tiled_padded(float in[], float out[])
{
// (i,j) locations of the tile corners for input & output matrices:
int in_corner_i = blockIdx.x * K, in_corner_j = blockIdx.y * K;
int out_corner_i = blockIdx.y * K, out_corner_j = blockIdx.x * K;
int x = threadIdx.x, y = threadIdx.y;
__shared__ float tile[K][K+1];
// coalesced read from global mem, TRANSPOSED write into shared mem:
tile[y][x] = in[(in_corner_i + x) + (in_corner_j + y)*N];
__syncthreads();
// read from shared mem, coalesced write to global mem:
out[(out_corner_i + x) + (out_corner_j + y)*N] = tile[x][y];
}
// to be launched with one thread per element, in KxK threadblocks
// thread blocks read & write tiles, in coalesced fashion
// shared memory array padded to avoid bank conflicts
__global__ void
transpose_parallel_per_element_tiled_padded16(float in[], float out[])
{
// (i,j) locations of the tile corners for input & output matrices:
int in_corner_i = blockIdx.x * 16, in_corner_j = blockIdx.y * 16;
int out_corner_i = blockIdx.y * 16, out_corner_j = blockIdx.x * 16;
int x = threadIdx.x, y = threadIdx.y;
__shared__ float tile[16][16+1];
// coalesced read from global mem, TRANSPOSED write into shared mem:
tile[y][x] = in[(in_corner_i + x) + (in_corner_j + y)*N];
__syncthreads();
// read from shared mem, coalesced write to global mem:
out[(out_corner_i + x) + (out_corner_j + y)*N] = tile[x][y];
}
int main(int argc, char **argv)
{
int numbytes = N * N * sizeof(float);
float *in = (float *) malloc(numbytes);
float *out = (float *) malloc(numbytes);
float *gold = (float *) malloc(numbytes);
fill_matrix(in);
transpose_CPU(in, gold);
float *d_in, *d_out;
hipMalloc(&d_in, numbytes);
hipMalloc(&d_out, numbytes);
hipMemcpy(d_in, in, numbytes, hipMemcpyHostToDevice);
GpuTimer timer;
/*
* Now time each kernel and verify that it produces the correct result.
*
 * To be really careful for benchmarking purposes, we should run every kernel once
* to "warm" the system and avoid any compilation or code-caching effects, then run
* every kernel 10 or 100 times and average the timings to smooth out any variance.
* But this makes for messy code and our goal is teaching, not detailed benchmarking.
*/
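	/*
	 * A minimal sketch of that more careful timing scheme (not used here), assuming the same
	 * GpuTimer interface and the blocks/threads launch dimensions defined below:
	 *
	 *   hipLaunchKernelGGL(transpose_parallel_per_element, blocks, threads, 0, 0, d_in, d_out); // warm-up
	 *   hipDeviceSynchronize();
	 *   timer.Start();
	 *   for (int rep = 0; rep < 100; ++rep)
	 *     hipLaunchKernelGGL(transpose_parallel_per_element, blocks, threads, 0, 0, d_in, d_out);
	 *   timer.Stop();
	 *   printf("average per launch: %g ms\n", timer.Elapsed() / 100);
	 */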
/*
timer.Start();
transpose_serial<<<1,1>>>(d_in, d_out);
timer.Stop();
hipMemcpy(out, d_out, numbytes, hipMemcpyDeviceToHost);
printf("transpose_serial: %g ms.\nVerifying transpose...%s\n",
timer.Elapsed(), compare_matrices(out, gold) ? "Failed" : "Success");
timer.Start();
transpose_parallel_per_row<<<1,N>>>(d_in, d_out);
timer.Stop();
hipMemcpy(out, d_out, numbytes, hipMemcpyDeviceToHost);
printf("transpose_parallel_per_row: %g ms.\nVerifying transpose...%s\n",
timer.Elapsed(), compare_matrices(out, gold) ? "Failed" : "Success");
*/
dim3 blocks(N/K,N/K); // blocks per grid
dim3 threads(K,K); // threads per block
	timer.Start();
hipLaunchKernelGGL(( transpose_parallel_per_element), dim3(blocks),dim3(threads), 0, 0, d_in, d_out);
	timer.Stop();
hipMemcpy(out, d_out, numbytes, hipMemcpyDeviceToHost);
printf("transpose_parallel_per_element: %g ms.\nVerifying transpose...%s\n",
timer.Elapsed(), compare_matrices(out, gold) ? "Failed" : "Success");
/*
timer.Start();
transpose_parallel_per_element_tiled<<<blocks,threads>>>(d_in, d_out);
timer.Stop();
hipMemcpy(out, d_out, numbytes, hipMemcpyDeviceToHost);
printf("transpose_parallel_per_element_tiled %dx%d: %g ms.\nVerifying ...%s\n",
K, K, timer.Elapsed(), compare_matrices(out, gold) ? "Failed" : "Success");
dim3 blocks16x16(N/16,N/16); // blocks per grid
dim3 threads16x16(16,16); // threads per block
timer.Start();
transpose_parallel_per_element_tiled16<<<blocks16x16,threads16x16>>>(d_in, d_out);
timer.Stop();
hipMemcpy(out, d_out, numbytes, hipMemcpyDeviceToHost);
printf("transpose_parallel_per_element_tiled 16x16: %g ms.\nVerifying ...%s\n",
timer.Elapsed(), compare_matrices(out, gold) ? "Failed" : "Success");
timer.Start();
transpose_parallel_per_element_tiled_padded16<<<blocks16x16,threads16x16>>>(d_in, d_out);
timer.Stop();
hipMemcpy(out, d_out, numbytes, hipMemcpyDeviceToHost);
printf("transpose_parallel_per_element_tiled_padded 16x16: %g ms.\nVerifying...%s\n",
timer.Elapsed(), compare_matrices(out, gold) ? "Failed" : "Success");
*/
hipFree(d_in);
hipFree(d_out);
}
| f5760c183bb871440ae564aed5e1b1f7bce4f93e.cu | #include <stdio.h>
#include "gputimer.h"
const int N= 1024; // matrix size is NxN
const int K= 32; // tile size is KxK
// Utility functions: compare, print, and fill matrices
#define checkCudaErrors(val) check( (val), #val, __FILE__, __LINE__)
template<typename T>
void check(T err, const char* const func, const char* const file, const int line)
{
if (err != cudaSuccess) {
fprintf(stderr, "CUDA error at: %s : %d\n", file,line);
fprintf(stderr, "%s %s\n", cudaGetErrorString(err), func);;
exit(1);
}
}
int compare_matrices(float *gpu, float *ref)
{
int result = 0;
for(int j=0; j < N; j++)
for(int i=0; i < N; i++)
if (ref[i + j*N] != gpu[i + j*N])
{
			// printf("reference(%d,%d) = %f but gpu(%d,%d) = %f\n",
			//        i,j,ref[i+j*N],i,j,gpu[i+j*N]);
result = 1;
}
return result;
}
void print_matrix(float *mat)
{
for(int j=0; j < N; j++)
{
for(int i=0; i < N; i++) { printf("%4.4g ", mat[i + j*N]); }
printf("\n");
}
}
// fill a matrix with sequential numbers in the range 0..N-1
void fill_matrix(float *mat)
{
for(int j=0; j < N * N; j++)
mat[j] = (float) j;
}
void
transpose_CPU(float in[], float out[])
{
for(int j=0; j < N; j++)
for(int i=0; i < N; i++)
out[j + i*N] = in[i + j*N]; // out(j,i) = in(i,j)
}
// to be launched on a single thread
__global__ void
transpose_serial(float in[], float out[])
{
for(int j=0; j < N; j++)
for(int i=0; i < N; i++)
out[j + i*N] = in[i + j*N]; // out(j,i) = in(i,j)
}
// to be launched with one thread per row of output matrix
__global__ void
transpose_parallel_per_row(float in[], float out[])
{
int i = threadIdx.x;
for(int j=0; j < N; j++)
out[j + i*N] = in[i + j*N]; // out(j,i) = in(i,j)
}
// to be launched with one thread per element, in KxK threadblocks
// thread (x,y) in grid writes element (i,j) of output matrix
__global__ void
transpose_parallel_per_element(float in[], float out[])
{
int i = blockIdx.x * K + threadIdx.x;
int j = blockIdx.y * K + threadIdx.y;
out[j + i*N] = in[i + j*N]; // out(j,i) = in(i,j)
}
// to be launched with one thread per element, in (tilesize)x(tilesize) threadblocks
// thread blocks read & write tiles, in coalesced fashion
// adjacent threads read adjacent input elements, write adjacent output elmts
__global__ void
transpose_parallel_per_element_tiled(float in[], float out[])
{
// (i,j) locations of the tile corners for input & output matrices:
int in_corner_i = blockIdx.x * K, in_corner_j = blockIdx.y * K;
int out_corner_i = blockIdx.y * K, out_corner_j = blockIdx.x * K;
int x = threadIdx.x, y = threadIdx.y;
__shared__ float tile[K][K];
// coalesced read from global mem, TRANSPOSED write into shared mem:
tile[y][x] = in[(in_corner_i + x) + (in_corner_j + y)*N];
__syncthreads();
// read from shared mem, coalesced write to global mem:
out[(out_corner_i + x) + (out_corner_j + y)*N] = tile[x][y];
}
// to be launched with one thread per element, in (tilesize)x(tilesize) threadblocks
// thread blocks read & write tiles, in coalesced fashion
// adjacent threads read adjacent input elements, write adjacent output elmts
__global__ void
transpose_parallel_per_element_tiled16(float in[], float out[])
{
// (i,j) locations of the tile corners for input & output matrices:
int in_corner_i = blockIdx.x * 16, in_corner_j = blockIdx.y * 16;
int out_corner_i = blockIdx.y * 16, out_corner_j = blockIdx.x * 16;
int x = threadIdx.x, y = threadIdx.y;
__shared__ float tile[16][16];
// coalesced read from global mem, TRANSPOSED write into shared mem:
tile[y][x] = in[(in_corner_i + x) + (in_corner_j + y)*N];
__syncthreads();
// read from shared mem, coalesced write to global mem:
out[(out_corner_i + x) + (out_corner_j + y)*N] = tile[x][y];
}
// to be launched with one thread per element, in KxK threadblocks
// thread blocks read & write tiles, in coalesced fashion
// shared memory array padded to avoid bank conflicts
__global__ void
transpose_parallel_per_element_tiled_padded(float in[], float out[])
{
// (i,j) locations of the tile corners for input & output matrices:
int in_corner_i = blockIdx.x * K, in_corner_j = blockIdx.y * K;
int out_corner_i = blockIdx.y * K, out_corner_j = blockIdx.x * K;
int x = threadIdx.x, y = threadIdx.y;
__shared__ float tile[K][K+1];
// coalesced read from global mem, TRANSPOSED write into shared mem:
tile[y][x] = in[(in_corner_i + x) + (in_corner_j + y)*N];
__syncthreads();
// read from shared mem, coalesced write to global mem:
out[(out_corner_i + x) + (out_corner_j + y)*N] = tile[x][y];
}
// to be launched with one thread per element, in KxK threadblocks
// thread blocks read & write tiles, in coalesced fashion
// shared memory array padded to avoid bank conflicts
__global__ void
transpose_parallel_per_element_tiled_padded16(float in[], float out[])
{
// (i,j) locations of the tile corners for input & output matrices:
int in_corner_i = blockIdx.x * 16, in_corner_j = blockIdx.y * 16;
int out_corner_i = blockIdx.y * 16, out_corner_j = blockIdx.x * 16;
int x = threadIdx.x, y = threadIdx.y;
__shared__ float tile[16][16+1];
// coalesced read from global mem, TRANSPOSED write into shared mem:
tile[y][x] = in[(in_corner_i + x) + (in_corner_j + y)*N];
__syncthreads();
// read from shared mem, coalesced write to global mem:
out[(out_corner_i + x) + (out_corner_j + y)*N] = tile[x][y];
}
int main(int argc, char **argv)
{
int numbytes = N * N * sizeof(float);
float *in = (float *) malloc(numbytes);
float *out = (float *) malloc(numbytes);
float *gold = (float *) malloc(numbytes);
fill_matrix(in);
transpose_CPU(in, gold);
float *d_in, *d_out;
cudaMalloc(&d_in, numbytes);
cudaMalloc(&d_out, numbytes);
cudaMemcpy(d_in, in, numbytes, cudaMemcpyHostToDevice);
GpuTimer timer;
/*
* Now time each kernel and verify that it produces the correct result.
*
 * To be really careful for benchmarking purposes, we should run every kernel once
* to "warm" the system and avoid any compilation or code-caching effects, then run
* every kernel 10 or 100 times and average the timings to smooth out any variance.
* But this makes for messy code and our goal is teaching, not detailed benchmarking.
*/
/*
timer.Start();
transpose_serial<<<1,1>>>(d_in, d_out);
timer.Stop();
cudaMemcpy(out, d_out, numbytes, cudaMemcpyDeviceToHost);
printf("transpose_serial: %g ms.\nVerifying transpose...%s\n",
timer.Elapsed(), compare_matrices(out, gold) ? "Failed" : "Success");
timer.Start();
transpose_parallel_per_row<<<1,N>>>(d_in, d_out);
timer.Stop();
cudaMemcpy(out, d_out, numbytes, cudaMemcpyDeviceToHost);
printf("transpose_parallel_per_row: %g ms.\nVerifying transpose...%s\n",
timer.Elapsed(), compare_matrices(out, gold) ? "Failed" : "Success");
*/
dim3 blocks(N/K,N/K); // blocks per grid
dim3 threads(K,K); // threads per block
	timer.Start();
transpose_parallel_per_element<<<blocks,threads>>>(d_in, d_out);
	timer.Stop();
cudaMemcpy(out, d_out, numbytes, cudaMemcpyDeviceToHost);
printf("transpose_parallel_per_element: %g ms.\nVerifying transpose...%s\n",
timer.Elapsed(), compare_matrices(out, gold) ? "Failed" : "Success");
/*
timer.Start();
transpose_parallel_per_element_tiled<<<blocks,threads>>>(d_in, d_out);
timer.Stop();
cudaMemcpy(out, d_out, numbytes, cudaMemcpyDeviceToHost);
printf("transpose_parallel_per_element_tiled %dx%d: %g ms.\nVerifying ...%s\n",
K, K, timer.Elapsed(), compare_matrices(out, gold) ? "Failed" : "Success");
dim3 blocks16x16(N/16,N/16); // blocks per grid
dim3 threads16x16(16,16); // threads per block
timer.Start();
transpose_parallel_per_element_tiled16<<<blocks16x16,threads16x16>>>(d_in, d_out);
timer.Stop();
cudaMemcpy(out, d_out, numbytes, cudaMemcpyDeviceToHost);
printf("transpose_parallel_per_element_tiled 16x16: %g ms.\nVerifying ...%s\n",
timer.Elapsed(), compare_matrices(out, gold) ? "Failed" : "Success");
timer.Start();
transpose_parallel_per_element_tiled_padded16<<<blocks16x16,threads16x16>>>(d_in, d_out);
timer.Stop();
cudaMemcpy(out, d_out, numbytes, cudaMemcpyDeviceToHost);
printf("transpose_parallel_per_element_tiled_padded 16x16: %g ms.\nVerifying...%s\n",
timer.Elapsed(), compare_matrices(out, gold) ? "Failed" : "Success");
*/
cudaFree(d_in);
cudaFree(d_out);
}
|
6b3d8408e168edb3fc6925ed9830c8747b78d061.hip | // !!! This is a file automatically generated by hipify!!!
#include <cudnn.h>
#include <cassert>
#include <cstdlib>
#include <iostream>
#include <opencv2/opencv.hpp>
#define checkCUDNN(expression) \
{ \
cudnnStatus_t status = (expression); \
if (status != CUDNN_STATUS_SUCCESS) { \
std::cerr << "Error on line " << __LINE__ << ": " \
<< cudnnGetErrorString(status) << std::endl; \
std::exit(EXIT_FAILURE); \
} \
}
cv::Mat load_image(const char* image_path) {
cv::Mat image = cv::imread(image_path, CV_LOAD_IMAGE_COLOR);
image.convertTo(image, CV_32FC3);
cv::normalize(image, image, 0, 1, cv::NORM_MINMAX);
std::cerr << "Input Image: " << image.rows << " x " << image.cols << " x 3"
<< std::endl;
return image;
}
void save_image(const char* output_filename,
float* buffer,
int height,
int width) {
cv::Mat output_image(height, width, CV_32FC3, buffer);
// Make negative values zero.
cv::threshold(output_image,
output_image,
/*threshold=*/0,
/*maxval=*/0,
cv::THRESH_TOZERO);
cv::normalize(output_image, output_image, 0.0, 255.0, cv::NORM_MINMAX);
output_image.convertTo(output_image, CV_8UC3);
cv::imwrite(output_filename, output_image);
std::cerr << "Wrote output to " << output_filename << std::endl;
}
int main(int argc, const char* argv[]) {
if (argc < 2) {
std::cerr << "usage: conv <image> [gpu=0] [sigmoid=0]" << std::endl;
std::exit(EXIT_FAILURE);
}
int gpu_id = (argc > 2) ? std::atoi(argv[2]) : 0;
std::cerr << "GPU: " << gpu_id << std::endl;
bool with_sigmoid = (argc > 3) ? std::atoi(argv[3]) : 0;
std::cerr << "With sigmoid: " << std::boolalpha << with_sigmoid << std::endl;
cv::Mat image = load_image(argv[1]);
hipSetDevice(gpu_id);
cudnnHandle_t cudnn{nullptr};
cudnnCreate(&cudnn);
cudnnTensorDescriptor_t input_descriptor;
checkCUDNN(cudnnCreateTensorDescriptor(&input_descriptor));
checkCUDNN(cudnnSetTensor4dDescriptor(input_descriptor,
/*format=*/CUDNN_TENSOR_NHWC,
/*dataType=*/CUDNN_DATA_FLOAT,
/*batch_size=*/1,
/*channels=*/3,
/*image_height=*/image.rows,
/*image_width=*/image.cols));
cudnnFilterDescriptor_t kernel_descriptor;
checkCUDNN(cudnnCreateFilterDescriptor(&kernel_descriptor));
checkCUDNN(cudnnSetFilter4dDescriptor(kernel_descriptor,
/*dataType=*/CUDNN_DATA_FLOAT,
/*format=*/CUDNN_TENSOR_NCHW,
/*out_channels=*/3,
/*in_channels=*/3,
/*kernel_height=*/3,
/*kernel_width=*/3));
cudnnConvolutionDescriptor_t convolution_descriptor;
checkCUDNN(cudnnCreateConvolutionDescriptor(&convolution_descriptor));
checkCUDNN(cudnnSetConvolution2dDescriptor(convolution_descriptor,
/*pad_height=*/1,
/*pad_width=*/1,
/*vertical_stride=*/1,
/*horizontal_stride=*/1,
/*dilation_height=*/1,
/*dilation_width=*/1,
/*mode=*/CUDNN_CROSS_CORRELATION,
/*computeType=*/CUDNN_DATA_FLOAT));
int batch_size{0}, channels{0}, height{0}, width{0};
checkCUDNN(cudnnGetConvolution2dForwardOutputDim(convolution_descriptor,
input_descriptor,
kernel_descriptor,
&batch_size,
&channels,
&height,
&width));
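  // With 3x3 filters, padding 1 and stride 1, (H + 2*1 - 3)/1 + 1 == H, so the output spatial
  // dimensions equal the input's and the same image_bytes size is reused for both buffers below.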
std::cerr << "Output Image: " << height << " x " << width << " x " << channels
<< std::endl;
cudnnTensorDescriptor_t output_descriptor;
checkCUDNN(cudnnCreateTensorDescriptor(&output_descriptor));
checkCUDNN(cudnnSetTensor4dDescriptor(output_descriptor,
/*format=*/CUDNN_TENSOR_NHWC,
/*dataType=*/CUDNN_DATA_FLOAT,
/*batch_size=*/1,
/*channels=*/3,
/*image_height=*/image.rows,
/*image_width=*/image.cols));
cudnnConvolutionFwdAlgo_t convolution_algorithm;
checkCUDNN(
cudnnGetConvolutionForwardAlgorithm(cudnn,
input_descriptor,
kernel_descriptor,
convolution_descriptor,
output_descriptor,
CUDNN_CONVOLUTION_FWD_PREFER_FASTEST,
0,
&convolution_algorithm));
// CUDNN_CONVOLUTION_FWD_ALGO_IMPLICIT_GEMM
size_t workspace_bytes{0};
checkCUDNN(cudnnGetConvolutionForwardWorkspaceSize(cudnn,
input_descriptor,
kernel_descriptor,
convolution_descriptor,
output_descriptor,
convolution_algorithm,
&workspace_bytes));
std::cerr << "Workspace size: " << (workspace_bytes / 1048576.0) << "MB"
<< std::endl;
assert(workspace_bytes > 0);
void* d_workspace{nullptr};
hipMalloc(&d_workspace, workspace_bytes);
int image_bytes = batch_size * channels * height * width * sizeof(float);
float* d_input{nullptr};
hipMalloc(&d_input, image_bytes);
hipMemcpy(d_input, image.ptr<float>(0), image_bytes, hipMemcpyHostToDevice);
float* d_output{nullptr};
hipMalloc(&d_output, image_bytes);
hipMemset(d_output, 0, image_bytes);
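  // 3x3 Laplacian-style edge-detection filter; the loops below replicate it across every
  // (output channel, input channel) pair of the 3x3x3x3 filter tensor.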
// clang-format off
float kernel_template[3][3] = {
{1, 1, 1},
{1, -8, 1},
{1, 1, 1}
};
// clang-format on
float h_kernel[3][3][3][3];
for (int kernel = 0; kernel < 3; ++kernel) {
for (int channel = 0; channel < 3; ++channel) {
for (int row = 0; row < 3; ++row) {
for (int column = 0; column < 3; ++column) {
h_kernel[kernel][channel][row][column] = kernel_template[row][column];
}
}
}
}
float* d_kernel{nullptr};
hipMalloc(&d_kernel, sizeof(h_kernel));
hipMemcpy(d_kernel, h_kernel, sizeof(h_kernel), hipMemcpyHostToDevice);
const float alpha = 1.0f, beta = 0.0f;
checkCUDNN(cudnnConvolutionForward(cudnn,
&alpha,
input_descriptor,
d_input,
kernel_descriptor,
d_kernel,
convolution_descriptor,
convolution_algorithm,
d_workspace,
workspace_bytes,
&beta,
output_descriptor,
d_output));
if (with_sigmoid) {
cudnnActivationDescriptor_t activation_descriptor;
checkCUDNN(cudnnCreateActivationDescriptor(&activation_descriptor));
checkCUDNN(cudnnSetActivationDescriptor(activation_descriptor,
CUDNN_ACTIVATION_SIGMOID,
CUDNN_PROPAGATE_NAN,
/*relu_coef=*/0));
checkCUDNN(cudnnActivationForward(cudnn,
activation_descriptor,
&alpha,
output_descriptor,
d_output,
&beta,
output_descriptor,
d_output));
cudnnDestroyActivationDescriptor(activation_descriptor);
}
float* h_output = new float[image_bytes];
hipMemcpy(h_output, d_output, image_bytes, hipMemcpyDeviceToHost);
save_image("cudnn-out.png", h_output, height, width);
delete[] h_output;
hipFree(d_kernel);
hipFree(d_input);
hipFree(d_output);
hipFree(d_workspace);
cudnnDestroyTensorDescriptor(input_descriptor);
cudnnDestroyTensorDescriptor(output_descriptor);
cudnnDestroyFilterDescriptor(kernel_descriptor);
cudnnDestroyConvolutionDescriptor(convolution_descriptor);
cudnnDestroy(cudnn);
}
| 6b3d8408e168edb3fc6925ed9830c8747b78d061.cu | #include <cudnn.h>
#include <cassert>
#include <cstdlib>
#include <iostream>
#include <opencv2/opencv.hpp>
#define checkCUDNN(expression) \
{ \
cudnnStatus_t status = (expression); \
if (status != CUDNN_STATUS_SUCCESS) { \
std::cerr << "Error on line " << __LINE__ << ": " \
<< cudnnGetErrorString(status) << std::endl; \
std::exit(EXIT_FAILURE); \
} \
}
cv::Mat load_image(const char* image_path) {
cv::Mat image = cv::imread(image_path, CV_LOAD_IMAGE_COLOR);
image.convertTo(image, CV_32FC3);
cv::normalize(image, image, 0, 1, cv::NORM_MINMAX);
std::cerr << "Input Image: " << image.rows << " x " << image.cols << " x 3"
<< std::endl;
return image;
}
void save_image(const char* output_filename,
float* buffer,
int height,
int width) {
cv::Mat output_image(height, width, CV_32FC3, buffer);
// Make negative values zero.
cv::threshold(output_image,
output_image,
/*threshold=*/0,
/*maxval=*/0,
cv::THRESH_TOZERO);
cv::normalize(output_image, output_image, 0.0, 255.0, cv::NORM_MINMAX);
output_image.convertTo(output_image, CV_8UC3);
cv::imwrite(output_filename, output_image);
std::cerr << "Wrote output to " << output_filename << std::endl;
}
int main(int argc, const char* argv[]) {
if (argc < 2) {
std::cerr << "usage: conv <image> [gpu=0] [sigmoid=0]" << std::endl;
std::exit(EXIT_FAILURE);
}
int gpu_id = (argc > 2) ? std::atoi(argv[2]) : 0;
std::cerr << "GPU: " << gpu_id << std::endl;
bool with_sigmoid = (argc > 3) ? std::atoi(argv[3]) : 0;
std::cerr << "With sigmoid: " << std::boolalpha << with_sigmoid << std::endl;
cv::Mat image = load_image(argv[1]);
cudaSetDevice(gpu_id);
cudnnHandle_t cudnn{nullptr};
cudnnCreate(&cudnn);
cudnnTensorDescriptor_t input_descriptor;
checkCUDNN(cudnnCreateTensorDescriptor(&input_descriptor));
checkCUDNN(cudnnSetTensor4dDescriptor(input_descriptor,
/*format=*/CUDNN_TENSOR_NHWC,
/*dataType=*/CUDNN_DATA_FLOAT,
/*batch_size=*/1,
/*channels=*/3,
/*image_height=*/image.rows,
/*image_width=*/image.cols));
cudnnFilterDescriptor_t kernel_descriptor;
checkCUDNN(cudnnCreateFilterDescriptor(&kernel_descriptor));
checkCUDNN(cudnnSetFilter4dDescriptor(kernel_descriptor,
/*dataType=*/CUDNN_DATA_FLOAT,
/*format=*/CUDNN_TENSOR_NCHW,
/*out_channels=*/3,
/*in_channels=*/3,
/*kernel_height=*/3,
/*kernel_width=*/3));
cudnnConvolutionDescriptor_t convolution_descriptor;
checkCUDNN(cudnnCreateConvolutionDescriptor(&convolution_descriptor));
checkCUDNN(cudnnSetConvolution2dDescriptor(convolution_descriptor,
/*pad_height=*/1,
/*pad_width=*/1,
/*vertical_stride=*/1,
/*horizontal_stride=*/1,
/*dilation_height=*/1,
/*dilation_width=*/1,
/*mode=*/CUDNN_CROSS_CORRELATION,
/*computeType=*/CUDNN_DATA_FLOAT));
int batch_size{0}, channels{0}, height{0}, width{0};
checkCUDNN(cudnnGetConvolution2dForwardOutputDim(convolution_descriptor,
input_descriptor,
kernel_descriptor,
&batch_size,
&channels,
&height,
&width));
std::cerr << "Output Image: " << height << " x " << width << " x " << channels
<< std::endl;
cudnnTensorDescriptor_t output_descriptor;
checkCUDNN(cudnnCreateTensorDescriptor(&output_descriptor));
checkCUDNN(cudnnSetTensor4dDescriptor(output_descriptor,
/*format=*/CUDNN_TENSOR_NHWC,
/*dataType=*/CUDNN_DATA_FLOAT,
/*batch_size=*/1,
/*channels=*/3,
/*image_height=*/image.rows,
/*image_width=*/image.cols));
cudnnConvolutionFwdAlgo_t convolution_algorithm;
checkCUDNN(
cudnnGetConvolutionForwardAlgorithm(cudnn,
input_descriptor,
kernel_descriptor,
convolution_descriptor,
output_descriptor,
CUDNN_CONVOLUTION_FWD_PREFER_FASTEST,
0,
&convolution_algorithm));
// CUDNN_CONVOLUTION_FWD_ALGO_IMPLICIT_GEMM
size_t workspace_bytes{0};
checkCUDNN(cudnnGetConvolutionForwardWorkspaceSize(cudnn,
input_descriptor,
kernel_descriptor,
convolution_descriptor,
output_descriptor,
convolution_algorithm,
&workspace_bytes));
std::cerr << "Workspace size: " << (workspace_bytes / 1048576.0) << "MB"
<< std::endl;
assert(workspace_bytes > 0);
void* d_workspace{nullptr};
cudaMalloc(&d_workspace, workspace_bytes);
int image_bytes = batch_size * channels * height * width * sizeof(float);
float* d_input{nullptr};
cudaMalloc(&d_input, image_bytes);
cudaMemcpy(d_input, image.ptr<float>(0), image_bytes, cudaMemcpyHostToDevice);
float* d_output{nullptr};
cudaMalloc(&d_output, image_bytes);
cudaMemset(d_output, 0, image_bytes);
// clang-format off
float kernel_template[3][3] = {
{1, 1, 1},
{1, -8, 1},
{1, 1, 1}
};
// clang-format on
float h_kernel[3][3][3][3];
for (int kernel = 0; kernel < 3; ++kernel) {
for (int channel = 0; channel < 3; ++channel) {
for (int row = 0; row < 3; ++row) {
for (int column = 0; column < 3; ++column) {
h_kernel[kernel][channel][row][column] = kernel_template[row][column];
}
}
}
}
float* d_kernel{nullptr};
cudaMalloc(&d_kernel, sizeof(h_kernel));
cudaMemcpy(d_kernel, h_kernel, sizeof(h_kernel), cudaMemcpyHostToDevice);
const float alpha = 1.0f, beta = 0.0f;
checkCUDNN(cudnnConvolutionForward(cudnn,
&alpha,
input_descriptor,
d_input,
kernel_descriptor,
d_kernel,
convolution_descriptor,
convolution_algorithm,
d_workspace,
workspace_bytes,
&beta,
output_descriptor,
d_output));
if (with_sigmoid) {
cudnnActivationDescriptor_t activation_descriptor;
checkCUDNN(cudnnCreateActivationDescriptor(&activation_descriptor));
checkCUDNN(cudnnSetActivationDescriptor(activation_descriptor,
CUDNN_ACTIVATION_SIGMOID,
CUDNN_PROPAGATE_NAN,
/*relu_coef=*/0));
checkCUDNN(cudnnActivationForward(cudnn,
activation_descriptor,
&alpha,
output_descriptor,
d_output,
&beta,
output_descriptor,
d_output));
cudnnDestroyActivationDescriptor(activation_descriptor);
}
float* h_output = new float[image_bytes];
cudaMemcpy(h_output, d_output, image_bytes, cudaMemcpyDeviceToHost);
save_image("cudnn-out.png", h_output, height, width);
delete[] h_output;
cudaFree(d_kernel);
cudaFree(d_input);
cudaFree(d_output);
cudaFree(d_workspace);
cudnnDestroyTensorDescriptor(input_descriptor);
cudnnDestroyTensorDescriptor(output_descriptor);
cudnnDestroyFilterDescriptor(kernel_descriptor);
cudnnDestroyConvolutionDescriptor(convolution_descriptor);
cudnnDestroy(cudnn);
}
|
e85c645213326e5526d08538a2ba5fba8fa3b533.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Kernel: fills Bx[frombus[l]*Mat4_Width + tobus[l]] = 1/xline[l], one entry per branch l
__global__ void BuildBx( double * Bx, const int * frombus, const int * tobus, const int * BranchStatus, const double * xline, const int numline, const int Mat4_Width)
{
int Row = blockIdx.y*blockDim.y+threadIdx.y;
int Col = blockIdx.x*blockDim.x+threadIdx.x;
	// guard Row against numline before indexing the per-branch arrays
	if(Row < numline && Row == frombus[Row] && Col == tobus[Row])
{
Bx[frombus[Row]*Mat4_Width + tobus[Row]] = 1/xline[Row];
}
}
| e85c645213326e5526d08538a2ba5fba8fa3b533.cu | //kernel
__global__ void BuildBx( double * Bx, const int * frombus, const int * tobus, const int * BranchStatus, const double * xline, const int numline, const int Mat4_Width)
{
int Row = blockIdx.y*blockDim.y+threadIdx.y;
int Col = blockIdx.x*blockDim.x+threadIdx.x;
	// guard Row against numline before indexing the per-branch arrays
	if(Row < numline && Row == frombus[Row] && Col == tobus[Row])
{
Bx[frombus[Row]*Mat4_Width + tobus[Row]] = 1/xline[Row];
}
}
|
d9bea82d838156acf4ec049e98ed561801655e96.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Copyright (c) 2018-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "gpuinflate.hpp"
#include <io/utilities/block_utils.cuh>
#include <rmm/cuda_stream_view.hpp>
#include <hipcub/hipcub.hpp>
namespace cudf {
namespace io {
constexpr int32_t batch_size = (1 << 5);
constexpr int32_t batch_count = (1 << 2);
constexpr int32_t prefetch_size = (1 << 9); // 512B, in 32B chunks
constexpr bool log_cyclecount = false;
void __device__ busy_wait(size_t cycles)
{
clock_t start = clock();
for (;;) {
clock_t const now = clock();
clock_t const elapsed = now > start ? now - start : now + (0xffff'ffff - start);
if (elapsed >= cycles) return;
}
}
/**
* @brief Describes a single LZ77 symbol (single entry in batch)
*/
struct unsnap_batch_s {
int32_t len; // 1..64 = Number of bytes
uint32_t
offset; // copy distance if greater than zero or negative of literal offset in byte stream
};
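// Illustrative examples of the sign convention: {len = 5, offset = 300} copies 5 bytes starting
// 300 bytes back in the uncompressed output, while {len = 5, offset = -300} emits 5 literal bytes
// taken from byte position 300 of the compressed input stream.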
/**
* @brief Queue structure used to exchange data between warps
*/
struct unsnap_queue_s {
uint32_t prefetch_wrpos; ///< Prefetcher write position
uint32_t prefetch_rdpos; ///< Prefetch consumer read position
int32_t prefetch_end; ///< Prefetch enable flag (nonzero stops prefetcher)
int32_t batch_len[batch_count]; ///< Length of each batch - <0:end, 0:not ready, >0:symbol count
unsnap_batch_s batch[batch_count * batch_size]; ///< LZ77 batch data
uint8_t buf[prefetch_size]; ///< Prefetch buffer
};
/**
* @brief snappy decompression state
*/
struct unsnap_state_s {
uint8_t const* base; ///< base ptr of compressed stream
uint8_t const* end; ///< end of compressed stream
uint32_t uncompressed_size; ///< uncompressed stream size
uint32_t bytes_left; ///< remaining bytes to decompress
int32_t error; ///< current error status
uint32_t tstart; ///< start time for perf logging
volatile unsnap_queue_s q; ///< queue for cross-warp communication
device_span<uint8_t const> src; ///< input for current block
device_span<uint8_t> dst; ///< output for current block
};
inline __device__ volatile uint8_t& byte_access(unsnap_state_s* s, uint32_t pos)
{
return s->q.buf[pos & (prefetch_size - 1)];
}
/**
* @brief prefetches data for the symbol decoding stage
*
* @param s decompression state
* @param t warp lane id
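 *
 * q.buf is used as a ring buffer of prefetch_size bytes (see byte_access): the loop below stalls
 * whenever the prefetcher would move more than prefetch_size - 32 bytes ahead of the decoder's
 * read position (prefetch_rdpos), and it exits once the decoder sets prefetch_end.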
*/
__device__ void snappy_prefetch_bytestream(unsnap_state_s* s, int t)
{
uint8_t const* base = s->base;
auto end = (uint32_t)(s->end - base);
auto align_bytes = (uint32_t)(0x20 - (0x1f & reinterpret_cast<uintptr_t>(base)));
int32_t pos = min(align_bytes, end);
int32_t blen;
// Start by prefetching up to the next a 32B-aligned location
if (t < pos) { s->q.buf[t] = base[t]; }
blen = 0;
do {
__syncwarp();
if (!t) {
uint32_t minrdpos;
s->q.prefetch_wrpos = pos;
minrdpos = pos - min(pos, prefetch_size - 32u);
blen = (int)min(32u, end - pos);
for (;;) {
uint32_t rdpos = s->q.prefetch_rdpos;
if (rdpos >= minrdpos) break;
if (s->q.prefetch_end) {
blen = 0;
break;
}
busy_wait(20);
}
}
blen = shuffle(blen);
if (t < blen) { byte_access(s, pos + t) = base[pos + t]; }
pos += blen;
} while (blen > 0);
}
/**
* @brief Lookup table for get_len3_mask()
*
* Indexed by a 10-bit pattern, contains the corresponding 4-bit mask of
* 3-byte code lengths in the lower 4 bits, along with the total number of
* bytes used for coding the four lengths in the upper 4 bits.
* The upper 4-bit value could also be obtained by 8+__popc(mask4)
*
* for (uint32_t k = 0; k < 1024; k++)
* {
* for (uint32_t i = 0, v = 0, b = k, n = 0; i < 4; i++)
* {
* v |= (b & 1) << i;
* n += (b & 1) + 2;
* b >>= (b & 1) + 2;
* }
* k_len3lut[k] = v | (n << 4);
* }
*/
static const uint8_t __device__ __constant__ k_len3lut[1 << 10] = {
0x80, 0x91, 0x80, 0x91, 0x92, 0x91, 0x92, 0x91, 0x80, 0xa3, 0x80, 0xa3, 0x92, 0xa3, 0x92, 0xa3,
0x94, 0x91, 0x94, 0x91, 0x92, 0x91, 0x92, 0x91, 0x94, 0xa3, 0x94, 0xa3, 0x92, 0xa3, 0x92, 0xa3,
0x80, 0xa5, 0x80, 0xa5, 0xa6, 0xa5, 0xa6, 0xa5, 0x80, 0xa3, 0x80, 0xa3, 0xa6, 0xa3, 0xa6, 0xa3,
0x94, 0xa5, 0x94, 0xa5, 0xa6, 0xa5, 0xa6, 0xa5, 0x94, 0xa3, 0x94, 0xa3, 0xa6, 0xa3, 0xa6, 0xa3,
0x98, 0x91, 0x98, 0x91, 0x92, 0x91, 0x92, 0x91, 0x98, 0xb7, 0x98, 0xb7, 0x92, 0xb7, 0x92, 0xb7,
0x94, 0x91, 0x94, 0x91, 0x92, 0x91, 0x92, 0x91, 0x94, 0xb7, 0x94, 0xb7, 0x92, 0xb7, 0x92, 0xb7,
0x98, 0xa5, 0x98, 0xa5, 0xa6, 0xa5, 0xa6, 0xa5, 0x98, 0xb7, 0x98, 0xb7, 0xa6, 0xb7, 0xa6, 0xb7,
0x94, 0xa5, 0x94, 0xa5, 0xa6, 0xa5, 0xa6, 0xa5, 0x94, 0xb7, 0x94, 0xb7, 0xa6, 0xb7, 0xa6, 0xb7,
0x80, 0xa9, 0x80, 0xa9, 0xaa, 0xa9, 0xaa, 0xa9, 0x80, 0xa3, 0x80, 0xa3, 0xaa, 0xa3, 0xaa, 0xa3,
0xac, 0xa9, 0xac, 0xa9, 0xaa, 0xa9, 0xaa, 0xa9, 0xac, 0xa3, 0xac, 0xa3, 0xaa, 0xa3, 0xaa, 0xa3,
0x80, 0xa5, 0x80, 0xa5, 0xa6, 0xa5, 0xa6, 0xa5, 0x80, 0xa3, 0x80, 0xa3, 0xa6, 0xa3, 0xa6, 0xa3,
0xac, 0xa5, 0xac, 0xa5, 0xa6, 0xa5, 0xa6, 0xa5, 0xac, 0xa3, 0xac, 0xa3, 0xa6, 0xa3, 0xa6, 0xa3,
0x98, 0xa9, 0x98, 0xa9, 0xaa, 0xa9, 0xaa, 0xa9, 0x98, 0xb7, 0x98, 0xb7, 0xaa, 0xb7, 0xaa, 0xb7,
0xac, 0xa9, 0xac, 0xa9, 0xaa, 0xa9, 0xaa, 0xa9, 0xac, 0xb7, 0xac, 0xb7, 0xaa, 0xb7, 0xaa, 0xb7,
0x98, 0xa5, 0x98, 0xa5, 0xa6, 0xa5, 0xa6, 0xa5, 0x98, 0xb7, 0x98, 0xb7, 0xa6, 0xb7, 0xa6, 0xb7,
0xac, 0xa5, 0xac, 0xa5, 0xa6, 0xa5, 0xa6, 0xa5, 0xac, 0xb7, 0xac, 0xb7, 0xa6, 0xb7, 0xa6, 0xb7,
0x80, 0x91, 0x80, 0x91, 0x92, 0x91, 0x92, 0x91, 0x80, 0xbb, 0x80, 0xbb, 0x92, 0xbb, 0x92, 0xbb,
0x94, 0x91, 0x94, 0x91, 0x92, 0x91, 0x92, 0x91, 0x94, 0xbb, 0x94, 0xbb, 0x92, 0xbb, 0x92, 0xbb,
0x80, 0xbd, 0x80, 0xbd, 0xbe, 0xbd, 0xbe, 0xbd, 0x80, 0xbb, 0x80, 0xbb, 0xbe, 0xbb, 0xbe, 0xbb,
0x94, 0xbd, 0x94, 0xbd, 0xbe, 0xbd, 0xbe, 0xbd, 0x94, 0xbb, 0x94, 0xbb, 0xbe, 0xbb, 0xbe, 0xbb,
0x98, 0x91, 0x98, 0x91, 0x92, 0x91, 0x92, 0x91, 0x98, 0xb7, 0x98, 0xb7, 0x92, 0xb7, 0x92, 0xb7,
0x94, 0x91, 0x94, 0x91, 0x92, 0x91, 0x92, 0x91, 0x94, 0xb7, 0x94, 0xb7, 0x92, 0xb7, 0x92, 0xb7,
0x98, 0xbd, 0x98, 0xbd, 0xbe, 0xbd, 0xbe, 0xbd, 0x98, 0xb7, 0x98, 0xb7, 0xbe, 0xb7, 0xbe, 0xb7,
0x94, 0xbd, 0x94, 0xbd, 0xbe, 0xbd, 0xbe, 0xbd, 0x94, 0xb7, 0x94, 0xb7, 0xbe, 0xb7, 0xbe, 0xb7,
0x80, 0xa9, 0x80, 0xa9, 0xaa, 0xa9, 0xaa, 0xa9, 0x80, 0xbb, 0x80, 0xbb, 0xaa, 0xbb, 0xaa, 0xbb,
0xac, 0xa9, 0xac, 0xa9, 0xaa, 0xa9, 0xaa, 0xa9, 0xac, 0xbb, 0xac, 0xbb, 0xaa, 0xbb, 0xaa, 0xbb,
0x80, 0xbd, 0x80, 0xbd, 0xbe, 0xbd, 0xbe, 0xbd, 0x80, 0xbb, 0x80, 0xbb, 0xbe, 0xbb, 0xbe, 0xbb,
0xac, 0xbd, 0xac, 0xbd, 0xbe, 0xbd, 0xbe, 0xbd, 0xac, 0xbb, 0xac, 0xbb, 0xbe, 0xbb, 0xbe, 0xbb,
0x98, 0xa9, 0x98, 0xa9, 0xaa, 0xa9, 0xaa, 0xa9, 0x98, 0xb7, 0x98, 0xb7, 0xaa, 0xb7, 0xaa, 0xb7,
0xac, 0xa9, 0xac, 0xa9, 0xaa, 0xa9, 0xaa, 0xa9, 0xac, 0xb7, 0xac, 0xb7, 0xaa, 0xb7, 0xaa, 0xb7,
0x98, 0xbd, 0x98, 0xbd, 0xbe, 0xbd, 0xbe, 0xbd, 0x98, 0xb7, 0x98, 0xb7, 0xbe, 0xb7, 0xbe, 0xb7,
0xac, 0xbd, 0xac, 0xbd, 0xbe, 0xbd, 0xbe, 0xbd, 0xac, 0xb7, 0xac, 0xb7, 0xbe, 0xb7, 0xbe, 0xb7,
0x80, 0x91, 0x80, 0x91, 0x92, 0x91, 0x92, 0x91, 0x80, 0xa3, 0x80, 0xa3, 0x92, 0xa3, 0x92, 0xa3,
0x94, 0x91, 0x94, 0x91, 0x92, 0x91, 0x92, 0x91, 0x94, 0xa3, 0x94, 0xa3, 0x92, 0xa3, 0x92, 0xa3,
0x80, 0xa5, 0x80, 0xa5, 0xa6, 0xa5, 0xa6, 0xa5, 0x80, 0xa3, 0x80, 0xa3, 0xa6, 0xa3, 0xa6, 0xa3,
0x94, 0xa5, 0x94, 0xa5, 0xa6, 0xa5, 0xa6, 0xa5, 0x94, 0xa3, 0x94, 0xa3, 0xa6, 0xa3, 0xa6, 0xa3,
0x98, 0x91, 0x98, 0x91, 0x92, 0x91, 0x92, 0x91, 0x98, 0xcf, 0x98, 0xcf, 0x92, 0xcf, 0x92, 0xcf,
0x94, 0x91, 0x94, 0x91, 0x92, 0x91, 0x92, 0x91, 0x94, 0xcf, 0x94, 0xcf, 0x92, 0xcf, 0x92, 0xcf,
0x98, 0xa5, 0x98, 0xa5, 0xa6, 0xa5, 0xa6, 0xa5, 0x98, 0xcf, 0x98, 0xcf, 0xa6, 0xcf, 0xa6, 0xcf,
0x94, 0xa5, 0x94, 0xa5, 0xa6, 0xa5, 0xa6, 0xa5, 0x94, 0xcf, 0x94, 0xcf, 0xa6, 0xcf, 0xa6, 0xcf,
0x80, 0xa9, 0x80, 0xa9, 0xaa, 0xa9, 0xaa, 0xa9, 0x80, 0xa3, 0x80, 0xa3, 0xaa, 0xa3, 0xaa, 0xa3,
0xac, 0xa9, 0xac, 0xa9, 0xaa, 0xa9, 0xaa, 0xa9, 0xac, 0xa3, 0xac, 0xa3, 0xaa, 0xa3, 0xaa, 0xa3,
0x80, 0xa5, 0x80, 0xa5, 0xa6, 0xa5, 0xa6, 0xa5, 0x80, 0xa3, 0x80, 0xa3, 0xa6, 0xa3, 0xa6, 0xa3,
0xac, 0xa5, 0xac, 0xa5, 0xa6, 0xa5, 0xa6, 0xa5, 0xac, 0xa3, 0xac, 0xa3, 0xa6, 0xa3, 0xa6, 0xa3,
0x98, 0xa9, 0x98, 0xa9, 0xaa, 0xa9, 0xaa, 0xa9, 0x98, 0xcf, 0x98, 0xcf, 0xaa, 0xcf, 0xaa, 0xcf,
0xac, 0xa9, 0xac, 0xa9, 0xaa, 0xa9, 0xaa, 0xa9, 0xac, 0xcf, 0xac, 0xcf, 0xaa, 0xcf, 0xaa, 0xcf,
0x98, 0xa5, 0x98, 0xa5, 0xa6, 0xa5, 0xa6, 0xa5, 0x98, 0xcf, 0x98, 0xcf, 0xa6, 0xcf, 0xa6, 0xcf,
0xac, 0xa5, 0xac, 0xa5, 0xa6, 0xa5, 0xa6, 0xa5, 0xac, 0xcf, 0xac, 0xcf, 0xa6, 0xcf, 0xa6, 0xcf,
0x80, 0x91, 0x80, 0x91, 0x92, 0x91, 0x92, 0x91, 0x80, 0xbb, 0x80, 0xbb, 0x92, 0xbb, 0x92, 0xbb,
0x94, 0x91, 0x94, 0x91, 0x92, 0x91, 0x92, 0x91, 0x94, 0xbb, 0x94, 0xbb, 0x92, 0xbb, 0x92, 0xbb,
0x80, 0xbd, 0x80, 0xbd, 0xbe, 0xbd, 0xbe, 0xbd, 0x80, 0xbb, 0x80, 0xbb, 0xbe, 0xbb, 0xbe, 0xbb,
0x94, 0xbd, 0x94, 0xbd, 0xbe, 0xbd, 0xbe, 0xbd, 0x94, 0xbb, 0x94, 0xbb, 0xbe, 0xbb, 0xbe, 0xbb,
0x98, 0x91, 0x98, 0x91, 0x92, 0x91, 0x92, 0x91, 0x98, 0xcf, 0x98, 0xcf, 0x92, 0xcf, 0x92, 0xcf,
0x94, 0x91, 0x94, 0x91, 0x92, 0x91, 0x92, 0x91, 0x94, 0xcf, 0x94, 0xcf, 0x92, 0xcf, 0x92, 0xcf,
0x98, 0xbd, 0x98, 0xbd, 0xbe, 0xbd, 0xbe, 0xbd, 0x98, 0xcf, 0x98, 0xcf, 0xbe, 0xcf, 0xbe, 0xcf,
0x94, 0xbd, 0x94, 0xbd, 0xbe, 0xbd, 0xbe, 0xbd, 0x94, 0xcf, 0x94, 0xcf, 0xbe, 0xcf, 0xbe, 0xcf,
0x80, 0xa9, 0x80, 0xa9, 0xaa, 0xa9, 0xaa, 0xa9, 0x80, 0xbb, 0x80, 0xbb, 0xaa, 0xbb, 0xaa, 0xbb,
0xac, 0xa9, 0xac, 0xa9, 0xaa, 0xa9, 0xaa, 0xa9, 0xac, 0xbb, 0xac, 0xbb, 0xaa, 0xbb, 0xaa, 0xbb,
0x80, 0xbd, 0x80, 0xbd, 0xbe, 0xbd, 0xbe, 0xbd, 0x80, 0xbb, 0x80, 0xbb, 0xbe, 0xbb, 0xbe, 0xbb,
0xac, 0xbd, 0xac, 0xbd, 0xbe, 0xbd, 0xbe, 0xbd, 0xac, 0xbb, 0xac, 0xbb, 0xbe, 0xbb, 0xbe, 0xbb,
0x98, 0xa9, 0x98, 0xa9, 0xaa, 0xa9, 0xaa, 0xa9, 0x98, 0xcf, 0x98, 0xcf, 0xaa, 0xcf, 0xaa, 0xcf,
0xac, 0xa9, 0xac, 0xa9, 0xaa, 0xa9, 0xaa, 0xa9, 0xac, 0xcf, 0xac, 0xcf, 0xaa, 0xcf, 0xaa, 0xcf,
0x98, 0xbd, 0x98, 0xbd, 0xbe, 0xbd, 0xbe, 0xbd, 0x98, 0xcf, 0x98, 0xcf, 0xbe, 0xcf, 0xbe, 0xcf,
0xac, 0xbd, 0xac, 0xbd, 0xbe, 0xbd, 0xbe, 0xbd, 0xac, 0xcf, 0xac, 0xcf, 0xbe, 0xcf, 0xbe, 0xcf};
/**
* @brief Returns a 32-bit mask where 1 means 3-byte code length and 0 means 2-byte
* code length, given an input mask of up to 96 bits.
*
* Implemented by doing 8 consecutive lookups, building the result 4-bit at a time
*/
inline __device__ uint32_t get_len3_mask(uint32_t v0, uint32_t v1, uint32_t v2)
{
uint32_t m, v, m4, n;
v = v0;
m4 = k_len3lut[v & 0x3ff];
m = m4 & 0xf;
n = m4 >> 4; // 8..12
v = v0 >> n;
m4 = k_len3lut[v & 0x3ff];
m |= (m4 & 0xf) << 4;
n += m4 >> 4; // 16..24
v = __funnelshift_r(v0, v1, n);
m4 = k_len3lut[v & 0x3ff];
m |= (m4 & 0xf) << 8;
n += m4 >> 4; // 24..36
v >>= (m4 >> 4);
m4 = k_len3lut[v & 0x3ff];
m |= (m4 & 0xf) << 12;
n = (n + (m4 >> 4)) & 0x1f; // (32..48) % 32 = 0..16
v1 = __funnelshift_r(v1, v2, n);
v2 >>= n;
v = v1;
m4 = k_len3lut[v & 0x3ff];
m |= (m4 & 0xf) << 16;
n = m4 >> 4; // 8..12
v = v1 >> n;
m4 = k_len3lut[v & 0x3ff];
m |= (m4 & 0xf) << 20;
n += m4 >> 4; // 16..24
v = __funnelshift_r(v1, v2, n);
m4 = k_len3lut[v & 0x3ff];
m |= (m4 & 0xf) << 24;
n += m4 >> 4; // 24..36
v >>= (m4 >> 4);
m4 = k_len3lut[v & 0x3ff];
m |= (m4 & 0xf) << 28;
return m;
}
/**
* @brief Returns a 32-bit mask where each 2-bit pair contains the symbol length
* minus 2, given two input masks each containing bit0 or bit1 of the corresponding
* code length minus 2 for up to 32 bytes
*/
inline __device__ uint32_t get_len5_mask(uint32_t v0, uint32_t v1)
{
uint32_t m;
m = (v1 & 1) * 2 + (v0 & 1);
v0 >>= (m + 2);
v1 >>= (m + 1);
for (uint32_t i = 1; i < 16; i++) {
uint32_t m2 = (v1 & 2) | (v0 & 1);
uint32_t n = m2 + 2;
m |= m2 << (i * 2);
v0 >>= n;
v1 >>= n;
}
return m;
}
/**
* @brief decode symbols and output LZ77 batches (single-warp)
*
* @param s decompression state
* @param t warp lane id
*/
__device__ void snappy_decode_symbols(unsnap_state_s* s, uint32_t t)
{
uint32_t cur = 0;
auto end = static_cast<uint32_t>(s->end - s->base);
uint32_t bytes_left = s->uncompressed_size;
uint32_t dst_pos = 0;
int32_t batch = 0;
for (;;) {
int32_t batch_len;
volatile unsnap_batch_s* b;
// Wait for prefetcher
if (t == 0) {
s->q.prefetch_rdpos = cur;
#pragma unroll(1) // We don't want unrolling here
while (s->q.prefetch_wrpos < min(cur + 5 * batch_size, end)) {
busy_wait(10);
}
b = &s->q.batch[batch * batch_size];
}
// Process small symbols in parallel: for data that does not get good compression,
// the stream will consist of a large number of short literals (1-byte or 2-byte)
// followed by short repeat runs. This results in many 2-byte or 3-byte symbols
// that can all be decoded in parallel once we know the symbol length.
{
uint32_t v0, v1, v2, len3_mask, cur_t, is_long_sym, short_sym_mask;
uint32_t b0;
cur = shuffle(cur);
cur_t = cur + t;
b0 = byte_access(s, cur_t);
v0 = ballot((b0 == 4) || (b0 & 2));
b0 = byte_access(s, cur_t + 32);
v1 = ballot((b0 == 4) || (b0 & 2));
b0 = byte_access(s, cur_t + 64);
v2 = ballot((b0 == 4) || (b0 & 2));
len3_mask = shuffle((t == 0) ? get_len3_mask(v0, v1, v2) : 0);
cur_t = cur + 2 * t + __popc(len3_mask & ((1 << t) - 1));
b0 = byte_access(s, cur_t);
is_long_sym = ((b0 & ~4) != 0) && (((b0 + 1) & 2) == 0);
short_sym_mask = ballot(is_long_sym);
batch_len = 0;
b = reinterpret_cast<volatile unsnap_batch_s*>(shuffle(reinterpret_cast<uintptr_t>(b)));
if (!(short_sym_mask & 1)) {
batch_len = shuffle((t == 0) ? (short_sym_mask) ? __ffs(short_sym_mask) - 1 : 32 : 0);
if (batch_len != 0) {
uint32_t blen = 0;
int32_t ofs = 0;
if (t < batch_len) {
blen = (b0 & 1) ? ((b0 >> 2) & 7) + 4 : ((b0 >> 2) + 1);
ofs = (b0 & 1) ? ((b0 & 0xe0) << 3) | byte_access(s, cur_t + 1)
: (b0 & 2) ? byte_access(s, cur_t + 1) | (byte_access(s, cur_t + 2) << 8)
: -(int32_t)(cur_t + 1);
b[t].len = blen;
b[t].offset = ofs;
ofs += blen; // for correct out-of-range detection below
}
blen = WarpReducePos32(blen, t);
bytes_left = shuffle(bytes_left);
dst_pos = shuffle(dst_pos);
short_sym_mask = __ffs(ballot(blen > bytes_left || ofs > (int32_t)(dst_pos + blen)));
if (short_sym_mask != 0) { batch_len = min(batch_len, short_sym_mask - 1); }
if (batch_len != 0) {
blen = shuffle(blen, batch_len - 1);
cur = shuffle(cur_t, batch_len - 1) + 2 + ((len3_mask >> (batch_len - 1)) & 1);
if (t == 0) {
dst_pos += blen;
bytes_left -= blen;
}
}
}
}
// Check if the batch was stopped by a 3-byte or 4-byte literal
if (batch_len < batch_size - 2 && shuffle(b0 & ~4, batch_len) == 8) {
// If so, run a slower version of the above that can also handle 3/4-byte literal sequences
uint32_t batch_add;
do {
uint32_t clen, mask_t;
cur_t = cur + t;
b0 = byte_access(s, cur_t);
clen = (b0 & 3) ? (b0 & 2) ? 1 : 0 : (b0 >> 2); // symbol length minus 2
v0 = ballot(clen & 1);
v1 = ballot((clen >> 1) & 1);
len3_mask = shuffle((t == 0) ? get_len5_mask(v0, v1) : 0);
mask_t = (1 << (2 * t)) - 1;
cur_t = cur + 2 * t + 2 * __popc((len3_mask & 0xaaaa'aaaa) & mask_t) +
__popc((len3_mask & 0x5555'5555) & mask_t);
b0 = byte_access(s, cur_t);
is_long_sym = ((b0 & 3) ? ((b0 & 3) == 3) : (b0 > 3 * 4)) || (cur_t >= cur + 32) ||
(batch_len + t >= batch_size);
batch_add = __ffs(ballot(is_long_sym)) - 1;
if (batch_add != 0) {
uint32_t blen = 0;
int32_t ofs = 0;
if (t < batch_add) {
blen = (b0 & 1) ? ((b0 >> 2) & 7) + 4 : ((b0 >> 2) + 1);
ofs = (b0 & 1) ? ((b0 & 0xe0) << 3) | byte_access(s, cur_t + 1)
: (b0 & 2) ? byte_access(s, cur_t + 1) | (byte_access(s, cur_t + 2) << 8)
: -(int32_t)(cur_t + 1);
b[batch_len + t].len = blen;
b[batch_len + t].offset = ofs;
ofs += blen; // for correct out-of-range detection below
}
blen = WarpReducePos32(blen, t);
bytes_left = shuffle(bytes_left);
dst_pos = shuffle(dst_pos);
short_sym_mask = __ffs(ballot(blen > bytes_left || ofs > (int32_t)(dst_pos + blen)));
if (short_sym_mask != 0) { batch_add = min(batch_add, short_sym_mask - 1); }
if (batch_add != 0) {
blen = shuffle(blen, batch_add - 1);
cur = shuffle(cur_t, batch_add - 1) + 2 + ((len3_mask >> ((batch_add - 1) * 2)) & 3);
if (t == 0) {
dst_pos += blen;
bytes_left -= blen;
}
batch_len += batch_add;
}
}
} while (batch_add >= 6 && batch_len < batch_size - 2);
}
}
if (t == 0) {
while (bytes_left > 0 && batch_len < batch_size) {
uint32_t blen, offset;
uint8_t b0 = byte_access(s, cur);
if (b0 & 3) {
uint8_t b1 = byte_access(s, cur + 1);
if (!(b0 & 2)) {
// xxxxxx01.oooooooo: copy with 3-bit length, 11-bit offset
offset = ((b0 & 0xe0) << 3) | b1;
blen = ((b0 >> 2) & 7) + 4;
cur += 2;
} else {
// xxxxxx1x: copy with 6-bit length, 2-byte or 4-byte offset
offset = b1 | (byte_access(s, cur + 2) << 8);
if (b0 & 1) // 4-byte offset
{
offset |= (byte_access(s, cur + 3) << 16) | (byte_access(s, cur + 4) << 24);
cur += 5;
} else {
cur += 3;
}
blen = (b0 >> 2) + 1;
}
dst_pos += blen;
if (offset - 1u >= dst_pos || bytes_left < blen) break;
bytes_left -= blen;
} else if (b0 < 4 * 4) {
// 0000xx00: short literal
blen = (b0 >> 2) + 1;
offset = -(int32_t)(cur + 1);
cur += 1 + blen;
dst_pos += blen;
if (bytes_left < blen) break;
bytes_left -= blen;
} else {
// xxxxxx00: literal
blen = b0 >> 2;
if (blen >= 60) {
uint32_t num_bytes = blen - 59;
blen = byte_access(s, cur + 1);
if (num_bytes > 1) {
blen |= byte_access(s, cur + 2) << 8;
if (num_bytes > 2) {
blen |= byte_access(s, cur + 3) << 16;
if (num_bytes > 3) { blen |= byte_access(s, cur + 4) << 24; }
}
}
cur += num_bytes;
}
cur += 1;
blen += 1;
offset = -(int32_t)cur;
cur += blen;
// Wait for prefetcher
s->q.prefetch_rdpos = cur;
#pragma unroll(1) // We don't want unrolling here
while (s->q.prefetch_wrpos < min(cur + 5 * batch_size, end)) {
busy_wait(10);
}
dst_pos += blen;
if (bytes_left < blen) break;
bytes_left -= blen;
}
b[batch_len].len = blen;
b[batch_len].offset = offset;
batch_len++;
}
if (batch_len != 0) {
s->q.batch_len[batch] = batch_len;
batch = (batch + 1) & (batch_count - 1);
}
}
batch_len = shuffle(batch_len);
if (t == 0) {
while (s->q.batch_len[batch] != 0) {
busy_wait(20);
}
}
if (batch_len != batch_size) { break; }
}
if (!t) {
s->q.prefetch_end = 1;
s->q.batch_len[batch] = -1;
s->bytes_left = bytes_left;
if (bytes_left != 0) { s->error = -2; }
}
}
/**
* @brief process LZ77 symbols and output uncompressed stream
*
* @param s decompression state
* @param t thread id within participating group (lane id)
* @param temp_storage temporary storage used by the algorithm
*
* NOTE: No error checks at this stage (WARP0 responsible for not sending offsets and lengths that
*would result in out-of-bounds accesses)
*/
template <typename Storage>
__device__ void snappy_process_symbols(unsnap_state_s* s, int t, Storage& temp_storage)
{
auto const literal_base = s->base;
auto out = s->dst.data();
int batch = 0;
do {
volatile unsnap_batch_s* b = &s->q.batch[batch * batch_size];
int32_t batch_len, blen_t, dist_t;
if (t == 0) {
while ((batch_len = s->q.batch_len[batch]) == 0) {
busy_wait(20);
}
} else {
batch_len = 0;
}
batch_len = shuffle(batch_len);
if (batch_len <= 0) { break; }
if (t < batch_len) {
blen_t = b[t].len;
dist_t = b[t].offset;
} else {
blen_t = dist_t = 0;
}
// Try to combine as many small entries as possible, but try to avoid doing that
// if we see a small repeat distance 8 bytes or less
if (shuffle(min((uint32_t)dist_t, (uint32_t)shuffle_xor(dist_t, 1))) > 8) {
uint32_t n;
do {
uint32_t bofs = WarpReducePos32(blen_t, t);
uint32_t stop_mask = ballot((uint32_t)dist_t < bofs);
uint32_t start_mask =
hipcub::WarpReduce<uint32_t>(temp_storage).Sum((bofs < 32 && t < batch_len) ? 1 << bofs : 0);
start_mask = shuffle(start_mask);
n = min(min((uint32_t)__popc(start_mask), (uint32_t)(__ffs(stop_mask) - 1u)),
(uint32_t)batch_len);
if (n != 0) {
uint32_t it = __popc(start_mask & ((2 << t) - 1));
uint32_t tr = t - shuffle(bofs - blen_t, it);
int32_t dist = shuffle(dist_t, it);
if (it < n) {
uint8_t const* src = (dist > 0) ? (out + t - dist) : (literal_base + tr - dist);
out[t] = *src;
}
out += shuffle(bofs, n - 1);
blen_t = shuffle(blen_t, (n + t) & 0x1f);
dist_t = shuffle(dist_t, (n + t) & 0x1f);
batch_len -= n;
}
} while (n >= 4);
}
for (int i = 0; i < batch_len; i++) {
int32_t blen = shuffle(blen_t, i);
int32_t dist = shuffle(dist_t, i);
int32_t blen2 = (i + 1 < batch_len) ? shuffle(blen_t, i + 1) : 32;
// Try to combine consecutive small entries if they are independent
if ((uint32_t)dist >= (uint32_t)blen && blen + blen2 <= 32) {
int32_t dist2 = shuffle(dist_t, i + 1);
if ((uint32_t)dist2 >= (uint32_t)(blen + blen2)) {
int32_t d;
if (t < blen) {
d = dist;
} else {
dist = dist2;
d = (dist2 <= 0) ? dist2 + blen : dist2;
}
blen += blen2;
if (t < blen) {
uint8_t const* src = (dist > 0) ? (out - d) : (literal_base - d);
out[t] = src[t];
}
out += blen;
i++;
continue;
}
}
if (dist > 0) {
// Copy
uint8_t b0, b1;
if (t < blen) {
uint32_t pos = t;
uint8_t const* src = out + ((pos >= dist) ? (pos % dist) : pos) - dist;
b0 = *src;
}
if (32 + t < blen) {
uint32_t pos = 32 + t;
uint8_t const* src = out + ((pos >= dist) ? (pos % dist) : pos) - dist;
b1 = *src;
}
if (t < blen) { out[t] = b0; }
if (32 + t < blen) { out[32 + t] = b1; }
} else {
// Literal
uint8_t b0, b1;
dist = -dist;
while (blen >= 64) {
b0 = literal_base[dist + t];
b1 = literal_base[dist + 32 + t];
out[t] = b0;
out[32 + t] = b1;
dist += 64;
out += 64;
blen -= 64;
}
if (t < blen) { b0 = literal_base[dist + t]; }
if (32 + t < blen) { b1 = literal_base[dist + 32 + t]; }
if (t < blen) { out[t] = b0; }
if (32 + t < blen) { out[32 + t] = b1; }
}
out += blen;
}
__syncwarp();
if (t == 0) { s->q.batch_len[batch] = 0; }
batch = (batch + 1) & (batch_count - 1);
} while (true);
}
/**
* @brief Snappy decompression kernel
* See http://github.com/google/snappy/blob/master/format_description.txt
*
* blockDim {128,1,1}
*
* @param[in] inputs Source & destination information per block
* @param[out] outputs Decompression status per block
*/
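// Work split inside the 128-thread block: warp 0 decodes symbol lengths/offsets, warp 1
// prefetches the compressed byte stream, warp 2 performs the LZ77 expansion, and warp 3
// stays idle during decompression.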
template <int block_size>
__global__ void __launch_bounds__(block_size)
unsnap_kernel(device_span<device_span<uint8_t const> const> inputs,
device_span<device_span<uint8_t> const> outputs,
device_span<compression_result> results)
{
__shared__ __align__(16) unsnap_state_s state_g;
__shared__ hipcub::WarpReduce<uint32_t>::TempStorage temp_storage;
int t = threadIdx.x;
unsnap_state_s* s = &state_g;
int strm_id = blockIdx.x;
if (t < batch_count) { s->q.batch_len[t] = 0; }
__syncthreads();
if (!t) {
s->src = inputs[strm_id];
s->dst = outputs[strm_id];
auto cur = s->src.begin();
auto const end = s->src.end();
s->error = 0;
if (log_cyclecount) { s->tstart = clock(); }
if (cur < end) {
// Read uncompressed size (varint), limited to 32-bit
uint32_t uncompressed_size = *cur++;
if (uncompressed_size > 0x7f) {
uint32_t c = (cur < end) ? *cur++ : 0;
uncompressed_size = (uncompressed_size & 0x7f) | (c << 7);
if (uncompressed_size >= (0x80 << 7)) {
c = (cur < end) ? *cur++ : 0;
uncompressed_size = (uncompressed_size & ((0x7f << 7) | 0x7f)) | (c << 14);
if (uncompressed_size >= (0x80 << 14)) {
c = (cur < end) ? *cur++ : 0;
uncompressed_size =
(uncompressed_size & ((0x7f << 14) | (0x7f << 7) | 0x7f)) | (c << 21);
if (uncompressed_size >= (0x80 << 21)) {
c = (cur < end) ? *cur++ : 0;
if (c < 0x8)
uncompressed_size =
(uncompressed_size & ((0x7f << 21) | (0x7f << 14) | (0x7f << 7) | 0x7f)) |
(c << 28);
else
s->error = -1;
}
}
}
}
s->uncompressed_size = uncompressed_size;
s->bytes_left = uncompressed_size;
s->base = cur;
s->end = end;
if ((cur >= end && uncompressed_size != 0) || (uncompressed_size > s->dst.size())) {
s->error = -1;
}
} else {
s->error = -1;
}
s->q.prefetch_end = 0;
s->q.prefetch_wrpos = 0;
s->q.prefetch_rdpos = 0;
}
__syncthreads();
if (!s->error) {
if (t < 32) {
// WARP0: decode lengths and offsets
snappy_decode_symbols(s, t);
} else if (t < 64) {
// WARP1: prefetch byte stream for WARP0
snappy_prefetch_bytestream(s, t & 0x1f);
} else if (t < 96) {
// WARP2: LZ77
snappy_process_symbols(s, t & 0x1f, temp_storage);
}
__syncthreads();
}
if (!t) {
results[strm_id].bytes_written = s->uncompressed_size - s->bytes_left;
results[strm_id].status =
(s->error == 0) ? compression_status::SUCCESS : compression_status::FAILURE;
if (log_cyclecount) {
results[strm_id].reserved = clock() - s->tstart;
} else {
results[strm_id].reserved = 0;
}
}
}
void gpu_unsnap(device_span<device_span<uint8_t const> const> inputs,
device_span<device_span<uint8_t> const> outputs,
device_span<compression_result> results,
rmm::cuda_stream_view stream)
{
dim3 dim_block(128, 1); // 4 warps per stream, 1 stream per block
dim3 dim_grid(inputs.size(), 1); // TODO: Check max grid dimensions vs max expected count
hipLaunchKernelGGL(( unsnap_kernel<128>), dim3(dim_grid), dim3(dim_block), 0, stream.value(), inputs, outputs, results);
}
} // namespace io
} // namespace cudf
| d9bea82d838156acf4ec049e98ed561801655e96.cu | /*
* Copyright (c) 2018-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "gpuinflate.hpp"
#include <io/utilities/block_utils.cuh>
#include <rmm/cuda_stream_view.hpp>
#include <cub/cub.cuh>
namespace cudf {
namespace io {
constexpr int32_t batch_size = (1 << 5);
constexpr int32_t batch_count = (1 << 2);
constexpr int32_t prefetch_size = (1 << 9); // 512B, in 32B chunks
constexpr bool log_cyclecount = false;
void __device__ busy_wait(size_t cycles)
{
clock_t start = clock();
for (;;) {
clock_t const now = clock();
clock_t const elapsed = now > start ? now - start : now + (0xffff'ffff - start);
if (elapsed >= cycles) return;
}
}
/**
* @brief Describes a single LZ77 symbol (single entry in batch)
*/
struct unsnap_batch_s {
int32_t len; // 1..64 = Number of bytes
uint32_t
offset; // copy distance if greater than zero or negative of literal offset in byte stream
};
/**
* @brief Queue structure used to exchange data between warps
*/
struct unsnap_queue_s {
uint32_t prefetch_wrpos; ///< Prefetcher write position
uint32_t prefetch_rdpos; ///< Prefetch consumer read position
int32_t prefetch_end; ///< Prefetch enable flag (nonzero stops prefetcher)
int32_t batch_len[batch_count]; ///< Length of each batch - <0:end, 0:not ready, >0:symbol count
unsnap_batch_s batch[batch_count * batch_size]; ///< LZ77 batch data
uint8_t buf[prefetch_size]; ///< Prefetch buffer
};
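// Cross-warp handshake: the prefetch warp fills buf[] and advances prefetch_wrpos, the decode
// warp consumes it and advances prefetch_rdpos, and batch_len[] hands LZ77 batches from the
// decode warp to the output warp (0 = slot free, >0 = symbol count, <0 = end of stream).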
/**
* @brief snappy decompression state
*/
struct unsnap_state_s {
uint8_t const* base; ///< base ptr of compressed stream
uint8_t const* end; ///< end of compressed stream
uint32_t uncompressed_size; ///< uncompressed stream size
uint32_t bytes_left; ///< remaining bytes to decompress
int32_t error; ///< current error status
uint32_t tstart; ///< start time for perf logging
volatile unsnap_queue_s q; ///< queue for cross-warp communication
device_span<uint8_t const> src; ///< input for current block
device_span<uint8_t> dst; ///< output for current block
};
inline __device__ volatile uint8_t& byte_access(unsnap_state_s* s, uint32_t pos)
{
return s->q.buf[pos & (prefetch_size - 1)];
}
/**
* @brief prefetches data for the symbol decoding stage
*
* @param s decompression state
* @param t warp lane id
*/
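// The prefetch buffer is a 512-byte ring (prefetch_size); byte_access() wraps positions with
// (prefetch_size - 1), and the producer stalls whenever it would get more than
// prefetch_size - 32 bytes ahead of the decoder's read position.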
__device__ void snappy_prefetch_bytestream(unsnap_state_s* s, int t)
{
uint8_t const* base = s->base;
auto end = (uint32_t)(s->end - base);
auto align_bytes = (uint32_t)(0x20 - (0x1f & reinterpret_cast<uintptr_t>(base)));
int32_t pos = min(align_bytes, end);
int32_t blen;
// Start by prefetching up to the next a 32B-aligned location
if (t < pos) { s->q.buf[t] = base[t]; }
blen = 0;
do {
__syncwarp();
if (!t) {
uint32_t minrdpos;
s->q.prefetch_wrpos = pos;
minrdpos = pos - min(pos, prefetch_size - 32u);
blen = (int)min(32u, end - pos);
for (;;) {
uint32_t rdpos = s->q.prefetch_rdpos;
if (rdpos >= minrdpos) break;
if (s->q.prefetch_end) {
blen = 0;
break;
}
busy_wait(20);
}
}
blen = shuffle(blen);
if (t < blen) { byte_access(s, pos + t) = base[pos + t]; }
pos += blen;
} while (blen > 0);
}
/**
* @brief Lookup table for get_len3_mask()
*
* Indexed by a 10-bit pattern, contains the corresponding 4-bit mask of
* 3-byte code lengths in the lower 4 bits, along with the total number of
* bytes used for coding the four lengths in the upper 4 bits.
* The upper 4-bit value could also be obtained by 8+__popc(mask4)
*
* for (uint32_t k = 0; k < 1024; k++)
* {
* for (uint32_t i = 0, v = 0, b = k, n = 0; i < 4; i++)
* {
* v |= (b & 1) << i;
* n += (b & 1) + 2;
* b >>= (b & 1) + 2;
* }
* k_len3lut[k] = v | (n << 4);
* }
*/
static const uint8_t __device__ __constant__ k_len3lut[1 << 10] = {
0x80, 0x91, 0x80, 0x91, 0x92, 0x91, 0x92, 0x91, 0x80, 0xa3, 0x80, 0xa3, 0x92, 0xa3, 0x92, 0xa3,
0x94, 0x91, 0x94, 0x91, 0x92, 0x91, 0x92, 0x91, 0x94, 0xa3, 0x94, 0xa3, 0x92, 0xa3, 0x92, 0xa3,
0x80, 0xa5, 0x80, 0xa5, 0xa6, 0xa5, 0xa6, 0xa5, 0x80, 0xa3, 0x80, 0xa3, 0xa6, 0xa3, 0xa6, 0xa3,
0x94, 0xa5, 0x94, 0xa5, 0xa6, 0xa5, 0xa6, 0xa5, 0x94, 0xa3, 0x94, 0xa3, 0xa6, 0xa3, 0xa6, 0xa3,
0x98, 0x91, 0x98, 0x91, 0x92, 0x91, 0x92, 0x91, 0x98, 0xb7, 0x98, 0xb7, 0x92, 0xb7, 0x92, 0xb7,
0x94, 0x91, 0x94, 0x91, 0x92, 0x91, 0x92, 0x91, 0x94, 0xb7, 0x94, 0xb7, 0x92, 0xb7, 0x92, 0xb7,
0x98, 0xa5, 0x98, 0xa5, 0xa6, 0xa5, 0xa6, 0xa5, 0x98, 0xb7, 0x98, 0xb7, 0xa6, 0xb7, 0xa6, 0xb7,
0x94, 0xa5, 0x94, 0xa5, 0xa6, 0xa5, 0xa6, 0xa5, 0x94, 0xb7, 0x94, 0xb7, 0xa6, 0xb7, 0xa6, 0xb7,
0x80, 0xa9, 0x80, 0xa9, 0xaa, 0xa9, 0xaa, 0xa9, 0x80, 0xa3, 0x80, 0xa3, 0xaa, 0xa3, 0xaa, 0xa3,
0xac, 0xa9, 0xac, 0xa9, 0xaa, 0xa9, 0xaa, 0xa9, 0xac, 0xa3, 0xac, 0xa3, 0xaa, 0xa3, 0xaa, 0xa3,
0x80, 0xa5, 0x80, 0xa5, 0xa6, 0xa5, 0xa6, 0xa5, 0x80, 0xa3, 0x80, 0xa3, 0xa6, 0xa3, 0xa6, 0xa3,
0xac, 0xa5, 0xac, 0xa5, 0xa6, 0xa5, 0xa6, 0xa5, 0xac, 0xa3, 0xac, 0xa3, 0xa6, 0xa3, 0xa6, 0xa3,
0x98, 0xa9, 0x98, 0xa9, 0xaa, 0xa9, 0xaa, 0xa9, 0x98, 0xb7, 0x98, 0xb7, 0xaa, 0xb7, 0xaa, 0xb7,
0xac, 0xa9, 0xac, 0xa9, 0xaa, 0xa9, 0xaa, 0xa9, 0xac, 0xb7, 0xac, 0xb7, 0xaa, 0xb7, 0xaa, 0xb7,
0x98, 0xa5, 0x98, 0xa5, 0xa6, 0xa5, 0xa6, 0xa5, 0x98, 0xb7, 0x98, 0xb7, 0xa6, 0xb7, 0xa6, 0xb7,
0xac, 0xa5, 0xac, 0xa5, 0xa6, 0xa5, 0xa6, 0xa5, 0xac, 0xb7, 0xac, 0xb7, 0xa6, 0xb7, 0xa6, 0xb7,
0x80, 0x91, 0x80, 0x91, 0x92, 0x91, 0x92, 0x91, 0x80, 0xbb, 0x80, 0xbb, 0x92, 0xbb, 0x92, 0xbb,
0x94, 0x91, 0x94, 0x91, 0x92, 0x91, 0x92, 0x91, 0x94, 0xbb, 0x94, 0xbb, 0x92, 0xbb, 0x92, 0xbb,
0x80, 0xbd, 0x80, 0xbd, 0xbe, 0xbd, 0xbe, 0xbd, 0x80, 0xbb, 0x80, 0xbb, 0xbe, 0xbb, 0xbe, 0xbb,
0x94, 0xbd, 0x94, 0xbd, 0xbe, 0xbd, 0xbe, 0xbd, 0x94, 0xbb, 0x94, 0xbb, 0xbe, 0xbb, 0xbe, 0xbb,
0x98, 0x91, 0x98, 0x91, 0x92, 0x91, 0x92, 0x91, 0x98, 0xb7, 0x98, 0xb7, 0x92, 0xb7, 0x92, 0xb7,
0x94, 0x91, 0x94, 0x91, 0x92, 0x91, 0x92, 0x91, 0x94, 0xb7, 0x94, 0xb7, 0x92, 0xb7, 0x92, 0xb7,
0x98, 0xbd, 0x98, 0xbd, 0xbe, 0xbd, 0xbe, 0xbd, 0x98, 0xb7, 0x98, 0xb7, 0xbe, 0xb7, 0xbe, 0xb7,
0x94, 0xbd, 0x94, 0xbd, 0xbe, 0xbd, 0xbe, 0xbd, 0x94, 0xb7, 0x94, 0xb7, 0xbe, 0xb7, 0xbe, 0xb7,
0x80, 0xa9, 0x80, 0xa9, 0xaa, 0xa9, 0xaa, 0xa9, 0x80, 0xbb, 0x80, 0xbb, 0xaa, 0xbb, 0xaa, 0xbb,
0xac, 0xa9, 0xac, 0xa9, 0xaa, 0xa9, 0xaa, 0xa9, 0xac, 0xbb, 0xac, 0xbb, 0xaa, 0xbb, 0xaa, 0xbb,
0x80, 0xbd, 0x80, 0xbd, 0xbe, 0xbd, 0xbe, 0xbd, 0x80, 0xbb, 0x80, 0xbb, 0xbe, 0xbb, 0xbe, 0xbb,
0xac, 0xbd, 0xac, 0xbd, 0xbe, 0xbd, 0xbe, 0xbd, 0xac, 0xbb, 0xac, 0xbb, 0xbe, 0xbb, 0xbe, 0xbb,
0x98, 0xa9, 0x98, 0xa9, 0xaa, 0xa9, 0xaa, 0xa9, 0x98, 0xb7, 0x98, 0xb7, 0xaa, 0xb7, 0xaa, 0xb7,
0xac, 0xa9, 0xac, 0xa9, 0xaa, 0xa9, 0xaa, 0xa9, 0xac, 0xb7, 0xac, 0xb7, 0xaa, 0xb7, 0xaa, 0xb7,
0x98, 0xbd, 0x98, 0xbd, 0xbe, 0xbd, 0xbe, 0xbd, 0x98, 0xb7, 0x98, 0xb7, 0xbe, 0xb7, 0xbe, 0xb7,
0xac, 0xbd, 0xac, 0xbd, 0xbe, 0xbd, 0xbe, 0xbd, 0xac, 0xb7, 0xac, 0xb7, 0xbe, 0xb7, 0xbe, 0xb7,
0x80, 0x91, 0x80, 0x91, 0x92, 0x91, 0x92, 0x91, 0x80, 0xa3, 0x80, 0xa3, 0x92, 0xa3, 0x92, 0xa3,
0x94, 0x91, 0x94, 0x91, 0x92, 0x91, 0x92, 0x91, 0x94, 0xa3, 0x94, 0xa3, 0x92, 0xa3, 0x92, 0xa3,
0x80, 0xa5, 0x80, 0xa5, 0xa6, 0xa5, 0xa6, 0xa5, 0x80, 0xa3, 0x80, 0xa3, 0xa6, 0xa3, 0xa6, 0xa3,
0x94, 0xa5, 0x94, 0xa5, 0xa6, 0xa5, 0xa6, 0xa5, 0x94, 0xa3, 0x94, 0xa3, 0xa6, 0xa3, 0xa6, 0xa3,
0x98, 0x91, 0x98, 0x91, 0x92, 0x91, 0x92, 0x91, 0x98, 0xcf, 0x98, 0xcf, 0x92, 0xcf, 0x92, 0xcf,
0x94, 0x91, 0x94, 0x91, 0x92, 0x91, 0x92, 0x91, 0x94, 0xcf, 0x94, 0xcf, 0x92, 0xcf, 0x92, 0xcf,
0x98, 0xa5, 0x98, 0xa5, 0xa6, 0xa5, 0xa6, 0xa5, 0x98, 0xcf, 0x98, 0xcf, 0xa6, 0xcf, 0xa6, 0xcf,
0x94, 0xa5, 0x94, 0xa5, 0xa6, 0xa5, 0xa6, 0xa5, 0x94, 0xcf, 0x94, 0xcf, 0xa6, 0xcf, 0xa6, 0xcf,
0x80, 0xa9, 0x80, 0xa9, 0xaa, 0xa9, 0xaa, 0xa9, 0x80, 0xa3, 0x80, 0xa3, 0xaa, 0xa3, 0xaa, 0xa3,
0xac, 0xa9, 0xac, 0xa9, 0xaa, 0xa9, 0xaa, 0xa9, 0xac, 0xa3, 0xac, 0xa3, 0xaa, 0xa3, 0xaa, 0xa3,
0x80, 0xa5, 0x80, 0xa5, 0xa6, 0xa5, 0xa6, 0xa5, 0x80, 0xa3, 0x80, 0xa3, 0xa6, 0xa3, 0xa6, 0xa3,
0xac, 0xa5, 0xac, 0xa5, 0xa6, 0xa5, 0xa6, 0xa5, 0xac, 0xa3, 0xac, 0xa3, 0xa6, 0xa3, 0xa6, 0xa3,
0x98, 0xa9, 0x98, 0xa9, 0xaa, 0xa9, 0xaa, 0xa9, 0x98, 0xcf, 0x98, 0xcf, 0xaa, 0xcf, 0xaa, 0xcf,
0xac, 0xa9, 0xac, 0xa9, 0xaa, 0xa9, 0xaa, 0xa9, 0xac, 0xcf, 0xac, 0xcf, 0xaa, 0xcf, 0xaa, 0xcf,
0x98, 0xa5, 0x98, 0xa5, 0xa6, 0xa5, 0xa6, 0xa5, 0x98, 0xcf, 0x98, 0xcf, 0xa6, 0xcf, 0xa6, 0xcf,
0xac, 0xa5, 0xac, 0xa5, 0xa6, 0xa5, 0xa6, 0xa5, 0xac, 0xcf, 0xac, 0xcf, 0xa6, 0xcf, 0xa6, 0xcf,
0x80, 0x91, 0x80, 0x91, 0x92, 0x91, 0x92, 0x91, 0x80, 0xbb, 0x80, 0xbb, 0x92, 0xbb, 0x92, 0xbb,
0x94, 0x91, 0x94, 0x91, 0x92, 0x91, 0x92, 0x91, 0x94, 0xbb, 0x94, 0xbb, 0x92, 0xbb, 0x92, 0xbb,
0x80, 0xbd, 0x80, 0xbd, 0xbe, 0xbd, 0xbe, 0xbd, 0x80, 0xbb, 0x80, 0xbb, 0xbe, 0xbb, 0xbe, 0xbb,
0x94, 0xbd, 0x94, 0xbd, 0xbe, 0xbd, 0xbe, 0xbd, 0x94, 0xbb, 0x94, 0xbb, 0xbe, 0xbb, 0xbe, 0xbb,
0x98, 0x91, 0x98, 0x91, 0x92, 0x91, 0x92, 0x91, 0x98, 0xcf, 0x98, 0xcf, 0x92, 0xcf, 0x92, 0xcf,
0x94, 0x91, 0x94, 0x91, 0x92, 0x91, 0x92, 0x91, 0x94, 0xcf, 0x94, 0xcf, 0x92, 0xcf, 0x92, 0xcf,
0x98, 0xbd, 0x98, 0xbd, 0xbe, 0xbd, 0xbe, 0xbd, 0x98, 0xcf, 0x98, 0xcf, 0xbe, 0xcf, 0xbe, 0xcf,
0x94, 0xbd, 0x94, 0xbd, 0xbe, 0xbd, 0xbe, 0xbd, 0x94, 0xcf, 0x94, 0xcf, 0xbe, 0xcf, 0xbe, 0xcf,
0x80, 0xa9, 0x80, 0xa9, 0xaa, 0xa9, 0xaa, 0xa9, 0x80, 0xbb, 0x80, 0xbb, 0xaa, 0xbb, 0xaa, 0xbb,
0xac, 0xa9, 0xac, 0xa9, 0xaa, 0xa9, 0xaa, 0xa9, 0xac, 0xbb, 0xac, 0xbb, 0xaa, 0xbb, 0xaa, 0xbb,
0x80, 0xbd, 0x80, 0xbd, 0xbe, 0xbd, 0xbe, 0xbd, 0x80, 0xbb, 0x80, 0xbb, 0xbe, 0xbb, 0xbe, 0xbb,
0xac, 0xbd, 0xac, 0xbd, 0xbe, 0xbd, 0xbe, 0xbd, 0xac, 0xbb, 0xac, 0xbb, 0xbe, 0xbb, 0xbe, 0xbb,
0x98, 0xa9, 0x98, 0xa9, 0xaa, 0xa9, 0xaa, 0xa9, 0x98, 0xcf, 0x98, 0xcf, 0xaa, 0xcf, 0xaa, 0xcf,
0xac, 0xa9, 0xac, 0xa9, 0xaa, 0xa9, 0xaa, 0xa9, 0xac, 0xcf, 0xac, 0xcf, 0xaa, 0xcf, 0xaa, 0xcf,
0x98, 0xbd, 0x98, 0xbd, 0xbe, 0xbd, 0xbe, 0xbd, 0x98, 0xcf, 0x98, 0xcf, 0xbe, 0xcf, 0xbe, 0xcf,
0xac, 0xbd, 0xac, 0xbd, 0xbe, 0xbd, 0xbe, 0xbd, 0xac, 0xcf, 0xac, 0xcf, 0xbe, 0xcf, 0xbe, 0xcf};
/**
* @brief Returns a 32-bit mask where 1 means 3-byte code length and 0 means 2-byte
* code length, given an input mask of up to 96 bits.
*
* Implemented by doing 8 consecutive lookups, building the result 4-bit at a time
*/
inline __device__ uint32_t get_len3_mask(uint32_t v0, uint32_t v1, uint32_t v2)
{
uint32_t m, v, m4, n;
v = v0;
m4 = k_len3lut[v & 0x3ff];
m = m4 & 0xf;
n = m4 >> 4; // 8..12
v = v0 >> n;
m4 = k_len3lut[v & 0x3ff];
m |= (m4 & 0xf) << 4;
n += m4 >> 4; // 16..24
v = __funnelshift_r(v0, v1, n);
m4 = k_len3lut[v & 0x3ff];
m |= (m4 & 0xf) << 8;
n += m4 >> 4; // 24..36
v >>= (m4 >> 4);
m4 = k_len3lut[v & 0x3ff];
m |= (m4 & 0xf) << 12;
n = (n + (m4 >> 4)) & 0x1f; // (32..48) % 32 = 0..16
v1 = __funnelshift_r(v1, v2, n);
v2 >>= n;
v = v1;
m4 = k_len3lut[v & 0x3ff];
m |= (m4 & 0xf) << 16;
n = m4 >> 4; // 8..12
v = v1 >> n;
m4 = k_len3lut[v & 0x3ff];
m |= (m4 & 0xf) << 20;
n += m4 >> 4; // 16..24
v = __funnelshift_r(v1, v2, n);
m4 = k_len3lut[v & 0x3ff];
m |= (m4 & 0xf) << 24;
n += m4 >> 4; // 24..36
v >>= (m4 >> 4);
m4 = k_len3lut[v & 0x3ff];
m |= (m4 & 0xf) << 28;
return m;
}
/**
* @brief Returns a 32-bit mask where each 2-bit pair contains the symbol length
* minus 2, given two input masks each containing bit0 or bit1 of the corresponding
* code length minus 2 for up to 32 bytes
*/
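// Each resulting 2-bit code m therefore encodes a symbol of (m + 2) bytes, i.e. 2..5 bytes.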
inline __device__ uint32_t get_len5_mask(uint32_t v0, uint32_t v1)
{
uint32_t m;
m = (v1 & 1) * 2 + (v0 & 1);
v0 >>= (m + 2);
v1 >>= (m + 1);
for (uint32_t i = 1; i < 16; i++) {
uint32_t m2 = (v1 & 2) | (v0 & 1);
uint32_t n = m2 + 2;
m |= m2 << (i * 2);
v0 >>= n;
v1 >>= n;
}
return m;
}
/**
* @brief decode symbols and output LZ77 batches (single-warp)
*
* @param s decompression state
* @param t warp lane id
*/
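// Snappy element tags (low 2 bits of the first byte): 00 = literal, 01 = copy with a 1-byte
// offset extension (3-bit length 4..11, 11-bit offset), 10 = copy with a 2-byte little-endian
// offset (6-bit length 1..64), 11 = copy with a 4-byte offset.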
__device__ void snappy_decode_symbols(unsnap_state_s* s, uint32_t t)
{
uint32_t cur = 0;
auto end = static_cast<uint32_t>(s->end - s->base);
uint32_t bytes_left = s->uncompressed_size;
uint32_t dst_pos = 0;
int32_t batch = 0;
for (;;) {
int32_t batch_len;
volatile unsnap_batch_s* b;
// Wait for prefetcher
if (t == 0) {
s->q.prefetch_rdpos = cur;
#pragma unroll(1) // We don't want unrolling here
while (s->q.prefetch_wrpos < min(cur + 5 * batch_size, end)) {
busy_wait(10);
}
b = &s->q.batch[batch * batch_size];
}
// Process small symbols in parallel: for data that does not get good compression,
// the stream will consist of a large number of short literals (1-byte or 2-byte)
// followed by short repeat runs. This results in many 2-byte or 3-byte symbols
// that can all be decoded in parallel once we know the symbol length.
{
uint32_t v0, v1, v2, len3_mask, cur_t, is_long_sym, short_sym_mask;
uint32_t b0;
cur = shuffle(cur);
cur_t = cur + t;
b0 = byte_access(s, cur_t);
v0 = ballot((b0 == 4) || (b0 & 2));
b0 = byte_access(s, cur_t + 32);
v1 = ballot((b0 == 4) || (b0 & 2));
b0 = byte_access(s, cur_t + 64);
v2 = ballot((b0 == 4) || (b0 & 2));
len3_mask = shuffle((t == 0) ? get_len3_mask(v0, v1, v2) : 0);
cur_t = cur + 2 * t + __popc(len3_mask & ((1 << t) - 1));
b0 = byte_access(s, cur_t);
is_long_sym = ((b0 & ~4) != 0) && (((b0 + 1) & 2) == 0);
short_sym_mask = ballot(is_long_sym);
batch_len = 0;
b = reinterpret_cast<volatile unsnap_batch_s*>(shuffle(reinterpret_cast<uintptr_t>(b)));
if (!(short_sym_mask & 1)) {
batch_len = shuffle((t == 0) ? (short_sym_mask) ? __ffs(short_sym_mask) - 1 : 32 : 0);
if (batch_len != 0) {
uint32_t blen = 0;
int32_t ofs = 0;
if (t < batch_len) {
blen = (b0 & 1) ? ((b0 >> 2) & 7) + 4 : ((b0 >> 2) + 1);
ofs = (b0 & 1) ? ((b0 & 0xe0) << 3) | byte_access(s, cur_t + 1)
: (b0 & 2) ? byte_access(s, cur_t + 1) | (byte_access(s, cur_t + 2) << 8)
: -(int32_t)(cur_t + 1);
b[t].len = blen;
b[t].offset = ofs;
ofs += blen; // for correct out-of-range detection below
}
blen = WarpReducePos32(blen, t);
bytes_left = shuffle(bytes_left);
dst_pos = shuffle(dst_pos);
short_sym_mask = __ffs(ballot(blen > bytes_left || ofs > (int32_t)(dst_pos + blen)));
if (short_sym_mask != 0) { batch_len = min(batch_len, short_sym_mask - 1); }
if (batch_len != 0) {
blen = shuffle(blen, batch_len - 1);
cur = shuffle(cur_t, batch_len - 1) + 2 + ((len3_mask >> (batch_len - 1)) & 1);
if (t == 0) {
dst_pos += blen;
bytes_left -= blen;
}
}
}
}
// Check if the batch was stopped by a 3-byte or 4-byte literal
if (batch_len < batch_size - 2 && shuffle(b0 & ~4, batch_len) == 8) {
// If so, run a slower version of the above that can also handle 3/4-byte literal sequences
uint32_t batch_add;
do {
uint32_t clen, mask_t;
cur_t = cur + t;
b0 = byte_access(s, cur_t);
clen = (b0 & 3) ? (b0 & 2) ? 1 : 0 : (b0 >> 2); // symbol length minus 2
v0 = ballot(clen & 1);
v1 = ballot((clen >> 1) & 1);
len3_mask = shuffle((t == 0) ? get_len5_mask(v0, v1) : 0);
mask_t = (1 << (2 * t)) - 1;
cur_t = cur + 2 * t + 2 * __popc((len3_mask & 0xaaaa'aaaa) & mask_t) +
__popc((len3_mask & 0x5555'5555) & mask_t);
b0 = byte_access(s, cur_t);
is_long_sym = ((b0 & 3) ? ((b0 & 3) == 3) : (b0 > 3 * 4)) || (cur_t >= cur + 32) ||
(batch_len + t >= batch_size);
batch_add = __ffs(ballot(is_long_sym)) - 1;
if (batch_add != 0) {
uint32_t blen = 0;
int32_t ofs = 0;
if (t < batch_add) {
blen = (b0 & 1) ? ((b0 >> 2) & 7) + 4 : ((b0 >> 2) + 1);
ofs = (b0 & 1) ? ((b0 & 0xe0) << 3) | byte_access(s, cur_t + 1)
: (b0 & 2) ? byte_access(s, cur_t + 1) | (byte_access(s, cur_t + 2) << 8)
: -(int32_t)(cur_t + 1);
b[batch_len + t].len = blen;
b[batch_len + t].offset = ofs;
ofs += blen; // for correct out-of-range detection below
}
blen = WarpReducePos32(blen, t);
bytes_left = shuffle(bytes_left);
dst_pos = shuffle(dst_pos);
short_sym_mask = __ffs(ballot(blen > bytes_left || ofs > (int32_t)(dst_pos + blen)));
if (short_sym_mask != 0) { batch_add = min(batch_add, short_sym_mask - 1); }
if (batch_add != 0) {
blen = shuffle(blen, batch_add - 1);
cur = shuffle(cur_t, batch_add - 1) + 2 + ((len3_mask >> ((batch_add - 1) * 2)) & 3);
if (t == 0) {
dst_pos += blen;
bytes_left -= blen;
}
batch_len += batch_add;
}
}
} while (batch_add >= 6 && batch_len < batch_size - 2);
}
}
if (t == 0) {
while (bytes_left > 0 && batch_len < batch_size) {
uint32_t blen, offset;
uint8_t b0 = byte_access(s, cur);
if (b0 & 3) {
uint8_t b1 = byte_access(s, cur + 1);
if (!(b0 & 2)) {
// xxxxxx01.oooooooo: copy with 3-bit length, 11-bit offset
offset = ((b0 & 0xe0) << 3) | b1;
blen = ((b0 >> 2) & 7) + 4;
cur += 2;
} else {
// xxxxxx1x: copy with 6-bit length, 2-byte or 4-byte offset
offset = b1 | (byte_access(s, cur + 2) << 8);
if (b0 & 1) // 4-byte offset
{
offset |= (byte_access(s, cur + 3) << 16) | (byte_access(s, cur + 4) << 24);
cur += 5;
} else {
cur += 3;
}
blen = (b0 >> 2) + 1;
}
dst_pos += blen;
if (offset - 1u >= dst_pos || bytes_left < blen) break;
bytes_left -= blen;
} else if (b0 < 4 * 4) {
// 0000xx00: short literal
blen = (b0 >> 2) + 1;
offset = -(int32_t)(cur + 1);
cur += 1 + blen;
dst_pos += blen;
if (bytes_left < blen) break;
bytes_left -= blen;
} else {
// xxxxxx00: literal
blen = b0 >> 2;
if (blen >= 60) {
uint32_t num_bytes = blen - 59;
blen = byte_access(s, cur + 1);
if (num_bytes > 1) {
blen |= byte_access(s, cur + 2) << 8;
if (num_bytes > 2) {
blen |= byte_access(s, cur + 3) << 16;
if (num_bytes > 3) { blen |= byte_access(s, cur + 4) << 24; }
}
}
cur += num_bytes;
}
cur += 1;
blen += 1;
offset = -(int32_t)cur;
cur += blen;
// Wait for prefetcher
s->q.prefetch_rdpos = cur;
#pragma unroll(1) // We don't want unrolling here
while (s->q.prefetch_wrpos < min(cur + 5 * batch_size, end)) {
busy_wait(10);
}
dst_pos += blen;
if (bytes_left < blen) break;
bytes_left -= blen;
}
b[batch_len].len = blen;
b[batch_len].offset = offset;
batch_len++;
}
if (batch_len != 0) {
s->q.batch_len[batch] = batch_len;
batch = (batch + 1) & (batch_count - 1);
}
}
batch_len = shuffle(batch_len);
if (t == 0) {
while (s->q.batch_len[batch] != 0) {
busy_wait(20);
}
}
if (batch_len != batch_size) { break; }
}
if (!t) {
s->q.prefetch_end = 1;
s->q.batch_len[batch] = -1;
s->bytes_left = bytes_left;
if (bytes_left != 0) { s->error = -2; }
}
}
/**
* @brief process LZ77 symbols and output uncompressed stream
*
* @param s decompression state
* @param t thread id within participating group (lane id)
* @param temp_storage temporary storage used by the algorithm
*
* NOTE: No error checks at this stage (WARP0 responsible for not sending offsets and lengths that
* would result in out-of-bounds accesses)
*/
template <typename Storage>
__device__ void snappy_process_symbols(unsnap_state_s* s, int t, Storage& temp_storage)
{
auto const literal_base = s->base;
auto out = s->dst.data();
int batch = 0;
do {
volatile unsnap_batch_s* b = &s->q.batch[batch * batch_size];
int32_t batch_len, blen_t, dist_t;
if (t == 0) {
while ((batch_len = s->q.batch_len[batch]) == 0) {
busy_wait(20);
}
} else {
batch_len = 0;
}
batch_len = shuffle(batch_len);
if (batch_len <= 0) { break; }
if (t < batch_len) {
blen_t = b[t].len;
dist_t = b[t].offset;
} else {
blen_t = dist_t = 0;
}
// Try to combine as many small entries as possible, but try to avoid doing that
// if we see a small repeat distance 8 bytes or less
if (shuffle(min((uint32_t)dist_t, (uint32_t)shuffle_xor(dist_t, 1))) > 8) {
uint32_t n;
do {
uint32_t bofs = WarpReducePos32(blen_t, t);
uint32_t stop_mask = ballot((uint32_t)dist_t < bofs);
uint32_t start_mask =
cub::WarpReduce<uint32_t>(temp_storage).Sum((bofs < 32 && t < batch_len) ? 1 << bofs : 0);
start_mask = shuffle(start_mask);
n = min(min((uint32_t)__popc(start_mask), (uint32_t)(__ffs(stop_mask) - 1u)),
(uint32_t)batch_len);
if (n != 0) {
uint32_t it = __popc(start_mask & ((2 << t) - 1));
uint32_t tr = t - shuffle(bofs - blen_t, it);
int32_t dist = shuffle(dist_t, it);
if (it < n) {
uint8_t const* src = (dist > 0) ? (out + t - dist) : (literal_base + tr - dist);
out[t] = *src;
}
out += shuffle(bofs, n - 1);
blen_t = shuffle(blen_t, (n + t) & 0x1f);
dist_t = shuffle(dist_t, (n + t) & 0x1f);
batch_len -= n;
}
} while (n >= 4);
}
for (int i = 0; i < batch_len; i++) {
int32_t blen = shuffle(blen_t, i);
int32_t dist = shuffle(dist_t, i);
int32_t blen2 = (i + 1 < batch_len) ? shuffle(blen_t, i + 1) : 32;
// Try to combine consecutive small entries if they are independent
if ((uint32_t)dist >= (uint32_t)blen && blen + blen2 <= 32) {
int32_t dist2 = shuffle(dist_t, i + 1);
if ((uint32_t)dist2 >= (uint32_t)(blen + blen2)) {
int32_t d;
if (t < blen) {
d = dist;
} else {
dist = dist2;
d = (dist2 <= 0) ? dist2 + blen : dist2;
}
blen += blen2;
if (t < blen) {
uint8_t const* src = (dist > 0) ? (out - d) : (literal_base - d);
out[t] = src[t];
}
out += blen;
i++;
continue;
}
}
if (dist > 0) {
// Copy
uint8_t b0, b1;
if (t < blen) {
uint32_t pos = t;
uint8_t const* src = out + ((pos >= dist) ? (pos % dist) : pos) - dist;
b0 = *src;
}
if (32 + t < blen) {
uint32_t pos = 32 + t;
uint8_t const* src = out + ((pos >= dist) ? (pos % dist) : pos) - dist;
b1 = *src;
}
if (t < blen) { out[t] = b0; }
if (32 + t < blen) { out[32 + t] = b1; }
} else {
// Literal
uint8_t b0, b1;
dist = -dist;
while (blen >= 64) {
b0 = literal_base[dist + t];
b1 = literal_base[dist + 32 + t];
out[t] = b0;
out[32 + t] = b1;
dist += 64;
out += 64;
blen -= 64;
}
if (t < blen) { b0 = literal_base[dist + t]; }
if (32 + t < blen) { b1 = literal_base[dist + 32 + t]; }
if (t < blen) { out[t] = b0; }
if (32 + t < blen) { out[32 + t] = b1; }
}
out += blen;
}
__syncwarp();
if (t == 0) { s->q.batch_len[batch] = 0; }
batch = (batch + 1) & (batch_count - 1);
} while (true);
}
/**
* @brief Snappy decompression kernel
* See http://github.com/google/snappy/blob/master/format_description.txt
*
* blockDim {128,1,1}
*
* @param[in] inputs Source & destination information per block
* @param[out] outputs Decompression status per block
*/
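// One thread block decompresses one input span: strm_id = blockIdx.x selects the
// source/destination pair, and gpu_unsnap() launches as many blocks as there are inputs.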
template <int block_size>
__global__ void __launch_bounds__(block_size)
unsnap_kernel(device_span<device_span<uint8_t const> const> inputs,
device_span<device_span<uint8_t> const> outputs,
device_span<compression_result> results)
{
__shared__ __align__(16) unsnap_state_s state_g;
__shared__ cub::WarpReduce<uint32_t>::TempStorage temp_storage;
int t = threadIdx.x;
unsnap_state_s* s = &state_g;
int strm_id = blockIdx.x;
if (t < batch_count) { s->q.batch_len[t] = 0; }
__syncthreads();
if (!t) {
s->src = inputs[strm_id];
s->dst = outputs[strm_id];
auto cur = s->src.begin();
auto const end = s->src.end();
s->error = 0;
if (log_cyclecount) { s->tstart = clock(); }
if (cur < end) {
// Read uncompressed size (varint), limited to 32-bit
uint32_t uncompressed_size = *cur++;
if (uncompressed_size > 0x7f) {
uint32_t c = (cur < end) ? *cur++ : 0;
uncompressed_size = (uncompressed_size & 0x7f) | (c << 7);
if (uncompressed_size >= (0x80 << 7)) {
c = (cur < end) ? *cur++ : 0;
uncompressed_size = (uncompressed_size & ((0x7f << 7) | 0x7f)) | (c << 14);
if (uncompressed_size >= (0x80 << 14)) {
c = (cur < end) ? *cur++ : 0;
uncompressed_size =
(uncompressed_size & ((0x7f << 14) | (0x7f << 7) | 0x7f)) | (c << 21);
if (uncompressed_size >= (0x80 << 21)) {
c = (cur < end) ? *cur++ : 0;
if (c < 0x8)
uncompressed_size =
(uncompressed_size & ((0x7f << 21) | (0x7f << 14) | (0x7f << 7) | 0x7f)) |
(c << 28);
else
s->error = -1;
}
}
}
}
s->uncompressed_size = uncompressed_size;
s->bytes_left = uncompressed_size;
s->base = cur;
s->end = end;
if ((cur >= end && uncompressed_size != 0) || (uncompressed_size > s->dst.size())) {
s->error = -1;
}
} else {
s->error = -1;
}
s->q.prefetch_end = 0;
s->q.prefetch_wrpos = 0;
s->q.prefetch_rdpos = 0;
}
__syncthreads();
if (!s->error) {
if (t < 32) {
// WARP0: decode lengths and offsets
snappy_decode_symbols(s, t);
} else if (t < 64) {
// WARP1: prefetch byte stream for WARP0
snappy_prefetch_bytestream(s, t & 0x1f);
} else if (t < 96) {
// WARP2: LZ77
snappy_process_symbols(s, t & 0x1f, temp_storage);
}
__syncthreads();
}
if (!t) {
results[strm_id].bytes_written = s->uncompressed_size - s->bytes_left;
results[strm_id].status =
(s->error == 0) ? compression_status::SUCCESS : compression_status::FAILURE;
if (log_cyclecount) {
results[strm_id].reserved = clock() - s->tstart;
} else {
results[strm_id].reserved = 0;
}
}
}
void gpu_unsnap(device_span<device_span<uint8_t const> const> inputs,
device_span<device_span<uint8_t> const> outputs,
device_span<compression_result> results,
rmm::cuda_stream_view stream)
{
dim3 dim_block(128, 1); // 4 warps per stream, 1 stream per block
dim3 dim_grid(inputs.size(), 1); // TODO: Check max grid dimensions vs max expected count
unsnap_kernel<128><<<dim_grid, dim_block, 0, stream.value()>>>(inputs, outputs, results);
}
} // namespace io
} // namespace cudf
|
a92214f16005b15ad96f7e0b854e9bcbc3f682d6.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
-- MAGMA (version 1.5.0-beta3) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date July 2014
zsymv.cu is nearly identical to zhemv.cu, just change names and drop cuConj.
@precisions normal z -> s d c
@author Mark Gates
*/
#include "common_magma.h"
#define PRECISION_z
#define NB_X 64
#define NB_Y 4
#define bank_shift 33
#define quarter_NB_X 16
#define half_NB_X 32
/*******************************************************************************
Lower case, compute block multiply, work = A*x, for any size n:
[ A11*x1 A12*x2 A13*x3 ] [ A11 A12 A13 ] [ x1 ]
[ --- (A21*x1 + A22*x2) A23*x3 ] = [ A21 A22 A23 ] * [ x2 ]
[ --- --- (A31*x1 + A32*x2 + A33*x3) ] [ A31 A32 A33 ] [ x3 ]
Uses a 64x4 thread block.
For diagonal tiles, covers a 64x64 tile using three 32x32 tiles (plus one gets transposed).
For off-diagonal tiles, covers a 64x64 tile using four 64x16 tiles.
In both cases, each thread multiplies 4 elements.
For rows past the bottom of the matrix, the A pointer is adjusted to be the
last valid row of A, which multiple threads will read.
Extra rows are ignored when saving results to work.
Columns past the right edge are explicitly ignored when loading.
x values past the bottom are set to zero, thus, extra columns are zeroed
when multiplying.
********************************************************************/
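// Thread geometry: each block owns one 64-row block row (grid.x = ceil(n/64)). The 64x4
// threads are re-indexed as a 32x8 grid (tx2, ty2) for the three 32x32 diagonal-tile passes,
// and as a 16x16 grid (tx4, ty4) when reducing the transposed 16x64 products into work().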
__global__ void
zhemv_kernel_L(
int n,
const magmaDoubleComplex * __restrict__ A, int lda,
const magmaDoubleComplex * __restrict__ x, int incx,
magmaDoubleComplex * __restrict__ work)
{
#if defined(PRECISION_s) || defined(PRECISION_d) || defined(PRECISION_c) || (__CUDA_ARCH__ >= 200)
// treats sA as 16x64 block
#define sA16(i_, j_) (sA[(i_)][(j_)]) // i.e., sA[ (i_)*(NB_X+3) + (j_) ]
// treats sA as 32x32 block
#define sA32(i_, j_) (sA[0][(i_) + bank_shift*(j_)])
// 64x4 thread block
const int tx = threadIdx.x;
const int ty = threadIdx.y;
const int blk = blockIdx.x;
const int blk_ind = NB_X * blk;
const int td = NB_X * ty + tx;
// 32x8 thread block
const int tx2 = td % half_NB_X;
const int ty2 = td / half_NB_X;
// If this blk has fewer than NB_X rows, partial is the number of valid rows,
// so tx = 0, ..., partial-1 are valid rows, and tx >= partial are invalid.
// Else, partial == 0.
const int partial = (blk == gridDim.x - 1 ? (n % NB_X) : 0);
magmaDoubleComplex psum, psum2;
magmaDoubleComplex total = MAGMA_Z_ZERO;
// sA is used as a 32x32 block, sA32(i,j),
// and as a 16x64 block, sA16(i,j), in different parts of the code.
// sA must be at least half_NB_X*bank_shift = 32x33 = 1056;
// quarter_NB_X*(NB_X + 2) = 16*(64 + 2) = 1056
__shared__ magmaDoubleComplex sA [quarter_NB_X][NB_X + 3]; /* Why +3? seems it only needs +2. Does +3 reduce bank conflicts? */
__shared__ magmaDoubleComplex sx [NB_X]; // for x[ blk ]
__shared__ magmaDoubleComplex sx2[NB_X]; // for x[ blk2 ], which cycles over all blocks left of diag
magmaDoubleComplex rA[4];
magmaDoubleComplex psums[4];
// --------------------
// load 64x1 block x(blk_ind + 0:63) into sx
x += (blk_ind + tx)*incx; // x is x(blk_ind + tx)
if ( ty == 0 ) {
if ( partial && tx >= partial ) {
sx[tx] = MAGMA_Z_ZERO;
}
else {
sx[tx] = x[0];
}
}
// --------------------
// move to 32x32 diag block
A += blk_ind * (lda + 1); // A is A(blk_ind, blk_ind)
A += ty2*lda + tx2; // A is A(blk_ind + tx2, blk_ind + ty2)
// load 32x32 diag block A(blk_ind + 0:31, blk_ind + 0:31) into sA,
// as four 32x8 sections one after another:
// columns 0:7, then 8:15, then 16:23, then 24:31
if ( partial ) {
if ( tx2 >= partial ) {
A = A - tx2 + (partial - 1);
}
#pragma unroll
for(int j=0; j < half_NB_X; j += 8) {
if ( ty2+j < partial ) {
sA32(tx2, ty2 + j) = A[j*lda];
}
}
if ( tx2 >= partial ) {
A = A + tx2 - (partial - 1);
}
}
else {
#pragma unroll
for(int j=0; j < half_NB_X; j += 8) {
sA32(tx2, ty2 + j) = A[j*lda];
}
}
__syncthreads();
// symmetrize 32x32 diag block, copying lower to upper triangle,
// as four 32x8 sections in parallel:
// columns 0,4,8,12,16,20,24,28; then 1,5,...,29; then 2,6,...,30, then 3,7,...,31
#pragma unroll
for(int j=ty2*4; j < ty2*4 + 4; j++) {
if ( j < tx2 )
sA32(j, tx2) = cuConj( sA32(tx2, j) );
}
__syncthreads();
// multiply 32x32 diag block * x
// each thread does partial row sA(tx2, ty2*4 : ty2*4 + 3)
psum = MAGMA_Z_ZERO;
#pragma unroll
for(int j=0; j < 4; j++) {
psum += sA32(tx2, ty2*4 + j) * sx[ty2*4 + j];
}
__syncthreads();
// store partial row sums
sA32(ty2, tx2) = psum;
__syncthreads();
// sum up partial row sums, so thread (tx2,0) has total for row (blk_ind + tx2)
if ( ty2 == 0 ) {
total = sA32(0, tx2) + sA32(1, tx2)
+ sA32(2, tx2) + sA32(3, tx2)
+ sA32(4, tx2) + sA32(5, tx2)
+ sA32(6, tx2) + sA32(7, tx2);
}
__syncthreads();
// --------------------
// move to next 32x32 diag block, then repeat steps from first diag block
A += half_NB_X + half_NB_X*lda; // A is A(blk_ind + NB/2 + tx2, blk_ind + NB/2 + ty2)
// load 32x32 diag block A[block + 0:31, block + 0:31] into sA
if ( partial ) {
if ( tx2 + half_NB_X >= partial ) {
A = A - (tx2 + half_NB_X) + (partial - 1);
}
#pragma unroll
for(int j=0; j < half_NB_X; j += 8) {
if ( ty2+j + half_NB_X < partial ) {
sA32(tx2, ty2 + j) = A[j*lda];
}
}
if ( tx2 + half_NB_X >= partial ) {
A = A + (tx2 + half_NB_X) - (partial - 1);
}
}
else {
#pragma unroll
for(int j=0; j < half_NB_X; j += 8) {
sA32(tx2, ty2 + j) = A[j*lda];
}
}
__syncthreads();
// symmetrize 32x32 diag block, copying lower to upper triangle
#pragma unroll
for(int j=ty2*4; j < ty2*4 + 4; j++) {
if ( j < tx2 )
sA32(j, tx2) = cuConj( sA32(tx2, j) );
}
__syncthreads();
// multiply 32x32 diag block * x
psum = MAGMA_Z_ZERO;
#pragma unroll
for(int j=0; j < 4; j++) {
psum += sA32(tx2, ty2*4 + j) * sx[half_NB_X + ty2*4 + j];
}
__syncthreads();
// store partial row sums
sA32(ty2, tx2) = psum;
__syncthreads();
// sum up partial row sums, so thread (tx2,1) has total for row (blk_ind + NB/2 + tx2)
if ( ty2 == 1 ) {
total = sA32(0, tx2) + sA32(1, tx2)
+ sA32(2, tx2) + sA32(3, tx2)
+ sA32(4, tx2) + sA32(5, tx2)
+ sA32(6, tx2) + sA32(7, tx2);
}
__syncthreads();
// --------------------
// move to off-diag 32x32 block
A -= half_NB_X*lda; // A is A(blk_ind + NB/2 + tx2, blk_ind + ty2)
// load 32x32 block of A into sA,
// as four 32x8 sections one after another:
// columns 0:7, then 8:15, then 16:23, then 24:31
if ( partial ) {
if ( tx2 + half_NB_X >= partial ) {
A = A - (tx2 + half_NB_X) + (partial - 1);
}
#pragma unroll
for(int j=0; j < half_NB_X; j += 8) {
if ( ty2+j < partial ) {
sA32(tx2, ty2 + j) = A[j*lda];
}
}
if ( tx2 + half_NB_X >= partial ) {
A = A + (tx2 + half_NB_X) - (partial - 1);
}
}
else {
#pragma unroll
for(int j=0; j < half_NB_X; j += 8) {
sA32(tx2, ty2 + j) = A[j*lda];
}
}
__syncthreads();
// multiply 32x32 block (below diag)
psum = MAGMA_Z_ZERO;
#pragma unroll
for(int j=0; j < 4; j++) {
psum += sA32(tx2, ty2 + j*8) * sx[j*8 + ty2];
}
//__syncthreads(); // no sync needed here
// multiply transposed 32x32 block (above diag)
psum2 = MAGMA_Z_ZERO;
#pragma unroll
for(int j=0; j < 4; j++) {
psum2 += cuConj( sA32(ty2*4 + j, tx2) ) * sx[half_NB_X + ty2*4 + j];
}
__syncthreads();
// store partial sums for non-transposed 32x32 block
sA32(ty2, tx2) = psum;
__syncthreads();
// sum up partial row sums, so thread (tx2,1) has total for row (blk_ind + NB/2 + tx2)
if ( ty2 == 1 ) {
total = total
+ sA32(0, tx2) + sA32(1, tx2)
+ sA32(2, tx2) + sA32(3, tx2)
+ sA32(4, tx2) + sA32(5, tx2)
+ sA32(6, tx2) + sA32(7, tx2);
}
__syncthreads();
// store partial sums for transposed 32x32 block
sA32(ty2, tx2) = psum2;
__syncthreads();
// sum up partial row sums, so thread (tx2,0) has total for row (blk_ind + tx2)
if ( ty2 == 0 ) {
total = total
+ sA32(0, tx2) + sA32(1, tx2)
+ sA32(2, tx2) + sA32(3, tx2)
+ sA32(4, tx2) + sA32(5, tx2)
+ sA32(6, tx2) + sA32(7, tx2);
}
__syncthreads();
// --------------------
// move to left most 64x64 block in block row, and
// switch thread offset from (tx2,ty2) 32x8 block to (tx,ty) 64x4 block
A -= half_NB_X; // A is A(blk_ind + tx2, blk_ind + ty2)
A -= ty2*lda + tx2; // A is A(blk_ind, blk_ind)
A -= blk_ind*lda; // A is A(blk_ind, 0)
A += 4*ty*lda + tx; // A is A(blk_ind + tx, 4*ty)
if ( partial && tx >= partial ) {
A = A - tx + (partial - 1);
}
x -= blk_ind * incx; // x is x(tx)
// 16x16 thread block
const int tx4 = td % quarter_NB_X;
const int ty4 = td / quarter_NB_X;
work += blk*lda + tx4; // work is work(tx4, blk)
for(int blk2=0; blk2 < blk; ++blk2) {
// load 64x1 block x(blk2_ind + 0:63) into sx2
// since this block is left of diagonal, x cannot be partial rows
if ( ty == 0 ) {
sx2[tx] = x[blk2*NB_X*incx];
}
__syncthreads();
for( int k=0; k < 4; k++ ) {
// load 64x16 block of A into rA, 4 elements per thread,
// as four 64x4 sections in parallel:
// columns 0,4,8,12; then 1,5,9,13; then 2,6,10,14; then 3,7,11,15
// since this block is left of diagonal, it cannot be partial columns
#pragma unroll
for(int j=0; j < 4; j++) {
rA[j] = A[j*lda];
}
// 1) multiply 64x16 block A * x2
// each thread does partial row rA(tx + 16*k, ty*4 + 16*k : ty*4 + 3 + 16*k)
// 2) multiply transposed 16x64 block A**H * x,
// storing each product Aji*xi to sA(j,i)
#pragma unroll
for(int j=0; j < 4; j++) {
total += rA[j] * sx2[quarter_NB_X*k + ty*4 + j];
sA16(ty*4 + j, tx) = cuConj( rA[j] ) * sx[tx];
}
__syncthreads();
// do partial row sums for transposed 16x64 result
// use 16x16 thread grid (tx4, ty4) instead of 64x4 (tx, ty)
// sum sixteen 16x4 sections in parallel:
// columns 0,4,8,...,60; then 1,5,...,61; then 2,6,...,62; then 3,7,...,63
psum2 = MAGMA_Z_ZERO;
#pragma unroll
for(int j=0; j < 4; j++) {
psum2 += sA16(tx4, ty4*4 + j);
}
__syncthreads();
// store partial row sums (locally)
psums[k] = psum2;
// move to next 64x16 block
A += lda * quarter_NB_X; // A is A(blk_ind + tx#, blk2*NB_x + k*NB_X/4 + 4*ty), # or partial
}
// store partial row sums
#pragma unroll
for(int k=0; k < 4; k++) {
sA16(tx4, ty4 + quarter_NB_X*k) = psums[k];
}
__syncthreads();
// sum up partial row sums and store final total to workspace
// thread (tx4,ty4) where ty4 < 4 sums row tx4 + ty4*16
// since this is the transposed block above the diagonal, it cannot be partial rows
if ( ty4 < 4 ) {
int k = ty4*quarter_NB_X;
psum2 = sA16(tx4, 0 + k) + sA16(tx4, 1 + k)
+ sA16(tx4, 2 + k) + sA16(tx4, 3 + k)
+ sA16(tx4, 4 + k) + sA16(tx4, 5 + k)
+ sA16(tx4, 6 + k) + sA16(tx4, 7 + k)
+ sA16(tx4, 8 + k) + sA16(tx4, 9 + k)
+ sA16(tx4, 10 + k) + sA16(tx4, 11 + k)
+ sA16(tx4, 12 + k) + sA16(tx4, 13 + k)
+ sA16(tx4, 14 + k) + sA16(tx4, 15 + k);
work[blk2*NB_X + k] = psum2; // store at work( blk2*NB_X + tx4 + ty4*16, blk )
}
__syncthreads();
}
work -= tx4; // work is work(blk_ind)
work += tx; // work is work(blk_ind + tx)
// store row sums
sA16(ty, tx) = total;
__syncthreads();
// sum up final total for row tx
if ( ty == 0 && (partial == 0 || tx < partial) ) {
total = sA16(0, tx) + sA16(1, tx) + sA16(2, tx) + sA16(3, tx);
work[blk*NB_X] = total; // store at work( blk*NB_X + tx, blk )
}
#endif /* PRECISION_[sdc] || (__CUDA_ARCH__ >= 200) */
}
/**************************************************************
Lower case, sum up final results
On input:
[ A11*x1 A12*x2 A13*x3 ]
work = [ --- (A21*x1 + A22*x2) A23*x3 ]
[ --- --- (A31*x1 + A32*x2 + A33*x3) ]
On output:
          [ A11*x1 + A12*x2 + A13*x3 ]
y = alpha*[ A21*x1 + A22*x2 + A23*x3 ] + beta*y
          [ A31*x1 + A32*x2 + A33*x3 ]
Previously:
[ A11*x1 --- ]
work = [ A12*x2 (A21*x1 + A22*x2) --- ]
[ A13*x3 A23*x3 (A31*x1 + A32*x2 + A33*x3) ]
which doesn't work as well because A13*x3 has 64 rows,
while A31*x1 has only n % NB rows. This is why it used to need
lwork = lda*(blocks + 1) instead of lda*blocks.
********************************************************************/
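// Each thread owns one output row: row (blk*NB_X + tx) sums work(row, blk), work(row, blk+1),
// ..., i.e. every block column at or to the right of its own block, then applies alpha and beta.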
__global__ void
zhemv_kernel_L_sum(
int n, magmaDoubleComplex alpha,
int lda,
magmaDoubleComplex beta,
magmaDoubleComplex * __restrict__ y, int incy,
magmaDoubleComplex * __restrict__ work )
{
int tx = threadIdx.x;
int blk = blockIdx.x;
int blk_ind = blk * NB_X;
int ind = blk_ind + tx;
if ( ind < n ) {
work += ind + blk*lda;
magmaDoubleComplex Ax = MAGMA_Z_ZERO;
for(int i = blk_ind; i < n; i += NB_X) {
Ax += work[0];
work += lda;
}
y[ind * incy] = beta*y[ind * incy] + alpha*Ax;
}
}
/**************************************************************
* Lower case, launch kernels
*/
extern "C"
void magmablas_zhemv_L(
magma_int_t n, magmaDoubleComplex alpha,
const magmaDoubleComplex *A, magma_int_t lda,
const magmaDoubleComplex *x, magma_int_t incx,
magmaDoubleComplex beta,
magmaDoubleComplex *y, magma_int_t incy,
magmaDoubleComplex *dwork)
{
magma_int_t blocks = (n - 1)/NB_X + 1;
dim3 grid( blocks, 1, 1 );
dim3 threads( NB_X, NB_Y, 1 );
hipLaunchKernelGGL(( zhemv_kernel_L), dim3(grid), dim3(threads), 0, magma_stream ,
n, A, lda, x, incx, dwork);
dim3 threads_sum( NB_X, 1, 1 );
hipLaunchKernelGGL(( zhemv_kernel_L_sum), dim3(grid), dim3(threads_sum), 0, magma_stream ,
n, alpha, lda, beta, y, incy, dwork);
}
/**
Purpose
-------
magmablas_zhemv_work performs the matrix-vector operation:
y := alpha*A*x + beta*y,
where alpha and beta are scalars, x and y are n element vectors and
A is an n by n Hermitian matrix.
Arguments
----------
@param[in]
uplo magma_uplo_t.
On entry, UPLO specifies whether the upper or lower
triangular part of the array A is to be referenced as
follows:
- = MagmaUpper: Only the upper triangular part of A is to be referenced.
- = MagmaLower: Only the lower triangular part of A is to be referenced.
@param[in]
n INTEGER.
On entry, N specifies the order of the matrix A.
N must be at least zero.
@param[in]
alpha COMPLEX*16.
On entry, ALPHA specifies the scalar alpha.
@param[in]
A COMPLEX*16 array of DIMENSION ( LDA, n ).
Before entry with UPLO = MagmaUpper, the leading n by n
upper triangular part of the array A must contain the upper
triangular part of the Hermitian matrix and the strictly
lower triangular part of A is not referenced.
Before entry with UPLO = MagmaLower, the leading n by n
lower triangular part of the array A must contain the lower
triangular part of the Hermitian matrix and the strictly
upper triangular part of A is not referenced.
Note that the imaginary parts of the diagonal elements need
not be set and are assumed to be zero.
@param[in]
lda INTEGER.
On entry, LDA specifies the first dimension of A as declared
in the calling (sub) program. LDA must be at least
max( 1, n ).
It is recommended that lda be a multiple of 16; otherwise
performance degrades because the memory accesses
would not be fully coalesced.
@param[in]
x COMPLEX*16 array of dimension at least
( 1 + ( n - 1 )*abs( INCX ) ).
Before entry, the incremented array X must contain the n
element vector x.
@param[in]
incx INTEGER.
On entry, INCX specifies the increment for the elements of
X. INCX must not be zero.
@param[in]
beta COMPLEX*16.
On entry, BETA specifies the scalar beta. When BETA is
supplied as zero then Y need not be set on input.
@param[in, out]
y COMPLEX*16 array of dimension at least
( 1 + ( n - 1 )*abs( INCY ) ).
Before entry, the incremented array Y must contain the n
element vector y. On exit, Y is overwritten by the updated
vector y.
@param[in]
incy INTEGER.
On entry, INCY specifies the increment for the elements of
Y. INCY must not be zero.
@param[in]
dwork (workspace) COMPLEX*16 array on the GPU, dimension (MAX(1, LWORK)),
@param[in]
lwork INTEGER.
The dimension of the array DWORK. LWORK >= LDA * ceil( N / NB_X ),
where NB_X = 64.
MAGMA implements zhemv through two steps:
1) perform the multiplication in each thread block and put the
intermediate value in dwork.
2) sum the intermediate values and store the final result in y.
magmablas_zhemv_work requires users to provide a workspace, while
magmablas_zhemv is a wrapper routine allocating the workspace inside the
routine and provides the same interface as cublas.
If users need to call zhemv frequently, we suggest using
magmablas_zhemv_work instead of magmablas_zhemv, as the overhead of
allocating and freeing device memory in magmablas_zhemv would hurt performance.
Our tests show that this penalty is about 10 Gflop/s when the matrix
size is around 10000.
@ingroup magma_zblas2
********************************************************************/
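/*
    Illustrative host-side usage (a sketch, not taken from the MAGMA test suite; dA, dx, dy
    are assumed to be device pointers already holding A, x and y):

        magma_int_t blocks = (n - 1)/64 + 1;  // NB_X = 64
        magma_int_t lwork  = lda*blocks;
        magmaDoubleComplex *dwork;
        magma_zmalloc( &dwork, lwork );       // device workspace, reusable across calls
        magmablas_zhemv_work( MagmaLower, n, alpha, dA, lda, dx, 1,
                              beta, dy, 1, dwork, lwork );
        magma_free( dwork );
*/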
extern "C"
magma_int_t
magmablas_zhemv_work(
magma_uplo_t uplo, magma_int_t n,
magmaDoubleComplex alpha,
const magmaDoubleComplex *A, magma_int_t lda,
const magmaDoubleComplex *x, magma_int_t incx,
magmaDoubleComplex beta,
magmaDoubleComplex *y, magma_int_t incy,
magmaDoubleComplex *dwork, magma_int_t lwork)
{
#if defined(PRECISION_z)
// z precision requires CUDA ARCH 2.x; call CUBLAS version instead.
magma_int_t arch = magma_getdevice_arch();
if ( arch < 200 ) {
magma_zhemv( uplo, n, alpha, A, lda, x, incx, beta, y, incy );
return MAGMA_SUCCESS;
}
#endif
// --------------------
// [sdc] precisions, or z precision with CUDA ARCH 2.x
int upper = (uplo == MagmaUpper);
magma_int_t blocks = (n - 1)/NB_X + 1;
magma_int_t lwmin = lda*blocks;
/*
* Test the input parameters.
*/
magma_int_t info = 0;
if ((! upper) && (uplo != MagmaLower)) {
info = -1;
} else if ( n < 0 ) {
info = -2;
} else if ( lda < max(1, n) ) {
info = -5;
} else if ( incx == 0 ) {
info = -7;
} else if ( incy == 0 ) {
info = -10;
} else if ( lwork < lwmin ) {
info = -12;
}
if (info != 0) {
magma_xerbla( __func__, -(info) );
return info;
}
/*
* Quick return if possible.
*/
if ( (n == 0) || ( MAGMA_Z_EQUAL(alpha, MAGMA_Z_ZERO) && MAGMA_Z_EQUAL(beta, MAGMA_Z_ONE) ) )
return info;
/* TODO: Upper case is not implemented in MAGMA */
if ( upper ) {
magma_zhemv( uplo, n, alpha, A, lda, x, incx, beta, y, incy);
}
else {
magmablas_zhemv_L(n, alpha, A, lda, x, incx, beta, y, incy, dwork);
}
return info;
}
/**
Purpose
-------
magmablas_zhemv performs the matrix-vector operation:
y := alpha*A*x + beta*y,
where alpha and beta are scalars, x and y are n element vectors and
A is an n by n Hermitian matrix.
Arguments
----------
@param[in]
uplo magma_uplo_t.
On entry, UPLO specifies whether the upper or lower
triangular part of the array A is to be referenced as
follows:
- = MagmaUpper: Only the upper triangular part of A is to be referenced.
- = MagmaLower: Only the lower triangular part of A is to be referenced.
@param[in]
n INTEGER.
On entry, N specifies the order of the matrix A.
N must be at least zero.
@param[in]
alpha COMPLEX*16.
On entry, ALPHA specifies the scalar alpha.
@param[in]
A COMPLEX*16 array of DIMENSION ( LDA, n ).
Before entry with UPLO = MagmaUpper, the leading n by n
upper triangular part of the array A must contain the upper
triangular part of the Hermitian matrix and the strictly
lower triangular part of A is not referenced.
Before entry with UPLO = MagmaLower, the leading n by n
lower triangular part of the array A must contain the lower
triangular part of the Hermitian matrix and the strictly
upper triangular part of A is not referenced.
Note that the imaginary parts of the diagonal elements need
not be set and are assumed to be zero.
@param[in]
lda INTEGER.
On entry, LDA specifies the first dimension of A as declared
in the calling (sub) program. LDA must be at least
max( 1, n ).
It is recommended that lda be a multiple of 16; otherwise
performance degrades because the memory accesses
would not be fully coalesced.
@param[in]
x COMPLEX*16 array of dimension at least
( 1 + ( n - 1 )*abs( INCX ) ).
Before entry, the incremented array X must contain the n
element vector x.
@param[in]
incx INTEGER.
On entry, INCX specifies the increment for the elements of
X. INCX must not be zero.
@param[in]
beta COMPLEX*16.
On entry, BETA specifies the scalar beta. When BETA is
supplied as zero then Y need not be set on input.
@param[in, out]
y COMPLEX*16 array of dimension at least
( 1 + ( n - 1 )*abs( INCY ) ).
Before entry, the incremented array Y must contain the n
element vector y. On exit, Y is overwritten by the updated
vector y.
@param[in]
incy INTEGER.
On entry, INCY specifies the increment for the elements of
Y. INCY must not be zero.
@ingroup magma_zblas2
********************************************************************/
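// Unlike magmablas_zhemv_work, this wrapper allocates the lda*ceil(n/NB_X) device workspace
// itself and frees it before returning, so repeated calls pay the allocation cost each time.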
extern "C"
magma_int_t
magmablas_zhemv(
magma_uplo_t uplo, magma_int_t n,
magmaDoubleComplex alpha,
const magmaDoubleComplex *A, magma_int_t lda,
const magmaDoubleComplex *x, magma_int_t incx,
magmaDoubleComplex beta,
magmaDoubleComplex *y, magma_int_t incy)
{
#if defined(PRECISION_z)
// z precision requires CUDA ARCH 2.x; call CUBLAS version instead.
magma_int_t arch = magma_getdevice_arch();
if ( arch < 200 ) {
magma_zhemv( uplo, n, alpha, A, lda, x, incx, beta, y, incy );
return MAGMA_SUCCESS;
}
#endif
// --------------------
// [sdc] precisions, or z precision with CUDA ARCH 2.x
int upper = (uplo == MagmaUpper);
/*
* Test the input parameters.
*/
magma_int_t info = 0;
if ((! upper) && (uplo != MagmaLower)) {
info = -1;
} else if ( n < 0 ) {
info = -2;
} else if ( lda < max(1, n) ) {
info = -5;
} else if ( incx == 0 ) {
info = -7;
} else if ( incy == 0 ) {
info = -10;
}
if (info != 0) {
magma_xerbla( __func__, -(info) );
return info;
}
/*
* Quick return if possible.
*/
if ( (n == 0) || ( MAGMA_Z_EQUAL(alpha, MAGMA_Z_ZERO) && MAGMA_Z_EQUAL(beta, MAGMA_Z_ONE) ) )
return info;
/* TODO: Upper case is not implemented in MAGMA */
if ( upper ) {
magma_zhemv( uplo, n, alpha, A, lda, x, incx, beta, y, incy);
}
else {
magmaDoubleComplex *dwork;
magma_int_t blocks = (n - 1)/NB_X + 1;
magma_int_t lwork = lda*blocks;
// TODO deal with error
magma_zmalloc( &dwork, lwork );
magmablas_zhemv_L(n, alpha, A, lda, x, incx, beta, y, incy, dwork);
magma_free( dwork );
}
return info;
}
| a92214f16005b15ad96f7e0b854e9bcbc3f682d6.cu | /*
-- MAGMA (version 1.5.0-beta3) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date July 2014
zsymv.cu is nearly identical to zhemv.cu, just change names and drop cuConj.
@precisions normal z -> s d c
@author Mark Gates
*/
#include "common_magma.h"
#define PRECISION_z
#define NB_X 64
#define NB_Y 4
#define bank_shift 33
#define quarter_NB_X 16
#define half_NB_X 32
/*******************************************************************************
Lower case, compute block multiply, work = A*x, for any size n:
[ A11*x1 A12*x2 A13*x3 ] [ A11 A12 A13 ] [ x1 ]
[ --- (A21*x1 + A22*x2) A23*x3 ] = [ A21 A22 A23 ] * [ x2 ]
[ --- --- (A31*x1 + A32*x2 + A33*x3) ] [ A31 A32 A33 ] [ x3 ]
Uses a 64x4 thread block.
For diagonal tiles, covers a 64x64 tile using three 32x32 tiles (plus one gets transposed).
For off-diagonal tiles, covers a 64x64 tile using four 64x16 tiles.
In both cases, each thread multiplies 4 elements.
For rows past the bottom of the matrix, the A pointer is adjusted to be the
last valid row of A, which multiple threads will read.
Extra rows are ignored when saving results to work.
Columns past the right edge are explicitly ignored when loading.
x values past the bottom are set to zero, thus, extra columns are zeroed
when multiplying.
********************************************************************/
__global__ void
zhemv_kernel_L(
int n,
const magmaDoubleComplex * __restrict__ A, int lda,
const magmaDoubleComplex * __restrict__ x, int incx,
magmaDoubleComplex * __restrict__ work)
{
#if defined(PRECISION_s) || defined(PRECISION_d) || defined(PRECISION_c) || (__CUDA_ARCH__ >= 200)
// treats sA as 16x64 block
#define sA16(i_, j_) (sA[(i_)][(j_)]) // i.e., sA[ (i_)*(NB_X+3) + (j_) ]
// treats sA as 32x32 block
#define sA32(i_, j_) (sA[0][(i_) + bank_shift*(j_)])
// 64x4 thread block
const int tx = threadIdx.x;
const int ty = threadIdx.y;
const int blk = blockIdx.x;
const int blk_ind = NB_X * blk;
const int td = NB_X * ty + tx;
// 32x8 thread block
const int tx2 = td % half_NB_X;
const int ty2 = td / half_NB_X;
// If this blk has fewer than NB_X rows, partial is the number of valid rows,
// so tx = 0, ..., partial-1 are valid rows, and tx >= partial are invalid.
// Else, partial == 0.
const int partial = (blk == gridDim.x - 1 ? (n % NB_X) : 0);
magmaDoubleComplex psum, psum2;
magmaDoubleComplex total = MAGMA_Z_ZERO;
// sA is used as a 32x32 block, sA32(i,j),
// and as a 16x64 block, sA16(i,j), in different parts of the code.
// sA must be at least half_NB_X*bank_shift = 32x33 = 1056;
// quarter_NB_X*(NB_X + 2) = 16*(64 + 2) = 1056
__shared__ magmaDoubleComplex sA [quarter_NB_X][NB_X + 3]; /* Why +3? seems it only needs +2. Does +3 reduce bank conflicts? */
__shared__ magmaDoubleComplex sx [NB_X]; // for x[ blk ]
__shared__ magmaDoubleComplex sx2[NB_X]; // for x[ blk2 ], which cycles over all blocks left of diag
magmaDoubleComplex rA[4];
magmaDoubleComplex psums[4];
// --------------------
// load 64x1 block x(blk_ind + 0:63) into sx
x += (blk_ind + tx)*incx; // x is x(blk_ind + tx)
if ( ty == 0 ) {
if ( partial && tx >= partial ) {
sx[tx] = MAGMA_Z_ZERO;
}
else {
sx[tx] = x[0];
}
}
// --------------------
// move to 32x32 diag block
A += blk_ind * (lda + 1); // A is A(blk_ind, blk_ind)
A += ty2*lda + tx2; // A is A(blk_ind + tx2, blk_ind + ty2)
// load 32x32 diag block A(blk_ind + 0:31, blk_ind + 0:31) into sA,
// as four 32x8 sections one after another:
// columns 0:7, then 8:15, then 16:23, then 24:31
if ( partial ) {
if ( tx2 >= partial ) {
A = A - tx2 + (partial - 1);
}
#pragma unroll
for(int j=0; j < half_NB_X; j += 8) {
if ( ty2+j < partial ) {
sA32(tx2, ty2 + j) = A[j*lda];
}
}
if ( tx2 >= partial ) {
A = A + tx2 - (partial - 1);
}
}
else {
#pragma unroll
for(int j=0; j < half_NB_X; j += 8) {
sA32(tx2, ty2 + j) = A[j*lda];
}
}
__syncthreads();
// symmetrize 32x32 diag block, copying lower to upper triangle,
// as four 32x8 sections in parallel:
// columns 0,4,8,12,16,20,24,28; then 1,5,...,29; then 2,6,...,30, then 3,7,...,31
#pragma unroll
for(int j=ty2*4; j < ty2*4 + 4; j++) {
if ( j < tx2 )
sA32(j, tx2) = cuConj( sA32(tx2, j) );
}
__syncthreads();
// multiply 32x32 diag block * x
// each thread does partial row sA(tx2, ty2*4 : ty2*4 + 3)
psum = MAGMA_Z_ZERO;
#pragma unroll
for(int j=0; j < 4; j++) {
psum += sA32(tx2, ty2*4 + j) * sx[ty2*4 + j];
}
__syncthreads();
// store partial row sums
sA32(ty2, tx2) = psum;
__syncthreads();
// sum up partial row sums, so thread (tx2,0) has total for row (blk_ind + tx2)
if ( ty2 == 0 ) {
total = sA32(0, tx2) + sA32(1, tx2)
+ sA32(2, tx2) + sA32(3, tx2)
+ sA32(4, tx2) + sA32(5, tx2)
+ sA32(6, tx2) + sA32(7, tx2);
}
__syncthreads();
// --------------------
// move to next 32x32 diag block, then repeat steps from first diag block
A += half_NB_X + half_NB_X*lda; // A is A(blk_ind + NB/2 + tx2, blk_ind + NB/2 + ty2)
// load 32x32 diag block A[block + 0:31, block + 0:31] into sA
if ( partial ) {
if ( tx2 + half_NB_X >= partial ) {
A = A - (tx2 + half_NB_X) + (partial - 1);
}
#pragma unroll
for(int j=0; j < half_NB_X; j += 8) {
if ( ty2+j + half_NB_X < partial ) {
sA32(tx2, ty2 + j) = A[j*lda];
}
}
if ( tx2 + half_NB_X >= partial ) {
A = A + (tx2 + half_NB_X) - (partial - 1);
}
}
else {
#pragma unroll
for(int j=0; j < half_NB_X; j += 8) {
sA32(tx2, ty2 + j) = A[j*lda];
}
}
__syncthreads();
// symmetrize 32x32 diag block, copying lower to upper triangle
#pragma unroll
for(int j=ty2*4; j < ty2*4 + 4; j++) {
if ( j < tx2 )
sA32(j, tx2) = cuConj( sA32(tx2, j) );
}
__syncthreads();
// multiply 32x32 diag block * x
psum = MAGMA_Z_ZERO;
#pragma unroll
for(int j=0; j < 4; j++) {
psum += sA32(tx2, ty2*4 + j) * sx[half_NB_X + ty2*4 + j];
}
__syncthreads();
// store partial row sums
sA32(ty2, tx2) = psum;
__syncthreads();
// sum up partial row sums, so thread (tx2,1) has total for row (blk_ind + NB/2 + tx2)
if ( ty2 == 1 ) {
total = sA32(0, tx2) + sA32(1, tx2)
+ sA32(2, tx2) + sA32(3, tx2)
+ sA32(4, tx2) + sA32(5, tx2)
+ sA32(6, tx2) + sA32(7, tx2);
}
__syncthreads();
// --------------------
// move to off-diag 32x32 block
A -= half_NB_X*lda; // A is A(blk_ind + NB/2 + tx2, blk_ind + ty2)
// load 32x32 block of A into sA,
// as four 32x8 sections one after another:
// columns 0:7, then 8:15, then 16:23, then 24:31
if ( partial ) {
if ( tx2 + half_NB_X >= partial ) {
A = A - (tx2 + half_NB_X) + (partial - 1);
}
#pragma unroll
for(int j=0; j < half_NB_X; j += 8) {
if ( ty2+j < partial ) {
sA32(tx2, ty2 + j) = A[j*lda];
}
}
if ( tx2 + half_NB_X >= partial ) {
A = A + (tx2 + half_NB_X) - (partial - 1);
}
}
else {
#pragma unroll
for(int j=0; j < half_NB_X; j += 8) {
sA32(tx2, ty2 + j) = A[j*lda];
}
}
__syncthreads();
// multiply 32x32 block (below diag)
psum = MAGMA_Z_ZERO;
#pragma unroll
for(int j=0; j < 4; j++) {
psum += sA32(tx2, ty2 + j*8) * sx[j*8 + ty2];
}
//__syncthreads(); // no sync needed here
// multiply transposed 32x32 block (above diag)
psum2 = MAGMA_Z_ZERO;
#pragma unroll
for(int j=0; j < 4; j++) {
psum2 += cuConj( sA32(ty2*4 + j, tx2) ) * sx[half_NB_X + ty2*4 + j];
}
__syncthreads();
// store partial sums for non-transposed 32x32 block
sA32(ty2, tx2) = psum;
__syncthreads();
// sum up partial row sums, so thread (tx2,1) has total for row (blk_ind + NB/2 + tx2)
if ( ty2 == 1 ) {
total = total
+ sA32(0, tx2) + sA32(1, tx2)
+ sA32(2, tx2) + sA32(3, tx2)
+ sA32(4, tx2) + sA32(5, tx2)
+ sA32(6, tx2) + sA32(7, tx2);
}
__syncthreads();
// store partial sums for transposed 32x32 block
sA32(ty2, tx2) = psum2;
__syncthreads();
// sum up partial row sums, so thread (tx2,0) has total for row (blk_ind + tx2)
if ( ty2 == 0 ) {
total = total
+ sA32(0, tx2) + sA32(1, tx2)
+ sA32(2, tx2) + sA32(3, tx2)
+ sA32(4, tx2) + sA32(5, tx2)
+ sA32(6, tx2) + sA32(7, tx2);
}
__syncthreads();
// --------------------
// move to left most 64x64 block in block row, and
// switch thread offset from (tx2,ty2) 32x8 block to (tx,ty) 64x4 block
A -= half_NB_X; // A is A(blk_ind + tx2, blk_ind + ty2)
A -= ty2*lda + tx2; // A is A(blk_ind, blk_ind)
A -= blk_ind*lda; // A is A(blk_ind, 0)
A += 4*ty*lda + tx; // A is A(blk_ind + tx, 4*ty)
if ( partial && tx >= partial ) {
A = A - tx + (partial - 1);
}
x -= blk_ind * incx; // x is x(tx)
// 16x16 thread block
const int tx4 = td % quarter_NB_X;
const int ty4 = td / quarter_NB_X;
work += blk*lda + tx4; // work is work(tx4, blk)
for(int blk2=0; blk2 < blk; ++blk2) {
// load 64x1 block x(blk2_ind + 0:63) into sx2
// since this block is left of diagonal, x cannot be partial rows
if ( ty == 0 ) {
sx2[tx] = x[blk2*NB_X*incx];
}
__syncthreads();
for( int k=0; k < 4; k++ ) {
// load 64x16 block of A into rA, 4 elements per thread,
// as four 64x4 sections in parallel:
// columns 0,4,8,12; then 1,5,9,13; then 2,6,10,14; then 3,7,11,15
// since this block is left of diagonal, it cannot be partial columns
#pragma unroll
for(int j=0; j < 4; j++) {
rA[j] = A[j*lda];
}
// 1) multiply 64x16 block A * x2
// each thread does partial row rA(tx + 16*k, ty*4 + 16*k : ty*4 + 3 + 16*k)
// 2) multiply transposed 16x64 block A**H * x,
// storing each product Aji*xi to sA(j,i)
#pragma unroll
for(int j=0; j < 4; j++) {
total += rA[j] * sx2[quarter_NB_X*k + ty*4 + j];
sA16(ty*4 + j, tx) = cuConj( rA[j] ) * sx[tx];
}
__syncthreads();
// do partial row sums for transposed 16x64 result
// use 16x16 thread grid (tx4, ty4) instead of 64x4 (tx, ty)
// sum sixteen 16x4 sections in parallel:
// columns 0,4,8,...,60; then 1,5,...,61; then 2,6,...,62; then 3,7,...,63
psum2 = MAGMA_Z_ZERO;
#pragma unroll
for(int j=0; j < 4; j++) {
psum2 += sA16(tx4, ty4*4 + j);
}
__syncthreads();
// store partial row sums (locally)
psums[k] = psum2;
// move to next 64x16 block
A += lda * quarter_NB_X; // A is A(blk_ind + tx#, blk2*NB_x + k*NB_X/4 + 4*ty), # or partial
}
// store partial row sums
#pragma unroll
for(int k=0; k < 4; k++) {
sA16(tx4, ty4 + quarter_NB_X*k) = psums[k];
}
__syncthreads();
// sum up partial row sums and store final total to workspace
// thread (tx4,ty4) where ty4 < 4 sums row tx4 + ty4*16
// since this is the transposed block above the diagonal, it cannot be partial rows
if ( ty4 < 4 ) {
int k = ty4*quarter_NB_X;
psum2 = sA16(tx4, 0 + k) + sA16(tx4, 1 + k)
+ sA16(tx4, 2 + k) + sA16(tx4, 3 + k)
+ sA16(tx4, 4 + k) + sA16(tx4, 5 + k)
+ sA16(tx4, 6 + k) + sA16(tx4, 7 + k)
+ sA16(tx4, 8 + k) + sA16(tx4, 9 + k)
+ sA16(tx4, 10 + k) + sA16(tx4, 11 + k)
+ sA16(tx4, 12 + k) + sA16(tx4, 13 + k)
+ sA16(tx4, 14 + k) + sA16(tx4, 15 + k);
work[blk2*NB_X + k] = psum2; // store at work( blk2*NB_X + tx4 + ty4*16, blk )
}
__syncthreads();
}
work -= tx4; // work is work(blk_ind)
work += tx; // work is work(blk_ind + tx)
// store row sums
sA16(ty, tx) = total;
__syncthreads();
// sum up final total for row tx
if ( ty == 0 && (partial == 0 || tx < partial) ) {
total = sA16(0, tx) + sA16(1, tx) + sA16(2, tx) + sA16(3, tx);
work[blk*NB_X] = total; // store at work( blk*NB_X + tx, blk )
}
#endif /* PRECISION_[sdc] || (__CUDA_ARCH__ >= 200) */
}
/**************************************************************
Lower case, sum up final results
On input:
       [ A11*x1   A12*x2              A13*x3                      ]
work = [  ---    (A21*x1 + A22*x2)    A23*x3                      ]
       [  ---      ---               (A31*x1 + A32*x2 + A33*x3)   ]
On output:
          [ A11*x1 + A12*x2 + A13*x3 ]
y = alpha*[ A21*x1 + A22*x2 + A23*x3 ] + beta*y
          [ A31*x1 + A32*x2 + A33*x3 ]
Previously:
       [ A11*x1   ---                 ---                         ]
work = [ A12*x2  (A21*x1 + A22*x2)    ---                         ]
       [ A13*x3   A23*x3             (A31*x1 + A32*x2 + A33*x3)   ]
which doesn't work as well because A13*x3 has 64 rows,
while A31*x1 has only n % NB rows. This is why it used to need
lwork = lda*(blocks + 1) instead of lda*blocks.
********************************************************************/
__global__ void
zhemv_kernel_L_sum(
int n, magmaDoubleComplex alpha,
int lda,
magmaDoubleComplex beta,
magmaDoubleComplex * __restrict__ y, int incy,
magmaDoubleComplex * __restrict__ work )
{
int tx = threadIdx.x;
int blk = blockIdx.x;
int blk_ind = blk * NB_X;
int ind = blk_ind + tx;
if ( ind < n ) {
work += ind + blk*lda;
magmaDoubleComplex Ax = MAGMA_Z_ZERO;
for(int i = blk_ind; i < n; i += NB_X) {
Ax += work[0];
work += lda;
}
y[ind * incy] = beta*y[ind * incy] + alpha*Ax;
}
}
/**************************************************************
* Lower case, launch kernels
*/
extern "C"
void magmablas_zhemv_L(
magma_int_t n, magmaDoubleComplex alpha,
const magmaDoubleComplex *A, magma_int_t lda,
const magmaDoubleComplex *x, magma_int_t incx,
magmaDoubleComplex beta,
magmaDoubleComplex *y, magma_int_t incy,
magmaDoubleComplex *dwork)
{
magma_int_t blocks = (n - 1)/NB_X + 1;
dim3 grid( blocks, 1, 1 );
dim3 threads( NB_X, NB_Y, 1 );
zhemv_kernel_L<<< grid, threads, 0, magma_stream >>>
(n, A, lda, x, incx, dwork);
dim3 threads_sum( NB_X, 1, 1 );
zhemv_kernel_L_sum<<< grid, threads_sum, 0, magma_stream >>>
(n, alpha, lda, beta, y, incy, dwork);
}
/**
Purpose
-------
magmablas_zhemv_work performs the matrix-vector operation:
y := alpha*A*x + beta*y,
where alpha and beta are scalars, x and y are n element vectors and
A is an n by n Hermitian matrix.
Arguments
----------
@param[in]
uplo magma_uplo_t.
On entry, UPLO specifies whether the upper or lower
triangular part of the array A is to be referenced as
follows:
- = MagmaUpper: Only the upper triangular part of A is to be referenced.
- = MagmaLower: Only the lower triangular part of A is to be referenced.
@param[in]
n INTEGER.
On entry, N specifies the order of the matrix A.
N must be at least zero.
@param[in]
alpha COMPLEX*16.
On entry, ALPHA specifies the scalar alpha.
@param[in]
A COMPLEX*16 array of DIMENSION ( LDA, n ).
Before entry with UPLO = MagmaUpper, the leading n by n
upper triangular part of the array A must contain the upper
triangular part of the Hermitian matrix and the strictly
lower triangular part of A is not referenced.
Before entry with UPLO = MagmaLower, the leading n by n
lower triangular part of the array A must contain the lower
triangular part of the Hermitian matrix and the strictly
upper triangular part of A is not referenced.
Note that the imaginary parts of the diagonal elements need
not be set and are assumed to be zero.
@param[in]
lda INTEGER.
On entry, LDA specifies the first dimension of A as declared
in the calling (sub) program. LDA must be at least
max( 1, n ).
It is recommended that lda be a multiple of 16; otherwise
performance deteriorates because the memory accesses are
not fully coalesced.
@param[in]
x COMPLEX*16 array of dimension at least
( 1 + ( n - 1 )*abs( INCX ) ).
Before entry, the incremented array X must contain the n
element vector x.
@param[in]
incx INTEGER.
On entry, INCX specifies the increment for the elements of
X. INCX must not be zero.
@param[in]
beta COMPLEX*16.
On entry, BETA specifies the scalar beta. When BETA is
supplied as zero then Y need not be set on input.
@param[in, out]
y COMPLEX*16 array of dimension at least
( 1 + ( n - 1 )*abs( INCY ) ).
Before entry, the incremented array Y must contain the n
element vector y. On exit, Y is overwritten by the updated
vector y.
@param[in]
incy INTEGER.
On entry, INCY specifies the increment for the elements of
Y. INCY must not be zero.
@param[in]
dwork (workspace) COMPLEX*16 array on the GPU, dimension (MAX(1, LWORK)),
@param[in]
lwork INTEGER.
The dimension of the array DWORK. LWORK >= LDA * ceil( N / NB_X ),
where NB_X = 64.
MAGMA implements zhemv through two steps:
1) perform the multiplication in each thread block and put the
intermediate value in dwork.
2) sum the intermediate values and store the final result in y.
magmablas_zhemv_work requires users to provide a workspace, while
magmablas_zhemv is a wrapper routine that allocates the workspace inside the
routine and provides the same interface as cublas.
If users need to call zhemv frequently, we suggest using
magmablas_zhemv_work instead of magmablas_zhemv, as the overhead of
allocating and freeing device memory in magmablas_zhemv hurts performance.
Our tests show that this penalty is about 10 Gflop/s when the matrix
size is around 10000.
@ingroup magma_zblas2
********************************************************************/
extern "C"
magma_int_t
magmablas_zhemv_work(
magma_uplo_t uplo, magma_int_t n,
magmaDoubleComplex alpha,
const magmaDoubleComplex *A, magma_int_t lda,
const magmaDoubleComplex *x, magma_int_t incx,
magmaDoubleComplex beta,
magmaDoubleComplex *y, magma_int_t incy,
magmaDoubleComplex *dwork, magma_int_t lwork)
{
#if defined(PRECISION_z)
// z precision requires CUDA ARCH 2.x; call CUBLAS version instead.
magma_int_t arch = magma_getdevice_arch();
if ( arch < 200 ) {
magma_zhemv( uplo, n, alpha, A, lda, x, incx, beta, y, incy );
return MAGMA_SUCCESS;
}
#endif
// --------------------
// [sdc] precisions, or z precision with CUDA ARCH 2.x
int upper = (uplo == MagmaUpper);
magma_int_t blocks = (n - 1)/NB_X + 1;
magma_int_t lwmin = lda*blocks;
/*
* Test the input parameters.
*/
magma_int_t info = 0;
if ((! upper) && (uplo != MagmaLower)) {
info = -1;
} else if ( n < 0 ) {
info = -2;
} else if ( lda < max(1, n) ) {
info = -5;
} else if ( incx == 0 ) {
info = -7;
} else if ( incy == 0 ) {
info = -10;
} else if ( lwork < lwmin ) {
info = -12;
}
if (info != 0) {
magma_xerbla( __func__, -(info) );
return info;
}
/*
* Quick return if possible.
*/
if ( (n == 0) || ( MAGMA_Z_EQUAL(alpha, MAGMA_Z_ZERO) && MAGMA_Z_EQUAL(beta, MAGMA_Z_ONE) ) )
return info;
/* TODO: Upper case is not implemented in MAGMA */
if ( upper ) {
magma_zhemv( uplo, n, alpha, A, lda, x, incx, beta, y, incy);
}
else {
magmablas_zhemv_L(n, alpha, A, lda, x, incx, beta, y, incy, dwork);
}
return info;
}
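/* Illustrative usage sketch (not part of the original MAGMA source): shows how a
   caller might size and reuse the workspace for magmablas_zhemv_work, following
   the rule LWORK >= LDA * ceil( N / NB_X ) with NB_X = 64 documented above.
   The helper name below is hypothetical; magma_zmalloc/magma_free are used as
   elsewhere in this file. */
void example_zhemv_work_usage(
    magma_int_t n, magmaDoubleComplex alpha,
    const magmaDoubleComplex *dA, magma_int_t lda,
    const magmaDoubleComplex *dx, magma_int_t incx,
    magmaDoubleComplex beta,
    magmaDoubleComplex *dy, magma_int_t incy )
{
    magma_int_t blocks = (n - 1)/NB_X + 1;   // ceil( n / 64 )
    magma_int_t lwork  = lda*blocks;         // minimum workspace size
    // e.g. n = 10000, lda = 10016 -> blocks = 157, lwork = 1572512 elements
    magmaDoubleComplex *dwork;
    magma_zmalloc( &dwork, lwork );          // allocate once, reuse across many calls
    magmablas_zhemv_work( MagmaLower, n, alpha, dA, lda, dx, incx,
                          beta, dy, incy, dwork, lwork );
    magma_free( dwork );
}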
/**
Purpose
-------
magmablas_zhemv performs the matrix-vector operation:
y := alpha*A*x + beta*y,
where alpha and beta are scalars, x and y are n element vectors and
A is an n by n Hermitian matrix.
Arguments
----------
@param[in]
uplo magma_uplo_t.
On entry, UPLO specifies whether the upper or lower
triangular part of the array A is to be referenced as
follows:
- = MagmaUpper: Only the upper triangular part of A is to be referenced.
- = MagmaLower: Only the lower triangular part of A is to be referenced.
@param[in]
n INTEGER.
On entry, N specifies the order of the matrix A.
N must be at least zero.
@param[in]
alpha COMPLEX*16.
On entry, ALPHA specifies the scalar alpha.
@param[in]
A COMPLEX*16 array of DIMENSION ( LDA, n ).
Before entry with UPLO = MagmaUpper, the leading n by n
upper triangular part of the array A must contain the upper
triangular part of the Hermitian matrix and the strictly
lower triangular part of A is not referenced.
Before entry with UPLO = MagmaLower, the leading n by n
lower triangular part of the array A must contain the lower
triangular part of the Hermitian matrix and the strictly
upper triangular part of A is not referenced.
Note that the imaginary parts of the diagonal elements need
not be set and are assumed to be zero.
@param[in]
lda INTEGER.
On entry, LDA specifies the first dimension of A as declared
in the calling (sub) program. LDA must be at least
max( 1, n ).
It is recommended that lda be a multiple of 16; otherwise
performance deteriorates because the memory accesses are
not fully coalesced.
@param[in]
x COMPLEX*16 array of dimension at least
( 1 + ( n - 1 )*abs( INCX ) ).
Before entry, the incremented array X must contain the n
element vector x.
@param[in]
incx INTEGER.
On entry, INCX specifies the increment for the elements of
X. INCX must not be zero.
@param[in]
beta COMPLEX*16.
On entry, BETA specifies the scalar beta. When BETA is
supplied as zero then Y need not be set on input.
@param[in, out]
y COMPLEX*16 array of dimension at least
( 1 + ( n - 1 )*abs( INCY ) ).
Before entry, the incremented array Y must contain the n
element vector y. On exit, Y is overwritten by the updated
vector y.
@param[in]
incy INTEGER.
On entry, INCY specifies the increment for the elements of
Y. INCY must not be zero.
@ingroup magma_zblas2
********************************************************************/
extern "C"
magma_int_t
magmablas_zhemv(
magma_uplo_t uplo, magma_int_t n,
magmaDoubleComplex alpha,
const magmaDoubleComplex *A, magma_int_t lda,
const magmaDoubleComplex *x, magma_int_t incx,
magmaDoubleComplex beta,
magmaDoubleComplex *y, magma_int_t incy)
{
#if defined(PRECISION_z)
// z precision requires CUDA ARCH 2.x; call CUBLAS version instead.
magma_int_t arch = magma_getdevice_arch();
if ( arch < 200 ) {
magma_zhemv( uplo, n, alpha, A, lda, x, incx, beta, y, incy );
return MAGMA_SUCCESS;
}
#endif
// --------------------
// [sdc] precisions, or z precision with CUDA ARCH 2.x
int upper = (uplo == MagmaUpper);
/*
* Test the input parameters.
*/
magma_int_t info = 0;
if ((! upper) && (uplo != MagmaLower)) {
info = -1;
} else if ( n < 0 ) {
info = -2;
} else if ( lda < max(1, n) ) {
info = -5;
} else if ( incx == 0 ) {
info = -7;
} else if ( incy == 0 ) {
info = -10;
}
if (info != 0) {
magma_xerbla( __func__, -(info) );
return info;
}
/*
* Quick return if possible.
*/
if ( (n == 0) || ( MAGMA_Z_EQUAL(alpha, MAGMA_Z_ZERO) && MAGMA_Z_EQUAL(beta, MAGMA_Z_ONE) ) )
return info;
/* TODO: Upper case is not implemented in MAGMA */
if ( upper ) {
magma_zhemv( uplo, n, alpha, A, lda, x, incx, beta, y, incy);
}
else {
magmaDoubleComplex *dwork;
magma_int_t blocks = (n - 1)/NB_X + 1;
magma_int_t lwork = lda*blocks;
// TODO deal with error
magma_zmalloc( &dwork, lwork );
magmablas_zhemv_L(n, alpha, A, lda, x, incx, beta, y, incy, dwork);
magma_free( dwork );
}
return info;
}
|
d09ed5d209dc6c60817c1caa675ede4515207365.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "computeGrid.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
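// Benchmark sweep: blocks_ lists the (BLOCKX, BLOCKY) thread-block shapes tried for
// each matrix size taken from matrices_; argv[1] selects how many matrix sizes to run.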
int main(int argc, char **argv) {
hipSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
float *read = NULL;
hipMalloc(&read, XSIZE*YSIZE*sizeof(float));
float *write = NULL;
hipMalloc(&write, XSIZE*YSIZE*sizeof(float));
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
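// Round the problem size up to the next multiple of the block dimensions so the
// grid of (iXSIZE/BLOCKX) x (iYSIZE/BLOCKY) blocks covers the whole matrix.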
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
hipFree(0);
hipLaunchKernelGGL((computeGrid), dim3(gridBlock), dim3(threadBlock), 0, 0, read, write);
hipDeviceSynchronize();
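// Warm-up launches (untimed) before the measured loop below.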
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
hipLaunchKernelGGL((computeGrid), dim3(gridBlock), dim3(threadBlock), 0, 0, read, write);
}
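// Timed region: 1000 back-to-back kernel launches measured with steady_clock.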
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
hipLaunchKernelGGL((computeGrid), dim3(gridBlock), dim3(threadBlock), 0, 0, read, write);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} | d09ed5d209dc6c60817c1caa675ede4515207365.cu | #include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "computeGrid.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
cudaSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
float *read = NULL;
cudaMalloc(&read, XSIZE*YSIZE*sizeof(float));
float *write = NULL;
cudaMalloc(&write, XSIZE*YSIZE*sizeof(float));
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
cudaFree(0);
computeGrid<<<gridBlock,threadBlock>>>(read,write);
cudaDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
computeGrid<<<gridBlock,threadBlock>>>(read,write);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
computeGrid<<<gridBlock,threadBlock>>>(read,write);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} |
b8e431aad1a6259e0be55f5b1f1b29e2d643f401.hip | // !!! This is a file automatically generated by hipify!!!
/*************************************************************
* File: helper.cu
*
*
*************************************************************/
#include <ctime>
#include <random>
#include "helper.h"
#include "rocblas.h"
#include "hiprand/hiprand.h"
#include "hiprand/hiprand_kernel.h"
#include "hip/hip_runtime.h"
#include "device.h"
#include "fp16_conversion.h"
/*************************************************************
* KERNEL FUNCTIONS
*************************************************************/
/**************************************************************
*
*
* SINGLE PRECISION FLOATING POINT KERNELS
*
*
**************************************************************/
__global__ void CrossEntropyLoss_Derivative_Gpu(const float * neural_out, const float * expect_out, float * loss_dvt, const int n)
{
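// Element-wise derivative of binary cross-entropy with target t = expect_out and
// prediction y = neural_out: dL/dy = -t/y + (1 - t)/(1 - y).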
int tid = blockIdx.x * blockDim.x + threadIdx.x;
if(tid < n)
{
loss_dvt[tid] = -expect_out[tid] / neural_out[tid] + (1 - expect_out[tid]) / (1 - neural_out[tid]);
}
}
__global__ void Sigmoid_Gpu(const float * z, float * output, const int n)
{
int tid = blockIdx.x * blockDim.x + threadIdx.x;
if(tid < n)
{
output[tid] = 1.0 / (1.0 + exp(-z[tid]));
}
}
__global__ void Sigmoid_Dev_Gpu(const float * output, float * act_dvt, const int n)
{
int tid = blockIdx.x * blockDim.x + threadIdx.x;
if(tid < n)
{
act_dvt[tid] = output[tid] * (1 - output[tid]);
}
}
__global__ void Err_Dev_Gpu(const float * error_signal, const float * act_dvt, float * err_dvt, const int n)
{
int tid = blockIdx.x * blockDim.x + threadIdx.x;
if(tid < n)
{
err_dvt[tid] = error_signal[tid] * act_dvt[tid];
}
}
__global__ void Update_Param_Gpu(float * x, float * dx, float ALPHA, int n)
{
int tid = blockIdx.x * blockDim.x + threadIdx.x;
if(tid < n)
{
// x = x + alpha * dx
x[tid] = x[tid] + ALPHA * dx[tid];
// clear dx
dx[tid] = 0;
}
}
__global__ void Softmax_Gpu(const float * z, float * output, const int n)
{
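// Three barrier-separated phases: exponentiate each logit, let every thread
// (redundantly) accumulate the full sum over all n outputs, then normalize.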
float sum = 0.0;
int tid = blockIdx.x * blockDim.x + threadIdx.x;
if(tid < n)
{
output[tid] = exp(z[tid]);
}
__syncthreads();
if(tid < n)
{
for(int i = 0; i < n; i++)
{
sum += output[i];
}
}
__syncthreads();
if(tid < n)
{
output[tid] /= sum;
}
}
__global__ void Softmax_Dev_Gpu(const float * output, float * act_dvt, const int n)
{
int tid = blockIdx.x * blockDim.x + threadIdx.x;
if(tid < n)
{
act_dvt[tid] = output[tid] * (1 - output[tid]);
}
}
__global__ void fill_rand_gpu(float * array, int seed, int n)
{
int tid = blockIdx.x * blockDim.x + threadIdx.x;
if(tid < n)
{
hiprandState_t state;
hiprand_init(seed, tid, 0, &state);
// create random number in range [-0.5 , 0.5] with uniform distribution
array[tid] = hiprand_uniform(&state) - 0.5;
}
}
__global__ void fill_zero_gpu(float * array, int n)
{
int tid = blockIdx.x * blockDim.x + threadIdx.x;
if(tid < n)
{
array[tid] = 0;
}
}
/**************************************************************
*
*
* HALF PRECISION FLOATING POINT KERNELS
*
*
**************************************************************/
__global__ void h_add_vectors(const half* x, half* y, const int n)
{
int tid = blockIdx.x * blockDim.x + threadIdx.x;
if(tid < n)
{
half alpha = __float2half(1.0);
// y = alpha * x + y
y[tid] = __hfma(alpha, x[tid], y[tid]);
}
}
__global__ void cvt_float2half_gpu(const float * src, layer_param_t dst, const int n)
{
int tid = blockIdx.x * blockDim.x + threadIdx.x;
if(tid < n)
{
dst[tid] = __float2half(src[tid]);
}
}
__global__ void cvt_half2float_gpu(const layer_param_t src, float * dst, const int n)
{
int tid = blockIdx.x * blockDim.x + threadIdx.x;
if(tid < n)
{
dst[tid] = __half2float(src[tid]);
}
}
__global__ void h_fill_zero_gpu(half * array, int n)
{
int tid = blockIdx.x * blockDim.x + threadIdx.x;
if(tid < n)
{
array[tid] = __float2half(0);
}
}
__global__ void h_fill_rand_gpu(half * array, int seed, int n)
{
int tid = blockIdx.x * blockDim.x + threadIdx.x;
if(tid < n)
{
hiprandState_t state;
hiprand_init(seed, tid, 0, &state);
// create random number in range [-0.5 , 0.5] with uniform distribution
array[tid] = __float2half( hiprand_uniform(&state) - 0.5 );
}
}
__global__ void h_Softmax_Dev_Gpu(const half * output, half * act_dvt, const int n)
{
int tid = blockIdx.x * blockDim.x + threadIdx.x;
if(tid < n)
{
// act_dvt[tid] = output[tid] * (1 - output[tid]);
half one = __float2half(1.0);
half minus_one = __float2half(-1.0);
half temp = __hfma(minus_one, output[tid], one);
act_dvt[tid] = __hmul(output[tid], temp);
}
}
__global__ void h_Softmax_Gpu(const half * z, half * output, const int n)
{
half sum = __float2half(0.0);
int tid = blockIdx.x * blockDim.x + threadIdx.x;
if(tid < n)
{
// output[tid] = exp(z[tid]);
output[tid] = hexp(z[tid]);
}
__syncthreads();
if(tid < n)
{
for(int i = 0; i < n; i++)
{
// sum += output[i];
sum = __hadd( sum , output[i] );
}
}
__syncthreads();
if(tid < n)
{
// output[tid] /= sum;
output[tid] = __hdiv(output[tid], sum);
}
}
__global__ void h_Update_Param_Gpu(half * x, half * dx, float ALPHA, const int n)
{
int tid = blockIdx.x * blockDim.x + threadIdx.x;
if(tid < n)
{
half h_alpha = __float2half(ALPHA);
// x = x + alpha * dx
// x[tid] = x[tid] + ALPHA * dx[tid];
x[tid] = __hfma(h_alpha, dx[tid], x[tid]);
// clear dx
// dx[tid] = 0;
dx[tid] = __float2half(0.0);
}
}
__global__ void h_Err_Dev_Gpu(const half * error_signal, const half * act_dvt, half * err_dvt, const int n)
{
int tid = blockIdx.x * blockDim.x + threadIdx.x;
if(tid < n)
{
// err_dvt[tid] = error_signal[tid] * act_dvt[tid];
err_dvt[tid] = __hmul(error_signal[tid], act_dvt[tid]);
}
}
__global__ void h_Sigmoid_Dev_Gpu(const half * output, half * act_dvt, const int n)
{
int tid = blockIdx.x * blockDim.x + threadIdx.x;
if(tid < n)
{
// act_dvt[tid] = output[tid] * (1 - output[tid]);
half one = __float2half(1.0);
half minus_one = __float2half(-1.0);
half temp = __hfma(minus_one, output[tid], one);
act_dvt[tid] = __hmul(output[tid], temp);
}
}
__global__ void h_Sigmoid_Gpu(const half * z, half * output, const int n)
{
int tid = blockIdx.x * blockDim.x + threadIdx.x;
if(tid < n)
{
// output[tid] = 1.0 / (1.0 + exp(-z[tid]));
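// computed below as exp(z) / (1 + exp(z)), which is algebraically equal to 1 / (1 + exp(-z))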
half exponent = hexp(z[tid]);
half num = __float2half(1.0);
half divisor = __hfma(num, exponent, num);
output[tid] = __hdiv(exponent, divisor);
}
}
__global__ void h_CrossEntropyLoss_Derivative_Gpu(const half * neural_out, const half * expect_out, half * loss_dvt, const int n)
{
int tid = blockIdx.x * blockDim.x + threadIdx.x;
if(tid < n)
{
// loss_dvt[tid] = -expect_out[tid] / neural_out[tid] + (1 - expect_out[tid]) / (1 - neural_out[tid]);
half minus_one = __float2half(-1.0);
half one = __float2half(1.0);
half x = __hdiv(expect_out[tid], neural_out[tid]);
half y = __hdiv( __hfma(minus_one, expect_out[tid], one), __hfma(minus_one, neural_out[tid], one) );
loss_dvt[tid] = __hfma(minus_one, x, y);
}
}
/*************************************************************
* PUBLIC FUNCTIONS
*************************************************************/
/***************************************
* FLOAT - HALF FLOAT CONVERT FUNCTION
***************************************/
void Helper::cvtfloat2half(const float * src, Layer::layer_param_t dst, const int n_elements)
{
hipLaunchKernelGGL(( cvt_float2half_gpu), dim3(CUDA_BLOCKS(n_elements)), dim3(Device::total_threads), 0, 0, src, dst, n_elements);
}
void Helper::cvthalf2float(const Layer::layer_param_t src, float * dst, const int n_elements)
{
hipLaunchKernelGGL(( cvt_half2float_gpu), dim3(CUDA_BLOCKS(n_elements)), dim3(Device::total_threads), 0, 0, src, dst, n_elements);
}
/***************************************
* MEMORY ALLOCATION FUNCTION
***************************************/
void Helper::cuda_array_random_allocate(void **array, Layer::param_type_e type, int size)
{
if( type == Layer::FLOAT_TYPE )
{
hipMalloc(array, size * sizeof(float));
// Fill with random number
hipLaunchKernelGGL(( fill_rand_gpu), dim3(CUDA_BLOCKS(size)), dim3(Device::total_threads), 0, 0, (float *)*array, time(NULL), size);
}
else if( type == Layer::HALF_FLOAT_TYPE )
{
hipMalloc(array, size * sizeof(half));
hipLaunchKernelGGL(( h_fill_rand_gpu), dim3(CUDA_BLOCKS(size)), dim3(Device::total_threads), 0, 0, (half *)*array, time(NULL), size);
}
}
void Helper::cuda_array_zero_allocate(void **array, Layer::param_type_e type, int size)
{
if( type == Layer::FLOAT_TYPE )
{
hipMalloc(array, size * sizeof(float));
// fill with zero
hipLaunchKernelGGL(( fill_zero_gpu), dim3(CUDA_BLOCKS(size)), dim3(Device::total_threads), 0, 0, (float *)*array, size);
}
else if( type == Layer::HALF_FLOAT_TYPE )
{
hipMalloc(array, size * sizeof(half));
hipLaunchKernelGGL(( h_fill_zero_gpu), dim3(CUDA_BLOCKS(size)), dim3(Device::total_threads), 0, 0, (half *)*array, size);
}
}
void Helper::cuda_array_allocate(void **array, Layer::param_type_e type, int size)
{
if( type == Layer::FLOAT_TYPE )
{
hipMalloc(array, size * sizeof(float));
}
else if( type == Layer::HALF_FLOAT_TYPE )
{
hipMalloc(array, size * sizeof(half));
}
}
/***************************************
* LAYER SUB-CALCULATION FUNCTION
***************************************/
void Helper::net_calc(const Layer::layer_param_t input, const Layer::layer_param_t w,
const Layer::layer_param_t b, Layer::layer_param_t z,
int total_inputs, int total_outputs)
{
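// Treats the input as a 1 x total_inputs row vector and W as a total_inputs x total_outputs
// matrix (column-major, leading dimension total_inputs), so z = x*W + b is 1 x total_outputs.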
if( Helper::network_type_get() == Layer::FLOAT_TYPE )
{
float alpha = 1.0;
float beta = 0.0;
int m = 1; // number of rows of matrix op(A) and C
int n = total_outputs; // number of columns of matrix op (B) and C
int k = total_inputs; // number of columns and rows of matrix op(A) and op(B)
int lda = 1; // leading dimension of matrix A
int ldb = total_inputs; // leading dimension of matrix B
int ldc = 1; // leading dimension of matrix C
float *mat_a = (float *)input; // Matrix A
float *mat_b = (float *)w; // Matrix B
float *mat_c = z; // Matrix C
hipblasOperation_t op_A = HIPBLAS_OP_N; // op(A) = A
hipblasOperation_t op_B = HIPBLAS_OP_N; // op(B) = B
// calculate z = x*W
hipblasSgemm(Device::Device_Get_Handle(),op_A,op_B,\
m , n , k,\
&alpha,\
mat_a , lda,\
mat_b , ldb,\
&beta ,\
mat_c , ldc);
// add bias z = bias + z
hipblasSaxpy(Device::Device_Get_Handle(), n, &alpha, b, 1, z, 1);
}
else if( Helper::network_type_get() == Layer::HALF_FLOAT_TYPE )
{
half alpha = approx_float_to_half(1.0);
half beta = approx_float_to_half(0.0);
int m = 1; // number of rows of matrix op(A) and C
int n = total_outputs; // number of columns of matrix op (B) and C
int k = total_inputs; // number of columns and rows of matrix op(A) and op(B)
int lda = 1; // leading dimension of matrix A
int ldb = total_inputs; // leading dimension of matrix B
int ldc = 1; // leading dimension of matrix C
half *mat_a = input; // Matrix A
half *mat_b = w; // Matrix B
half *mat_c = z; // Matrix C
hipblasOperation_t op_A = HIPBLAS_OP_N; // op(A) = A
hipblasOperation_t op_B = HIPBLAS_OP_N; // op(B) = B
// calculate z = x*W
hipblasHgemm(Device::Device_Get_Handle(),op_A,op_B,\
m , n , k,\
&alpha,\
mat_a , lda,\
mat_b , ldb,\
&beta ,\
mat_c , ldc);
// add bias z = bias + z
hipLaunchKernelGGL(( h_add_vectors), dim3(CUDA_BLOCKS(total_outputs)), dim3(Device::total_threads), 0, 0, b, z, total_outputs);
}
}
void Helper::sigmoid_calc(const Layer::layer_param_t z, Layer::layer_param_t output, int n)
{
if( Helper::network_type_get() == Layer::FLOAT_TYPE )
{
hipLaunchKernelGGL(( Sigmoid_Gpu), dim3(CUDA_BLOCKS(n)), dim3(Device::total_threads), 0, 0, z, output, n);
}
else if( Helper::network_type_get() == Layer::HALF_FLOAT_TYPE )
{
hipLaunchKernelGGL(( h_Sigmoid_Gpu), dim3(CUDA_BLOCKS(n)), dim3(Device::total_threads), 0, 0, z, output, n);
}
}
void Helper::sigmoid_dev_calc(Layer::layer_param_t output, Layer::layer_param_t act_dvt, int n)
{
if( Helper::network_type_get() == Layer::FLOAT_TYPE )
{
hipLaunchKernelGGL(( Sigmoid_Dev_Gpu), dim3(CUDA_BLOCKS(n)), dim3(Device::total_threads), 0, 0, output, act_dvt, n);
}
else if( Helper::network_type_get() == Layer::HALF_FLOAT_TYPE )
{
hipLaunchKernelGGL(( h_Sigmoid_Dev_Gpu), dim3(CUDA_BLOCKS(n)), dim3(Device::total_threads), 0, 0, output, act_dvt, n);
}
}
void Helper::softmax_calc(const Layer::layer_param_t z, Layer::layer_param_t output, int n)
{
if( Helper::network_type_get() == Layer::FLOAT_TYPE )
{
hipLaunchKernelGGL(( Softmax_Gpu), dim3(CUDA_BLOCKS(n)), dim3(Device::total_threads), 0, 0, z, output, n);
}
else if( Helper::network_type_get() == Layer::HALF_FLOAT_TYPE )
{
hipLaunchKernelGGL(( h_Softmax_Gpu), dim3(CUDA_BLOCKS(n)), dim3(Device::total_threads), 0, 0, z, output, n);
}
}
void Helper::softmax_dev_calc(const Layer::layer_param_t output, Layer::layer_param_t act_dvt, int n)
{
if( Helper::network_type_get() == Layer::FLOAT_TYPE )
{
hipLaunchKernelGGL(( Softmax_Dev_Gpu), dim3(CUDA_BLOCKS(n)), dim3(Device::total_threads), 0, 0, output, act_dvt, n);
}
else if( Helper::network_type_get() == Layer::HALF_FLOAT_TYPE )
{
hipLaunchKernelGGL(( h_Softmax_Dev_Gpu), dim3(CUDA_BLOCKS(n)), dim3(Device::total_threads), 0, 0, output, act_dvt, n);
}
}
void Helper::err_dev_calc(Layer::layer_param_t error_signal, Layer::layer_param_t act_dvt,
Layer::layer_param_t err_dvt, int n)
{
if( Helper::network_type_get() == Layer::FLOAT_TYPE )
{
hipLaunchKernelGGL(( Err_Dev_Gpu), dim3(CUDA_BLOCKS(n)), dim3(Device::total_threads), 0, 0, error_signal, act_dvt, err_dvt, n);
}
else if( Helper::network_type_get() == Layer::HALF_FLOAT_TYPE )
{
hipLaunchKernelGGL(( h_Err_Dev_Gpu), dim3(CUDA_BLOCKS(n)), dim3(Device::total_threads), 0, 0, error_signal, act_dvt, err_dvt, n);
}
}
void Helper::accum_w_grad(Layer::layer_param_t input, Layer::layer_param_t err_dvt,
Layer::layer_param_t w_grad, int total_inputs, int total_outputs)
{
if( Helper::network_type_get() == Layer::FLOAT_TYPE )
{
float alpha = 1.0;
float beta = 1.0;
int m = total_inputs; // number of rows of matrix op(A) and C
int n = total_outputs; // number of columns of matrix op (B) and C
int k = 1; // number of columns and rows of matrix op(A) and op(B)
int lda = 1; // leading dimension of matrix A
int ldb = 1; // leading dimension of matrix B
int ldc = m; // leading dimension of matrix C
float *mat_a = input; // Matrix A
float *mat_b = err_dvt; // Matrix B
float *mat_c = w_grad; // Matrix C
hipblasOperation_t op_A = HIPBLAS_OP_T; // op(A) = A'
hipblasOperation_t op_B = HIPBLAS_OP_N; // op(B) = B
// calculate C = alpha * A * B + beta * C
// the formula is dW = dW + transpose(input) * err_dvt
hipblasSgemm(Device::Device_Get_Handle(),op_A,op_B,\
m , n, k,\
&alpha,\
mat_a , lda,\
mat_b , ldb,\
&beta ,\
mat_c , ldc);
}
else if( Helper::network_type_get() == Layer::HALF_FLOAT_TYPE )
{
half alpha = approx_float_to_half(1.0);
half beta = approx_float_to_half(1.0);
int m = total_inputs; // number of rows of matrix op(A) and C
int n = total_outputs; // number of columns of matrix op (B) and C
int k = 1; // number of columns and rows of matrix op(A) and op(B)
int lda = 1; // leading dimension of matrix A
int ldb = 1; // leading dimension of matrix B
int ldc = m; // leading dimension of matrix C
half *mat_a = input; // Matrix A
half *mat_b = err_dvt; // Matrix B
half *mat_c = w_grad; // Matrix C
hipblasOperation_t op_A = HIPBLAS_OP_T; // op(A) = A'
hipblasOperation_t op_B = HIPBLAS_OP_N; // op(B) = B
// calculate C = alpha * A * B + beta * C
// the formula is dW = dW + transpose(input) * err_dvt
hipblasHgemm(Device::Device_Get_Handle(),op_A,op_B,\
m , n, k,\
&alpha,\
mat_a , lda,\
mat_b , ldb,\
&beta ,\
mat_c , ldc);
}
}
void Helper::accum_b_grad(Layer::layer_param_t err_dvt, Layer::layer_param_t b_grad, int n)
{
if( Helper::network_type_get() == Layer::FLOAT_TYPE )
{
float alpha = 1.0;
float * x = err_dvt;
float * y = b_grad;
hipblasSaxpy(Device::Device_Get_Handle(), n, &alpha, x, 1, y, 1);
}
else if( Helper::network_type_get() == Layer::HALF_FLOAT_TYPE )
{
half * x = err_dvt;
half * y = b_grad;
hipLaunchKernelGGL(( h_add_vectors), dim3(CUDA_BLOCKS(n)), dim3(Device::total_threads), 0, 0, x, y, n);
}
}
void Helper::err_signal_calc(const Layer::layer_param_t w, const Layer::layer_param_t err_dvt,
Layer::layer_param_t propagate_err, int total_inputs, int total_outputs)
{
if( Helper::network_type_get() == Layer::FLOAT_TYPE )
{
float alpha = 1.0;
float beta = 0.0;
int m = 1; // number of rows of matrix op(A) and C
int n = total_inputs; // number of columns of matrix op (B) and C
int k = total_outputs; // number of columns and rows of matrix op(A) and op(B)
int lda = 1; // leading dimension of matrix A
int ldb = total_inputs; // leading dimension of matrix B
int ldc = 1; // leading dimension of matrix C
float *mat_a = err_dvt; // Matrix A
float *mat_b = w; // Matrix B
float *mat_c = propagate_err; // Matrix C
hipblasOperation_t op_A = HIPBLAS_OP_N; // op(A) = A
hipblasOperation_t op_B = HIPBLAS_OP_T; // op(B) = B'
// calculate C = alpha * A * B + beta * C
// the formula is pre_err = err_dvt * transpose(W)
hipblasSgemm(Device::Device_Get_Handle(),op_A,op_B,\
m , n, k,\
&alpha,\
mat_a , lda,\
mat_b , ldb,\
&beta ,\
mat_c , ldc);
}
else if( Helper::network_type_get() == Layer::HALF_FLOAT_TYPE )
{
half alpha = approx_float_to_half(1.0);
half beta = approx_float_to_half(0.0);
int m = 1; // number of rows of matrix op(A) and C
int n = total_inputs; // number of columns of matrix op (B) and C
int k = total_outputs; // number of columns and rows of matrix op(A) and op(B)
int lda = 1; // leading dimension of matrix A
int ldb = total_inputs; // leading dimension of matrix B
int ldc = 1; // leading dimension of matrix C
half *mat_a = err_dvt; // Matrix A
half *mat_b = w; // Matrix B
half *mat_c = propagate_err; // Matrix C
hipblasOperation_t op_A = HIPBLAS_OP_N; // op(A) = A
hipblasOperation_t op_B = HIPBLAS_OP_T; // op(B) = B'
// calculate C = alpha * A * B + beta * C
// the formula is pre_err = err_dvt * transpose(W)
hipblasHgemm(Device::Device_Get_Handle(),op_A,op_B,\
m , n, k,\
&alpha,\
mat_a , lda,\
mat_b , ldb,\
&beta ,\
mat_c , ldc);
}
}
void Helper::update_param(Layer::layer_param_t x, Layer::layer_param_t dx, float ALPHA, int n)
{
if( Helper::network_type_get() == Layer::FLOAT_TYPE )
{
hipLaunchKernelGGL(( Update_Param_Gpu), dim3(CUDA_BLOCKS(n)), dim3(Device::total_threads), 0, 0, x, dx, ALPHA, n);
}
else if( Helper::network_type_get() == Layer::HALF_FLOAT_TYPE )
{
hipLaunchKernelGGL(( h_Update_Param_Gpu), dim3(CUDA_BLOCKS(n)), dim3(Device::total_threads), 0, 0, x, dx, ALPHA, n);
}
}
/***************************************
* LOSS FUNCTION
***************************************/
void Helper::Cross_Entropy_Loss(const float * neural_out, const float * expect_out, float * loss, int n)
{
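// Mean binary cross-entropy over the n outputs:
// loss = -(1/n) * sum_i [ t_i*log(y_i) + (1 - t_i)*log(1 - y_i) ]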
float sum = 0.0;
for(int i = 0; i < n; i++)
{
sum += -( expect_out[i] * log(neural_out[i]) + (1 - expect_out[i]) * log(1 - neural_out[i]) );
}
*loss = sum / n;
}
void Helper::Cross_Entropy_Loss_Derivative(const Layer::layer_param_t neural_out, const Layer::layer_param_t expect_out,
Layer::layer_param_t loss_dvt, int n)
{
if( Helper::network_type_get() == Layer::FLOAT_TYPE )
{
hipLaunchKernelGGL(( CrossEntropyLoss_Derivative_Gpu), dim3(CUDA_BLOCKS(n)), dim3(Device::total_threads), 0, 0, neural_out, expect_out, loss_dvt, n);
}
else if( Helper::network_type_get() == Layer::HALF_FLOAT_TYPE )
{
hipLaunchKernelGGL(( h_CrossEntropyLoss_Derivative_Gpu), dim3(CUDA_BLOCKS(n)), dim3(Device::total_threads), 0, 0, neural_out, expect_out, loss_dvt, n);
}
}
Layer::param_type_e Helper::network_type_get(void)
{
#if USING_HALF_FLOAT
return Layer::HALF_FLOAT_TYPE;
#else
return Layer::FLOAT_TYPE;
#endif
}
| b8e431aad1a6259e0be55f5b1f1b29e2d643f401.cu | /*************************************************************
* File: helper.cu
*
*
*************************************************************/
#include <ctime>
#include <random>
#include "helper.h"
#include "cublas_v2.h"
#include "curand.h"
#include "curand_kernel.h"
#include "cuda_runtime.h"
#include "device.h"
#include "fp16_conversion.h"
/*************************************************************
* KERNEL FUNCTIONS
*************************************************************/
/**************************************************************
*
*
* SINGLE PRECISION FLOATING POINT KERNELS
*
*
**************************************************************/
__global__ void CrossEntropyLoss_Derivative_Gpu(const float * neural_out, const float * expect_out, float * loss_dvt, const int n)
{
int tid = blockIdx.x * blockDim.x + threadIdx.x;
if(tid < n)
{
loss_dvt[tid] = -expect_out[tid] / neural_out[tid] + (1 - expect_out[tid]) / (1 - neural_out[tid]);
}
}
__global__ void Sigmoid_Gpu(const float * z, float * output, const int n)
{
int tid = blockIdx.x * blockDim.x + threadIdx.x;
if(tid < n)
{
output[tid] = 1.0 / (1.0 + exp(-z[tid]));
}
}
__global__ void Sigmoid_Dev_Gpu(const float * output, float * act_dvt, const int n)
{
int tid = blockIdx.x * blockDim.x + threadIdx.x;
if(tid < n)
{
act_dvt[tid] = output[tid] * (1 - output[tid]);
}
}
__global__ void Err_Dev_Gpu(const float * error_signal, const float * act_dvt, float * err_dvt, const int n)
{
int tid = blockIdx.x * blockDim.x + threadIdx.x;
if(tid < n)
{
err_dvt[tid] = error_signal[tid] * act_dvt[tid];
}
}
__global__ void Update_Param_Gpu(float * x, float * dx, float ALPHA, int n)
{
int tid = blockIdx.x * blockDim.x + threadIdx.x;
if(tid < n)
{
// x = x + alpha * dx
x[tid] = x[tid] + ALPHA * dx[tid];
// clear dx
dx[tid] = 0;
}
}
__global__ void Softmax_Gpu(const float * z, float * output, const int n)
{
float sum = 0.0;
int tid = blockIdx.x * blockDim.x + threadIdx.x;
if(tid < n)
{
output[tid] = exp(z[tid]);
}
__syncthreads();
if(tid < n)
{
for(int i = 0; i < n; i++)
{
sum += output[i];
}
}
__syncthreads();
if(tid < n)
{
output[tid] /= sum;
}
}
__global__ void Softmax_Dev_Gpu(const float * output, float * act_dvt, const int n)
{
int tid = blockIdx.x * blockDim.x + threadIdx.x;
if(tid < n)
{
act_dvt[tid] = output[tid] * (1 - output[tid]);
}
}
__global__ void fill_rand_gpu(float * array, int seed, int n)
{
int tid = blockIdx.x * blockDim.x + threadIdx.x;
if(tid < n)
{
curandState_t state;
curand_init(seed, tid, 0, &state);
// create random number in range [-0.5 , 0.5] with uniform distribution
array[tid] = curand_uniform(&state) - 0.5;
}
}
__global__ void fill_zero_gpu(float * array, int n)
{
int tid = blockIdx.x * blockDim.x + threadIdx.x;
if(tid < n)
{
array[tid] = 0;
}
}
/**************************************************************
*
*
* HALF PRECISION FLOATING POINT KERNELS
*
*
**************************************************************/
__global__ void h_add_vectors(const half* x, half* y, const int n)
{
int tid = blockIdx.x * blockDim.x + threadIdx.x;
if(tid < n)
{
half alpha = __float2half(1.0);
// y = alpha * x + y
y[tid] = __hfma(alpha, x[tid], y[tid]);
}
}
__global__ void cvt_float2half_gpu(const float * src, layer_param_t dst, const int n)
{
int tid = blockIdx.x * blockDim.x + threadIdx.x;
if(tid < n)
{
dst[tid] = __float2half(src[tid]);
}
}
__global__ void cvt_half2float_gpu(const layer_param_t src, float * dst, const int n)
{
int tid = blockIdx.x * blockDim.x + threadIdx.x;
if(tid < n)
{
dst[tid] = __half2float(src[tid]);
}
}
__global__ void h_fill_zero_gpu(half * array, int n)
{
int tid = blockIdx.x * blockDim.x + threadIdx.x;
if(tid < n)
{
array[tid] = __float2half(0);
}
}
__global__ void h_fill_rand_gpu(half * array, int seed, int n)
{
int tid = blockIdx.x * blockDim.x + threadIdx.x;
if(tid < n)
{
curandState_t state;
curand_init(seed, tid, 0, &state);
// create random number in range [-0.5 , 0.5] with uniform distribution
array[tid] = __float2half( curand_uniform(&state) - 0.5 );
}
}
__global__ void h_Softmax_Dev_Gpu(const half * output, half * act_dvt, const int n)
{
int tid = blockIdx.x * blockDim.x + threadIdx.x;
if(tid < n)
{
// act_dvt[tid] = output[tid] * (1 - output[tid]);
half one = __float2half(1.0);
half minus_one = __float2half(-1.0);
half temp = __hfma(minus_one, output[tid], one);
act_dvt[tid] = __hmul(output[tid], temp);
}
}
__global__ void h_Softmax_Gpu(const half * z, half * output, const int n)
{
half sum = __float2half(0.0);
int tid = blockIdx.x * blockDim.x + threadIdx.x;
if(tid < n)
{
// output[tid] = exp(z[tid]);
output[tid] = hexp(z[tid]);
}
__syncthreads();
if(tid < n)
{
for(int i = 0; i < n; i++)
{
// sum += output[i];
sum = __hadd( sum , output[i] );
}
}
__syncthreads();
if(tid < n)
{
// output[tid] /= sum;
output[tid] = __hdiv(output[tid], sum);
}
}
__global__ void h_Update_Param_Gpu(half * x, half * dx, float ALPHA, const int n)
{
int tid = blockIdx.x * blockDim.x + threadIdx.x;
if(tid < n)
{
half h_alpha = __float2half(ALPHA);
// x = x + alpha * dx
// x[tid] = x[tid] + ALPHA * dx[tid];
x[tid] = __hfma(h_alpha, dx[tid], x[tid]);
// clear dx
// dx[tid] = 0;
dx[tid] = __float2half(0.0);
}
}
__global__ void h_Err_Dev_Gpu(const half * error_signal, const half * act_dvt, half * err_dvt, const int n)
{
int tid = blockIdx.x * blockDim.x + threadIdx.x;
if(tid < n)
{
// err_dvt[tid] = error_signal[tid] * act_dvt[tid];
err_dvt[tid] = __hmul(error_signal[tid], act_dvt[tid]);
}
}
__global__ void h_Sigmoid_Dev_Gpu(const half * output, half * act_dvt, const int n)
{
int tid = blockIdx.x * blockDim.x + threadIdx.x;
if(tid < n)
{
// act_dvt[tid] = output[tid] * (1 - output[tid]);
half one = __float2half(1.0);
half minus_one = __float2half(-1.0);
half temp = __hfma(minus_one, output[tid], one);
act_dvt[tid] = __hmul(output[tid], temp);
}
}
__global__ void h_Sigmoid_Gpu(const half * z, half * output, const int n)
{
int tid = blockIdx.x * blockDim.x + threadIdx.x;
if(tid < n)
{
// output[tid] = 1.0 / (1.0 + exp(-z[tid]));
half exponent = hexp(z[tid]);
half num = __float2half(1.0);
half divisor = __hfma(num, exponent, num);
output[tid] = __hdiv(exponent, divisor);
}
}
__global__ void h_CrossEntropyLoss_Derivative_Gpu(const half * neural_out, const half * expect_out, half * loss_dvt, const int n)
{
int tid = blockIdx.x * blockDim.x + threadIdx.x;
if(tid < n)
{
// loss_dvt[tid] = -expect_out[tid] / neural_out[tid] + (1 - expect_out[tid]) / (1 - neural_out[tid]);
half minus_one = __float2half(-1.0);
half one = __float2half(1.0);
half x = __hdiv(expect_out[tid], neural_out[tid]);
half y = __hdiv( __hfma(minus_one, expect_out[tid], one), __hfma(minus_one, neural_out[tid], one) );
loss_dvt[tid] = __hfma(minus_one, x, y);
}
}
/*************************************************************
* PUBLIC FUNCTIONS
*************************************************************/
/***************************************
* FLOAT - HALF FLOAT CONVERT FUNCTION
***************************************/
void Helper::cvtfloat2half(const float * src, Layer::layer_param_t dst, const int n_elements)
{
cvt_float2half_gpu<<<CUDA_BLOCKS(n_elements), Device::total_threads>>>(src, dst, n_elements);
}
void Helper::cvthalf2float(const Layer::layer_param_t src, float * dst, const int n_elements)
{
cvt_half2float_gpu<<<CUDA_BLOCKS(n_elements), Device::total_threads>>>(src, dst, n_elements);
}
/***************************************
* MEMORY ALLOCATION FUNCTION
***************************************/
void Helper::cuda_array_random_allocate(void **array, Layer::param_type_e type, int size)
{
if( type == Layer::FLOAT_TYPE )
{
cudaMalloc(array, size * sizeof(float));
// Fill with random number
fill_rand_gpu<<<CUDA_BLOCKS(size), Device::total_threads>>>((float *)*array, time(NULL), size);
}
else if( type == Layer::HALF_FLOAT_TYPE )
{
cudaMalloc(array, size * sizeof(half));
h_fill_rand_gpu<<<CUDA_BLOCKS(size), Device::total_threads>>>((half *)*array, time(NULL), size);
}
}
void Helper::cuda_array_zero_allocate(void **array, Layer::param_type_e type, int size)
{
if( type == Layer::FLOAT_TYPE )
{
cudaMalloc(array, size * sizeof(float));
// fill with zero
fill_zero_gpu<<<CUDA_BLOCKS(size), Device::total_threads>>>((float *)*array, size);
}
else if( type == Layer::HALF_FLOAT_TYPE )
{
cudaMalloc(array, size * sizeof(half));
h_fill_zero_gpu<<<CUDA_BLOCKS(size), Device::total_threads>>>((half *)*array, size);
}
}
void Helper::cuda_array_allocate(void **array, Layer::param_type_e type, int size)
{
if( type == Layer::FLOAT_TYPE )
{
cudaMalloc(array, size * sizeof(float));
}
else if( type == Layer::HALF_FLOAT_TYPE )
{
cudaMalloc(array, size * sizeof(half));
}
}
/***************************************
* LAYER SUB-CALCULATION FUNCTION
***************************************/
void Helper::net_calc(const Layer::layer_param_t input, const Layer::layer_param_t w,
const Layer::layer_param_t b, Layer::layer_param_t z,
int total_inputs, int total_outputs)
{
if( Helper::network_type_get() == Layer::FLOAT_TYPE )
{
float alpha = 1.0;
float beta = 0.0;
int m = 1; // number of rows of matrix op(A) and C
int n = total_outputs; // number of columns of matrix op (B) and C
int k = total_inputs; // number of columns and rows of matrix op(A) and op(B)
int lda = 1; // leading dimension of matrix A
int ldb = total_inputs; // leading dimension of matrix B
int ldc = 1; // leading dimension of matrix C
float *mat_a = (float *)input; // Matrix A
float *mat_b = (float *)w; // Matrix B
float *mat_c = z; // Matrix C
cublasOperation_t op_A = CUBLAS_OP_N; // op(A) = A
cublasOperation_t op_B = CUBLAS_OP_N; // op(B) = B
// calculate z = x*W
cublasSgemm(Device::Device_Get_Handle(),op_A,op_B,\
m , n , k,\
&alpha,\
mat_a , lda,\
mat_b , ldb,\
&beta ,\
mat_c , ldc);
// add bias z = bias + z
cublasSaxpy(Device::Device_Get_Handle(), n, &alpha, b, 1, z, 1);
}
else if( Helper::network_type_get() == Layer::HALF_FLOAT_TYPE )
{
half alpha = approx_float_to_half(1.0);
half beta = approx_float_to_half(0.0);
int m = 1; // number of rows of matrix op(A) and C
int n = total_outputs; // number of columns of matrix op (B) and C
int k = total_inputs; // number of columns and rows of matrix op(A) and op(B)
int lda = 1; // leading dimension of matrix A
int ldb = total_inputs; // leading dimension of matrix B
int ldc = 1; // leading dimension of matrix C
half *mat_a = input; // Matrix A
half *mat_b = w; // Matrix B
half *mat_c = z; // Matrix C
cublasOperation_t op_A = CUBLAS_OP_N; // op(A) = A
cublasOperation_t op_B = CUBLAS_OP_N; // op(B) = B
// calculate z = x*W
cublasHgemm(Device::Device_Get_Handle(),op_A,op_B,\
m , n , k,\
&alpha,\
mat_a , lda,\
mat_b , ldb,\
&beta ,\
mat_c , ldc);
// add bias z = bias + z
h_add_vectors<<<CUDA_BLOCKS(total_outputs), Device::total_threads>>>(b, z, total_outputs);
}
}
void Helper::sigmoid_calc(const Layer::layer_param_t z, Layer::layer_param_t output, int n)
{
if( Helper::network_type_get() == Layer::FLOAT_TYPE )
{
Sigmoid_Gpu<<<CUDA_BLOCKS(n), Device::total_threads>>>(z, output, n);
}
else if( Helper::network_type_get() == Layer::HALF_FLOAT_TYPE )
{
h_Sigmoid_Gpu<<<CUDA_BLOCKS(n), Device::total_threads>>>(z, output, n);
}
}
void Helper::sigmoid_dev_calc(Layer::layer_param_t output, Layer::layer_param_t act_dvt, int n)
{
if( Helper::network_type_get() == Layer::FLOAT_TYPE )
{
Sigmoid_Dev_Gpu<<<CUDA_BLOCKS(n), Device::total_threads>>>(output, act_dvt, n);
}
else if( Helper::network_type_get() == Layer::HALF_FLOAT_TYPE )
{
h_Sigmoid_Dev_Gpu<<<CUDA_BLOCKS(n), Device::total_threads>>>(output, act_dvt, n);
}
}
void Helper::softmax_calc(const Layer::layer_param_t z, Layer::layer_param_t output, int n)
{
if( Helper::network_type_get() == Layer::FLOAT_TYPE )
{
Softmax_Gpu<<<CUDA_BLOCKS(n), Device::total_threads>>>(z, output, n);
}
else if( Helper::network_type_get() == Layer::HALF_FLOAT_TYPE )
{
h_Softmax_Gpu<<<CUDA_BLOCKS(n), Device::total_threads>>>(z, output, n);
}
}
void Helper::softmax_dev_calc(const Layer::layer_param_t output, Layer::layer_param_t act_dvt, int n)
{
if( Helper::network_type_get() == Layer::FLOAT_TYPE )
{
Softmax_Dev_Gpu<<<CUDA_BLOCKS(n), Device::total_threads>>>(output, act_dvt, n);
}
else if( Helper::network_type_get() == Layer::HALF_FLOAT_TYPE )
{
h_Softmax_Dev_Gpu<<<CUDA_BLOCKS(n), Device::total_threads>>>(output, act_dvt, n);
}
}
void Helper::err_dev_calc(Layer::layer_param_t error_signal, Layer::layer_param_t act_dvt,
Layer::layer_param_t err_dvt, int n)
{
if( Helper::network_type_get() == Layer::FLOAT_TYPE )
{
Err_Dev_Gpu<<<CUDA_BLOCKS(n), Device::total_threads>>>(error_signal, act_dvt, err_dvt, n);
}
else if( Helper::network_type_get() == Layer::HALF_FLOAT_TYPE )
{
h_Err_Dev_Gpu<<<CUDA_BLOCKS(n), Device::total_threads>>>(error_signal, act_dvt, err_dvt, n);
}
}
void Helper::accum_w_grad(Layer::layer_param_t input, Layer::layer_param_t err_dvt,
Layer::layer_param_t w_grad, int total_inputs, int total_outputs)
{
if( Helper::network_type_get() == Layer::FLOAT_TYPE )
{
float alpha = 1.0;
float beta = 1.0;
int m = total_inputs; // number of rows of matrix op(A) and C
int n = total_outputs; // number of columns of matrix op (B) and C
int k = 1; // number of columns and rows of matrix op(A) and op(B)
int lda = 1; // leading dimension of matrix A
int ldb = 1; // leading dimension of matrix B
int ldc = m; // leading dimension of matrix C
float *mat_a = input; // Matrix A
float *mat_b = err_dvt; // Matrix B
float *mat_c = w_grad; // Matrix C
cublasOperation_t op_A = CUBLAS_OP_T; // op(A) = A'
cublasOperation_t op_B = CUBLAS_OP_N; // op(B) = B
// calculate C = alpha * A * B + beta * C
// the formula is dW = dW + transpose(input) * err_dvt
cublasSgemm(Device::Device_Get_Handle(),op_A,op_B,\
m , n, k,\
&alpha,\
mat_a , lda,\
mat_b , ldb,\
&beta ,\
mat_c , ldc);
}
else if( Helper::network_type_get() == Layer::HALF_FLOAT_TYPE )
{
half alpha = approx_float_to_half(1.0);
half beta = approx_float_to_half(1.0);
int m = total_inputs; // number of rows of matrix op(A) and C
int n = total_outputs; // number of columns of matrix op (B) and C
int k = 1; // number of columns and rows of matrix op(A) and op(B)
int lda = 1; // leading dimension of matrix A
int ldb = 1; // leading dimension of matrix B
int ldc = m; // leading dimension of matrix C
half *mat_a = input; // Matrix A
half *mat_b = err_dvt; // Matrix B
half *mat_c = w_grad; // Matrix C
cublasOperation_t op_A = CUBLAS_OP_T; // op(A) = A'
cublasOperation_t op_B = CUBLAS_OP_N; // op(B) = B
// calculate C = alpha * A * B + beta * C
// the formula is dW = dW + transpose(input) * err_dvt
cublasHgemm(Device::Device_Get_Handle(),op_A,op_B,\
m , n, k,\
&alpha,\
mat_a , lda,\
mat_b , ldb,\
&beta ,\
mat_c , ldc);
}
}
void Helper::accum_b_grad(Layer::layer_param_t err_dvt, Layer::layer_param_t b_grad, int n)
{
if( Helper::network_type_get() == Layer::FLOAT_TYPE )
{
float alpha = 1.0;
float * x = err_dvt;
float * y = b_grad;
cublasSaxpy(Device::Device_Get_Handle(), n, &alpha, x, 1, y, 1);
}
else if( Helper::network_type_get() == Layer::HALF_FLOAT_TYPE )
{
half * x = err_dvt;
half * y = b_grad;
h_add_vectors<<<CUDA_BLOCKS(n), Device::total_threads>>>(x, y, n);
}
}
void Helper::err_signal_calc(const Layer::layer_param_t w, const Layer::layer_param_t err_dvt,
Layer::layer_param_t propagate_err, int total_inputs, int total_outputs)
{
if( Helper::network_type_get() == Layer::FLOAT_TYPE )
{
float alpha = 1.0;
float beta = 0.0;
int m = 1; // number of rows of matrix op(A) and C
int n = total_inputs; // number of columns of matrix op (B) and C
int k = total_outputs; // number of columns and rows of matrix op(A) and op(B)
int lda = 1; // leading dimension of matrix A
int ldb = total_inputs; // leading dimension of matrix B
int ldc = 1; // leading dimension of matrix C
float *mat_a = err_dvt; // Matrix A
float *mat_b = w; // Matrix B
float *mat_c = propagate_err; // Matrix C
cublasOperation_t op_A = CUBLAS_OP_N; // op(A) = A
cublasOperation_t op_B = CUBLAS_OP_T; // op(B) = B'
// calculate C = alpha * A * B + beta * C
// the formula is pre_err = err_dvt * transpose(W)
cublasSgemm(Device::Device_Get_Handle(),op_A,op_B,\
m , n, k,\
&alpha,\
mat_a , lda,\
mat_b , ldb,\
&beta ,\
mat_c , ldc);
}
else if( Helper::network_type_get() == Layer::HALF_FLOAT_TYPE )
{
half alpha = approx_float_to_half(1.0);
half beta = approx_float_to_half(0.0);
int m = 1; // number of rows of matrix op(A) and C
int n = total_inputs; // number of columns of matrix op (B) and C
int k = total_outputs; // number of columns and rows of matrix op(A) and op(B)
int lda = 1; // leading dimension of matrix A
int ldb = total_inputs; // leading dimension of matrix B
int ldc = 1; // leading dimension of matrix C
half *mat_a = err_dvt; // Matrix A
half *mat_b = w; // Matrix B
half *mat_c = propagate_err; // Matrix C
cublasOperation_t op_A = CUBLAS_OP_N; // op(A) = A
cublasOperation_t op_B = CUBLAS_OP_T; // op(B) = B'
// calculate C = alpha * A * B + beta * C
// the formula is pre_err = err_dvt * transpose(W)
cublasHgemm(Device::Device_Get_Handle(),op_A,op_B,\
m , n, k,\
&alpha,\
mat_a , lda,\
mat_b , ldb,\
&beta ,\
mat_c , ldc);
}
}
void Helper::update_param(Layer::layer_param_t x, Layer::layer_param_t dx, float ALPHA, int n)
{
if( Helper::network_type_get() == Layer::FLOAT_TYPE )
{
Update_Param_Gpu<<<CUDA_BLOCKS(n), Device::total_threads>>>(x, dx, ALPHA, n);
}
else if( Helper::network_type_get() == Layer::HALF_FLOAT_TYPE )
{
h_Update_Param_Gpu<<<CUDA_BLOCKS(n), Device::total_threads>>>(x, dx, ALPHA, n);
}
}
/***************************************
* LOSS FUNCTION
***************************************/
void Helper::Cross_Entropy_Loss(const float * neural_out, const float * expect_out, float * loss, int n)
{
float sum = 0.0;
for(int i = 0; i < n; i++)
{
sum += -( expect_out[i] * log(neural_out[i]) + (1 - expect_out[i]) * log(1 - neural_out[i]) );
}
*loss = sum / n;
}
void Helper::Cross_Entropy_Loss_Derivative(const Layer::layer_param_t neural_out, const Layer::layer_param_t expect_out,
Layer::layer_param_t loss_dvt, int n)
{
if( Helper::network_type_get() == Layer::FLOAT_TYPE )
{
CrossEntropyLoss_Derivative_Gpu<<<CUDA_BLOCKS(n), Device::total_threads>>>(neural_out, expect_out, loss_dvt, n);
}
else if( Helper::network_type_get() == Layer::HALF_FLOAT_TYPE )
{
h_CrossEntropyLoss_Derivative_Gpu<<<CUDA_BLOCKS(n), Device::total_threads>>>(neural_out, expect_out, loss_dvt, n);
}
}
Layer::param_type_e Helper::network_type_get(void)
{
#if USING_HALF_FLOAT
return Layer::HALF_FLOAT_TYPE;
#else
return Layer::FLOAT_TYPE;
#endif
}
|
302928ecc3908677fad48b1286211afb38fa8069.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// v0.2 modified by WZ
//#include <wb.h>
#include "wb4.h" // use our lib instead (under construction)
#define wbCheck(stmt) \
do { \
hipError_t err = stmt; \
if (err != hipSuccess) { \
wbLog(ERROR, "Failed to run stmt ", #stmt); \
wbLog(ERROR, "Got CUDA error ... ", hipGetErrorString(err)); \
return -1; \
} \
} while (0)
#define BLUR_SIZE 5
//@@ INSERT CODE HERE
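// Box blur: each thread averages the (2*BLUR_SIZE+1)^2 neighborhood of its pixel for each RGB channel, clamping at the image borders.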
__global__ void blurKernel(unsigned char* in, unsigned char* out, int w, int h)
{
int x = blockIdx.x * blockDim.x + threadIdx.x;
int y = blockIdx.y * blockDim.y + threadIdx.y;
if (x < w && y < h)
{
int pixValR = 0;
int pixValG = 0;
int pixValB = 0;
int pixels = 0;
for(int blurRow = -BLUR_SIZE; blurRow < BLUR_SIZE + 1; ++blurRow)
{
for(int blurCol = -BLUR_SIZE; blurCol < BLUR_SIZE + 1; ++blurCol)
{
int curRow = y + blurRow;
int curCol = x + blurCol;
if((curRow > -1) && (curRow < h) && (curCol > -1) && (curCol < w))
{
pixValR+= in[(curRow * w + curCol) * 3];
pixValG+= in[(curRow * w + curCol) * 3 + 1];
pixValB+= in[(curRow * w + curCol) * 3 + 2];
pixels++;
}
}
}
int idxR = (y * w + x) * 3;
int idxG = (y * w + x) * 3 + 1;
int idxB = (y * w + x) * 3 + 2;
out[idxR] = (unsigned char)(pixValR/pixels);
out[idxG] = (unsigned char)(pixValG/pixels);
out[idxB] = (unsigned char)(pixValB/pixels);
}
}
int main(int argc, char *argv[]) {
wbArg_t args;
int imageWidth;
int imageHeight;
char *inputImageFile;
wbImage_t inputImage;
wbImage_t outputImage;
unsigned char *hostInputImageData;
unsigned char *hostOutputImageData;
unsigned char *deviceInputImageData;
unsigned char *deviceOutputImageData;
args = wbArg_read(argc, argv); /* parse the input arguments */
inputImageFile = wbArg_getInputFile(args, 1);
printf( "imagem de entrada: %s\n", inputImageFile );
// inputImage = wbImportImage(inputImageFile);
inputImage = wbImport(inputImageFile);
imageWidth = wbImage_getWidth(inputImage);
imageHeight = wbImage_getHeight(inputImage);
// NOW: input and output images are RGB (3 channel)
outputImage = wbImage_new(imageWidth, imageHeight, 3);
hostInputImageData = wbImage_getData(inputImage);
hostOutputImageData = wbImage_getData(outputImage);
wbTime_start(GPU, "Doing GPU Computation (memory + compute)");
wbTime_start(GPU, "Doing GPU memory allocation");
hipMalloc((void **)&deviceInputImageData,
imageWidth * imageHeight * sizeof(unsigned char) * 3);
hipMalloc((void **)&deviceOutputImageData,
imageWidth * imageHeight * sizeof(unsigned char) * 3);
wbTime_stop(GPU, "Doing GPU memory allocation");
wbTime_start(Copy, "Copying data to the GPU");
hipMemcpy(deviceInputImageData, hostInputImageData,
imageWidth * imageHeight * sizeof(unsigned char) * 3,
hipMemcpyHostToDevice);
wbTime_stop(Copy, "Copying data to the GPU");
///////////////////////////////////////////////////////
wbTime_start(Compute, "Doing the computation on the GPU");
int blockSize = 32;
dim3 dimGrid((imageWidth-1)/blockSize + 1, (imageHeight-1)/blockSize+1, 1);
dim3 dimBlock(blockSize, blockSize, 1);
hipLaunchKernelGGL(( blurKernel), dim3(dimGrid),dim3(dimBlock), 0, 0, deviceInputImageData, deviceOutputImageData,
imageWidth, imageHeight);
wbTime_stop(Compute, "Doing the computation on the GPU");
///////////////////////////////////////////////////////
wbTime_start(Copy, "Copying data from the GPU");
hipMemcpy(hostOutputImageData, deviceOutputImageData,
imageWidth * imageHeight * sizeof(unsigned char) * 3,
hipMemcpyDeviceToHost);
wbTime_stop(Copy, "Copying data from the GPU");
wbTime_stop(GPU, "Doing GPU Computation (memory + compute)");
wbSolution(args, outputImage);
// DEBUG: if you want to see your image,
// will generate the file below in the current directory
/* wbExport( "blurred.ppm", outputImage ); */
hipFree(deviceInputImageData);
hipFree(deviceOutputImageData);
wbImage_delete(outputImage);
wbImage_delete(inputImage);
return 0;
}
| 302928ecc3908677fad48b1286211afb38fa8069.cu | // v0.2 modified by WZ
//#include <wb.h>
#include "wb4.h" // use our lib instead (under construction)
#define wbCheck(stmt) \
do { \
cudaError_t err = stmt; \
if (err != cudaSuccess) { \
wbLog(ERROR, "Failed to run stmt ", #stmt); \
wbLog(ERROR, "Got CUDA error ... ", cudaGetErrorString(err)); \
return -1; \
} \
} while (0)
#define BLUR_SIZE 5
//@@ INSERT CODE HERE
__global__ void blurKernel(unsigned char* in, unsigned char* out, int w, int h)
{
int x = blockIdx.x * blockDim.x + threadIdx.x;
int y = blockIdx.y * blockDim.y + threadIdx.y;
if (x < w && y < h)
{
int pixValR = 0;
int pixValG = 0;
int pixValB = 0;
int pixels = 0;
for(int blurRow = -BLUR_SIZE; blurRow < BLUR_SIZE + 1; ++blurRow)
{
for(int blurCol = -BLUR_SIZE; blurCol < BLUR_SIZE + 1; ++blurCol)
{
int curRow = y + blurRow;
int curCol = x + blurCol;
if((curRow > -1) && (curRow < h) && (curCol > -1) && (curCol < w))
{
pixValR+= in[(curRow * w + curCol) * 3];
pixValG+= in[(curRow * w + curCol) * 3 + 1];
pixValB+= in[(curRow * w + curCol) * 3 + 2];
pixels++;
}
}
}
int idxR = (y * w + x) * 3;
int idxG = (y * w + x) * 3 + 1;
int idxB = (y * w + x) * 3 + 2;
out[idxR] = (unsigned char)(pixValR/pixels);
out[idxG] = (unsigned char)(pixValG/pixels);
out[idxB] = (unsigned char)(pixValB/pixels);
}
}
int main(int argc, char *argv[]) {
wbArg_t args;
int imageWidth;
int imageHeight;
char *inputImageFile;
wbImage_t inputImage;
wbImage_t outputImage;
unsigned char *hostInputImageData;
unsigned char *hostOutputImageData;
unsigned char *deviceInputImageData;
unsigned char *deviceOutputImageData;
args = wbArg_read(argc, argv); /* parse the input arguments */
inputImageFile = wbArg_getInputFile(args, 1);
printf( "imagem de entrada: %s\n", inputImageFile );
// inputImage = wbImportImage(inputImageFile);
inputImage = wbImport(inputImageFile);
imageWidth = wbImage_getWidth(inputImage);
imageHeight = wbImage_getHeight(inputImage);
// NOW: input and output images are RGB (3 channel)
outputImage = wbImage_new(imageWidth, imageHeight, 3);
hostInputImageData = wbImage_getData(inputImage);
hostOutputImageData = wbImage_getData(outputImage);
wbTime_start(GPU, "Doing GPU Computation (memory + compute)");
wbTime_start(GPU, "Doing GPU memory allocation");
cudaMalloc((void **)&deviceInputImageData,
imageWidth * imageHeight * sizeof(unsigned char) * 3);
cudaMalloc((void **)&deviceOutputImageData,
imageWidth * imageHeight * sizeof(unsigned char) * 3);
wbTime_stop(GPU, "Doing GPU memory allocation");
wbTime_start(Copy, "Copying data to the GPU");
cudaMemcpy(deviceInputImageData, hostInputImageData,
imageWidth * imageHeight * sizeof(unsigned char) * 3,
cudaMemcpyHostToDevice);
wbTime_stop(Copy, "Copying data to the GPU");
///////////////////////////////////////////////////////
wbTime_start(Compute, "Doing the computation on the GPU");
int blockSize = 32;
dim3 dimGrid((imageWidth-1)/blockSize + 1, (imageHeight-1)/blockSize+1, 1);
dim3 dimBlock(blockSize, blockSize, 1);
blurKernel<<<dimGrid,dimBlock>>>(deviceInputImageData, deviceOutputImageData,
imageWidth, imageHeight);
wbTime_stop(Compute, "Doing the computation on the GPU");
///////////////////////////////////////////////////////
wbTime_start(Copy, "Copying data from the GPU");
cudaMemcpy(hostOutputImageData, deviceOutputImageData,
imageWidth * imageHeight * sizeof(unsigned char) * 3,
cudaMemcpyDeviceToHost);
wbTime_stop(Copy, "Copying data from the GPU");
wbTime_stop(GPU, "Doing GPU Computation (memory + compute)");
wbSolution(args, outputImage);
// DEBUG: if you want to see your image,
// will generate the file below in the current directory
/* wbExport( "blurred.ppm", outputImage ); */
cudaFree(deviceInputImageData);
cudaFree(deviceOutputImageData);
wbImage_delete(outputImage);
wbImage_delete(inputImage);
return 0;
}
|
ede51f1bbf7a65be480ff67700ec896530299c27.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "kernel.h"
#include "fix.h"
#include "device_launch_parameters.h"
#include <hip/device_functions.h>
#include <device_atomic_functions.h>
#include <stdio.h>
#include <stdlib.h>
#include <memory.h>
#include <math.h>
#include <cstdlib>
//
#define CHANNELS 3
#define REDCHANNEL 'r'
#define GREENCHANNEL 'g'
#define BLUECHANNEL 'b'
#define GRAYSCLAEREDCHANNEL 0.21
#define GRAYSCLAEGREENCHANNEL 0.71
#define GRAYSCLAEBLUECHANNEL 0.07
#define SOBEL_RADIUS 1
#define TILE_W 16
#define BLOCK_W (TILE_W + 2*SOBEL_RADIUS)
#define ANGLE 50
#define HISTOGRAMMSIZE 256
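// Sobel edge filter over one stream's image slice, staging a (TILE_W + 2*SOBEL_RADIUS)^2 tile in shared memory.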
__global__ void sobelFilterKernelTiledStreams(int *cu_image_width, int *cu_image_height, unsigned char *cu_src_image, unsigned char *cu_dest_image, int sobel_offset)
{
__shared__ char ds_Img[BLOCK_W][BLOCK_W];
int bx = blockIdx.x; int by = blockIdx.y;
int tx = threadIdx.x; int ty = threadIdx.y;
int sobel_x[3][3] = {
{ 1, 0, -1 },
{ 2, 0, -2 },
{ 1, 0, -1 }
};
int sobel_y[3][3] = {
{ 1, 2, 1 },
{ 0, 0, 0 },
{ -1, -2, -1 }
};
int x = bx * TILE_W + tx - SOBEL_RADIUS; //cols
int y = by * TILE_W + ty - SOBEL_RADIUS; //rows
//Make sure x/y are not negative
if (x < 0) {
x = 0;
}
if (y < 0) {
y = 0;
}
//Calc index of global memory
int global_index = sobel_offset + (y * (*cu_image_width) + x);
//Load Data into Shared Memory
//Insert 0 if the thread is supposed to fill the filter radius border of the tile
if (x >= 0 && x < *cu_image_width - 1 && y >= 0 && y < *cu_image_height - 1) {
ds_Img[ty][tx] = cu_src_image[global_index];
}
else {
if (x < *cu_image_width && y < *cu_image_height) {
ds_Img[ty][tx] = 0;
}
}
__syncthreads();
//Calc Sobel X & Y if the thread is inside the filter area
if ((tx >= SOBEL_RADIUS) && (tx <= TILE_W) &&
(ty >= SOBEL_RADIUS) && (ty <= TILE_W)) {
int sobel_gradient_y = 0, sobel_gradient_x = 0, sobel_magnitude = 0;
for (int j = -SOBEL_RADIUS; j <= SOBEL_RADIUS; j++) {
for (int k = -SOBEL_RADIUS; k <= SOBEL_RADIUS; k++) {
sobel_gradient_x += ds_Img[ty + j][tx + k] * sobel_x[j + SOBEL_RADIUS][k + SOBEL_RADIUS];
sobel_gradient_y += ds_Img[ty + j][tx + k] * sobel_y[j + SOBEL_RADIUS][k + SOBEL_RADIUS];
}
}
//Calc Sobel magnitude and save it to the original image
sobel_magnitude = (int)sqrt((float)pow((float)sobel_gradient_x, 2) + (float)pow((float)sobel_gradient_y, 2));
cu_dest_image[global_index] = (unsigned char)sobel_magnitude;
}
}
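// Sobel edge filter over one stream's image slice, reading neighbor pixels directly from global memory.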
__global__ void sobelFilterKernelStreams(int *cu_image_width, int *cu_image_height, unsigned char *cu_src_image, unsigned char *cu_dest_image, int offset_sobel)
{
int sobel_x[3][3] = {
{ 1, 0, -1 },
{ 2, 0, -2 },
{ 1, 0, -1 }
};
int sobel_y[3][3] = {
{ 1, 2, 1 },
{ 0, 0, 0 },
{ -1, -2, -1 }
};
int x = blockIdx.x * blockDim.x + threadIdx.x; //cols
int y = blockIdx.y * blockDim.y + threadIdx.y; //rows
//Calc index
int global_index = offset_sobel + (y * (*cu_image_width) + x);
if (x >= SOBEL_RADIUS && x < *cu_image_width - 1 && y >= SOBEL_RADIUS && y < *cu_image_height - 1) {
//Calc Sobel X & Y if the thread is inside the filter area
int sobel_gradient_y = 0, sobel_gradient_x = 0, sobel_magnitude = 0;
for (int j = -SOBEL_RADIUS; j <= SOBEL_RADIUS; j++) {
for (int k = -SOBEL_RADIUS; k <= SOBEL_RADIUS; k++) {
int sobel_index = offset_sobel + (y + j) * (*cu_image_width) + (x + k);
sobel_gradient_x += cu_src_image[sobel_index] * sobel_x[j + SOBEL_RADIUS][k + SOBEL_RADIUS];
sobel_gradient_y += cu_src_image[sobel_index] * sobel_y[j + SOBEL_RADIUS][k + SOBEL_RADIUS];
}
}
//Calc Sobel magnitude and save it to the image
sobel_magnitude = (int)sqrt((float)pow((float)sobel_gradient_x, 2) + (float)pow((float)sobel_gradient_y, 2));
cu_dest_image[global_index] = (unsigned char)sobel_magnitude;
}
else {
if (x < *cu_image_width && y < *cu_image_height) {
cu_dest_image[global_index] = 0;
}
}
}
//Kernel rgb to grayscale function with streams
__global__ void rgbToGrayscaleKernelStream(int *cu_image_width, int *cu_image_height, unsigned char *cu_src_image, unsigned char *cu_dest_image, int offset_rgb, int offset_gray)
{
int x = blockIdx.x * blockDim.x + threadIdx.x; //cols
int y = blockIdx.y * blockDim.y + threadIdx.y; //rows
unsigned char r, g, b, gray;
if (x < *cu_image_width && y < *cu_image_height) {
int offset = (y * (*cu_image_width) + x);
int grayOffset = offset + offset_gray;
int rgbOffset = offset_rgb + offset * CHANNELS;
b = cu_src_image[rgbOffset];
g = cu_src_image[rgbOffset + 1];
r = cu_src_image[rgbOffset + 2];
gray = 0.21 * r + 0.71 *g + 0.07 *b;
cu_dest_image[grayOffset] = gray;
}
}
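// 256-bin grayscale histogram: each block accumulates into a shared-memory histogram, then merges it into the global histogram with atomicAdd.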
__global__ void getHistogrammTiledKernel(int *cu_image_width, int *cu_image_height, unsigned char *cu_src_image, unsigned int *cu_dest_histogramm) {
__shared__ unsigned int smem[HISTOGRAMMSIZE];
int x = blockIdx.x * blockDim.x + threadIdx.x; //cols
int y = blockIdx.y * blockDim.y + threadIdx.y; //rows
int shared_index = threadIdx.x + threadIdx.y * blockDim.x;
int stride_shared = blockDim.x * blockDim.y;
int stride_x = blockDim.x * gridDim.x;
int stride_y = blockDim.y * gridDim.y;
//Init shared memory histogramm with 0's
for (int i = shared_index; i < HISTOGRAMMSIZE; i += stride_shared) {
smem[i] = 0;
}
__syncthreads();
//Add data to histogramm in shared memory
while (x < *cu_image_width && y < *cu_image_height) {
int index = y * *cu_image_width + x;
atomicAdd(&(smem[cu_src_image[index]]), 1);
x += stride_x;
y += stride_y;
}
__syncthreads();
/*
long test = 0;
if (threadIdx.x == 0) {
for (int j = 0; j < HISTOGRAMMSIZE; j++) {
test += smem[j];
}
printf("Smem total per block: %lu \n", test);
}*/
//Add shared memory histogramm part to global memory histogramm
for (int i = shared_index; i < HISTOGRAMMSIZE; i+= stride_shared) {
atomicAdd(&(cu_dest_histogramm[i]), smem[i]);
}
__syncthreads();
}
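// 256-bin grayscale histogram accumulated directly in global memory with atomicAdd.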
__global__ void getHistogrammKernel(int *cu_image_width, int *cu_image_height, unsigned char *cu_src_image, unsigned int *cu_dest_histogramm)
{
int x = blockIdx.x * blockDim.x + threadIdx.x; //cols
int y = blockIdx.y * blockDim.y + threadIdx.y; //rows
int stride_x = blockDim.x * gridDim.x;
int stride_y = blockDim.y * gridDim.y;
while (x < *cu_image_width && y < *cu_image_height) {
int index = y * *cu_image_width + x;
atomicAdd(&(cu_dest_histogramm[cu_src_image[index]]), 1);
x += stride_x;
y += stride_y;
}
}
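// Sobel edge filter that samples the input through a texture object, with the sampling coordinates rotated by theta.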
__global__ void sobelFilterTexture(int *cu_image_width, int *cu_image_height, unsigned char *cu_output, hipTextureObject_t cu_texObj, float theta)
{
int sobel_x[3][3] = {
{ 1, 0, -1 },
{ 2, 0, -2 },
{ 1, 0, -1 }
};
int sobel_y[3][3] = {
{ 1, 2, 1 },
{ 0, 0, 0 },
{ -1, -2, -1 }
};
int x = blockIdx.x * blockDim.x + threadIdx.x;
int y = blockIdx.y * blockDim.y + threadIdx.y;
if (x < *cu_image_width - 1 && y < *cu_image_height - 1) {
int sobel_gradient_y = 0, sobel_gradient_x = 0, sobel_magnitude = 0;
for (int j = -SOBEL_RADIUS; j <= SOBEL_RADIUS; j++) {
for (int k = -SOBEL_RADIUS; k <= SOBEL_RADIUS; k++) {
//Calc normalized texture coordinates
float u = (x + k) / (float)*cu_image_width;
float v = (y + j) / (float)*cu_image_height;
// Transform coordinates
u -= 0.5f;
v -= 0.5f;
float tu = u * cosf(theta) - v * sinf(theta) + 0.5f;
float tv = v * cosf(theta) + u * sinf(theta) + 0.5f;
sobel_gradient_x += tex2D<float>(cu_texObj, tu, tv) * 255 * sobel_x[j + SOBEL_RADIUS][k + SOBEL_RADIUS];
sobel_gradient_y += tex2D<float>(cu_texObj, tu, tv) * 255 * sobel_y[j + SOBEL_RADIUS][k + SOBEL_RADIUS];
}
}
//Calc Sobel magnitude and save it to the image
sobel_magnitude = (int)sqrt((float)pow((float)sobel_gradient_x, 2) + (float)pow((float)sobel_gradient_y, 2));
cu_output[y * *cu_image_width + x] = (unsigned char)sobel_magnitude;
}
};
//Kernel sobel function
__global__ void sobelFilterKernelTiled(int *cu_image_width, int *cu_image_height, unsigned char *cu_src_image, unsigned char *cu_dest_image)
{
__shared__ char ds_Img[BLOCK_W][BLOCK_W];
int bx = blockIdx.x; int by = blockIdx.y;
int tx = threadIdx.x; int ty = threadIdx.y;
int sobel_x[3][3] = {
{ 1, 0, -1 },
{ 2, 0, -2 },
{ 1, 0, -1 }
};
int sobel_y[3][3] = {
{ 1, 2, 1 },
{ 0, 0, 0 },
{ -1, -2, -1 }
};
int x = bx * TILE_W + tx - SOBEL_RADIUS; //cols
int y = by * TILE_W + ty - SOBEL_RADIUS; //rows
//Make sure x/y are not negative
if (x < 0) {
x = 0;
}
if (y < 0) {
y = 0;
}
//Calc index of global memory
int global_index = (y * (*cu_image_width) + x);
//Load Data into Shared Memory
//Insert 0 if the thread is supposed to fill the filter radius border of the tile
if (x >= 0 && x < *cu_image_width - 1 && y >= 0 && y < *cu_image_height - 1) {
ds_Img[ty][tx] = cu_src_image[global_index];
}
else {
ds_Img[ty][tx] = 0;
}
__syncthreads();
//Calc Sobel X & Y if the thread is inside the filter area
if ((tx >= SOBEL_RADIUS) && (tx <= TILE_W) &&
(ty >= SOBEL_RADIUS) && (ty <= TILE_W)){
int sobel_gradient_y = 0, sobel_gradient_x = 0, sobel_magnitude = 0;
for (int j = -SOBEL_RADIUS; j <= SOBEL_RADIUS; j++) {
for (int k = -SOBEL_RADIUS; k <= SOBEL_RADIUS; k++) {
sobel_gradient_x += ds_Img[ty + j][tx + k] * sobel_x[j + SOBEL_RADIUS][k + SOBEL_RADIUS];
sobel_gradient_y += ds_Img[ty + j][tx + k] * sobel_y[j + SOBEL_RADIUS][k + SOBEL_RADIUS];
}
}
//Calc Sobel magnitude and save it to the original image
sobel_magnitude = (int)sqrt((float)pow((float)sobel_gradient_x, 2) + (float)pow((float)sobel_gradient_y, 2));
cu_dest_image[global_index] = (unsigned char)sobel_magnitude;
}
}
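// Sobel edge filter reading neighbors directly from global memory; border pixels are written as 0.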
__global__ void sobelFilterKernel(int *cu_image_width, int *cu_image_height, unsigned char *cu_src_image, unsigned char *cu_dest_image)
{
int sobel_x[3][3] = {
{ 1, 0, -1 },
{ 2, 0, -2 },
{ 1, 0, -1 }
};
int sobel_y[3][3] = {
{ 1, 2, 1 },
{ 0, 0, 0 },
{ -1, -2, -1 }
};
int x = blockIdx.x * blockDim.x + threadIdx.x; //cols
int y = blockIdx.y * blockDim.y + threadIdx.y; //rows
//Calc index
int global_index = (y * (*cu_image_width) + x);
if (x >= SOBEL_RADIUS && x < *cu_image_width - 1 && y >= SOBEL_RADIUS && y < *cu_image_height - 1) {
//Calc Sobel X & Y if the thread is inside the filter area
int sobel_gradient_y = 0, sobel_gradient_x = 0, sobel_magnitude = 0;
for (int j = -SOBEL_RADIUS; j <= SOBEL_RADIUS; j++) {
for (int k = -SOBEL_RADIUS; k <= SOBEL_RADIUS; k++) {
sobel_gradient_x += cu_src_image[(y + j) * (*cu_image_width) + (x + k)] * sobel_x[j + SOBEL_RADIUS][k + SOBEL_RADIUS];
sobel_gradient_y += cu_src_image[(y + j) * (*cu_image_width) + (x + k)] * sobel_y[j + SOBEL_RADIUS][k + SOBEL_RADIUS];
}
}
//Calc Sobel magnitude and save it to the image
sobel_magnitude = (int)sqrt((float)pow((float)sobel_gradient_x, 2) + (float)pow((float)sobel_gradient_y, 2));
cu_dest_image[global_index] = (unsigned char)sobel_magnitude;
}
else {
if (x < *cu_image_width && y < *cu_image_height) {
cu_dest_image[global_index] = 0;
}
}
}
//Kernel rgb to grayscale function
__global__ void rgbToGrayscaleKernel(int *cu_image_width, int *cu_image_height, unsigned char *cu_src_image, unsigned char *cu_dest_image)
{
int x = blockIdx.x * blockDim.x + threadIdx.x; //cols
int y = blockIdx.y * blockDim.y + threadIdx.y; //rows
unsigned char r, g, b, gray;
if (x < *cu_image_width && y < *cu_image_height) {
int grayOffset = (y * (*cu_image_width) + x);
int rgbOffset = grayOffset * CHANNELS;
b = cu_src_image[rgbOffset];
g = cu_src_image[rgbOffset + 1];
r = cu_src_image[rgbOffset + 2];
gray = 0.21 * r + 0.71 *g + 0.07 *b;
cu_dest_image[grayOffset] = gray;
}
}
//Kernel ColorChannel function
__global__ void setColorChannelKernel(int *cu_image_width, int *cu_image_height, unsigned char *cu_src_image, unsigned char *cu_dest_image, unsigned char *cu_channel_to_keep)
{
int x = blockIdx.x * blockDim.x + threadIdx.x; //cols
int y = blockIdx.y * blockDim.y + threadIdx.y; //rows
unsigned char r, g, b;
if (x < *cu_image_width && y < *cu_image_height) {
int offset = (y * (*cu_image_width) + x) * CHANNELS;
switch (*cu_channel_to_keep)
{
case BLUECHANNEL:
b = cu_src_image[offset];
g = 0;
r = 0;
break;
case GREENCHANNEL:
b = 0;
g = cu_src_image[offset + 1];
r = 0;
break;
case REDCHANNEL:
b = 0;
g = 0;
r = cu_src_image[offset + 2];
break;
default: //Defaults to REDCHANNEL
b = 0;
g = 0;
r = cu_src_image[offset + 2];
break;
}
cu_dest_image[offset] = b; //B
cu_dest_image[offset + 1] = g; //G
cu_dest_image[offset + 2] = r; //R
}
};
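// Host wrapper: copies the RGB image to the device, keeps only the requested color channel, and copies the result back.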
void setColorChannel(int image_width, int image_height, unsigned char *src_image, unsigned char *dest_image, unsigned char channel_to_keep)
{
int *d_image_width, *d_image_height;
unsigned char *d_src_image, *d_dest_image, *d_channel_to_keep;
unsigned int imgSize = (image_width * image_height) * CHANNELS * sizeof(unsigned char);
hipError_t err = hipSuccess;
//Set Device
err = hipSetDevice(0);
if (err != hipSuccess) {
printf("%s in %s at line %d\n",
hipGetErrorString(err), __FILE__, __LINE__);
exit(EXIT_FAILURE);
}
err = hipMalloc((void **)&d_image_width, sizeof(int));
if (err != hipSuccess) {
printf("%s in %s at line %d\n",
hipGetErrorString(err), __FILE__, __LINE__);
exit(EXIT_FAILURE);
}
err = hipMemcpy(d_image_width, &image_width, sizeof(int), hipMemcpyHostToDevice);
if (err != hipSuccess) {
printf("%s in %s at line %d\n",
hipGetErrorString(err), __FILE__, __LINE__);
exit(EXIT_FAILURE);
}
//Copy image height to gpu
err = hipMalloc((void **)&d_image_height, sizeof(int));
if (err != hipSuccess) {
printf("%s in %s at line %d\n",
hipGetErrorString(err), __FILE__, __LINE__);
exit(EXIT_FAILURE);
}
err = hipMemcpy(d_image_height, &image_height, sizeof(int), hipMemcpyHostToDevice);
if (err != hipSuccess) {
printf("%s in %s at line %d\n",
hipGetErrorString(err), __FILE__, __LINE__);
exit(EXIT_FAILURE);
}
//Copy channel to keep to gpu
err = hipMalloc((void **)&d_channel_to_keep, sizeof(unsigned char));
if (err != hipSuccess) {
printf("%s in %s at line %d\n",
hipGetErrorString(err), __FILE__, __LINE__);
exit(EXIT_FAILURE);
}
err = hipMemcpy(d_channel_to_keep, &channel_to_keep, sizeof(unsigned char), hipMemcpyHostToDevice);
if (err != hipSuccess) {
printf("%s in %s at line %d\n",
hipGetErrorString(err), __FILE__, __LINE__);
exit(EXIT_FAILURE);
}
//Copy image src to gpu
err = hipMalloc((void **)&d_src_image, imgSize);
if (err != hipSuccess) {
printf("%s in %s at line %d\n",
hipGetErrorString(err), __FILE__, __LINE__);
exit(EXIT_FAILURE);
}
err = hipMemcpy(d_src_image, src_image, imgSize, hipMemcpyHostToDevice);
if (err != hipSuccess) {
printf("%s in %s at line %d\n",
hipGetErrorString(err), __FILE__, __LINE__);
exit(EXIT_FAILURE);
}
//Copy image dest to gpu
err = hipMalloc((void **)&d_dest_image, imgSize);
if (err != hipSuccess) {
printf("%s in %s at line %d\n",
hipGetErrorString(err), __FILE__, __LINE__);
exit(EXIT_FAILURE);
}
err = hipMemcpy(d_dest_image, dest_image, imgSize, hipMemcpyHostToDevice);
if (err != hipSuccess) {
printf("%s in %s at line %d\n",
hipGetErrorString(err), __FILE__, __LINE__);
exit(EXIT_FAILURE);
}
unsigned int threads = 16;
// Use 16x16 threads per block
dim3 threads_per_block(threads, threads, 1);
//Per grid, N/16 blocks; n = number of threads
dim3 blocks_per_grid((image_width - 1) / threads + 1, (image_height - 1) / threads + 1, 1);
setColorChannelKernel << <blocks_per_grid, threads_per_block >> >(d_image_width, d_image_height, d_src_image, d_dest_image, d_channel_to_keep);
// hipDeviceSynchronize waits for the kernel to finish, and returns
// any errors encountered during the launch.
err = hipDeviceSynchronize();
if (err != hipSuccess) {
fprintf(stderr, "hipDeviceSynchronize returned error code %d after launching addKernel!\n", err);
}
err = hipMemcpy(dest_image, d_dest_image, imgSize, hipMemcpyDeviceToHost);
if (err != hipSuccess) {
printf("%s in %s at line %d\n",
hipGetErrorString(err), __FILE__, __LINE__);
exit(EXIT_FAILURE);
}
hipFree(d_image_width);
hipFree(d_image_height);
hipFree(d_channel_to_keep);
hipFree(d_src_image);
hipFree(d_dest_image);
}
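// Host wrapper for rgbToGrayscaleKernel: allocates device buffers, copies the RGB input over, and copies the grayscale result back.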
void rgbToGrayscale(int image_width, int image_height, unsigned char *src_image, unsigned char *dest_image)
{
int *d_image_width, *d_image_height;
unsigned char *d_src_image, *d_dest_image;
unsigned int imgSizeRgb = (image_width * image_height) * CHANNELS * sizeof(unsigned char);
unsigned int imgSizeGray = (image_width * image_height) * sizeof(unsigned char);
hipError_t err = hipSuccess;
//Set Device
err = hipSetDevice(0);
if (err != hipSuccess) {
printf("%s in %s at line %d\n",
hipGetErrorString(err), __FILE__, __LINE__);
exit(EXIT_FAILURE);
}
err = hipMalloc((void **)&d_image_width, sizeof(int));
if (err != hipSuccess) {
printf("%s in %s at line %d\n",
hipGetErrorString(err), __FILE__, __LINE__);
exit(EXIT_FAILURE);
}
err = hipMemcpy(d_image_width, &image_width, sizeof(int), hipMemcpyHostToDevice);
if (err != hipSuccess) {
printf("%s in %s at line %d\n",
hipGetErrorString(err), __FILE__, __LINE__);
exit(EXIT_FAILURE);
}
//Copy image height to gpu
err = hipMalloc((void **)&d_image_height, sizeof(int));
if (err != hipSuccess) {
printf("%s in %s at line %d\n",
hipGetErrorString(err), __FILE__, __LINE__);
exit(EXIT_FAILURE);
}
err = hipMemcpy(d_image_height, &image_height, sizeof(int), hipMemcpyHostToDevice);
if (err != hipSuccess) {
printf("%s in %s at line %d\n",
hipGetErrorString(err), __FILE__, __LINE__);
exit(EXIT_FAILURE);
}
//Copy image src to gpu
err = hipMalloc((void **)&d_src_image, imgSizeRgb);
if (err != hipSuccess) {
printf("%s in %s at line %d\n",
hipGetErrorString(err), __FILE__, __LINE__);
exit(EXIT_FAILURE);
}
err = hipMemcpy(d_src_image, src_image, imgSizeRgb, hipMemcpyHostToDevice);
if (err != hipSuccess) {
printf("%s in %s at line %d\n",
hipGetErrorString(err), __FILE__, __LINE__);
exit(EXIT_FAILURE);
}
//Copy image dest to gpu
err = hipMalloc((void **)&d_dest_image, imgSizeGray);
if (err != hipSuccess) {
printf("%s in %s at line %d\n",
hipGetErrorString(err), __FILE__, __LINE__);
exit(EXIT_FAILURE);
}
err = hipMemcpy(d_dest_image, dest_image, imgSizeGray, hipMemcpyHostToDevice);
if (err != hipSuccess) {
printf("%s in %s at line %d\n",
hipGetErrorString(err), __FILE__, __LINE__);
exit(EXIT_FAILURE);
}
unsigned int threads = 16;
// Use 16x16 threads per block
dim3 threads_per_block(threads, threads, 1);
//Per grid, N/16 blocks; n = number of threads
dim3 blocks_per_grid((image_width - 1) / threads + 1, (image_height - 1) / threads + 1, 1);
hipLaunchKernelGGL(( rgbToGrayscaleKernel), dim3(blocks_per_grid), dim3(threads_per_block), 0, 0, d_image_width, d_image_height, d_src_image, d_dest_image);
// hipDeviceSynchronize waits for the kernel to finish, and returns
// any errors encountered during the launch.
err = hipDeviceSynchronize();
if (err != hipSuccess) {
fprintf(stderr, "hipDeviceSynchronize returned error code %d after launching addKernel!\n", err);
}
err = hipMemcpy(dest_image, d_dest_image, imgSizeGray, hipMemcpyDeviceToHost);
if (err != hipSuccess) {
printf("%s in %s at line %d\n",
hipGetErrorString(err), __FILE__, __LINE__);
exit(EXIT_FAILURE);
}
hipFree(d_image_width);
hipFree(d_image_height);
hipFree(d_src_image);
hipFree(d_dest_image);
};
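// Host wrapper for the plain (global-memory) Sobel kernel.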
void sobelFilter(int image_width, int image_height, unsigned char *src_image, unsigned char *dest_image)
{
int *d_image_width, *d_image_height;
unsigned char *d_src_image, *d_dest_image;
unsigned int imgSize = (image_width * image_height) * sizeof(unsigned char);
hipError_t err = hipSuccess;
//Set Device
err = hipSetDevice(0);
if (err != hipSuccess) {
printf("%s in %s at line %d\n",
hipGetErrorString(err), __FILE__, __LINE__);
exit(EXIT_FAILURE);
}
err = hipMalloc((void **)&d_image_width, sizeof(int));
if (err != hipSuccess) {
printf("%s in %s at line %d\n",
hipGetErrorString(err), __FILE__, __LINE__);
exit(EXIT_FAILURE);
}
err = hipMemcpy(d_image_width, &image_width, sizeof(int), hipMemcpyHostToDevice);
if (err != hipSuccess) {
printf("%s in %s at line %d\n",
hipGetErrorString(err), __FILE__, __LINE__);
exit(EXIT_FAILURE);
}
//Copy image height to gpu
err = hipMalloc((void **)&d_image_height, sizeof(int));
if (err != hipSuccess) {
printf("%s in %s at line %d\n",
hipGetErrorString(err), __FILE__, __LINE__);
exit(EXIT_FAILURE);
}
err = hipMemcpy(d_image_height, &image_height, sizeof(int), hipMemcpyHostToDevice);
if (err != hipSuccess) {
printf("%s in %s at line %d\n",
hipGetErrorString(err), __FILE__, __LINE__);
exit(EXIT_FAILURE);
}
//Copy image src to gpu
err = hipMalloc((void **)&d_src_image, imgSize);
if (err != hipSuccess) {
printf("%s in %s at line %d\n",
hipGetErrorString(err), __FILE__, __LINE__);
exit(EXIT_FAILURE);
}
err = hipMemcpy(d_src_image, src_image, imgSize, hipMemcpyHostToDevice);
if (err != hipSuccess) {
printf("%s in %s at line %d\n",
hipGetErrorString(err), __FILE__, __LINE__);
exit(EXIT_FAILURE);
}
//Copy image dest to gpu
err = hipMalloc((void **)&d_dest_image, imgSize);
if (err != hipSuccess) {
printf("%s in %s at line %d\n",
hipGetErrorString(err), __FILE__, __LINE__);
exit(EXIT_FAILURE);
}
err = hipMemcpy(d_dest_image, dest_image, imgSize, hipMemcpyHostToDevice);
if (err != hipSuccess) {
printf("%s in %s at line %d\n",
hipGetErrorString(err), __FILE__, __LINE__);
exit(EXIT_FAILURE);
}
unsigned int threads = 16;
// Use 16x16 threads per block
dim3 threads_per_block(threads, threads, 1);
//Per Grid N/16 Blocks
dim3 blocks_per_grid((image_width - 1) / threads + 1, (image_height - 1) / threads + 1, 1);
hipLaunchKernelGGL(( sobelFilterKernel) , dim3(blocks_per_grid), dim3(threads_per_block) , 0, 0, d_image_width, d_image_height, d_src_image, d_dest_image);
// hipDeviceSynchronize waits for the kernel to finish, and returns
// any errors encountered during the launch.
err = hipDeviceSynchronize();
if (err != hipSuccess) {
fprintf(stderr, "hipDeviceSynchronize returned error code %d after launching addKernel!\n", err);
exit(EXIT_FAILURE);
}
err = hipMemcpy(dest_image, d_dest_image, imgSize, hipMemcpyDeviceToHost);
if (err != hipSuccess) {
printf("%s in %s at line %d\n",
hipGetErrorString(err), __FILE__, __LINE__);
exit(EXIT_FAILURE);
}
hipFree(d_image_width);
hipFree(d_image_height);
hipFree(d_src_image);
hipFree(d_dest_image);
};
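// Host wrapper for the tiled Sobel kernel that stages the image in shared memory.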
void sobelFilterShared(int image_width, int image_height, unsigned char *src_image, unsigned char *dest_image)
{
int *d_image_width, *d_image_height;
unsigned char *d_src_image, *d_dest_image;
unsigned int imgSize = (image_width * image_height) * sizeof(unsigned char);
hipError_t err = hipSuccess;
//Set Device
err = hipSetDevice(0);
if (err != hipSuccess) {
printf("%s in %s at line %d\n",
hipGetErrorString(err), __FILE__, __LINE__);
exit(EXIT_FAILURE);
}
err = hipMalloc((void **)&d_image_width, sizeof(int));
if (err != hipSuccess) {
printf("%s in %s at line %d\n",
hipGetErrorString(err), __FILE__, __LINE__);
exit(EXIT_FAILURE);
}
err = hipMemcpy(d_image_width, &image_width, sizeof(int), hipMemcpyHostToDevice);
if (err != hipSuccess) {
printf("%s in %s at line %d\n",
hipGetErrorString(err), __FILE__, __LINE__);
exit(EXIT_FAILURE);
}
//Copy image height to gpu
err = hipMalloc((void **)&d_image_height, sizeof(int));
if (err != hipSuccess) {
printf("%s in %s at line %d\n",
hipGetErrorString(err), __FILE__, __LINE__);
exit(EXIT_FAILURE);
}
err = hipMemcpy(d_image_height, &image_height, sizeof(int), hipMemcpyHostToDevice);
if (err != hipSuccess) {
printf("%s in %s at line %d\n",
hipGetErrorString(err), __FILE__, __LINE__);
exit(EXIT_FAILURE);
}
//Copy image src to gpu
err = hipMalloc((void **)&d_src_image, imgSize);
if (err != hipSuccess) {
printf("%s in %s at line %d\n",
hipGetErrorString(err), __FILE__, __LINE__);
exit(EXIT_FAILURE);
}
err = hipMemcpy(d_src_image, src_image, imgSize, hipMemcpyHostToDevice);
if (err != hipSuccess) {
printf("%s in %s at line %d\n",
hipGetErrorString(err), __FILE__, __LINE__);
exit(EXIT_FAILURE);
}
//Copy image dest to gpu
err = hipMalloc((void **)&d_dest_image, imgSize);
if (err != hipSuccess) {
printf("%s in %s at line %d\n",
hipGetErrorString(err), __FILE__, __LINE__);
exit(EXIT_FAILURE);
}
err = hipMemcpy(d_dest_image, dest_image, imgSize, hipMemcpyHostToDevice);
if (err != hipSuccess) {
printf("%s in %s at line %d\n",
hipGetErrorString(err), __FILE__, __LINE__);
exit(EXIT_FAILURE);
}
// Use BLOCK_W x BLOCK_W threads per block
dim3 threads_per_block_tiled(BLOCK_W, BLOCK_W, 1);
//Per grid, N/TILE_W blocks
dim3 blocks_per_grid_tiled((image_width - 1) / TILE_W + 1, (image_height - 1) / TILE_W + 1, 1);
sobelFilterKernelTiled << <blocks_per_grid_tiled, threads_per_block_tiled >> >(d_image_width, d_image_height, d_src_image, d_dest_image);
// hipDeviceSynchronize waits for the kernel to finish, and returns
// any errors encountered during the launch.
err = hipDeviceSynchronize();
if (err != hipSuccess) {
fprintf(stderr, "hipDeviceSynchronize returned error code %d after launching addKernel!\n", err);
exit(EXIT_FAILURE);
}
err = hipMemcpy(dest_image, d_dest_image, imgSize, hipMemcpyDeviceToHost);
if (err != hipSuccess) {
printf("%s in %s at line %d\n",
hipGetErrorString(err), __FILE__, __LINE__);
exit(EXIT_FAILURE);
}
hipFree(d_image_width);
hipFree(d_image_height);
hipFree(d_src_image);
hipFree(d_dest_image);
};
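// Host wrapper for the texture-based Sobel kernel: uploads the image into a device array bound to a texture object.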
void sobelFilterTexture(int image_width, int image_height, unsigned char *src_image, unsigned char *dest_image)
{
int *d_image_width, *d_image_height;
unsigned int imgSize = (image_width * image_height) * sizeof(unsigned char);
hipError_t err = hipSuccess;
//Set Device
err = hipSetDevice(0);
if (err != hipSuccess) {
printf("%s in %s at line %d\n",
hipGetErrorString(err), __FILE__, __LINE__);
exit(EXIT_FAILURE);
}
//Create ChannelDesc
//Sets output format of the value when the texture is fetched i.e. float texel
hipChannelFormatDesc channelDesc = hipCreateChannelDesc(8, 0, 0, 0, hipChannelFormatKindUnsigned);
//Create cuda array
hipArray *cuArray;
//Allocate cuda array
err = hipMallocArray(&cuArray, &channelDesc, image_width, image_height);
if (err != hipSuccess) {
printf("%s in %s at line %d\n",
hipGetErrorString(err), __FILE__, __LINE__);
exit(EXIT_FAILURE);
}
//Copy image data to cuda array
err = hipMemcpyToArray(cuArray, 0, 0, src_image, image_height * image_width * sizeof(unsigned char), hipMemcpyHostToDevice);
if (err != hipSuccess) {
printf("%s in %s at line %d\n",
hipGetErrorString(err), __FILE__, __LINE__);
exit(EXIT_FAILURE);
}
//Set Texture
struct hipResourceDesc resDesc;
memset(&resDesc, 0, sizeof(resDesc));
resDesc.resType = hipResourceTypeArray;
resDesc.res.array.array = cuArray;
//Set Texture object params
struct hipTextureDesc textDesc;
memset(&textDesc, 0, sizeof(textDesc));
textDesc.addressMode[0] = hipAddressModeMirror;
textDesc.addressMode[1] = hipAddressModeMirror;
textDesc.filterMode = hipFilterModeLinear;
textDesc.readMode = hipReadModeNormalizedFloat;
textDesc.normalizedCoords = 1;
//Create Texture Object
hipTextureObject_t texObj = 0;
hipCreateTextureObject(&texObj, &resDesc, &textDesc, NULL);
unsigned char *output;
err = hipMalloc(&output, image_height * image_width * sizeof(unsigned char));
if (err != hipSuccess) {
printf("%s in %s at line %d\n",
hipGetErrorString(err), __FILE__, __LINE__);
exit(EXIT_FAILURE);
}
//
err = hipMalloc((void **)&d_image_width, sizeof(int));
if (err != hipSuccess) {
printf("%s in %s at line %d\n",
hipGetErrorString(err), __FILE__, __LINE__);
exit(EXIT_FAILURE);
}
err = hipMemcpy(d_image_width, &image_width, sizeof(int), hipMemcpyHostToDevice);
if (err != hipSuccess) {
printf("%s in %s at line %d\n",
hipGetErrorString(err), __FILE__, __LINE__);
exit(EXIT_FAILURE);
}
//Copy image height to gpu
err = hipMalloc((void **)&d_image_height, sizeof(int));
if (err != hipSuccess) {
printf("%s in %s at line %d\n",
hipGetErrorString(err), __FILE__, __LINE__);
exit(EXIT_FAILURE);
}
err = hipMemcpy(d_image_height, &image_height, sizeof(int), hipMemcpyHostToDevice);
if (err != hipSuccess) {
printf("%s in %s at line %d\n",
hipGetErrorString(err), __FILE__, __LINE__);
exit(EXIT_FAILURE);
}
float angle = 0;
unsigned int threads = 16;
// Use 16x16 threads per block
dim3 threads_per_block(threads, threads, 1);
//Per Grid N/16 Blocks
dim3 blocks_per_grid((image_width - 1) / threads + 1, (image_height - 1) / threads + 1, 1);
hipLaunchKernelGGL(( sobelFilterTexture) , dim3(blocks_per_grid), dim3(threads_per_block) , 0, 0, d_image_width, d_image_height, output, texObj, angle);
// hipDeviceSynchronize waits for the kernel to finish, and returns
// any errors encountered during the launch.
err = hipDeviceSynchronize();
if (err != hipSuccess) {
fprintf(stderr, "hipDeviceSynchronize returned error code %d after launching addKernel!\n", err);
exit(EXIT_FAILURE);
}
err = hipMemcpy(dest_image, output, imgSize, hipMemcpyDeviceToHost);
if (err != hipSuccess) {
printf("%s in %s at line %d\n",
hipGetErrorString(err), __FILE__, __LINE__);
exit(EXIT_FAILURE);
}
hipFree(d_image_width);
hipFree(d_image_height);
hipDestroyTextureObject(texObj);
hipFreeArray(cuArray);
hipFree(output);
};
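// Host wrapper for the histogram kernel; prints the total pixel count accumulated over all 256 bins.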
void getHistogramm(int image_width, int image_height, unsigned char *src_image)
{
int *d_image_width, *d_image_height;
unsigned char *d_src_image;
unsigned int *d_dest_histogramm;
unsigned int histogramm[HISTOGRAMMSIZE] = { 0 };
unsigned int imgSize = (image_width * image_height) * sizeof(unsigned char);
unsigned int histogrammSize = HISTOGRAMMSIZE * sizeof(unsigned int);
hipError_t err = hipSuccess;
//Set Device
err = hipSetDevice(0);
if (err != hipSuccess) {
printf("%s in %s at line %d\n",
hipGetErrorString(err), __FILE__, __LINE__);
exit(EXIT_FAILURE);
}
err = hipMalloc((void **)&d_image_width, sizeof(int));
if (err != hipSuccess) {
printf("%s in %s at line %d\n",
hipGetErrorString(err), __FILE__, __LINE__);
exit(EXIT_FAILURE);
}
err = hipMemcpy(d_image_width, &image_width, sizeof(int), hipMemcpyHostToDevice);
if (err != hipSuccess) {
printf("%s in %s at line %d\n",
hipGetErrorString(err), __FILE__, __LINE__);
exit(EXIT_FAILURE);
}
//Copy image height to gpu
err = hipMalloc((void **)&d_image_height, sizeof(int));
if (err != hipSuccess) {
printf("%s in %s at line %d\n",
hipGetErrorString(err), __FILE__, __LINE__);
exit(EXIT_FAILURE);
}
err = hipMemcpy(d_image_height, &image_height, sizeof(int), hipMemcpyHostToDevice);
if (err != hipSuccess) {
printf("%s in %s at line %d\n",
hipGetErrorString(err), __FILE__, __LINE__);
exit(EXIT_FAILURE);
}
//Copy image src to gpu
err = hipMalloc((void **)&d_src_image, imgSize);
if (err != hipSuccess) {
printf("%s in %s at line %d\n",
hipGetErrorString(err), __FILE__, __LINE__);
exit(EXIT_FAILURE);
}
err = hipMemcpy(d_src_image, src_image, imgSize, hipMemcpyHostToDevice);
if (err != hipSuccess) {
printf("%s in %s at line %d\n",
hipGetErrorString(err), __FILE__, __LINE__);
exit(EXIT_FAILURE);
}
//Copy image dest to gpu
err = hipMalloc((void **)&d_dest_histogramm, histogrammSize);
if (err != hipSuccess) {
printf("%s in %s at line %d\n",
hipGetErrorString(err), __FILE__, __LINE__);
exit(EXIT_FAILURE);
}
err = hipMemcpy(d_dest_histogramm, histogramm, histogrammSize, hipMemcpyHostToDevice);
if (err != hipSuccess) {
printf("%s in %s at line %d\n",
hipGetErrorString(err), __FILE__, __LINE__);
exit(EXIT_FAILURE);
}
unsigned int threads = 16;
// Use 16x16 threads per block
dim3 threads_per_block(threads, threads, 1);
//Per Grid N/16 Blocks
dim3 blocks_per_grid((image_width - 1) / threads + 1, (image_height - 1) / threads + 1, 1);
hipLaunchKernelGGL(( getHistogrammTiledKernel) , dim3(blocks_per_grid), dim3(threads_per_block) , 0, 0, d_image_width, d_image_height, d_src_image, d_dest_histogramm);
// hipDeviceSynchronize waits for the kernel to finish, and returns
// any errors encountered during the launch.
err = hipDeviceSynchronize();
if (err != hipSuccess) {
fprintf(stderr, "hipDeviceSynchronize returned error code %d after launching addKernel!\n", err);
exit(EXIT_FAILURE);
}
err = hipMemcpy(histogramm, d_dest_histogramm, histogrammSize, hipMemcpyDeviceToHost);
if (err != hipSuccess) {
printf("%s in %s at line %d\n",
hipGetErrorString(err), __FILE__, __LINE__);
exit(EXIT_FAILURE);
}
long histoCount = 0;
for (int i = 0; i < HISTOGRAMMSIZE; i++) {
histoCount += histogramm[i];
}
printf("HistogrammSize: %ld \n", histoCount);
hipFree(d_dest_histogramm);
hipFree(d_image_width);
hipFree(d_image_height);
hipFree(d_src_image);
};
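// Pipeline using four streams: splits the image into horizontal slices, converts each slice to grayscale, then applies the tiled Sobel filter per slice.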
void streamAufgabe5(int image_width, int image_height, unsigned char *src_image, unsigned char *dest_image)
{
int *d_image_width, *d_image_height;
unsigned char *d_src_image, *d_dest_image, *d_dest_image_sobel;
unsigned int imgSize = image_width * image_height;
unsigned int imgSizeRgb = imgSize * CHANNELS * sizeof(unsigned char);
unsigned int imgSizeGray = imgSize * sizeof(unsigned char);
//Cuda Stream vars
const unsigned int stream_count = 4;
//Kernel vars
unsigned int threads = 16;
int stream_width = image_width;
int stream_height = image_height / stream_count;
int stream_size = stream_width * stream_height;
int stream_size_gray = stream_size * sizeof(unsigned char);
int stream_size_rgb = stream_size * CHANNELS * sizeof(unsigned char);
//tiled sobel
dim3 threads_per_block_tiled(BLOCK_W, BLOCK_W, 1);
//Per grid, N/TILE_W blocks
dim3 blocks_per_grid_tiled((stream_width - 1) / TILE_W + 1, (stream_height - 1) / TILE_W + 1, 1);
// Use 16x16 threads per block
dim3 threads_per_block(threads, threads, 1);
//Per grid, N/16 blocks
dim3 blocks_per_grid((stream_width - 1) / threads + 1, (stream_height - 1) / threads + 1, 1);
hipStream_t streams[stream_count];
int dev_count;
hipDeviceProp_t prop;
hipError_t err = hipSuccess;
//Enable device Overlap
err = hipGetDeviceCount(&dev_count);
if (err != hipSuccess) {
printf("%s in %s at line %d\n",
hipGetErrorString(err), __FILE__, __LINE__);
exit(EXIT_FAILURE);
}
//Set device to a device with overlap property
for (int i = 0; i < dev_count; i++) {
hipGetDeviceProperties(&prop, i);
if (prop.deviceOverlap) {
err = hipSetDevice(i);
if (err != hipSuccess) {
printf("%s in %s at line %d\n",
hipGetErrorString(err), __FILE__, __LINE__);
exit(EXIT_FAILURE);
}
}
}
err = hipHostMalloc((void **)&d_image_width, sizeof(int), hipHostMallocDefault);
if (err != hipSuccess) {
printf("%s in %s at line %d\n",
hipGetErrorString(err), __FILE__, __LINE__);
exit(EXIT_FAILURE);
}
err = hipMemcpy(d_image_width, &stream_width, sizeof(int), hipMemcpyHostToDevice);
if (err != hipSuccess) {
printf("%s in %s at line %d\n",
hipGetErrorString(err), __FILE__, __LINE__);
exit(EXIT_FAILURE);
}
//Copy image height to gpu
err = hipHostMalloc((void **)&d_image_height, sizeof(int), hipHostMallocDefault);
if (err != hipSuccess) {
printf("%s in %s at line %d\n",
hipGetErrorString(err), __FILE__, __LINE__);
exit(EXIT_FAILURE);
}
err = hipMemcpy(d_image_height, &stream_height, sizeof(int), hipMemcpyHostToDevice);
if (err != hipSuccess) {
printf("%s in %s at line %d\n",
hipGetErrorString(err), __FILE__, __LINE__);
exit(EXIT_FAILURE);
}
//Copy image src to gpu
err = hipHostMalloc((void **)&d_src_image, imgSizeRgb, hipHostMallocDefault);
if (err != hipSuccess) {
printf("%s in %s at line %d\n",
hipGetErrorString(err), __FILE__, __LINE__);
exit(EXIT_FAILURE);
}
//Alloc memory for grayscale image
err = hipHostMalloc((void **)&d_dest_image, imgSizeGray, hipHostMallocDefault);
if (err != hipSuccess) {
printf("%s in %s at line %d\n",
hipGetErrorString(err), __FILE__, __LINE__);
exit(EXIT_FAILURE);
}
err = hipHostMalloc((void **)&d_dest_image_sobel, imgSizeGray, hipHostMallocDefault);
if (err != hipSuccess) {
printf("%s in %s at line %d\n",
hipGetErrorString(err), __FILE__, __LINE__);
exit(EXIT_FAILURE);
}
//Create cuda Streams & memory for each stream
for (int i = 0; i < stream_count; i++) {
//Create cuda Streams
err = hipStreamCreate(&streams[i]);
if (err != hipSuccess) {
printf("%s in %s at line %d\n",
hipGetErrorString(err), __FILE__, __LINE__);
exit(EXIT_FAILURE);
}
}
//fill memory
for (int i = 0; i < stream_count; i++) {
//calc offset for memory copy
int offset_gray = i * stream_size;
int offset_rgb = offset_gray * CHANNELS;
//copy memory for each stream
err = hipMemcpyAsync(&d_src_image[offset_rgb], &src_image[offset_rgb], stream_size_rgb, hipMemcpyHostToDevice, streams[i]);
if (err != hipSuccess) {
printf("%s in %s at line %d\n",
hipGetErrorString(err), __FILE__, __LINE__);
exit(EXIT_FAILURE);
}
err = hipMemcpyAsync(&d_dest_image[offset_gray], &dest_image[offset_gray], stream_size_gray, hipMemcpyHostToDevice, streams[i]);
if (err != hipSuccess) {
printf("%s in %s at line %d\n",
hipGetErrorString(err), __FILE__, __LINE__);
exit(EXIT_FAILURE);
}
err = hipMemcpyAsync(&d_dest_image_sobel[offset_gray], &dest_image[offset_gray], stream_size_gray, hipMemcpyHostToDevice, streams[i]);
if (err != hipSuccess) {
printf("%s in %s at line %d\n",
hipGetErrorString(err), __FILE__, __LINE__);
exit(EXIT_FAILURE);
}
}
//execute kernel for grayscale
for (int i = 0; i < stream_count; i++) {
int offset_gray = i * stream_size;
int offset_rgb = offset_gray * CHANNELS;
hipLaunchKernelGGL(( rgbToGrayscaleKernelStream), dim3(blocks_per_grid), dim3(threads_per_block), 0, streams[i], d_image_width, d_image_height, d_src_image, d_dest_image, offset_rgb, offset_gray);
}
for (int i = 0; i < stream_count; i++) {
err = hipStreamSynchronize(streams[i]);
if (err != hipSuccess) {
printf("%s in %s at line %d\n",
hipGetErrorString(err), __FILE__, __LINE__);
exit(EXIT_FAILURE);
}
}
//execute kernel for sobel
for (int i = 0; i < stream_count; i++) {
int offset_sobel = i * stream_size;
//sobelFilterKernelStreams<<<blocks_per_grid, threads_per_block, 0, streams[i]>>>(d_image_width, d_image_height, d_dest_image, d_dest_image_sobel, offset_sobel);
sobelFilterKernelTiledStreams<<<blocks_per_grid_tiled, threads_per_block_tiled, 0, streams[i] >> >(d_image_width, d_image_height, d_dest_image, d_dest_image_sobel, offset_sobel);
}
for (int i = 0; i < stream_count; i++) {
err = hipStreamSynchronize(streams[i]);
if (err != hipSuccess) {
printf("%s in %s at line %d\n",
hipGetErrorString(err), __FILE__, __LINE__);
exit(EXIT_FAILURE);
}
}
// Save grayscale data
for (int i = 0; i < stream_count; i++) {
int offset = i * stream_size;
//printf("offset: %d\n", offset);
err = hipMemcpyAsync(&dest_image[offset], &d_dest_image_sobel[offset], stream_size_gray, hipMemcpyDeviceToHost, streams[i]);
if (err != hipSuccess) {
printf("%s in %s at line %d\n",
hipGetErrorString(err), __FILE__, __LINE__);
exit(EXIT_FAILURE);
}
}
hipHostFree(d_image_width);
hipHostFree(d_image_height);
hipHostFree(d_src_image);
hipHostFree(d_dest_image);
hipHostFree(d_dest_image_sobel);
for (int i = 0; i < stream_count; i++) {
hipStreamDestroy(streams[i]);
}
}; | ede51f1bbf7a65be480ff67700ec896530299c27.cu | #include "cuda_runtime.h"
#include "kernel.h"
#include "fix.h"
#include "device_launch_parameters.h"
#include <device_functions.h>
#include <device_atomic_functions.h>
#include <stdio.h>
#include <stdlib.h>
#include <memory.h>
#include <math.h>
#include <cstdlib>
//
#define CHANNELS 3
#define REDCHANNEL 'r'
#define GREENCHANNEL 'g'
#define BLUECHANNEL 'b'
#define GRAYSCLAEREDCHANNEL 0.21
#define GRAYSCLAEGREENCHANNEL 0.71
#define GRAYSCLAEBLUECHANNEL 0.07
#define SOBEL_RADIUS 1
#define TILE_W 16
#define BLOCK_W (TILE_W + 2*SOBEL_RADIUS)
#define ANGLE 50
#define HISTOGRAMMSIZE 256
__global__ void sobelFilterKernelTiledStreams(int *cu_image_width, int *cu_image_height, unsigned char *cu_src_image, unsigned char *cu_dest_image, int sobel_offset)
{
__shared__ char ds_Img[BLOCK_W][BLOCK_W];
int bx = blockIdx.x; int by = blockIdx.y;
int tx = threadIdx.x; int ty = threadIdx.y;
int sobel_x[3][3] = {
{ 1, 0, -1 },
{ 2, 0, -2 },
{ 1, 0, -1 }
};
int sobel_y[3][3] = {
{ 1, 2, 1 },
{ 0, 0, 0 },
{ -1, -2, -1 }
};
int x = bx * TILE_W + tx - SOBEL_RADIUS; //cols
int y = by * TILE_W + ty - SOBEL_RADIUS; //rows
//Make sure x/y are not negative
if (x < 0) {
x = 0;
}
if (y < 0) {
y = 0;
}
//Calc index of global memory
int global_index = sobel_offset + (y * (*cu_image_width) + x);
//Load Data into Shared Memory
//Insert 0 if the thread is supposed to fill the filter radius border of the tile
if (x >= 0 && x < *cu_image_width - 1 && y >= 0 && y < *cu_image_height - 1) {
ds_Img[ty][tx] = cu_src_image[global_index];
}
else {
if (x < *cu_image_width && y < *cu_image_height) {
ds_Img[ty][tx] = 0;
}
}
__syncthreads();
//Calc Sobel X & Y if the thread is inside the filter area
if ((tx >= SOBEL_RADIUS) && (tx <= TILE_W) &&
(ty >= SOBEL_RADIUS) && (ty <= TILE_W)) {
int sobel_gradient_y = 0, sobel_gradient_x = 0, sobel_magnitude = 0;
for (int j = -SOBEL_RADIUS; j <= SOBEL_RADIUS; j++) {
for (int k = -SOBEL_RADIUS; k <= SOBEL_RADIUS; k++) {
sobel_gradient_x += ds_Img[ty + j][tx + k] * sobel_x[j + SOBEL_RADIUS][k + SOBEL_RADIUS];
sobel_gradient_y += ds_Img[ty + j][tx + k] * sobel_y[j + SOBEL_RADIUS][k + SOBEL_RADIUS];
}
}
//Calc Sobel magnitude and save it to the original image
sobel_magnitude = (int)sqrt((float)pow((float)sobel_gradient_x, 2) + (float)pow((float)sobel_gradient_y, 2));
cu_dest_image[global_index] = (unsigned char)sobel_magnitude;
}
}
__global__ void sobelFilterKernelStreams(int *cu_image_width, int *cu_image_height, unsigned char *cu_src_image, unsigned char *cu_dest_image, int offset_sobel)
{
int sobel_x[3][3] = {
{ 1, 0, -1 },
{ 2, 0, -2 },
{ 1, 0, -1 }
};
int sobel_y[3][3] = {
{ 1, 2, 1 },
{ 0, 0, 0 },
{ -1, -2, -1 }
};
int x = blockIdx.x * blockDim.x + threadIdx.x; //cols
int y = blockIdx.y * blockDim.y + threadIdx.y; //rows
//Calc index
int global_index = offset_sobel + (y * (*cu_image_width) + x);
if (x >= SOBEL_RADIUS && x < *cu_image_width - 1 && y >= SOBEL_RADIUS && y < *cu_image_height - 1) {
//Calc Sobel X & Y if the thread is inside the filter area
int sobel_gradient_y = 0, sobel_gradient_x = 0, sobel_magnitude = 0;
for (int j = -SOBEL_RADIUS; j <= SOBEL_RADIUS; j++) {
for (int k = -SOBEL_RADIUS; k <= SOBEL_RADIUS; k++) {
int sobel_index = offset_sobel + (y + j) * (*cu_image_width) + (x + k);
sobel_gradient_x += cu_src_image[sobel_index] * sobel_x[j + SOBEL_RADIUS][k + SOBEL_RADIUS];
sobel_gradient_y += cu_src_image[sobel_index] * sobel_y[j + SOBEL_RADIUS][k + SOBEL_RADIUS];
}
}
//Calc Sobel magnitude and save it to the image
sobel_magnitude = (int)sqrt((float)pow((float)sobel_gradient_x, 2) + (float)pow((float)sobel_gradient_y, 2));
cu_dest_image[global_index] = (unsigned char)sobel_magnitude;
}
else {
if (x < *cu_image_width && y < *cu_image_height) {
cu_dest_image[global_index] = 0;
}
}
}
//Kernel rgb to grayscale function with streams
__global__ void rgbToGrayscaleKernelStream(int *cu_image_width, int *cu_image_height, unsigned char *cu_src_image, unsigned char *cu_dest_image, int offset_rgb, int offset_gray)
{
int x = blockIdx.x * blockDim.x + threadIdx.x; //cols
int y = blockIdx.y * blockDim.y + threadIdx.y; //rows
unsigned char r, g, b, gray;
if (x < *cu_image_width && y < *cu_image_height) {
int offset = (y * (*cu_image_width) + x);
int grayOffset = offset + offset_gray;
int rgbOffset = offset_rgb + offset * CHANNELS;
b = cu_src_image[rgbOffset];
g = cu_src_image[rgbOffset + 1];
r = cu_src_image[rgbOffset + 2];
gray = 0.21 * r + 0.71 *g + 0.07 *b;
cu_dest_image[grayOffset] = gray;
}
}
__global__ void getHistogrammTiledKernel(int *cu_image_width, int *cu_image_height, unsigned char *cu_src_image, unsigned int *cu_dest_histogramm) {
__shared__ unsigned int smem[HISTOGRAMMSIZE];
int x = blockIdx.x * blockDim.x + threadIdx.x; //cols
int y = blockIdx.y * blockDim.y + threadIdx.y; //rows
int shared_index = threadIdx.x + threadIdx.y * blockDim.x;
int stride_shared = blockDim.x * blockDim.y;
int stride_x = blockDim.x * gridDim.x;
int stride_y = blockDim.y * gridDim.y;
//Init shared memory histogramm with 0's
for (int i = shared_index; i < HISTOGRAMMSIZE; i += stride_shared) {
smem[i] = 0;
}
__syncthreads();
//Add data to histogramm in shared memory
while (x < *cu_image_width && y < *cu_image_height) {
int index = y * *cu_image_width + x;
atomicAdd(&(smem[cu_src_image[index]]), 1);
x += stride_x;
y += stride_y;
}
__syncthreads();
/*
long test = 0;
if (threadIdx.x == 0) {
for (int j = 0; j < HISTOGRAMMSIZE; j++) {
test += smem[j];
}
printf("Smem total per block: %lu \n", test);
}*/
//Add shared memory histogramm part to global memory histogramm
for (int i = shared_index; i < HISTOGRAMMSIZE; i+= stride_shared) {
atomicAdd(&(cu_dest_histogramm[i]), smem[i]);
}
__syncthreads();
}
__global__ void getHistogrammKernel(int *cu_image_width, int *cu_image_height, unsigned char *cu_src_image, unsigned int *cu_dest_histogramm)
{
int x = blockIdx.x * blockDim.x + threadIdx.x; //cols
int y = blockIdx.y * blockDim.y + threadIdx.y; //rows
int stride_x = blockDim.x * gridDim.x;
int stride_y = blockDim.y * gridDim.y;
while (x < *cu_image_width && y < *cu_image_height) {
int index = y * *cu_image_width + x;
atomicAdd(&(cu_dest_histogramm[cu_src_image[index]]), 1);
x += stride_x;
y += stride_y;
}
}
__global__ void sobelFilterTexture(int *cu_image_width, int *cu_image_height, unsigned char *cu_output, cudaTextureObject_t cu_texObj, float theta)
{
int sobel_x[3][3] = {
{ 1, 0, -1 },
{ 2, 0, -2 },
{ 1, 0, -1 }
};
int sobel_y[3][3] = {
{ 1, 2, 1 },
{ 0, 0, 0 },
{ -1, -2, -1 }
};
int x = blockIdx.x * blockDim.x + threadIdx.x;
int y = blockIdx.y * blockDim.y + threadIdx.y;
if (x < *cu_image_width - 1 && y < *cu_image_height - 1) {
int sobel_gradient_y = 0, sobel_gradient_x = 0, sobel_magnitude = 0;
for (int j = -SOBEL_RADIUS; j <= SOBEL_RADIUS; j++) {
for (int k = -SOBEL_RADIUS; k <= SOBEL_RADIUS; k++) {
//Calc normalized texture coordinates
float u = (x + k) / (float)*cu_image_width;
float v = (y + j) / (float)*cu_image_height;
// Transform coordinates
u -= 0.5f;
v -= 0.5f;
float tu = u * cosf(theta) - v * sinf(theta) + 0.5f;
float tv = v * cosf(theta) + u * sinf(theta) + 0.5f;
sobel_gradient_x += tex2D<float>(cu_texObj, tu, tv) * 255 * sobel_x[j + SOBEL_RADIUS][k + SOBEL_RADIUS];
sobel_gradient_y += tex2D<float>(cu_texObj, tu, tv) * 255 * sobel_y[j + SOBEL_RADIUS][k + SOBEL_RADIUS];
}
}
//Calc Sobel magnitude and save it to the image
sobel_magnitude = (int)sqrt((float)pow((float)sobel_gradient_x, 2) + (float)pow((float)sobel_gradient_y, 2));
cu_output[y * *cu_image_width + x] = (unsigned char)sobel_magnitude;
}
};
//Kernel sobel function
__global__ void sobelFilterKernelTiled(int *cu_image_width, int *cu_image_height, unsigned char *cu_src_image, unsigned char *cu_dest_image)
{
__shared__ char ds_Img[BLOCK_W][BLOCK_W];
int bx = blockIdx.x; int by = blockIdx.y;
int tx = threadIdx.x; int ty = threadIdx.y;
int sobel_x[3][3] = {
{ 1, 0, -1 },
{ 2, 0, -2 },
{ 1, 0, -1 }
};
int sobel_y[3][3] = {
{ 1, 2, 1 },
{ 0, 0, 0 },
{ -1, -2, -1 }
};
int x = bx * TILE_W + tx - SOBEL_RADIUS; //cols
int y = by * TILE_W + ty - SOBEL_RADIUS; //rows
//Make sure x/y are not negative
if (x < 0) {
x = 0;
}
if (y < 0) {
y = 0;
}
//Calc index of global memory
int global_index = (y * (*cu_image_width) + x);
//Load Data into Shared Memory
//Insert 0 if the thread is supposed to fill the filter radius border of the tile
if (x >= 0 && x < *cu_image_width - 1 && y >= 0 && y < *cu_image_height - 1) {
ds_Img[ty][tx] = cu_src_image[global_index];
}
else {
ds_Img[ty][tx] = 0;
}
__syncthreads();
//Calc Sobel X & Y if the thread is inside the filter area
if ((tx >= SOBEL_RADIUS) && (tx <= TILE_W) &&
(ty >= SOBEL_RADIUS) && (ty <= TILE_W)){
int sobel_gradient_y = 0, sobel_gradient_x = 0, sobel_magnitude = 0;
for (int j = -SOBEL_RADIUS; j <= SOBEL_RADIUS; j++) {
for (int k = -SOBEL_RADIUS; k <= SOBEL_RADIUS; k++) {
sobel_gradient_x += ds_Img[ty + j][tx + k] * sobel_x[j + SOBEL_RADIUS][k + SOBEL_RADIUS];
sobel_gradient_y += ds_Img[ty + j][tx + k] * sobel_y[j + SOBEL_RADIUS][k + SOBEL_RADIUS];
}
}
//Calc Sobel magnitude and save it to the original image
sobel_magnitude = (int)sqrt((float)pow((float)sobel_gradient_x, 2) + (float)pow((float)sobel_gradient_y, 2));
cu_dest_image[global_index] = (unsigned char)sobel_magnitude;
}
}
__global__ void sobelFilterKernel(int *cu_image_width, int *cu_image_height, unsigned char *cu_src_image, unsigned char *cu_dest_image)
{
int sobel_x[3][3] = {
{ 1, 0, -1 },
{ 2, 0, -2 },
{ 1, 0, -1 }
};
int sobel_y[3][3] = {
{ 1, 2, 1 },
{ 0, 0, 0 },
{ -1, -2, -1 }
};
int x = blockIdx.x * blockDim.x + threadIdx.x; //cols
int y = blockIdx.y * blockDim.y + threadIdx.y; //rows
//Calc index
int global_index = (y * (*cu_image_width) + x);
if (x >= SOBEL_RADIUS && x < *cu_image_width - 1 && y >= SOBEL_RADIUS && y < *cu_image_height - 1) {
//Calc Sobel X & Y if the thread is inside the filter area
int sobel_gradient_y = 0, sobel_gradient_x = 0, sobel_magnitude = 0;
for (int j = -SOBEL_RADIUS; j <= SOBEL_RADIUS; j++) {
for (int k = -SOBEL_RADIUS; k <= SOBEL_RADIUS; k++) {
sobel_gradient_x += cu_src_image[(y + j) * (*cu_image_width) + (x + k)] * sobel_x[j + SOBEL_RADIUS][k + SOBEL_RADIUS];
sobel_gradient_y += cu_src_image[(y + j) * (*cu_image_width) + (x + k)] * sobel_y[j + SOBEL_RADIUS][k + SOBEL_RADIUS];
}
}
//Calc Sobel magnitude and save it to the image
sobel_magnitude = (int)sqrt((float)pow((float)sobel_gradient_x, 2) + (float)pow((float)sobel_gradient_y, 2));
cu_dest_image[global_index] = (unsigned char)sobel_magnitude;
}
else {
if (x < *cu_image_width && y < *cu_image_height) {
cu_dest_image[global_index] = 0;
}
}
}
//Kernel rgb to grayscale function
__global__ void rgbToGrayscaleKernel(int *cu_image_width, int *cu_image_height, unsigned char *cu_src_image, unsigned char *cu_dest_image)
{
int x = blockIdx.x * blockDim.x + threadIdx.x; //cols
int y = blockIdx.y * blockDim.y + threadIdx.y; //rows
unsigned char r, g, b, gray;
if (x < *cu_image_width && y < *cu_image_height) {
int grayOffset = (y * (*cu_image_width) + x);
int rgbOffset = grayOffset * CHANNELS;
b = cu_src_image[rgbOffset];
g = cu_src_image[rgbOffset + 1];
r = cu_src_image[rgbOffset + 2];
		gray = 0.21 * r + 0.71 * g + 0.07 * b;
cu_dest_image[grayOffset] = gray;
}
}
//Kernel ColorChannel function
__global__ void setColorChannelKernel(int *cu_image_width, int *cu_image_height, unsigned char *cu_src_image, unsigned char *cu_dest_image, unsigned char *cu_channel_to_keep)
{
int x = blockIdx.x * blockDim.x + threadIdx.x; //cols
int y = blockIdx.y * blockDim.y + threadIdx.y; //rows
unsigned char r, g, b;
if (x < *cu_image_width && y < *cu_image_height) {
int offset = (y * (*cu_image_width) + x) * CHANNELS;
switch (*cu_channel_to_keep)
{
case BLUECHANNEL:
b = cu_src_image[offset];
g = 0;
r = 0;
break;
case GREENCHANNEL:
b = 0;
g = cu_src_image[offset + 1];
r = 0;
break;
case REDCHANNEL:
b = 0;
g = 0;
r = cu_src_image[offset + 2];
break;
default: //Defaults to REDCHANNEL
b = 0;
g = 0;
r = cu_src_image[offset + 2];
break;
}
cu_dest_image[offset] = b; //B
cu_dest_image[offset + 1] = g; //G
cu_dest_image[offset + 2] = r; //R
}
};
void setColorChannel(int image_width, int image_height, unsigned char *src_image, unsigned char *dest_image, unsigned char channel_to_keep)
{
int *d_image_width, *d_image_height;
unsigned char *d_src_image, *d_dest_image, *d_channel_to_keep;
unsigned int imgSize = (image_width * image_height) * CHANNELS * sizeof(unsigned char);
cudaError_t err = cudaSuccess;
//Set Device
err = cudaSetDevice(0);
if (err != cudaSuccess) {
printf("%s in %s at line %d\n",
cudaGetErrorString(err), __FILE__, __LINE__);
exit(EXIT_FAILURE);
}
err = cudaMalloc((void **)&d_image_width, sizeof(int));
if (err != cudaSuccess) {
printf("%s in %s at line %d\n",
cudaGetErrorString(err), __FILE__, __LINE__);
exit(EXIT_FAILURE);
}
err = cudaMemcpy(d_image_width, &image_width, sizeof(int), cudaMemcpyHostToDevice);
if (err != cudaSuccess) {
printf("%s in %s at line %d\n",
cudaGetErrorString(err), __FILE__, __LINE__);
exit(EXIT_FAILURE);
}
//Copy image height to gpu
err = cudaMalloc((void **)&d_image_height, sizeof(int));
if (err != cudaSuccess) {
printf("%s in %s at line %d\n",
cudaGetErrorString(err), __FILE__, __LINE__);
exit(EXIT_FAILURE);
}
err = cudaMemcpy(d_image_height, &image_height, sizeof(int), cudaMemcpyHostToDevice);
if (err != cudaSuccess) {
printf("%s in %s at line %d\n",
cudaGetErrorString(err), __FILE__, __LINE__);
exit(EXIT_FAILURE);
}
//Copy channel to keep to gpu
err = cudaMalloc((void **)&d_channel_to_keep, sizeof(unsigned char));
if (err != cudaSuccess) {
printf("%s in %s at line %d\n",
cudaGetErrorString(err), __FILE__, __LINE__);
exit(EXIT_FAILURE);
}
err = cudaMemcpy(d_channel_to_keep, &channel_to_keep, sizeof(unsigned char), cudaMemcpyHostToDevice);
if (err != cudaSuccess) {
printf("%s in %s at line %d\n",
cudaGetErrorString(err), __FILE__, __LINE__);
exit(EXIT_FAILURE);
}
//Copy image src to gpu
err = cudaMalloc((void **)&d_src_image, imgSize);
if (err != cudaSuccess) {
printf("%s in %s at line %d\n",
cudaGetErrorString(err), __FILE__, __LINE__);
exit(EXIT_FAILURE);
}
err = cudaMemcpy(d_src_image, src_image, imgSize, cudaMemcpyHostToDevice);
if (err != cudaSuccess) {
printf("%s in %s at line %d\n",
cudaGetErrorString(err), __FILE__, __LINE__);
exit(EXIT_FAILURE);
}
//Copy image dest to gpu
err = cudaMalloc((void **)&d_dest_image, imgSize);
if (err != cudaSuccess) {
printf("%s in %s at line %d\n",
cudaGetErrorString(err), __FILE__, __LINE__);
exit(EXIT_FAILURE);
}
err = cudaMemcpy(d_dest_image, dest_image, imgSize, cudaMemcpyHostToDevice);
if (err != cudaSuccess) {
printf("%s in %s at line %d\n",
cudaGetErrorString(err), __FILE__, __LINE__);
exit(EXIT_FAILURE);
}
unsigned int threads = 16;
	// Use a grid of blocks, each containing 16x16 threads
dim3 threads_per_block(threads, threads, 1);
	//N/16 blocks per grid, n = number of threads
dim3 blocks_per_grid((image_width - 1) / threads + 1, (image_height - 1) / threads + 1, 1);
setColorChannelKernel << <blocks_per_grid, threads_per_block >> >(d_image_width, d_image_height, d_src_image, d_dest_image, d_channel_to_keep);
// cudaDeviceSynchronize waits for the kernel to finish, and returns
// any errors encountered during the launch.
err = cudaDeviceSynchronize();
if (err != cudaSuccess) {
		fprintf(stderr, "cudaDeviceSynchronize returned error code %d after launching setColorChannelKernel!\n", err);
}
err = cudaMemcpy(dest_image, d_dest_image, imgSize, cudaMemcpyDeviceToHost);
if (err != cudaSuccess) {
printf("%s in %s at line %d\n",
cudaGetErrorString(err), __FILE__, __LINE__);
exit(EXIT_FAILURE);
}
cudaFree(d_image_width);
cudaFree(d_image_height);
cudaFree(d_channel_to_keep);
cudaFree(d_src_image);
cudaFree(d_dest_image);
}
void rgbToGrayscale(int image_width, int image_height, unsigned char *src_image, unsigned char *dest_image)
{
int *d_image_width, *d_image_height;
unsigned char *d_src_image, *d_dest_image;
unsigned int imgSizeRgb = (image_width * image_height) * CHANNELS * sizeof(unsigned char);
unsigned int imgSizeGray = (image_width * image_height) * sizeof(unsigned char);
cudaError_t err = cudaSuccess;
//Set Device
err = cudaSetDevice(0);
if (err != cudaSuccess) {
printf("%s in %s at line %d\n",
cudaGetErrorString(err), __FILE__, __LINE__);
exit(EXIT_FAILURE);
}
err = cudaMalloc((void **)&d_image_width, sizeof(int));
if (err != cudaSuccess) {
printf("%s in %s at line %d\n",
cudaGetErrorString(err), __FILE__, __LINE__);
exit(EXIT_FAILURE);
}
err = cudaMemcpy(d_image_width, &image_width, sizeof(int), cudaMemcpyHostToDevice);
if (err != cudaSuccess) {
printf("%s in %s at line %d\n",
cudaGetErrorString(err), __FILE__, __LINE__);
exit(EXIT_FAILURE);
}
//Copy image height to gpu
err = cudaMalloc((void **)&d_image_height, sizeof(int));
if (err != cudaSuccess) {
printf("%s in %s at line %d\n",
cudaGetErrorString(err), __FILE__, __LINE__);
exit(EXIT_FAILURE);
}
err = cudaMemcpy(d_image_height, &image_height, sizeof(int), cudaMemcpyHostToDevice);
if (err != cudaSuccess) {
printf("%s in %s at line %d\n",
cudaGetErrorString(err), __FILE__, __LINE__);
exit(EXIT_FAILURE);
}
//Copy image src to gpu
err = cudaMalloc((void **)&d_src_image, imgSizeRgb);
if (err != cudaSuccess) {
printf("%s in %s at line %d\n",
cudaGetErrorString(err), __FILE__, __LINE__);
exit(EXIT_FAILURE);
}
err = cudaMemcpy(d_src_image, src_image, imgSizeRgb, cudaMemcpyHostToDevice);
if (err != cudaSuccess) {
printf("%s in %s at line %d\n",
cudaGetErrorString(err), __FILE__, __LINE__);
exit(EXIT_FAILURE);
}
//Copy image dest to gpu
err = cudaMalloc((void **)&d_dest_image, imgSizeGray);
if (err != cudaSuccess) {
printf("%s in %s at line %d\n",
cudaGetErrorString(err), __FILE__, __LINE__);
exit(EXIT_FAILURE);
}
err = cudaMemcpy(d_dest_image, dest_image, imgSizeGray, cudaMemcpyHostToDevice);
if (err != cudaSuccess) {
printf("%s in %s at line %d\n",
cudaGetErrorString(err), __FILE__, __LINE__);
exit(EXIT_FAILURE);
}
unsigned int threads = 16;
	// Use a grid of blocks, each containing 16x16 threads
dim3 threads_per_block(threads, threads, 1);
	//N/16 blocks per grid, n = number of threads
dim3 blocks_per_grid((image_width - 1) / threads + 1, (image_height - 1) / threads + 1, 1);
	rgbToGrayscaleKernel << <blocks_per_grid, threads_per_block >> >(d_image_width, d_image_height, d_src_image, d_dest_image);
// cudaDeviceSynchronize waits for the kernel to finish, and returns
// any errors encountered during the launch.
err = cudaDeviceSynchronize();
if (err != cudaSuccess) {
		fprintf(stderr, "cudaDeviceSynchronize returned error code %d after launching rgbToGrayscaleKernel!\n", err);
}
err = cudaMemcpy(dest_image, d_dest_image, imgSizeGray, cudaMemcpyDeviceToHost);
if (err != cudaSuccess) {
printf("%s in %s at line %d\n",
cudaGetErrorString(err), __FILE__, __LINE__);
exit(EXIT_FAILURE);
}
cudaFree(d_image_width);
cudaFree(d_image_height);
cudaFree(d_src_image);
cudaFree(d_dest_image);
};
void sobelFilter(int image_width, int image_height, unsigned char *src_image, unsigned char *dest_image)
{
int *d_image_width, *d_image_height;
unsigned char *d_src_image, *d_dest_image;
unsigned int imgSize = (image_width * image_height) * sizeof(unsigned char);
cudaError_t err = cudaSuccess;
//Set Device
err = cudaSetDevice(0);
if (err != cudaSuccess) {
printf("%s in %s at line %d\n",
cudaGetErrorString(err), __FILE__, __LINE__);
exit(EXIT_FAILURE);
}
err = cudaMalloc((void **)&d_image_width, sizeof(int));
if (err != cudaSuccess) {
printf("%s in %s at line %d\n",
cudaGetErrorString(err), __FILE__, __LINE__);
exit(EXIT_FAILURE);
}
err = cudaMemcpy(d_image_width, &image_width, sizeof(int), cudaMemcpyHostToDevice);
if (err != cudaSuccess) {
printf("%s in %s at line %d\n",
cudaGetErrorString(err), __FILE__, __LINE__);
exit(EXIT_FAILURE);
}
//Copy image height to gpu
err = cudaMalloc((void **)&d_image_height, sizeof(int));
if (err != cudaSuccess) {
printf("%s in %s at line %d\n",
cudaGetErrorString(err), __FILE__, __LINE__);
exit(EXIT_FAILURE);
}
err = cudaMemcpy(d_image_height, &image_height, sizeof(int), cudaMemcpyHostToDevice);
if (err != cudaSuccess) {
printf("%s in %s at line %d\n",
cudaGetErrorString(err), __FILE__, __LINE__);
exit(EXIT_FAILURE);
}
//Copy image src to gpu
err = cudaMalloc((void **)&d_src_image, imgSize);
if (err != cudaSuccess) {
printf("%s in %s at line %d\n",
cudaGetErrorString(err), __FILE__, __LINE__);
exit(EXIT_FAILURE);
}
err = cudaMemcpy(d_src_image, src_image, imgSize, cudaMemcpyHostToDevice);
if (err != cudaSuccess) {
printf("%s in %s at line %d\n",
cudaGetErrorString(err), __FILE__, __LINE__);
exit(EXIT_FAILURE);
}
//Copy image dest to gpu
err = cudaMalloc((void **)&d_dest_image, imgSize);
if (err != cudaSuccess) {
printf("%s in %s at line %d\n",
cudaGetErrorString(err), __FILE__, __LINE__);
exit(EXIT_FAILURE);
}
err = cudaMemcpy(d_dest_image, dest_image, imgSize, cudaMemcpyHostToDevice);
if (err != cudaSuccess) {
printf("%s in %s at line %d\n",
cudaGetErrorString(err), __FILE__, __LINE__);
exit(EXIT_FAILURE);
}
unsigned int threads = 16;
	// Use a grid of blocks, each containing 16x16 threads
dim3 threads_per_block(threads, threads, 1);
//Per Grid N/16 Blocks
dim3 blocks_per_grid((image_width - 1) / threads + 1, (image_height - 1) / threads + 1, 1);
sobelFilterKernel <<<blocks_per_grid, threads_per_block >>>(d_image_width, d_image_height, d_src_image, d_dest_image);
// cudaDeviceSynchronize waits for the kernel to finish, and returns
// any errors encountered during the launch.
err = cudaDeviceSynchronize();
if (err != cudaSuccess) {
		fprintf(stderr, "cudaDeviceSynchronize returned error code %d after launching sobelFilterKernel!\n", err);
exit(EXIT_FAILURE);
}
err = cudaMemcpy(dest_image, d_dest_image, imgSize, cudaMemcpyDeviceToHost);
if (err != cudaSuccess) {
printf("%s in %s at line %d\n",
cudaGetErrorString(err), __FILE__, __LINE__);
exit(EXIT_FAILURE);
}
cudaFree(d_image_width);
cudaFree(d_image_height);
cudaFree(d_src_image);
cudaFree(d_dest_image);
};
void sobelFilterShared(int image_width, int image_height, unsigned char *src_image, unsigned char *dest_image)
{
int *d_image_width, *d_image_height;
unsigned char *d_src_image, *d_dest_image;
unsigned int imgSize = (image_width * image_height) * sizeof(unsigned char);
cudaError_t err = cudaSuccess;
//Set Device
err = cudaSetDevice(0);
if (err != cudaSuccess) {
printf("%s in %s at line %d\n",
cudaGetErrorString(err), __FILE__, __LINE__);
exit(EXIT_FAILURE);
}
err = cudaMalloc((void **)&d_image_width, sizeof(int));
if (err != cudaSuccess) {
printf("%s in %s at line %d\n",
cudaGetErrorString(err), __FILE__, __LINE__);
exit(EXIT_FAILURE);
}
err = cudaMemcpy(d_image_width, &image_width, sizeof(int), cudaMemcpyHostToDevice);
if (err != cudaSuccess) {
printf("%s in %s at line %d\n",
cudaGetErrorString(err), __FILE__, __LINE__);
exit(EXIT_FAILURE);
}
//Copy image height to gpu
err = cudaMalloc((void **)&d_image_height, sizeof(int));
if (err != cudaSuccess) {
printf("%s in %s at line %d\n",
cudaGetErrorString(err), __FILE__, __LINE__);
exit(EXIT_FAILURE);
}
err = cudaMemcpy(d_image_height, &image_height, sizeof(int), cudaMemcpyHostToDevice);
if (err != cudaSuccess) {
printf("%s in %s at line %d\n",
cudaGetErrorString(err), __FILE__, __LINE__);
exit(EXIT_FAILURE);
}
//Copy image src to gpu
err = cudaMalloc((void **)&d_src_image, imgSize);
if (err != cudaSuccess) {
printf("%s in %s at line %d\n",
cudaGetErrorString(err), __FILE__, __LINE__);
exit(EXIT_FAILURE);
}
err = cudaMemcpy(d_src_image, src_image, imgSize, cudaMemcpyHostToDevice);
if (err != cudaSuccess) {
printf("%s in %s at line %d\n",
cudaGetErrorString(err), __FILE__, __LINE__);
exit(EXIT_FAILURE);
}
//Copy image dest to gpu
err = cudaMalloc((void **)&d_dest_image, imgSize);
if (err != cudaSuccess) {
printf("%s in %s at line %d\n",
cudaGetErrorString(err), __FILE__, __LINE__);
exit(EXIT_FAILURE);
}
err = cudaMemcpy(d_dest_image, dest_image, imgSize, cudaMemcpyHostToDevice);
if (err != cudaSuccess) {
printf("%s in %s at line %d\n",
cudaGetErrorString(err), __FILE__, __LINE__);
exit(EXIT_FAILURE);
}
	// Use a grid of blocks, each containing BLOCK_W x BLOCK_W threads
dim3 threads_per_block_tiled(BLOCK_W, BLOCK_W, 1);
	//N/TILE_W blocks per grid
dim3 blocks_per_grid_tiled((image_width - 1) / TILE_W + 1, (image_height - 1) / TILE_W + 1, 1);
sobelFilterKernelTiled << <blocks_per_grid_tiled, threads_per_block_tiled >> >(d_image_width, d_image_height, d_src_image, d_dest_image);
// cudaDeviceSynchronize waits for the kernel to finish, and returns
// any errors encountered during the launch.
err = cudaDeviceSynchronize();
if (err != cudaSuccess) {
		fprintf(stderr, "cudaDeviceSynchronize returned error code %d after launching sobelFilterKernelTiled!\n", err);
exit(EXIT_FAILURE);
}
err = cudaMemcpy(dest_image, d_dest_image, imgSize, cudaMemcpyDeviceToHost);
if (err != cudaSuccess) {
printf("%s in %s at line %d\n",
cudaGetErrorString(err), __FILE__, __LINE__);
exit(EXIT_FAILURE);
}
cudaFree(d_image_width);
cudaFree(d_image_height);
cudaFree(d_src_image);
cudaFree(d_dest_image);
};
void sobelFilterTexture(int image_width, int image_height, unsigned char *src_image, unsigned char *dest_image)
{
int *d_image_width, *d_image_height;
unsigned int imgSize = (image_width * image_height) * sizeof(unsigned char);
cudaError_t err = cudaSuccess;
//Set Device
err = cudaSetDevice(0);
if (err != cudaSuccess) {
printf("%s in %s at line %d\n",
cudaGetErrorString(err), __FILE__, __LINE__);
exit(EXIT_FAILURE);
}
//Create ChannelDesc
//Sets output format of the value when the texture is fetched i.e. float texel
cudaChannelFormatDesc channelDesc = cudaCreateChannelDesc(8, 0, 0, 0, cudaChannelFormatKindUnsigned);
//Create cuda array
cudaArray *cuArray;
//Allocate cuda array
err = cudaMallocArray(&cuArray, &channelDesc, image_width, image_height);
if (err != cudaSuccess) {
printf("%s in %s at line %d\n",
cudaGetErrorString(err), __FILE__, __LINE__);
exit(EXIT_FAILURE);
}
//Copy image data to cuda array
err = cudaMemcpyToArray(cuArray, 0, 0, src_image, image_height * image_width * sizeof(unsigned char), cudaMemcpyHostToDevice);
if (err != cudaSuccess) {
printf("%s in %s at line %d\n",
cudaGetErrorString(err), __FILE__, __LINE__);
exit(EXIT_FAILURE);
}
//Set Texture
struct cudaResourceDesc resDesc;
memset(&resDesc, 0, sizeof(resDesc));
resDesc.resType = cudaResourceTypeArray;
resDesc.res.array.array = cuArray;
//Set Texture object params
struct cudaTextureDesc textDesc;
memset(&textDesc, 0, sizeof(textDesc));
textDesc.addressMode[0] = cudaAddressModeMirror;
textDesc.addressMode[1] = cudaAddressModeMirror;
textDesc.filterMode = cudaFilterModeLinear;
textDesc.readMode = cudaReadModeNormalizedFloat;
textDesc.normalizedCoords = 1;
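	//With cudaReadModeNormalizedFloat the fetched texels are returned as floats
	//in [0,1], which is why the sampling kernel rescales them by 255.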
//Create Texture Object
cudaTextureObject_t texObj = 0;
cudaCreateTextureObject(&texObj, &resDesc, &textDesc, NULL);
unsigned char *output;
err = cudaMalloc(&output, image_height * image_width * sizeof(unsigned char));
if (err != cudaSuccess) {
printf("%s in %s at line %d\n",
cudaGetErrorString(err), __FILE__, __LINE__);
exit(EXIT_FAILURE);
}
//
err = cudaMalloc((void **)&d_image_width, sizeof(int));
if (err != cudaSuccess) {
printf("%s in %s at line %d\n",
cudaGetErrorString(err), __FILE__, __LINE__);
exit(EXIT_FAILURE);
}
err = cudaMemcpy(d_image_width, &image_width, sizeof(int), cudaMemcpyHostToDevice);
if (err != cudaSuccess) {
printf("%s in %s at line %d\n",
cudaGetErrorString(err), __FILE__, __LINE__);
exit(EXIT_FAILURE);
}
//Copy image height to gpu
err = cudaMalloc((void **)&d_image_height, sizeof(int));
if (err != cudaSuccess) {
printf("%s in %s at line %d\n",
cudaGetErrorString(err), __FILE__, __LINE__);
exit(EXIT_FAILURE);
}
err = cudaMemcpy(d_image_height, &image_height, sizeof(int), cudaMemcpyHostToDevice);
if (err != cudaSuccess) {
printf("%s in %s at line %d\n",
cudaGetErrorString(err), __FILE__, __LINE__);
exit(EXIT_FAILURE);
}
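	//Rotation angle (in radians) used by the texture-sampling kernel when it
	//computes the rotated texture coordinates; 0 means no rotation.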
float angle = 0;
unsigned int threads = 16;
	// Use a grid of blocks, each containing 16x16 threads
dim3 threads_per_block(threads, threads, 1);
//Per Grid N/16 Blocks
dim3 blocks_per_grid((image_width - 1) / threads + 1, (image_height - 1) / threads + 1, 1);
sobelFilterTexture <<<blocks_per_grid, threads_per_block >>>(d_image_width, d_image_height, output, texObj, angle);
// cudaDeviceSynchronize waits for the kernel to finish, and returns
// any errors encountered during the launch.
err = cudaDeviceSynchronize();
if (err != cudaSuccess) {
		fprintf(stderr, "cudaDeviceSynchronize returned error code %d after launching sobelFilterTexture!\n", err);
exit(EXIT_FAILURE);
}
err = cudaMemcpy(dest_image, output, imgSize, cudaMemcpyDeviceToHost);
if (err != cudaSuccess) {
printf("%s in %s at line %d\n",
cudaGetErrorString(err), __FILE__, __LINE__);
exit(EXIT_FAILURE);
}
cudaFree(d_image_width);
cudaFree(d_image_height);
cudaDestroyTextureObject(texObj);
cudaFreeArray(cuArray);
cudaFree(output);
};
void getHistogramm(int image_width, int image_height, unsigned char *src_image)
{
int *d_image_width, *d_image_height;
unsigned char *d_src_image;
unsigned int *d_dest_histogramm;
unsigned int histogramm[HISTOGRAMMSIZE] = { 0 };
unsigned int imgSize = (image_width * image_height) * sizeof(unsigned char);
unsigned int histogrammSize = HISTOGRAMMSIZE * sizeof(unsigned int);
cudaError_t err = cudaSuccess;
//Set Device
err = cudaSetDevice(0);
if (err != cudaSuccess) {
printf("%s in %s at line %d\n",
cudaGetErrorString(err), __FILE__, __LINE__);
exit(EXIT_FAILURE);
}
err = cudaMalloc((void **)&d_image_width, sizeof(int));
if (err != cudaSuccess) {
printf("%s in %s at line %d\n",
cudaGetErrorString(err), __FILE__, __LINE__);
exit(EXIT_FAILURE);
}
err = cudaMemcpy(d_image_width, &image_width, sizeof(int), cudaMemcpyHostToDevice);
if (err != cudaSuccess) {
printf("%s in %s at line %d\n",
cudaGetErrorString(err), __FILE__, __LINE__);
exit(EXIT_FAILURE);
}
//Copy image height to gpu
err = cudaMalloc((void **)&d_image_height, sizeof(int));
if (err != cudaSuccess) {
printf("%s in %s at line %d\n",
cudaGetErrorString(err), __FILE__, __LINE__);
exit(EXIT_FAILURE);
}
err = cudaMemcpy(d_image_height, &image_height, sizeof(int), cudaMemcpyHostToDevice);
if (err != cudaSuccess) {
printf("%s in %s at line %d\n",
cudaGetErrorString(err), __FILE__, __LINE__);
exit(EXIT_FAILURE);
}
//Copy image src to gpu
err = cudaMalloc((void **)&d_src_image, imgSize);
if (err != cudaSuccess) {
printf("%s in %s at line %d\n",
cudaGetErrorString(err), __FILE__, __LINE__);
exit(EXIT_FAILURE);
}
err = cudaMemcpy(d_src_image, src_image, imgSize, cudaMemcpyHostToDevice);
if (err != cudaSuccess) {
printf("%s in %s at line %d\n",
cudaGetErrorString(err), __FILE__, __LINE__);
exit(EXIT_FAILURE);
}
//Copy image dest to gpu
err = cudaMalloc((void **)&d_dest_histogramm, histogrammSize);
if (err != cudaSuccess) {
printf("%s in %s at line %d\n",
cudaGetErrorString(err), __FILE__, __LINE__);
exit(EXIT_FAILURE);
}
err = cudaMemcpy(d_dest_histogramm, histogramm, histogrammSize, cudaMemcpyHostToDevice);
if (err != cudaSuccess) {
printf("%s in %s at line %d\n",
cudaGetErrorString(err), __FILE__, __LINE__);
exit(EXIT_FAILURE);
}
unsigned int threads = 16;
	// Use a grid of blocks, each containing 16x16 threads
dim3 threads_per_block(threads, threads, 1);
//Per Grid N/16 Blocks
dim3 blocks_per_grid((image_width - 1) / threads + 1, (image_height - 1) / threads + 1, 1);
getHistogrammTiledKernel <<<blocks_per_grid, threads_per_block >>>(d_image_width, d_image_height, d_src_image, d_dest_histogramm);
// cudaDeviceSynchronize waits for the kernel to finish, and returns
// any errors encountered during the launch.
err = cudaDeviceSynchronize();
if (err != cudaSuccess) {
		fprintf(stderr, "cudaDeviceSynchronize returned error code %d after launching getHistogrammTiledKernel!\n", err);
exit(EXIT_FAILURE);
}
err = cudaMemcpy(histogramm, d_dest_histogramm, histogrammSize, cudaMemcpyDeviceToHost);
if (err != cudaSuccess) {
printf("%s in %s at line %d\n",
cudaGetErrorString(err), __FILE__, __LINE__);
exit(EXIT_FAILURE);
}
long histoCount = 0;
for (int i = 0; i < HISTOGRAMMSIZE; i++) {
histoCount += histogramm[i];
}
printf("HistogrammSize: %ld \n", histoCount);
cudaFree(d_dest_histogramm);
cudaFree(d_image_width);
cudaFree(d_image_height);
cudaFree(d_src_image);
};
void streamAufgabe5(int image_width, int image_height, unsigned char *src_image, unsigned char *dest_image)
{
int *d_image_width, *d_image_height;
unsigned char *d_src_image, *d_dest_image, *d_dest_image_sobel;
unsigned int imgSize = image_width * image_height;
unsigned int imgSizeRgb = imgSize * CHANNELS * sizeof(unsigned char);
unsigned int imgSizeGray = imgSize * sizeof(unsigned char);
//Cuda Stream vars
const unsigned int stream_count = 4;
//Kernel vars
unsigned int threads = 16;
int stream_width = image_width;
int stream_height = image_height / stream_count;
int stream_size = stream_width * stream_height;
int stream_size_gray = stream_size * sizeof(unsigned char);
int stream_size_rgb = stream_size * CHANNELS * sizeof(unsigned char);
//tiled sobel
dim3 threads_per_block_tiled(BLOCK_W, BLOCK_W, 1);
	//N/TILE_W blocks per grid
dim3 blocks_per_grid_tiled((stream_width - 1) / TILE_W + 1, (stream_height - 1) / TILE_W + 1, 1);
	// Use a grid of blocks, each containing 16x16 threads
dim3 threads_per_block(threads, threads, 1);
	//N/16 blocks per grid
dim3 blocks_per_grid((stream_width - 1) / threads + 1, (stream_height - 1) / threads + 1, 1);
cudaStream_t streams[stream_count];
int dev_count;
cudaDeviceProp prop;
cudaError_t err = cudaSuccess;
//Enable device Overlap
err = cudaGetDeviceCount(&dev_count);
if (err != cudaSuccess) {
printf("%s in %s at line %d\n",
cudaGetErrorString(err), __FILE__, __LINE__);
exit(EXIT_FAILURE);
}
//Set device to a device with overlap property
for (int i = 0; i < dev_count; i++) {
cudaGetDeviceProperties(&prop, i);
if (prop.deviceOverlap) {
err = cudaSetDevice(i);
if (err != cudaSuccess) {
printf("%s in %s at line %d\n",
cudaGetErrorString(err), __FILE__, __LINE__);
exit(EXIT_FAILURE);
}
}
}
err = cudaHostAlloc((void **)&d_image_width, sizeof(int), cudaHostAllocDefault);
if (err != cudaSuccess) {
printf("%s in %s at line %d\n",
cudaGetErrorString(err), __FILE__, __LINE__);
exit(EXIT_FAILURE);
}
err = cudaMemcpy(d_image_width, &stream_width, sizeof(int), cudaMemcpyHostToDevice);
if (err != cudaSuccess) {
printf("%s in %s at line %d\n",
cudaGetErrorString(err), __FILE__, __LINE__);
exit(EXIT_FAILURE);
}
//Copy image height to gpu
err = cudaHostAlloc((void **)&d_image_height, sizeof(int), cudaHostAllocDefault);
if (err != cudaSuccess) {
printf("%s in %s at line %d\n",
cudaGetErrorString(err), __FILE__, __LINE__);
exit(EXIT_FAILURE);
}
err = cudaMemcpy(d_image_height, &stream_height, sizeof(int), cudaMemcpyHostToDevice);
if (err != cudaSuccess) {
printf("%s in %s at line %d\n",
cudaGetErrorString(err), __FILE__, __LINE__);
exit(EXIT_FAILURE);
}
//Copy image src to gpu
err = cudaHostAlloc((void **)&d_src_image, imgSizeRgb, cudaHostAllocDefault);
if (err != cudaSuccess) {
printf("%s in %s at line %d\n",
cudaGetErrorString(err), __FILE__, __LINE__);
exit(EXIT_FAILURE);
}
//Alloc memory for grayscale image
err = cudaHostAlloc((void **)&d_dest_image, imgSizeGray, cudaHostAllocDefault);
if (err != cudaSuccess) {
printf("%s in %s at line %d\n",
cudaGetErrorString(err), __FILE__, __LINE__);
exit(EXIT_FAILURE);
}
err = cudaHostAlloc((void **)&d_dest_image_sobel, imgSizeGray, cudaHostAllocDefault);
if (err != cudaSuccess) {
printf("%s in %s at line %d\n",
cudaGetErrorString(err), __FILE__, __LINE__);
exit(EXIT_FAILURE);
}
//Create cuda Streams & memory for each stream
for (int i = 0; i < stream_count; i++) {
//Create cuda Streams
err = cudaStreamCreate(&streams[i]);
if (err != cudaSuccess) {
printf("%s in %s at line %d\n",
cudaGetErrorString(err), __FILE__, __LINE__);
exit(EXIT_FAILURE);
}
}
//fill memory
for (int i = 0; i < stream_count; i++) {
//calc offset for memory copy
int offset_gray = i * stream_size;
int offset_rgb = offset_gray * CHANNELS;
//copy memory for each stream
err = cudaMemcpyAsync(&d_src_image[offset_rgb], &src_image[offset_rgb], stream_size_rgb, cudaMemcpyHostToDevice, streams[i]);
if (err != cudaSuccess) {
printf("%s in %s at line %d\n",
cudaGetErrorString(err), __FILE__, __LINE__);
exit(EXIT_FAILURE);
}
err = cudaMemcpyAsync(&d_dest_image[offset_gray], &dest_image[offset_gray], stream_size_gray, cudaMemcpyHostToDevice, streams[i]);
if (err != cudaSuccess) {
printf("%s in %s at line %d\n",
cudaGetErrorString(err), __FILE__, __LINE__);
exit(EXIT_FAILURE);
}
err = cudaMemcpyAsync(&d_dest_image_sobel[offset_gray], &dest_image[offset_gray], stream_size_gray, cudaMemcpyHostToDevice, streams[i]);
if (err != cudaSuccess) {
printf("%s in %s at line %d\n",
cudaGetErrorString(err), __FILE__, __LINE__);
exit(EXIT_FAILURE);
}
}
//execute kernel for grayscale
for (int i = 0; i < stream_count; i++) {
int offset_gray = i * stream_size;
int offset_rgb = offset_gray * CHANNELS;
rgbToGrayscaleKernelStream<<<blocks_per_grid, threads_per_block, 0, streams[i]>>>(d_image_width, d_image_height, d_src_image, d_dest_image, offset_rgb, offset_gray);
}
for (int i = 0; i < stream_count; i++) {
err = cudaStreamSynchronize(streams[i]);
if (err != cudaSuccess) {
printf("%s in %s at line %d\n",
cudaGetErrorString(err), __FILE__, __LINE__);
exit(EXIT_FAILURE);
}
}
//execute kernel for sobel
for (int i = 0; i < stream_count; i++) {
int offset_sobel = i * stream_size;
//sobelFilterKernelStreams<<<blocks_per_grid, threads_per_block, 0, streams[i]>>>(d_image_width, d_image_height, d_dest_image, d_dest_image_sobel, offset_sobel);
sobelFilterKernelTiledStreams<<<blocks_per_grid_tiled, threads_per_block_tiled, 0, streams[i] >> >(d_image_width, d_image_height, d_dest_image, d_dest_image_sobel, offset_sobel);
}
for (int i = 0; i < stream_count; i++) {
err = cudaStreamSynchronize(streams[i]);
if (err != cudaSuccess) {
printf("%s in %s at line %d\n",
cudaGetErrorString(err), __FILE__, __LINE__);
exit(EXIT_FAILURE);
}
}
// Save grayscale data
for (int i = 0; i < stream_count; i++) {
int offset = i * stream_size;
//printf("offset: %d\n", offset);
err = cudaMemcpyAsync(&dest_image[offset], &d_dest_image_sobel[offset], stream_size_gray, cudaMemcpyDeviceToHost, streams[i]);
if (err != cudaSuccess) {
printf("%s in %s at line %d\n",
cudaGetErrorString(err), __FILE__, __LINE__);
exit(EXIT_FAILURE);
}
}
cudaFreeHost(d_image_width);
cudaFreeHost(d_image_height);
cudaFreeHost(d_src_image);
cudaFreeHost(d_dest_image);
cudaFreeHost(d_dest_image_sobel);
for (int i = 0; i < stream_count; i++) {
cudaStreamDestroy(streams[i]);
}
}; |
5be100c1e9ac02ab25035f1631452b3d7e920b77.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <stdlib.h>
#include <omp.h>
#include "hip/hip_runtime.h"
#define DATA_SIZE (1 << 28)
typedef unsigned char uchar;
typedef unsigned int uint;
typedef unsigned long ulong;
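// Warp-level tail of the reduction: the volatile shared-memory accesses rely on
// implicit warp-synchronous execution, which is only safe on pre-Volta GPUs; on
// newer architectures a __syncwarp() between the steps would be required.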
template <uint blockSize>
__device__ void warpReduce(volatile uint* sharedData, int tid) {
sharedData[tid]+=sharedData[tid+32];
sharedData[tid]+=sharedData[tid+16];
sharedData[tid]+=sharedData[tid+8];
sharedData[tid]+=sharedData[tid+4];
sharedData[tid]+=sharedData[tid+2];
sharedData[tid]+=sharedData[tid+1];
}
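// Block-wide sum reduction: each thread first adds two elements that lie
// blockDim.x apart (so a block consumes 2*blockDim.x inputs), then the
// shared-memory tree reduction is unrolled on blockSize and finished by
// warpReduce. Only x is bounds-checked, so DATA_SIZE is assumed to be a
// multiple of 2*blockDim.x for globalData[x + blockDim.x] to stay in range.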
template <uint blockSize>
__global__ void aggregator(uchar* globalData,ulong* sum) {
uint x = threadIdx.x + blockIdx.x*2*blockDim.x;
uint tid = threadIdx.x;
if (x<DATA_SIZE) {
__shared__ uint sharedData[1024];
sharedData[tid] = globalData[x]+globalData[x+blockDim.x];
__syncthreads();
if (blockSize >=1024) {
if (tid<512)
sharedData[tid]+=sharedData[tid+512];
__syncthreads();
}
if (blockSize >=512) {
if (tid<256)
sharedData[tid]+=sharedData[tid+256];
__syncthreads();
}
if (blockSize >=256) {
if (tid<128)
sharedData[tid]+=sharedData[tid+128];
__syncthreads();
}
if (blockSize >=128) {
if (tid<64)
sharedData[tid]+=sharedData[tid+64];
__syncthreads();
}
if (tid<32)
warpReduce<blockSize>(sharedData,tid);
if (threadIdx.x==0) {
sum[blockIdx.x]+= sharedData[0];
}
}
}
int main(int argc,char** argv) {
srand(2019);
uchar* data = (uchar*) malloc(DATA_SIZE);
for (int i=0;i<DATA_SIZE;i++) {
data[i] = rand()%256;
}
ulong serialCount = 0;
double start = omp_get_wtime();
for (uint i=0;i<DATA_SIZE;i++ ) {
serialCount+=data[i];
}
double end = omp_get_wtime();
double serialDuration = end-start;
printf("Serial operation took %.5f seconds to run. The total is %u, Speed up -\n",serialDuration,serialCount);
ulong parallelCount = 0;
start = omp_get_wtime();
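	// CudaContext is assumed to be a project-specific helper defined elsewhere
	// (its header is not included in this file); init/cudaIn/cudaInOut/
	// synchronize/dispose are taken to wrap device setup, the host-to-device
	// copies and the copy-back of the per-block sums.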
struct CudaContext cudaContext;
cudaContext.init();
const int numberOfThreads = 1024;
const int numberOfBlocks = cudaContext.getBlocks(DATA_SIZE)/2;
ulong* sums = (ulong*) malloc(sizeof(ulong)*numberOfBlocks);
for (uint i=0;i<numberOfBlocks;i++)
sums[i]=0;
hipLaunchKernelGGL(( aggregator<1024>), dim3(numberOfBlocks),dim3(numberOfThreads), 0, 0,
(uchar*) cudaContext.cudaIn((void*) data,DATA_SIZE),
(ulong*) cudaContext.cudaInOut((void*) sums,sizeof(ulong)*numberOfBlocks));
cudaContext.synchronize((void*)sums);
for (int i=0;i<numberOfBlocks;i++) {
parallelCount+=sums[i];
}
end = omp_get_wtime();
double parallelDuration = end - start;
printf("Parallel operation took %.5f seconds to run. The total is %u, Speed up %.2f\n",parallelDuration,parallelCount,serialDuration/parallelDuration);
cudaContext.dispose();
free(data);
free(sums);
printf("Finished");
return 0;
} | 5be100c1e9ac02ab25035f1631452b3d7e920b77.cu | #include <stdio.h>
#include <stdlib.h>
#include <omp.h>
#include "cuda.h"
#define DATA_SIZE (1 << 28)
typedef unsigned char uchar;
typedef unsigned int uint;
typedef unsigned long ulong;
template <uint blockSize>
__device__ void warpReduce(volatile uint* sharedData, int tid) {
sharedData[tid]+=sharedData[tid+32];
sharedData[tid]+=sharedData[tid+16];
sharedData[tid]+=sharedData[tid+8];
sharedData[tid]+=sharedData[tid+4];
sharedData[tid]+=sharedData[tid+2];
sharedData[tid]+=sharedData[tid+1];
}
template <uint blockSize>
__global__ void aggregator(uchar* globalData,ulong* sum) {
uint x = threadIdx.x + blockIdx.x*2*blockDim.x;
uint tid = threadIdx.x;
if (x<DATA_SIZE) {
__shared__ uint sharedData[1024];
sharedData[tid] = globalData[x]+globalData[x+blockDim.x];
__syncthreads();
if (blockSize >=1024) {
if (tid<512)
sharedData[tid]+=sharedData[tid+512];
__syncthreads();
}
if (blockSize >=512) {
if (tid<256)
sharedData[tid]+=sharedData[tid+256];
__syncthreads();
}
if (blockSize >=256) {
if (tid<128)
sharedData[tid]+=sharedData[tid+128];
__syncthreads();
}
if (blockSize >=128) {
if (tid<64)
sharedData[tid]+=sharedData[tid+64];
__syncthreads();
}
if (tid<32)
warpReduce<blockSize>(sharedData,tid);
if (threadIdx.x==0) {
sum[blockIdx.x]+= sharedData[0];
}
}
}
int main(int argc,char** argv) {
srand(2019);
uchar* data = (uchar*) malloc(DATA_SIZE);
for (int i=0;i<DATA_SIZE;i++) {
data[i] = rand()%256;
}
ulong serialCount = 0;
double start = omp_get_wtime();
for (uint i=0;i<DATA_SIZE;i++ ) {
serialCount+=data[i];
}
double end = omp_get_wtime();
double serialDuration = end-start;
printf("Serial operation took %.5f seconds to run. The total is %u, Speed up -\n",serialDuration,serialCount);
ulong parallelCount = 0;
start = omp_get_wtime();
struct CudaContext cudaContext;
cudaContext.init();
const int numberOfThreads = 1024;
const int numberOfBlocks = cudaContext.getBlocks(DATA_SIZE)/2;
ulong* sums = (ulong*) malloc(sizeof(ulong)*numberOfBlocks);
for (uint i=0;i<numberOfBlocks;i++)
sums[i]=0;
aggregator<1024><<<numberOfBlocks,numberOfThreads>>>(
(uchar*) cudaContext.cudaIn((void*) data,DATA_SIZE),
(ulong*) cudaContext.cudaInOut((void*) sums,sizeof(ulong)*numberOfBlocks));
cudaContext.synchronize((void*)sums);
for (int i=0;i<numberOfBlocks;i++) {
parallelCount+=sums[i];
}
end = omp_get_wtime();
double parallelDuration = end - start;
printf("Parallel operation took %.5f seconds to run. The total is %u, Speed up %.2f\n",parallelDuration,parallelCount,serialDuration/parallelDuration);
cudaContext.dispose();
free(data);
free(sums);
printf("Finished");
return 0;
} |
93b49dcc2243b61e3c5bf556255a7d65c612ecc3.hip | // !!! This is a file automatically generated by hipify!!!
#include "../common/common.h"
#include <stdio.h>
#include <hip/hip_runtime.h>
#include <stdlib.h>
#include "../common/stopwatch.h"
void initialData(int *in, const int size)
{
for (int i = 0; i < size; i++)
{
in[i] = (int)( rand() ) % 100; //100.0f;
}
return;
}
void printData(int *in, const int size)
{
for (int i = 0; i < size; i++)
{
printf("%dth element: %d\n", i, in[i]);
}
return;
}
__global__ void reduceNeighbored (int *g_idata, int *g_odata, unsigned int n)
{
// set thread ID
unsigned int tid = threadIdx.x;
unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
// convert global data pointer to the local pointer of this block
int *idata = g_idata + blockIdx.x * blockDim.x;
// boundary check
if (idx >= n) return;
// in-place reduction in global memory
for (int stride = 1; stride < blockDim.x; stride *= 2)
{
if ((tid % (2 * stride)) == 0)
{
idata[tid] += idata[tid + stride];
}
// synchronize within threadblock
__syncthreads();
}
// write result for this block to global mem
if (tid == 0) g_odata[blockIdx.x] = idata[0];
}
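// Recursive reduction via CUDA dynamic parallelism: every level halves the
// active stride and thread 0 launches a child grid for the next level; no
// explicit synchronization is performed between parent and child grids.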
__global__ void gpuRecursiveReduceNosync (int *g_idata, int *g_odata,
unsigned int isize)
{
// set thread ID
unsigned int tid = threadIdx.x;
// convert global data pointer to the local pointer of this block
int *idata = g_idata + blockIdx.x * blockDim.x;
int *odata = &g_odata[blockIdx.x];
// stop condition
if (isize == 2 && tid == 0)
{
g_odata[blockIdx.x] = idata[0] + idata[1];
return;
}
// nested invoke
int istride = isize >> 1;
if(istride > 1 && tid < istride)
{
idata[tid] += idata[tid + istride];
if(tid == 0)
{
hipLaunchKernelGGL(( gpuRecursiveReduceNosync), dim3(1), dim3(istride), 0, 0, idata, odata, istride);
}
}
}
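// Helper launched from the skalarni* kernels: it applies the first halving step
// to the blockDim.x partial products belonging to block 'bid' and then hands
// the remaining reduction over to gpuRecursiveReduceNosync.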
__global__ void skalarniPomGPU(int *g_idata, int *g_odata,unsigned int
bid, unsigned int isize)
{
// set thread ID
unsigned int tid = threadIdx.x;
// convert global data pointer to the local pointer of this block
int *idata = g_idata + bid * blockDim.x;
int *odata = &g_odata[bid];
int istride = isize >> 1;
if ( tid < istride)
{
idata[tid]+=idata[tid + istride];
if(tid==0)
{
hipLaunchKernelGGL(( gpuRecursiveReduceNosync), dim3(1),dim3(istride), 0, 0, idata,odata,istride);
}
}
}
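// Dot-product kernel: each thread multiplies one element pair in place
// (A[i] = A[i] * B[i]); afterwards thread 0 of every block launches
// skalarniPomGPU to reduce that block's blockDim.x products via dynamic
// parallelism.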
__global__ void skalarni(int* A, int* B, int *g_odata,unsigned int N)
{
unsigned tid=threadIdx.x;
int i = blockIdx.x * blockDim.x + tid;
if(i<N) {
A[i]=A[i]*B[i];
}
// __syncthreads();
//__threadfence();
if(tid==0)
{
hipLaunchKernelGGL(( skalarniPomGPU), dim3(1),dim3(blockDim.x), 0, 0, A,g_odata,blockIdx.x,blockDim.x);
}
}
__global__ void skalarniUnroll2(int* A, int* B, int *g_odata,unsigned int N)
{
unsigned tid=threadIdx.x;
unsigned int i = blockIdx.x * blockDim.x * 2 + tid;
if (i < N) A[i] = A[i] * B[i];
if (i + blockDim.x < N) {
A[i + blockDim.x] = A[i + blockDim.x] * B[i + blockDim.x];
}
if(tid==0)
{
hipLaunchKernelGGL(( skalarniPomGPU), dim3(1),dim3(blockDim.x), 0, 0, A,g_odata,blockIdx.x,blockDim.x);
}
}
__global__ void skalarniUnroll4(int* A, int* B, int*C, int *g_odata,unsigned int N)
{
unsigned tid=threadIdx.x;
unsigned int i = blockIdx.x * blockDim.x * 4 + tid;
if (i < N) C[i] = A[i] * B[i];
if (i + blockDim.x < N) {
C[i + blockDim.x] = A[i + blockDim.x] * B[i + blockDim.x];
}
if (i + 2 * blockDim.x < N) {
C[i + 2 * blockDim.x] = A[i+ 2 * blockDim.x] * B[i + 2 * blockDim.x];
}
if (i+ 3 * blockDim.x < N) {
C[i + 3 * blockDim.x] = A[i + 3 * blockDim.x] * B[i + 3 * blockDim.x];
}
if(tid==0)
{
hipLaunchKernelGGL(( skalarniPomGPU), dim3(1),dim3(blockDim.x), 0, 0, C,g_odata,blockIdx.x,blockDim.x);
}
}
__global__ void skalarni3(int* A, int* B, int* C, int *g_odata,unsigned int N)
{
unsigned tid=threadIdx.x;
int i = blockIdx.x * blockDim.x + tid;
if(i<N) {
C[i]=A[i]*B[i];
}
if(tid==0)
{
hipLaunchKernelGGL(( skalarniPomGPU), dim3(1),dim3(blockDim.x), 0, 0, C,g_odata,blockIdx.x,blockDim.x);
}
}
/* The remaining kernels are, in my view, unsafe
 because synchronization between blocks is not guaranteed.
 Still, they produce correct results for different array sizes.
 */
/*
__global__ void skalarni2(int* A, int* B, int *g_odata,unsigned int N,int grid)
{
unsigned tid=threadIdx.x;
int i = blockIdx.x * blockDim.x + tid;
if(i<N) {
A[i]=A[i]*B[i];
}
if(blockIdx.x==0 && tid==0)
{
hipLaunchKernelGGL(( gpuRecursiveReduceNosync), dim3(grid),dim3(blockDim.x), 0, 0, A,g_odata,blockDim.x);
}
} */
__global__ void skalarni2Unroll4(int* A, int* B,int* C, int *g_odata, const unsigned int N,int grid)
{
unsigned tid=threadIdx.x;
unsigned int i = blockIdx.x * blockDim.x * 4 + tid;
if (i < N) C[i] = A[i] * B[i];
if (i + blockDim.x < N) {
C[i + blockDim.x] = A[i + blockDim.x] * B[i + blockDim.x];
}
if (i + 2 * blockDim.x < N) {
C[i + 2 * blockDim.x] = A[i+ 2 * blockDim.x] * B[i + 2 * blockDim.x];
}
if (i+ 3 * blockDim.x < N) {
C[i + 3 * blockDim.x] = A[i + 3 * blockDim.x] * B[i + 3 * blockDim.x];
}
if(blockIdx.x==0 && tid == 0)
{
hipLaunchKernelGGL(( gpuRecursiveReduceNosync), dim3(grid),dim3(blockDim.x), 0, 0, C,g_odata,blockDim.x);
}
}
__global__ void skalarni2Unroll2(int* A, int* B,int* C, int *g_odata, const unsigned int N,int grid)
{
unsigned tid=threadIdx.x;
unsigned int i = blockIdx.x * blockDim.x * 2 + tid;
if (i < N) C[i] = A[i] * B[i];
if (i + blockDim.x < N) {
C[i + blockDim.x] = A[i + blockDim.x] * B[i + blockDim.x];
}
if(blockIdx.x==0 && tid == 0)
{
hipLaunchKernelGGL(( gpuRecursiveReduceNosync), dim3(grid),dim3(blockDim.x), 0, 0, C,g_odata,blockDim.x);
}
}
__global__ void skalarni2(int* A, int* B,int* C, int *g_odata, const unsigned int N,int grid)
{
unsigned tid=threadIdx.x;
unsigned int i = blockIdx.x * blockDim.x + tid;
if (i < N) C[i] = A[i] * B[i];
if(i==N-1)
{
hipLaunchKernelGGL(( gpuRecursiveReduceNosync), dim3(grid),dim3(blockDim.x), 0, 0, C,g_odata,blockDim.x);
}
}
__global__ void skalarni4(int* A, int* B,int* C, int *g_odata, const unsigned int N,int grid)
{
unsigned tid=threadIdx.x;
unsigned int i = blockIdx.x * blockDim.x + tid;
if (i < N) C[i] = A[i] * B[i];
if(i==N-1)
{
hipLaunchKernelGGL(( reduceNeighbored), dim3(grid),dim3(blockDim.x), 0, 0, C,g_odata,N);
}
}
int cpuSkalarni(int* in1, int* in2, int size)
{
int s=0;
for(int i=0; i<size; i++)
s+=in1[i]*in2[i];
return s;
}
int main(int argc, char **argv)
{
// set up device
int dev = 0;
hipDeviceProp_t deviceProp;
CHECK(hipGetDeviceProperties(&deviceProp, dev));
printf("%s starting reduction at ", argv[0]);
printf("device %d: %s ", dev, deviceProp.name);
CHECK(hipSetDevice(dev));
int power=22;
int nthreads=512;
if (argc > 1)
{
nthreads = atoi(argv[1]);
}
if (argc > 2)
{
power = atoi(argv[2]);
}
int nElem = 1 << power;
printf("Vector size %d\n", nElem);
size_t nBytes = nElem * sizeof(int);
int *h_A, *h_B, *h_Out;
dim3 block(nthreads);
dim3 grid((nElem + block.x - 1) / block.x);
h_A = (int *)malloc(nBytes);
h_B = (int *)malloc(nBytes);
h_Out = (int *) malloc(grid.x*sizeof(int));
initialData(h_A, nElem);
initialData(h_B, nElem);
Stopwatch s;
int h_s = cpuSkalarni(h_A, h_B, nElem);
printf("\nSkalarniOnCPU- Time elapsed %fsec \n", s.elapsed());
//print(h_A, h_B, nElem);
int *d_A, *d_B, *d_C, *d_Out;
CHECK(hipMalloc((int**)&d_A, nBytes));
CHECK(hipMalloc((int**)&d_B, nBytes));
CHECK(hipMalloc((int**)&d_C, nBytes));
CHECK(hipMalloc((int**)&d_Out, grid.x*sizeof(int)));
	//transfer data from the host to the device
CHECK(hipMemcpy(d_A, h_A, nBytes, hipMemcpyHostToDevice));
CHECK(hipMemcpy(d_B, h_B, nBytes, hipMemcpyHostToDevice));
	//kernel launch from the host
//printData(h_A,nElem);
//printData(h_B,nElem);
s.reset();
hipLaunchKernelGGL(( skalarni) , dim3(grid), dim3(block), 0, 0, d_A, d_B, d_Out, nElem);
CHECK(hipDeviceSynchronize());
CHECK(hipGetLastError());
printf("\n skalarni <<<%d,%d>>> Time elapsed %f" \
"sec\n", grid.x, block.x, s.elapsed());
CHECK(hipMemcpy(h_Out, d_Out, grid.x*sizeof(int), hipMemcpyDeviceToHost));
int d_s=0;
for(int i=0;i<grid.x;i++) d_s+=h_Out[i];
printf(" %d",h_s);
printf("\n %d\n",d_s);
CHECK(hipMemcpy(d_A, h_A, nBytes, hipMemcpyHostToDevice));
CHECK(hipMemcpy(d_B, h_B, nBytes, hipMemcpyHostToDevice));
CHECK(hipMemset(d_Out,0,grid.x*sizeof(int)));
s.reset();
hipLaunchKernelGGL(( skalarniUnroll2) , dim3(grid), dim3(block), 0, 0, d_A, d_B, d_Out, nElem);
CHECK(hipDeviceSynchronize());
CHECK(hipGetLastError());
printf("\n skalarniUnroll2 <<<%d,%d>>> Time elapsed %f" \
"sec\n", grid.x, block.x, s.elapsed());
CHECK(hipMemcpy(h_Out, d_Out, grid.x*sizeof(int), hipMemcpyDeviceToHost));
d_s=0;
for(int i=0;i<grid.x;i++) d_s+=h_Out[i];
printf(" %d",h_s);
printf("\n %d\n",d_s);
CHECK(hipMemcpy(d_A, h_A, nBytes, hipMemcpyHostToDevice));
CHECK(hipMemcpy(d_B, h_B, nBytes, hipMemcpyHostToDevice));
CHECK(hipMemset(d_Out,0,grid.x*sizeof(int)));
s.reset();
hipLaunchKernelGGL(( skalarniUnroll4) , dim3(grid), dim3(block), 0, 0, d_A, d_B,d_C, d_Out, nElem);
CHECK(hipDeviceSynchronize());
CHECK(hipGetLastError());
printf("\n skalarniUnroll4 <<<%d,%d>>> Time elapsed %f" \
"sec\n", grid.x, block.x, s.elapsed());
CHECK(hipMemcpy(h_Out, d_Out, grid.x*sizeof(int), hipMemcpyDeviceToHost));
d_s=0;
for(int i=0;i<grid.x;i++) d_s+=h_Out[i];
printf(" %d",h_s);
printf("\n %d\n",d_s);
CHECK(hipMemcpy(d_A, h_A, nBytes, hipMemcpyHostToDevice));
CHECK(hipMemcpy(d_B, h_B, nBytes, hipMemcpyHostToDevice));
CHECK(hipMemset(d_Out,0,grid.x*sizeof(int)));
s.reset();
hipLaunchKernelGGL(( skalarni2), dim3(grid),dim3(block), 0, 0, d_A,d_B,d_C,d_Out,nElem,grid.x);
CHECK(hipDeviceSynchronize());
CHECK(hipGetLastError());
printf("\n Skalarni2 <<<%d,%d>>> Time elapsed %f" \
"sec\n", grid.x, block.x, s.elapsed());
	// copy the kernel result back to the host
CHECK(hipMemcpy(h_Out, d_Out, grid.x*sizeof(int), hipMemcpyDeviceToHost));
d_s=0;
for(int i=0;i<grid.x;i++) d_s+=h_Out[i];
printf(" %d",h_s);
printf("\n %d\n",d_s);
CHECK(hipMemcpy(d_A, h_A, nBytes, hipMemcpyHostToDevice));
CHECK(hipMemcpy(d_B, h_B, nBytes, hipMemcpyHostToDevice));
CHECK(hipMemset(d_Out,0,grid.x*sizeof(int)));
s.reset();
hipLaunchKernelGGL(( skalarni3), dim3(grid),dim3(block), 0, 0, d_A,d_B,d_C,d_Out,nElem);
CHECK(hipDeviceSynchronize());
CHECK(hipGetLastError());
printf("\n Skalarni3 <<<%d,%d>>> Time elapsed %f" \
"sec\n", grid.x, block.x, s.elapsed());
	// copy the kernel result back to the host
CHECK(hipMemcpy(h_Out, d_Out, grid.x*sizeof(int), hipMemcpyDeviceToHost));
d_s=0;
for(int i=0;i<grid.x;i++) d_s+=h_Out[i];
printf(" %d",h_s);
printf("\n %d\n",d_s);
CHECK(hipMemset(d_Out,0,grid.x*sizeof(int)));
s.reset();
hipLaunchKernelGGL(( skalarni2Unroll2), dim3(grid.x/2),dim3(block), 0, 0, d_A,d_B,d_C,d_Out,nElem,grid.x);
CHECK(hipDeviceSynchronize());
CHECK(hipGetLastError());
printf("\n skalarni2Unroll2 <<<%d,%d>>> Time elapsed %f" \
"sec\n", grid.x, block.x, s.elapsed());
	// copy the kernel result back to the host
CHECK(hipMemcpy(h_Out, d_Out, grid.x*sizeof(int), hipMemcpyDeviceToHost));
d_s=0;
for(int i=0;i<grid.x;i++)
{
d_s+=h_Out[i];
// printf("\t %d",h_Out[i]);
}
printf(" %d",h_s);
printf("\n %d \n",d_s);
CHECK(hipMemset(d_Out,0,grid.x*sizeof(int)));
s.reset();
hipLaunchKernelGGL(( skalarni2Unroll4), dim3(grid.x/4),dim3(block), 0, 0, d_A,d_B,d_C,d_Out,nElem,grid.x);
CHECK(hipDeviceSynchronize());
CHECK(hipGetLastError());
printf("\n skalarni2Unroll4 <<<%d,%d>>> Time elapsed %f" \
"sec\n", grid.x, block.x, s.elapsed());
	// copy the kernel result back to the host
CHECK(hipMemcpy(h_Out, d_Out, grid.x*sizeof(int), hipMemcpyDeviceToHost));
d_s=0;
for(int i=0;i<grid.x;i++) d_s+=h_Out[i];
printf(" %d",h_s);
printf("\n %d \n",d_s);
CHECK(hipMemset(d_Out,0,grid.x*sizeof(int)));
s.reset();
hipLaunchKernelGGL(( skalarni4), dim3(grid),dim3(block), 0, 0, d_A,d_B,d_C,d_Out,nElem,grid.x);
CHECK(hipDeviceSynchronize());
CHECK(hipGetLastError());
memset(h_Out,0,grid.x*sizeof(int));
printf("\n skalarni4 <<<%d,%d>>> Time elapsed %f" \
"sec\n", grid.x, block.x, s.elapsed());
	// copy the kernel result back to the host
CHECK(hipMemcpy(h_Out, d_Out, grid.x*sizeof(int), hipMemcpyDeviceToHost));
d_s=0;
for(int i=0;i<grid.x;i++) d_s+=h_Out[i];
printf(" %d",h_s);
printf("\n %d \n",d_s);
hipFree(d_A);
hipFree(d_B);
hipFree(d_C);
hipFree(d_Out);
free(h_A);
free(h_B);
free(h_Out);
hipDeviceReset();
return(0);
}
/*
GeForce GTX 1050
SkalarniOnCPU- Time elapsed 0.012361sec
skalarni <<<131072,32>>> Time elapsed 0.645046sec
1677013280
1677013280
skalarniUnroll2 <<<131072,32>>> Time elapsed 0.637585sec
1677013280
1677013280
skalarniUnroll4 <<<131072,32>>> Time elapsed 0.637881sec
1677013280
1677013280
Skalarni2 <<<131072,32>>> Time elapsed 0.429906sec
1677013280
1677013280
Skalarni3 <<<131072,32>>> Time elapsed 0.636618sec
1677013280
1677013280
skalarni2Unroll2 <<<131072,32>>> Time elapsed 0.429518sec
1677013280
1677013280
skalarni2Unroll4 <<<131072,32>>> Time elapsed 0.428573sec
1677013280
1677013280
skalarni4 <<<131072,32>>> Time elapsed 0.009061sec
1677013280
1677013280
==1524== Warning: 4124404 records were dropped due to insufficient device buffer space. You can configure the buffer space using advanced options --device-buffer-size, --device-cdp-buffer-size
==1524== Profiling application: a 32
==1524== Profiling result:
Type Time(%) Time Calls (host) Calls (device) Avg Min Max Name
GPU activities: 29.36% 1.28975s 0 55872 23.084us 1.1840us 107.42us gpuRecursiveReduceNosync(int*, int*, unsigned int)
14.66% 643.99ms 1 0 643.99ms 643.99ms 643.99ms skalarni(int*, int*, int*, unsigned int)
14.50% 637.14ms 1 0 637.14ms 637.14ms 637.14ms skalarniUnroll4(int*, int*, int*, int*, unsigned int)
14.50% 636.85ms 1 0 636.85ms 636.85ms 636.85ms skalarniUnroll2(int*, int*, int*, unsigned int)
14.48% 635.87ms 1 0 635.87ms 635.87ms 635.87ms skalarni3(int*, int*, int*, int*, unsigned int)
9.25% 406.42ms 0 14032 28.963us 16.992us 107.20us skalarniPomGPU(int*, int*, unsigned int, unsigned int)
1.37% 60.250ms 10 - 6.0250ms 5.4468ms 6.5732ms [CUDA memcpy HtoD]
1.08% 47.394ms 1 0 47.394ms 47.394ms 47.394ms skalarni2Unroll2(int*, int*, int*, int*, unsigned int, int)
0.65% 28.343ms 1 0 28.343ms 28.343ms 28.343ms skalarni2Unroll4(int*, int*, int*, int*, unsigned int, int)
0.07% 3.1848ms 1 0 3.1848ms 3.1848ms 3.1848ms skalarni2(int*, int*, int*, int*, unsigned int, int)
0.07% 2.9479ms 1 0 2.9479ms 2.9479ms 2.9479ms skalarni4(int*, int*, int*, int*, unsigned int, int)
0.02% 663.80us 8 - 82.975us 78.719us 86.271us [CUDA memcpy DtoH]
0.00% 4.8960us 7 - 699ns 640ns 896ns [CUDA memset]
API calls: 90.80% 3.85326s 8 - 481.66ms 8.7100ms 644.96ms hipDeviceSynchronize
5.88% 249.34ms 4 - 62.335ms 557.60us 246.32ms hipMalloc
1.66% 70.373ms 18 - 3.9096ms 394.70us 7.0672ms hipMemcpy
1.58% 66.956ms 1 - 66.956ms 66.956ms 66.956ms hipDeviceReset
0.02% 1.0196ms 4 - 254.90us 153.00us 521.80us hipFree
0.02% 787.80us 7 - 112.54us 51.700us 238.60us hipMemset
0.02% 692.10us 97 - 7.1350us 200ns 322.30us hipDeviceGetAttribute
0.01% 598.30us 1 - 598.30us 598.30us 598.30us hipGetDeviceProperties
0.01% 533.80us 8 - 66.725us 37.700us 218.10us cudaLaunchKernel
0.00% 44.000us 1 - 44.000us 44.000us 44.000us cuDeviceTotalMem
0.00% 13.300us 1 - 13.300us 13.300us 13.300us hipDeviceGetPCIBusId
0.00% 12.300us 1 - 12.300us 12.300us 12.300us hipSetDevice
0.00% 7.5000us 8 - 937ns 800ns 1.0000us hipGetLastError
0.00% 6.8000us 2 - 3.4000us 400ns 6.4000us hipDeviceGet
0.00% 2.1000us 3 - 700ns 400ns 1.1000us hipGetDeviceCount
0.00% 1.3000us 1 - 1.3000us 1.3000us 1.3000us hipDeviceGetName
0.00% 500ns 1 - 500ns 500ns 500ns hipDeviceGetUuid
0.00% 500ns 1 - 500ns 500ns 500ns cuDeviceGetLuid
SkalarniOnCPU- Time elapsed 0.012476sec
skalarni <<<16384,256>>> Time elapsed 0.140732sec
1677013280
1677013280
skalarniUnroll2 <<<16384,256>>> Time elapsed 0.130778sec
1677013280
1677013280
skalarniUnroll4 <<<16384,256>>> Time elapsed 0.132044sec
1677013280
1677013280
Skalarni2 <<<16384,256>>> Time elapsed 0.105880sec
1677013280
1677013280
Skalarni3 <<<16384,256>>> Time elapsed 0.131352sec
1677013280
1677013280
skalarni2Unroll2 <<<16384,256>>> Time elapsed 0.105701sec
1677013280
1677013280
skalarni2Unroll4 <<<16384,256>>> Time elapsed 0.105816sec
1677013280
1677013280
skalarni4 <<<16384,256>>> Time elapsed 0.004052sec
1677013280
1677013280
==1240== Warning: 798452 records were dropped due to insufficient device buffer space. You can configure the buffer space using advanced options --device-buffer-size, --device-cdp-buffer-size
==1240== Profiling application: a 256
==1240== Profiling result:
Type Time(%) Time Calls (host) Calls (device) Avg Min Max Name
GPU activities: 62.92% 1.50742s 0 61145 24.653us 1.2160us 64.927us gpuRecursiveReduceNosync(int*, int*, unsigned int)
10.36% 248.27ms 0 8759 28.344us 18.336us 61.824us skalarniPomGPU(int*, int*, unsigned int, unsigned int)
5.83% 139.69ms 1 0 139.69ms 139.69ms 139.69ms skalarni(int*, int*, int*, unsigned int)
5.47% 131.12ms 1 0 131.12ms 131.12ms 131.12ms skalarniUnroll4(int*, int*, int*, int*, unsigned int)
5.44% 130.34ms 1 0 130.34ms 130.34ms 130.34ms skalarni3(int*, int*, int*, int*, unsigned int)
5.43% 130.00ms 1 0 130.00ms 130.00ms 130.00ms skalarniUnroll2(int*, int*, int*, unsigned int)
2.55% 61.031ms 10 - 6.1031ms 5.6811ms 6.6876ms [CUDA memcpy HtoD]
1.24% 29.623ms 1 0 29.623ms 29.623ms 29.623ms skalarni2Unroll2(int*, int*, int*, int*, unsigned int, int)
0.65% 15.552ms 1 0 15.552ms 15.552ms 15.552ms skalarni2Unroll4(int*, int*, int*, int*, unsigned int, int)
0.05% 1.2330ms 1 0 1.2330ms 1.2330ms 1.2330ms skalarni4(int*, int*, int*, int*, unsigned int, int)
0.05% 1.2298ms 1 0 1.2298ms 1.2298ms 1.2298ms skalarni2(int*, int*, int*, int*, unsigned int, int)
0.00% 87.327us 8 - 10.915us 10.560us 11.360us [CUDA memcpy DtoH]
0.00% 21.023us 7 - 3.0030us 2.7190us 3.3920us [CUDA memset]
API calls: 68.90% 855.15ms 8 - 106.89ms 3.9697ms 140.64ms hipDeviceSynchronize
19.61% 243.44ms 4 - 60.859ms 583.20us 240.45ms hipMalloc
5.62% 69.803ms 18 - 3.8779ms 244.70us 7.1879ms hipMemcpy
5.56% 69.051ms 1 - 69.051ms 69.051ms 69.051ms hipDeviceReset
0.10% 1.2515ms 4 - 312.88us 223.60us 429.20us hipFree
0.09% 1.0970ms 8 - 137.13us 69.600us 301.10us cudaLaunchKernel
0.04% 497.70us 97 - 5.1300us 200ns 215.70us hipDeviceGetAttribute
0.03% 372.80us 7 - 53.257us 21.900us 204.30us hipMemset
0.03% 370.70us 1 - 370.70us 370.70us 370.70us hipGetDeviceProperties
0.00% 41.700us 1 - 41.700us 41.700us 41.700us cuDeviceTotalMem
0.00% 13.400us 1 - 13.400us 13.400us 13.400us hipDeviceGetPCIBusId
0.00% 12.400us 1 - 12.400us 12.400us 12.400us hipSetDevice
0.00% 6.6000us 2 - 3.3000us 500ns 6.1000us hipDeviceGet
0.00% 6.5000us 8 - 812ns 500ns 1.1000us hipGetLastError
0.00% 2.4000us 3 - 800ns 300ns 1.1000us hipGetDeviceCount
0.00% 1.4000us 1 - 1.4000us 1.4000us 1.4000us hipDeviceGetName
0.00% 800ns 1 - 800ns 800ns 800ns cuDeviceGetLuid
0.00% 500ns 1 - 500ns 500ns 500ns hipDeviceGetUuid
SkalarniOnCPU- Time elapsed 0.012613sec
skalarni <<<8192,512>>> Time elapsed 0.086526sec
1677013280
1677013280
skalarniUnroll2 <<<8192,512>>> Time elapsed 0.076569sec
1677013280
1677013280
skalarniUnroll4 <<<8192,512>>> Time elapsed 0.076975sec
1677013280
1677013280
Skalarni2 <<<8192,512>>> Time elapsed 0.065407sec
1677013280
1677013280
Skalarni3 <<<8192,512>>> Time elapsed 0.076832sec
1677013280
1677013280
skalarni2Unroll2 <<<8192,512>>> Time elapsed 0.065597sec
1677013280
1677013280
skalarni2Unroll4 <<<8192,512>>> Time elapsed 0.064851sec
1677013280
1677013280
skalarni4 <<<8192,512>>> Time elapsed 0.004588sec
1677013280
1677013280
==3456== Warning: 421620 records were dropped due to insufficient device buffer space. You can configure the buffer space using advanced options --device-buffer-size, --device-cdp-buffer-size
==3456== Profiling application: a 512
==3456== Profiling result:
Type Time(%) Time Calls (host) Calls (device) Avg Min Max Name
GPU activities: 71.08% 1.54750s 0 62110 24.915us 1.2160us 50.400us gpuRecursiveReduceNosync(int*, int*, unsigned int)
9.79% 213.13ms 0 7794 27.344us 19.136us 47.711us skalarniPomGPU(int*, int*, unsigned int, unsigned int)
3.93% 85.534ms 1 0 85.534ms 85.534ms 85.534ms skalarni(int*, int*, int*, unsigned int)
3.50% 76.160ms 1 0 76.160ms 76.160ms 76.160ms skalarniUnroll4(int*, int*, int*, int*, unsigned int)
3.49% 76.075ms 1 0 76.075ms 76.075ms 76.075ms skalarni3(int*, int*, int*, int*, unsigned int)
3.48% 75.695ms 1 0 75.695ms 75.695ms 75.695ms skalarniUnroll2(int*, int*, int*, unsigned int)
2.76% 59.995ms 10 - 5.9995ms 5.4042ms 6.7181ms [CUDA memcpy HtoD]
1.20% 26.159ms 1 0 26.159ms 26.159ms 26.159ms skalarni2Unroll2(int*, int*, int*, int*, unsigned int, int)
0.63% 13.655ms 1 0 13.655ms 13.655ms 13.655ms skalarni2Unroll4(int*, int*, int*, int*, unsigned int, int)
0.09% 1.9138ms 1 0 1.9138ms 1.9138ms 1.9138ms skalarni2(int*, int*, int*, int*, unsigned int, int)
0.06% 1.2708ms 1 0 1.2708ms 1.2708ms 1.2708ms skalarni4(int*, int*, int*, int*, unsigned int, int)
0.00% 50.591us 8 - 6.3230us 5.6960us 7.5520us [CUDA memcpy DtoH]
0.00% 12.864us 7 - 1.8370us 1.7280us 2.0160us [CUDA memset]
API calls: 57.40% 516.24ms 8 - 64.531ms 4.4770ms 86.437ms hipDeviceSynchronize
27.08% 243.56ms 4 - 60.890ms 467.40us 240.62ms hipMalloc
7.66% 68.855ms 18 - 3.8253ms 201.80us 7.3965ms hipMemcpy
7.40% 66.553ms 1 - 66.553ms 66.553ms 66.553ms hipDeviceReset
0.19% 1.6732ms 4 - 418.30us 170.00us 920.20us hipFree
0.11% 981.90us 8 - 122.74us 69.200us 227.20us cudaLaunchKernel
0.06% 575.00us 7 - 82.142us 22.600us 291.50us hipMemset
0.05% 463.20us 97 - 4.7750us 200ns 207.70us hipDeviceGetAttribute
0.04% 370.10us 1 - 370.10us 370.10us 370.10us hipGetDeviceProperties
0.00% 40.700us 1 - 40.700us 40.700us 40.700us cuDeviceTotalMem
0.00% 13.400us 1 - 13.400us 13.400us 13.400us hipDeviceGetPCIBusId
0.00% 12.900us 1 - 12.900us 12.900us 12.900us hipSetDevice
0.00% 6.7000us 8 - 837ns 500ns 1.0000us hipGetLastError
0.00% 6.3000us 2 - 3.1500us 400ns 5.9000us hipDeviceGet
0.00% 2.5000us 3 - 833ns 400ns 1.1000us hipGetDeviceCount
0.00% 1.5000us 1 - 1.5000us 1.5000us 1.5000us hipDeviceGetName
0.00% 800ns 1 - 800ns 800ns 800ns cuDeviceGetLuid
0.00% 500ns 1 - 500ns 500ns 500ns hipDeviceGetUuid
SkalarniOnCPU- Time elapsed 0.012840sec
skalarni <<<4096,1024>>> Time elapsed 0.051539sec
1677013280
1677013280
skalarniUnroll2 <<<4096,1024>>> Time elapsed 0.050108sec
1677013280
1677013280
skalarniUnroll4 <<<4096,1024>>> Time elapsed 0.046660sec
1677013280
1677013280
Skalarni2 <<<4096,1024>>> Time elapsed 0.040571sec
1677013280
1677013280
Skalarni3 <<<4096,1024>>> Time elapsed 0.046409sec
1677013280
1677013280
skalarni2Unroll2 <<<4096,1024>>> Time elapsed 0.040531sec
1677013280
1677013280
skalarni2Unroll4 <<<4096,1024>>> Time elapsed 0.040056sec
1677013280
1677013280
skalarni4 <<<4096,1024>>> Time elapsed 0.005426sec
1677013280
1677013280
==5908== Warning: 204532 records were dropped due to insufficient device buffer space. You can configure the buffer space using advanced options --device-buffer-size, --device-cdp-buffer-size
==5908== Profiling application: a 1024
==5908== Profiling result:
Type Time(%) Time Calls (host) Calls (device) Avg Min Max Name
GPU activities: 77.23% 1.59776s 0 62899 25.401us 1.2800us 183.58us gpuRecursiveReduceNosync(int*, int*, unsigned int)
9.38% 194.08ms 0 7005 27.706us 19.392us 176.99us skalarniPomGPU(int*, int*, unsigned int, unsigned int)
2.88% 59.628ms 10 - 5.9628ms 5.2801ms 6.6448ms [CUDA memcpy HtoD]
2.45% 50.593ms 1 0 50.593ms 50.593ms 50.593ms skalarni(int*, int*, int*, unsigned int)
2.38% 49.227ms 1 0 49.227ms 49.227ms 49.227ms skalarniUnroll2(int*, int*, int*, unsigned int)
2.21% 45.784ms 1 0 45.784ms 45.784ms 45.784ms skalarniUnroll4(int*, int*, int*, int*, unsigned int)
2.19% 45.308ms 1 0 45.308ms 45.308ms 45.308ms skalarni3(int*, int*, int*, int*, unsigned int)
0.72% 14.826ms 1 0 14.826ms 14.826ms 14.826ms skalarni2Unroll2(int*, int*, int*, int*, unsigned int, int)
0.41% 8.5484ms 1 0 8.5484ms 8.5484ms 8.5484ms skalarni2Unroll4(int*, int*, int*, int*, unsigned int, int)
0.08% 1.7003ms 1 0 1.7003ms 1.7003ms 1.7003ms skalarni4(int*, int*, int*, int*, unsigned int, int)
0.06% 1.3143ms 1 0 1.3143ms 1.3143ms 1.3143ms skalarni2(int*, int*, int*, int*, unsigned int, int)
0.00% 26.688us 8 - 3.3360us 2.9440us 4.0960us [CUDA memcpy DtoH]
0.00% 9.4720us 7 - 1.3530us 768ns 1.5680us [CUDA memset]
API calls: 45.30% 319.94ms 8 - 39.993ms 5.3326ms 51.447ms hipDeviceSynchronize
35.36% 249.76ms 4 - 62.439ms 546.70us 246.77ms hipMalloc
9.43% 66.636ms 18 - 3.7020ms 201.60us 7.1874ms hipMemcpy
9.29% 65.641ms 1 - 65.641ms 65.641ms 65.641ms hipDeviceReset
0.15% 1.0551ms 8 - 131.89us 70.000us 237.20us cudaLaunchKernel
0.14% 1.0145ms 7 - 144.93us 22.400us 656.30us hipMemset
0.13% 888.40us 4 - 222.10us 183.50us 309.20us hipFree
0.10% 687.90us 97 - 7.0910us 200ns 313.20us hipDeviceGetAttribute
0.08% 597.90us 1 - 597.90us 597.90us 597.90us hipGetDeviceProperties
0.01% 40.200us 1 - 40.200us 40.200us 40.200us cuDeviceTotalMem
0.00% 13.800us 1 - 13.800us 13.800us 13.800us hipDeviceGetPCIBusId
0.00% 12.800us 1 - 12.800us 12.800us 12.800us hipSetDevice
0.00% 7.7000us 8 - 962ns 900ns 1.1000us hipGetLastError
0.00% 7.6000us 2 - 3.8000us 400ns 7.2000us hipDeviceGet
0.00% 1.9000us 3 - 633ns 300ns 1.1000us hipGetDeviceCount
0.00% 1.4000us 1 - 1.4000us 1.4000us 1.4000us hipDeviceGetName
0.00% 700ns 1 - 700ns 700ns 700ns cuDeviceGetLuid
0.00% 500ns 1 - 500ns 500ns 500ns hipDeviceGetUuid
*/ | 93b49dcc2243b61e3c5bf556255a7d65c612ecc3.cu | #include "../common/common.h"
#include <stdio.h>
#include <cuda_runtime.h>
#include <stdlib.h>
#include "../common/stopwatch.h"
void initialData(int *in, const int size)
{
for (int i = 0; i < size; i++)
{
in[i] = (int)( rand() ) % 100; //100.0f;
}
return;
}
void printData(int *in, const int size)
{
for (int i = 0; i < size; i++)
{
printf("%dth element: %d\n", i, in[i]);
}
return;
}
__global__ void reduceNeighbored (int *g_idata, int *g_odata, unsigned int n)
{
// set thread ID
unsigned int tid = threadIdx.x;
unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
// convert global data pointer to the local pointer of this block
int *idata = g_idata + blockIdx.x * blockDim.x;
// boundary check
if (idx >= n) return;
// in-place reduction in global memory
for (int stride = 1; stride < blockDim.x; stride *= 2)
{
if ((tid % (2 * stride)) == 0)
{
idata[tid] += idata[tid + stride];
}
// synchronize within threadblock
__syncthreads();
}
// write result for this block to global mem
if (tid == 0) g_odata[blockIdx.x] = idata[0];
}
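// gpuRecursiveReduceNosync: in-place block reduction using CUDA dynamic parallelism.
// Each level adds the upper half of the block's data into the lower half, and thread 0
// launches a single-block child grid for the next (halved) level instead of calling
// __syncthreads(); recursion stops at isize == 2, where thread 0 writes the block's sum.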
__global__ void gpuRecursiveReduceNosync (int *g_idata, int *g_odata,
unsigned int isize)
{
// set thread ID
unsigned int tid = threadIdx.x;
// convert global data pointer to the local pointer of this block
int *idata = g_idata + blockIdx.x * blockDim.x;
int *odata = &g_odata[blockIdx.x];
// stop condition
if (isize == 2 && tid == 0)
{
g_odata[blockIdx.x] = idata[0] + idata[1];
return;
}
// nested invoke
int istride = isize >> 1;
if(istride > 1 && tid < istride)
{
idata[tid] += idata[tid + istride];
if(tid == 0)
{
gpuRecursiveReduceNosync<<<1, istride>>>(idata, odata, istride);
}
}
}
__global__ void skalarniPomGPU(int *g_idata, int *g_odata,unsigned int
bid, unsigned int isize)
{
// set thread ID
unsigned int tid = threadIdx.x;
// convert global data pointer to the local pointer of this block
int *idata = g_idata + bid * blockDim.x;
int *odata = &g_odata[bid];
int istride = isize >> 1;
if ( tid < istride)
{
idata[tid]+=idata[tid + istride];
if(tid==0)
{
gpuRecursiveReduceNosync<<<1,istride>>>(idata,odata,istride);
}
}
}
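// skalarni: dot product via dynamic parallelism. Each thread multiplies its element
// pair in place (A[i] *= B[i]); thread 0 of every block then launches skalarniPomGPU
// as a child grid, which reduces that block's products into g_odata[blockIdx.x].
// The per-block partial sums are added up on the host.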
__global__ void skalarni(int* A, int* B, int *g_odata,unsigned int N)
{
unsigned tid=threadIdx.x;
int i = blockIdx.x * blockDim.x + tid;
if(i<N) {
A[i]=A[i]*B[i];
}
// __syncthreads();
//__threadfence();
if(tid==0)
{
skalarniPomGPU<<<1,blockDim.x>>>(A,g_odata,blockIdx.x,blockDim.x);
}
}
__global__ void skalarniUnroll2(int* A, int* B, int *g_odata,unsigned int N)
{
unsigned tid=threadIdx.x;
unsigned int i = blockIdx.x * blockDim.x * 2 + tid;
if (i < N) A[i] = A[i] * B[i];
if (i + blockDim.x < N) {
A[i + blockDim.x] = A[i + blockDim.x] * B[i + blockDim.x];
}
if(tid==0)
{
skalarniPomGPU<<<1,blockDim.x>>>(A,g_odata,blockIdx.x,blockDim.x);
}
}
__global__ void skalarniUnroll4(int* A, int* B, int*C, int *g_odata,unsigned int N)
{
unsigned tid=threadIdx.x;
unsigned int i = blockIdx.x * blockDim.x * 4 + tid;
if (i < N) C[i] = A[i] * B[i];
if (i + blockDim.x < N) {
C[i + blockDim.x] = A[i + blockDim.x] * B[i + blockDim.x];
}
if (i + 2 * blockDim.x < N) {
C[i + 2 * blockDim.x] = A[i+ 2 * blockDim.x] * B[i + 2 * blockDim.x];
}
if (i+ 3 * blockDim.x < N) {
C[i + 3 * blockDim.x] = A[i + 3 * blockDim.x] * B[i + 3 * blockDim.x];
}
if(tid==0)
{
skalarniPomGPU<<<1,blockDim.x>>>(C,g_odata,blockIdx.x,blockDim.x);
}
}
__global__ void skalarni3(int* A, int* B, int* C, int *g_odata,unsigned int N)
{
unsigned tid=threadIdx.x;
int i = blockIdx.x * blockDim.x + tid;
if(i<N) {
C[i]=A[i]*B[i];
}
if(tid==0)
{
skalarniPomGPU<<<1,blockDim.x>>>(C,g_odata,blockIdx.x,blockDim.x);
}
}
/* The remaining kernels are, in my opinion, unsafe
because synchronization between blocks is not guaranteed.
They do, however, return a correct result for various array sizes.
*/
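/* Note (added): CUDA gives no execution-order guarantee between blocks of a grid, so a
child reduction launched from a single block (as skalarni2/skalarni4 do below) may start
before other blocks have finished writing their products; that is presumably the risk
referred to above. */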
/*
__global__ void skalarni2(int* A, int* B, int *g_odata,unsigned int N,int grid)
{
unsigned tid=threadIdx.x;
int i = blockIdx.x * blockDim.x + tid;
if(i<N) {
A[i]=A[i]*B[i];
}
if(blockIdx.x==0 && tid==0)
{
gpuRecursiveReduceNosync<<<grid,blockDim.x>>>(A,g_odata,blockDim.x);
}
} */
__global__ void skalarni2Unroll4(int* A, int* B,int* C, int *g_odata, const unsigned int N,int grid)
{
unsigned tid=threadIdx.x;
unsigned int i = blockIdx.x * blockDim.x * 4 + tid;
if (i < N) C[i] = A[i] * B[i];
if (i + blockDim.x < N) {
C[i + blockDim.x] = A[i + blockDim.x] * B[i + blockDim.x];
}
if (i + 2 * blockDim.x < N) {
C[i + 2 * blockDim.x] = A[i+ 2 * blockDim.x] * B[i + 2 * blockDim.x];
}
if (i+ 3 * blockDim.x < N) {
C[i + 3 * blockDim.x] = A[i + 3 * blockDim.x] * B[i + 3 * blockDim.x];
}
if(blockIdx.x==0 && tid == 0)
{
gpuRecursiveReduceNosync<<<grid,blockDim.x>>>(C,g_odata,blockDim.x);
}
}
__global__ void skalarni2Unroll2(int* A, int* B,int* C, int *g_odata, const unsigned int N,int grid)
{
unsigned tid=threadIdx.x;
unsigned int i = blockIdx.x * blockDim.x * 2 + tid;
if (i < N) C[i] = A[i] * B[i];
if (i + blockDim.x < N) {
C[i + blockDim.x] = A[i + blockDim.x] * B[i + blockDim.x];
}
if(blockIdx.x==0 && tid == 0)
{
gpuRecursiveReduceNosync<<<grid,blockDim.x>>>(C,g_odata,blockDim.x);
}
}
__global__ void skalarni2(int* A, int* B,int* C, int *g_odata, const unsigned int N,int grid)
{
unsigned tid=threadIdx.x;
unsigned int i = blockIdx.x * blockDim.x + tid;
if (i < N) C[i] = A[i] * B[i];
if(i==N-1)
{
gpuRecursiveReduceNosync<<<grid,blockDim.x>>>(C,g_odata,blockDim.x);
}
}
__global__ void skalarni4(int* A, int* B,int* C, int *g_odata, const unsigned int N,int grid)
{
unsigned tid=threadIdx.x;
unsigned int i = blockIdx.x * blockDim.x + tid;
if (i < N) C[i] = A[i] * B[i];
if(i==N-1)
{
reduceNeighbored<<<grid,blockDim.x>>>(C,g_odata,N);
}
}
int cpuSkalarni(int* in1, int* in2, int size)
{
int s=0;
for(int i=0; i<size; i++)
s+=in1[i]*in2[i];
return s;
}
int main(int argc, char **argv)
{
// set up device
int dev = 0;
cudaDeviceProp deviceProp;
CHECK(cudaGetDeviceProperties(&deviceProp, dev));
printf("%s starting reduction at ", argv[0]);
printf("device %d: %s ", dev, deviceProp.name);
CHECK(cudaSetDevice(dev));
int power=22;
int nthreads=512;
if (argc > 1)
{
nthreads = atoi(argv[1]);
}
if (argc > 2)
{
power = atoi(argv[2]);
}
int nElem = 1 << power;
printf("Vector size %d\n", nElem);
size_t nBytes = nElem * sizeof(int);
int *h_A, *h_B, *h_Out;
dim3 block(nthreads);
dim3 grid((nElem + block.x - 1) / block.x);
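// e.g. with the default nElem = 1 << 22 and 1024 threads per block this gives
// grid.x = 4096, matching the <<<4096,1024>>> launches in the profiles below.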
h_A = (int *)malloc(nBytes);
h_B = (int *)malloc(nBytes);
h_Out = (int *) malloc(grid.x*sizeof(int));
initialData(h_A, nElem);
initialData(h_B, nElem);
Stopwatch s;
int h_s = cpuSkalarni(h_A, h_B, nElem);
printf("\nSkalarniOnCPU- Time elapsed %fsec \n", s.elapsed());
//print(h_A, h_B, nElem);
int *d_A, *d_B, *d_C, *d_Out;
CHECK(cudaMalloc((int**)&d_A, nBytes));
CHECK(cudaMalloc((int**)&d_B, nBytes));
CHECK(cudaMalloc((int**)&d_C, nBytes));
CHECK(cudaMalloc((int**)&d_Out, grid.x*sizeof(int)));
//transfer data from host to device
CHECK(cudaMemcpy(d_A, h_A, nBytes, cudaMemcpyHostToDevice));
CHECK(cudaMemcpy(d_B, h_B, nBytes, cudaMemcpyHostToDevice));
//kernel launch from the host
//printData(h_A,nElem);
//printData(h_B,nElem);
s.reset();
skalarni <<<grid, block>>> (d_A, d_B, d_Out, nElem);
CHECK(cudaDeviceSynchronize());
CHECK(cudaGetLastError());
printf("\n skalarni <<<%d,%d>>> Time elapsed %f" \
"sec\n", grid.x, block.x, s.elapsed());
CHECK(cudaMemcpy(h_Out, d_Out, grid.x*sizeof(int), cudaMemcpyDeviceToHost));
int d_s=0;
for(int i=0;i<grid.x;i++) d_s+=h_Out[i];
printf(" %d",h_s);
printf("\n %d\n",d_s);
CHECK(cudaMemcpy(d_A, h_A, nBytes, cudaMemcpyHostToDevice));
CHECK(cudaMemcpy(d_B, h_B, nBytes, cudaMemcpyHostToDevice));
CHECK(cudaMemset(d_Out,0,grid.x*sizeof(int)));
s.reset();
skalarniUnroll2 <<<grid, block>>> (d_A, d_B, d_Out, nElem);
CHECK(cudaDeviceSynchronize());
CHECK(cudaGetLastError());
printf("\n skalarniUnroll2 <<<%d,%d>>> Time elapsed %f" \
"sec\n", grid.x, block.x, s.elapsed());
CHECK(cudaMemcpy(h_Out, d_Out, grid.x*sizeof(int), cudaMemcpyDeviceToHost));
d_s=0;
for(int i=0;i<grid.x;i++) d_s+=h_Out[i];
printf(" %d",h_s);
printf("\n %d\n",d_s);
CHECK(cudaMemcpy(d_A, h_A, nBytes, cudaMemcpyHostToDevice));
CHECK(cudaMemcpy(d_B, h_B, nBytes, cudaMemcpyHostToDevice));
CHECK(cudaMemset(d_Out,0,grid.x*sizeof(int)));
s.reset();
skalarniUnroll4 <<<grid, block>>> (d_A, d_B,d_C, d_Out, nElem);
CHECK(cudaDeviceSynchronize());
CHECK(cudaGetLastError());
printf("\n skalarniUnroll4 <<<%d,%d>>> Time elapsed %f" \
"sec\n", grid.x, block.x, s.elapsed());
CHECK(cudaMemcpy(h_Out, d_Out, grid.x*sizeof(int), cudaMemcpyDeviceToHost));
d_s=0;
for(int i=0;i<grid.x;i++) d_s+=h_Out[i];
printf(" %d",h_s);
printf("\n %d\n",d_s);
CHECK(cudaMemcpy(d_A, h_A, nBytes, cudaMemcpyHostToDevice));
CHECK(cudaMemcpy(d_B, h_B, nBytes, cudaMemcpyHostToDevice));
CHECK(cudaMemset(d_Out,0,grid.x*sizeof(int)));
s.reset();
skalarni2<<<grid,block>>>(d_A,d_B,d_C,d_Out,nElem,grid.x);
CHECK(cudaDeviceSynchronize());
CHECK(cudaGetLastError());
printf("\n Skalarni2 <<<%d,%d>>> Time elapsed %f" \
"sec\n", grid.x, block.x, s.elapsed());
// copy the kernel result back to the host
CHECK(cudaMemcpy(h_Out, d_Out, grid.x*sizeof(int), cudaMemcpyDeviceToHost));
d_s=0;
for(int i=0;i<grid.x;i++) d_s+=h_Out[i];
printf(" %d",h_s);
printf("\n %d\n",d_s);
CHECK(cudaMemcpy(d_A, h_A, nBytes, cudaMemcpyHostToDevice));
CHECK(cudaMemcpy(d_B, h_B, nBytes, cudaMemcpyHostToDevice));
CHECK(cudaMemset(d_Out,0,grid.x*sizeof(int)));
s.reset();
skalarni3<<<grid,block>>>(d_A,d_B,d_C,d_Out,nElem);
CHECK(cudaDeviceSynchronize());
CHECK(cudaGetLastError());
printf("\n Skalarni3 <<<%d,%d>>> Time elapsed %f" \
"sec\n", grid.x, block.x, s.elapsed());
// copy the kernel result back to the host
CHECK(cudaMemcpy(h_Out, d_Out, grid.x*sizeof(int), cudaMemcpyDeviceToHost));
d_s=0;
for(int i=0;i<grid.x;i++) d_s+=h_Out[i];
printf(" %d",h_s);
printf("\n %d\n",d_s);
CHECK(cudaMemset(d_Out,0,grid.x*sizeof(int)));
s.reset();
skalarni2Unroll2<<<grid.x/2,block>>>(d_A,d_B,d_C,d_Out,nElem,grid.x);
CHECK(cudaDeviceSynchronize());
CHECK(cudaGetLastError());
printf("\n skalarni2Unroll2 <<<%d,%d>>> Time elapsed %f" \
"sec\n", grid.x, block.x, s.elapsed());
// copy the kernel result back to the host
CHECK(cudaMemcpy(h_Out, d_Out, grid.x*sizeof(int), cudaMemcpyDeviceToHost));
d_s=0;
for(int i=0;i<grid.x;i++)
{
d_s+=h_Out[i];
// printf("\t %d",h_Out[i]);
}
printf(" %d",h_s);
printf("\n %d \n",d_s);
CHECK(cudaMemset(d_Out,0,grid.x*sizeof(int)));
s.reset();
skalarni2Unroll4<<<grid.x/4,block>>>(d_A,d_B,d_C,d_Out,nElem,grid.x);
CHECK(cudaDeviceSynchronize());
CHECK(cudaGetLastError());
printf("\n skalarni2Unroll4 <<<%d,%d>>> Time elapsed %f" \
"sec\n", grid.x, block.x, s.elapsed());
// copy the kernel result back to the host
CHECK(cudaMemcpy(h_Out, d_Out, grid.x*sizeof(int), cudaMemcpyDeviceToHost));
d_s=0;
for(int i=0;i<grid.x;i++) d_s+=h_Out[i];
printf(" %d",h_s);
printf("\n %d \n",d_s);
CHECK(cudaMemset(d_Out,0,grid.x*sizeof(int)));
s.reset();
skalarni4<<<grid,block>>>(d_A,d_B,d_C,d_Out,nElem,grid.x);
CHECK(cudaDeviceSynchronize());
CHECK(cudaGetLastError());
memset(h_Out,0,grid.x*sizeof(int));
printf("\n skalarni4 <<<%d,%d>>> Time elapsed %f" \
"sec\n", grid.x, block.x, s.elapsed());
// copy the kernel result back to the host
CHECK(cudaMemcpy(h_Out, d_Out, grid.x*sizeof(int), cudaMemcpyDeviceToHost));
d_s=0;
for(int i=0;i<grid.x;i++) d_s+=h_Out[i];
printf(" %d",h_s);
printf("\n %d \n",d_s);
cudaFree(d_A);
cudaFree(d_B);
cudaFree(d_C);
cudaFree(d_Out);
free(h_A);
free(h_B);
free(h_Out);
cudaDeviceReset();
return(0);
}
/*
GeForce GTX 1050
SkalarniOnCPU- Time elapsed 0.012361sec
skalarni <<<131072,32>>> Time elapsed 0.645046sec
1677013280
1677013280
skalarniUnroll2 <<<131072,32>>> Time elapsed 0.637585sec
1677013280
1677013280
skalarniUnroll4 <<<131072,32>>> Time elapsed 0.637881sec
1677013280
1677013280
Skalarni2 <<<131072,32>>> Time elapsed 0.429906sec
1677013280
1677013280
Skalarni3 <<<131072,32>>> Time elapsed 0.636618sec
1677013280
1677013280
skalarni2Unroll2 <<<131072,32>>> Time elapsed 0.429518sec
1677013280
1677013280
skalarni2Unroll4 <<<131072,32>>> Time elapsed 0.428573sec
1677013280
1677013280
skalarni4 <<<131072,32>>> Time elapsed 0.009061sec
1677013280
1677013280
==1524== Warning: 4124404 records were dropped due to insufficient device buffer space. You can configure the buffer space using advanced options --device-buffer-size, --device-cdp-buffer-size
==1524== Profiling application: a 32
==1524== Profiling result:
Type Time(%) Time Calls (host) Calls (device) Avg Min Max Name
GPU activities: 29.36% 1.28975s 0 55872 23.084us 1.1840us 107.42us gpuRecursiveReduceNosync(int*, int*, unsigned int)
14.66% 643.99ms 1 0 643.99ms 643.99ms 643.99ms skalarni(int*, int*, int*, unsigned int)
14.50% 637.14ms 1 0 637.14ms 637.14ms 637.14ms skalarniUnroll4(int*, int*, int*, int*, unsigned int)
14.50% 636.85ms 1 0 636.85ms 636.85ms 636.85ms skalarniUnroll2(int*, int*, int*, unsigned int)
14.48% 635.87ms 1 0 635.87ms 635.87ms 635.87ms skalarni3(int*, int*, int*, int*, unsigned int)
9.25% 406.42ms 0 14032 28.963us 16.992us 107.20us skalarniPomGPU(int*, int*, unsigned int, unsigned int)
1.37% 60.250ms 10 - 6.0250ms 5.4468ms 6.5732ms [CUDA memcpy HtoD]
1.08% 47.394ms 1 0 47.394ms 47.394ms 47.394ms skalarni2Unroll2(int*, int*, int*, int*, unsigned int, int)
0.65% 28.343ms 1 0 28.343ms 28.343ms 28.343ms skalarni2Unroll4(int*, int*, int*, int*, unsigned int, int)
0.07% 3.1848ms 1 0 3.1848ms 3.1848ms 3.1848ms skalarni2(int*, int*, int*, int*, unsigned int, int)
0.07% 2.9479ms 1 0 2.9479ms 2.9479ms 2.9479ms skalarni4(int*, int*, int*, int*, unsigned int, int)
0.02% 663.80us 8 - 82.975us 78.719us 86.271us [CUDA memcpy DtoH]
0.00% 4.8960us 7 - 699ns 640ns 896ns [CUDA memset]
API calls: 90.80% 3.85326s 8 - 481.66ms 8.7100ms 644.96ms cudaDeviceSynchronize
5.88% 249.34ms 4 - 62.335ms 557.60us 246.32ms cudaMalloc
1.66% 70.373ms 18 - 3.9096ms 394.70us 7.0672ms cudaMemcpy
1.58% 66.956ms 1 - 66.956ms 66.956ms 66.956ms cudaDeviceReset
0.02% 1.0196ms 4 - 254.90us 153.00us 521.80us cudaFree
0.02% 787.80us 7 - 112.54us 51.700us 238.60us cudaMemset
0.02% 692.10us 97 - 7.1350us 200ns 322.30us cuDeviceGetAttribute
0.01% 598.30us 1 - 598.30us 598.30us 598.30us cudaGetDeviceProperties
0.01% 533.80us 8 - 66.725us 37.700us 218.10us cudaLaunchKernel
0.00% 44.000us 1 - 44.000us 44.000us 44.000us cuDeviceTotalMem
0.00% 13.300us 1 - 13.300us 13.300us 13.300us cuDeviceGetPCIBusId
0.00% 12.300us 1 - 12.300us 12.300us 12.300us cudaSetDevice
0.00% 7.5000us 8 - 937ns 800ns 1.0000us cudaGetLastError
0.00% 6.8000us 2 - 3.4000us 400ns 6.4000us cuDeviceGet
0.00% 2.1000us 3 - 700ns 400ns 1.1000us cuDeviceGetCount
0.00% 1.3000us 1 - 1.3000us 1.3000us 1.3000us cuDeviceGetName
0.00% 500ns 1 - 500ns 500ns 500ns cuDeviceGetUuid
0.00% 500ns 1 - 500ns 500ns 500ns cuDeviceGetLuid
SkalarniOnCPU- Time elapsed 0.012476sec
skalarni <<<16384,256>>> Time elapsed 0.140732sec
1677013280
1677013280
skalarniUnroll2 <<<16384,256>>> Time elapsed 0.130778sec
1677013280
1677013280
skalarniUnroll4 <<<16384,256>>> Time elapsed 0.132044sec
1677013280
1677013280
Skalarni2 <<<16384,256>>> Time elapsed 0.105880sec
1677013280
1677013280
Skalarni3 <<<16384,256>>> Time elapsed 0.131352sec
1677013280
1677013280
skalarni2Unroll2 <<<16384,256>>> Time elapsed 0.105701sec
1677013280
1677013280
skalarni2Unroll4 <<<16384,256>>> Time elapsed 0.105816sec
1677013280
1677013280
skalarni4 <<<16384,256>>> Time elapsed 0.004052sec
1677013280
1677013280
==1240== Warning: 798452 records were dropped due to insufficient device buffer space. You can configure the buffer space using advanced options --device-buffer-size, --device-cdp-buffer-size
==1240== Profiling application: a 256
==1240== Profiling result:
Type Time(%) Time Calls (host) Calls (device) Avg Min Max Name
GPU activities: 62.92% 1.50742s 0 61145 24.653us 1.2160us 64.927us gpuRecursiveReduceNosync(int*, int*, unsigned int)
10.36% 248.27ms 0 8759 28.344us 18.336us 61.824us skalarniPomGPU(int*, int*, unsigned int, unsigned int)
5.83% 139.69ms 1 0 139.69ms 139.69ms 139.69ms skalarni(int*, int*, int*, unsigned int)
5.47% 131.12ms 1 0 131.12ms 131.12ms 131.12ms skalarniUnroll4(int*, int*, int*, int*, unsigned int)
5.44% 130.34ms 1 0 130.34ms 130.34ms 130.34ms skalarni3(int*, int*, int*, int*, unsigned int)
5.43% 130.00ms 1 0 130.00ms 130.00ms 130.00ms skalarniUnroll2(int*, int*, int*, unsigned int)
2.55% 61.031ms 10 - 6.1031ms 5.6811ms 6.6876ms [CUDA memcpy HtoD]
1.24% 29.623ms 1 0 29.623ms 29.623ms 29.623ms skalarni2Unroll2(int*, int*, int*, int*, unsigned int, int)
0.65% 15.552ms 1 0 15.552ms 15.552ms 15.552ms skalarni2Unroll4(int*, int*, int*, int*, unsigned int, int)
0.05% 1.2330ms 1 0 1.2330ms 1.2330ms 1.2330ms skalarni4(int*, int*, int*, int*, unsigned int, int)
0.05% 1.2298ms 1 0 1.2298ms 1.2298ms 1.2298ms skalarni2(int*, int*, int*, int*, unsigned int, int)
0.00% 87.327us 8 - 10.915us 10.560us 11.360us [CUDA memcpy DtoH]
0.00% 21.023us 7 - 3.0030us 2.7190us 3.3920us [CUDA memset]
API calls: 68.90% 855.15ms 8 - 106.89ms 3.9697ms 140.64ms cudaDeviceSynchronize
19.61% 243.44ms 4 - 60.859ms 583.20us 240.45ms cudaMalloc
5.62% 69.803ms 18 - 3.8779ms 244.70us 7.1879ms cudaMemcpy
5.56% 69.051ms 1 - 69.051ms 69.051ms 69.051ms cudaDeviceReset
0.10% 1.2515ms 4 - 312.88us 223.60us 429.20us cudaFree
0.09% 1.0970ms 8 - 137.13us 69.600us 301.10us cudaLaunchKernel
0.04% 497.70us 97 - 5.1300us 200ns 215.70us cuDeviceGetAttribute
0.03% 372.80us 7 - 53.257us 21.900us 204.30us cudaMemset
0.03% 370.70us 1 - 370.70us 370.70us 370.70us cudaGetDeviceProperties
0.00% 41.700us 1 - 41.700us 41.700us 41.700us cuDeviceTotalMem
0.00% 13.400us 1 - 13.400us 13.400us 13.400us cuDeviceGetPCIBusId
0.00% 12.400us 1 - 12.400us 12.400us 12.400us cudaSetDevice
0.00% 6.6000us 2 - 3.3000us 500ns 6.1000us cuDeviceGet
0.00% 6.5000us 8 - 812ns 500ns 1.1000us cudaGetLastError
0.00% 2.4000us 3 - 800ns 300ns 1.1000us cuDeviceGetCount
0.00% 1.4000us 1 - 1.4000us 1.4000us 1.4000us cuDeviceGetName
0.00% 800ns 1 - 800ns 800ns 800ns cuDeviceGetLuid
0.00% 500ns 1 - 500ns 500ns 500ns cuDeviceGetUuid
SkalarniOnCPU- Time elapsed 0.012613sec
skalarni <<<8192,512>>> Time elapsed 0.086526sec
1677013280
1677013280
skalarniUnroll2 <<<8192,512>>> Time elapsed 0.076569sec
1677013280
1677013280
skalarniUnroll4 <<<8192,512>>> Time elapsed 0.076975sec
1677013280
1677013280
Skalarni2 <<<8192,512>>> Time elapsed 0.065407sec
1677013280
1677013280
Skalarni3 <<<8192,512>>> Time elapsed 0.076832sec
1677013280
1677013280
skalarni2Unroll2 <<<8192,512>>> Time elapsed 0.065597sec
1677013280
1677013280
skalarni2Unroll4 <<<8192,512>>> Time elapsed 0.064851sec
1677013280
1677013280
skalarni4 <<<8192,512>>> Time elapsed 0.004588sec
1677013280
1677013280
==3456== Warning: 421620 records were dropped due to insufficient device buffer space. You can configure the buffer space using advanced options --device-buffer-size, --device-cdp-buffer-size
==3456== Profiling application: a 512
==3456== Profiling result:
Type Time(%) Time Calls (host) Calls (device) Avg Min Max Name
GPU activities: 71.08% 1.54750s 0 62110 24.915us 1.2160us 50.400us gpuRecursiveReduceNosync(int*, int*, unsigned int)
9.79% 213.13ms 0 7794 27.344us 19.136us 47.711us skalarniPomGPU(int*, int*, unsigned int, unsigned int)
3.93% 85.534ms 1 0 85.534ms 85.534ms 85.534ms skalarni(int*, int*, int*, unsigned int)
3.50% 76.160ms 1 0 76.160ms 76.160ms 76.160ms skalarniUnroll4(int*, int*, int*, int*, unsigned int)
3.49% 76.075ms 1 0 76.075ms 76.075ms 76.075ms skalarni3(int*, int*, int*, int*, unsigned int)
3.48% 75.695ms 1 0 75.695ms 75.695ms 75.695ms skalarniUnroll2(int*, int*, int*, unsigned int)
2.76% 59.995ms 10 - 5.9995ms 5.4042ms 6.7181ms [CUDA memcpy HtoD]
1.20% 26.159ms 1 0 26.159ms 26.159ms 26.159ms skalarni2Unroll2(int*, int*, int*, int*, unsigned int, int)
0.63% 13.655ms 1 0 13.655ms 13.655ms 13.655ms skalarni2Unroll4(int*, int*, int*, int*, unsigned int, int)
0.09% 1.9138ms 1 0 1.9138ms 1.9138ms 1.9138ms skalarni2(int*, int*, int*, int*, unsigned int, int)
0.06% 1.2708ms 1 0 1.2708ms 1.2708ms 1.2708ms skalarni4(int*, int*, int*, int*, unsigned int, int)
0.00% 50.591us 8 - 6.3230us 5.6960us 7.5520us [CUDA memcpy DtoH]
0.00% 12.864us 7 - 1.8370us 1.7280us 2.0160us [CUDA memset]
API calls: 57.40% 516.24ms 8 - 64.531ms 4.4770ms 86.437ms cudaDeviceSynchronize
27.08% 243.56ms 4 - 60.890ms 467.40us 240.62ms cudaMalloc
7.66% 68.855ms 18 - 3.8253ms 201.80us 7.3965ms cudaMemcpy
7.40% 66.553ms 1 - 66.553ms 66.553ms 66.553ms cudaDeviceReset
0.19% 1.6732ms 4 - 418.30us 170.00us 920.20us cudaFree
0.11% 981.90us 8 - 122.74us 69.200us 227.20us cudaLaunchKernel
0.06% 575.00us 7 - 82.142us 22.600us 291.50us cudaMemset
0.05% 463.20us 97 - 4.7750us 200ns 207.70us cuDeviceGetAttribute
0.04% 370.10us 1 - 370.10us 370.10us 370.10us cudaGetDeviceProperties
0.00% 40.700us 1 - 40.700us 40.700us 40.700us cuDeviceTotalMem
0.00% 13.400us 1 - 13.400us 13.400us 13.400us cuDeviceGetPCIBusId
0.00% 12.900us 1 - 12.900us 12.900us 12.900us cudaSetDevice
0.00% 6.7000us 8 - 837ns 500ns 1.0000us cudaGetLastError
0.00% 6.3000us 2 - 3.1500us 400ns 5.9000us cuDeviceGet
0.00% 2.5000us 3 - 833ns 400ns 1.1000us cuDeviceGetCount
0.00% 1.5000us 1 - 1.5000us 1.5000us 1.5000us cuDeviceGetName
0.00% 800ns 1 - 800ns 800ns 800ns cuDeviceGetLuid
0.00% 500ns 1 - 500ns 500ns 500ns cuDeviceGetUuid
SkalarniOnCPU- Time elapsed 0.012840sec
skalarni <<<4096,1024>>> Time elapsed 0.051539sec
1677013280
1677013280
skalarniUnroll2 <<<4096,1024>>> Time elapsed 0.050108sec
1677013280
1677013280
skalarniUnroll4 <<<4096,1024>>> Time elapsed 0.046660sec
1677013280
1677013280
Skalarni2 <<<4096,1024>>> Time elapsed 0.040571sec
1677013280
1677013280
Skalarni3 <<<4096,1024>>> Time elapsed 0.046409sec
1677013280
1677013280
skalarni2Unroll2 <<<4096,1024>>> Time elapsed 0.040531sec
1677013280
1677013280
skalarni2Unroll4 <<<4096,1024>>> Time elapsed 0.040056sec
1677013280
1677013280
skalarni4 <<<4096,1024>>> Time elapsed 0.005426sec
1677013280
1677013280
==5908== Warning: 204532 records were dropped due to insufficient device buffer space. You can configure the buffer space using advanced options --device-buffer-size, --device-cdp-buffer-size
==5908== Profiling application: a 1024
==5908== Profiling result:
Type Time(%) Time Calls (host) Calls (device) Avg Min Max Name
GPU activities: 77.23% 1.59776s 0 62899 25.401us 1.2800us 183.58us gpuRecursiveReduceNosync(int*, int*, unsigned int)
9.38% 194.08ms 0 7005 27.706us 19.392us 176.99us skalarniPomGPU(int*, int*, unsigned int, unsigned int)
2.88% 59.628ms 10 - 5.9628ms 5.2801ms 6.6448ms [CUDA memcpy HtoD]
2.45% 50.593ms 1 0 50.593ms 50.593ms 50.593ms skalarni(int*, int*, int*, unsigned int)
2.38% 49.227ms 1 0 49.227ms 49.227ms 49.227ms skalarniUnroll2(int*, int*, int*, unsigned int)
2.21% 45.784ms 1 0 45.784ms 45.784ms 45.784ms skalarniUnroll4(int*, int*, int*, int*, unsigned int)
2.19% 45.308ms 1 0 45.308ms 45.308ms 45.308ms skalarni3(int*, int*, int*, int*, unsigned int)
0.72% 14.826ms 1 0 14.826ms 14.826ms 14.826ms skalarni2Unroll2(int*, int*, int*, int*, unsigned int, int)
0.41% 8.5484ms 1 0 8.5484ms 8.5484ms 8.5484ms skalarni2Unroll4(int*, int*, int*, int*, unsigned int, int)
0.08% 1.7003ms 1 0 1.7003ms 1.7003ms 1.7003ms skalarni4(int*, int*, int*, int*, unsigned int, int)
0.06% 1.3143ms 1 0 1.3143ms 1.3143ms 1.3143ms skalarni2(int*, int*, int*, int*, unsigned int, int)
0.00% 26.688us 8 - 3.3360us 2.9440us 4.0960us [CUDA memcpy DtoH]
0.00% 9.4720us 7 - 1.3530us 768ns 1.5680us [CUDA memset]
API calls: 45.30% 319.94ms 8 - 39.993ms 5.3326ms 51.447ms cudaDeviceSynchronize
35.36% 249.76ms 4 - 62.439ms 546.70us 246.77ms cudaMalloc
9.43% 66.636ms 18 - 3.7020ms 201.60us 7.1874ms cudaMemcpy
9.29% 65.641ms 1 - 65.641ms 65.641ms 65.641ms cudaDeviceReset
0.15% 1.0551ms 8 - 131.89us 70.000us 237.20us cudaLaunchKernel
0.14% 1.0145ms 7 - 144.93us 22.400us 656.30us cudaMemset
0.13% 888.40us 4 - 222.10us 183.50us 309.20us cudaFree
0.10% 687.90us 97 - 7.0910us 200ns 313.20us cuDeviceGetAttribute
0.08% 597.90us 1 - 597.90us 597.90us 597.90us cudaGetDeviceProperties
0.01% 40.200us 1 - 40.200us 40.200us 40.200us cuDeviceTotalMem
0.00% 13.800us 1 - 13.800us 13.800us 13.800us cuDeviceGetPCIBusId
0.00% 12.800us 1 - 12.800us 12.800us 12.800us cudaSetDevice
0.00% 7.7000us 8 - 962ns 900ns 1.1000us cudaGetLastError
0.00% 7.6000us 2 - 3.8000us 400ns 7.2000us cuDeviceGet
0.00% 1.9000us 3 - 633ns 300ns 1.1000us cuDeviceGetCount
0.00% 1.4000us 1 - 1.4000us 1.4000us 1.4000us cuDeviceGetName
0.00% 700ns 1 - 700ns 700ns 700ns cuDeviceGetLuid
0.00% 500ns 1 - 500ns 500ns 500ns cuDeviceGetUuid
*/ |
3f04f185969b061cd81a946c5de386880d1a9ac3.hip | // !!! This is a file automatically generated by hipify!!!
// Includes
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <hip/hip_runtime.h>
// includes CUDA
#include <hip/hip_runtime.h>
#define MAX_THREADS_PER_BLOCK 256
#define LINE_SIZE 128
#define SETS 16
#define ASSOC 24
// Variables
int no_of_nodes;
int edge_list_size;
FILE *fp;
//Structure to hold a node information
struct Node
{
int starting;
int no_of_edges;
};
// Functions
void CleanupResources(void);
void RandomInit(int*, int);
void ParseArguments(int, char**);
////////////////////////////////////////////////////////////////////////////////
// These are CUDA Helper functions
// This will output the proper CUDA error strings in the event that a CUDA host call returns an error
#define checkCudaErrors(err) __checkCudaErrors (err, __FILE__, __LINE__)
inline void __checkCudaErrors(hipError_t err, const char *file, const int line ){
if(hipSuccess != err){
fprintf(stderr, "%s(%i) : CUDA Runtime API error %d: %s.\n",file, line, (int)err, hipGetErrorString( err ) );
exit(-1);
}
}
// This will output the proper error string when calling hipGetLastError
#define getLastCudaError(msg) __getLastCudaError (msg, __FILE__, __LINE__)
inline void __getLastCudaError(const char *errorMessage, const char *file, const int line ){
hipError_t err = hipGetLastError();
if (hipSuccess != err){
fprintf(stderr, "%s(%i) : getLastCudaError() CUDA error : %s : (%d) %s.\n",file, line, errorMessage, (int)err, hipGetErrorString( err ) );
exit(-1);
}
}
// end of CUDA Helper Functions
// Device code
texture<float,1,hipReadModeElementType> texmem1;
texture<float,1,hipReadModeElementType> texmem2;
texture<float,1,hipReadModeElementType> texmem3;
texture<float,1,hipReadModeElementType> texmem4;
texture<float,1,hipReadModeElementType> texmem5;
texture<float,1,hipReadModeElementType> texmem6;
texture<float,1,hipReadModeElementType> texmem7;
texture<float,1,hipReadModeElementType> texmem9;
texture<float,1,hipReadModeElementType> texmem8;
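// tex_bm_kernel: each thread repeatedly fetches its own element from five of the bound
// 1D textures and chains the values through a small dependency so the fetches cannot be
// optimized away; the accumulated result is written to out[tid]. The program uses this
// purely as a texture-read power/stress microbenchmark.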
__global__ void tex_bm_kernel( float* out, unsigned size, int iterations)
{
int tid = blockIdx.x*MAX_THREADS_PER_BLOCK + threadIdx.x;
float Value=0;float Value1=0;float Value2=0;float Value3=0;float Value4=0;float Value5=0;
if(tid < size){
for(unsigned i=0; i<iterations; ++i){
Value1 = tex1Dfetch(texmem1,tid) + Value5;
Value2 = tex1Dfetch(texmem2,tid) + Value4;
Value3 = tex1Dfetch(texmem3,tid) + Value3;
Value4 = tex1Dfetch(texmem4,tid) + Value1;
Value5 = tex1Dfetch(texmem5,tid) + Value2;
Value+=i+Value5+Value3;
}
}
__syncthreads();
out[tid]=Value;
}
////////////////////////////////////////////////////////////////////////////////
// Main Program
////////////////////////////////////////////////////////////////////////////////
int main(int argc, char** argv)
{
int iterations;
if (argc != 2){
fprintf(stderr,"usage: %s #iterations\n",argv[0]);
exit(1);
}
else{
iterations = atoi(argv[1]);
}
printf("Power Microbenchmark with %d iterations\n",iterations);
unsigned texmem_size = LINE_SIZE*SETS*ASSOC;
float *host_texture1 = (float*) malloc(texmem_size*sizeof(float));
for (int i=0; i< texmem_size; i++) {
host_texture1[i] = i;
}
float *device_texture1;
float *device_texture2;
float *device_texture3;
float *device_texture4;
float *device_texture5;
float *device_texture6;
float *device_texture7;
float *device_texture8;
float *device_texture9;
float *host_out = (float*) malloc(texmem_size*sizeof(float)*10);
float *device_out;
hipMalloc((void**) &device_texture1, texmem_size*sizeof(float));
hipMalloc((void**) &device_texture2, texmem_size*sizeof(float));
hipMalloc((void**) &device_texture3, texmem_size*sizeof(float));
hipMalloc((void**) &device_texture4, texmem_size*sizeof(float));
hipMalloc((void**) &device_texture5, texmem_size*sizeof(float));
hipMalloc((void**) &device_texture6, texmem_size*sizeof(float));
hipMalloc((void**) &device_texture7, texmem_size*sizeof(float));
hipMalloc((void**) &device_texture8, texmem_size*sizeof(float));
hipMalloc((void**) &device_texture9, texmem_size*sizeof(float));
hipMalloc((void**) &device_out, texmem_size*sizeof(float)*10);
hipMemcpy(device_texture1, host_texture1, texmem_size*sizeof(float), hipMemcpyHostToDevice);
hipMemcpy(device_texture2, host_texture1, texmem_size*sizeof(float), hipMemcpyHostToDevice);
hipMemcpy(device_texture3, host_texture1, texmem_size*sizeof(float), hipMemcpyHostToDevice);
hipMemcpy(device_texture4, host_texture1, texmem_size*sizeof(float), hipMemcpyHostToDevice);
hipMemcpy(device_texture5, host_texture1, texmem_size*sizeof(float), hipMemcpyHostToDevice);
hipMemcpy(device_texture6, host_texture1, texmem_size*sizeof(float), hipMemcpyHostToDevice);
hipMemcpy(device_texture7, host_texture1, texmem_size*sizeof(float), hipMemcpyHostToDevice);
hipMemcpy(device_texture8, host_texture1, texmem_size*sizeof(float), hipMemcpyHostToDevice);
hipMemcpy(device_texture9, host_texture1, texmem_size*sizeof(float), hipMemcpyHostToDevice);
hipBindTexture(0, texmem1, device_texture1, texmem_size*sizeof(float));
hipBindTexture(0, texmem2, device_texture2, texmem_size*sizeof(float));
hipBindTexture(0, texmem3, device_texture3, texmem_size*sizeof(float));
hipBindTexture(0, texmem4, device_texture4, texmem_size*sizeof(float));
hipBindTexture(0, texmem5, device_texture5, texmem_size*sizeof(float));
hipBindTexture(0, texmem6, device_texture6, texmem_size*sizeof(float));
hipBindTexture(0, texmem7, device_texture7, texmem_size*sizeof(float));
hipBindTexture(0, texmem8, device_texture8, texmem_size*sizeof(float));
hipBindTexture(0, texmem9, device_texture9, texmem_size*sizeof(float));
unsigned num_blocks = (texmem_size / MAX_THREADS_PER_BLOCK) + 1;
dim3 grid( num_blocks, 1, 1);
dim3 threads( MAX_THREADS_PER_BLOCK, 1, 1);
hipEvent_t start, stop;
float elapsedTime = 0;
checkCudaErrors(hipEventCreate(&start));
checkCudaErrors(hipEventCreate(&stop));
checkCudaErrors(hipEventRecord(start));
hipLaunchKernelGGL(( tex_bm_kernel), dim3(grid), dim3(threads), 0 , 0, device_out, texmem_size, iterations);
checkCudaErrors(hipEventRecord(stop));
checkCudaErrors(hipEventSynchronize(stop));
checkCudaErrors(hipEventElapsedTime(&elapsedTime, start, stop));
printf("gpu execution time = %.2f s\n", elapsedTime/1000);
getLastCudaError("kernel launch failure");
hipDeviceSynchronize();
hipMemcpy(host_out, device_out, texmem_size*sizeof(float), hipMemcpyDeviceToHost);
checkCudaErrors(hipEventDestroy(start));
checkCudaErrors(hipEventDestroy(stop));
hipUnbindTexture(texmem1);
hipUnbindTexture(texmem2);
hipUnbindTexture(texmem3);
hipUnbindTexture(texmem4);
hipUnbindTexture(texmem5);
hipUnbindTexture(texmem6);
hipUnbindTexture(texmem7);
hipUnbindTexture(texmem8);
hipUnbindTexture(texmem9);
/*
printf("Output: ");
float error = false;
for (int i=0; i< texmem_size; i++){
printf("%.1f ", host_out[i]);
if (host_out[i] - i > 0.0001) error = true;
}
printf("\n");
if (error) printf("\nFAILED\n");
else printf("\nPASSED\n");
*/
return 0;
}
void CleanupResources(void){
// Free device memory
}
// Allocates an array with random integer entries.
void RandomInit(int* data, int n){
for (int i = 0; i < n; ++i)
data[i] = rand() % 100; // note: the original rand() / RAND_MAX truncates to 0 in integer arithmetic
}
| 3f04f185969b061cd81a946c5de386880d1a9ac3.cu | // Includes
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <cuda.h>
// includes CUDA
#include <cuda_runtime.h>
#define MAX_THREADS_PER_BLOCK 256
#define LINE_SIZE 128
#define SETS 16
#define ASSOC 24
// Variables
int no_of_nodes;
int edge_list_size;
FILE *fp;
//Structure to hold a node information
struct Node
{
int starting;
int no_of_edges;
};
// Functions
void CleanupResources(void);
void RandomInit(int*, int);
void ParseArguments(int, char**);
////////////////////////////////////////////////////////////////////////////////
// These are CUDA Helper functions
// This will output the proper CUDA error strings in the event that a CUDA host call returns an error
#define checkCudaErrors(err) __checkCudaErrors (err, __FILE__, __LINE__)
inline void __checkCudaErrors(cudaError err, const char *file, const int line ){
if(cudaSuccess != err){
fprintf(stderr, "%s(%i) : CUDA Runtime API error %d: %s.\n",file, line, (int)err, cudaGetErrorString( err ) );
exit(-1);
}
}
// This will output the proper error string when calling cudaGetLastError
#define getLastCudaError(msg) __getLastCudaError (msg, __FILE__, __LINE__)
inline void __getLastCudaError(const char *errorMessage, const char *file, const int line ){
cudaError_t err = cudaGetLastError();
if (cudaSuccess != err){
fprintf(stderr, "%s(%i) : getLastCudaError() CUDA error : %s : (%d) %s.\n",file, line, errorMessage, (int)err, cudaGetErrorString( err ) );
exit(-1);
}
}
// end of CUDA Helper Functions
// Device code
texture<float,1,cudaReadModeElementType> texmem1;
texture<float,1,cudaReadModeElementType> texmem2;
texture<float,1,cudaReadModeElementType> texmem3;
texture<float,1,cudaReadModeElementType> texmem4;
texture<float,1,cudaReadModeElementType> texmem5;
texture<float,1,cudaReadModeElementType> texmem6;
texture<float,1,cudaReadModeElementType> texmem7;
texture<float,1,cudaReadModeElementType> texmem9;
texture<float,1,cudaReadModeElementType> texmem8;
__global__ void tex_bm_kernel( float* out, unsigned size, int iterations)
{
int tid = blockIdx.x*MAX_THREADS_PER_BLOCK + threadIdx.x;
float Value=0;float Value1=0;float Value2=0;float Value3=0;float Value4=0;float Value5=0;
if(tid < size){
for(unsigned i=0; i<iterations; ++i){
Value1 = tex1Dfetch(texmem1,tid) + Value5;
Value2 = tex1Dfetch(texmem2,tid) + Value4;
Value3 = tex1Dfetch(texmem3,tid) + Value3;
Value4 = tex1Dfetch(texmem4,tid) + Value1;
Value5 = tex1Dfetch(texmem5,tid) + Value2;
Value+=i+Value5+Value3;
}
}
__syncthreads();
out[tid]=Value;
}
////////////////////////////////////////////////////////////////////////////////
// Main Program
////////////////////////////////////////////////////////////////////////////////
int main(int argc, char** argv)
{
int iterations;
if (argc != 2){
fprintf(stderr,"usage: %s #iterations\n",argv[0]);
exit(1);
}
else{
iterations = atoi(argv[1]);
}
printf("Power Microbenchmark with %d iterations\n",iterations);
unsigned texmem_size = LINE_SIZE*SETS*ASSOC;
float *host_texture1 = (float*) malloc(texmem_size*sizeof(float));
for (int i=0; i< texmem_size; i++) {
host_texture1[i] = i;
}
float *device_texture1;
float *device_texture2;
float *device_texture3;
float *device_texture4;
float *device_texture5;
float *device_texture6;
float *device_texture7;
float *device_texture8;
float *device_texture9;
float *host_out = (float*) malloc(texmem_size*sizeof(float)*10);
float *device_out;
cudaMalloc((void**) &device_texture1, texmem_size*sizeof(float));
cudaMalloc((void**) &device_texture2, texmem_size*sizeof(float));
cudaMalloc((void**) &device_texture3, texmem_size*sizeof(float));
cudaMalloc((void**) &device_texture4, texmem_size*sizeof(float));
cudaMalloc((void**) &device_texture5, texmem_size*sizeof(float));
cudaMalloc((void**) &device_texture6, texmem_size*sizeof(float));
cudaMalloc((void**) &device_texture7, texmem_size*sizeof(float));
cudaMalloc((void**) &device_texture8, texmem_size*sizeof(float));
cudaMalloc((void**) &device_texture9, texmem_size*sizeof(float));
cudaMalloc((void**) &device_out, texmem_size*sizeof(float)*10);
cudaMemcpy(device_texture1, host_texture1, texmem_size*sizeof(float), cudaMemcpyHostToDevice);
cudaMemcpy(device_texture2, host_texture1, texmem_size*sizeof(float), cudaMemcpyHostToDevice);
cudaMemcpy(device_texture3, host_texture1, texmem_size*sizeof(float), cudaMemcpyHostToDevice);
cudaMemcpy(device_texture4, host_texture1, texmem_size*sizeof(float), cudaMemcpyHostToDevice);
cudaMemcpy(device_texture5, host_texture1, texmem_size*sizeof(float), cudaMemcpyHostToDevice);
cudaMemcpy(device_texture6, host_texture1, texmem_size*sizeof(float), cudaMemcpyHostToDevice);
cudaMemcpy(device_texture7, host_texture1, texmem_size*sizeof(float), cudaMemcpyHostToDevice);
cudaMemcpy(device_texture8, host_texture1, texmem_size*sizeof(float), cudaMemcpyHostToDevice);
cudaMemcpy(device_texture9, host_texture1, texmem_size*sizeof(float), cudaMemcpyHostToDevice);
cudaBindTexture(0, texmem1, device_texture1, texmem_size*sizeof(float));
cudaBindTexture(0, texmem2, device_texture2, texmem_size*sizeof(float));
cudaBindTexture(0, texmem3, device_texture3, texmem_size*sizeof(float));
cudaBindTexture(0, texmem4, device_texture4, texmem_size*sizeof(float));
cudaBindTexture(0, texmem5, device_texture5, texmem_size*sizeof(float));
cudaBindTexture(0, texmem6, device_texture6, texmem_size*sizeof(float));
cudaBindTexture(0, texmem7, device_texture7, texmem_size*sizeof(float));
cudaBindTexture(0, texmem8, device_texture8, texmem_size*sizeof(float));
cudaBindTexture(0, texmem9, device_texture9, texmem_size*sizeof(float));
unsigned num_blocks = (texmem_size / MAX_THREADS_PER_BLOCK) + 1;
dim3 grid( num_blocks, 1, 1);
dim3 threads( MAX_THREADS_PER_BLOCK, 1, 1);
cudaEvent_t start, stop;
float elapsedTime = 0;
checkCudaErrors(cudaEventCreate(&start));
checkCudaErrors(cudaEventCreate(&stop));
checkCudaErrors(cudaEventRecord(start));
tex_bm_kernel<<< grid, threads, 0 >>>(device_out, texmem_size, iterations);
checkCudaErrors(cudaEventRecord(stop));
checkCudaErrors(cudaEventSynchronize(stop));
checkCudaErrors(cudaEventElapsedTime(&elapsedTime, start, stop));
printf("gpu execution time = %.2f s\n", elapsedTime/1000);
getLastCudaError("kernel launch failure");
cudaThreadSynchronize();
cudaMemcpy(host_out, device_out, texmem_size*sizeof(float), cudaMemcpyDeviceToHost);
checkCudaErrors(cudaEventDestroy(start));
checkCudaErrors(cudaEventDestroy(stop));
cudaUnbindTexture(texmem1);
cudaUnbindTexture(texmem2);
cudaUnbindTexture(texmem3);
cudaUnbindTexture(texmem4);
cudaUnbindTexture(texmem5);
cudaUnbindTexture(texmem6);
cudaUnbindTexture(texmem7);
cudaUnbindTexture(texmem8);
cudaUnbindTexture(texmem9);
/*
printf("Output: ");
float error = false;
for (int i=0; i< texmem_size; i++){
printf("%.1f ", host_out[i]);
if (host_out[i] - i > 0.0001) error = true;
}
printf("\n");
if (error) printf("\nFAILED\n");
else printf("\nPASSED\n");
*/
return 0;
}
void CleanupResources(void){
// Free device memory
}
// Allocates an array with random float entries.
void RandomInit(int* data, int n){
for (int i = 0; i < n; ++i)
data[i] = (int)(rand() / RAND_MAX);
}
|
caa0e88bd4e95e5d5e3bee2d958ebace3fd0e59a.hip | // !!! This is a file automatically generated by hipify!!!
/*
* Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "strings_column.hpp"
#include "all_to_all_comm.hpp"
#include "communicator.hpp"
#include "error.hpp"
#include <cudf/column/column_view.hpp>
#include <cudf/table/table_view.hpp>
#include <cudf/types.hpp>
#include <rmm/device_uvector.hpp>
#include <rmm/device_vector.hpp>
#include <rmm/exec_policy.hpp>
#include <thrust/device_ptr.h>
#include <thrust/functional.h>
#include <thrust/gather.h>
#include <thrust/scan.h>
#include <thrust/transform.h>
#include <cstdint>
#include <vector>
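// gather_string_offsets: for every string column, look up the character offsets at the
// given row-partition boundaries (thrust::gather from the offsets child column), copy
// them to the host as string_send_offsets, and exchange them with the other ranks to
// fill string_recv_offsets. Non-string columns get empty placeholder vectors.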
void gather_string_offsets(cudf::table_view table,
std::vector<cudf::size_type> const &offsets,
std::vector<std::vector<cudf::size_type>> &string_send_offsets,
std::vector<std::vector<int64_t>> &string_recv_offsets,
CommunicationGroup comm_group,
Communicator *communicator)
{
int comm_group_size = comm_group.size();
rmm::device_vector<cudf::size_type> d_offsets(offsets);
for (cudf::size_type icol = 0; icol < table.num_columns(); icol++) {
cudf::data_type dtype = table.column(icol).type();
if (dtype.id() != cudf::type_id::STRING) {
// 1. If not a string column, push an empty vector
string_send_offsets.emplace_back();
string_recv_offsets.emplace_back();
continue;
} else {
string_send_offsets.emplace_back(comm_group_size + 1);
string_recv_offsets.emplace_back(comm_group_size + 1);
// 2. Gather `string_send_offsets` using the offset subcolumn and `d_offsets`
rmm::device_vector<cudf::size_type> d_string_send_offsets(comm_group_size + 1);
thrust::gather(rmm::exec_policy(),
d_offsets.begin(),
d_offsets.end(),
thrust::device_ptr<const cudf::size_type>(
table.column(icol).child(0).head<cudf::size_type>()),
d_string_send_offsets.begin());
CUDA_RT_CALL(hipMemcpy(string_send_offsets[icol].data(),
thrust::raw_pointer_cast(d_string_send_offsets.data()),
(comm_group_size + 1) * sizeof(cudf::size_type),
hipMemcpyDeviceToHost));
// 3. Communicate string_send_offsets and receive string_recv_offsets
communicate_sizes(
string_send_offsets[icol], string_recv_offsets[icol], comm_group, communicator);
}
}
}
void calculate_string_sizes_from_offsets(
cudf::table_view input_table,
cudf::size_type begin,
cudf::size_type end,
std::vector<rmm::device_uvector<cudf::size_type>> &output_sizes)
{
output_sizes.clear();
for (cudf::size_type icol = 0; icol < input_table.num_columns(); icol++) {
cudf::column_view input_column = input_table.column(icol);
if (input_column.type().id() != cudf::type_id::STRING) {
output_sizes.emplace_back(0, rmm::cuda_stream_default);
continue;
}
output_sizes.emplace_back(end - begin, rmm::cuda_stream_default);
thrust::transform(
// rmm::exec_policy(rmm::cuda_stream_default),
thrust::device_ptr<const cudf::size_type>(
input_column.child(0).begin<const cudf::size_type>() + begin + 1),
thrust::device_ptr<const cudf::size_type>(
input_column.child(0).begin<const cudf::size_type>() + end + 1),
thrust::device_ptr<const cudf::size_type>(
input_column.child(0).begin<const cudf::size_type>() + begin),
thrust::device_ptr<cudf::size_type>(output_sizes[icol].data()),
thrust::minus<cudf::size_type>());
}
}
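// calculate_string_offsets_from_sizes: rebuild each string column's offsets child from
// the per-row character sizes with an inclusive scan written from offset index 1 onward,
// then zero the leading offset with hipMemsetAsync.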
void calculate_string_offsets_from_sizes(
cudf::mutable_table_view output_table,
std::vector<rmm::device_uvector<cudf::size_type>> const &input_sizes)
{
for (cudf::size_type icol = 0; icol < output_table.num_columns(); icol++) {
cudf::mutable_column_view output_column = output_table.column(icol);
if (output_column.type().id() != cudf::type_id::STRING) continue;
cudf::size_type nrows = output_column.size();
const cudf::size_type *sizes_start = input_sizes[icol].data();
const cudf::size_type *sizes_end = sizes_start + nrows;
thrust::inclusive_scan(
// rmm::exec_policy(rmm::cuda_stream_default),
thrust::device_ptr<const cudf::size_type>(sizes_start),
thrust::device_ptr<const cudf::size_type>(sizes_end),
thrust::device_ptr<cudf::size_type>(
static_cast<cudf::size_type *>(output_column.child(0).head())) +
1);
CUDA_RT_CALL(hipMemsetAsync(output_column.child(0).head(), 0, sizeof(cudf::size_type), 0));
}
}
void allocate_string_sizes_receive_buffer(
cudf::table_view input_table,
std::vector<int64_t> recv_offsets,
std::vector<rmm::device_uvector<cudf::size_type>> &string_sizes_recv)
{
for (cudf::size_type icol = 0; icol < input_table.num_columns(); icol++) {
if (input_table.column(icol).type().id() != cudf::type_id::STRING) {
string_sizes_recv.emplace_back(0, rmm::cuda_stream_default);
} else {
string_sizes_recv.emplace_back(recv_offsets.back(), rmm::cuda_stream_default);
}
}
}
| caa0e88bd4e95e5d5e3bee2d958ebace3fd0e59a.cu | /*
* Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "strings_column.hpp"
#include "all_to_all_comm.hpp"
#include "communicator.hpp"
#include "error.hpp"
#include <cudf/column/column_view.hpp>
#include <cudf/table/table_view.hpp>
#include <cudf/types.hpp>
#include <rmm/device_uvector.hpp>
#include <rmm/device_vector.hpp>
#include <rmm/exec_policy.hpp>
#include <thrust/device_ptr.h>
#include <thrust/functional.h>
#include <thrust/gather.h>
#include <thrust/scan.h>
#include <thrust/transform.h>
#include <cstdint>
#include <vector>
void gather_string_offsets(cudf::table_view table,
std::vector<cudf::size_type> const &offsets,
std::vector<std::vector<cudf::size_type>> &string_send_offsets,
std::vector<std::vector<int64_t>> &string_recv_offsets,
CommunicationGroup comm_group,
Communicator *communicator)
{
int comm_group_size = comm_group.size();
rmm::device_vector<cudf::size_type> d_offsets(offsets);
for (cudf::size_type icol = 0; icol < table.num_columns(); icol++) {
cudf::data_type dtype = table.column(icol).type();
if (dtype.id() != cudf::type_id::STRING) {
// 1. If not a string column, push an empty vector
string_send_offsets.emplace_back();
string_recv_offsets.emplace_back();
continue;
} else {
string_send_offsets.emplace_back(comm_group_size + 1);
string_recv_offsets.emplace_back(comm_group_size + 1);
// 2. Gather `string_send_offsets` using the offset subcolumn and `d_offsets`
rmm::device_vector<cudf::size_type> d_string_send_offsets(comm_group_size + 1);
thrust::gather(rmm::exec_policy(),
d_offsets.begin(),
d_offsets.end(),
thrust::device_ptr<const cudf::size_type>(
table.column(icol).child(0).head<cudf::size_type>()),
d_string_send_offsets.begin());
CUDA_RT_CALL(cudaMemcpy(string_send_offsets[icol].data(),
thrust::raw_pointer_cast(d_string_send_offsets.data()),
(comm_group_size + 1) * sizeof(cudf::size_type),
cudaMemcpyDeviceToHost));
// 3. Communicate string_send_offsets and receive string_recv_offsets
communicate_sizes(
string_send_offsets[icol], string_recv_offsets[icol], comm_group, communicator);
}
}
}
void calculate_string_sizes_from_offsets(
cudf::table_view input_table,
cudf::size_type begin,
cudf::size_type end,
std::vector<rmm::device_uvector<cudf::size_type>> &output_sizes)
{
output_sizes.clear();
for (cudf::size_type icol = 0; icol < input_table.num_columns(); icol++) {
cudf::column_view input_column = input_table.column(icol);
if (input_column.type().id() != cudf::type_id::STRING) {
output_sizes.emplace_back(0, rmm::cuda_stream_default);
continue;
}
output_sizes.emplace_back(end - begin, rmm::cuda_stream_default);
thrust::transform(
// rmm::exec_policy(rmm::cuda_stream_default),
thrust::device_ptr<const cudf::size_type>(
input_column.child(0).begin<const cudf::size_type>() + begin + 1),
thrust::device_ptr<const cudf::size_type>(
input_column.child(0).begin<const cudf::size_type>() + end + 1),
thrust::device_ptr<const cudf::size_type>(
input_column.child(0).begin<const cudf::size_type>() + begin),
thrust::device_ptr<cudf::size_type>(output_sizes[icol].data()),
thrust::minus<cudf::size_type>());
}
}
void calculate_string_offsets_from_sizes(
cudf::mutable_table_view output_table,
std::vector<rmm::device_uvector<cudf::size_type>> const &input_sizes)
{
for (cudf::size_type icol = 0; icol < output_table.num_columns(); icol++) {
cudf::mutable_column_view output_column = output_table.column(icol);
if (output_column.type().id() != cudf::type_id::STRING) continue;
cudf::size_type nrows = output_column.size();
const cudf::size_type *sizes_start = input_sizes[icol].data();
const cudf::size_type *sizes_end = sizes_start + nrows;
thrust::inclusive_scan(
// rmm::exec_policy(rmm::cuda_stream_default),
thrust::device_ptr<const cudf::size_type>(sizes_start),
thrust::device_ptr<const cudf::size_type>(sizes_end),
thrust::device_ptr<cudf::size_type>(
static_cast<cudf::size_type *>(output_column.child(0).head())) +
1);
CUDA_RT_CALL(cudaMemsetAsync(output_column.child(0).head(), 0, sizeof(cudf::size_type), 0));
}
}
void allocate_string_sizes_receive_buffer(
cudf::table_view input_table,
std::vector<int64_t> recv_offsets,
std::vector<rmm::device_uvector<cudf::size_type>> &string_sizes_recv)
{
for (cudf::size_type icol = 0; icol < input_table.num_columns(); icol++) {
if (input_table.column(icol).type().id() != cudf::type_id::STRING) {
string_sizes_recv.emplace_back(0, rmm::cuda_stream_default);
} else {
string_sizes_recv.emplace_back(recv_offsets.back(), rmm::cuda_stream_default);
}
}
}
|
5dcd45aec90bbfc56e69eaed36df3ff14c269a97.hip | // !!! This is a file automatically generated by hipify!!!
#include <iostream>
#include <hipblas.h> // the hipblas* API used below is declared in hipblas.h, not rocblas.h
int main() {
hipError_t cuda_stat;
hipblasStatus_t stat;
hipblasHandle_t handle;
int array_size = (1 << 22);
float* h_x = new float[array_size];
float* h_y = new float[array_size];
for (int i = 0; i < array_size; ++i) {
h_x[i] = i * 1.0f;
h_y[i] = i * 1.0f;
}
float* d_x;
float* d_y;
cuda_stat = hipMalloc(&d_x, sizeof(float) * array_size);
cuda_stat = hipMalloc(&d_y, sizeof(float) * array_size);
stat = hipblasCreate(&handle);
stat = hipblasSetVector(
array_size,
sizeof(*h_x),
h_x,
/* space by host */ 1,
d_x,
/* space by device */ 1
);
stat = hipblasSetVector(
array_size,
sizeof(*h_y),
h_y,
1,
d_y,
1
);
float alpha = 1.0;
float scalar_product;
float norm_x;
float norm_y;
stat = hipblasSdot(
handle,
array_size,
d_x, 1,
d_y, 1,
&scalar_product
);
stat = hipblasSnrm2(
handle,
array_size,
d_x, 1,
&norm_x
);
stat = hipblasSnrm2(
handle,
array_size,
d_y, 1,
&norm_y
);
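// scalar_product / (norm_x * norm_y) is the cosine similarity between x and y;
// since both vectors were filled with identical values it should print (approximately) 1.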
std::cout << scalar_product / norm_x / norm_y << std::endl;
hipblasDestroy(handle);
return 0;
// 6 * 5
// 0 1 2 3 4
// 5 6 7 8 9
// 10 11 12 13 14
// ...
} | 5dcd45aec90bbfc56e69eaed36df3ff14c269a97.cu | #include <iostream>
#include <cublas_v2.h>
int main() {
cudaError_t cuda_stat;
cublasStatus_t stat;
cublasHandle_t handle;
int array_size = (1 << 22);
float* h_x = new float[array_size];
float* h_y = new float[array_size];
for (int i = 0; i < array_size; ++i) {
h_x[i] = i * 1.0f;
h_y[i] = i * 1.0f;
}
float* d_x;
float* d_y;
cuda_stat = cudaMalloc(&d_x, sizeof(float) * array_size);
cuda_stat = cudaMalloc(&d_y, sizeof(float) * array_size);
stat = cublasCreate(&handle);
stat = cublasSetVector(
array_size,
sizeof(*h_x),
h_x,
/* space by host */ 1,
d_x,
/* space by device */ 1
);
stat = cublasSetVector(
array_size,
sizeof(*h_y),
h_y,
1,
d_y,
1
);
float alpha = 1.0;
float scalar_product;
float norm_x;
float norm_y;
stat = cublasSdot(
handle,
array_size,
d_x, 1,
d_y, 1,
&scalar_product
);
stat = cublasSnrm2(
handle,
array_size,
d_x, 1,
&norm_x
);
stat = cublasSnrm2(
handle,
array_size,
d_y, 1,
&norm_y
);
std::cout << scalar_product / norm_x / norm_y << std::endl;
cublasDestroy(handle);
return 0;
// 6 * 5
// 0 1 2 3 4
// 5 6 7 8 9
// 10 11 12 13 14
// ...
} |
ba0708fe2be221658c367ad9a2adf8dcba665702.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <gtest/gtest.h>
#include <trajectory_generation/GATE.cuh>
#include <dynamics/quadrotor/quad_dynamics.cuh>
#include <controllers/quadrotor/quad_LQR.cuh>
#include <memory>
#include <chrono>
#include <iostream>
//template <class T>
//__global__ void testx0Kernel(T::all_states_at_t* x0_noise_device) {
// int idx = blockDim.x * blockIdx.x + threadIdx.x;
// printf("Idx: %i, Mean: %f\n", idx, (*x0_noise_device).row(idx).mean());
//}
TEST(GATE_Quad, PropagateTrajectories) {
// simulation timestep
float dt = 0.01;
// Setup rollouts
const int num_timesteps = 1500;
const int num_rollouts = 1024;
const int bdim_x = 64;
// set params
std::shared_ptr<QuadDynamics> dyn = std::make_shared<QuadDynamics>();
// LQR Controller
std::shared_ptr<QuadLQR> ctrl = std::make_shared<QuadLQR>();
auto default_guid_params = ctrl->getGuidanceParams();
//default_guid_params.dt = dt; //todo is this necessary?
//default_guid_params.setNumTimesteps(num_timesteps);
auto default_dyn_params = ctrl->getDynamicsParams();
ctrl->setGuidanceParams(default_guid_params);
ctrl->setDynamicsParams(default_dyn_params);
dyn->setParams(default_dyn_params);
// perturbations
QuadPertParams::state_array x0_mean, x0_std;
QuadPertParams::control_array u_std;
x0_mean << -2.f, -2.f, -2.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f;
x0_std << 0.1f, 0.1f, 0.1f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f;
//u_std << .0f, .0f, .0f, .0f;
u_std << 0, 0, .25f, .25f, .25f, .25f; // First two inputs are force in x and y
QuadPertParams params = QuadPertParams(x0_mean, x0_std, u_std);
std::shared_ptr<QuadPert<num_timesteps, num_rollouts>> pert = std::make_shared<QuadPert<num_timesteps, num_rollouts>>(params);
pert->initializeX0andControlPerturbations();
pert->initPerturbations();
// Create the GATE object
typedef GATE<QuadDynamics, QuadLQR, QuadPert<num_timesteps, num_rollouts>, num_timesteps, num_rollouts, bdim_x> Quad_ROTE;
std::shared_ptr<Quad_ROTE> RTE = std::make_shared<Quad_ROTE>(dyn.get(), ctrl.get(), pert.get(), x0_mean, dt);
RTE->computeTrajectories();
// Assert that there are no NaN's
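// state_trajectories_host is assumed to be laid out rollout-major as
// [rollout][timestep][state], so the flat index below picks state j of the
// final timestep of rollout i.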
for (int i = 0; i < num_rollouts; ++i) {
for (int j = 0; j < QuadDynamics::STATE_DIM; ++j) {
ASSERT_FALSE(isnan(
RTE->state_trajectories_host[num_timesteps * QuadDynamics::STATE_DIM * i // Rollout
+ (num_timesteps - 1) * QuadDynamics::STATE_DIM //Timestep
+ j])) << "Rollout: " << i << ", State: " << j << std::endl; // State
}
}
} | ba0708fe2be221658c367ad9a2adf8dcba665702.cu | #include <gtest/gtest.h>
#include <trajectory_generation/GATE.cuh>
#include <dynamics/quadrotor/quad_dynamics.cuh>
#include <controllers/quadrotor/quad_LQR.cuh>
#include <memory>
#include <chrono>
#include <iostream>
//template <class T>
//__global__ void testx0Kernel(T::all_states_at_t* x0_noise_device) {
// int idx = blockDim.x * blockIdx.x + threadIdx.x;
// printf("Idx: %i, Mean: %f\n", idx, (*x0_noise_device).row(idx).mean());
//}
TEST(GATE_Quad, PropagateTrajectories) {
// simulation timestep
float dt = 0.01;
// Setup rollouts
const int num_timesteps = 1500;
const int num_rollouts = 1024;
const int bdim_x = 64;
// set params
std::shared_ptr<QuadDynamics> dyn = std::make_shared<QuadDynamics>();
// LQR Controller
std::shared_ptr<QuadLQR> ctrl = std::make_shared<QuadLQR>();
auto default_guid_params = ctrl->getGuidanceParams();
//default_guid_params.dt = dt; //todo is this necessary?
//default_guid_params.setNumTimesteps(num_timesteps);
auto default_dyn_params = ctrl->getDynamicsParams();
ctrl->setGuidanceParams(default_guid_params);
ctrl->setDynamicsParams(default_dyn_params);
dyn->setParams(default_dyn_params);
// perturbations
QuadPertParams::state_array x0_mean, x0_std;
QuadPertParams::control_array u_std;
x0_mean << -2.f, -2.f, -2.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f;
x0_std << 0.1f, 0.1f, 0.1f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f;
//u_std << .0f, .0f, .0f, .0f;
u_std << 0, 0, .25f, .25f, .25f, .25f; // First two inputs are force in x and y
QuadPertParams params = QuadPertParams(x0_mean, x0_std, u_std);
std::shared_ptr<QuadPert<num_timesteps, num_rollouts>> pert = std::make_shared<QuadPert<num_timesteps, num_rollouts>>(params);
pert->initializeX0andControlPerturbations();
pert->initPerturbations();
// Create the GATE object
typedef GATE<QuadDynamics, QuadLQR, QuadPert<num_timesteps, num_rollouts>, num_timesteps, num_rollouts, bdim_x> Quad_ROTE;
std::shared_ptr<Quad_ROTE> RTE = std::make_shared<Quad_ROTE>(dyn.get(), ctrl.get(), pert.get(), x0_mean, dt);
RTE->computeTrajectories();
// Assert that there are no NaN's
for (int i = 0; i < num_rollouts; ++i) {
for (int j = 0; j < QuadDynamics::STATE_DIM; ++j) {
ASSERT_FALSE(isnan(
RTE->state_trajectories_host[num_timesteps * QuadDynamics::STATE_DIM * i // Rollout
+ (num_timesteps - 1) * QuadDynamics::STATE_DIM //Timestep
+ j])) << "Rollout: " << i << ", State: " << j << std::endl; // State
}
}
} |
bc4a4e6de1d7b156299076a22c5c794519512f07.hip | // !!! This is a file automatically generated by hipify!!!
#include <iostream>
#include <string>
#include <string.h>
#include <stdlib.h>
#include <sstream>
/*
#include <multithreading.h>
#include <cutil.h>
#include <cutil_inline.h>
#include <hip/hip_runtime_api.h>
*/
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
#include <helper_cuda.h>
#include <helper_timer.h>
#include <helper_functions.h>
/*#include <helper_cuda_drvapi.h>*/
/*#include <nvrtc_helper.h>*/
#include <multithreading.h>
#include "crackkernel.cu"
#include "entry.cpp"
#include "zip.c"
#include "pbkdf2.cpp"
using namespace std;
dim3 threads(128);
dim3 blocks(12,10);
int threadcount = threads.x*blocks.x*blocks.y;
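// With 128 threads per block and a 12x10 grid, each kernel launch evaluates threadcount = 15360 password candidates in parallel.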
//Fixed row width (in characters) used for the password and dictionary buffers.
int width = 80;
unsigned char extension,in[CHUNK],found=0;
/*Variables storing information about the encrypted file
- array holding the password verification value (PVV)
- array holding the salt value
*/
int dkLen,saltLen,len;
int stored_pvv[2];
int S[16];
/*Multi-GPU support: query the number of CUDA-capable GPUs that can be used for the computation*/
const int MAX_GPU_COUNT = 8;
int GPU_N;
CUTThread threadID[MAX_GPU_COUNT];
TGPUplan plan[MAX_GPU_COUNT];
char *emptyArr;
char temp2[20];
float time_parallel = 0;
//Holds the words of the dictionary
char hostArray[869229][80];
/*End of dictionary storage*/
fcrypt_ctx h_zcx[1];
/*Variables holding the grammar information*/
int v_prev, v_curr;
int Sk[33];
/*End of the grammar-information variables*/
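// Sk[k] is filled in readGrammar with the hostArray index of the first dictionary word of length k,
// so Sk[k+1] - Sk[k] is the number of dictionary words of that length.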
void initHost(){
/*Query and display the number of supported GPUs*/
checkCudaErrors(hipGetDeviceCount(&GPU_N));
if(GPU_N > MAX_GPU_COUNT) GPU_N = MAX_GPU_COUNT;
printf("\nCUDA-capable device count: %i\n", GPU_N);
/*End of GPU enumeration*/
emptyArr = (char*)malloc(sizeof(char)*width*threadcount);
memset(emptyArr, '\0', sizeof(char)*width*threadcount);
for (int i=0;i<GPU_N;i++)
{
//khoi tao plan->device
plan[i].device=i;
// The program currently only handles the case quantities = 1
plan[i].quantities = 1;
}
//khoi tao cho zcx
h_zcx->mode = 1;
h_zcx->encr_pos = BLOCK_SIZE;
memset(h_zcx->nonce, 0, BLOCK_SIZE * sizeof(unsigned char));
}
void freeCUDA()
{
for (int i=0;i<GPU_N;i++)
{
hipFree(plan[i].devPass);
hipFree(plan[i].d_pre_terminal);
hipFree(plan[i].deviceArrPtr);
hipFree(plan[i].d_salt);
hipFree(plan[i].d_pvv);
hipFree(plan[i].d_in);
hipFree(plan[i].d_out);
}
}
static CUT_THREADPROC solverThread(TGPUplan *plan){
/******************************************************************
Variable declarations
******************************************************************/
//devPitch - passed to the pitched allocations but never used when the values are read back.
size_t devPitch;
int pivot_base = 0;
int ret[threadcount];
//Declare the hostPass array used to display the passwords returned from the device.
char hostPass[threadcount][80];
memset(hostPass,'\0', sizeof(char)*threadcount*80);
/*****************************************************************
End of variable declarations
******************************************************************/
memset(ret,-1,sizeof(int)*threadcount);
/*****************************************************************
Allocate memory on each GPU and transfer the data needed for the computation from Host to Device
*****************************************************************/
//Set device
checkCudaErrors(hipSetDevice(plan->device));
hipMallocPitch((void**)&plan->devPass, &devPitch, width * sizeof(char), threadcount);
//Initialize plan->deviceArrPtr on each GPU
hipMallocPitch((void**)&plan->deviceArrPtr, &devPitch, width * sizeof(char), plan->wordCount);
hipMemcpy2D(plan->deviceArrPtr, width*sizeof(char), hostArray + plan->startIndex, width*sizeof(char), width, plan->wordCount, hipMemcpyHostToDevice);
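// Only the dictionary slice for the current word length (rows startIndex .. startIndex+wordCount-1 of hostArray) is copied to this device.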
//Copy the password-verification values (salt and PVV) to each GPU
hipMalloc((void**)&plan->d_salt, sizeof(int) * 16);
hipMemcpy(plan->d_salt, S, sizeof(int) * 16, hipMemcpyHostToDevice);
hipMalloc((void**)&plan->d_pvv, sizeof(int) * 2);
hipMemcpy(plan->d_pvv, stored_pvv, sizeof(int) * 2, hipMemcpyHostToDevice);
hipMalloc((void**)&plan->d_pre_terminal, sizeof(char) * strlen(temp2));
hipMemcpy(plan->d_pre_terminal, temp2, sizeof(char) * strlen(temp2), hipMemcpyHostToDevice);
hipMalloc((void**)&plan->d_out, threadcount*CHUNK*sizeof(unsigned char));
hipMalloc((void**)&plan->d_in,threadcount*CHUNK*sizeof(unsigned char));
hipMalloc((void**)&plan->d_Key,threadcount*16*sizeof(unsigned char));
hipMalloc((void**)&plan->d_ret,threadcount*sizeof(unsigned int));
plan->Key = (unsigned char *)malloc(sizeof(unsigned char)*16*threadcount);
//cap phat bo nho cho phan giai ma
hipMalloc((void**)&plan->d_zcx,threadcount*sizeof(fcrypt_ctx));
hipMalloc((void**)&plan->d_acx,threadcount*sizeof(aes_ctx));
//cap phat bo nho cho phan giai nen
hipMalloc((void**)&plan->d_strm, threadcount*sizeof(z_stream));
hipMalloc((void**)&plan->d_state,threadcount*sizeof(struct inflate_state FAR));
/****************************************************************
End of the data-transfer phase
*****************************************************************/
/****************************************************************
The kernel is launched many times; launches on different devices are independent
*****************************************************************/
pivot_base = plan->device*threadcount;
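// Each device starts at its own offset into the word list and later advances by GPU_N*threadcount,
// so the devices test disjoint batches of candidates.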
//checkCudaErrors(hipDeviceSynchronize());
while((pivot_base < plan->wordCount)&&(!found))
{
//Reset the buffers before every kernel launch
hipMemcpy2D(plan->devPass, width*sizeof(char), emptyArr,width*sizeof(char), width, threadcount, hipMemcpyHostToDevice);
hipMemset (plan->d_out, 0, threadcount*CHUNK);
for (int i=0;i<threadcount;i++) {
hipMemcpy(plan->d_in+i*CHUNK, in, CHUNK*sizeof(unsigned char), hipMemcpyHostToDevice);
hipMemcpy(plan->d_zcx+i, h_zcx, sizeof(fcrypt_ctx), hipMemcpyHostToDevice);}
hipMemset (plan->d_ret, -1, threadcount*sizeof(int));
//chay kernel
hipLaunchKernelGGL(( RunKernel), dim3(blocks), dim3(threads), 0, 0, pivot_base, plan->devPass,plan->deviceArrPtr, width, plan->quantities, plan->wordCount, plan->d_pre_terminal,strlen(temp2), plan->d_salt, saltLen, plan->d_pvv, dkLen,plan->d_in,len,plan->d_out,extension,plan->d_Key,plan->d_ret,plan->d_zcx,plan->d_acx,plan->d_strm,plan->d_state);
hipError_t error = hipGetLastError();
if (error != hipSuccess)
{
fprintf(stderr, "#DEVICE ERROR#: %s\n", hipGetErrorString(error));
freeCUDA();
return ;
}
else
{
//Advance pivot_base
pivot_base += GPU_N*threadcount;
hipMemcpy2D(hostPass, width*sizeof(char), plan->devPass, width*sizeof(char),width,threadcount, hipMemcpyDeviceToHost);
hipMemcpy(ret,plan->d_ret,sizeof(int)*threadcount,hipMemcpyDeviceToHost);
hipMemcpy(plan->Key,plan->d_Key,sizeof(unsigned char)*16*threadcount,hipMemcpyDeviceToHost);
//cout << "\n----------------------------------------------------------------------\n";
//cout << "\tTong thoi gian: " << cutGetTimerValue(timer) << "ms";
//cout << "\t" << pivot_base << "/" << GPU_N << " ma da thu.\n";
for (int i1=0; i1 < threadcount; i1++)
if (strcmp(hostPass[i1], "") != 0) //password found
{
cout << "\nThe correct password is: ";
cout << hostPass[i1] << "\n";
found=1;
}
}
checkCudaErrors(hipDeviceSynchronize());
}
/*****************************************************************
End of the repeated kernel launches, independent across devices.
*****************************************************************/
hipFree(plan->devPass);
hipFree(plan->d_pre_terminal);
hipFree(plan->deviceArrPtr);
hipFree(plan->d_salt);
hipFree(plan->d_pvv);
hipFree(plan->d_out);
hipFree(plan->d_in);
hipFree(plan->d_Key);
hipFree(plan->d_ret);
hipFree(plan->d_zcx);
hipFree(plan->d_acx);
hipFree(plan->d_strm);
hipFree(plan->d_state);
free(plan->Key);
/*****************************************************************
Timing bookkeeping and termination of all worker threads
******************************************************************/
hipDeviceReset();
CUT_THREADEND;
/*****************************************************************
Ket thuc
******************************************************************/
}
void crack(){
/*
unsigned int timer=0;
cutCreateTimer(&timer);
cutStartTimer(timer);
*/
/*Each CPU thread manages one GPU; since there are GPU_N GPUs, GPU_N parallel host threads are needed */
for(int GPUIndex = 0; GPUIndex < GPU_N; GPUIndex++)
threadID[GPUIndex] = cutStartThread((CUT_THREADROUTINE)solverThread, &plan[GPUIndex]);
printf("main(): waiting...\n");
/*
cutWaitForThreads(threadID, GPU_N);
cout <<cutGetTimerValue(timer) << "ms\n";
cout << "\n---------------------------------------------------------------------------------------------------------------\n";
time_parallel += cutGetTimerValue(timer);
cutStopTimer(timer);
cutDeleteTimer(timer);
*/
}
void readGrammar(char *filename1, char *filename2, int *count)
{
memset(Sk, 0, 33*sizeof(int));
printf("\n--> Nap tap luat sinh mat khau ....");
*count = ReadRules(filename1); //argv[2]
printf("OK \n");
FILE *fp;
char buffer[80] = "";
fp =fopen(filename2, "r"); //argv[3]
//Khoi tao hostArray.
if (fp != NULL)
{
int h = 0;
while(fgets(buffer, sizeof(buffer), fp) != NULL)
{
if(h==0)
{
v_prev= v_curr = strlen(buffer)-1;
Sk[v_curr] = h;
}
v_curr = strlen(buffer)-1;
if(v_curr != v_prev)
{
Sk[v_curr] = h;
v_prev = v_curr;
}
strcpy(hostArray[h], buffer);
printf(">>> read buffer: %s\n", hostArray[h]);
h++;
strcpy(buffer, "");
}
fclose(fp);
}
}
int checkInfo(char *filename)
{
ZIP_LOCAL_FILE_HEADER* lfh;
FILE* pt;
pt = fopen(filename, "rb");
lfh = (ZIP_LOCAL_FILE_HEADER*) malloc(sizeof(ZIP_LOCAL_FILE_HEADER));
if(!pt) return -1;
read_lfh(lfh, pt);
if(get_bit_encrypted_ornot(lfh, pt) != 1)
{
cout<< "File is not encrypted";
return -1;
}
else
{
char *cp;
cp = strrchr(get_fname(lfh, pt), '.');
if (strcmp(cp, ".pdf")==0) extension = dotpdf;
if (strcmp(cp, ".doc")==0) extension = dotdoc;
if (strcmp(cp, ".txt")==0) extension = dottxt;
*cp=0;
printf("File is encrypted , parameters:");
/*---------------------------------------------------------------------------------------------
Read the salt, authentication code and password verification value (present only when the file is encrypted)
----------------------------------------------------------------------------------------------*/
display_salt_pvv_ac(lfh,pt,S,&saltLen,stored_pvv,&dkLen);
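// The fseek below skips the local file header, the salt and the password verifier, so the following
// fread picks up (at most CHUNK bytes of) the encrypted payload used for the trial decryptions.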
fseek(pt, 30 + lfh->fn_length + lfh->ex_length + SALT_LENGTH(1) + PWD_VER_LENGTH, SEEK_SET);
len = (int)fread(in, sizeof(unsigned char),CHUNK, pt);
fclose(pt);
}
return 1;
}
void multiFunction(int light, int count)
{
struct entry *working_value = NULL;
struct entry *head = NULL;
struct entry *tail = NULL;
int status = 0;
if(light == 6)
{
//Goi khoi tao host mot lan
initHost();
}
char temp_preterminal[20] = "";
char search_characters[4]="";
char temp_base[20]="";
//Build the tree and walk the pre-terminal structures
//1. Phase 1: all base structs
for(int i = 1; i< count; i++)
{
if(strcmp(Column1[i],"S") == 0)
{
//Xoa search_character va temp_terminal
strcpy(search_characters,"");
strcpy(temp_preterminal,"");
working_value = (struct entry *)malloc(sizeof(struct entry));
strcpy(working_value->base,Column2[i]);
working_value->pivot = 0;
working_value->num_strings = 0;
for(int j = 0; j< strlen(Column2[i]); j++)
{
if(Column2[i][j] == 'L' || Column2[i][j] == 'S' || Column2[i][j]=='D')
working_value->num_strings++;
}
//Tinh xac suat va pre_terminal
working_value->probability = Column3[i];
//Duyet cau truc cua Column2 de tinh xac suat.
int k;
char temp[2];
for(int j = 0; j< strlen(Column2[i]);)
{
k = 0;
search_characters[k] = Column2[i][j++];
while((Column2[i][j] != 'D') && (Column2[i][j] != 'L') && (Column2[i][j] != 'S'))
{
search_characters[++k] = Column2[i][j++];
if(Column2[i][j] == '\0') break;
}
//Thoat co nghia vi tri j da la bat dau mot ki tu moi, nhung chua gan. k tang len mot gia tri
search_characters[++k] = '\0';
//Kiem tra ki tu dau co phai la ki tu L. Neu la L chi cap nhat lai xau pre_terminal de phan biet. Khong
//cap nhat xac suat.
if (search_characters[0] == 'L')
{
temp[0] = 'H';
temp[1] = '\0';
strcat(temp_preterminal, temp);
strcat(temp_preterminal,search_characters);
strcat(temp_preterminal, temp);
}
else
{
//Neu khong phai, thi tim kiem va cap nhat lai xac suat
for(int t = 1; t < count; t ++)
{
if(strcmp(Column1[t],search_characters) == 0)
{
strcat(temp_preterminal,Column2[t]);
working_value->probability = working_value->probability * Column3[t];
break;
}
}
} //Ket thuc la ki tu D hoac S
//Cap nhat xac suat lon nhat roi thoat
}// Het vong for, thi da xac dinh duoc xac suat, va dong thoi la pre_terminal
strcpy(working_value->pre_terminal,temp_preterminal);
//Buoc cuoi cua giai doan 1: Them no vao queue uu tien
if(status ==0)
{
working_value->next = NULL;
working_value->prev = NULL;
head = tail = working_value;
status = 1;
}
else
{
//Them vao cuoi queue
working_value->next = NULL;
working_value->prev = tail;
tail->next = working_value;
tail = working_value;
}
working_value = NULL;
}
else
{
break;
} //ket thuc cua if-else
} //Ket thuc cua for.
/*Step 2. Grow the tree while emitting the list of candidate passwords that feeds the PBKDF2 routine.
This could be moved into a function of its own for a cleaner functional decomposition.
The search could be optimized so that Pop runs faster.
The algorithm itself can be parallelized, much like a parallel search over a list.
*/
int order=0;
working_value = Pop(head);
if(light == 6)
{
printf("\n%-12s %-15s %-10s %-15s %-15s %-15s %-15s %-15s\n","Base","pre_terminal","pivot","num_strings","probability","order", "Keys","Time");
cout << "\n----------------------------------------**-----------------------------------**----------------------------------\n";
}
else if(light == 3)
{
printf("\n%-12s %-15s %-10s %-15s %-15s %-15s\n","Base","pre_terminal","pivot","num_strings","probability","order");
cout << "\n-------------------------------**----------------------------**-----------------------------\n";
}
while((working_value != NULL)&&(!found))
{
order++;
int qualities = 0;
int sk;
for(int h = 0; h< strlen(working_value->pre_terminal); h++)
if(working_value->pre_terminal[h] == 'L')
{
qualities++;
sk = (int)working_value->pre_terminal[h + 1] - 48;
}
strcpy(temp2, working_value->pre_terminal);
if(light == 6)
{
/* pass the pre_terminal parameters obtained from the Pop operation to the GPU_N devices */
for(int deviceIndex = 0; deviceIndex < GPU_N; deviceIndex++)
{
plan[deviceIndex].wordCount = Sk[sk+1] - Sk[sk];
plan[deviceIndex].startIndex = Sk[sk];
}
/*Launch GPU_N host threads in parallel, one managing each of the GPU_N GPUs*/
//Generate passwords by combining the pre_terminal structure with the dictionary of meaningful words.
printf("\n%-12s %-15s %-10d %-15d %-15f %-15d %-15ld",working_value->base,working_value->pre_terminal,
working_value->pivot,working_value->num_strings, working_value->probability,order,Sk[sk+1] - Sk[sk]);
crack();
}
else if(light == 3)
{
printf("%-12s %-15s %-10d %-15d %-15f %-15d\n",working_value->base,working_value->pre_terminal,
working_value->pivot,working_value->num_strings, working_value->probability,order);
/*DisplayGuestPassword(working_value->pre_terminal, strlen(working_value->pre_terminal),hostArray,1, Sk[sk], Sk[sk+1], S, saltLen, stored_pvv, dkLen,3);*/
}
else if(light == 4)
{
printf("%-12s %-15s %-10d %-15d %-15f %-15d\n",working_value->base,working_value->pre_terminal,
working_value->pivot,working_value->num_strings, working_value->probability,order);
/*DisplayGuestPassword(working_value->pre_terminal, strlen(working_value->pre_terminal),hostArray,1, Sk[sk], Sk[sk+1], S, saltLen, stored_pvv, dkLen,4);*/
}
//Tiep tuc xay dung cay, insert va pop entry
for(int i= working_value->pivot; i< working_value->num_strings; i++)
{
strcpy(temp_base, working_value->base); // temp_base = "D1L3S2D1"
/*Data declarations, scoped to this for loop only */
int k; // intermediate running index
char temp[2]; // temp holds 'L' || 'S' || 'D'
char temp1[2]; // temp1 holds 'H' -> used as the delimiter around L segments.
int index = -1; // index of the variable; tells whether a replacement is possible.
strcpy(temp_preterminal,""); // clear temp_preterminal so it can be reused (declared near main)
// child_value->pre_terminal = temp_preterminal.
int segment = 0; // index into the base; says from where to where the base string is cut.
// e.g. 4L3$$4: from where to where the S2 part is cut
char temp_copy[10]; // substring from segment up to (segment + (int)atoi(search_characters)
/*Parse temp_base to get the digit and the class indicator D, L or S. This tells how many characters
to read from working_value's pre_terminal into child_working_value*/
//Bien cho biet co chen them vao duoc hay khong
bool agreement = false;
float reprobability = working_value->probability;
for(int j = 0; j < strlen(temp_base);)
{
strcpy(search_characters,"");// clear search_characters; the variable declared above is reused.
// e.g. search_characters = 1 or 2 or 1, after stripping the leading
// D of D1, the S of S2 and the D of D1 in temp_base.
/* Take the first character of temp_base*/
k=0;
temp[0] = temp_base[j];
temp[1] = '\0';
/*end */
j = j +1;
while((temp_base[j] != 'D') && (temp_base[j] != 'L') && (temp_base[j] != 'S'))
{
search_characters[k++] = temp_base[j++];
if(temp_base[j] == '\0') break;
}
//Ket thuc xau
search_characters[k] = '\0';
index++;
//temp_preterminal
if(temp[0] == 'L')
{
if(index == i)
{
agreement = false;
break; //Thoat ra khoi for theo j.
}
temp1[0] = 'H';
temp1[1] = '\0';
strcat(temp_preterminal, temp1);
strcat(temp_preterminal, temp);
strcat(temp_preterminal, search_characters);
strcat(temp_preterminal, temp1);
//Phai cap nhat lai segment
segment = segment + 3 + strlen(search_characters);
}
else
{
//Phai tinh den so sanh index voi chi so i.
if(index != i)
{
//Chi don thuan la copy cau truc tu vi tri segment cho den het (segment + (int)atoi(search_characters))
strcpy(temp_copy,""); // Chi luu tru tam thoi
int q;
for(q = segment; q < segment + (int)atoi(search_characters); q++)
{
temp_copy[q-segment] = working_value->pre_terminal[q];
}
temp_copy[q-segment] = '\0';
//Cap nhat lai segment, de cho lan copy sau.
segment = segment + (int)atoi(search_characters);
strcat(temp_preterminal, temp_copy);
}
else if(temp[0] == 'L')
{
agreement = false;
break; //Thoat ra khoi for theo j.
}
else //Neu vao trong day ma khong thay the xau moi thi huy bo.
{
//Join temp and search_characters together to get e.g. S2 => call it search_str.
//Extract the characters of working_value->pre_terminal from position segment to segment + (int)atoi(search_characters),
//called pointed_str. If a replacement exists, update its probability as well and create an additional new
//node
char search_str[4];
char pointed_str[4];
strcpy(search_str,temp);
strcat(search_str,search_characters);
strcpy(temp_copy,""); //ok da xoa temp_copy
int q;
for(q = segment; q < segment + (int)atoi(search_characters); q++)
{
temp_copy[q-segment] = working_value->pre_terminal[q];
}
temp_copy[q-segment] = '\0';
strcpy(pointed_str, temp_copy);
//Tim kiem de thay the. Chu yeu la do tim vi tri d.
for(int d = 1; d < count; d++)
{
if(strcmp(Column1[d],search_str)==0)
{
if(strcmp(Column2[d], pointed_str)==0)
{
segment += strlen(pointed_str);
if( (d+1 < count) && (strcmp(Column1[d+1],search_str)==0))
{
//A new entry can be added, i.e. a replacement string is still available; handle it here
//If it can be replaced, copy through to the end of j
strcat(temp_preterminal,Column2[d+1]);
// Recompute the probability
reprobability = (reprobability*Column3[d+1])/Column3[d];
agreement = true;
break;
}
else
{
//Vi tri nay da het cho. Quay tro ve tang i len, cho den het.
agreement = false;
break;
}
}
}
} //Ket thuc for tim kiem xau thay the
} //Ket thuc else - index
} //Ket thuc else - L
} //Ket thuc vong lap theo temp_base.
if(agreement == true)
{
//Them moi vao cuoi danh sach dang xet.
struct entry *child_value;
child_value = (struct entry *)malloc(sizeof(struct entry));
strcpy(child_value->base,working_value->base);
strcpy(child_value->pre_terminal,temp_preterminal);
child_value->pivot = i;
child_value->num_strings = working_value->num_strings;
child_value->probability = reprobability;
child_value->next = NULL;
child_value->prev = tail;
tail->next = child_value;
tail = child_value;
}
} //Ket thuc for theo bien chay i
//Sau do thi giai phong entry working_value.
if(working_value->prev == NULL)
{
if(working_value->next == NULL)
{
free(working_value);
head = tail = NULL;
}
else
{
(working_value->next)->prev = NULL;
head = (working_value->next);
free(working_value);
}
}
else
{
if(working_value->next == NULL)
{
(working_value->prev)->next = NULL;
tail = working_value->prev;
free(working_value);
}
else
{
(working_value->next)->prev = working_value->prev;
(working_value->prev)->next = working_value->next;
free(working_value);
}
}
working_value = Pop(head);
} // Ket thuc vong lap while
if(light == 6)
{
cout << "\nThe end ...\n";
}
}
void checkCandidatePasswords()
{
int P[60]={0};
string password = "";
int passLen;
cin.get();
printf("\nNhap mat khau kiem tra:\n");
getline(cin, password);
passLen = password.length();
for(int i = 0; i < passLen; i++)
P[i] = password[i];
if(PBKDF2_1(S,saltLen,stored_pvv,dkLen,P, passLen) != 0)
printf("\nLa mat khau ung cu");
else
printf("\nKhong phai la mat khau ung cu");
}
int main(int argc, char *argv[]){
int isEncrypted = 0;
char ch;
int count;
while(1)
{
printf("\n1.Thong tin co ban cua tep nen Zip va van pham");
printf("\n2.Kiem tra mot mat khau la ung cu");
printf("\n3.Sinh mat khau tuan tu");
printf("\n4.Tap mat khau ung cu - tt tuan tu");
printf("\n5.Sinh mat khau song song");
printf("\n6.Pha mat khau song song");
printf("\n7.Thoat chuong trinh");
printf("\nLua chon chuc nang(1->7):");
fflush(stdin);
fflush(stdin);
ch = getchar();
switch(ch)
{
case '1':
isEncrypted = checkInfo(argv[1]);
printf("\nisEncrypted = %d", isEncrypted);
if (isEncrypted == 1) readGrammar(argv[2], argv[3], &count);
cin.get();
break;
case '2':
if(isEncrypted == 1)
{
checkCandidatePasswords();
}
else
{
printf("\nPhai goi chuc nang 1 truoc");
}
cin.get();
break;
case '3':
multiFunction(3,count);
cin.get();
break;
case '4':
multiFunction(4,count);
cin.get();
break;
case '5':
multiFunction(5,count);
cin.get();
break;
case '6':
if (isEncrypted == 1)
{
multiFunction(6,count);
}
else
{
printf("\nPhai goi chuc nang 1 truoc");
}
cin.get();
break;
case '7':exit(1);
}
}
}
| bc4a4e6de1d7b156299076a22c5c794519512f07.cu | #include <iostream>
#include <string>
#include <string.h>
#include <stdlib.h>
#include <sstream>
/*
#include <multithreading.h>
#include <cutil.h>
#include <cutil_inline.h>
#include <cuda_runtime_api.h>
*/
#include <cuda.h>
#include <cuda_runtime.h>
#include <helper_cuda.h>
#include <helper_timer.h>
#include <helper_functions.h>
/*#include <helper_cuda_drvapi.h>*/
/*#include <nvrtc_helper.h>*/
#include <multithreading.h>
#include "crackkernel.cu"
#include "entry.cpp"
#include "zip.c"
#include "pbkdf2.cpp"
using namespace std;
dim3 threads(128);
dim3 blocks(12,10);
int threadcount = threads.x*blocks.x*blocks.y;
//Chot dung de chia cot cua hang.
int width = 80;
unsigned char extension,in[CHUNK],found=0;
/*Variables storing information about the encrypted file
- array holding the password verification value (PVV)
- array holding the salt value
*/
int dkLen,saltLen,len;
int stored_pvv[2];
int S[16];
/*Danh cho da GPU, lay ve so GPU duoc CUDA ho tro, co the dung de tinh toan*/
const int MAX_GPU_COUNT = 8;
int GPU_N;
CUTThread threadID[MAX_GPU_COUNT];
TGPUplan plan[MAX_GPU_COUNT];
char *emptyArr;
char temp2[20];
float time_parallel = 0;
//Chua cac tu trong tu dien
char hostArray[869229][80];
/*Ket thuc*/
fcrypt_ctx h_zcx[1];
/*Bien chua thong tin van pham*/
int v_prev, v_curr;
int Sk[33];
/*Ket thuc bien chua thong tin van pham*/
void initHost(){
/*Hien thi so GPU duoc ho tro*/
checkCudaErrors(cudaGetDeviceCount(&GPU_N));
if(GPU_N > MAX_GPU_COUNT) GPU_N = MAX_GPU_COUNT;
printf("\nCUDA-capable device count: %i\n", GPU_N);
/*Ket thuc qua trinh hien thi*/
emptyArr = (char*)malloc(sizeof(char)*width*threadcount);
memset(emptyArr, '\0', sizeof(char)*width*threadcount);
for (int i=0;i<GPU_N;i++)
{
//khoi tao plan->device
plan[i].device=i;
// Chuong trinh moi giai quyet van de la quantities = 1
plan[i].quantities = 1;
}
//khoi tao cho zcx
h_zcx->mode = 1;
h_zcx->encr_pos = BLOCK_SIZE;
memset(h_zcx->nonce, 0, BLOCK_SIZE * sizeof(unsigned char));
}
void freeCUDA()
{
for (int i=0;i<GPU_N;i++)
{
cudaFree(plan[i].devPass);
cudaFree(plan[i].d_pre_terminal);
cudaFree(plan[i].deviceArrPtr);
cudaFree(plan[i].d_salt);
cudaFree(plan[i].d_pvv);
cudaFree(plan[i].d_in);
cudaFree(plan[i].d_out);
}
}
static CUT_THREADPROC solverThread(TGPUplan *plan){
/******************************************************************
Khai bao bien
******************************************************************/
//devPitch - truyen vao nhung khi lay gia tri ra thi lai khong dung den no.
size_t devPitch;
int pivot_base = 0;
int ret[threadcount];
//Khai bao mang hostPass de hien thi cac mat khau tra ve.
char hostPass[threadcount][80];
memset(hostPass,'\0', sizeof(char)*threadcount*80);
/*****************************************************************
Ket thuc khai bao bien
******************************************************************/
memset(ret,-1,sizeof(int)*threadcount);
/*****************************************************************
Cap phat bo nho tren moi GPU, truyen du lieu can cho tinh toan tu Host len Device
*****************************************************************/
//Set device
checkCudaErrors(cudaSetDevice(plan->device));
cudaMallocPitch((void**)&plan->devPass, &devPitch, width * sizeof(char), threadcount);
//Khoi tao plan->deviceArrPtr cho moi GPU
cudaMallocPitch((void**)&plan->deviceArrPtr, &devPitch, width * sizeof(char), plan->wordCount);
cudaMemcpy2D(plan->deviceArrPtr, width*sizeof(char), hostArray + plan->startIndex, width*sizeof(char), width, plan->wordCount, cudaMemcpyHostToDevice);
//Khoi tao gia tri kiem tra mat khau tren moi GPU
cudaMalloc((void**)&plan->d_salt, sizeof(int) * 16);
cudaMemcpy(plan->d_salt, S, sizeof(int) * 16, cudaMemcpyHostToDevice);
cudaMalloc((void**)&plan->d_pvv, sizeof(int) * 2);
cudaMemcpy(plan->d_pvv, stored_pvv, sizeof(int) * 2, cudaMemcpyHostToDevice);
cudaMalloc((void**)&plan->d_pre_terminal, sizeof(char) * strlen(temp2));
cudaMemcpy(plan->d_pre_terminal, temp2, sizeof(char) * strlen(temp2), cudaMemcpyHostToDevice);
cudaMalloc((void**)&plan->d_out, threadcount*CHUNK*sizeof(unsigned char));
cudaMalloc((void**)&plan->d_in,threadcount*CHUNK*sizeof(unsigned char));
cudaMalloc((void**)&plan->d_Key,threadcount*16*sizeof(unsigned char));
cudaMalloc((void**)&plan->d_ret,threadcount*sizeof(unsigned int));
plan->Key = (unsigned char *)malloc(sizeof(unsigned char)*16*threadcount);
//cap phat bo nho cho phan giai ma
cudaMalloc((void**)&plan->d_zcx,threadcount*sizeof(fcrypt_ctx));
cudaMalloc((void**)&plan->d_acx,threadcount*sizeof(aes_ctx));
//cap phat bo nho cho phan giai nen
cudaMalloc((void**)&plan->d_strm, threadcount*sizeof(z_stream));
cudaMalloc((void**)&plan->d_state,threadcount*sizeof(struct inflate_state FAR));
/****************************************************************
Ket thuc qua trinh truyen du lieu
*****************************************************************/
/****************************************************************
Qua trinh goi Kernel nhieu lan, viec goi la doc lap giua cac Device
*****************************************************************/
pivot_base = plan->device*threadcount;
//checkCudaErrors(cudaThreadSynchronize());
while((pivot_base < plan->wordCount)&&(!found))
{
//Reset lai cac gia tri truoc moi lan chay Kernel
cudaMemcpy2D(plan->devPass, width*sizeof(char), emptyArr,width*sizeof(char), width, threadcount, cudaMemcpyHostToDevice);
cudaMemset (plan->d_out, 0, threadcount*CHUNK);
for (int i=0;i<threadcount;i++) {
cudaMemcpy(plan->d_in+i*CHUNK, in, CHUNK*sizeof(unsigned char), cudaMemcpyHostToDevice);
cudaMemcpy(plan->d_zcx+i, h_zcx, sizeof(fcrypt_ctx), cudaMemcpyHostToDevice);}
cudaMemset (plan->d_ret, -1, threadcount*sizeof(int));
//chay kernel
RunKernel<<<blocks, threads>>>(pivot_base, plan->devPass,plan->deviceArrPtr, width, plan->quantities, plan->wordCount, plan->d_pre_terminal,strlen(temp2), plan->d_salt, saltLen, plan->d_pvv, dkLen,plan->d_in,len,plan->d_out,extension,plan->d_Key,plan->d_ret,plan->d_zcx,plan->d_acx,plan->d_strm,plan->d_state);
cudaError_t error = cudaGetLastError();
if (error != cudaSuccess)
{
fprintf(stderr, "#DEVICE ERROR#: %s\n", cudaGetErrorString(error));
freeCUDA();
return ;
}
else
{
//Cap nhat lai pivot_base
pivot_base += GPU_N*threadcount;
cudaMemcpy2D(hostPass, width*sizeof(char), plan->devPass, width*sizeof(char),width,threadcount, cudaMemcpyDeviceToHost);
cudaMemcpy(ret,plan->d_ret,sizeof(int)*threadcount,cudaMemcpyDeviceToHost);
cudaMemcpy(plan->Key,plan->d_Key,sizeof(unsigned char)*16*threadcount,cudaMemcpyDeviceToHost);
//cout << "\n----------------------------------------------------------------------\n";
//cout << "\tTong thoi gian: " << cutGetTimerValue(timer) << "ms";
//cout << "\t" << pivot_base << "/" << GPU_N << " ma da thu.\n";
for (int i1=0; i1 < threadcount; i1++)
if (strcmp(hostPass[i1], "") != 0) //Tim thay ma giai
{
cout << "\nThe correct password is: ";
cout << hostPass[i1] << "\n";
found=1;
}
}
checkCudaErrors(cudaThreadSynchronize());
}
/*****************************************************************
Ket thuc qua trinh goi kernel nhieu lan, doc lap giua cac Device.
*****************************************************************/
cudaFree(plan->devPass);
cudaFree(plan->d_pre_terminal);
cudaFree(plan->deviceArrPtr);
cudaFree(plan->d_salt);
cudaFree(plan->d_pvv);
cudaFree(plan->d_out);
cudaFree(plan->d_in);
cudaFree(plan->d_Key);
cudaFree(plan->d_ret);
cudaFree(plan->d_zcx);
cudaFree(plan->d_acx);
cudaFree(plan->d_strm);
cudaFree(plan->d_state);
free(plan->Key);
/*****************************************************************
Lenh dinh thoi gian va lenh thoat tat ca cac tien trinh
******************************************************************/
cudaThreadExit();
CUT_THREADEND;
/*****************************************************************
Ket thuc
******************************************************************/
}
void crack(){
/*
unsigned int timer=0;
cutCreateTimer(&timer);
cutStartTimer(timer);
*/
/*Moi tien trinh tren CPU quan ly mot GPU, ta co GPU_N nen can co GPU_N tien trinh song song tren Host quan ly */
for(int GPUIndex = 0; GPUIndex < GPU_N; GPUIndex++)
threadID[GPUIndex] = cutStartThread((CUT_THREADROUTINE)solverThread, &plan[GPUIndex]);
printf("main(): waiting...\n");
/*
cutWaitForThreads(threadID, GPU_N);
cout <<cutGetTimerValue(timer) << "ms\n";
cout << "\n---------------------------------------------------------------------------------------------------------------\n";
time_parallel += cutGetTimerValue(timer);
cutStopTimer(timer);
cutDeleteTimer(timer);
*/
}
void readGrammar(char *filename1, char *filename2, int *count)
{
memset(Sk, 0, 33*sizeof(int));
printf("\n--> Nap tap luat sinh mat khau ....");
*count = ReadRules(filename1); //argv[2]
printf("OK \n");
FILE *fp;
char buffer[80] = "";
fp =fopen(filename2, "r"); //argv[3]
//Khoi tao hostArray.
if (fp != NULL)
{
int h = 0;
while(fgets(buffer, sizeof(buffer), fp) != NULL)
{
if(h==0)
{
v_prev= v_curr = strlen(buffer)-1;
Sk[v_curr] = h;
}
v_curr = strlen(buffer)-1;
if(v_curr != v_prev)
{
Sk[v_curr] = h;
v_prev = v_curr;
}
strcpy(hostArray[h], buffer);
printf(">>> read buffer: %s\n", hostArray[h]);
h++;
strcpy(buffer, "");
}
fclose(fp);
}
}
int checkInfo(char *filename)
{
ZIP_LOCAL_FILE_HEADER* lfh;
FILE* pt;
pt = fopen(filename, "rb");
lfh = (ZIP_LOCAL_FILE_HEADER*) malloc(sizeof(ZIP_LOCAL_FILE_HEADER));
if(!pt) return -1;
read_lfh(lfh, pt);
if(get_bit_encrypted_ornot(lfh, pt) != 1)
{
cout<< "File is not encrypted";
return -1;
}
else
{
char *cp;
cp = strrchr(get_fname(lfh, pt), '.');
if (strcmp(cp, ".pdf")==0) extension = dotpdf;
if (strcmp(cp, ".doc")==0) extension = dotdoc;
if (strcmp(cp, ".txt")==0) extension = dottxt;
*cp=0;
printf("File is encrypted , parameters:");
/*---------------------------------------------------------------------------------------------
Lay gia tri salt, authentication code, password verification value chi co, khi file da encrypt
----------------------------------------------------------------------------------------------*/
display_salt_pvv_ac(lfh,pt,S,&saltLen,stored_pvv,&dkLen);
fseek(pt, 30 + lfh->fn_length + lfh->ex_length + SALT_LENGTH(1) + PWD_VER_LENGTH, SEEK_SET);
len = (int)fread(in, sizeof(unsigned char),CHUNK, pt);
fclose(pt);
}
return 1;
}
void multiFunction(int light, int count)
{
struct entry *working_value = NULL;
struct entry *head = NULL;
struct entry *tail = NULL;
int status = 0;
if(light == 6)
{
//Goi khoi tao host mot lan
initHost();
}
char temp_preterminal[20] = "";
char search_characters[4]="";
char temp_base[20]="";
//Xay dung cay va duyet cau truc dang pre-terminal
//1. Phan 1: all base structs
for(int i = 1; i< count; i++)
{
if(strcmp(Column1[i],"S") == 0)
{
//Xoa search_character va temp_terminal
strcpy(search_characters,"");
strcpy(temp_preterminal,"");
working_value = (struct entry *)malloc(sizeof(struct entry));
strcpy(working_value->base,Column2[i]);
working_value->pivot = 0;
working_value->num_strings = 0;
for(int j = 0; j< strlen(Column2[i]); j++)
{
if(Column2[i][j] == 'L' || Column2[i][j] == 'S' || Column2[i][j]=='D')
working_value->num_strings++;
}
//Tinh xac suat va pre_terminal
working_value->probability = Column3[i];
//Duyet cau truc cua Column2 de tinh xac suat.
int k;
char temp[2];
for(int j = 0; j< strlen(Column2[i]);)
{
k = 0;
search_characters[k] = Column2[i][j++];
while((Column2[i][j] != 'D') && (Column2[i][j] != 'L') && (Column2[i][j] != 'S'))
{
search_characters[++k] = Column2[i][j++];
if(Column2[i][j] == '\0') break;
}
//Thoat co nghia vi tri j da la bat dau mot ki tu moi, nhung chua gan. k tang len mot gia tri
search_characters[++k] = '\0';
//Kiem tra ki tu dau co phai la ki tu L. Neu la L chi cap nhat lai xau pre_terminal de phan biet. Khong
//cap nhat xac suat.
if (search_characters[0] == 'L')
{
temp[0] = 'H';
temp[1] = '\0';
strcat(temp_preterminal, temp);
strcat(temp_preterminal,search_characters);
strcat(temp_preterminal, temp);
}
else
{
//Neu khong phai, thi tim kiem va cap nhat lai xac suat
for(int t = 1; t < count; t ++)
{
if(strcmp(Column1[t],search_characters) == 0)
{
strcat(temp_preterminal,Column2[t]);
working_value->probability = working_value->probability * Column3[t];
break;
}
}
} //Ket thuc la ki tu D hoac S
//Cap nhat xac suat lon nhat roi thoat
}// Het vong for, thi da xac dinh duoc xac suat, va dong thoi la pre_terminal
strcpy(working_value->pre_terminal,temp_preterminal);
//Buoc cuoi cua giai doan 1: Them no vao queue uu tien
if(status ==0)
{
working_value->next = NULL;
working_value->prev = NULL;
head = tail = working_value;
status = 1;
}
else
{
//Them vao cuoi queue
working_value->next = NULL;
working_value->prev = tail;
tail->next = working_value;
tail = working_value;
}
working_value = NULL;
}
else
{
break;
} //ket thuc cua if-else
} //Ket thuc cua for.
/*Step 2. Grow the tree while emitting the list of candidate passwords that feeds the PBKDF2 routine.
This could be moved into a function of its own for a cleaner functional decomposition.
The search could be optimized so that Pop runs faster.
The algorithm itself can be parallelized, much like a parallel search over a list.
*/
int order=0;
working_value = Pop(head);
if(light == 6)
{
printf("\n%-12s %-15s %-10s %-15s %-15s %-15s %-15s %-15s\n","Base","pre_terminal","pivot","num_strings","probability","order", "Keys","Time");
cout << "\n----------------------------------------**-----------------------------------**----------------------------------\n";
}
else if(light == 3)
{
printf("\n%-12s %-15s %-10s %-15s %-15s %-15s\n","Base","pre_terminal","pivot","num_strings","probability","order");
cout << "\n-------------------------------**----------------------------**-----------------------------\n";
}
while((working_value != NULL)&&(!found))
{
order++;
int qualities = 0;
int sk;
for(int h = 0; h< strlen(working_value->pre_terminal); h++)
if(working_value->pre_terminal[h] == 'L')
{
qualities++;
sk = (int)working_value->pre_terminal[h + 1] - 48;
}
strcpy(temp2, working_value->pre_terminal);
if(light == 6)
{
/* truyen cac thong so pre_terminal lay duoc tu thao tac Pop sang devce - GPU_N device*/
for(int deviceIndex = 0; deviceIndex < GPU_N; deviceIndex++)
{
plan[deviceIndex].wordCount = Sk[sk+1] - Sk[sk];
plan[deviceIndex].startIndex = Sk[sk];
}
/*Goi song song GPU_N tien trinh tren CPU quan ly GPU_N GPU*/
//Sinh cac mat khau bang cach ghep cau truc pre_terminal voi tu dien chua cac tu co nghia.
printf("\n%-12s %-15s %-10d %-15d %-15f %-15d %-15ld",working_value->base,working_value->pre_terminal,
working_value->pivot,working_value->num_strings, working_value->probability,order,Sk[sk+1] - Sk[sk]);
crack();
}
else if(light == 3)
{
printf("%-12s %-15s %-10d %-15d %-15f %-15d\n",working_value->base,working_value->pre_terminal,
working_value->pivot,working_value->num_strings, working_value->probability,order);
/*DisplayGuestPassword(working_value->pre_terminal, strlen(working_value->pre_terminal),hostArray,1, Sk[sk], Sk[sk+1], S, saltLen, stored_pvv, dkLen,3);*/
}
else if(light == 4)
{
printf("%-12s %-15s %-10d %-15d %-15f %-15d\n",working_value->base,working_value->pre_terminal,
working_value->pivot,working_value->num_strings, working_value->probability,order);
/*DisplayGuestPassword(working_value->pre_terminal, strlen(working_value->pre_terminal),hostArray,1, Sk[sk], Sk[sk+1], S, saltLen, stored_pvv, dkLen,4);*/
}
//Tiep tuc xay dung cay, insert va pop entry
for(int i= working_value->pivot; i< working_value->num_strings; i++)
{
strcpy(temp_base, working_value->base); // temp_base = "D1L3S2D1"
/*Khai bao du lieu, chi co pham vi trong vong for nay */
int k; // Chi so chay trung gian
char temp[2]; // temp[2] = 'L' || 'S' || 'D'
char temp1[2]; // temp1[2] = 'H' -> Dung trong phan cach L.
int index = -1; // index cua variable, chi biet co replace duoc hay khong.
strcpy(temp_preterminal,""); // xoa xau temp_preterminal, de sau do dung lai (khai bao gan ham main)
// child_value->pre_terminal = temp_preterminal.
int segment = 0; // chi so base, cho biet cat tu xau base tu dau den dau.
// vi du 4L3$$4, cat S2 tu dau den dau
char temp_copy[10]; // xau tu segment cho den het (segment + (int)atoi(search_characters)
/*Phan tich temp_base de lay chu so va chi thi la D, L hay S. No cho phep minh biet cach doc bao nhieu ki
tu tu cau truc pre_terminal cua working_value sang child_working_value*/
//Bien cho biet co chen them vao duoc hay khong
bool agreement = false;
float reprobability = working_value->probability;
for(int j = 0; j < strlen(temp_base);)
{
strcpy(search_characters,"");// xoa search_characters, dung lai bien o phan tren.
// chang han search_characters = 1 hoac 2 hoac 1, nho loc bo ki tu
// D truoc D1, ki tu S truoc S2, ki tu D truoc D1 cua temp_base.
/* Lay ki tu dau tien cua temp_base*/
k=0;
temp[0] = temp_base[j];
temp[1] = '\0';
/*end */
j = j +1;
while((temp_base[j] != 'D') && (temp_base[j] != 'L') && (temp_base[j] != 'S'))
{
search_characters[k++] = temp_base[j++];
if(temp_base[j] == '\0') break;
}
//Ket thuc xau
search_characters[k] = '\0';
index++;
//temp_preterminal
if(temp[0] == 'L')
{
if(index == i)
{
agreement = false;
break; //Thoat ra khoi for theo j.
}
temp1[0] = 'H';
temp1[1] = '\0';
strcat(temp_preterminal, temp1);
strcat(temp_preterminal, temp);
strcat(temp_preterminal, search_characters);
strcat(temp_preterminal, temp1);
//Phai cap nhat lai segment
segment = segment + 3 + strlen(search_characters);
}
else
{
//Phai tinh den so sanh index voi chi so i.
if(index != i)
{
//Chi don thuan la copy cau truc tu vi tri segment cho den het (segment + (int)atoi(search_characters))
strcpy(temp_copy,""); // Chi luu tru tam thoi
int q;
for(q = segment; q < segment + (int)atoi(search_characters); q++)
{
temp_copy[q-segment] = working_value->pre_terminal[q];
}
temp_copy[q-segment] = '\0';
//Cap nhat lai segment, de cho lan copy sau.
segment = segment + (int)atoi(search_characters);
strcat(temp_preterminal, temp_copy);
}
else if(temp[0] == 'L')
{
agreement = false;
break; //Thoat ra khoi for theo j.
}
else //Neu vao trong day ma khong thay the xau moi thi huy bo.
{
//Ghep giua temp voi search_characters lai voi nhau de ra dang, chang han nhu S2 => Goi la search_str.
//Trich xuat ki tu o working_value->pre_terminal, tai vi tri segment den segment + (int)atoi(search_characters).
//duoc goi la pointed_str. Neu thay the duoc, thi cap nhat luon xac suat cua no, dong thoi tao ra them duoc nut
//moi
char search_str[4];
char pointed_str[4];
strcpy(search_str,temp);
strcat(search_str,search_characters);
strcpy(temp_copy,""); //ok da xoa temp_copy
int q;
for(q = segment; q < segment + (int)atoi(search_characters); q++)
{
temp_copy[q-segment] = working_value->pre_terminal[q];
}
temp_copy[q-segment] = '\0';
strcpy(pointed_str, temp_copy);
//Tim kiem de thay the. Chu yeu la do tim vi tri d.
for(int d = 1; d < count; d++)
{
if(strcmp(Column1[d],search_str)==0)
{
if(strcmp(Column2[d], pointed_str)==0)
{
segment += strlen(pointed_str);
if( (d+1 < count) && (strcmp(Column1[d+1],search_str)==0))
{
//Them moi duoc, nghia la con ki tu thay the, xu ly tai day
//Neu thay the duoc, thi copy cho den het j
strcat(temp_preterminal,Column2[d+1]);
// Tinh lai xac suat
reprobability = (reprobability*Column3[d+1])/Column3[d];
agreement = true;
break;
}
else
{
//Vi tri nay da het cho. Quay tro ve tang i len, cho den het.
agreement = false;
break;
}
}
}
} //Ket thuc for tim kiem xau thay the
} //Ket thuc else - index
} //Ket thuc else - L
} //Ket thuc vong lap theo temp_base.
if(agreement == true)
{
//Them moi vao cuoi danh sach dang xet.
struct entry *child_value;
child_value = (struct entry *)malloc(sizeof(struct entry));
strcpy(child_value->base,working_value->base);
strcpy(child_value->pre_terminal,temp_preterminal);
child_value->pivot = i;
child_value->num_strings = working_value->num_strings;
child_value->probability = reprobability;
child_value->next = NULL;
child_value->prev = tail;
tail->next = child_value;
tail = child_value;
}
} //Ket thuc for theo bien chay i
//Sau do thi giai phong entry working_value.
if(working_value->prev == NULL)
{
if(working_value->next == NULL)
{
free(working_value);
head = tail = NULL;
}
else
{
(working_value->next)->prev = NULL;
head = (working_value->next);
free(working_value);
}
}
else
{
if(working_value->next == NULL)
{
(working_value->prev)->next = NULL;
tail = working_value->prev;
free(working_value);
}
else
{
(working_value->next)->prev = working_value->prev;
(working_value->prev)->next = working_value->next;
free(working_value);
}
}
working_value = Pop(head);
} // Ket thuc vong lap while
if(light == 6)
{
cout << "\nThe end ...\n";
}
}
void checkCandidatePasswords()
{
int P[60]={0};
string password = "";
int passLen;
cin.get();
printf("\nNhap mat khau kiem tra:\n");
getline(cin, password);
passLen = password.length();
for(int i = 0; i < passLen; i++)
P[i] = password[i];
if(PBKDF2_1(S,saltLen,stored_pvv,dkLen,P, passLen) != 0)
printf("\nLa mat khau ung cu");
else
printf("\nKhong phai la mat khau ung cu");
}
int main(int argc, char *argv[]){
int isEncrypted = 0;
char ch;
int count;
while(1)
{
printf("\n1.Thong tin co ban cua tep nen Zip va van pham");
printf("\n2.Kiem tra mot mat khau la ung cu");
printf("\n3.Sinh mat khau tuan tu");
printf("\n4.Tap mat khau ung cu - tt tuan tu");
printf("\n5.Sinh mat khau song song");
printf("\n6.Pha mat khau song song");
printf("\n7.Thoat chuong trinh");
printf("\nLua chon chuc nang(1->7):");
fflush(stdin);
fflush(stdin);
ch = getchar();
switch(ch)
{
case '1':
isEncrypted = checkInfo(argv[1]);
printf("\nisEncrypted = %d", isEncrypted);
if (isEncrypted == 1) readGrammar(argv[2], argv[3], &count);
cin.get();
break;
case '2':
if(isEncrypted == 1)
{
checkCandidatePasswords();
}
else
{
printf("\nPhai goi chuc nang 1 truoc");
}
cin.get();
break;
case '3':
multiFunction(3,count);
cin.get();
break;
case '4':
multiFunction(4,count);
cin.get();
break;
case '5':
multiFunction(5,count);
cin.get();
break;
case '6':
if (isEncrypted == 1)
{
multiFunction(6,count);
}
else
{
printf("\nPhai goi chuc nang 1 truoc");
}
cin.get();
break;
case '7':exit(1);
}
}
}
|
212bf7654b3c36b43250b2f67913943b062e5601.hip | // !!! This is a file automatically generated by hipify!!!
/*
* nvcc `pkg-config --cflags opencv` main.cu `pkg-config --libs opencv` -o main.out
* ./main.out doge.jpg
*/
#include <iostream>
#include <string>
#include <stdio.h>
#include "helper.h"
#include "timeHelper.h"
#include "converter.cuh"
void cudaStub(std::string inputFileName, std::string outputFileName);
void openCvStub(std::string inputFileName, std::string outputFileName);
void cpuStub(std::string inputFileName, std::string outputFileName);
/**
* The main method is a stub which takes input file name and calls
* all three stubs (openCv, cpu, and gpu) for further processing
*/
int main(int argc, char **argv) {
std::string inputFileName;
std::string CudaOutputFileName = "./greyscale_output_cuda.png";
std::string openCvOutputFileName = "./greyscale_output_opencv.png";
std::string cpuOutputFileName = "./greyscale_output_serial.png";
switch (argc){
case 2:
inputFileName = std::string(argv[1]);
break;
default:
std::cerr << "Usage: ./grayscale input_file" << std::endl;
exit(1);
}
cpuStub(inputFileName, cpuOutputFileName);
openCvStub(inputFileName, openCvOutputFileName);
//int p;
//for(p=0;p<10;p++)
cudaStub(inputFileName, CudaOutputFileName);
//free the memory on the device
releaseCudaMemory();
return 0;
}
/**
* This is the CUDA stub that calls the method to reserve memory on the device and the host and
* then transfer the input from host to device.
* Then it calls another method which sets up the grid and block size and in turn calls
* the CUDA kernel
*/
void cudaStub(std::string inputFileName, std::string outputFileName) {
uchar4 *h_rgbaImage, *d_rgbaImage;
unsigned char *h_greyImage, *d_greyImage;
//Loading the image into the device memory
initialSetup(&h_rgbaImage, &h_greyImage, &d_rgbaImage, &d_greyImage, inputFileName);
printf("The number of rows in the image:%d\n",numRows());
printf("The number of cols in the image:%d\n",numCols());
printf("The total number of pixls in the image:%d\n",numRows()*numCols());
//Starting the timer to measure the performance
GpuTimer timer;
timer.Start();
//call to the stub that will call the kernel
colorToGreyCuda(h_rgbaImage, d_rgbaImage, d_greyImage, numRows(), numCols());
//call to kernel is non blocking, so lets put sync primitive
hipDeviceSynchronize();
timer.Stop();
checkCudaErrors(hipGetLastError());
int err = printf("Time taken by CUDA conversion: %f msecs.\n", timer.Elapsed());
if (err < 0) {
//Error in printing
std::cerr << "ERROR! STDOUT is CLOSED" << std::endl;
exit(1);
}
size_t numPixels = numRows()*numCols();
checkCudaErrors(hipMemcpy(h_greyImage, d_greyImage, sizeof(unsigned char) * numPixels, hipMemcpyDeviceToHost));
//output the image
finalTouches(outputFileName, h_greyImage);
}
/**
* This is the openCv stub that converts the input BGR image to Greyscale
*/
void openCvStub(std::string inputFileName, std::string outputFileName) {
cv::Mat image;
image = cv::imread(inputFileName.c_str(), CV_LOAD_IMAGE_COLOR);
if (image.empty()) {
std::cerr << "error opening file: " << inputFileName << std::endl;
exit(1);
}
GpuTimer timer;
timer.Start();
//Convert BGR to RGBA (the greyscale output buffer is allocated below)
cv::cvtColor(image, imageRGBA, CV_BGR2RGBA);
//allocate memory for the output
imageGrey.create(image.rows, image.cols, CV_8UC1);
timer.Stop();
int err = printf("Time taken by OPEN CV conversion: %f msecs.\n", timer.Elapsed());
//output the image
cv::imwrite(outputFileName.c_str(), imageGrey);
}
/**
* Does the initial processing for cpu and in turn calls the method which actually converts
* RGBA image to Grey pixel by pixel serially
*/
void cpuStub(std::string inputFileName, std::string outputFileName) {
uchar4 *h_rgbaImage, *d_rgbaImage;
unsigned char *h_greyImage, *d_greyImage;
//Load the image for input and output
initialSetup(&h_rgbaImage, &h_greyImage, &d_rgbaImage, &d_greyImage, inputFileName);
GpuTimer timer;
timer.Start();
rgbaToGreyscaleCpu(h_rgbaImage, h_greyImage, numRows(), numCols());
timer.Stop();
int err = printf("Time taken by SERIAL conversion: %f msecs.\n", timer.Elapsed());
if (err < 0) {
//error in printing
std::cerr << "Error! STDOUT is Closed!" << std::endl;
exit(1);
}
finalTouches(outputFileName, h_greyImage);
}
| 212bf7654b3c36b43250b2f67913943b062e5601.cu | /*
* nvcc `pkg-config --cflags opencv` main.cu `pkg-config --libs opencv` -o main.out
* ./main.out doge.jpg
*/
#include <iostream>
#include <string>
#include <stdio.h>
#include "helper.h"
#include "timeHelper.h"
#include "converter.cuh"
void cudaStub(std::string inputFileName, std::string outputFileName);
void openCvStub(std::string inputFileName, std::string outputFileName);
void cpuStub(std::string inputFileName, std::string outputFileName);
/**
* The main method is a stub which takes input file name and calls
* all three stubs (openCv, cpu, and gpu) for further processing
*/
int main(int argc, char **argv) {
std::string inputFileName;
std::string CudaOutputFileName = "./greyscale_output_cuda.png";
std::string openCvOutputFileName = "./greyscale_output_opencv.png";
std::string cpuOutputFileName = "./greyscale_output_serial.png";
switch (argc){
case 2:
inputFileName = std::string(argv[1]);
break;
default:
std::cerr << "Usage: ./grayscale input_file" << std::endl;
exit(1);
}
cpuStub(inputFileName, cpuOutputFileName);
openCvStub(inputFileName, openCvOutputFileName);
//int p;
//for(p=0;p<10;p++)
cudaStub(inputFileName, CudaOutputFileName);
//free the memory on the device
releaseCudaMemory();
return 0;
}
/**
* This is the CUDA stub that calls the method to reserve memory on the device and the host and
* then transfer the input from host to device.
* Then it calls another method which sets up the grid and block size and in turn calls
* the CUDA kernel
*/
void cudaStub(std::string inputFileName, std::string outputFileName) {
uchar4 *h_rgbaImage, *d_rgbaImage;
unsigned char *h_greyImage, *d_greyImage;
//Loading the image into the device memory
initialSetup(&h_rgbaImage, &h_greyImage, &d_rgbaImage, &d_greyImage, inputFileName);
printf("The number of rows in the image:%d\n",numRows());
printf("The number of cols in the image:%d\n",numCols());
printf("The total number of pixls in the image:%d\n",numRows()*numCols());
//Starting the timer to measure the performance
GpuTimer timer;
timer.Start();
//call to the stub that will call the kernel
colorToGreyCuda(h_rgbaImage, d_rgbaImage, d_greyImage, numRows(), numCols());
//call to kernel is non blocking, so lets put sync primitive
cudaDeviceSynchronize();
timer.Stop();
checkCudaErrors(cudaGetLastError());
int err = printf("Time taken by CUDA conversion: %f msecs.\n", timer.Elapsed());
if (err < 0) {
//Error in printing
std::cerr << "ERROR! STDOUT is CLOSED" << std::endl;
exit(1);
}
size_t numPixels = numRows()*numCols();
checkCudaErrors(cudaMemcpy(h_greyImage, d_greyImage, sizeof(unsigned char) * numPixels, cudaMemcpyDeviceToHost));
//output the image
finalTouches(outputFileName, h_greyImage);
}
/**
* This is the openCv stub that converts the input BGR image to Greyscale
*/
void openCvStub(std::string inputFileName, std::string outputFileName) {
cv::Mat image;
image = cv::imread(inputFileName.c_str(), CV_LOAD_IMAGE_COLOR);
if (image.empty()) {
std::cerr << "error opening file: " << inputFileName << std::endl;
exit(1);
}
GpuTimer timer;
timer.Start();
//Convert BGR to RGBA (the greyscale output buffer is allocated below)
cv::cvtColor(image, imageRGBA, CV_BGR2RGBA);
//allocate memory for the output
imageGrey.create(image.rows, image.cols, CV_8UC1);
timer.Stop();
int err = printf("Time taken by OPEN CV conversion: %f msecs.\n", timer.Elapsed());
//output the image
cv::imwrite(outputFileName.c_str(), imageGrey);
}
/**
* Does the initial processing for cpu and in turn calls the method which actually converts
* RGBA image to Grey pixel by pixel serially
*/
void cpuStub(std::string inputFileName, std::string outputFileName) {
uchar4 *h_rgbaImage, *d_rgbaImage;
unsigned char *h_greyImage, *d_greyImage;
//Load the image for input and output
initialSetup(&h_rgbaImage, &h_greyImage, &d_rgbaImage, &d_greyImage, inputFileName);
GpuTimer timer;
timer.Start();
rgbaToGreyscaleCpu(h_rgbaImage, h_greyImage, numRows(), numCols());
timer.Stop();
int err = printf("Time taken by SERIAL conversion: %f msecs.\n", timer.Elapsed());
if (err < 0) {
//error in printing
std::cerr << "Error! STDOUT is Closed!" << std::endl;
exit(1);
}
finalTouches(outputFileName, h_greyImage);
}
|
6c4ebcb48d8a52ebdc13e06005040d84fe772db0.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include <random>
#include <time.h>
#include <math.h>
#define DIM 20
#define BlockSize 32
__global__ void multi(int *A, int *B, int *C)
{
int cvalue = 0;
//int cwidth = blockDim.x*gridDim.x, awidth = blockDim.x*gridDim.x, bwidth = blockDim.x*gridDim.x;
int col = blockIdx.x * blockDim.x + threadIdx.x;
int row = blockIdx.y * blockDim.y + threadIdx.y;
//int offset = iy*(blockDim.x*gridDim.x) + ix;
if (row >= DIM || col >= DIM) return;
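// Each thread computes one output element C[row][col] as the dot product of row 'row' of A with column 'col' of B.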
for (int e = 0; e < DIM; ++e){
cvalue += A[row*DIM + e] * B[e*DIM + col];
}
C[row*DIM + col] = cvalue;
}
int main()
{
srand(time(0));
int A[DIM][DIM], B[DIM][DIM], C[DIM][DIM];
int *dev_a, *dev_b, *dev_c;
//allocate memory on global memory of gpu
hipError_t err = hipMalloc((void**)&dev_a, ((DIM)*(DIM))*sizeof(int));
printf("Cuda malloc A:%s \n", hipGetErrorString(err));
err = hipMalloc((void**)&dev_b, ((DIM)*(DIM))*sizeof(int));
printf("Cuda malloc B:%s \n", hipGetErrorString(err));
err = hipMalloc((void**)&dev_c, ((DIM)*(DIM))*sizeof(int));
printf("Cuda malloc C:%s \n", hipGetErrorString(err));
//populate array A and B
for (int i = 0; i<DIM; i++) {
for (int j = 0; j < DIM; j++){
A[i][j] = rand()%100;
B[i][j] = rand()%100;
//printf("A(%d,%d) = %d \n", i, j, A[i][j]);
//printf("B(%d,%d) = %d \n", i, j, B[i][j]);
}
}
//Copy array A and B on device allocated memory
err = hipMemcpy(dev_a, A, ((DIM*DIM))*sizeof(int), hipMemcpyHostToDevice);
printf("Cuda memcpy to device A:%s \n", hipGetErrorString(err));
err = hipMemcpy(dev_b, B, ((DIM*DIM))*sizeof(int), hipMemcpyHostToDevice);
printf("Cuda memcpy to device B:%s \n", hipGetErrorString(err));
//two dimension threads
dim3 dimBlock(BlockSize, BlockSize);
dim3 dimGrid((DIM + dimBlock.x - 1) / dimBlock.x, (DIM + dimBlock.y - 1) / dimBlock.y);
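// Ceiling division: the grid is rounded up so the whole DIM x DIM output is covered even when DIM is not a multiple of BlockSize.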
//call the kernel function multi
hipLaunchKernelGGL(multi, dimGrid, dimBlock, 0, 0, dev_a, dev_b, dev_c);
//retrieve array C from device memory
err = hipMemcpy(C, dev_c, ((DIM*DIM))*sizeof(int), hipMemcpyDeviceToHost);
printf("Cuda memcpy to HOST C:%s \n", hipGetErrorString(err));
for (int i = 0; i < DIM; i++){
for (int j = 0; j < DIM; j++){
printf("C(%d,%d) = %d \n", i, j, C[i][j]);
}
}
//free the memory
hipFree(dev_a);
hipFree(dev_b);
hipFree(dev_c);
return 0;
}
| 6c4ebcb48d8a52ebdc13e06005040d84fe772db0.cu |
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include <random>
#include <time.h>
#include <math.h>
#define DIM 20
#define BlockSize 32
__global__ void multi(int *A, int *B, int *C)
{
int cvalue = 0;
//int cwidth = blockDim.x*gridDim.x, awidth = blockDim.x*gridDim.x, bwidth = blockDim.x*gridDim.x;
int col = blockIdx.x * blockDim.x + threadIdx.x;
int row = blockIdx.y * blockDim.y + threadIdx.y;
//int offset = iy*(blockDim.x*gridDim.x) + ix;
if (row >= DIM || col >= DIM) return;
for (int e = 0; e < DIM; ++e){
cvalue += A[row*DIM + e] * B[e*DIM + col];
}
C[row*DIM + col] = cvalue;
}
int main()
{
srand(time(0));
int A[DIM][DIM], B[DIM][DIM], C[DIM][DIM];
int *dev_a, *dev_b, *dev_c;
//allocate memory on global memory of gpu
cudaError_t err = cudaMalloc((void**)&dev_a, ((DIM)*(DIM))*sizeof(int));
printf("Cuda malloc A:%s \n", cudaGetErrorString(err));
err = cudaMalloc((void**)&dev_b, ((DIM)*(DIM))*sizeof(int));
printf("Cuda malloc B:%s \n", cudaGetErrorString(err));
err = cudaMalloc((void**)&dev_c, ((DIM)*(DIM))*sizeof(int));
printf("Cuda malloc C:%s \n", cudaGetErrorString(err));
//populate array A and B
for (int i = 0; i<DIM; i++) {
for (int j = 0; j < DIM; j++){
A[i][j] = rand()%100;
B[i][j] = rand()%100;
//printf("A(%d,%d) = %d \n", i, j, A[i][j]);
//printf("B(%d,%d) = %d \n", i, j, B[i][j]);
}
}
//Copy array A and B on device allocated memory
err = cudaMemcpy(dev_a, A, ((DIM*DIM))*sizeof(int), cudaMemcpyHostToDevice);
printf("Cuda memcpy to device A:%s \n", cudaGetErrorString(err));
err = cudaMemcpy(dev_b, B, ((DIM*DIM))*sizeof(int), cudaMemcpyHostToDevice);
printf("Cuda memcpy to device B:%s \n", cudaGetErrorString(err));
//two dimension threads
dim3 dimBlock(BlockSize, BlockSize);
dim3 dimGrid((DIM + dimBlock.x - 1) / dimBlock.x, (DIM + dimBlock.y - 1) / dimBlock.y);
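// With DIM = 20 and BlockSize = 32, dimGrid evaluates to (1, 1): a single 32x32 block covers the whole 20x20 matrix, and the bounds check inside the kernel masks the surplus threads.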
//call the kernel function multi
multi<<<dimGrid, dimBlock>>>(dev_a, dev_b, dev_c);
//retrieve array C from device memory
err = cudaMemcpy(C, dev_c, ((DIM*DIM))*sizeof(int), cudaMemcpyDeviceToHost);
printf("Cuda memcpy to HOST C:%s \n", cudaGetErrorString(err));
for (int i = 0; i < DIM; i++){
for (int j = 0; j < DIM; j++){
printf("C(%d,%d) = %d \n", i, j, C[i][j]);
}
}
//free the memory
cudaFree(dev_a);
cudaFree(dev_b);
cudaFree(dev_c);
return 0;
}
|
0f15d4a216ede210080c9e9906ea065c3fb941d6.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "scanKernelInclusive.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
hipSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
int *c = NULL;
hipMalloc(&c, XSIZE*YSIZE*sizeof(int));
const int *a = NULL;
hipMalloc(&a, XSIZE*YSIZE*sizeof(int));
size_t size = XSIZE*YSIZE;
size_t offset = 2;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
hipFree(0);
hipLaunchKernelGGL(scanKernelInclusive, gridBlock, threadBlock, 0, 0, c, a, size, offset);
hipDeviceSynchronize();
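// Below: 10 warm-up launches, then 1000 timed launches; the printed value is the elapsed host time in microseconds for the 1000 asynchronous launches (there is no device synchronize before the stop timestamp).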
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
hipLaunchKernelGGL(scanKernelInclusive, gridBlock, threadBlock, 0, 0, c, a, size, offset);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
hipLaunchKernelGGL(scanKernelInclusive, gridBlock, threadBlock, 0, 0, c, a, size, offset);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} | 0f15d4a216ede210080c9e9906ea065c3fb941d6.cu | #include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "scanKernelInclusive.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
cudaSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
int *c = NULL;
cudaMalloc(&c, XSIZE*YSIZE*sizeof(int));
const int *a = NULL;
cudaMalloc(&a, XSIZE*YSIZE*sizeof(int));
size_t size = XSIZE*YSIZE;
size_t offset = 2;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
cudaFree(0);
scanKernelInclusive<<<gridBlock,threadBlock>>>(c,a,size,offset);
cudaDeviceSynchronize();
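// Below: 10 warm-up launches, then 1000 timed launches; the printed value is the elapsed host time in microseconds for the 1000 asynchronous launches (there is no device synchronize before the stop timestamp).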
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
scanKernelInclusive<<<gridBlock,threadBlock>>>(c,a,size,offset);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
scanKernelInclusive<<<gridBlock,threadBlock>>>(c,a,size,offset);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} |
aeaebb0da151ca415e2e7ab83069c742b6ba4f42.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include <algorithm>
#include <vector>
#include "paddle/fluid/framework/data_type.h"
#include "paddle/fluid/memory/malloc.h"
#include "paddle/fluid/memory/memcpy.h"
#include "paddle/fluid/operators/math/blas.h"
#include "paddle/fluid/operators/math/math_function.h"
#include "paddle/fluid/operators/math/math_function_impl.h"
#include "paddle/fluid/platform/bfloat16.h"
#include "paddle/fluid/platform/float16.h"
#include "paddle/pten/kernels/hybird/eigen/common.h"
namespace paddle {
namespace operators {
namespace math {
using float16 = paddle::platform::float16;
using bfloat16 = paddle::platform::bfloat16;
template struct SetConstant<platform::CUDADeviceContext, platform::float16>;
template struct SetConstant<platform::CUDADeviceContext, platform::bfloat16>;
template struct SetConstant<platform::CUDADeviceContext, float>;
template struct SetConstant<platform::CUDADeviceContext, double>;
template struct SetConstant<platform::CUDADeviceContext, uint8_t>;
template struct SetConstant<platform::CUDADeviceContext, int>;
template struct SetConstant<platform::CUDADeviceContext, int16_t>;
template struct SetConstant<platform::CUDADeviceContext, int64_t>;
template struct SetConstant<platform::CUDADeviceContext, bool>;
template struct SetConstant<platform::CUDADeviceContext,
platform::complex<float>>;
template struct SetConstant<platform::CUDADeviceContext,
platform::complex<double>>;
#define DEFINE_GPU_TRANS(RANK) \
template struct Transpose<platform::CUDADeviceContext, bool, RANK>; \
template struct Transpose<platform::CUDADeviceContext, float, RANK>; \
template struct Transpose<platform::CUDADeviceContext, double, RANK>; \
template struct Transpose<platform::CUDADeviceContext, float16, RANK>; \
template struct Transpose<platform::CUDADeviceContext, bfloat16, RANK>; \
template struct Transpose<platform::CUDADeviceContext, int8_t, RANK>; \
template struct Transpose<platform::CUDADeviceContext, int32_t, RANK>; \
template struct Transpose<platform::CUDADeviceContext, int64_t, RANK>; \
template struct Transpose<platform::CUDADeviceContext, \
paddle::platform::complex<float>, RANK>; \
template struct Transpose<platform::CUDADeviceContext, \
paddle::platform::complex<double>, RANK>;
DEFINE_GPU_TRANS(1);
DEFINE_GPU_TRANS(2);
DEFINE_GPU_TRANS(3);
DEFINE_GPU_TRANS(4);
DEFINE_GPU_TRANS(5);
DEFINE_GPU_TRANS(6);
#define REINTERPRET(T, DST_PTR, SRC_PTR) \
T* DST_PTR = reinterpret_cast<T*>(SRC_PTR)
template <typename T>
__global__ void TransposeNormalKernel(const T* in_ptr, T* out_ptr,
int64_t element,
const int64_t* in_stride_ptr,
const int64_t* out_stride_ptr,
const int64_t* axis_ptr, int rank) {
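// Decompose each flat output index into coordinates via out_stride, then rebuild the flat input index from the permuted input strides in_stride[axis[i]].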
CUDA_KERNEL_LOOP(out_idx, element) {
int64_t in_idx = 0;
int64_t tmp_idx = out_idx;
for (int i = 0; i < rank; ++i) {
const int64_t coordinate = tmp_idx / out_stride_ptr[i];
tmp_idx -= coordinate * out_stride_ptr[i];
in_idx += coordinate * in_stride_ptr[axis_ptr[i]];
}
out_ptr[out_idx] = in_ptr[in_idx];
}
}
template <typename T>
struct TransposeNormal<platform::CUDADeviceContext, T> {
void operator()(const platform::CUDADeviceContext& context,
const framework::Tensor& in, framework::Tensor* out,
const std::vector<int>& axis) {
const int rank = axis.size();
auto in_stride = framework::stride(in.dims());
auto out_stride = framework::stride(out->dims());
auto* in_ptr = in.data<T>();
auto* out_ptr = out->data<T>();
// copy in_stride, out_stride, axis to gpu device
const platform::CUDAPlace& cuda_place =
BOOST_GET_CONST(platform::CUDAPlace, context.GetPlace());
platform::CPUPlace cpu_place = platform::CPUPlace();
size_t size = 3 * rank * sizeof(int64_t);
auto cpu_buf_holder = memory::Alloc(cpu_place, size);
auto cuda_buf_holder = memory::Alloc(cuda_place, size);
REINTERPRET(int64_t, cpu_buf, cpu_buf_holder->ptr());
REINTERPRET(int64_t, cuda_buf, cuda_buf_holder->ptr());
for (int i = 0; i < rank; ++i) {
cpu_buf[i] = in_stride[i];
cpu_buf[rank + i] = out_stride[i];
cpu_buf[2 * rank + i] = axis[i];
}
memory::Copy(cuda_place, cuda_buf, cpu_place, cpu_buf, size,
context.stream());
REINTERPRET(const int64_t, in_stride_ptr, cuda_buf);
REINTERPRET(const int64_t, out_stride_ptr, cuda_buf + rank);
REINTERPRET(const int64_t, axis_ptr, cuda_buf + 2 * rank);
const int MAX_BLOCK_DIM = context.GetMaxThreadsPerBlock();
const int MAX_GRID_DIM =
context.GetMaxPhysicalThreadCount() / MAX_BLOCK_DIM;
int64_t elements = in.numel();
int block_size = (elements >= MAX_BLOCK_DIM)
? MAX_BLOCK_DIM
: (1 << static_cast<int>(std::log2(elements)));
int grid_size = elements / block_size;
grid_size = (grid_size >= MAX_GRID_DIM) ? MAX_GRID_DIM : grid_size;
hipLaunchKernelGGL(( TransposeNormalKernel<T>), dim3(grid_size), dim3(block_size), 0, context.stream(),
in_ptr, out_ptr, elements, in_stride_ptr, out_stride_ptr, axis_ptr,
rank);
}
};
// define transpose normal
#define DEFINE_GPU_TRANS_NORMAL(TYPE) \
template struct TransposeNormal<platform::CUDADeviceContext, TYPE>
DEFINE_GPU_TRANS_NORMAL(float16);
DEFINE_GPU_TRANS_NORMAL(bfloat16);
DEFINE_GPU_TRANS_NORMAL(float);
DEFINE_GPU_TRANS_NORMAL(double);
DEFINE_GPU_TRANS_NORMAL(int);
DEFINE_GPU_TRANS_NORMAL(int64_t);
DEFINE_GPU_TRANS_NORMAL(bool);
DEFINE_GPU_TRANS_NORMAL(int16_t);
DEFINE_GPU_TRANS_NORMAL(uint8_t);
DEFINE_GPU_TRANS_NORMAL(int8_t);
DEFINE_GPU_TRANS_NORMAL(paddle::platform::complex<float>);
DEFINE_GPU_TRANS_NORMAL(paddle::platform::complex<double>);
struct TensorSetConstantGPU {
TensorSetConstantGPU(const platform::DeviceContext& context,
framework::Tensor* tensor, float value)
: context_(context), tensor_(tensor), value_(value) {}
template <typename T>
void apply() const {
SetConstant<platform::CUDADeviceContext, T> functor;
functor(reinterpret_cast<const platform::CUDADeviceContext&>(context_),
tensor_, static_cast<T>(value_));
}
const platform::DeviceContext& context_;
framework::Tensor* tensor_;
float value_;
};
template <>
void set_constant_with_place<platform::CUDAPlace>(
const platform::DeviceContext& context, framework::Tensor* tensor,
float value) {
framework::VisitDataType(tensor->type(),
TensorSetConstantGPU(context, tensor, value));
}
template <typename T>
__global__ void RowwiseAddKernel(const T* a, const T* b, T* c, int width,
int num) {
T tmp = 1.0 / width;
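// tmp is the reciprocal of width: h recovers the row index and w the column index of flat element i, so the length-width vector b is broadcast-added to every row of a.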
CUDA_KERNEL_LOOP(i, num) {
int h = i * tmp;
int w = i - h * width;
c[i] = a[i] + b[w];
}
}
template <typename T>
struct RowwiseAdd<platform::CUDADeviceContext, T> {
void operator()(const platform::CUDADeviceContext& context,
const framework::Tensor& input,
const framework::Tensor& vector, framework::Tensor* output) {
auto in_dims = input.dims();
auto out_dims = output->dims();
auto size = input.numel() / in_dims[0];
PADDLE_ENFORCE_EQ(
vector.numel(), size,
platform::errors::InvalidArgument(
"The input vector size"
" should be equal to the size of each row of input tensor."
" Expected vector size=%d, but received %d",
size, vector.numel()));
const char* in_dims_cstr = in_dims.to_str().c_str();
const char* out_dims_cstr = out_dims.to_str().c_str();
PADDLE_ENFORCE_EQ(
out_dims, in_dims,
platform::errors::InvalidArgument(
"The output tensor shape should be same as the input tensor"
" shape. Expected output tensor shape: %s,"
" but received %s",
in_dims_cstr, out_dims_cstr));
int blocks = 512;
int grids = (input.numel() + blocks - 1) / blocks;
hipLaunchKernelGGL(( RowwiseAddKernel<T>), dim3(grids), dim3(blocks), 0, context.stream(),
input.data<T>(), vector.data<T>(), output->data<T>(),
static_cast<int>(in_dims[1]), static_cast<int>(input.numel()));
}
};
template struct RowwiseAdd<platform::CUDADeviceContext, float>;
template struct RowwiseAdd<platform::CUDADeviceContext, double>;
template struct ColwiseSum<platform::CUDADeviceContext, float>;
template struct ColwiseSum<platform::CUDADeviceContext, int>;
template struct ColwiseSum<platform::CUDADeviceContext, int64_t>;
// template struct ColwiseSum<platform::CUDADeviceContext, double>;
// The ColwiseSum<platform::CUDADeviceContext, double> failed in debug mode,
// and only failed for this case. So reimplemented it.
template <>
void ColwiseSum<platform::CUDADeviceContext, double>::operator()(
const platform::CUDADeviceContext& context, const framework::Tensor& input,
framework::Tensor* vector) {
auto in_dims = input.dims();
auto size = input.numel() / in_dims[0];
PADDLE_ENFORCE_EQ(vector->numel(), size,
platform::errors::InvalidArgument(
"The size of input vector"
" should be equal to the size of input tensor column"
" dimension. Expected vector size=%d, but received %d",
size, vector->numel()));
framework::Tensor one;
one.mutable_data<double>({in_dims[0]}, context.GetPlace());
SetConstant<platform::CUDADeviceContext, double> set;
set(context, &one, static_cast<double>(1.0));
GetBlas<platform::CUDADeviceContext, double>(context).GEMV(
true, static_cast<int>(in_dims[0]), static_cast<int>(in_dims[1]), 1.0,
input.data<double>(), one.data<double>(), 0.0, vector->data<double>());
}
template struct RowwiseSum<platform::CUDADeviceContext, float>;
// template struct RowwiseSum<platform::CUDADeviceContext, double>;
// TODO(zcd): Following ColwiseSum format, need to confirm.
// The RowwiseSum<platform::CUDADeviceContext, double> failed in debug mode,
// and only failed for this case. So reimplemented it.
template <>
void RowwiseSum<platform::CUDADeviceContext, double>::operator()(
const platform::CUDADeviceContext& context, const framework::Tensor& input,
framework::Tensor* vector) {
auto in_dims = input.dims();
auto size = input.numel() / in_dims[0];
PADDLE_ENFORCE_EQ(vector->numel(), in_dims[0],
platform::errors::InvalidArgument(
"The size of input vector"
" should be equal to the size of input tensor row"
" dimension. Expected vector size=%d, but received %d",
in_dims[0], vector->numel()));
framework::Tensor one;
one.mutable_data<double>({size}, context.GetPlace());
SetConstant<platform::CUDADeviceContext, double> set;
set(context, &one, static_cast<double>(1.0));
GetBlas<platform::CUDADeviceContext, double>(context).GEMV(
true, static_cast<int>(in_dims[1]), static_cast<int>(in_dims[0]), 1.0,
one.data<double>(), input.data<double>(), 0.0, vector->data<double>());
}
template struct RowwiseMean<platform::CUDADeviceContext, float>;
template struct RowwiseMean<platform::CUDADeviceContext, double>;
template <typename T>
struct ElementwiseAddTo<platform::CUDADeviceContext, T> {
void operator()(platform::CUDADeviceContext* ctx,
const framework::Tensor& src, framework::Tensor* dst) {
auto in = framework::EigenVector<T>::Flatten(src);
auto out = framework::EigenVector<T>::Flatten(*dst);
auto& place = *(ctx->eigen_device());
out.device(place) = out + in;
}
void operator()(platform::CUDADeviceContext* ctx,
const pten::DenseTensor& src, pten::DenseTensor* dst) {
auto in = pten::EigenVector<T>::Flatten(src);
auto out = pten::EigenVector<T>::Flatten(*dst);
auto& place = *(ctx->eigen_device());
out.device(place) = out + in;
}
};
template struct ElementwiseAddTo<platform::CUDADeviceContext,
platform::float16>;
} // namespace math
} // namespace operators
} // namespace paddle
| aeaebb0da151ca415e2e7ab83069c742b6ba4f42.cu | /* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include <algorithm>
#include <vector>
#include "paddle/fluid/framework/data_type.h"
#include "paddle/fluid/memory/malloc.h"
#include "paddle/fluid/memory/memcpy.h"
#include "paddle/fluid/operators/math/blas.h"
#include "paddle/fluid/operators/math/math_function.h"
#include "paddle/fluid/operators/math/math_function_impl.h"
#include "paddle/fluid/platform/bfloat16.h"
#include "paddle/fluid/platform/float16.h"
#include "paddle/pten/kernels/hybird/eigen/common.h"
namespace paddle {
namespace operators {
namespace math {
using float16 = paddle::platform::float16;
using bfloat16 = paddle::platform::bfloat16;
template struct SetConstant<platform::CUDADeviceContext, platform::float16>;
template struct SetConstant<platform::CUDADeviceContext, platform::bfloat16>;
template struct SetConstant<platform::CUDADeviceContext, float>;
template struct SetConstant<platform::CUDADeviceContext, double>;
template struct SetConstant<platform::CUDADeviceContext, uint8_t>;
template struct SetConstant<platform::CUDADeviceContext, int>;
template struct SetConstant<platform::CUDADeviceContext, int16_t>;
template struct SetConstant<platform::CUDADeviceContext, int64_t>;
template struct SetConstant<platform::CUDADeviceContext, bool>;
template struct SetConstant<platform::CUDADeviceContext,
platform::complex<float>>;
template struct SetConstant<platform::CUDADeviceContext,
platform::complex<double>>;
#define DEFINE_GPU_TRANS(RANK) \
template struct Transpose<platform::CUDADeviceContext, bool, RANK>; \
template struct Transpose<platform::CUDADeviceContext, float, RANK>; \
template struct Transpose<platform::CUDADeviceContext, double, RANK>; \
template struct Transpose<platform::CUDADeviceContext, float16, RANK>; \
template struct Transpose<platform::CUDADeviceContext, bfloat16, RANK>; \
template struct Transpose<platform::CUDADeviceContext, int8_t, RANK>; \
template struct Transpose<platform::CUDADeviceContext, int32_t, RANK>; \
template struct Transpose<platform::CUDADeviceContext, int64_t, RANK>; \
template struct Transpose<platform::CUDADeviceContext, \
paddle::platform::complex<float>, RANK>; \
template struct Transpose<platform::CUDADeviceContext, \
paddle::platform::complex<double>, RANK>;
DEFINE_GPU_TRANS(1);
DEFINE_GPU_TRANS(2);
DEFINE_GPU_TRANS(3);
DEFINE_GPU_TRANS(4);
DEFINE_GPU_TRANS(5);
DEFINE_GPU_TRANS(6);
#define REINTERPRET(T, DST_PTR, SRC_PTR) \
T* DST_PTR = reinterpret_cast<T*>(SRC_PTR)
template <typename T>
__global__ void TransposeNormalKernel(const T* in_ptr, T* out_ptr,
int64_t element,
const int64_t* in_stride_ptr,
const int64_t* out_stride_ptr,
const int64_t* axis_ptr, int rank) {
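// Decompose each flat output index into coordinates via out_stride, then rebuild the flat input index from the permuted input strides in_stride[axis[i]].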
CUDA_KERNEL_LOOP(out_idx, element) {
int64_t in_idx = 0;
int64_t tmp_idx = out_idx;
for (int i = 0; i < rank; ++i) {
const int64_t coordinate = tmp_idx / out_stride_ptr[i];
tmp_idx -= coordinate * out_stride_ptr[i];
in_idx += coordinate * in_stride_ptr[axis_ptr[i]];
}
out_ptr[out_idx] = in_ptr[in_idx];
}
}
template <typename T>
struct TransposeNormal<platform::CUDADeviceContext, T> {
void operator()(const platform::CUDADeviceContext& context,
const framework::Tensor& in, framework::Tensor* out,
const std::vector<int>& axis) {
const int rank = axis.size();
auto in_stride = framework::stride(in.dims());
auto out_stride = framework::stride(out->dims());
auto* in_ptr = in.data<T>();
auto* out_ptr = out->data<T>();
// copy in_stride, out_stride, axis to gpu device
const platform::CUDAPlace& cuda_place =
BOOST_GET_CONST(platform::CUDAPlace, context.GetPlace());
platform::CPUPlace cpu_place = platform::CPUPlace();
size_t size = 3 * rank * sizeof(int64_t);
auto cpu_buf_holder = memory::Alloc(cpu_place, size);
auto cuda_buf_holder = memory::Alloc(cuda_place, size);
REINTERPRET(int64_t, cpu_buf, cpu_buf_holder->ptr());
REINTERPRET(int64_t, cuda_buf, cuda_buf_holder->ptr());
for (int i = 0; i < rank; ++i) {
cpu_buf[i] = in_stride[i];
cpu_buf[rank + i] = out_stride[i];
cpu_buf[2 * rank + i] = axis[i];
}
memory::Copy(cuda_place, cuda_buf, cpu_place, cpu_buf, size,
context.stream());
REINTERPRET(const int64_t, in_stride_ptr, cuda_buf);
REINTERPRET(const int64_t, out_stride_ptr, cuda_buf + rank);
REINTERPRET(const int64_t, axis_ptr, cuda_buf + 2 * rank);
const int MAX_BLOCK_DIM = context.GetMaxThreadsPerBlock();
const int MAX_GRID_DIM =
context.GetMaxPhysicalThreadCount() / MAX_BLOCK_DIM;
int64_t elements = in.numel();
int block_size = (elements >= MAX_BLOCK_DIM)
? MAX_BLOCK_DIM
: (1 << static_cast<int>(std::log2(elements)));
int grid_size = elements / block_size;
grid_size = (grid_size >= MAX_GRID_DIM) ? MAX_GRID_DIM : grid_size;
TransposeNormalKernel<T><<<grid_size, block_size, 0, context.stream()>>>(
in_ptr, out_ptr, elements, in_stride_ptr, out_stride_ptr, axis_ptr,
rank);
}
};
// define transpose normal
#define DEFINE_GPU_TRANS_NORMAL(TYPE) \
template struct TransposeNormal<platform::CUDADeviceContext, TYPE>
DEFINE_GPU_TRANS_NORMAL(float16);
DEFINE_GPU_TRANS_NORMAL(bfloat16);
DEFINE_GPU_TRANS_NORMAL(float);
DEFINE_GPU_TRANS_NORMAL(double);
DEFINE_GPU_TRANS_NORMAL(int);
DEFINE_GPU_TRANS_NORMAL(int64_t);
DEFINE_GPU_TRANS_NORMAL(bool);
DEFINE_GPU_TRANS_NORMAL(int16_t);
DEFINE_GPU_TRANS_NORMAL(uint8_t);
DEFINE_GPU_TRANS_NORMAL(int8_t);
DEFINE_GPU_TRANS_NORMAL(paddle::platform::complex<float>);
DEFINE_GPU_TRANS_NORMAL(paddle::platform::complex<double>);
struct TensorSetConstantGPU {
TensorSetConstantGPU(const platform::DeviceContext& context,
framework::Tensor* tensor, float value)
: context_(context), tensor_(tensor), value_(value) {}
template <typename T>
void apply() const {
SetConstant<platform::CUDADeviceContext, T> functor;
functor(reinterpret_cast<const platform::CUDADeviceContext&>(context_),
tensor_, static_cast<T>(value_));
}
const platform::DeviceContext& context_;
framework::Tensor* tensor_;
float value_;
};
template <>
void set_constant_with_place<platform::CUDAPlace>(
const platform::DeviceContext& context, framework::Tensor* tensor,
float value) {
framework::VisitDataType(tensor->type(),
TensorSetConstantGPU(context, tensor, value));
}
template <typename T>
__global__ void RowwiseAddKernel(const T* a, const T* b, T* c, int width,
int num) {
T tmp = 1.0 / width;
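// tmp is the reciprocal of width: h recovers the row index and w the column index of flat element i, so the length-width vector b is broadcast-added to every row of a.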
CUDA_KERNEL_LOOP(i, num) {
int h = i * tmp;
int w = i - h * width;
c[i] = a[i] + b[w];
}
}
template <typename T>
struct RowwiseAdd<platform::CUDADeviceContext, T> {
void operator()(const platform::CUDADeviceContext& context,
const framework::Tensor& input,
const framework::Tensor& vector, framework::Tensor* output) {
auto in_dims = input.dims();
auto out_dims = output->dims();
auto size = input.numel() / in_dims[0];
PADDLE_ENFORCE_EQ(
vector.numel(), size,
platform::errors::InvalidArgument(
"The input vector size"
" should be equal to the size of each row of input tensor."
" Expected vector size=%d, but received %d",
size, vector.numel()));
const char* in_dims_cstr = in_dims.to_str().c_str();
const char* out_dims_cstr = out_dims.to_str().c_str();
PADDLE_ENFORCE_EQ(
out_dims, in_dims,
platform::errors::InvalidArgument(
"The output tensor shape should be same as the input tensor"
" shape. Expected output tensor shape: %s,"
" but received %s",
in_dims_cstr, out_dims_cstr));
int blocks = 512;
int grids = (input.numel() + blocks - 1) / blocks;
RowwiseAddKernel<T><<<grids, blocks, 0, context.stream()>>>(
input.data<T>(), vector.data<T>(), output->data<T>(),
static_cast<int>(in_dims[1]), static_cast<int>(input.numel()));
}
};
template struct RowwiseAdd<platform::CUDADeviceContext, float>;
template struct RowwiseAdd<platform::CUDADeviceContext, double>;
template struct ColwiseSum<platform::CUDADeviceContext, float>;
template struct ColwiseSum<platform::CUDADeviceContext, int>;
template struct ColwiseSum<platform::CUDADeviceContext, int64_t>;
// template struct ColwiseSum<platform::CUDADeviceContext, double>;
// The ColwiseSum<platform::CUDADeviceContext, double> failed in debug mode,
// and only failed for this case. So reimplemented it.
template <>
void ColwiseSum<platform::CUDADeviceContext, double>::operator()(
const platform::CUDADeviceContext& context, const framework::Tensor& input,
framework::Tensor* vector) {
auto in_dims = input.dims();
auto size = input.numel() / in_dims[0];
PADDLE_ENFORCE_EQ(vector->numel(), size,
platform::errors::InvalidArgument(
"The size of input vector"
" should be equal to the size of input tensor column"
" dimension. Expected vector size=%d, but received %d",
size, vector->numel()));
framework::Tensor one;
one.mutable_data<double>({in_dims[0]}, context.GetPlace());
SetConstant<platform::CUDADeviceContext, double> set;
set(context, &one, static_cast<double>(1.0));
GetBlas<platform::CUDADeviceContext, double>(context).GEMV(
true, static_cast<int>(in_dims[0]), static_cast<int>(in_dims[1]), 1.0,
input.data<double>(), one.data<double>(), 0.0, vector->data<double>());
}
template struct RowwiseSum<platform::CUDADeviceContext, float>;
// template struct RowwiseSum<platform::CUDADeviceContext, double>;
// TODO(zcd): Following ColwiseSum format, need to confirm.
// The RowwiseSum<platform::CUDADeviceContext, double> failed in debug mode,
// and only failed for this case. So reimplemented it.
template <>
void RowwiseSum<platform::CUDADeviceContext, double>::operator()(
const platform::CUDADeviceContext& context, const framework::Tensor& input,
framework::Tensor* vector) {
auto in_dims = input.dims();
auto size = input.numel() / in_dims[0];
PADDLE_ENFORCE_EQ(vector->numel(), in_dims[0],
platform::errors::InvalidArgument(
"The size of input vector"
" should be equal to the size of input tensor row"
" dimension. Expected vector size=%d, but received %d",
in_dims[0], vector->numel()));
framework::Tensor one;
one.mutable_data<double>({size}, context.GetPlace());
SetConstant<platform::CUDADeviceContext, double> set;
set(context, &one, static_cast<double>(1.0));
GetBlas<platform::CUDADeviceContext, double>(context).GEMV(
true, static_cast<int>(in_dims[1]), static_cast<int>(in_dims[0]), 1.0,
one.data<double>(), input.data<double>(), 0.0, vector->data<double>());
}
template struct RowwiseMean<platform::CUDADeviceContext, float>;
template struct RowwiseMean<platform::CUDADeviceContext, double>;
template <typename T>
struct ElementwiseAddTo<platform::CUDADeviceContext, T> {
void operator()(platform::CUDADeviceContext* ctx,
const framework::Tensor& src, framework::Tensor* dst) {
auto in = framework::EigenVector<T>::Flatten(src);
auto out = framework::EigenVector<T>::Flatten(*dst);
auto& place = *(ctx->eigen_device());
out.device(place) = out + in;
}
void operator()(platform::CUDADeviceContext* ctx,
const pten::DenseTensor& src, pten::DenseTensor* dst) {
auto in = pten::EigenVector<T>::Flatten(src);
auto out = pten::EigenVector<T>::Flatten(*dst);
auto& place = *(ctx->eigen_device());
out.device(place) = out + in;
}
};
template struct ElementwiseAddTo<platform::CUDADeviceContext,
platform::float16>;
} // namespace math
} // namespace operators
} // namespace paddle
|
66023f6dc0797794a1aab0ad99f0492d84735d1b.hip | // !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h>
#include <hip/hip_fp16.h>
#include <hip/hip_runtime_api.h>
#include <device_launch_parameters.h>
#include <mma.h>
#include <cuda_fp16.hpp>
#include <math.h>
#include <fstream>
#include <iomanip>
#include <iostream>
#include <vector>
#include <ATen/hip/HIPContext.h>
#include <torch/extension.h>
#include "shared_utils.cuh"
using namespace nvcuda;
template <uint THREADBLOCK_SIZE>
__launch_bounds__(THREADBLOCK_SIZE) __global__
void dotBasedInteractF32BwdKernelNonAligned(const float *__restrict input,
const float *__restrict upstream_grad,
float *__restrict grad,
float *__restrict bottom_mlp_grad,
uint batch_size,
uint num_rows,
uint num_cols,
uint input_size,
uint padded_ugrad_size,
uint interaction_ugrad_size) {
extern __shared__ float smem_f32_bwd[];
float *smem_in = &smem_f32_bwd[0];
float *smem_interaction_ugrad = &smem_f32_bwd[input_size]; //skip over the part where we copy in the input
// Input
uint input_batch_offset = blockIdx.x * input_size;
const float *gmem_in = &input[input_batch_offset];
// Gradient
const uint &grad_batch_offset = input_batch_offset;
float *gmem_mlp_grad = &bottom_mlp_grad[blockIdx.x * num_cols]; //where the bottom mlp grad of our sample will land
float *gmem_interaction_grad = &grad[grad_batch_offset]; //where the interaction grads of our sample will land
// Upstream Gradient
uint upstream_grad_batch_offset = blockIdx.x * padded_ugrad_size;
const float *gmem_mlp_ugrad = &upstream_grad[upstream_grad_batch_offset];
// fwd output contained mlp at the start, so the gradient has mlp grad at the start
const float *gmem_interaction_ugrad = &upstream_grad[upstream_grad_batch_offset + num_cols];
// input -> shared memory
for (uint idx = threadIdx.x; idx < input_size; idx += blockDim.x) {
smem_in[idx] = gmem_in[idx];
}
// Interaction Upstream Grad -> Shared Memory
for (uint idx = threadIdx.x; idx < interaction_ugrad_size; idx += blockDim.x) {
smem_interaction_ugrad[idx] = gmem_interaction_ugrad[idx];
}
__syncthreads();
// Copy the upstream gradient w.r.t. the mlp to its corresponding memory location.
for (uint idx = threadIdx.x; idx < num_cols; idx += blockDim.x) {
gmem_mlp_grad[idx] = gmem_mlp_ugrad[idx];
}
for (uint idx = threadIdx.x; idx < num_cols; idx += blockDim.x) {
size_t grad_idx = idx;
// Calculate a single column (1...128) of the output
for (uint row_idx = 0; row_idx < num_rows; row_idx++) {
// Pick a row: now we're calculating a single value of the gradient
float sum = 0;
// Jump to our row in (flattened) triangular matrix of upstream gradients
size_t upstream_grad_offset = (row_idx * (row_idx - 1)) >> 1;
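// Row r of the strictly lower-triangular interaction matrix starts r*(r-1)/2 entries into the flattened buffer (rows 0,1,2,3 start at offsets 0,0,1,3).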
// Iterate over all the interactions we took part in
// Sum upstream gradient for that interaction multiplied with the right element of the other vector in the interaction
// We need to do this in two passes because we only keep the triangular part of the matrix, so the row "bends"
for (int k = 0; k < row_idx; k++) {
sum = fmaf(smem_in[k * num_cols + idx], smem_interaction_ugrad[upstream_grad_offset + k], sum);
}
for (int k = row_idx + 1; k < num_rows; k++) {
upstream_grad_offset = (k * (k - 1)) >> 1; // TODO: this can become a sum
sum = fmaf(smem_in[k * num_cols + idx], smem_interaction_ugrad[upstream_grad_offset + row_idx], sum);
}
gmem_interaction_grad[grad_idx] = sum;
grad_idx += num_cols;
}
}
}
template <uint THREADBLOCK_SIZE>
__launch_bounds__(THREADBLOCK_SIZE) __global__ void dotBasedInteractF32BwdKernel(const float *__restrict input,
const float *__restrict upstream_grad,
float *__restrict grad,
float *__restrict bottom_mlp_grad,
uint batch_size,
uint num_rows,
uint num_cols,
uint input_size,
uint padded_ugrad_size,
uint interaction_ugrad_size) {
// This kernel assumes that:
// input_size is divisible by 4
// num_cols is divisible by 4
extern __shared__ float smem_f32_bwd[];
float *smem_in = &smem_f32_bwd[0];
float *smem_interaction_ugrad = &smem_f32_bwd[input_size];
// Input
uint input_batch_offset = blockIdx.x * input_size;
const float *gmem_in = &input[input_batch_offset];
// Gradient
const uint &grad_batch_offset = input_batch_offset;
float *gmem_mlp_grad = &bottom_mlp_grad[blockIdx.x * num_cols];
float *gmem_interaction_grad = &grad[grad_batch_offset];
// Upstream Gradient
uint upstream_grad_batch_offset = blockIdx.x * padded_ugrad_size;
const float *gmem_mlp_ugrad = &upstream_grad[upstream_grad_batch_offset];
const float *gmem_interaction_ugrad = &upstream_grad[upstream_grad_batch_offset + num_cols];
// input -> shared memory
uint input_size_float4 = input_size >> 2;
for (uint idx = threadIdx.x; idx < input_size_float4; idx += blockDim.x) {
((float4 *)smem_in)[idx] = ((float4 *)gmem_in)[idx];
}
// Interaction Upstream Grad -> Shared Memory
uint upstream_grad_size_float4 = interaction_ugrad_size >> 2;
for (uint idx = threadIdx.x; idx < upstream_grad_size_float4; idx += blockDim.x) {
((float4 *)smem_interaction_ugrad)[idx] = ((float4 *)gmem_interaction_ugrad)[idx];
}
// This tail loop is not dead code: interaction_ugrad_size is the unpadded size, so it is generally not a multiple of 4.
// It copies whatever is left over after the vectorized float4 copy above.
uint vectorized_load_offset = (upstream_grad_size_float4 << 2);
for (uint idx = vectorized_load_offset + threadIdx.x; idx < interaction_ugrad_size; idx += blockDim.x) {
smem_interaction_ugrad[idx] = gmem_interaction_ugrad[idx];
}
__syncthreads();
// Copy the upstream gradient w.r.t. the mlp to its corresponding memory location.
for (uint idx = threadIdx.x; idx < (num_cols >> 2); idx += blockDim.x) {
((float4 *)gmem_mlp_grad)[idx] = ((float4 *)gmem_mlp_ugrad)[idx];
}
for (uint idx = threadIdx.x; idx < num_cols; idx += blockDim.x) {
size_t grad_idx = idx;
for (uint row_idx = 0; row_idx < num_rows; row_idx++) {
float sum = 0;
size_t upstream_grad_offset = (row_idx * (row_idx - 1)) >> 1;
for (int k = 0; k < row_idx; k++) {
sum = fmaf(smem_in[k * num_cols + idx], smem_interaction_ugrad[upstream_grad_offset + k], sum);
}
for (int k = row_idx + 1; k < num_rows; k++) {
upstream_grad_offset = (k * (k - 1)) >> 1; // TODO: this can become a sum
sum = fmaf(smem_in[k * num_cols + idx], smem_interaction_ugrad[upstream_grad_offset + row_idx], sum);
}
gmem_interaction_grad[grad_idx] = sum;
grad_idx += num_cols;
}
}
}
inline void dotBasedInteractF32Bwd(const void *input,
const void *upstream_grad,
void *grad,
void *bottom_mlp_grad,
uint batch_size,
uint num_rows,
uint num_cols) {
const uint kNumThreads = 128;
uint num_blocks = batch_size;
uint input_size = num_rows * num_cols;
// 1D ugrad size
uint interaction_ugrad_size = num_rows * (num_rows - 1) >> 1; //this IS supposed to be without padding
// this has to be the same padding that we applied in forward
uint unpadded_ugrad_size = num_cols + interaction_ugrad_size;
// this has to be the same padding that we applied in forward
uint padded_ugrad_size = ((unpadded_ugrad_size-1)/8 + 1)*8; //round up to multiple of 8
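// e.g. with num_rows = 27 and num_cols = 128 (illustrative values, not a requirement): interaction_ugrad_size = 27*26/2 = 351, unpadded_ugrad_size = 479, padded_ugrad_size = 480.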
// input space + upstream grad space
// We copy the whole input plus just the unpadded interaction part of the upstream grad
uint smem_size_elems = input_size + interaction_ugrad_size;
uint smem_size_bytes = smem_size_elems << 2; // F32 Kernel
// we use the fact that padded_ugrad_size is always divisible by 4 - we just made it.
bool float4_predicate = !(num_cols & 3);
if (float4_predicate) {
hipLaunchKernelGGL(( dotBasedInteractF32BwdKernel<kNumThreads>)
, dim3(num_blocks), dim3(kNumThreads), smem_size_bytes,
at::hip::getCurrentHIPStreamMasqueradingAsCUDA(), (const float *)input,
(const float *)upstream_grad,
(float *)grad,
(float *)bottom_mlp_grad,
batch_size,
num_rows,
num_cols,
input_size,
padded_ugrad_size,
interaction_ugrad_size);
} else {
hipLaunchKernelGGL(( dotBasedInteractF32BwdKernelNonAligned<kNumThreads>)
, dim3(num_blocks), dim3(kNumThreads), smem_size_bytes,
at::hip::getCurrentHIPStreamMasqueradingAsCUDA(), (const float *)input,
(const float *)upstream_grad,
(float *)grad,
(float *)bottom_mlp_grad,
batch_size,
num_rows,
num_cols,
input_size,
padded_ugrad_size,
interaction_ugrad_size);
}
}
| 66023f6dc0797794a1aab0ad99f0492d84735d1b.cu | #include <cuda.h>
#include <cuda_fp16.h>
#include <cuda_runtime_api.h>
#include <device_launch_parameters.h>
#include <mma.h>
#include <cuda_fp16.hpp>
#include <math.h>
#include <fstream>
#include <iomanip>
#include <iostream>
#include <vector>
#include <ATen/cuda/CUDAContext.h>
#include <torch/extension.h>
#include "shared_utils.cuh"
using namespace nvcuda;
template <uint THREADBLOCK_SIZE>
__launch_bounds__(THREADBLOCK_SIZE) __global__
void dotBasedInteractF32BwdKernelNonAligned(const float *__restrict input,
const float *__restrict upstream_grad,
float *__restrict grad,
float *__restrict bottom_mlp_grad,
uint batch_size,
uint num_rows,
uint num_cols,
uint input_size,
uint padded_ugrad_size,
uint interaction_ugrad_size) {
extern __shared__ float smem_f32_bwd[];
float *smem_in = &smem_f32_bwd[0];
float *smem_interaction_ugrad = &smem_f32_bwd[input_size]; //skip over the part where we copy in the input
// Input
uint input_batch_offset = blockIdx.x * input_size;
const float *gmem_in = &input[input_batch_offset];
// Gradient
const uint &grad_batch_offset = input_batch_offset;
float *gmem_mlp_grad = &bottom_mlp_grad[blockIdx.x * num_cols]; //where the bottom mlp grad of our sample will land
float *gmem_interaction_grad = &grad[grad_batch_offset]; //where the interaction grads of our sample will land
// Upstream Gradient
uint upstream_grad_batch_offset = blockIdx.x * padded_ugrad_size;
const float *gmem_mlp_ugrad = &upstream_grad[upstream_grad_batch_offset];
// fwd output contained mlp at the start, so the gradient has mlp grad at the start
const float *gmem_interaction_ugrad = &upstream_grad[upstream_grad_batch_offset + num_cols];
// input -> shared memory
for (uint idx = threadIdx.x; idx < input_size; idx += blockDim.x) {
smem_in[idx] = gmem_in[idx];
}
// Interaction Upstream Grad -> Shared Memory
for (uint idx = threadIdx.x; idx < interaction_ugrad_size; idx += blockDim.x) {
smem_interaction_ugrad[idx] = gmem_interaction_ugrad[idx];
}
__syncthreads();
// Copy the upstream gradient w.r.t. the mlp to its corresponding memory location.
for (uint idx = threadIdx.x; idx < num_cols; idx += blockDim.x) {
gmem_mlp_grad[idx] = gmem_mlp_ugrad[idx];
}
for (uint idx = threadIdx.x; idx < num_cols; idx += blockDim.x) {
size_t grad_idx = idx;
// Calculate a single column (1...128) of the output
for (uint row_idx = 0; row_idx < num_rows; row_idx++) {
// Pick a row: now we're calculating a single value of the gradient
float sum = 0;
// Jump to our row in (flattened) triangular matrix of upstream gradients
size_t upstream_grad_offset = (row_idx * (row_idx - 1)) >> 1;
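// Row r of the strictly lower-triangular interaction matrix starts r*(r-1)/2 entries into the flattened buffer (rows 0,1,2,3 start at offsets 0,0,1,3).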
// Iterate over all the interactions we took part in
// Sum upstream gradient for that interaction multiplied with the right element of the other vector in the interaction
// We need to do this in two passes because we only keep the triangular part of the matrix, so the row "bends"
for (int k = 0; k < row_idx; k++) {
sum = fmaf(smem_in[k * num_cols + idx], smem_interaction_ugrad[upstream_grad_offset + k], sum);
}
for (int k = row_idx + 1; k < num_rows; k++) {
upstream_grad_offset = (k * (k - 1)) >> 1; // TODO: this can become a sum
sum = fmaf(smem_in[k * num_cols + idx], smem_interaction_ugrad[upstream_grad_offset + row_idx], sum);
}
gmem_interaction_grad[grad_idx] = sum;
grad_idx += num_cols;
}
}
}
template <uint THREADBLOCK_SIZE>
__launch_bounds__(THREADBLOCK_SIZE) __global__ void dotBasedInteractF32BwdKernel(const float *__restrict input,
const float *__restrict upstream_grad,
float *__restrict grad,
float *__restrict bottom_mlp_grad,
uint batch_size,
uint num_rows,
uint num_cols,
uint input_size,
uint padded_ugrad_size,
uint interaction_ugrad_size) {
// This kernel assumes that:
// input_size is divisible by 4
// num_cols is divisible by 4
extern __shared__ float smem_f32_bwd[];
float *smem_in = &smem_f32_bwd[0];
float *smem_interaction_ugrad = &smem_f32_bwd[input_size];
// Input
uint input_batch_offset = blockIdx.x * input_size;
const float *gmem_in = &input[input_batch_offset];
// Gradient
const uint &grad_batch_offset = input_batch_offset;
float *gmem_mlp_grad = &bottom_mlp_grad[blockIdx.x * num_cols];
float *gmem_interaction_grad = &grad[grad_batch_offset];
// Upstream Gradient
uint upstream_grad_batch_offset = blockIdx.x * padded_ugrad_size;
const float *gmem_mlp_ugrad = &upstream_grad[upstream_grad_batch_offset];
const float *gmem_interaction_ugrad = &upstream_grad[upstream_grad_batch_offset + num_cols];
// input -> shared memory
uint input_size_float4 = input_size >> 2;
for (uint idx = threadIdx.x; idx < input_size_float4; idx += blockDim.x) {
((float4 *)smem_in)[idx] = ((float4 *)gmem_in)[idx];
}
// Interaction Upstream Grad -> Shared Memory
uint upstream_grad_size_float4 = interaction_ugrad_size >> 2;
for (uint idx = threadIdx.x; idx < upstream_grad_size_float4; idx += blockDim.x) {
((float4 *)smem_interaction_ugrad)[idx] = ((float4 *)gmem_interaction_ugrad)[idx];
}
// This tail loop is not dead code: interaction_ugrad_size is the unpadded size, so it is generally not a multiple of 4.
// It copies whatever is left over after the vectorized float4 copy above.
uint vectorized_load_offset = (upstream_grad_size_float4 << 2);
for (uint idx = vectorized_load_offset + threadIdx.x; idx < interaction_ugrad_size; idx += blockDim.x) {
smem_interaction_ugrad[idx] = gmem_interaction_ugrad[idx];
}
__syncthreads();
// Copy the upstream gradient w.r.t. the mlp to its corresponding memory location.
for (uint idx = threadIdx.x; idx < (num_cols >> 2); idx += blockDim.x) {
((float4 *)gmem_mlp_grad)[idx] = ((float4 *)gmem_mlp_ugrad)[idx];
}
for (uint idx = threadIdx.x; idx < num_cols; idx += blockDim.x) {
size_t grad_idx = idx;
for (uint row_idx = 0; row_idx < num_rows; row_idx++) {
float sum = 0;
size_t upstream_grad_offset = (row_idx * (row_idx - 1)) >> 1;
for (int k = 0; k < row_idx; k++) {
sum = fmaf(smem_in[k * num_cols + idx], smem_interaction_ugrad[upstream_grad_offset + k], sum);
}
for (int k = row_idx + 1; k < num_rows; k++) {
upstream_grad_offset = (k * (k - 1)) >> 1; // TODO: this can become a sum
sum = fmaf(smem_in[k * num_cols + idx], smem_interaction_ugrad[upstream_grad_offset + row_idx], sum);
}
gmem_interaction_grad[grad_idx] = sum;
grad_idx += num_cols;
}
}
}
inline void dotBasedInteractF32Bwd(const void *input,
const void *upstream_grad,
void *grad,
void *bottom_mlp_grad,
uint batch_size,
uint num_rows,
uint num_cols) {
const uint kNumThreads = 128;
uint num_blocks = batch_size;
uint input_size = num_rows * num_cols;
// 1D ugrad size
uint interaction_ugrad_size = num_rows * (num_rows - 1) >> 1; //this IS supposed to be without padding
// this has to be the same padding that we applied in forward
uint unpadded_ugrad_size = num_cols + interaction_ugrad_size;
// this has to be the same padding that we applied in forward
uint padded_ugrad_size = ((unpadded_ugrad_size-1)/8 + 1)*8; //round up to multiple of 8
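// e.g. with num_rows = 27 and num_cols = 128 (illustrative values, not a requirement): interaction_ugrad_size = 27*26/2 = 351, unpadded_ugrad_size = 479, padded_ugrad_size = 480.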
// input space + upstream grad space
// We copy the whole input plus just the unpadded interaction part of the upstream grad
uint smem_size_elems = input_size + interaction_ugrad_size;
uint smem_size_bytes = smem_size_elems << 2; // F32 Kernel
// we use the fact that padded_ugrad_size is always divisible by 4 - we just made it.
bool float4_predicate = !(num_cols & 3);
if (float4_predicate) {
dotBasedInteractF32BwdKernel<kNumThreads>
<<<num_blocks, kNumThreads, smem_size_bytes,
at::cuda::getCurrentCUDAStream()>>>((const float *)input,
(const float *)upstream_grad,
(float *)grad,
(float *)bottom_mlp_grad,
batch_size,
num_rows,
num_cols,
input_size,
padded_ugrad_size,
interaction_ugrad_size);
} else {
dotBasedInteractF32BwdKernelNonAligned<kNumThreads>
<<<num_blocks, kNumThreads, smem_size_bytes,
at::cuda::getCurrentCUDAStream()>>>((const float *)input,
(const float *)upstream_grad,
(float *)grad,
(float *)bottom_mlp_grad,
batch_size,
num_rows,
num_cols,
input_size,
padded_ugrad_size,
interaction_ugrad_size);
}
}
|
70741ed9673ecb54d2d9a380fcd2883872bb472a.hip | // !!! This is a file automatically generated by hipify!!!
/*
* Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <stdio.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <math.h>
extern "C" __device__
float dist_cuda(float a, float b) {
return sqrt(a*a + b*b);
}
| 70741ed9673ecb54d2d9a380fcd2883872bb472a.cu | /*
* Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <stdio.h>
#include <stdlib.h>
#include <cuda.h>
#include <math.h>
extern "C" __device__
float dist_cuda(float a, float b) {
return sqrt(a*a + b*b);
}
|
c12efceb11b3a5ac985b3d4b4b45eedeed727ef7.hip | // !!! This is a file automatically generated by hipify!!!
/*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of the copyright holders may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#if !defined CUDA_DISABLER
#include "row_filter.hpp"
namespace filter
{
template void linearRow<uchar3, float3>(PtrStepSzb src, PtrStepSzb dst, const float* kernel, int ksize, int anchor, int brd_type, int cc, hipStream_t stream);
}
#endif /* CUDA_DISABLER */
| c12efceb11b3a5ac985b3d4b4b45eedeed727ef7.cu | /*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of the copyright holders may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#if !defined CUDA_DISABLER
#include "row_filter.hpp"
namespace filter
{
template void linearRow<uchar3, float3>(PtrStepSzb src, PtrStepSzb dst, const float* kernel, int ksize, int anchor, int brd_type, int cc, cudaStream_t stream);
}
#endif /* CUDA_DISABLER */
|
bee6e676d1818cb39a1ec32b421bc919db154bac.hip | // !!! This is a file automatically generated by hipify!!!
//Example 1. Application Using C and cuBLAS: 1-based indexing
//-----------------------------------------------------------
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <hip/hip_runtime.h>
#include "rocblas.h"
#define M 6
#define N 5
#define IDX2F(i,j,ld) ((((j)-1)*(ld))+((i)-1))
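// Column-major, 1-based indexing: e.g. IDX2F(2,3,M) = (3-1)*6 + (2-1) = 13 for M = 6.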
static __inline__ void modify (hipblasHandle_t handle, float *m, int ldm, int n, int p, int q, float alpha, float beta){
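// Scale row p from column q onward by alpha (the ldm stride steps across columns in column-major storage), then column q from row p onward by beta (unit stride steps down the column).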
hipblasSscal (handle, n-q+1, &alpha, &m[IDX2F(p,q,ldm)], ldm);
hipblasSscal (handle, ldm-p+1, &beta, &m[IDX2F(p,q,ldm)], 1);
}
int main (void){
hipError_t cudaStat;
hipblasStatus_t stat;
hipblasHandle_t handle;
int i, j;
float* devPtrA;
float* a = 0;
a = (float *)malloc (M * N * sizeof (*a));
if (!a) {
printf ("host memory allocation failed");
return EXIT_FAILURE;
}
for (j = 1; j <= N; j++) {
for (i = 1; i <= M; i++) {
a[IDX2F(i,j,M)] = (float)((i-1) * N + j);
}
}
cudaStat = hipMalloc ((void**)&devPtrA, M*N*sizeof(*a));
if (cudaStat != hipSuccess) {
printf ("device memory allocation failed");
return EXIT_FAILURE;
}
stat = hipblasCreate(&handle);
if (stat != HIPBLAS_STATUS_SUCCESS) {
printf ("CUBLAS initialization failed\n");
return EXIT_FAILURE;
}
stat = hipblasSetMatrix (M, N, sizeof(*a), a, M, devPtrA, M);
if (stat != HIPBLAS_STATUS_SUCCESS) {
printf ("data download failed");
hipFree (devPtrA);
hipblasDestroy(handle);
return EXIT_FAILURE;
}
modify (handle, devPtrA, M, N, 2, 3, 16.0f, 12.0f);
stat = hipblasGetMatrix (M, N, sizeof(*a), devPtrA, M, a, M);
if (stat != HIPBLAS_STATUS_SUCCESS) {
printf ("data upload failed");
hipFree (devPtrA);
hipblasDestroy(handle);
return EXIT_FAILURE;
}
hipFree (devPtrA);
hipblasDestroy(handle);
for (j = 1; j <= N; j++) {
for (i = 1; i <= M; i++) {
printf ("%7.0f", a[IDX2F(i,j,M)]);
}
printf ("\n");
}
free(a);
return EXIT_SUCCESS;
}
| bee6e676d1818cb39a1ec32b421bc919db154bac.cu | //Example 1. Application Using C and cuBLAS: 1-based indexing
//-----------------------------------------------------------
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <cuda_runtime.h>
#include "cublas_v2.h"
#define M 6
#define N 5
#define IDX2F(i,j,ld) ((((j)-1)*(ld))+((i)-1))
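// Column-major, 1-based indexing: e.g. IDX2F(2,3,M) = (3-1)*6 + (2-1) = 13 for M = 6.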
static __inline__ void modify (cublasHandle_t handle, float *m, int ldm, int n, int p, int q, float alpha, float beta){
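// Scale row p from column q onward by alpha (the ldm stride steps across columns in column-major storage), then column q from row p onward by beta (unit stride steps down the column).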
cublasSscal (handle, n-q+1, &alpha, &m[IDX2F(p,q,ldm)], ldm);
cublasSscal (handle, ldm-p+1, &beta, &m[IDX2F(p,q,ldm)], 1);
}
int main (void){
cudaError_t cudaStat;
cublasStatus_t stat;
cublasHandle_t handle;
int i, j;
float* devPtrA;
float* a = 0;
a = (float *)malloc (M * N * sizeof (*a));
if (!a) {
printf ("host memory allocation failed");
return EXIT_FAILURE;
}
for (j = 1; j <= N; j++) {
for (i = 1; i <= M; i++) {
a[IDX2F(i,j,M)] = (float)((i-1) * N + j);
}
}
cudaStat = cudaMalloc ((void**)&devPtrA, M*N*sizeof(*a));
if (cudaStat != cudaSuccess) {
printf ("device memory allocation failed");
return EXIT_FAILURE;
}
stat = cublasCreate(&handle);
if (stat != CUBLAS_STATUS_SUCCESS) {
printf ("CUBLAS initialization failed\n");
return EXIT_FAILURE;
}
stat = cublasSetMatrix (M, N, sizeof(*a), a, M, devPtrA, M);
if (stat != CUBLAS_STATUS_SUCCESS) {
printf ("data download failed");
cudaFree (devPtrA);
cublasDestroy(handle);
return EXIT_FAILURE;
}
modify (handle, devPtrA, M, N, 2, 3, 16.0f, 12.0f);
stat = cublasGetMatrix (M, N, sizeof(*a), devPtrA, M, a, M);
if (stat != CUBLAS_STATUS_SUCCESS) {
printf ("data upload failed");
cudaFree (devPtrA);
cublasDestroy(handle);
return EXIT_FAILURE;
}
cudaFree (devPtrA);
cublasDestroy(handle);
for (j = 1; j <= N; j++) {
for (i = 1; i <= M; i++) {
printf ("%7.0f", a[IDX2F(i,j,M)]);
}
printf ("\n");
}
free(a);
return EXIT_SUCCESS;
}
|
e4305b9661e689c6848ae6540faf0e7e60586895.hip | // !!! This is a file automatically generated by hipify!!!
/********************************************************************************
*
* Copyright (C) 2015 Culham Centre for Fusion Energy,
* United Kingdom Atomic Energy Authority, Oxfordshire OX14 3DB, UK
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
********************************************************************************
*
* Program: SPILADY - A Spin-Lattice Dynamics Simulation Program
* Version: 1.0
* Date: Aug 2015
* Author: Pui-Wai (Leo) MA
* Contact: [email protected]
* Address: Culham Centre for Fusion Energy, OX14 3DB, United Kingdom
*
********************************************************************************/
#ifdef GPU
#include "spilady.h"
#include "prototype_GPU.h"
void copy_CPU_to_GPU(){
//copy all information of atoms from CPU to GPU
hipMalloc((void**)&first_atom_ptr_d, natom*sizeof(atom_struct));
hipMemcpy(first_atom_ptr_d, first_atom_ptr, natom*sizeof(atom_struct), hipMemcpyHostToDevice);
//copy all information of linkcells from CPU to GPU
hipMalloc((void**)&first_cell_ptr_d, ncells*sizeof(cell_struct));
hipMemcpy(first_cell_ptr_d, first_cell_ptr, ncells*sizeof(cell_struct), hipMemcpyHostToDevice);
//copy all necessary variables from CPU to GPU, using "struct varGPU".
var_ptr = (varGPU*)malloc(sizeof(varGPU));
hipMalloc((void**)&var_ptr_d, sizeof(varGPU));
var_ptr->ninput = ninput;
var_ptr->finput = finput;
var_ptr->rmax = rmax;
var_ptr->rhomax = rhomax;
var_ptr->finput_over_rmax = finput_over_rmax;
var_ptr->finput_over_rhomax = finput_over_rhomax;
var_ptr->nperfect = nperfect ;
var_ptr->natom = natom;
var_ptr->box_length = box_length;
var_ptr->box_length_half = box_length_half;
var_ptr->box_volume = box_volume;
var_ptr->d = d;
var_ptr->Inv_d = Inv_d;
var_ptr->density = density;
var_ptr->no_of_link_cell_x = no_of_link_cell_x;
var_ptr->no_of_link_cell_y = no_of_link_cell_y;
var_ptr->no_of_link_cell_z = no_of_link_cell_z;
var_ptr->ncells = ncells;
var_ptr->a_lattice = a_lattice;
#ifdef hcp0001
var_ptr->c_lattice = c_lattice;
#endif
var_ptr->no_of_unit_cell_x = no_of_unit_cell_x;
var_ptr->no_of_unit_cell_y = no_of_unit_cell_y;
var_ptr->no_of_unit_cell_z = no_of_unit_cell_z;
var_ptr->unit_cell_no_of_atom = unit_cell_no_of_atom;
var_ptr->unit_cell_edge_x = unit_cell_edge_x;
var_ptr->unit_cell_edge_y = unit_cell_edge_y;
var_ptr->unit_cell_edge_z = unit_cell_edge_z;
var_ptr->atmass = atmass;
var_ptr->temperature = temperature;
#if (defined MD || defined SLDH || defined SLDHL || defined SLDNC) && defined lattlang
var_ptr->gamma_L_over_mass = gamma_L_over_mass;
var_ptr->gamma_L = gamma_L;
#endif
#if (defined SDH || defined SLDH) && defined spinlang
var_ptr->gamma_S_H = gamma_S_H;
#endif
#if (defined SDHL || defined SLDHL) && defined spinlang
var_ptr->gamma_S_HL = gamma_S_HL;
#endif
#if defined STRESS
var_ptr->stress_xx = stress_xx;
var_ptr->stress_yy = stress_yy;
var_ptr->stress_zz = stress_zz;
#endif
#if defined PRESSURE
var_ptr->pressure = pressure;
#endif
#if defined STRESS || defined PRESSURE
var_ptr->baro_damping_time = baro_damping_time;
#endif
#if defined MD || defined SLDH || defined SLDHL || defined SLDNC
var_ptr->rcut_pot = rcut_pot;
var_ptr->rcut_pot_sq = rcut_pot_sq;
#endif
#if defined SDH || defined SDHL || defined SLDH || defined SLDHL
var_ptr->rcut_mag = rcut_mag;
var_ptr->rcut_mag_sq = rcut_mag_sq;
#endif
var_ptr->rcut_max = rcut_max;
var_ptr->rcut_max_sq = rcut_max_sq;
#ifdef localvol
var_ptr->rcut_vol = rcut_vol;
#endif
var_ptr->min_length_link_cell = min_length_link_cell;
#ifdef extfield
var_ptr->Hext = Hext;
#endif
#ifdef changestep
var_ptr->displace_limit = displace_limit;
var_ptr->phi_limit = phi_limit;
#endif
#ifdef SLDNC
var_ptr->para = para;
#endif
hipMemcpy(var_ptr_d, var_ptr, sizeof(varGPU), hipMemcpyHostToDevice);
}
void copy_atoms_from_GPU_to_CPU(){
hipMemcpy(first_atom_ptr, first_atom_ptr_d, natom*sizeof(atom_struct), hipMemcpyDeviceToHost);
}
void copy_cells_from_GPU_to_CPU(){
hipMemcpy(first_cell_ptr, first_cell_ptr_d, ncells*sizeof(cell_struct), hipMemcpyDeviceToHost);
}
void free_copy_CPU_to_GPU(){
hipFree(first_atom_ptr_d);
hipFree(first_cell_ptr_d);
hipFree(var_ptr_d);
free(var_ptr);
}
#endif
| e4305b9661e689c6848ae6540faf0e7e60586895.cu | /********************************************************************************
*
* Copyright (C) 2015 Culham Centre for Fusion Energy,
* United Kingdom Atomic Energy Authority, Oxfordshire OX14 3DB, UK
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
********************************************************************************
*
* Program: SPILADY - A Spin-Lattice Dynamics Simulation Program
* Version: 1.0
* Date: Aug 2015
* Author: Pui-Wai (Leo) MA
* Contact: [email protected]
* Address: Culham Centre for Fusion Energy, OX14 3DB, United Kingdom
*
********************************************************************************/
#ifdef GPU
#include "spilady.h"
#include "prototype_GPU.h"
void copy_CPU_to_GPU(){
//copy all information of atoms from CPU to GPU
cudaMalloc((void**)&first_atom_ptr_d, natom*sizeof(atom_struct));
cudaMemcpy(first_atom_ptr_d, first_atom_ptr, natom*sizeof(atom_struct), cudaMemcpyHostToDevice);
//copy all information of linkcells from CPU to GPU
cudaMalloc((void**)&first_cell_ptr_d, ncells*sizeof(cell_struct));
cudaMemcpy(first_cell_ptr_d, first_cell_ptr, ncells*sizeof(cell_struct), cudaMemcpyHostToDevice);
//copy all necessary variables from CPU to GPU, using "struct varGPU".
var_ptr = (varGPU*)malloc(sizeof(varGPU));
cudaMalloc((void**)&var_ptr_d, sizeof(varGPU));
var_ptr->ninput = ninput;
var_ptr->finput = finput;
var_ptr->rmax = rmax;
var_ptr->rhomax = rhomax;
var_ptr->finput_over_rmax = finput_over_rmax;
var_ptr->finput_over_rhomax = finput_over_rhomax;
var_ptr->nperfect = nperfect ;
var_ptr->natom = natom;
var_ptr->box_length = box_length;
var_ptr->box_length_half = box_length_half;
var_ptr->box_volume = box_volume;
var_ptr->d = d;
var_ptr->Inv_d = Inv_d;
var_ptr->density = density;
var_ptr->no_of_link_cell_x = no_of_link_cell_x;
var_ptr->no_of_link_cell_y = no_of_link_cell_y;
var_ptr->no_of_link_cell_z = no_of_link_cell_z;
var_ptr->ncells = ncells;
var_ptr->a_lattice = a_lattice;
#ifdef hcp0001
var_ptr->c_lattice = c_lattice;
#endif
var_ptr->no_of_unit_cell_x = no_of_unit_cell_x;
var_ptr->no_of_unit_cell_y = no_of_unit_cell_y;
var_ptr->no_of_unit_cell_z = no_of_unit_cell_z;
var_ptr->unit_cell_no_of_atom = unit_cell_no_of_atom;
var_ptr->unit_cell_edge_x = unit_cell_edge_x;
var_ptr->unit_cell_edge_y = unit_cell_edge_y;
var_ptr->unit_cell_edge_z = unit_cell_edge_z;
var_ptr->atmass = atmass;
var_ptr->temperature = temperature;
#if (defined MD || defined SLDH || defined SLDHL || defined SLDNC) && defined lattlang
var_ptr->gamma_L_over_mass = gamma_L_over_mass;
var_ptr->gamma_L = gamma_L;
#endif
#if (defined SDH || defined SLDH) && defined spinlang
var_ptr->gamma_S_H = gamma_S_H;
#endif
#if (defined SDHL || defined SLDHL) && defined spinlang
var_ptr->gamma_S_HL = gamma_S_HL;
#endif
#if defined STRESS
var_ptr->stress_xx = stress_xx;
var_ptr->stress_yy = stress_yy;
var_ptr->stress_zz = stress_zz;
#endif
#if defined PRESSURE
var_ptr->pressure = pressure;
#endif
#if defined STRESS || defined PRESSURE
var_ptr->baro_damping_time = baro_damping_time;
#endif
#if defined MD || defined SLDH || defined SLDHL || defined SLDNC
var_ptr->rcut_pot = rcut_pot;
var_ptr->rcut_pot_sq = rcut_pot_sq;
#endif
#if defined SDH || defined SDHL || defined SLDH || defined SLDHL
var_ptr->rcut_mag = rcut_mag;
var_ptr->rcut_mag_sq = rcut_mag_sq;
#endif
var_ptr->rcut_max = rcut_max;
var_ptr->rcut_max_sq = rcut_max_sq;
#ifdef localvol
var_ptr->rcut_vol = rcut_vol;
#endif
var_ptr->min_length_link_cell = min_length_link_cell;
#ifdef extfield
var_ptr->Hext = Hext;
#endif
#ifdef changestep
var_ptr->displace_limit = displace_limit;
var_ptr->phi_limit = phi_limit;
#endif
#ifdef SLDNC
var_ptr->para = para;
#endif
cudaMemcpy(var_ptr_d, var_ptr, sizeof(varGPU), cudaMemcpyHostToDevice);
}
void copy_atoms_from_GPU_to_CPU(){
cudaMemcpy(first_atom_ptr, first_atom_ptr_d, natom*sizeof(atom_struct), cudaMemcpyDeviceToHost);
}
void copy_cells_from_GPU_to_CPU(){
cudaMemcpy(first_cell_ptr, first_cell_ptr_d, ncells*sizeof(cell_struct), cudaMemcpyDeviceToHost);
}
void free_copy_CPU_to_GPU(){
cudaFree(first_atom_ptr_d);
cudaFree(first_cell_ptr_d);
cudaFree(var_ptr_d);
free(var_ptr);
}
#endif
|
ba323fa5c2c956e7d17d54eec6f08d926076a171.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "STREAM_Add_Optimized_double.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
hipSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
double *a = NULL;
hipMalloc(&a, XSIZE*YSIZE);
double *b = NULL;
hipMalloc(&b, XSIZE*YSIZE);
double *c = NULL;
hipMalloc(&c, XSIZE*YSIZE);
size_t len = 1;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
hipFree(0);hipLaunchKernelGGL((
STREAM_Add_Optimized_double), dim3(gridBlock),dim3(threadBlock), 0, 0, a,b,c,len);
hipDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {hipLaunchKernelGGL((
STREAM_Add_Optimized_double), dim3(gridBlock),dim3(threadBlock), 0, 0, a,b,c,len);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {hipLaunchKernelGGL((
STREAM_Add_Optimized_double), dim3(gridBlock),dim3(threadBlock), 0, 0, a,b,c,len);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} | ba323fa5c2c956e7d17d54eec6f08d926076a171.cu | #include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "STREAM_Add_Optimized_double.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
cudaSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
double *a = NULL;
cudaMalloc(&a, XSIZE*YSIZE);
double *b = NULL;
cudaMalloc(&b, XSIZE*YSIZE);
double *c = NULL;
cudaMalloc(&c, XSIZE*YSIZE);
size_t len = 1;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
cudaFree(0);
STREAM_Add_Optimized_double<<<gridBlock,threadBlock>>>(a,b,c,len);
cudaDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
STREAM_Add_Optimized_double<<<gridBlock,threadBlock>>>(a,b,c,len);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
STREAM_Add_Optimized_double<<<gridBlock,threadBlock>>>(a,b,c,len);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} |
0d4b19bef8268e3387c5c92fa295b3a9f371aabb.hip | // !!! This is a file automatically generated by hipify!!!
#include <cstdlib>
#include <fstream>
#include <iomanip>
#include <iostream>
#include <sstream>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "colors.h"
#define MAX_ITERATIONS 65536
#define THREADS_PER_BLOCK 256
__device__ float autoPow(float x, float y)
{
return powf(x, y);
}
__device__ double autoPow(double x, double y)
{
return pow(x, y);
}
template<typename T>
__global__ void mandelbrotKernel(const int depthStart,
const int depthEnd,
const T zoomFactor,
const int width,
const int height,
unsigned* const intensities)
{
// Calculate the global thread index and exit if there is no work to do
const int idx = blockIdx.x * blockDim.x + threadIdx.x;
const int pz = depthStart + idx / (height * width);
const int py = (idx / width) % height;
const int px = idx % width;
if (idx >= (depthEnd - depthStart) * height * width) return;
// Parameters for a really cool part that has great depth
const T xCenter = static_cast<T>(-0.235125001);
const T yCenter = static_cast<T>(0.827215);
//const T xCenter = static_cast<T>(-0.598274455069517539539);
//const T yCenter = static_cast<T>(0.663825928894102918143);
const T baseVerticalRadius = static_cast<T>(1.0);//(0.00004);
// This controls how much each depth zooms in, bounded by (0,1]. Lower values zoom faster.
//const T zoomFactor = static_cast<T>(0.9);
//const T zoomFactor = static_cast<T>(0.95);
// Compute mandelbrot set
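// Each depth level pz zooms in geometrically (yRadius = baseVerticalRadius * zoomFactor^pz);
// (x0, y0) then maps pixel (px, py) into that window of the complex plane around (xCenter, yCenter).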
const T yRadius = baseVerticalRadius * autoPow(zoomFactor, pz);
const T xRadius = yRadius * width / height;
const T y0 = yCenter + yRadius - static_cast<T>(2.0) * yRadius * py / (height - 1);
const T x0 = xCenter - xRadius + static_cast<T>(2.0) * xRadius * px / (width - 1);
T x = x0;
T y = y0;
T x2 = x * x;
T y2 = y * y;
unsigned iteration = 0;
while (x2 + y2 <= static_cast<T>(4.0) && iteration < MAX_ITERATIONS)
{
y = static_cast<T>(2.0) * x * y + y0;
x = x2 - y2 + x0;
x2 = x * x;
y2 = y * y;
++iteration;
}
// Store intensity value for determining color later
intensities[idx] = iteration;
}
template __global__ void mandelbrotKernel<float>(const int, const int, const float, const int, const int, unsigned* const);
template __global__ void mandelbrotKernel<double>(const int, const int, const double, const int, const int, unsigned* const);
void savePpmImage(const char* const filename, const int width, const int height, const unsigned* const intensities)
{
// Open the file
std::ofstream outfile(filename);
// Write the header
//outfile << "P3 " << width << " " << height << " " << MAX_ITERATIONS / 2 << "\n";
outfile << "P3 " << width << " " << height << " " << 255 << "\n";
// Write pixel information
for (int y = 0; y < height; ++y)
{
const int yidx = y * width;
for (int x = 0; x < width; ++x)
{
//const std::array<unsigned, 3> color = mapIntensityToColor(intensities[yidx + x]);
//outfile << color[0] << " " << color[1] << " " << color[2] << " ";
const unsigned i = intensities[yidx + x] % 256;
outfile << djl70::paletteRed[i] << " " << djl70::paletteGreen[i] << " " << djl70::paletteBlue[i] << " ";
}
outfile << "\n";
}
outfile.close();
}
void printUsage(const char* const programName)
{
std::cerr << "Usage: " << programName << " <depths> <width> [save_output [depth_start [zoom_factor]]]\n"
<< "Description: Computes the Mandelbrot set using CUDA\n"
<< "Argument combinations worth trying:\n"
<< " " << programName << " 256 120 1\n"
<< " " << programName << " 16 600 1\n"
<< " " << programName << " 64 2400 0\n"
<< " " << programName << " 64 2400 0 64\n"
<< " " << programName << " 512 600 0 0 0.99\n"
<< "\n"
<< "depths: The number of 'layers' to process (each layer 'zooms in' to the set)\n"
<< "width: The width of the images to process (height is set automatically)\n"
<< "save_output: (optional, default 0) Set to 1 to save the processed images (note: not recommended beyond depths=64 and width=600, because otherwise saving the images may require a lot of time and disk space)\n"
<< "depth_start: (optional, default 0) The zero-based 'layer' to begin processing at, inclusive\n"
<< "zoom_factor: (optional, default 0.9) A value (0 < zoom_factor <= 1) deciding how much to zoom for each 'layer'. Higher values produce slower zooms"
<< std::endl;
}
int main(int argc, char* argv[])
{
// Verify command line arguments
if (argc < 3)
{
printUsage(argv[0]);
return -1;
}
const int depths = atoi(argv[1]);
if (depths < 1)
{
std::cerr << "Error: arg 'depths' must be at least 1" << std::endl;
return -1;
}
const int width = atoi(argv[2]);
if (width < 2)
{
std::cerr << "Error: arg 'width' must be at least 2" << std::endl;
return -1;
}
const int height = (float)width * 2.0f / 3.0f;
const bool doSaveImages = (argc >= 4) && (atoi(argv[3]) == 1);
const int depthStart = (argc >= 5) ? atoi(argv[4]) : 0;
if (depthStart < 0)
{
std::cerr << "Error: arg 'depth_start' must be at least 0" << std::endl;
return -1;
}
const int depthEnd = depthStart + depths;
const double zoomFactor = (argc >= 6) ? atof(argv[5]) : 0.9;
std::cout << "'depths' = " << depths
<< "\n'width' = " << width
<< "\n'height' = " << height
<< "\n'save_output' = " << doSaveImages
<< "\n'depth_start' = " << depthStart << " (inclusive)"
<< "\n'depth_end' = " << depthEnd << " (exclusive)"
<< "\n'zoom_factor' = " << zoomFactor
<< std::endl;
// Allocate host memory
const int n = depths * height * width;
unsigned* h_intensities = new unsigned[n];
// Allocate device memory
unsigned* d_intensities;
if (hipSuccess != hipMalloc((void**)&d_intensities, sizeof(unsigned) * n))
{
delete[] h_intensities;
std::cerr << "Error: failed to allocate device memory" << std::endl;
return -1;
}
// Launch GPU kernel and begin timing
timeval start, end;
if (doSaveImages || depthEnd > 128)
{
std::cout << "'depth_end' > 128 or 'save_output' = 1, launching kernel with double precision to ensure high quality output" << std::endl;
gettimeofday(&start, NULL);
hipLaunchKernelGGL(( mandelbrotKernel<double>), dim3((n + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK), dim3(THREADS_PER_BLOCK), 0, 0, depthStart, depthEnd, zoomFactor, width, height, d_intensities);
}
else
{
std::cout << "'depth_end' <= 128 and 'save_output' = 0, launching kernel with single precision to ensure the best performance" << std::endl;
gettimeofday(&start, NULL);
hipLaunchKernelGGL(( mandelbrotKernel<float>), dim3((n + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK), dim3(THREADS_PER_BLOCK), 0, 0, depthStart, depthEnd, zoomFactor, width, height, d_intensities);
}
hipDeviceSynchronize();
// End timing
gettimeofday(&end, NULL);
const double runtime = end.tv_sec - start.tv_sec + (end.tv_usec - start.tv_usec) / 1000000.0;
std::cout << "Kernel runtime: " << std::fixed << std::setprecision(4) << runtime << " s" << std::endl;
// Check for errors from the kernel
hipError_t e = hipGetLastError();
if (hipSuccess != e)
{
delete[] h_intensities;
hipFree(d_intensities);
std::cerr << "CUDA error " << e << ": " << hipGetErrorString(e) << std::endl;
return -1;
}
// Save images if desired
if (doSaveImages)
{
// Copy results to the host
if (hipSuccess != hipMemcpy(h_intensities, d_intensities, sizeof(unsigned) * n, hipMemcpyDeviceToHost))
{
delete[] h_intensities;
hipFree(d_intensities);
std::cerr << "Error: failed to copy from device to host" << std::endl;
return -1;
}
std::cout << "Saving output images, please wait..." << std::endl;
for (int i = depthStart; i < depthEnd; ++i)
{
std::stringstream filename;
filename << "mandelbrot" << std::setw(3) << std::setfill('0') << i << std::setfill(' ') << ".ppm";
savePpmImage(filename.str().c_str(), width, height, &h_intensities[(i - depthStart) * height * width]);
}
std::cout << "Output images successfully saved as .ppm files\n"
<< "Try running 'convert -delay 10 mandelbrot*ppm mandelbrot.gif' to create an animation"
<< std::endl;
}
// Free memory
delete[] h_intensities;
hipFree(d_intensities);
return 0;
}
| 0d4b19bef8268e3387c5c92fa295b3a9f371aabb.cu | #include <cstdlib>
#include <fstream>
#include <iomanip>
#include <iostream>
#include <sstream>
#include <cuda.h>
#include <sys/time.h>
#include "colors.h"
#define MAX_ITERATIONS 65536
#define THREADS_PER_BLOCK 256
__device__ float autoPow(float x, float y)
{
return powf(x, y);
}
__device__ double autoPow(double x, double y)
{
return pow(x, y);
}
template<typename T>
__global__ void mandelbrotKernel(const int depthStart,
const int depthEnd,
const T zoomFactor,
const int width,
const int height,
unsigned* const intensities)
{
// Calculate the global thread index and exit if there is no work to do
const int idx = blockIdx.x * blockDim.x + threadIdx.x;
const int pz = depthStart + idx / (height * width);
const int py = (idx / width) % height;
const int px = idx % width;
if (idx >= (depthEnd - depthStart) * height * width) return;
// Parameters for a really cool part that has great depth
const T xCenter = static_cast<T>(-0.235125001);
const T yCenter = static_cast<T>(0.827215);
//const T xCenter = static_cast<T>(-0.598274455069517539539);
//const T yCenter = static_cast<T>(0.663825928894102918143);
const T baseVerticalRadius = static_cast<T>(1.0);//(0.00004);
// This controls how much each depth zooms in, bounded by (0,1]. Lower values zoom faster.
//const T zoomFactor = static_cast<T>(0.9);
//const T zoomFactor = static_cast<T>(0.95);
// Compute mandelbrot set
const T yRadius = baseVerticalRadius * autoPow(zoomFactor, pz);
const T xRadius = yRadius * width / height;
const T y0 = yCenter + yRadius - static_cast<T>(2.0) * yRadius * py / (height - 1);
const T x0 = xCenter - xRadius + static_cast<T>(2.0) * xRadius * px / (width - 1);
T x = x0;
T y = y0;
T x2 = x * x;
T y2 = y * y;
unsigned iteration = 0;
while (x2 + y2 <= static_cast<T>(4.0) && iteration < MAX_ITERATIONS)
{
y = static_cast<T>(2.0) * x * y + y0;
x = x2 - y2 + x0;
x2 = x * x;
y2 = y * y;
++iteration;
}
// Store intensity value for determining color later
intensities[idx] = iteration;
}
template __global__ void mandelbrotKernel<float>(const int, const int, const float, const int, const int, unsigned* const);
template __global__ void mandelbrotKernel<double>(const int, const int, const double, const int, const int, unsigned* const);
void savePpmImage(const char* const filename, const int width, const int height, const unsigned* const intensities)
{
// Open the file
std::ofstream outfile(filename);
// Write the header
//outfile << "P3 " << width << " " << height << " " << MAX_ITERATIONS / 2 << "\n";
outfile << "P3 " << width << " " << height << " " << 255 << "\n";
// Write pixel information
for (int y = 0; y < height; ++y)
{
const int yidx = y * width;
for (int x = 0; x < width; ++x)
{
//const std::array<unsigned, 3> color = mapIntensityToColor(intensities[yidx + x]);
//outfile << color[0] << " " << color[1] << " " << color[2] << " ";
const unsigned i = intensities[yidx + x] % 256;
outfile << djl70::paletteRed[i] << " " << djl70::paletteGreen[i] << " " << djl70::paletteBlue[i] << " ";
}
outfile << "\n";
}
outfile.close();
}
void printUsage(const char* const programName)
{
std::cerr << "Usage: " << programName << " <depths> <width> [save_output [depth_start [zoom_factor]]]\n"
<< "Description: Computes the Mandelbrot set using CUDA\n"
<< "Argument combinations worth trying:\n"
<< " " << programName << " 256 120 1\n"
<< " " << programName << " 16 600 1\n"
<< " " << programName << " 64 2400 0\n"
<< " " << programName << " 64 2400 0 64\n"
<< " " << programName << " 512 600 0 0 0.99\n"
<< "\n"
<< "depths: The number of 'layers' to process (each layer 'zooms in' to the set)\n"
<< "width: The width of the images to process (height is set automatically)\n"
<< "save_output: (optional, default 0) Set to 1 to save the processed images (note: not recommended beyond depths=64 and width=600, because otherwise saving the images may require a lot of time and disk space)\n"
<< "depth_start: (optional, default 0) The zero-based 'layer' to begin processing at, inclusive\n"
<< "zoom_factor: (optional, default 0.9) A value (0 < zoom_factor <= 1) deciding how much to zoom for each 'layer'. Higher values produce slower zooms"
<< std::endl;
}
int main(int argc, char* argv[])
{
// Verify command line arguments
if (argc < 3)
{
printUsage(argv[0]);
return -1;
}
const int depths = atoi(argv[1]);
if (depths < 1)
{
std::cerr << "Error: arg 'depths' must be at least 1" << std::endl;
return -1;
}
const int width = atoi(argv[2]);
if (width < 2)
{
std::cerr << "Error: arg 'width' must be at least 2" << std::endl;
return -1;
}
const int height = (float)width * 2.0f / 3.0f;
const bool doSaveImages = (argc >= 4) && (atoi(argv[3]) == 1);
const int depthStart = (argc >= 5) ? atoi(argv[4]) : 0;
if (depthStart < 0)
{
std::cerr << "Error: arg 'depth_start' must be at least 0" << std::endl;
return -1;
}
const int depthEnd = depthStart + depths;
const double zoomFactor = (argc >= 6) ? atof(argv[5]) : 0.9;
std::cout << "'depths' = " << depths
<< "\n'width' = " << width
<< "\n'height' = " << height
<< "\n'save_output' = " << doSaveImages
<< "\n'depth_start' = " << depthStart << " (inclusive)"
<< "\n'depth_end' = " << depthEnd << " (exclusive)"
<< "\n'zoom_factor' = " << zoomFactor
<< std::endl;
// Allocate host memory
const int n = depths * height * width;
unsigned* h_intensities = new unsigned[n];
// Allocate device memory
unsigned* d_intensities;
if (cudaSuccess != cudaMalloc((void**)&d_intensities, sizeof(unsigned) * n))
{
delete[] h_intensities;
std::cerr << "Error: failed to allocate device memory" << std::endl;
return -1;
}
// Launch GPU kernel and begin timing
timeval start, end;
if (doSaveImages || depthEnd > 128)
{
std::cout << "'depth_end' > 128 or 'save_output' = 1, launching kernel with double precision to ensure high quality output" << std::endl;
gettimeofday(&start, NULL);
mandelbrotKernel<double><<<(n + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK, THREADS_PER_BLOCK>>>(depthStart, depthEnd, zoomFactor, width, height, d_intensities);
}
else
{
std::cout << "'depth_end' <= 128 and 'save_output' = 0, launching kernel with single precision to ensure the best performance" << std::endl;
gettimeofday(&start, NULL);
mandelbrotKernel<float><<<(n + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK, THREADS_PER_BLOCK>>>(depthStart, depthEnd, zoomFactor, width, height, d_intensities);
}
cudaDeviceSynchronize();
// End timing
gettimeofday(&end, NULL);
const double runtime = end.tv_sec - start.tv_sec + (end.tv_usec - start.tv_usec) / 1000000.0;
std::cout << "Kernel runtime: " << std::fixed << std::setprecision(4) << runtime << " s" << std::endl;
// Check for errors from the kernel
cudaError_t e = cudaGetLastError();
if (cudaSuccess != e)
{
delete[] h_intensities;
cudaFree(d_intensities);
std::cerr << "CUDA error " << e << ": " << cudaGetErrorString(e) << std::endl;
return -1;
}
// Save images if desired
if (doSaveImages)
{
// Copy results to the host
if (cudaSuccess != cudaMemcpy(h_intensities, d_intensities, sizeof(unsigned) * n, cudaMemcpyDeviceToHost))
{
delete[] h_intensities;
cudaFree(d_intensities);
std::cerr << "Error: failed to copy from device to host" << std::endl;
return -1;
}
std::cout << "Saving output images, please wait..." << std::endl;
for (int i = depthStart; i < depthEnd; ++i)
{
std::stringstream filename;
filename << "mandelbrot" << std::setw(3) << std::setfill('0') << i << std::setfill(' ') << ".ppm";
savePpmImage(filename.str().c_str(), width, height, &h_intensities[(i - depthStart) * height * width]);
}
std::cout << "Output images successfully saved as .ppm files\n"
<< "Try running 'convert -delay 10 mandelbrot*ppm mandelbrot.gif' to create an animation"
<< std::endl;
}
// Free memory
delete[] h_intensities;
cudaFree(d_intensities);
return 0;
}
|
72eb6a49961456664c7ae98a2f51fc5eb77c039d.hip | // !!! This is a file automatically generated by hipify!!!
/*! \file Merge.cu
\author Gregory Diamos <gregory.diamos> \date Wednesday December 1, 2010
\brief The source file for the C interface to CUDA set union routines.
*/
#ifndef SORT_CU_INCLUDED
#define SORT_CU_INCLUDED
// Redfox Includes
#include <redfox/nvcc/interface/RelationalAlgebraKernel.h>
#include <redfox/ra/interface/Union.h>
#include <redfox/ra/interface/Tuple.h>
// Thrust Includes
#include <thrust/set_operations.h>
#include <thrust/device_ptr.h>
// Hydrazine Includes
//#include <hydrazine/implementation/debug.h>
#include <stdio.h>
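// "Less than" comparator for 128-bit keys (ra::tuple::PackedNBytes<2>): compares a[1] first,
// then a[0], i.e. lexicographic order with a[1] treated as the more-significant word.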
struct compare_sort_gpu128
{
__host__ __device__
bool operator()(ra::tuple::PackedNBytes<2> i, ra::tuple::PackedNBytes<2> j)
{
if (i.a[1] != j.a[1])
return (i.a[1] < j.a[1]);
return (i.a[0] < j.a[0]);
}
};
namespace redfox
{
void check(hipError_t status)
{
if(status != hipSuccess)
{
std::cerr << hipGetErrorString(status) << "\n";
std::abort();
}
}
void set_union(void *result, unsigned long long int *size, void* lbegin, void* lend,
void* rbegin, void* rend, unsigned int type)
{
hipEvent_t start, stop;
hipEventCreate(&start); hipEventCreate(&stop);
float exe_time = 0.0f;
hipEventRecord(start,0);
unsigned long long int size_host;
switch(type)
{
default: printf("Invalid Type.\n");
case nvcc::RelationalAlgebraKernel::I8:
{
thrust::device_ptr<unsigned char> result_end = thrust::set_union(
thrust::device_ptr<unsigned char>((unsigned char *)lbegin),
thrust::device_ptr<unsigned char>((unsigned char *)lend),
thrust::device_ptr<unsigned char>((unsigned char *)rbegin),
thrust::device_ptr<unsigned char>((unsigned char *)rend),
thrust::device_ptr<unsigned char>((unsigned char *)result));
size_host = (result_end - thrust::device_ptr<unsigned char>((unsigned char *)result)) * sizeof(unsigned char);
break;
}
case nvcc::RelationalAlgebraKernel::I16:
{
thrust::device_ptr<unsigned short> result_end = thrust::set_union(
thrust::device_ptr<unsigned short>((unsigned short*)lbegin),
thrust::device_ptr<unsigned short>((unsigned short*)lend),
thrust::device_ptr<unsigned short>((unsigned short*)rbegin),
thrust::device_ptr<unsigned short>((unsigned short*)rend),
thrust::device_ptr<unsigned short>((unsigned short*)result));
size_host = (result_end - thrust::device_ptr<unsigned short>((unsigned short *)result)) * sizeof(unsigned short);
break;
}
case nvcc::RelationalAlgebraKernel::I32:
{
thrust::device_ptr<unsigned int> result_end = thrust::set_union(
thrust::device_ptr<unsigned int>((unsigned int*)lbegin),
thrust::device_ptr<unsigned int>((unsigned int*)lend),
thrust::device_ptr<unsigned int>((unsigned int*)rbegin),
thrust::device_ptr<unsigned int>((unsigned int*)rend),
thrust::device_ptr<unsigned int>((unsigned int*)result));
size_host = (result_end - thrust::device_ptr<unsigned int>((unsigned int *)result)) * sizeof(unsigned int);
// unsigned int merge_result[10];
// check(hipMemcpy(merge_result, (unsigned int *)result, 4 * 10,
// hipMemcpyDeviceToHost));
//
// for(int i = 0; i < 10; ++i)
// {
// printf("%u %llx\n", i, merge_result[i]);
// }
break;
}
case nvcc::RelationalAlgebraKernel::I64:
{
typedef thrust::device_ptr<long long unsigned int> ptr;
thrust::device_ptr<long long unsigned int> result_end = thrust::set_union(
ptr((long long unsigned int*)lbegin),
ptr((long long unsigned int*)lend),
ptr((long long unsigned int*)rbegin),
ptr((long long unsigned int*)rend),
ptr((long long unsigned int*)result));
size_host = (result_end - thrust::device_ptr<unsigned long long int>((unsigned long long int *)result)) * sizeof(unsigned long long int);
break;
}
case nvcc::RelationalAlgebraKernel::I128:
{
typedef ra::tuple::PackedNBytes<2> type;
typedef thrust::device_ptr<type> ptr;
ptr result_end = thrust::set_union(
ptr((type*)lbegin),
ptr((type*)lend),
ptr((type*)rbegin),
ptr((type*)rend),
ptr((type*)result), compare_sort_gpu128());
size_host = (result_end - ptr((type *)result)) * sizeof(type);
break;
}
}
check(hipMemcpy(size, &size_host, sizeof(unsigned long long int),
hipMemcpyHostToDevice));
hipEventRecord(stop,0);
hipEventSynchronize(stop);
hipEventElapsedTime(&exe_time, start, stop);
hipEventDestroy(start);
hipEventDestroy(stop);
printf("union %f\n", exe_time);
printf("after union size %llu \n", size_host);
}
}
#endif
| 72eb6a49961456664c7ae98a2f51fc5eb77c039d.cu | /*! \file Merge.cu
\author Gregory Diamos <gregory.diamos> \date Wednesday December 1, 2010
\brief The source file for the C interface to CUDA set union routines.
*/
#ifndef SORT_CU_INCLUDED
#define SORT_CU_INCLUDED
// Redfox Includes
#include <redfox/nvcc/interface/RelationalAlgebraKernel.h>
#include <redfox/ra/interface/Union.h>
#include <redfox/ra/interface/Tuple.h>
// Thrust Includes
#include <thrust/set_operations.h>
#include <thrust/device_ptr.h>
// Hydrazine Includes
//#include <hydrazine/implementation/debug.h>
#include <stdio.h>
struct compare_sort_gpu128
{
__host__ __device__
bool operator()(ra::tuple::PackedNBytes<2> i, ra::tuple::PackedNBytes<2> j)
{
if (i.a[1] != j.a[1])
return (i.a[1] < j.a[1]);
return (i.a[0] < j.a[0]);
}
};
namespace redfox
{
void check(cudaError_t status)
{
if(status != cudaSuccess)
{
std::cerr << cudaGetErrorString(status) << "\n";
std::abort();
}
}
void set_union(void *result, unsigned long long int *size, void* lbegin, void* lend,
void* rbegin, void* rend, unsigned int type)
{
cudaEvent_t start, stop;
cudaEventCreate(&start); cudaEventCreate(&stop);
float exe_time = 0.0f;
cudaEventRecord(start,0);
unsigned long long int size_host;
switch(type)
{
default: printf("Invalid Type.\n");
case nvcc::RelationalAlgebraKernel::I8:
{
thrust::device_ptr<unsigned char> result_end = thrust::set_union(
thrust::device_ptr<unsigned char>((unsigned char *)lbegin),
thrust::device_ptr<unsigned char>((unsigned char *)lend),
thrust::device_ptr<unsigned char>((unsigned char *)rbegin),
thrust::device_ptr<unsigned char>((unsigned char *)rend),
thrust::device_ptr<unsigned char>((unsigned char *)result));
size_host = (result_end - thrust::device_ptr<unsigned char>((unsigned char *)result)) * sizeof(unsigned char);
break;
}
case nvcc::RelationalAlgebraKernel::I16:
{
thrust::device_ptr<unsigned short> result_end = thrust::set_union(
thrust::device_ptr<unsigned short>((unsigned short*)lbegin),
thrust::device_ptr<unsigned short>((unsigned short*)lend),
thrust::device_ptr<unsigned short>((unsigned short*)rbegin),
thrust::device_ptr<unsigned short>((unsigned short*)rend),
thrust::device_ptr<unsigned short>((unsigned short*)result));
size_host = (result_end - thrust::device_ptr<unsigned short>((unsigned short *)result)) * sizeof(unsigned short);
break;
}
case nvcc::RelationalAlgebraKernel::I32:
{
thrust::device_ptr<unsigned int> result_end = thrust::set_union(
thrust::device_ptr<unsigned int>((unsigned int*)lbegin),
thrust::device_ptr<unsigned int>((unsigned int*)lend),
thrust::device_ptr<unsigned int>((unsigned int*)rbegin),
thrust::device_ptr<unsigned int>((unsigned int*)rend),
thrust::device_ptr<unsigned int>((unsigned int*)result));
size_host = (result_end - thrust::device_ptr<unsigned int>((unsigned int *)result)) * sizeof(unsigned int);
// unsigned int merge_result[10];
// check(cudaMemcpy(merge_result, (unsigned int *)result, 4 * 10,
// cudaMemcpyDeviceToHost));
//
// for(int i = 0; i < 10; ++i)
// {
// printf("%u %llx\n", i, merge_result[i]);
// }
break;
}
case nvcc::RelationalAlgebraKernel::I64:
{
typedef thrust::device_ptr<long long unsigned int> ptr;
thrust::device_ptr<long long unsigned int> result_end = thrust::set_union(
ptr((long long unsigned int*)lbegin),
ptr((long long unsigned int*)lend),
ptr((long long unsigned int*)rbegin),
ptr((long long unsigned int*)rend),
ptr((long long unsigned int*)result));
size_host = (result_end - thrust::device_ptr<unsigned long long int>((unsigned long long int *)result)) * sizeof(unsigned long long int);
break;
}
case nvcc::RelationalAlgebraKernel::I128:
{
typedef ra::tuple::PackedNBytes<2> type;
typedef thrust::device_ptr<type> ptr;
ptr result_end = thrust::set_union(
ptr((type*)lbegin),
ptr((type*)lend),
ptr((type*)rbegin),
ptr((type*)rend),
ptr((type*)result), compare_sort_gpu128());
size_host = (result_end - ptr((type *)result)) * sizeof(type);
break;
}
}
check(cudaMemcpy(size, &size_host, sizeof(unsigned long long int),
cudaMemcpyHostToDevice));
cudaEventRecord(stop,0);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&exe_time, start, stop);
cudaEventDestroy(start);
cudaEventDestroy(stop);
printf("union %f\n", exe_time);
printf("after union size %llu \n", size_host);
}
}
#endif
|
feee8795c576b88eb1b2cea248ca1ec50fa6d43a.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
-- MAGMA (version 2.5.4) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date October 2020
@generated from sparse/blas/zmergecgs.cu, normal z -> d, Thu Oct 8 23:05:47 2020
@author Hartwig Anzt
*/
#include "magmasparse_internal.h"
#define BLOCK_SIZE 512
#define PRECISION_d
// These routines merge multiple kernels from dcgs into one.
/* -------------------------------------------------------------------------- */
__global__ void
magma_dcgs_1_kernel(
int num_rows,
int num_cols,
double beta,
double *r,
double *q,
double *u,
double *p )
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
if ( i<num_rows ) {
for( int j=0; j<num_cols; j++ ){
double tmp;
tmp = r[ i+j*num_rows ] + beta * q[ i+j*num_rows ];
p[ i+j*num_rows ] = tmp + beta * q[ i+j*num_rows ]
+ beta * beta * p[ i+j*num_rows ];
u[ i+j*num_rows ] = tmp;
}
}
}
/**
Purpose
-------
Merges multiple operations into one kernel:
u = r + beta q
p = u + beta*(q + beta*p)
Arguments
---------
@param[in]
num_rows magma_int_t
dimension m
@param[in]
num_cols magma_int_t
dimension n
@param[in]
beta double
scalar
@param[in]
r magmaDouble_ptr
vector
@param[in]
q magmaDouble_ptr
vector
@param[in,out]
u magmaDouble_ptr
vector
@param[in,out]
p magmaDouble_ptr
vector
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magmasparse_dgegpuk
********************************************************************/
extern "C"
magma_int_t
magma_dcgs_1(
magma_int_t num_rows,
magma_int_t num_cols,
double beta,
magmaDouble_ptr r,
magmaDouble_ptr q,
magmaDouble_ptr u,
magmaDouble_ptr p,
magma_queue_t queue )
{
dim3 Bs( BLOCK_SIZE );
dim3 Gs( magma_ceildiv( num_rows, BLOCK_SIZE ) );
hipLaunchKernelGGL(( magma_dcgs_1_kernel), dim3(Gs), dim3(Bs), 0, queue->cuda_stream() , num_rows, num_cols, beta, r, q, u, p );
return MAGMA_SUCCESS;
}
__global__ void
magma_dcgs_2_kernel(
int num_rows,
int num_cols,
double *r,
double *u,
double *p )
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
if ( i<num_rows ) {
for( int j=0; j<num_cols; j++ ){
double tmp;
tmp = r[ i+j*num_rows ];
u[ i+j*num_rows ] = tmp;
p[ i+j*num_rows ] = tmp;
}
}
}
/**
Purpose
-------
Merges multiple operations into one kernel:
u = r
p = r
Arguments
---------
@param[in]
num_rows magma_int_t
dimension m
@param[in]
num_cols magma_int_t
dimension n
@param[in]
r magmaDouble_ptr
vector
@param[in,out]
u magmaDouble_ptr
vector
@param[in,out]
p magmaDouble_ptr
vector
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magmasparse_dgegpuk
********************************************************************/
extern "C"
magma_int_t
magma_dcgs_2(
magma_int_t num_rows,
magma_int_t num_cols,
magmaDouble_ptr r,
magmaDouble_ptr u,
magmaDouble_ptr p,
magma_queue_t queue )
{
dim3 Bs( BLOCK_SIZE );
dim3 Gs( magma_ceildiv( num_rows, BLOCK_SIZE ) );
hipLaunchKernelGGL(( magma_dcgs_2_kernel), dim3(Gs), dim3(Bs), 0, queue->cuda_stream() , num_rows, num_cols, r, u, p);
return MAGMA_SUCCESS;
}
__global__ void
magma_dcgs_3_kernel(
int num_rows,
int num_cols,
double alpha,
double *v_hat,
double *u,
double *q,
double *t )
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
if ( i<num_rows ) {
for( int j=0; j<num_cols; j++ ){
double uloc, tmp;
uloc = u[ i+j*num_rows ];
tmp = uloc - alpha * v_hat[ i+j*num_rows ];
t[ i+j*num_rows ] = tmp + uloc;
q[ i+j*num_rows ] = tmp;
}
}
}
/**
Purpose
-------
Merges multiple operations into one kernel:
q = u - alpha v_hat
t = u + q
Arguments
---------
@param[in]
num_rows magma_int_t
dimension m
@param[in]
num_cols magma_int_t
dimension n
@param[in]
alpha double
scalar
@param[in]
v_hat magmaDouble_ptr
vector
@param[in]
u magmaDouble_ptr
vector
@param[in,out]
q magmaDouble_ptr
vector
@param[in,out]
t magmaDouble_ptr
vector
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magmasparse_dgegpuk
********************************************************************/
extern "C"
magma_int_t
magma_dcgs_3(
magma_int_t num_rows,
magma_int_t num_cols,
double alpha,
magmaDouble_ptr v_hat,
magmaDouble_ptr u,
magmaDouble_ptr q,
magmaDouble_ptr t,
magma_queue_t queue )
{
dim3 Bs( BLOCK_SIZE );
dim3 Gs( magma_ceildiv( num_rows, BLOCK_SIZE ) );
hipLaunchKernelGGL(( magma_dcgs_3_kernel), dim3(Gs), dim3(Bs), 0, queue->cuda_stream() , num_rows, num_cols, alpha, v_hat, u, q, t );
return MAGMA_SUCCESS;
}
__global__ void
magma_dcgs_4_kernel(
int num_rows,
int num_cols,
double alpha,
double *u_hat,
double *t,
double *x,
double *r )
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
if ( i<num_rows ) {
for( int j=0; j<num_cols; j++ ){
x[ i+j*num_rows ] = x[ i+j*num_rows ]
+ alpha * u_hat[ i+j*num_rows ];
r[ i+j*num_rows ] = r[ i+j*num_rows ]
- alpha * t[ i+j*num_rows ];
}
}
}
/**
Purpose
-------
Merges multiple operations into one kernel:
x = x + alpha u_hat
r = r -alpha*A u_hat = r -alpha*t
Arguments
---------
@param[in]
num_rows magma_int_t
dimension m
@param[in]
num_cols magma_int_t
dimension n
@param[in]
alpha double
scalar
@param[in]
u_hat magmaDouble_ptr
vector
@param[in]
t magmaDouble_ptr
vector
@param[in,out]
x magmaDouble_ptr
vector
@param[in,out]
r magmaDouble_ptr
vector
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magmasparse_dgegpuk
********************************************************************/
extern "C"
magma_int_t
magma_dcgs_4(
magma_int_t num_rows,
magma_int_t num_cols,
double alpha,
magmaDouble_ptr u_hat,
magmaDouble_ptr t,
magmaDouble_ptr x,
magmaDouble_ptr r,
magma_queue_t queue )
{
dim3 Bs( BLOCK_SIZE );
dim3 Gs( magma_ceildiv( num_rows, BLOCK_SIZE ) );
hipLaunchKernelGGL(( magma_dcgs_4_kernel), dim3(Gs), dim3(Bs), 0, queue->cuda_stream() , num_rows, num_cols, alpha, u_hat, t, x, r );
return MAGMA_SUCCESS;
}
| feee8795c576b88eb1b2cea248ca1ec50fa6d43a.cu | /*
-- MAGMA (version 2.5.4) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date October 2020
@generated from sparse/blas/zmergecgs.cu, normal z -> d, Thu Oct 8 23:05:47 2020
@author Hartwig Anzt
*/
#include "magmasparse_internal.h"
#define BLOCK_SIZE 512
#define PRECISION_d
// These routines merge multiple kernels from dcgs into one.
/* -------------------------------------------------------------------------- */
__global__ void
magma_dcgs_1_kernel(
int num_rows,
int num_cols,
double beta,
double *r,
double *q,
double *u,
double *p )
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
if ( i<num_rows ) {
for( int j=0; j<num_cols; j++ ){
double tmp;
tmp = r[ i+j*num_rows ] + beta * q[ i+j*num_rows ];
p[ i+j*num_rows ] = tmp + beta * q[ i+j*num_rows ]
+ beta * beta * p[ i+j*num_rows ];
u[ i+j*num_rows ] = tmp;
}
}
}
/**
Purpose
-------
Merges multiple operations into one kernel:
u = r + beta q
p = u + beta*(q + beta*p)
Arguments
---------
@param[in]
num_rows magma_int_t
dimension m
@param[in]
num_cols magma_int_t
dimension n
@param[in]
beta double
scalar
@param[in]
r magmaDouble_ptr
vector
@param[in]
q magmaDouble_ptr
vector
@param[in,out]
u magmaDouble_ptr
vector
@param[in,out]
p magmaDouble_ptr
vector
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magmasparse_dgegpuk
********************************************************************/
extern "C"
magma_int_t
magma_dcgs_1(
magma_int_t num_rows,
magma_int_t num_cols,
double beta,
magmaDouble_ptr r,
magmaDouble_ptr q,
magmaDouble_ptr u,
magmaDouble_ptr p,
magma_queue_t queue )
{
dim3 Bs( BLOCK_SIZE );
dim3 Gs( magma_ceildiv( num_rows, BLOCK_SIZE ) );
magma_dcgs_1_kernel<<< Gs, Bs, 0, queue->cuda_stream() >>>( num_rows, num_cols, beta, r, q, u, p );
return MAGMA_SUCCESS;
}
__global__ void
magma_dcgs_2_kernel(
int num_rows,
int num_cols,
double *r,
double *u,
double *p )
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
if ( i<num_rows ) {
for( int j=0; j<num_cols; j++ ){
double tmp;
tmp = r[ i+j*num_rows ];
u[ i+j*num_rows ] = tmp;
p[ i+j*num_rows ] = tmp;
}
}
}
/**
Purpose
-------
Merges multiple operations into one kernel:
u = r
p = r
Arguments
---------
@param[in]
num_rows magma_int_t
dimension m
@param[in]
num_cols magma_int_t
dimension n
@param[in]
r magmaDouble_ptr
vector
@param[in,out]
u magmaDouble_ptr
vector
@param[in,out]
p magmaDouble_ptr
vector
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magmasparse_dgegpuk
********************************************************************/
extern "C"
magma_int_t
magma_dcgs_2(
magma_int_t num_rows,
magma_int_t num_cols,
magmaDouble_ptr r,
magmaDouble_ptr u,
magmaDouble_ptr p,
magma_queue_t queue )
{
dim3 Bs( BLOCK_SIZE );
dim3 Gs( magma_ceildiv( num_rows, BLOCK_SIZE ) );
magma_dcgs_2_kernel<<< Gs, Bs, 0, queue->cuda_stream() >>>( num_rows, num_cols, r, u, p);
return MAGMA_SUCCESS;
}
__global__ void
magma_dcgs_3_kernel(
int num_rows,
int num_cols,
double alpha,
double *v_hat,
double *u,
double *q,
double *t )
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
if ( i<num_rows ) {
for( int j=0; j<num_cols; j++ ){
double uloc, tmp;
uloc = u[ i+j*num_rows ];
tmp = uloc - alpha * v_hat[ i+j*num_rows ];
t[ i+j*num_rows ] = tmp + uloc;
q[ i+j*num_rows ] = tmp;
}
}
}
/**
Purpose
-------
Merges multiple operations into one kernel:
q = u - alpha v_hat
t = u + q
Arguments
---------
@param[in]
num_rows magma_int_t
dimension m
@param[in]
num_cols magma_int_t
dimension n
@param[in]
alpha double
scalar
@param[in]
v_hat magmaDouble_ptr
vector
@param[in]
u magmaDouble_ptr
vector
@param[in,out]
q magmaDouble_ptr
vector
@param[in,out]
t magmaDouble_ptr
vector
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magmasparse_dgegpuk
********************************************************************/
extern "C"
magma_int_t
magma_dcgs_3(
magma_int_t num_rows,
magma_int_t num_cols,
double alpha,
magmaDouble_ptr v_hat,
magmaDouble_ptr u,
magmaDouble_ptr q,
magmaDouble_ptr t,
magma_queue_t queue )
{
dim3 Bs( BLOCK_SIZE );
dim3 Gs( magma_ceildiv( num_rows, BLOCK_SIZE ) );
magma_dcgs_3_kernel<<< Gs, Bs, 0, queue->cuda_stream() >>>( num_rows, num_cols, alpha, v_hat, u, q, t );
return MAGMA_SUCCESS;
}
__global__ void
magma_dcgs_4_kernel(
int num_rows,
int num_cols,
double alpha,
double *u_hat,
double *t,
double *x,
double *r )
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
if ( i<num_rows ) {
for( int j=0; j<num_cols; j++ ){
x[ i+j*num_rows ] = x[ i+j*num_rows ]
+ alpha * u_hat[ i+j*num_rows ];
r[ i+j*num_rows ] = r[ i+j*num_rows ]
- alpha * t[ i+j*num_rows ];
}
}
}
/**
Purpose
-------
Merges multiple operations into one kernel:
x = x + alpha u_hat
r = r -alpha*A u_hat = r -alpha*t
Arguments
---------
@param[in]
num_rows magma_int_t
dimension m
@param[in]
num_cols magma_int_t
dimension n
@param[in]
alpha double
scalar
@param[in]
u_hat magmaDouble_ptr
vector
@param[in]
t magmaDouble_ptr
vector
@param[in,out]
x magmaDouble_ptr
vector
@param[in,out]
r magmaDouble_ptr
vector
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magmasparse_dgegpuk
********************************************************************/
extern "C"
magma_int_t
magma_dcgs_4(
magma_int_t num_rows,
magma_int_t num_cols,
double alpha,
magmaDouble_ptr u_hat,
magmaDouble_ptr t,
magmaDouble_ptr x,
magmaDouble_ptr r,
magma_queue_t queue )
{
dim3 Bs( BLOCK_SIZE );
dim3 Gs( magma_ceildiv( num_rows, BLOCK_SIZE ) );
magma_dcgs_4_kernel<<< Gs, Bs, 0, queue->cuda_stream() >>>( num_rows, num_cols, alpha, u_hat, t, x, r );
return MAGMA_SUCCESS;
}
|
534a61874d4b4a0d296f608101079d0dbdb6c877.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <stdlib.h>
#include "structs.cuh"
#include "utils.cuh"
#include "inverted_index.cuh"
void print_sets(vector<Entry> &entries, vector<int> &sizes, vector<int> &start) {
printf("\nSets:\n");
for (int i = 0; i < sizes.size(); i++) {
printf("[%d]: ", i);
for (int j = 0; j < sizes[i]; j++) {
printf(" %d ", entries[start[i] + j].term_id);
}
printf("\n");
}
}
void print_invertedIndex(InvertedIndex index) {
printf("Docs: %d\nEntries: %d\nTerms: %d\n", index.num_docs, index.num_entries, index.num_terms);
Entry *inverted_index = (Entry*)malloc(sizeof(Entry)*index.num_entries);
hipMemcpyAsync(inverted_index, index.d_inverted_index, sizeof(Entry)*index.num_entries, hipMemcpyDeviceToHost);
int *count = (int *)malloc(sizeof(int)*index.num_terms);
hipMemcpyAsync(count, index.d_count, sizeof(int)*index.num_terms, hipMemcpyDeviceToHost);
int *h_index = (int *)malloc(sizeof(int)*index.num_terms);
hipMemcpyAsync(h_index, index.d_index, sizeof(int)*index.num_terms, hipMemcpyDeviceToHost);
printf("Count: ");
for (int i = 0; i < index.num_terms; i++) {
printf("%d ", count[i]);
}
printf("\nList's ends: ");
for (int i = 0; i < index.num_terms; i++) {
printf("%d ", h_index[i]);
}
printf("\nIndex:");
int term = -1;
for (int i = 0; i < index.num_entries; i++) {
if (term != inverted_index[i].term_id) {
printf("\n[%d]: ", inverted_index[i].term_id);
term = inverted_index[i].term_id;
}
printf("%d ", inverted_index[i].set_id);
}
printf("\n");
}
void print_intersection(int *intersection, int block_size, int indexed, int probe) {
int *h_intersection = (int *)malloc(sizeof(int)*block_size*block_size);
hipMemcpyAsync(h_intersection, intersection, sizeof(int)*block_size*block_size, hipMemcpyDeviceToHost);
printf("\n===Intersection (%d, %d):===\n ", probe, indexed);
for (int i = 0; i < block_size; i++) {
printf("[%d]", indexed*block_size + i);
}
printf("\n");
for (int i = 0; i < block_size; i++) {
printf("[%d]", i + probe*block_size);
for (int j = 0; j < block_size; j++) {
printf(" %d ", h_intersection[i*block_size + j]);// > 0? 1: 0);
}
printf("\n");
}
printf("==========================\n");
}
void print_result(Pair *pairs, int size) {
printf("\n============ Similarity Join Result ============\n");
for (int i = 0; i < size; i++) {
printf("[%d, %d]:%.3f ", pairs[i].set_x, pairs[i].set_y, pairs[i].similarity);
}
printf("\n================================================\n");
}
| 534a61874d4b4a0d296f608101079d0dbdb6c877.cu | #include <stdio.h>
#include <stdlib.h>
#include "structs.cuh"
#include "utils.cuh"
#include "inverted_index.cuh"
void print_sets(vector<Entry> &entries, vector<int> &sizes, vector<int> &start) {
printf("\nSets:\n");
for (int i = 0; i < sizes.size(); i++) {
printf("[%d]: ", i);
for (int j = 0; j < sizes[i]; j++) {
printf(" %d ", entries[start[i] + j].term_id);
}
printf("\n");
}
}
void print_invertedIndex(InvertedIndex index) {
printf("Docs: %d\nEntries: %d\nTerms: %d\n", index.num_docs, index.num_entries, index.num_terms);
Entry *inverted_index = (Entry*)malloc(sizeof(Entry)*index.num_entries);
cudaMemcpyAsync(inverted_index, index.d_inverted_index, sizeof(Entry)*index.num_entries, cudaMemcpyDeviceToHost);
int *count = (int *)malloc(sizeof(int)*index.num_terms);
cudaMemcpyAsync(count, index.d_count, sizeof(int)*index.num_terms, cudaMemcpyDeviceToHost);
int *h_index = (int *)malloc(sizeof(int)*index.num_terms);
cudaMemcpyAsync(h_index, index.d_index, sizeof(int)*index.num_terms, cudaMemcpyDeviceToHost);
printf("Count: ");
for (int i = 0; i < index.num_terms; i++) {
printf("%d ", count[i]);
}
printf("\nList's ends: ");
for (int i = 0; i < index.num_terms; i++) {
printf("%d ", h_index[i]);
}
printf("\nIndex:");
int term = -1;
for (int i = 0; i < index.num_entries; i++) {
if (term != inverted_index[i].term_id) {
printf("\n[%d]: ", inverted_index[i].term_id);
term = inverted_index[i].term_id;
}
printf("%d ", inverted_index[i].set_id);
}
printf("\n");
}
void print_intersection(int *intersection, int block_size, int indexed, int probe) {
int *h_intersection = (int *)malloc(sizeof(int)*block_size*block_size);
cudaMemcpyAsync(h_intersection, intersection, sizeof(int)*block_size*block_size, cudaMemcpyDeviceToHost);
printf("\n===Intersection (%d, %d):===\n ", probe, indexed);
for (int i = 0; i < block_size; i++) {
printf("[%d]", indexed*block_size + i);
}
printf("\n");
for (int i = 0; i < block_size; i++) {
printf("[%d]", i + probe*block_size);
for (int j = 0; j < block_size; j++) {
printf(" %d ", h_intersection[i*block_size + j]);// > 0? 1: 0);
}
printf("\n");
}
printf("==========================\n");
}
void print_result(Pair *pairs, int size) {
printf("\n============ Similarity Join Result ============\n");
for (int i = 0; i < size; i++) {
printf("[%d, %d]:%.3f ", pairs[i].set_x, pairs[i].set_y, pairs[i].similarity);
}
printf("\n================================================\n");
}
|
4e6138d1328241dadf9f837dea0616afee69b7fc.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
#include <algorithm>
#include "core/providers/cuda/cuda_common.h"
#include "orttraining/training_ops/cuda/optimizer/clip_grad_norm/clip_grad_norm_impl.h"
namespace onnxruntime {
namespace cuda {
template <typename T>
__global__ void ClipGradNorm(
ChunkGroup<ClipGradNormGroupSize> chunks,
const float* total_norm,
const float epsilon,
const float max_norm) {
const int tensor_idx = chunks.block_index_to_tensor_group_index[blockIdx.x];
const int tensor_size = chunks.tensor_sizes[tensor_idx];
const int chunk_start_idx = chunks.block_index_to_chunk_start_index[blockIdx.x];
// chunk_size is chunks.chunk_size if the loaded chunk is full. Otherwise (this
// chunk is the last one in the source tensor), the actual size is determined
// by the bound of the source tensor.
const int chunk_size = min(tensor_size, chunk_start_idx + chunks.chunk_size) - chunk_start_idx;
T* gradients_chunk_ptr = static_cast<T*>(chunks.tensor_ptrs[0][tensor_idx]) + chunk_start_idx;
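// Scale each gradient element by min(max_norm / (total_norm + epsilon), 1),
// i.e. leave it unchanged when the accumulated norm is already within max_norm.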
#pragma unroll(4)
for (int i = threadIdx.x; i < chunk_size; i += blockDim.x) {
float clip_coefficient = max_norm / (*total_norm + epsilon);
gradients_chunk_ptr[i] = static_cast<T>(gradients_chunk_ptr[i]) *
static_cast<T>(fminf(clip_coefficient, 1.0f));
}
}
template <typename T>
void ClipGradNormFunctor<T>::operator()(
hipStream_t stream,
ChunkGroup<ClipGradNormGroupSize> chunks,
const float* total_norm,
const float epsilon,
const float max_norm) {
const int num_blocks_per_grid = chunks.chunk_count;
const int num_threads_per_block = ChunkGroup<ClipGradNormGroupSize>::thread_count_per_block;
hipLaunchKernelGGL(( ClipGradNorm<T>), dim3(num_blocks_per_grid), dim3(num_threads_per_block), 0, stream, chunks, total_norm, epsilon, max_norm);
}
#define SPECIALIZE_CLIPGRADNORM_FUNCTOR(T) \
template void ClipGradNormFunctor<T>::operator()(hipStream_t stream, \
ChunkGroup<ClipGradNormGroupSize> chunks, \
const float* total_norm, \
const float epsilon, \
const float max_norm); \
\
template __global__ void ClipGradNorm<T>(ChunkGroup<ClipGradNormGroupSize> chunks, \
const float* total_norm, \
const float epsilon, \
const float max_norm);
SPECIALIZE_CLIPGRADNORM_FUNCTOR(float);
} // namespace cuda
} // namespace onnxruntime
| 4e6138d1328241dadf9f837dea0616afee69b7fc.cu | // Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
#include <algorithm>
#include "core/providers/cuda/cuda_common.h"
#include "orttraining/training_ops/cuda/optimizer/clip_grad_norm/clip_grad_norm_impl.h"
namespace onnxruntime {
namespace cuda {
template <typename T>
__global__ void ClipGradNorm(
ChunkGroup<ClipGradNormGroupSize> chunks,
const float* total_norm,
const float epsilon,
const float max_norm) {
const int tensor_idx = chunks.block_index_to_tensor_group_index[blockIdx.x];
const int tensor_size = chunks.tensor_sizes[tensor_idx];
const int chunk_start_idx = chunks.block_index_to_chunk_start_index[blockIdx.x];
// chunk_size is chunks.chunk_size if the loaded chunk is full. Otherwise (this
// chunk is the last one in the source tensor), the actual size is determined
// by the bound of the source tensor.
const int chunk_size = min(tensor_size, chunk_start_idx + chunks.chunk_size) - chunk_start_idx;
T* gradients_chunk_ptr = static_cast<T*>(chunks.tensor_ptrs[0][tensor_idx]) + chunk_start_idx;
#pragma unroll(4)
for (int i = threadIdx.x; i < chunk_size; i += blockDim.x) {
float clip_coefficient = max_norm / (*total_norm + epsilon);
gradients_chunk_ptr[i] = static_cast<T>(gradients_chunk_ptr[i]) *
static_cast<T>(fminf(clip_coefficient, 1.0f));
}
}
template <typename T>
void ClipGradNormFunctor<T>::operator()(
cudaStream_t stream,
ChunkGroup<ClipGradNormGroupSize> chunks,
const float* total_norm,
const float epsilon,
const float max_norm) {
const int num_blocks_per_grid = chunks.chunk_count;
const int num_threads_per_block = ChunkGroup<ClipGradNormGroupSize>::thread_count_per_block;
ClipGradNorm<T><<<num_blocks_per_grid, num_threads_per_block, 0, stream>>>(chunks, total_norm, epsilon, max_norm);
}
#define SPECIALIZE_CLIPGRADNORM_FUNCTOR(T) \
template void ClipGradNormFunctor<T>::operator()(cudaStream_t stream, \
ChunkGroup<ClipGradNormGroupSize> chunks, \
const float* total_norm, \
const float epsilon, \
const float max_norm); \
\
template __global__ void ClipGradNorm<T>(ChunkGroup<ClipGradNormGroupSize> chunks, \
const float* total_norm, \
const float epsilon, \
const float max_norm);
SPECIALIZE_CLIPGRADNORM_FUNCTOR(float);
} // namespace cuda
} // namespace onnxruntime
|
879be7e61104182baceaf13965e73f1f1c2cb19a.hip | // !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
#include "common.h"
#include "naive.h"
namespace StreamCompaction {
namespace Naive {
using StreamCompaction::Common::PerformanceTimer;
PerformanceTimer& timer()
{
static PerformanceTimer timer;
return timer;
}
// Hillis-Steele naive scan step: each element with index >= offset adds in the element offset positions earlier.
__global__ void kernNaiveScan(int n, int *odata, int *idata, int offset) {
int idx = (blockIdx.x * blockDim.x) + threadIdx.x;
if (idx >= n) {
return;
}
if (idx >= offset) {
odata[idx] = idata[idx - offset] + idata[idx];
}
else {
odata[idx] = idata[idx];
}
}
/**
* Performs prefix-sum (aka scan) on idata, storing the result into odata.
*/
void scan(int n, int *odata, const int *idata) {
int* dev_arr1,* dev_arr2;
hipMalloc((void**)&dev_arr1, n * sizeof(int));
hipMalloc((void**)&dev_arr2, n * sizeof(int));
hipMemcpy(dev_arr1, idata, n * sizeof(int), hipMemcpyHostToDevice);
dim3 blockNum((n + blockSize - 1) / blockSize);
int maxDepth = ilog2ceil(n);
timer().startGpuTimer();
for (int d = 1; d <= maxDepth; d++) {
kernNaiveScan << <blockNum, blockSize >> > (n, dev_arr2, dev_arr1, pow(2.0,d-1));
// ping pong
if (d < maxDepth) {
int* temp = dev_arr1;
dev_arr1 = dev_arr2;
dev_arr2 = temp;
}
}
timer().endGpuTimer();
hipMemcpy(odata + 1, dev_arr2, (n - 1) * sizeof(int), hipMemcpyDeviceToHost);
odata[0] = 0;
hipFree(dev_arr1);
hipFree(dev_arr2);
}
}
}
| 879be7e61104182baceaf13965e73f1f1c2cb19a.cu | #include <cuda.h>
#include <cuda_runtime.h>
#include "common.h"
#include "naive.h"
namespace StreamCompaction {
namespace Naive {
using StreamCompaction::Common::PerformanceTimer;
PerformanceTimer& timer()
{
static PerformanceTimer timer;
return timer;
}
// Hillis-Steele naive scan step: each element with index >= offset adds in the element offset positions earlier.
__global__ void kernNaiveScan(int n, int *odata, int *idata, int offset) {
int idx = (blockIdx.x * blockDim.x) + threadIdx.x;
if (idx >= n) {
return;
}
if (idx >= offset) {
odata[idx] = idata[idx - offset] + idata[idx];
}
else {
odata[idx] = idata[idx];
}
}
/**
* Performs prefix-sum (aka scan) on idata, storing the result into odata.
*/
void scan(int n, int *odata, const int *idata) {
int* dev_arr1,* dev_arr2;
cudaMalloc((void**)&dev_arr1, n * sizeof(int));
cudaMalloc((void**)&dev_arr2, n * sizeof(int));
cudaMemcpy(dev_arr1, idata, n * sizeof(int), cudaMemcpyHostToDevice);
dim3 blockNum((n + blockSize - 1) / blockSize);
int maxDepth = ilog2ceil(n);
timer().startGpuTimer();
for (int d = 1; d <= maxDepth; d++) {
kernNaiveScan << <blockNum, blockSize >> > (n, dev_arr2, dev_arr1, pow(2.0,d-1));
// ping pong
if (d < maxDepth) {
int* temp = dev_arr1;
dev_arr1 = dev_arr2;
dev_arr2 = temp;
}
}
timer().endGpuTimer();
cudaMemcpy(odata + 1, dev_arr2, (n - 1) * sizeof(int), cudaMemcpyDeviceToHost);
odata[0] = 0;
cudaFree(dev_arr1);
cudaFree(dev_arr2);
}
}
}
|
2cce6cbc24f8816a49c7941b4361a2df50b30a49.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <iostream>
#include <fstream>
#include <string.h>
#include <sys/time.h>
using namespace std;
const int N=2000;
const int THREADS_PER_BLOCK_1D = 1024;
__global__ void MatAdd (float *A, float *B, float * C, int N)
{
int j = blockIdx.x * blockDim.x + threadIdx.x;
int i = blockIdx.y * blockDim.y + threadIdx.y;
int index=i*N+j;
if (i < N && j < N)
C[index] = A[index] + B[index];
}
__global__ void MatAdd_Filas( float *A, float *B, float *C, int N)
{
int i = blockIdx.x * blockDim.x + threadIdx.x; // Row index handled by this thread
if(i < N){
int row=i*N; // Start offset of row i in the flattened matrix
for(int index = row; index < row+N; index++){
C[index] = A[index] + B[index]; // Compute C element
}
}
}
__global__ void MatAdd_Columnas( float *A, float *B, float *C, int N)
{
int j = blockIdx.x * blockDim.x + threadIdx.x; // Column index handled by this thread
if(j < N){
for(int i=0; i < N; i++){ // for each row
int index = i*N + j;
C[index] = A[index] + B[index]; // Compute C element
}
}
}
//**************************************************************************
double cpuSecond()
{
struct timeval tp;
gettimeofday(&tp, NULL);
return((double)tp.tv_sec + (double)tp.tv_usec*1e-6);
}
//**************************************************************************
int main()
{
int i;
const int NN=N*N;
/* pointers to host memory */
/* Allocate arrays A, B and C on host*/
float * A = (float*) malloc(NN*sizeof(float));
float * B = (float*) malloc(NN*sizeof(float));
float * C_original = (float*) malloc(NN*sizeof(float));
float * C_row = (float*) malloc(NN*sizeof(float));
float * C_column = (float*) malloc(NN*sizeof(float));
/* pointers to device memory */
float *A_d, *B_d, *C_d_original, *C_d_row, *C_d_column;
/* Allocate arrays a_d, b_d and c_d on device*/
hipMalloc ((void **) &A_d, sizeof(float)*NN);
hipMalloc ((void **) &B_d, sizeof(float)*NN);
hipMalloc ((void **) &C_d_original, sizeof(float)*NN);
hipMalloc ((void **) &C_d_row, sizeof(float)*NN);
hipMalloc ((void **) &C_d_column, sizeof(float)*NN);
/* Initialize arrays a and b */
for (i=0; i<NN;i++)
{
A[i]= (float) i;
B[i]= (float) i;
}
/* Copy data from host memory to device memory */
hipMemcpy(A_d, A, sizeof(float)*NN, hipMemcpyHostToDevice);
hipMemcpy(B_d, B, sizeof(float)*NN, hipMemcpyHostToDevice);
/* Compute the execution configuration */
dim3 threadsPerBlock (sqrt(THREADS_PER_BLOCK_1D), sqrt(THREADS_PER_BLOCK_1D));
dim3 numBlocks( ceil ((float)(N)/threadsPerBlock.x), ceil ((float)(N)/threadsPerBlock.y) );
//*********************************Original Kernel****************************
double t1 = cpuSecond();
hipLaunchKernelGGL((MatAdd), dim3(numBlocks), dim3(threadsPerBlock), 0, 0, A_d, B_d, C_d_original, N);
hipDeviceSynchronize(); // kernel launches are asynchronous; wait for completion so the timing is meaningful
double tKernelOriginal = cpuSecond() - t1;
/* Copy data from deveice memory to host memory */
hipMemcpy(C_original, C_d_original, sizeof(float)*NN, hipMemcpyDeviceToHost);
//*********************************Row Kernel****************************
t1 = cpuSecond();
/* Compute the execution configuration */
int numBlocks_1D = ceil ((float)(N)/THREADS_PER_BLOCK_1D);
hipLaunchKernelGGL((MatAdd_Filas), dim3(numBlocks_1D), dim3(THREADS_PER_BLOCK_1D), 0, 0, A_d, B_d, C_d_row, N);
hipDeviceSynchronize(); // wait for the kernel to finish before stopping the timer
double tKernelRow = cpuSecond() - t1;
/* Copy data from deveice memory to host memory */
hipMemcpy(C_row, C_d_row, sizeof(float)*NN, hipMemcpyDeviceToHost);
//*********************************Column Kernel****************************
t1 = cpuSecond();
hipLaunchKernelGGL((MatAdd_Columnas), dim3(numBlocks_1D), dim3(THREADS_PER_BLOCK_1D), 0, 0, A_d, B_d, C_d_column, N);
hipDeviceSynchronize(); // wait for the kernel to finish before stopping the timer
double tKernelColumn = cpuSecond() - t1;
/* Copy data from deveice memory to host memory */
hipMemcpy(C_column, C_d_column, sizeof(float)*NN, hipMemcpyDeviceToHost);
//*********************************** Print Results ******************************************
cout << endl;
cout << "N : " << N << endl;
int tamaBloque = threadsPerBlock.x * threadsPerBlock.y;
cout << "Tamaño de bloque : " << tamaBloque << endl;
cout << "Tiempo de kernel original : " << tKernelOriginal << endl;
cout << "Tiempo de kernel por filas : " << tKernelRow << endl;
cout << "Tiempo de kernel por columnas : " << tKernelColumn << endl<<endl;
cout << "Ganancia de kernel por filas : " << tKernelOriginal / tKernelRow << endl;
cout << "Ganancia de kernel por columnas : " << tKernelOriginal / tKernelColumn << endl<<endl<<endl;
/* Print c */
//for (i=0; i<NN;i++)
//printf(" c[%d]=%f\n",i,C_column[i]);
/* Free the memory */
free(A); free(B); free(C_original); free(C_row); free(C_column);
hipFree(A_d); hipFree(B_d);hipFree(C_d_original);hipFree(C_d_row);hipFree(C_d_column);
}
| 2cce6cbc24f8816a49c7941b4361a2df50b30a49.cu | #include <iostream>
#include <fstream>
#include <string.h>
#include <sys/time.h>
using namespace std;
const int N=2000;
const int THREADS_PER_BLOCK_1D = 1024;
__global__ void MatAdd (float *A, float *B, float * C, int N)
{
int j = blockIdx.x * blockDim.x + threadIdx.x;
int i = blockIdx.y * blockDim.y + threadIdx.y;
int index=i*N+j;
if (i < N && j < N)
C[index] = A[index] + B[index];
}
__global__ void MatAdd_Filas( float *A, float *B, float *C, int N)
{
int i = blockIdx.x * blockDim.x + threadIdx.x; // Row index handled by this thread
if(i < N){
int row=i*N; // Start offset of row i in the flattened matrix
for(int index = row; index < row+N; index++){
C[index] = A[index] + B[index]; // Compute C element
}
}
}
__global__ void MatAdd_Columnas( float *A, float *B, float *C, int N)
{
int j = blockIdx.x * blockDim.x + threadIdx.x; // Column index handled by this thread
if(j < N){
for(int i=0; i < N; i++){ // for each row
int index = i*N + j;
C[index] = A[index] + B[index]; // Compute C element
}
}
}
//**************************************************************************
double cpuSecond()
{
struct timeval tp;
gettimeofday(&tp, NULL);
return((double)tp.tv_sec + (double)tp.tv_usec*1e-6);
}
//**************************************************************************
int main()
{
int i;
const int NN=N*N;
/* pointers to host memory */
/* Allocate arrays A, B and C on host*/
float * A = (float*) malloc(NN*sizeof(float));
float * B = (float*) malloc(NN*sizeof(float));
float * C_original = (float*) malloc(NN*sizeof(float));
float * C_row = (float*) malloc(NN*sizeof(float));
float * C_column = (float*) malloc(NN*sizeof(float));
/* pointers to device memory */
float *A_d, *B_d, *C_d_original, *C_d_row, *C_d_column;
/* Allocate arrays a_d, b_d and c_d on device*/
cudaMalloc ((void **) &A_d, sizeof(float)*NN);
cudaMalloc ((void **) &B_d, sizeof(float)*NN);
cudaMalloc ((void **) &C_d_original, sizeof(float)*NN);
cudaMalloc ((void **) &C_d_row, sizeof(float)*NN);
cudaMalloc ((void **) &C_d_column, sizeof(float)*NN);
/* Initialize arrays a and b */
for (i=0; i<NN;i++)
{
A[i]= (float) i;
B[i]= (float) i;
}
/* Copy data from host memory to device memory */
cudaMemcpy(A_d, A, sizeof(float)*NN, cudaMemcpyHostToDevice);
cudaMemcpy(B_d, B, sizeof(float)*NN, cudaMemcpyHostToDevice);
/* Compute the execution configuration */
dim3 threadsPerBlock (sqrt(THREADS_PER_BLOCK_1D), sqrt(THREADS_PER_BLOCK_1D));
dim3 numBlocks( ceil ((float)(N)/threadsPerBlock.x), ceil ((float)(N)/threadsPerBlock.y) );
//*********************************Original Kernel****************************
double t1 = cpuSecond();
MatAdd <<<numBlocks, threadsPerBlock>>> (A_d, B_d, C_d_original, N);
cudaDeviceSynchronize(); // kernel launches are asynchronous; wait for completion so the timing is meaningful
double tKernelOriginal = cpuSecond() - t1;
/* Copy data from deveice memory to host memory */
cudaMemcpy(C_original, C_d_original, sizeof(float)*NN, cudaMemcpyDeviceToHost);
//*********************************Row Kernel****************************
t1 = cpuSecond();
/* Compute the execution configuration */
int numBlocks_1D = ceil ((float)(N)/THREADS_PER_BLOCK_1D);
MatAdd_Filas <<<numBlocks_1D, THREADS_PER_BLOCK_1D>>> (A_d, B_d, C_d_row, N);
cudaDeviceSynchronize(); // wait for the kernel to finish before stopping the timer
double tKernelRow = cpuSecond() - t1;
/* Copy data from deveice memory to host memory */
cudaMemcpy(C_row, C_d_row, sizeof(float)*NN, cudaMemcpyDeviceToHost);
//*********************************Column Kernel****************************
t1 = cpuSecond();
MatAdd_Columnas <<<numBlocks_1D, THREADS_PER_BLOCK_1D>>> (A_d, B_d, C_d_column, N);
cudaDeviceSynchronize(); // wait for the kernel to finish before stopping the timer
double tKernelColumn = cpuSecond() - t1;
/* Copy data from deveice memory to host memory */
cudaMemcpy(C_column, C_d_column, sizeof(float)*NN, cudaMemcpyDeviceToHost);
//*********************************** Print Results ******************************************
cout << endl;
cout << "N : " << N << endl;
int tamaBloque = threadsPerBlock.x * threadsPerBlock.y;
cout << "Tamaño de bloque : " << tamaBloque << endl;
cout << "Tiempo de kernel original : " << tKernelOriginal << endl;
cout << "Tiempo de kernel por filas : " << tKernelRow << endl;
cout << "Tiempo de kernel por columnas : " << tKernelColumn << endl<<endl;
cout << "Ganancia de kernel por filas : " << tKernelOriginal / tKernelRow << endl;
cout << "Ganancia de kernel por columnas : " << tKernelOriginal / tKernelColumn << endl<<endl<<endl;
/* Print c */
//for (i=0; i<NN;i++)
//printf(" c[%d]=%f\n",i,C_column[i]);
/* Free the memory */
free(A); free(B); free(C_original); free(C_row); free(C_column);
cudaFree(A_d); cudaFree(B_d);cudaFree(C_d_original);cudaFree(C_d_row);cudaFree(C_d_column);
}
|
501135ab87a23333d2cf061c1208d0edc2f846a3.hip | // !!! This is a file automatically generated by hipify!!!
/*
author: fredy m
uaem
[email protected] for further comments
*/
#ifdef __HIPCC__
#define cuda_SYNCTHREADS() __syncthreads();
#else
#define cuda_SYNCTHREADS()
#endif
#include <hip/device_functions.h>
#include <stdio.h>
#include <stdlib.h>
#include <hip/hip_vector_types.h>
#include <hip/hip_runtime.h>
#include <math.h>
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
/*
thread synchronization, checking for possible errors,
adds the powers of the elements of a vector in O(log2(n)) steps
*/
#define N 8
__device__ float valores(float, float);
__host__ void check_CUDA_Error(const char *mensaje)
{
hipError_t error;
hipDeviceSynchronize();
error = hipGetLastError();
if (error != hipSuccess) {
printf("ERROR %d: %s (%s)\n", error, hipGetErrorString(error), mensaje);
}
}
__global__ void reduccion(float *vector, float *suma)
{
// allocate storage in the shared memory region
__shared__ float temporal[N];
// local index of each thread -> kernel launched with a single block
int Id = threadIdx.x;
// copy the vector into 'temporal' and synchronize the threads
temporal[Id] = vector[Id];
cuda_SYNCTHREADS();
// parallel reduction
int salto = N / 2;
// perform log2(N) iterations
while (salto)
{
// only half of the threads do work
if (Id < salto)
{
temporal[Id] = (1 / powf(temporal[Id], 2)) + (1 / powf(temporal[Id + salto], 2));
printf("temporal: %.3f\n", temporal[Id]);
}
//cuda_SYNCTHREADS();
cuda_SYNCTHREADS();
salto = salto / 2;
}
// thread 0 writes the final result to global memory
if (Id == 0)
{
*suma = temporal[Id];
}
}
__device__ float valores(float valor1, float valor2) {
float suma = (1 / pow(valor1, 2)) + (1 / pow(valor2, 2));
return suma;
}
int main(int argc, char** argv)
{
float *vector1, *resultado;
float *dev_vector1, *dev_resultado;
int size = N * sizeof(float);
// allocate host memory
vector1 = (float*)malloc(size);
resultado = (float*)malloc(size);
// allocate device memory
hipMalloc((void**)&dev_vector1, size);
hipMalloc((void**)&dev_resultado, size);
// initialize the vectors
for (int i = 0; i < N; i++) {
vector1[i] = (float)i + 1;
}
// send the data to the device
hipMemcpy(dev_vector1, vector1, size, hipMemcpyHostToDevice);
// kernel launch
hipLaunchKernelGGL(( reduccion), dim3(1), dim3(N), 0, 0, dev_vector1, dev_resultado);
// copy the results back from the device
hipMemcpy(resultado, dev_resultado, size, hipMemcpyDeviceToHost);
// print the results
printf("\n>vector1: \n");
for (int i = 0; i < N; i++) {
printf("%.3f, ", 1/pow(vector1[i],2));
}
printf("\n");
printf(">suma: \n");
for (int i = 0; i < N; i++) {
printf("%.3f, ", resultado[i]);
}
printf("\n");
// free device and host memory
hipFree(dev_vector1);
hipFree(dev_resultado);
free(vector1);
free(resultado);
printf("\n...");
fflush(stdin);
char tecla = getchar();
return 0;
}
| 501135ab87a23333d2cf061c1208d0edc2f846a3.cu | /*
author: fredy m
uaem
[email protected] for further comments
*/
#ifdef __CUDACC__
#define cuda_SYNCTHREADS() __syncthreads();
#else
#define cuda_SYNCTHREADS()
#endif
#include <device_functions.h>
#include <stdio.h>
#include <stdlib.h>
#include <vector_types.h>
#include <cuda.h>
#include <math.h>
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
/*
thread synchronization, checking for possible errors,
adds the powers of the elements of a vector in O(log2(n)) steps
*/
#define N 8
__device__ float valores(float, float);
__host__ void check_CUDA_Error(const char *mensaje)
{
cudaError_t error;
cudaDeviceSynchronize();
error = cudaGetLastError();
if (error != cudaSuccess) {
printf("ERROR %d: %s (%s)\n", error, cudaGetErrorString(error), mensaje);
}
}
__global__ void reduccion(float *vector, float *suma)
{
// allocate storage in the shared memory region
__shared__ float temporal[N];
// local index of each thread -> kernel launched with a single block
int Id = threadIdx.x;
// copy the vector into 'temporal' and synchronize the threads
temporal[Id] = vector[Id];
cuda_SYNCTHREADS();
// parallel reduction
int salto = N / 2;
// perform log2(N) iterations
while (salto)
{
// only half of the threads do work
if (Id < salto)
{
temporal[Id] = (1 / powf(temporal[Id], 2)) + (1 / powf(temporal[Id + salto], 2));
printf("temporal: %.3f\n", temporal[Id]);
}
//cuda_SYNCTHREADS();
cuda_SYNCTHREADS();
salto = salto / 2;
}
// thread 0 writes the final result to global memory
if (Id == 0)
{
*suma = temporal[Id];
}
}
__device__ float valores(float valor1, float valor2) {
float suma = (1 / pow(valor1, 2)) + (1 / pow(valor2, 2));
return suma;
}
int main(int argc, char** argv)
{
float *vector1, *resultado;
float *dev_vector1, *dev_resultado;
int size = N * sizeof(float);
// allocate host memory
vector1 = (float*)malloc(size);
resultado = (float*)malloc(size);
// allocate device memory
cudaMalloc((void**)&dev_vector1, size);
cudaMalloc((void**)&dev_resultado, size);
// initialize the vectors
for (int i = 0; i < N; i++) {
vector1[i] = (float)i + 1;
}
// send the data to the device
cudaMemcpy(dev_vector1, vector1, size, cudaMemcpyHostToDevice);
// kernel launch
reduccion<<<1, N>>>(dev_vector1, dev_resultado);
// copy the results back from the device
cudaMemcpy(resultado, dev_resultado, size, cudaMemcpyDeviceToHost);
// print the results
printf("\n>vector1: \n");
for (int i = 0; i < N; i++) {
printf("%.3f, ", 1/pow(vector1[i],2));
}
printf("\n");
printf(">suma: \n");
for (int i = 0; i < N; i++) {
printf("%.3f, ", resultado[i]);
}
printf("\n");
// free device and host memory
cudaFree(dev_vector1);
cudaFree(dev_resultado);
free(vector1);
free(resultado);
printf("\n...");
fflush(stdin);
char tecla = getchar();
return 0;
}
|
b3949d9665c358202bdc2dfc6c595ee9469bc0cc.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: Apache-2.0
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "common/kernel.h"
#define CUBLAS_CHECK(condition) \
do \
{ \
hipblasStatus_t status = condition; \
if (status != HIPBLAS_STATUS_SUCCESS) \
{ \
printf("%s %d CUBLAS FAIL %s\n", __FILE__, __LINE__, cublasGetErrorString(status)); \
} \
} while (0)
// this scatter kernel works on a 2d table writing rows
// index is 1-D array
// updates is 2-D array
// output is 2-D array
// output[index[i]] = updates[i]
__global__ void scatterKernel(
char* output,
const char* updates,
const int* indices,
int pitch,
int rowSize)
{
int idx = indices[blockIdx.x];
char* pDst = (char*)output + idx * pitch;
const char* pSrc = updates + blockIdx.x * rowSize;
memcpy(pDst, pSrc, rowSize);
}
// Transform nd index to 1 - d index
__global__ void transformIdxKernel(
int* output,
const int* transformCoeff, // these are actually the output pitches of the respective dimensions
const int* indices,
int sliceRank)
{
const int* idx = indices + sliceRank * blockIdx.x;
int transformedIdx = 0;
for (int i = 0; i < sliceRank; i++)
{
transformedIdx += idx[i] * transformCoeff[i];
}
output[blockIdx.x] = transformedIdx;
}
pluginStatus_t scatterNDInference(
hipStream_t stream,
int* transformCoeff,
int nOutputDims,
int sliceRank,
int nRows,
int rowSize,
int copySize,
int sizeOfElementInBytes,
const void* index,
const void* updates,
const void* data,
void* output,
void* workspace)
{
const int* _index = (const int*)(index);
const char* _updates = (const char*)(updates);
char* _output = (char*)(output);
int* wo = (int*)(workspace);
int* transformedIdx = wo + sizeof(int)*nOutputDims;
int* deviceTransformCoeff = wo;
CSC(hipMemcpy(workspace, transformCoeff, sizeof(int) * nOutputDims, hipMemcpyHostToDevice), STATUS_FAILURE);
hipLaunchKernelGGL(( transformIdxKernel), dim3(nRows), dim3(1), 0, stream, transformedIdx, deviceTransformCoeff, _index, sliceRank);
CSC(hipMemcpy(output, data, copySize, hipMemcpyDeviceToDevice), STATUS_FAILURE);
// assuming output pitch = rowSize i.e no padding
hipLaunchKernelGGL(( scatterKernel), dim3(nRows), dim3(1), 0, stream, _output, _updates, transformedIdx, rowSize * 4, rowSize * 4);
return STATUS_SUCCESS;
} | b3949d9665c358202bdc2dfc6c595ee9469bc0cc.cu | /*
* SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: Apache-2.0
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "common/kernel.h"
#define CUBLAS_CHECK(condition) \
do \
{ \
cublasStatus_t status = condition; \
if (status != CUBLAS_STATUS_SUCCESS) \
{ \
printf("%s %d CUBLAS FAIL %s\n", __FILE__, __LINE__, cublasGetErrorString(status)); \
} \
} while (0)
// this scatter kernel works on a 2d table writing rows
// index is 1-D array
// updates is 2-D array
// output is 2-D array
// output[index[i]] = updates[i]
__global__ void scatterKernel(
char* output,
const char* updates,
const int* indices,
int pitch,
int rowSize)
{
int idx = indices[blockIdx.x];
char* pDst = (char*)output + idx * pitch;
const char* pSrc = updates + blockIdx.x * rowSize;
memcpy(pDst, pSrc, rowSize);
}
// Transform nd index to 1 - d index
__global__ void transformIdxKernel(
int* output,
const int* transformCoeff, // these are actually the output pitches of the respective dimensions
const int* indices,
int sliceRank)
{
const int* idx = indices + sliceRank * blockIdx.x;
int transformedIdx = 0;
for (int i = 0; i < sliceRank; i++)
{
transformedIdx += idx[i] * transformCoeff[i];
}
output[blockIdx.x] = transformedIdx;
}
pluginStatus_t scatterNDInference(
cudaStream_t stream,
int* transformCoeff,
int nOutputDims,
int sliceRank,
int nRows,
int rowSize,
int copySize,
int sizeOfElementInBytes,
const void* index,
const void* updates,
const void* data,
void* output,
void* workspace)
{
const int* _index = (const int*)(index);
const char* _updates = (const char*)(updates);
char* _output = (char*)(output);
int* wo = (int*)(workspace);
int* transformedIdx = wo + sizeof(int)*nOutputDims;
int* deviceTransformCoeff = wo;
CSC(cudaMemcpy(workspace, transformCoeff, sizeof(int) * nOutputDims, cudaMemcpyHostToDevice), STATUS_FAILURE);
transformIdxKernel<<<nRows, 1, 0, stream>>>(transformedIdx, deviceTransformCoeff, _index, sliceRank);
CSC(cudaMemcpy(output, data, copySize, cudaMemcpyDeviceToDevice), STATUS_FAILURE);
// assuming output pitch = rowSize i.e no padding
scatterKernel<<<nRows, 1, 0, stream>>>(_output, _updates, transformedIdx, rowSize * 4, rowSize * 4);
return STATUS_SUCCESS;
} |
cc83e3affa84e461288917179e1a6f2e9d467569.hip | // !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h>
#include "devarea.hpp"
#include "protocol.hpp"
#include "debug.h"
#include "ptx_stub.h"
__device__ DeviceArea DEVICE_AREA_GLOBAL_NAME;
extern "C" __global__ void INIT_FUNCTION_NAME(DeviceArea device_area)
{
memcpy(&DEVICE_AREA_GLOBAL_NAME, &device_area, sizeof(device_area));
for(int i = 0; i < DEVICE_AREA_GLOBAL_NAME.numq(); ++ i)
{
PCHeader* pcheader = DEVICE_AREA_GLOBAL_NAME.header(i);
pcheader->read_head = 0;
pcheader->write_head = 0;
pcheader->tail = 0;
DEBUGONLY(printf("PC Buffer %i initialized, at %p, size is: %i\n", i, pcheader, DEVICE_AREA_GLOBAL_NAME.qbuf_size());)
}
__threadfence_system();
}
static __device__ FINLINE unsigned int __ptx_laneid()
{
unsigned int value;
asm volatile("mov.u32 %0, %%laneid;" : "=r"(value));
return value;
}
extern "C" __device__ __attribute__((noinline)) uint64_t GETTID_FUNCTION_NAME(int streamid)
{
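// Build a unique 64-bit thread id from the stream id, the linearized block index and the
// linearized thread index within the block.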
return BUILD_ADDRESS(streamid,
(blockIdx.x + blockIdx.y * gridDim.x
+ gridDim.x * gridDim.y * blockIdx.z),
((threadIdx.z * (blockDim.x * blockDim.y))
+ (threadIdx.y * blockDim.x)
+ threadIdx.x));
}
extern "C" __device__ __attribute__((noinline)) void STORE_OP_FUNCTION_NAME(const uint64_t tid, const uint64_t address, const uint32_t op, const uint32_t loc_id)
{
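// One warp leader (the first active lane) reserves a slot in this block's ring buffer,
// spinning while the consumer catches up; the slot index is broadcast to the warp, every
// active lane stores its address into the record, and the leader publishes the record by
// advancing read_head.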
const unsigned int active = __ballot(1);
const unsigned int myidx = __ptx_laneid();
const unsigned int ldridx = __ffs(active) - 1;
const int qid = blockIdx.x % DEVICE_AREA_GLOBAL_NAME.numq(); // XXX: change to SM number
const int size = DEVICE_AREA_GLOBAL_NAME.qbuf_size();
int pos = 0;
PCHeader* pcheader = DEVICE_AREA_GLOBAL_NAME.header(qid);
PCRecord* pcstart = DeviceArea::start(pcheader);
PCRecord* record = NULL;
if(ldridx == myidx)
{
volatile unsigned int* tail = (volatile unsigned int*)&pcheader->tail;
pos = atomicAdd(&pcheader->write_head, 1);
while((pos - *tail) >= size)
__threadfence_system();
}
pos = __shfl(pos, ldridx);
record = pcstart + (pos % size);
DEBUGONLY(printf("bi=%i ti=%i myidx=%i ldridx=%i pos=%i record=%p ra=%p\n", blockIdx.x, threadIdx.x, myidx, ldridx, pos, record, &record->address[myidx]);)
record->address[myidx] = address;
if(ldridx == myidx)
{
record->tid = tid;
record->active = active;
record->op_state= (__isGlobal((void*)address) ? GLOBAL_FLAG : 0) | op;
record->loc_id = loc_id;
while(atomicCAS(&pcheader->read_head, pos, pos + 1) != pos)
__threadfence();
}
__threadfence_system();
}
extern "C" __global__ void force_function_linking(uint64_t* tid)
{
*tid = GETTID_FUNCTION_NAME(0x1234);
STORE_OP_FUNCTION_NAME(*tid, NULL, OP_SYNCTHREADS, 1);
}
int main (int argc, char* argv[])
{
uint64_t* x;
if(0 != hipMalloc(&x, sizeof(uint64_t)))
{
printf("Failed hipMalloc().\n");
return 1;
}
void* buf;
int buf_size = 64 * 1000;
if(0 != hipMalloc(&buf, buf_size))
{
printf("Failed hipMalloc().\n");
return 1;
}
DeviceArea devarea(buf, buf_size, 2);
hipLaunchKernelGGL(( INIT_FUNCTION_NAME), dim3(1),dim3(1), 0, 0, devarea);
int sync = hipDeviceSynchronize();
if(sync != 0)
{
printf("%s failed, err=%i\n", NAMEOF_INIT_FUNCTION_NAME, sync);
return 2;
}
hipLaunchKernelGGL(( force_function_linking), dim3(1),dim3(1), 0, 0, x);
sync = hipDeviceSynchronize();
if(sync != 0)
{
printf("Link function failed, err=%i\n", sync);
return 2;
}
printf("PTX stubs tested OK!\n");
return 0;
}
| cc83e3affa84e461288917179e1a6f2e9d467569.cu | #include <cuda_runtime.h>
#include "devarea.hpp"
#include "protocol.hpp"
#include "debug.h"
#include "ptx_stub.h"
__device__ DeviceArea DEVICE_AREA_GLOBAL_NAME;
extern "C" __global__ void INIT_FUNCTION_NAME(DeviceArea device_area)
{
memcpy(&DEVICE_AREA_GLOBAL_NAME, &device_area, sizeof(device_area));
for(int i = 0; i < DEVICE_AREA_GLOBAL_NAME.numq(); ++ i)
{
PCHeader* pcheader = DEVICE_AREA_GLOBAL_NAME.header(i);
pcheader->read_head = 0;
pcheader->write_head = 0;
pcheader->tail = 0;
DEBUGONLY(printf("PC Buffer %i initialized, at %p, size is: %i\n", i, pcheader, DEVICE_AREA_GLOBAL_NAME.qbuf_size());)
}
__threadfence_system();
}
static __device__ FINLINE unsigned int __ptx_laneid()
{
unsigned int value;
asm volatile("mov.u32 %0, %%laneid;" : "=r"(value));
return value;
}
extern "C" __device__ __attribute__((noinline)) uint64_t GETTID_FUNCTION_NAME(int streamid)
{
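// Build a unique 64-bit thread id from the stream id, the linearized block index and the
// linearized thread index within the block.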
return BUILD_ADDRESS(streamid,
(blockIdx.x + blockIdx.y * gridDim.x
+ gridDim.x * gridDim.y * blockIdx.z),
((threadIdx.z * (blockDim.x * blockDim.y))
+ (threadIdx.y * blockDim.x)
+ threadIdx.x));
}
extern "C" __device__ __attribute__((noinline)) void STORE_OP_FUNCTION_NAME(const uint64_t tid, const uint64_t address, const uint32_t op, const uint32_t loc_id)
{
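// One warp leader (the first active lane) reserves a slot in this block's ring buffer,
// spinning while the consumer catches up; the slot index is broadcast to the warp, every
// active lane stores its address into the record, and the leader publishes the record by
// advancing read_head.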
const unsigned int active = __ballot(1);
const unsigned int myidx = __ptx_laneid();
const unsigned int ldridx = __ffs(active) - 1;
const int qid = blockIdx.x % DEVICE_AREA_GLOBAL_NAME.numq(); // XXX: change to SM number
const int size = DEVICE_AREA_GLOBAL_NAME.qbuf_size();
int pos = 0;
PCHeader* pcheader = DEVICE_AREA_GLOBAL_NAME.header(qid);
PCRecord* pcstart = DeviceArea::start(pcheader);
PCRecord* record = NULL;
if(ldridx == myidx)
{
volatile unsigned int* tail = (volatile unsigned int*)&pcheader->tail;
pos = atomicAdd(&pcheader->write_head, 1);
while((pos - *tail) >= size)
__threadfence_system();
}
pos = __shfl(pos, ldridx);
record = pcstart + (pos % size);
DEBUGONLY(printf("bi=%i ti=%i myidx=%i ldridx=%i pos=%i record=%p ra=%p\n", blockIdx.x, threadIdx.x, myidx, ldridx, pos, record, &record->address[myidx]);)
record->address[myidx] = address;
if(ldridx == myidx)
{
record->tid = tid;
record->active = active;
record->op_state= (__isGlobal((void*)address) ? GLOBAL_FLAG : 0) | op;
record->loc_id = loc_id;
while(atomicCAS(&pcheader->read_head, pos, pos + 1) != pos)
__threadfence();
}
__threadfence_system();
}
extern "C" __global__ void force_function_linking(uint64_t* tid)
{
*tid = GETTID_FUNCTION_NAME(0x1234);
STORE_OP_FUNCTION_NAME(*tid, NULL, OP_SYNCTHREADS, 1);
}
int main (int argc, char* argv[])
{
uint64_t* x;
if(0 != cudaMalloc(&x, sizeof(uint64_t)))
{
printf("Failed cudaMalloc().\n");
return 1;
}
void* buf;
int buf_size = 64 * 1000;
if(0 != cudaMalloc(&buf, buf_size))
{
printf("Failed cudaMalloc().\n");
return 1;
}
DeviceArea devarea(buf, buf_size, 2);
INIT_FUNCTION_NAME<<<1,1>>>(devarea);
int sync = cudaDeviceSynchronize();
if(sync != 0)
{
printf("%s failed, err=%i\n", NAMEOF_INIT_FUNCTION_NAME, sync);
return 2;
}
force_function_linking<<<1,1>>>(x);
sync = cudaDeviceSynchronize();
if(sync != 0)
{
printf("Link function failed, err=%i\n", sync);
return 2;
}
printf("PTX stubs tested OK!\n");
return 0;
}
|
000755fba84a629628647f07fe897abf17acaa6d.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <stdint.h>
#define STB_IMAGE_IMPLEMENTATION
#include "stb_image.h" // stb single-file image loader (provides stbi_load)
#define STB_IMAGE_WRITE_IMPLEMENTATION
#include "stb_image_write.h" // stb single-file image writer (provides stbi_write_png)
void image_save(int *original, int rows, int cols, char* name)
{
unsigned char * image_final = (unsigned char*)calloc(rows*cols, sizeof(char)) ;
for(int i = 1 ; i < rows ; i++) {
for (int j = 1 ; j < cols ; j++ ) {
image_final[(i-1)*cols + j-1] = (unsigned char)original[(i-1)*cols + j-1] ;
}
}
stbi_write_png(name, rows, cols, 1, (const void*)image_final, rows);
}
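// Adaptive (Lee-filter style) smoothing: pixels whose local variance is close to the global
// variance are pulled toward their local mean, while high-variance (edge) pixels are left
// mostly unchanged.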
__global__ void get_global_variance(int *local_variance, int *local_mean, int *image, int *image_filter, int *variance) {
int cols = blockDim.x ;
int var = *variance;
int r = blockIdx.x;
int c = threadIdx.x;
if(local_variance[r * cols + c] < var)
local_variance[r * cols + c] = var;
image_filter[r * cols + c] = image[r * cols + c] - (var / local_variance[r * cols + c]) * (image[r * cols + c] - local_mean[r * cols + c]);
}
int get_sum2(int *arr, int rows, int cols) {
int temp_sum = 0;
for(int i = 0; i < rows; i++)
for(int j = 0; j < cols; j++)
temp_sum += arr[i * cols + j];
return temp_sum;
}
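// image_sq[r][c] = dot product of rows r and c of the image (i.e. image * image^T); its sum
// is used below as a rough second-moment term when estimating the global variance.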
__global__ void square_matrix2(int *image, int *image_sq ) {
int row_id = blockIdx.x ;
int col_id = threadIdx.x ;
int columns = blockDim.x ;
int sum = 0 ;
for(int k = 0; k < columns ; k++)
sum = sum + image[row_id*columns + k]*image[col_id*columns + k] ;
image_sq[row_id *columns + col_id] = sum ;
}
__device__ void square_matrix1(int *mat,int *result ,int rows, int cols) {
int temp_sum = 0 ;
for(int i = 0; i < rows; i++) {
for(int j = 0; j < cols; j++) {
temp_sum = 0 ;
for(int k = 0; k < cols; k++)
temp_sum = temp_sum + mat[i*cols + k] * mat[j*cols + k] ;
result[i*cols + j] = temp_sum ;
}
}
}
__device__ int get_sum(int *arr, int rows, int cols) {
int temp_sum = 0;
for(int i = 0; i < rows; i++)
for(int j = 0; j < cols; j++)
temp_sum += arr[i * cols + j];
return temp_sum;
}
__device__ void get_neighbours(int *image, int *near , int curr_row, int curr_col, int cols) {
int next = 0;
for(int i = curr_row - 1; i < curr_row + 2; i++) {
for(int j = curr_col - 1; j < curr_col + 2; j++) {
near[next] = image[i * cols + j];
next++;
}
}
}
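// One thread per pixel of the padded image: gather the 3x3 neighbourhood and store its mean
// and a variance estimate for the corresponding pixel of the original image.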
__global__ void compute_local_mean_variance(int *image_pad, int *local_mean, int *local_variance) {
int r = blockIdx.x;
int c = threadIdx.x;
int columns = blockDim.x ;
int near_sq[9] ;
int near[9] ;
if(r != 0 && c != 0) {
get_neighbours(image_pad,near,r, c, columns + 1);
int curr_mean = get_sum(near, 3, 3) / 9;
local_mean[(r - 1) *columns + (c - 1)] = curr_mean;
square_matrix1(near, near_sq , 3, 3);
local_variance[(r - 1) * columns + (c - 1)] = get_sum(near_sq, 3, 3) / 9 - curr_mean;
}
}
__global__ void image_padding(int *image, int *image_pad) {
int r = blockIdx.x;
int c = threadIdx.x;
int rows = gridDim.x ;
int cols = blockDim.x ;
if(r != 0 && c != 0 && r != rows - 1 && c != cols - 1)
image_pad[r*cols + c] = image[(r - 1)*(cols - 1) + c - 1];
else
image_pad[r*cols + c] = 0;
}
__global__ void loadIMG(char *temp_image, int *image) {
int r = blockIdx.x;
int c = threadIdx.x;
int cols = blockDim.x ;
image[r *cols + c] = (int) temp_image[r *cols + c];
}
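// Convolve the padded image with the 3x3 horizontal Sobel mask to highlight horizontal edges.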
__global__ void sobel_horizontal(int *image_final, int *image_pad, int *sobel) {
int cols = blockDim.x ;
int rows = gridDim.x ;
int r = blockIdx.x;
int c = threadIdx.x;
int temp = 0;
int near[9] ;
if(r > 0 && c > 0 && r < rows - 1 && c < cols - 1 ) {
get_neighbours(image_pad, near,r, c, cols);
for(int k = 0; k < 9; k++)
temp += near[k] * sobel[k];
image_final[(r - 1)*(cols-1) + (c - 1)] = temp;
}
}
void err(int checker) {
hipError_t errchck = hipGetLastError() ;
if (errchck != hipSuccess )
printf(" %d %s \n" , checker , hipGetErrorString(errchck ) ) ;
}
int main() {
int variance, rows, cols, bpp;
char name[100] ;
// 1) Read the image
unsigned char *temp_image = stbi_load("logo.png", &rows, &cols, &bpp, 1);
int image[rows * cols];
// Parallel conversion of char image to int image
int *p_image;
char *p_temp_image;
int checkers = 0 ;
hipMalloc((void **)&p_image, sizeof(int) * rows * cols);
hipMalloc((void **)&p_temp_image, sizeof(char) * rows * cols);
hipMemcpy(p_temp_image, temp_image, sizeof(char) * rows * cols, hipMemcpyHostToDevice);
hipLaunchKernelGGL(( loadIMG), dim3(rows), dim3(cols), 0, 0, p_temp_image, p_image);
// Declarations
int *image_sq = (int *)malloc(sizeof(int) * rows * cols);
int sobel[9] = {1, 2, 1, 0, 0, 0, -1, -2, -1};
int image_filter[rows * cols];
// 2) Padding the Image
int *p_image_pad;
hipMalloc((void **)&p_image_pad, sizeof(int) * (rows + 1) * (cols + 1));
rows += 1;
cols += 1 ;
hipLaunchKernelGGL(( image_padding), dim3(rows),dim3(cols), 0, 0, p_image, p_image_pad);
err(100) ;
rows -= 1;
cols -= 1;
// 3) Computing Local Mean and Local Variance
int *p_local_mean, *p_local_variance;
hipMalloc((void **)&p_local_mean, sizeof(int)*rows*cols);
hipMalloc((void **)&p_local_variance, sizeof(int)*rows*cols);
hipLaunchKernelGGL(( compute_local_mean_variance), dim3(rows), dim3(cols), 0, 0, p_image_pad, p_local_mean, p_local_variance);
// 4) Get Global Variance
int *p_image_sq;
hipMalloc((void **)&p_image_sq, sizeof(int) * rows * cols);
hipLaunchKernelGGL(( square_matrix2), dim3(rows), dim3(cols), 0, 0, p_image, p_image_sq);
hipMemcpy(image_sq, p_image_sq, sizeof(int) * rows * cols, hipMemcpyDeviceToHost);
hipFree(p_image_sq);
// Get Sum2 Function doesn't need to be parallelized
variance = get_sum2(image_sq , 3, 3) - get_sum2(image , 3 , 3) ;
variance = variance / (rows * cols);
int *p_image_filter, *p_variance;
hipMalloc((void **)&p_image_filter, sizeof(int) * rows * cols);
hipMalloc((void **)&p_variance, sizeof(int));
hipMemcpy(p_variance, &variance, sizeof(int), hipMemcpyHostToDevice);
hipLaunchKernelGGL(( get_global_variance), dim3(rows), dim3(cols), 0, 0, p_local_variance, p_local_mean, p_image, p_image_filter, p_variance);
hipMemcpy(image_filter, p_image_filter, sizeof(int) * rows * cols, hipMemcpyDeviceToHost);
strcpy(name, "noise_removed.png");
image_save(image_filter, rows, cols, name);
hipDeviceSynchronize() ;
// 5) Apply horizontal sobel filter for edge detection
rows += 1; /* Investigate this further */
cols += 1;
hipLaunchKernelGGL(( image_padding), dim3(rows), dim3(cols), 0, 0, p_image_filter, p_image_pad);
hipDeviceSynchronize() ;
rows -= 1;
cols -= 1;
hipFree(p_local_variance);
hipFree(p_local_mean);
hipFree(p_image);
int image_final[rows*cols] ;
int *p_image_final;
int *p_sobel;
hipMalloc((void **)&p_image_final, sizeof(int)*rows*cols);
hipMalloc((void **)&p_sobel, sizeof(int) * 9);
hipMemcpy(p_sobel, sobel, sizeof(int) * 9, hipMemcpyHostToDevice);
hipLaunchKernelGGL(( sobel_horizontal), dim3(rows+1), dim3(cols+1), 0, 0, p_image_final, p_image_pad, p_sobel);
hipMemcpy(image_final, p_image_final, sizeof(int)*rows*cols, hipMemcpyDeviceToHost);
err(checkers++) ;
printf("\n\nFunction 5.2 , %d \n\n" , checkers);
strcpy(name, "edge_det.png");
image_save(image_final, rows, cols, name);
printf(" Processing complete , open edge_det.png to see result edge detected image \n");
hipFree(p_sobel);
hipFree(p_image_pad);
hipFree(p_image_final);
hipFree(p_image_filter); // p_local_variance, p_local_mean and p_image were already freed above
hipFree(p_variance);
hipFree(p_temp_image);
return 0 ;
} | 000755fba84a629628647f07fe897abf17acaa6d.cu | #include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <stdint.h>
#define STB_IMAGE_IMPLEMENTATION
#define STB_IMAGE_WRITE_IMPLEMENTATION
void image_save(int *original, int rows, int cols, char* name)
{
unsigned char * image_final = (unsigned char*)calloc(rows*cols, sizeof(char)) ;
for(int i = 1 ; i < rows ; i++) {
for (int j = 1 ; j < cols ; j++ ) {
image_final[(i-1)*cols + j-1] = (unsigned char)original[(i-1)*cols + j-1] ;
}
}
stbi_write_png(name, rows, cols, 1, (const void*)image_final, rows);
}
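// Adaptive (Lee-filter style) smoothing: pixels whose local variance is close to the global
// variance are pulled toward their local mean, while high-variance (edge) pixels are left
// mostly unchanged.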
__global__ void get_global_variance(int *local_variance, int *local_mean, int *image, int *image_filter, int *variance) {
int cols = blockDim.x ;
int var = *variance;
int r = blockIdx.x;
int c = threadIdx.x;
if(local_variance[r * cols + c] < var)
local_variance[r * cols + c] = var;
image_filter[r * cols + c] = image[r * cols + c] - (var / local_variance[r * cols + c]) * (image[r * cols + c] - local_mean[r * cols + c]);
}
int get_sum2(int *arr, int rows, int cols) {
int temp_sum = 0;
for(int i = 0; i < rows; i++)
for(int j = 0; j < cols; j++)
temp_sum += arr[i * cols + j];
return temp_sum;
}
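// image_sq[r][c] = dot product of rows r and c of the image (i.e. image * image^T); its sum
// is used below as a rough second-moment term when estimating the global variance.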
__global__ void square_matrix2(int *image, int *image_sq ) {
int row_id = blockIdx.x ;
int col_id = threadIdx.x ;
int columns = blockDim.x ;
int sum = 0 ;
for(int k = 0; k < columns ; k++)
sum = sum + image[row_id*columns + k]*image[col_id*columns + k] ;
image_sq[row_id *columns + col_id] = sum ;
}
__device__ void square_matrix1(int *mat,int *result ,int rows, int cols) {
int temp_sum = 0 ;
for(int i = 0; i < rows; i++) {
for(int j = 0; j < cols; j++) {
temp_sum = 0 ;
for(int k = 0; k < cols; k++)
temp_sum = temp_sum + mat[i*cols + k] * mat[j*cols + k] ;
result[i*cols + j] = temp_sum ;
}
}
}
__device__ int get_sum(int *arr, int rows, int cols) {
int temp_sum = 0;
for(int i = 0; i < rows; i++)
for(int j = 0; j < cols; j++)
temp_sum += arr[i * cols + j];
return temp_sum;
}
__device__ void get_neighbours(int *image, int *near , int curr_row, int curr_col, int cols) {
int next = 0;
for(int i = curr_row - 1; i < curr_row + 2; i++) {
for(int j = curr_col - 1; j < curr_col + 2; j++) {
near[next] = image[i * cols + j];
next++;
}
}
}
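// One thread per pixel of the padded image: gather the 3x3 neighbourhood and store its mean
// and a variance estimate for the corresponding pixel of the original image.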
__global__ void compute_local_mean_variance(int *image_pad, int *local_mean, int *local_variance) {
int r = blockIdx.x;
int c = threadIdx.x;
int columns = blockDim.x ;
int near_sq[9] ;
int near[9] ;
if(r != 0 && c != 0) {
get_neighbours(image_pad,near,r, c, columns + 1);
int curr_mean = get_sum(near, 3, 3) / 9;
local_mean[(r - 1) *columns + (c - 1)] = curr_mean;
square_matrix1(near, near_sq , 3, 3);
local_variance[(r - 1) * columns + (c - 1)] = get_sum(near_sq, 3, 3) / 9 - curr_mean;
}
}
__global__ void image_padding(int *image, int *image_pad) {
int r = blockIdx.x;
int c = threadIdx.x;
int rows = gridDim.x ;
int cols = blockDim.x ;
if(r != 0 && c != 0 && r != rows - 1 && c != cols - 1)
image_pad[r*cols + c] = image[(r - 1)*(cols - 1) + c - 1];
else
image_pad[r*cols + c] = 0;
}
__global__ void loadIMG(char *temp_image, int *image) {
int r = blockIdx.x;
int c = threadIdx.x;
int cols = blockDim.x ;
image[r *cols + c] = (int) temp_image[r *cols + c];
}
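// Convolve the padded image with the 3x3 horizontal Sobel mask to highlight horizontal edges.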
__global__ void sobel_horizontal(int *image_final, int *image_pad, int *sobel) {
int cols = blockDim.x ;
int rows = gridDim.x ;
int r = blockIdx.x;
int c = threadIdx.x;
int temp = 0;
int near[9] ;
if(r > 0 && c > 0 && r < rows - 1 && c < cols - 1 ) {
get_neighbours(image_pad, near,r, c, cols);
for(int k = 0; k < 9; k++)
temp += near[k] * sobel[k];
image_final[(r - 1)*(cols-1) + (c - 1)] = temp;
}
}
void err(int checker) {
cudaError_t errchck = cudaGetLastError() ;
if (errchck != cudaSuccess )
printf(" %d %s \n" , checker , cudaGetErrorString(errchck ) ) ;
}
int main() {
int variance, rows, cols, bpp;
char name[100] ;
// 1) Read the image
unsigned char *temp_image = stbi_load("logo.png", &rows, &cols, &bpp, 1);
int image[rows * cols];
// Parallel conversion of char image to int image
int *p_image;
char *p_temp_image;
int checkers = 0 ;
cudaMalloc((void **)&p_image, sizeof(int) * rows * cols);
cudaMalloc((void **)&p_temp_image, sizeof(char) * rows * cols);
cudaMemcpy(p_temp_image, temp_image, sizeof(char) * rows * cols, cudaMemcpyHostToDevice);
loadIMG<<<rows, cols>>>(p_temp_image, p_image);
// Declarations
int *image_sq = (int *)malloc(sizeof(int) * rows * cols);
int sobel[9] = {1, 2, 1, 0, 0, 0, -1, -2, -1};
int image_filter[rows * cols];
// 2) Padding the Image
int *p_image_pad;
cudaMalloc((void **)&p_image_pad, sizeof(int) * (rows + 1) * (cols + 1));
rows += 1;
cols += 1 ;
image_padding<<<rows,cols>>>(p_image, p_image_pad);
err(100) ;
rows -= 1;
cols -= 1;
// 3) Computing Local Mean and Local Variance
int *p_local_mean, *p_local_variance;
cudaMalloc((void **)&p_local_mean, sizeof(int)*rows*cols);
cudaMalloc((void **)&p_local_variance, sizeof(int)*rows*cols);
compute_local_mean_variance<<<rows, cols>>>(p_image_pad, p_local_mean, p_local_variance);
// 4) Get Global Variance
int *p_image_sq;
cudaMalloc((void **)&p_image_sq, sizeof(int) * rows * cols);
square_matrix2<<<rows, cols>>>(p_image, p_image_sq);
cudaMemcpy(image_sq, p_image_sq, sizeof(int) * rows * cols, cudaMemcpyDeviceToHost);
cudaFree(p_image_sq);
// Get Sum2 Function doesn't need to be parallelized
variance = get_sum2(image_sq , 3, 3) - get_sum2(image , 3 , 3) ;
variance = variance / (rows * cols);
int *p_image_filter, *p_variance;
cudaMalloc((void **)&p_image_filter, sizeof(int) * rows * cols);
cudaMalloc((void **)&p_variance, sizeof(int));
cudaMemcpy(p_variance, &variance, sizeof(int), cudaMemcpyHostToDevice);
get_global_variance<<<rows, cols>>>(p_local_variance, p_local_mean, p_image, p_image_filter, p_variance);
cudaMemcpy(image_filter, p_image_filter, sizeof(int) * rows * cols, cudaMemcpyDeviceToHost);
strcpy(name, "noise_removed.png");
image_save(image_filter, rows, cols, name);
cudaDeviceSynchronize() ;
// 5) Apply horizontal sobel filter for edge detection
rows += 1; /* Investigate this further */
cols += 1;
image_padding<<<rows, cols>>>(p_image_filter, p_image_pad);
cudaDeviceSynchronize() ;
rows -= 1;
cols -= 1;
cudaFree(p_local_variance);
cudaFree(p_local_mean);
cudaFree(p_image);
int image_final[rows*cols] ;
int *p_image_final;
int *p_sobel;
cudaMalloc((void **)&p_image_final, sizeof(int)*rows*cols);
cudaMalloc((void **)&p_sobel, sizeof(int) * 9);
cudaMemcpy(p_sobel, sobel, sizeof(int) * 9, cudaMemcpyHostToDevice);
sobel_horizontal<<<rows+1, cols+1>>>(p_image_final, p_image_pad, p_sobel);
cudaMemcpy(image_final, p_image_final, sizeof(int)*rows*cols, cudaMemcpyDeviceToHost);
err(checkers++) ;
printf("\n\nFunction 5.2 , %d \n\n" , checkers);
strcpy(name, "edge_det.png");
image_save(image_final, rows, cols, name);
printf(" Processing complete , open edge_det.png to see result edge detected image \n");
cudaFree(p_sobel);
cudaFree(p_image_pad);
cudaFree(p_image_final);
cudaFree(p_image_filter); // p_local_variance, p_local_mean and p_image were already freed above
cudaFree(p_variance);
cudaFree(p_temp_image);
return 0 ;
} |
welford.hip | // !!! This is a file automatically generated by hipify!!!
#include <iostream>
#include <ATen/ATen.h>
#include <ATen/AccumulateType.h>
#include <ATen/hip/HIPContext.h>
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
#include <vector>
#include "type_shim.h"
__device__ __forceinline__ int lastpow2(int n)
{
int out = 1 << (31 - __clz(n));
if(n == out)
out >>= 1;
return out;
}
__host__ __forceinline__ int h_next_pow2(unsigned int n) {
n--;
n |= (n >> 1);
n |= (n >> 2);
n |= (n >> 4);
n |= (n >> 8);
n |= (n >> 16);
return ++n;
}
__host__ __forceinline__ int h_last_pow2(unsigned int n) {
n |= (n >> 1);
n |= (n >> 2);
n |= (n >> 4);
n |= (n >> 8);
n |= (n >> 16);
return n - (n >> 1);
}
#define WARP_SIZE 32
template<typename T>
__device__ __forceinline__ T warp_reduce_sum(T val)
{
#pragma unroll
for(int i = WARP_SIZE/2; i > 0; i >>= 1)
val = val + __shfl_down_sync(0xffffffff, val, i);
return val;
}
template<typename T>
__device__ __forceinline__ T reduce_block(T *x, T val)
{
int tid = threadIdx.y*blockDim.x + threadIdx.x;
int blockSize = blockDim.x * blockDim.y;
if (blockSize > 32) {
val = warp_reduce_sum(val);
if (tid % WARP_SIZE == 0)
x[tid/WARP_SIZE] = val;
__syncthreads();
val = (tid < blockSize / WARP_SIZE? x[tid%WARP_SIZE] : T(0));
}
if(tid/WARP_SIZE==0) val = warp_reduce_sum(val);
return val;
}
#define ELEMENTS_PER_ITER 4 // enables concurrency within each thread to hide latency
#define ELEMENTS_PER_THREAD 16
#define OPTIMAL_TILE_W 32
#define MAX_H_BLOCK 128
#define MAX_BLOCK_SIZE 512
__host__ int div_ru(int x, int y) {
return h_last_pow2(1 + (x-1)/y);
}
__host__ void flexible_launch_configs(
const int reduction,
const int stride,
dim3 &block,
dim3 &grid,
const bool coop_flag = false) {
int block_x = ::min(h_last_pow2(stride), OPTIMAL_TILE_W);
int block_y = ::min(h_last_pow2(div_ru(reduction , ELEMENTS_PER_THREAD)),
MAX_BLOCK_SIZE / block_x);
if (block_x * block_y != MAX_BLOCK_SIZE) {
block_x = ::min(h_last_pow2(stride), MAX_BLOCK_SIZE / block_y);
}
int grid_x = div_ru(stride, block_x);
int grid_y = ::min(div_ru(reduction, block_y * ELEMENTS_PER_THREAD), MAX_H_BLOCK);
if (coop_flag) {
// it's not worth having a grid reduction if the reduction dimension is not big enough
grid_y = grid_y < 8 ? 1 : grid_y;
}
block.x = block_x;
block.y = block_y;
block.z = 1;
grid.x = grid_x;
grid.y = grid_y;
grid.z = 1;
}
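// Merge two Welford accumulators (count, mean, M2) into one using the parallel-update
// formula (Chan et al.), so partial statistics from different threads/blocks can be combined.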
template<typename T, typename C>
__device__ __forceinline__ void welford_merge_element(C& count,
T& mean,
T& m2n,
const C& num_new,
const T& mean_new,
const T& m2n_new) {
T factor = T(1.0) / max(1, (count + num_new));
T delta0 = mean - mean_new;
mean = (mean_new * num_new + mean * count) * factor;
m2n += m2n_new + delta0 * delta0 * num_new * count * factor;
count += num_new;
}
template<typename T>
__device__ __forceinline__ void warp_reduce_mean_m2n(T &mean, T &m2n, int &num)
{
#pragma unroll
for(int i = WARP_SIZE/2; i > 0; i >>= 1) {
auto num_new = __shfl_down_sync(0xffffffff, num, i);
auto mean_new = __shfl_down_sync(0xffffffff, mean, i);
auto m2n_new = __shfl_down_sync(0xffffffff, m2n, i);
welford_merge_element(num, mean, m2n, num_new, mean_new, m2n_new);
}
}
template <typename T>
__device__ void welford_reduce_mean_m2n(
T* __restrict__ x,
int* __restrict__ count,
T &mean,
T &m2n,
int &num,
int block_size,
int thread_id)
{
int lane = thread_id % WARP_SIZE;
int wid = thread_id / WARP_SIZE;
if (block_size > 32) {
warp_reduce_mean_m2n(mean, m2n, num);
if (lane == 0) {
x[wid*2] = mean;
x[wid*2+1] = m2n;
count[wid] = num;
}
__syncthreads();
if (wid == 0) {
mean = (thread_id < block_size / WARP_SIZE)? x[lane*2] : T(0);
m2n = (thread_id < block_size / WARP_SIZE)? x[lane*2+1] : T(0);
num = (thread_id < block_size / WARP_SIZE)? count[lane] : int(0);
}
}
if (wid==0) warp_reduce_mean_m2n(mean, m2n, num);
return;
}
// return spatial size for NC+ Tensors
__host__ int get_tensor_spatial_size(const at::Tensor& input)
{
auto space_size = input.size(2);
for (int i = 3; i < input.ndimension(); i++) {
space_size *= input.size(i);
}
return space_size;
}
// promote accumulation scalar type. promote half to float.
__host__ at::ScalarType promote_scalartype(const at::Tensor& input)
{
return input.scalar_type() == at::ScalarType::Half ?
at::ScalarType::Float : input.scalar_type();
}
// return single element size, optional accumulation type promotion.
__host__ size_t get_element_data_size(const at::Tensor& input, bool accumulation = false)
{
auto scalar_type = accumulation ? promote_scalartype(input) : input.scalar_type();
return at::elementSize(scalar_type);
}
template<typename T, typename C>
__device__ __forceinline__ void welford_merge_block_vertical(C& count,
T& mean,
T& m2n,
C* shmem_count,
T* shmem_mean,
T* shmem_m2n) {
// write to shared memory
auto address_base = threadIdx.x + threadIdx.y * blockDim.x;
shmem_mean[address_base] = mean;
shmem_m2n[address_base] = m2n;
shmem_count[address_base] = count;
#pragma unroll
for (int offset = blockDim.y/2; offset > 0; offset >>= 1) {
__syncthreads();
if (threadIdx.y < offset && threadIdx.y + offset < blockDim.y) {
auto address = address_base + offset * blockDim.x;
// read shared memory back to register for reduction
auto num_new = shmem_count[address];
auto mean_new = shmem_mean[address];
auto m2n_new = shmem_m2n[address];
welford_merge_element(count, mean, m2n, num_new, mean_new, m2n_new);
// last write is not necessary
shmem_mean[address_base] = mean;
shmem_m2n[address_base] = m2n;
shmem_count[address_base] = count;
}
}
}
template<typename T>
__device__ __forceinline__ void merge_block_vertical(T& sum_dy,
T& sum_dy_xmu,
T* shmem_sum_dy,
T* shmem_sum_dy_xmu) {
// write to shared memory
auto address_base = threadIdx.x + threadIdx.y * blockDim.x;
shmem_sum_dy[address_base] = sum_dy;
shmem_sum_dy_xmu[address_base] = sum_dy_xmu;
#pragma unroll
for (int offset = blockDim.y/2; offset > 0; offset >>= 1) {
__syncthreads();
if (threadIdx.y < offset && threadIdx.y + offset < blockDim.y) {
auto address = address_base + offset * blockDim.x;
sum_dy += shmem_sum_dy[address];
sum_dy_xmu += shmem_sum_dy_xmu[address];
// last write is not necessary
shmem_sum_dy[address_base] = sum_dy;
shmem_sum_dy_xmu[address_base] = sum_dy_xmu;
}
}
}
// welford kernel calculating mean/biased_variance/unbiased_variance
template <typename scalar_t, typename accscalar_t, typename outscalar_t>
__global__ void welford_kernel(
const scalar_t* __restrict__ input,
outscalar_t* __restrict__ out_mean,
outscalar_t* __restrict__ out_var_biased,
const int bs,
const int fs,
const int ss) {
int block_size = blockDim.x * blockDim.y;
int count = 0;
accscalar_t x_mean = accscalar_t(0);
accscalar_t m_2_n = accscalar_t(0);
int thread_id = threadIdx.y*blockDim.x + threadIdx.x;
for (int batch_id = threadIdx.y; batch_id < bs; batch_id += blockDim.y) {
int input_base = blockIdx.x*ss + batch_id*ss*fs;
// sequential welford
for (int offset = threadIdx.x; offset < ss ; offset += blockDim.x) {
count++;
auto x_n = static_cast<accscalar_t>(input[offset+input_base]);
auto d = x_n - x_mean;
x_mean += d / count;
m_2_n += d * (x_n - x_mean);
}
}
static __shared__ int s_mem[160];
accscalar_t* s_mem_ac = (accscalar_t*) &s_mem[32];
welford_reduce_mean_m2n<accscalar_t>(s_mem_ac, s_mem, x_mean, m_2_n, count, block_size, thread_id);
if (thread_id == 0) {
out_mean[blockIdx.x] = static_cast<outscalar_t>(x_mean);
out_var_biased[blockIdx.x] = static_cast<outscalar_t>(m_2_n/count);
}
}
// elementwise BN kernel
template <typename scalar_t, typename accscalar_t, typename layerscalar_t>
__global__ void batchnorm_forward_kernel(
const scalar_t* __restrict__ input,
const accscalar_t* __restrict__ mean,
const accscalar_t* __restrict__ inv_std,
const layerscalar_t* __restrict__ weight,
const layerscalar_t* __restrict__ shift,
scalar_t* __restrict__ out,
const int ss,
const int bs) {
auto m_c = mean[blockIdx.x];
auto inv_std_c = inv_std[blockIdx.x];
auto w_c = weight == NULL ? accscalar_t(1.0) : static_cast<accscalar_t>(weight[blockIdx.x]);
auto s_c = shift == NULL ? accscalar_t(0.0) : static_cast<accscalar_t>(shift[blockIdx.x]);
for (int batch_offset = blockIdx.y*blockDim.y + threadIdx.y; batch_offset < bs; batch_offset += gridDim.y*blockDim.y) {
int address_base = blockIdx.x*ss + batch_offset*gridDim.x*ss;
for (int offset = threadIdx.x + blockIdx.z*blockDim.x; offset < ss ; offset+= gridDim.z*blockDim.x) {
out[address_base+offset] = static_cast<scalar_t>(w_c * (static_cast<accscalar_t>(input[address_base+offset]) - m_c ) * inv_std_c + s_c);
}
}
}
// Backward BN kernel, calculates grad_bias, grad_weight as well as intermediate
// results to calculating grad_input.
// Breaking the grad_input to two step to support sync BN, which requires all
// reduce of the intermediate results across processes.
template <typename scalar_t, typename accscalar_t, typename layerscalar_t>
__global__ void reduce_bn_kernel(
const scalar_t* __restrict__ input,
const scalar_t* __restrict__ grad_output,
const accscalar_t* __restrict__ mean,
const accscalar_t* __restrict__ inv_std,
accscalar_t* __restrict__ mean_dy,
accscalar_t* __restrict__ mean_dy_xmu,
layerscalar_t* __restrict__ grad_weight,
layerscalar_t* __restrict__ grad_bias,
const int bs,
const int fs,
const int ss) {
static __shared__ int s_mem[64];
int total_item_num = bs * ss;
int thread_id = threadIdx.y*blockDim.x + threadIdx.x;
auto r_mean = mean[blockIdx.x];
auto factor = inv_std[blockIdx.x];
// Kahan sum
accscalar_t sum_dy = 0.0;
accscalar_t sum_dy_xmu = 0.0;
accscalar_t sum_dy_c = 0.0;
accscalar_t sum_dy_xmu_c = 0.0;
for (int batch_id = threadIdx.y; batch_id < bs; batch_id += blockDim.y) {
int input_base = blockIdx.x*ss + batch_id*ss*fs;
for (int offset = threadIdx.x; offset < ss ; offset += blockDim.x) {
auto e_grad = static_cast<accscalar_t>(grad_output[offset+input_base]);
auto e_input = static_cast<accscalar_t>(input[offset+input_base]);
// calculating sum_dy
auto sum_dy_y = e_grad - sum_dy_c;
auto sum_dy_t = sum_dy + sum_dy_y;
sum_dy_c = (sum_dy_t - sum_dy) - sum_dy_y;
sum_dy = sum_dy_t;
// calculating sum_dy_xmu
auto sum_dy_xmu_y = e_grad * (e_input - r_mean) - sum_dy_xmu_c;
auto sum_dy_xmu_t = sum_dy_xmu + sum_dy_xmu_y;
sum_dy_xmu_c = (sum_dy_xmu_t - sum_dy_xmu) - sum_dy_xmu_y;
sum_dy_xmu = sum_dy_xmu_t;
}
}
sum_dy = reduce_block((accscalar_t*)s_mem, sum_dy);
__syncthreads();
sum_dy_xmu = reduce_block((accscalar_t*)s_mem, sum_dy_xmu);
if (thread_id == 0) {
if (grad_bias != NULL) {
grad_bias[blockIdx.x] = static_cast<layerscalar_t>(sum_dy);
}
if (grad_weight != NULL) {
grad_weight[blockIdx.x] = static_cast<layerscalar_t>(sum_dy_xmu * factor);
}
mean_dy[blockIdx.x] = sum_dy / total_item_num;
mean_dy_xmu[blockIdx.x] = sum_dy_xmu / total_item_num;
}
}
// elementwise backward BN kernel
template <typename scalar_t, typename accscalar_t, typename layerscalar_t>
__global__ void batchnorm_backward_kernel(
const scalar_t* __restrict__ grad_output,
const scalar_t* __restrict__ input,
const accscalar_t* __restrict__ mean,
const accscalar_t* __restrict__ inv_std,
const layerscalar_t* __restrict__ weight,
const accscalar_t* __restrict__ mean_dy,
const accscalar_t* __restrict__ mean_dy_xmu,
scalar_t* __restrict__ grad_input,
const int ss,
const int bs) {
auto m_c = static_cast<accscalar_t>(mean[blockIdx.x]);
auto m_dy_c = static_cast<accscalar_t>(mean_dy[blockIdx.x]);
auto factor_1_c = inv_std[blockIdx.x];
auto factor_2_c = (weight == NULL ? accscalar_t(1.0) : static_cast<accscalar_t>(weight[blockIdx.x])) * factor_1_c;
factor_1_c = factor_1_c * factor_1_c * mean_dy_xmu[blockIdx.x];
for (int batch_offset = blockIdx.y*blockDim.y+threadIdx.y; batch_offset < bs; batch_offset += gridDim.y*blockDim.y) {
int address_base = blockIdx.x*ss + batch_offset*gridDim.x*ss;
for (int offset = threadIdx.x + blockIdx.z*blockDim.x; offset < ss ; offset+= gridDim.z*blockDim.x) {
grad_input[address_base+offset] = (static_cast<accscalar_t>(grad_output[address_base+offset]) - m_dy_c - (static_cast<accscalar_t>(input[address_base+offset]) - m_c) * factor_1_c) * factor_2_c;
}
}
}
// welford kernel for c last tensor calculating mean/biased_variance/unbiased_variance
template
<typename scalar_t,
typename accscalar_t,
typename outscalar_t,
int PARALLEL_LOADS>
__global__ void
welford_kernel_c_last(
const scalar_t* __restrict__ input,
outscalar_t* __restrict__ out_mean,
outscalar_t* __restrict__ out_var_biased,
volatile accscalar_t* staging_data,
int* semaphores,
const int reduction_size,
const int stride) {
// hide latency with concurrency
accscalar_t x_mean[PARALLEL_LOADS];
accscalar_t m_2_n[PARALLEL_LOADS];
int count[PARALLEL_LOADS];
#pragma unroll
for (int i = 0; i < PARALLEL_LOADS; i++) {
x_mean[i] = accscalar_t(0);
m_2_n[i] = accscalar_t(0);
    count[i] = 0;
}
// tensor dimension (m,c)
// loop along m dimension
int inner_loop_stride = blockDim.y * gridDim.y;
// offset along m dimension
int m_offset = blockIdx.y * blockDim.y + threadIdx.y;
int c_offset = blockIdx.x * blockDim.x + threadIdx.x;
int loop_count = 1 + (reduction_size - 1) / (inner_loop_stride * PARALLEL_LOADS);
int address_base = m_offset * stride + c_offset;
int address_increment = inner_loop_stride * stride;
for (int i = 0; i < loop_count; i++) {
accscalar_t x_math[PARALLEL_LOADS];
accscalar_t x_count_inv[PARALLEL_LOADS];
accscalar_t is_valid[PARALLEL_LOADS];
// load multiple data in
#pragma unroll
for (int j = 0; j < PARALLEL_LOADS; j++) {
if (c_offset < stride && m_offset < reduction_size) {
x_math[j] = input[address_base];
count[j]++;
x_count_inv[j] = accscalar_t(1) / count[j];
is_valid[j] = accscalar_t(1);
} else {
x_math[j] = accscalar_t(0);
x_count_inv[j] = accscalar_t(0);
is_valid[j] = accscalar_t(0);
}
m_offset += inner_loop_stride;
address_base += address_increment;
}
// calculate mean/m2n with welford
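    // (Welford update: mean += (x - mean) / n; m2n += (x - mean_old) * (x - mean_new);
    //  x_count_inv and is_valid are zero for out-of-range lanes, so padding does not
    //  perturb the running mean or m2n)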
#pragma unroll
for (int j = 0; j < PARALLEL_LOADS; j++) {
accscalar_t delta0 = x_math[j] - x_mean[j];
x_mean[j] += delta0 * x_count_inv[j];
accscalar_t delta1 = x_math[j] - x_mean[j];
m_2_n[j] += delta0 * delta1 * is_valid[j];
}
}
// thread reduction to accumulate mean/m_2_n/count between PARALLEL_LOADS
#pragma unroll
for (int j = 1; j < PARALLEL_LOADS; j++) {
welford_merge_element(count[0], x_mean[0], m_2_n[0], count[j], x_mean[j], m_2_n[j]);
}
// release x_mean / m_2_n
auto mean_th = x_mean[0];
auto m2_th = m_2_n[0];
auto count_th = count[0];
// block-wise reduction with shared memory (since reduction cannot be done within a warp)
static __shared__ accscalar_t shmem_mean[MAX_BLOCK_SIZE];
static __shared__ accscalar_t shmem_m2n[MAX_BLOCK_SIZE];
static __shared__ int shmem_count[MAX_BLOCK_SIZE];
welford_merge_block_vertical(count_th, mean_th, m2_th, shmem_count, shmem_mean, shmem_m2n);
  // grid reduction if needed (a cooperative launch was used here in the first place)
if (gridDim.y > 1) {
volatile accscalar_t* staging_mean = staging_data;
volatile accscalar_t* staging_m2n = &staging_data[stride*gridDim.y];
volatile int* staging_count = reinterpret_cast<volatile int*>(&staging_m2n[stride*gridDim.y]);
address_base = c_offset + blockIdx.y * stride;
// write data to staging_data;
if (threadIdx.y == 0 && c_offset < stride) {
staging_mean[address_base] = mean_th;
staging_m2n[address_base] = m2_th;
staging_count[address_base] = count_th;
}
__threadfence();
    __syncthreads(); // ensure writes to staging_ are visible to all blocks
__shared__ bool is_last_block_done;
// mark block done
if (threadIdx.x == 0 && threadIdx.y == 0) {
int old = atomicAdd(&semaphores[blockIdx.x], 1);
is_last_block_done = (old == (gridDim.y-1));
}
__syncthreads();
// check that all data is now available in global memory
if (is_last_block_done) {
count_th = 0;
mean_th = accscalar_t(0.0);
m2_th = accscalar_t(0.0);
for (int y = threadIdx.y; y < gridDim.y; y += blockDim.y) {
address_base = c_offset + y * stride;
int num_new = c_offset < stride ? staging_count[address_base] : 0;
accscalar_t mean_new = c_offset < stride ? staging_mean[address_base] : accscalar_t(0.0);
accscalar_t m2n_new = c_offset < stride ? staging_m2n[address_base] : accscalar_t(0.0);
welford_merge_element(count_th, mean_th, m2_th, num_new, mean_new, m2n_new);
}
welford_merge_block_vertical(count_th, mean_th, m2_th, shmem_count, shmem_mean, shmem_m2n);
if (threadIdx.y == 0 && c_offset < stride) {
out_mean[c_offset] = static_cast<outscalar_t>(mean_th);
out_var_biased[c_offset] = static_cast<outscalar_t>(m2_th / count_th);
}
}
} else {
if (blockIdx.y == 0 && threadIdx.y == 0 && c_offset < stride) {
out_mean[c_offset] = static_cast<outscalar_t>(mean_th);
out_var_biased[c_offset] = static_cast<outscalar_t>(m2_th / count_th);
}
}
}
// parallel welford kernel to further reduce mean / biased_var
// into mean / unbiased_var / inv_std across multiple processes.
template <typename scalar_t>
__global__ void welford_kernel_parallel(
const scalar_t* __restrict__ mean,
const scalar_t* __restrict__ var_biased,
scalar_t* __restrict__ out_mean,
scalar_t* __restrict__ out_var,
scalar_t* __restrict__ inv_std,
const int world_size,
const int feature_size,
const float eps,
const int numel) {
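  // Each of the world_size processes contributes a per-feature mean and biased variance
  // computed over `numel` elements; they are merged with Welford (M2 = biased_var * numel)
  // to produce the global mean, unbiased variance M2/(count-1), and inv_std = 1/sqrt(M2/count + eps).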
for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < feature_size; i += gridDim.x * blockDim.x) {
// load data;
int address = i;
scalar_t x_mean = 0;
scalar_t m_2_n = 0;
int count = 0;
for (int j = 0; j < world_size; j++) {
welford_merge_element(count, x_mean, m_2_n, numel, mean[address], var_biased[address]*numel);
address += feature_size;
}
out_mean[i] = x_mean;
out_var[i] = m_2_n/ (count - 1);
inv_std[i] = scalar_t(1) / sqrt(m_2_n/count + eps);
}
}
// elementwise BN kernel
template <
typename scalar_t,
typename accscalar_t,
typename layerscalar_t,
int PARALLEL_LOADS>
__global__ void batchnorm_forward_c_last_kernel(
const scalar_t* __restrict__ input,
const accscalar_t* __restrict__ mean,
const accscalar_t* __restrict__ inv_std,
const layerscalar_t* __restrict__ weight,
const layerscalar_t* __restrict__ shift,
scalar_t* __restrict__ out,
const int reduction_size,
const int stride) {
// tensor dimension (m,c)
// loop along m dimension
int inner_loop_stride = blockDim.y * gridDim.y;
// offset along m dimension
int m_offset = blockIdx.y * blockDim.y + threadIdx.y;
int c_offset = blockIdx.x * blockDim.x + threadIdx.x;
auto m_c = mean[c_offset];
auto inv_std_c = static_cast<accscalar_t>(inv_std[c_offset]);
auto w_c = weight == NULL ? accscalar_t(1.0) : static_cast<accscalar_t>(weight[c_offset]);
auto s_c = shift == NULL ? accscalar_t(0.0) : static_cast<accscalar_t>(shift[c_offset]);
int loop_count = 1 + (reduction_size - 1) / (inner_loop_stride * PARALLEL_LOADS);
int address_base = m_offset * stride + c_offset;
int address_increment = inner_loop_stride * stride;
for (int i = 0; i < loop_count; i++) {
#pragma unroll
for (int j = 0; j < PARALLEL_LOADS; j++) {
if (c_offset < stride && m_offset < reduction_size) {
out[address_base] = static_cast<scalar_t>(
w_c * (static_cast<accscalar_t>(input[address_base]) - m_c ) * inv_std_c + s_c
);
}
m_offset += inner_loop_stride;
address_base += address_increment;
}
}
}
// batchnorm backward kernel for c last tensor
template
<typename scalar_t,
typename accscalar_t,
typename layerscalar_t,
int PARALLEL_LOADS>
__global__ void reduce_bn_c_last_kernel(
const scalar_t* __restrict__ input,
const scalar_t* __restrict__ grad_output,
const accscalar_t* __restrict__ mean,
const accscalar_t* __restrict__ inv_std,
accscalar_t* __restrict__ mean_dy,
accscalar_t* __restrict__ mean_dy_xmu,
layerscalar_t* __restrict__ grad_weight,
layerscalar_t* __restrict__ grad_bias,
volatile accscalar_t* staging_data,
int* semaphores,
const int reduction_size,
const int stride) {
// hide latency with concurrency
accscalar_t sum_dy[PARALLEL_LOADS];
accscalar_t sum_dy_xmu[PARALLEL_LOADS];
#pragma unroll
for (int i = 0; i < PARALLEL_LOADS; i++) {
sum_dy[i] = accscalar_t(0);
sum_dy_xmu[i] = accscalar_t(0);
}
// tensor dimension (m,c)
// loop along m dimension
int inner_loop_stride = blockDim.y * gridDim.y;
// offset along m dimension
int m_offset = blockIdx.y * blockDim.y + threadIdx.y;
int c_offset = blockIdx.x * blockDim.x + threadIdx.x;
int loop_count = 1 + (reduction_size - 1) / (inner_loop_stride * PARALLEL_LOADS);
int address_base = m_offset * stride + c_offset;
int address_increment = inner_loop_stride * stride;
auto r_mean = mean[c_offset];
auto factor = inv_std[c_offset];
for (int i = 0; i < loop_count; i++) {
accscalar_t x_input[PARALLEL_LOADS];
accscalar_t x_grad_output[PARALLEL_LOADS];
// load multiple data in
#pragma unroll
for (int j = 0; j < PARALLEL_LOADS; j++) {
if (c_offset < stride && m_offset < reduction_size) {
x_input[j] = input[address_base];
x_grad_output[j] = grad_output[address_base];
} else {
x_input[j] = accscalar_t(0);
x_grad_output[j] = accscalar_t(0);
}
m_offset += inner_loop_stride;
address_base += address_increment;
}
// calculate sum_dy / sum_dy_xmu
#pragma unroll
for (int j = 0; j < PARALLEL_LOADS; j++) {
sum_dy[j] += x_grad_output[j];
sum_dy_xmu[j] += x_grad_output[j] * (x_input[j] - r_mean);
}
}
// thread reduction to accumulate sum_dy / sum_dy_xmu between PARALLEL_LOADS
#pragma unroll
for (int j = 1; j < PARALLEL_LOADS; j++) {
sum_dy[0] += sum_dy[j];
sum_dy_xmu[0] += sum_dy_xmu[j];
}
// release array of registers
auto sum_dy_th = sum_dy[0];
auto sum_dy_xmu_th = sum_dy_xmu[0];
// block-wise reduction with shared memory (since reduction cannot be done within a warp)
static __shared__ accscalar_t shmem_sum_dy[MAX_BLOCK_SIZE];
static __shared__ accscalar_t shmem_sum_dy_xmu[MAX_BLOCK_SIZE];
merge_block_vertical(sum_dy_th, sum_dy_xmu_th, shmem_sum_dy, shmem_sum_dy_xmu);
  // grid reduction if needed (a cooperative launch was used here in the first place)
if (gridDim.y > 1) {
volatile accscalar_t* staging_sum_dy = staging_data;
volatile accscalar_t* staging_sum_dy_xmu = &staging_data[stride*gridDim.y];
address_base = c_offset + blockIdx.y * stride;
// write data to staging_data;
if (threadIdx.y == 0 && c_offset < stride) {
staging_sum_dy[address_base] = sum_dy_th;
staging_sum_dy_xmu[address_base] = sum_dy_xmu_th;
}
__threadfence();
    __syncthreads(); // ensure writes to staging_ are visible to all blocks
__shared__ bool is_last_block_done;
// mark block done
if (threadIdx.x == 0 && threadIdx.y == 0) {
int old = atomicAdd(&semaphores[blockIdx.x], 1);
is_last_block_done = (old == (gridDim.y-1));
}
__syncthreads();
// check that all data is now available in global memory
if (is_last_block_done) {
sum_dy_th = accscalar_t(0.0);
sum_dy_xmu_th = accscalar_t(0.0);
for (int y = threadIdx.y; y < gridDim.y; y += blockDim.y) {
address_base = c_offset + y * stride;
sum_dy_th += (c_offset < stride ? staging_sum_dy[address_base] : accscalar_t(0.0));
sum_dy_xmu_th += (c_offset < stride ? staging_sum_dy_xmu[address_base] : accscalar_t(0.0));
}
merge_block_vertical(sum_dy_th, sum_dy_xmu_th, shmem_sum_dy, shmem_sum_dy_xmu);
if (threadIdx.y == 0 && c_offset < stride) {
if (grad_bias != NULL) {
grad_bias[c_offset] = static_cast<layerscalar_t>(sum_dy_th);
}
if (grad_weight != NULL) {
grad_weight[c_offset] = static_cast<layerscalar_t>(sum_dy_xmu_th * factor);
}
mean_dy[c_offset] = sum_dy_th / reduction_size;
mean_dy_xmu[c_offset] = sum_dy_xmu_th / reduction_size;
}
}
} else {
if (blockIdx.y == 0 && threadIdx.y == 0 && c_offset < stride) {
if (grad_bias != NULL) {
grad_bias[c_offset] = static_cast<layerscalar_t>(sum_dy_th);
}
if (grad_weight != NULL) {
grad_weight[c_offset] = static_cast<layerscalar_t>(sum_dy_xmu_th * factor);
}
mean_dy[c_offset] = sum_dy_th / reduction_size;
mean_dy_xmu[c_offset] = sum_dy_xmu_th / reduction_size;
}
}
}
// elementwise BN kernel
template <
typename scalar_t,
typename accscalar_t,
typename layerscalar_t,
int PARALLEL_LOADS>
__global__ void batchnorm_backward_c_last_kernel(
const scalar_t* __restrict__ grad_output,
const scalar_t* __restrict__ input,
const accscalar_t* __restrict__ mean,
const accscalar_t* __restrict__ inv_std,
const layerscalar_t* __restrict__ weight,
const accscalar_t* __restrict__ mean_dy,
const accscalar_t* __restrict__ mean_dy_xmu,
scalar_t* __restrict__ grad_input,
const int reduction_size,
const int stride) {
// tensor dimension (m,c)
// loop along m dimension
int inner_loop_stride = blockDim.y * gridDim.y;
// offset along m dimension
int m_offset = blockIdx.y * blockDim.y + threadIdx.y;
int c_offset = blockIdx.x * blockDim.x + threadIdx.x;
auto m_c = mean[c_offset];
auto m_dy_c = mean_dy[c_offset];
auto factor_1_c = inv_std[c_offset];
auto factor_2_c = (weight == NULL? accscalar_t(1.0) : static_cast<accscalar_t>(weight[c_offset])) * factor_1_c;
factor_1_c = factor_1_c * factor_1_c * mean_dy_xmu[c_offset];
int loop_count = 1 + (reduction_size - 1) / (inner_loop_stride * PARALLEL_LOADS);
int address_base = m_offset * stride + c_offset;
int address_increment = inner_loop_stride * stride;
for (int i = 0; i < loop_count; i++) {
#pragma unroll
for (int j = 0; j < PARALLEL_LOADS; j++) {
if (c_offset < stride && m_offset < reduction_size) {
grad_input[address_base] = static_cast<scalar_t>(
(static_cast<accscalar_t>(grad_output[address_base]) - m_dy_c -
(static_cast<accscalar_t>(input[address_base]) - m_c) * factor_1_c)
* factor_2_c);
}
m_offset += inner_loop_stride;
address_base += address_increment;
}
}
}
std::vector<at::Tensor> welford_mean_var_CUDA(const at::Tensor input) {
const auto batch_size = input.size(0);
const auto feature_size = input.size(1);
auto space_size = get_tensor_spatial_size(input);
auto scalar_type = promote_scalartype(input);
at::Tensor out_var_biased = at::empty({feature_size}, input.options().dtype(scalar_type));
at::Tensor out_mean = at::empty({feature_size}, input.options().dtype(scalar_type));
int block_y = min(h_last_pow2(batch_size), int(MAX_BLOCK_SIZE / 32));
int block_x = max(1, min(MAX_BLOCK_SIZE / block_y, h_last_pow2(space_size)));
const dim3 block(block_x, block_y);
const dim3 grid(feature_size);
auto stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA();
{
using namespace at;
DISPATCH_FLOAT_AND_HALF(input.scalar_type(), 0, "welford_mean_var_kernel",
using accscalar_t = at::acc_type<scalar_t_0, true>;
hipLaunchKernelGGL(( welford_kernel<scalar_t_0, accscalar_t, accscalar_t>), dim3(grid), dim3(block), 0, stream,
input.data<scalar_t_0>(),
out_mean.data<accscalar_t>(),
out_var_biased.data<accscalar_t>(),
batch_size,
feature_size,
space_size);
);
}
return {out_mean, out_var_biased};
}
at::Tensor batchnorm_forward_CUDA(
const at::Tensor input,
const at::Tensor mean,
const at::Tensor inv_std,
const at::optional<at::Tensor> weight,
const at::optional<at::Tensor> shift) {
const auto batch_size = input.size(0);
const auto feature_size = input.size(1);
at::Tensor out = at::empty_like(input);
auto space_size = get_tensor_spatial_size(input);
int block_x = max(32, min(MAX_BLOCK_SIZE, h_last_pow2(space_size)/4));
int block_y = max(1, min(MAX_BLOCK_SIZE/block_x, h_last_pow2(batch_size)/4));
const dim3 block(block_x, block_y);
int grid_z = max(1, min(65535, h_last_pow2(space_size)/4/block_x));
int batch_group_size = max(1, min(65535, h_last_pow2(batch_size)/block_y));
const dim3 grid(feature_size, batch_group_size, grid_z);
auto stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA();
if (input.scalar_type() == at::ScalarType::Half
&& weight.has_value() &&
weight.value().scalar_type() == at::ScalarType::Float) {
using namespace at;
DISPATCH_FLOAT_AND_HALF(input.scalar_type(), 0, "batchnorm_forward",
using accscalar_t = at::acc_type<scalar_t_0, true>;
hipLaunchKernelGGL(( batchnorm_forward_kernel<scalar_t_0, accscalar_t, accscalar_t>), dim3(grid), dim3(block), 0, stream,
input.data<scalar_t_0>(),
mean.data<accscalar_t>(),
inv_std.data<accscalar_t>(),
weight.has_value() ? weight.value().data<accscalar_t>() : NULL,
shift.has_value() ? shift.value().data<accscalar_t>() : NULL,
out.data<scalar_t_0>(),
space_size,
batch_size);
);
} else {
if (weight.has_value()) {
AT_CHECK(input.scalar_type() == weight.value().scalar_type(),
"input.scalar_type() is not supported with weight.scalar_type()");
}
using namespace at;
DISPATCH_FLOAT_AND_HALF(input.scalar_type(), 0, "batchnorm_forward",
using accscalar_t = at::acc_type<scalar_t_0, true>;
hipLaunchKernelGGL(( batchnorm_forward_kernel<scalar_t_0, accscalar_t, scalar_t_0>), dim3(grid), dim3(block), 0, stream,
input.data<scalar_t_0>(),
mean.data<accscalar_t>(),
inv_std.data<accscalar_t>(),
weight.has_value() ? weight.value().data<scalar_t_0>() : NULL,
shift.has_value() ? shift.value().data<scalar_t_0>() : NULL,
out.data<scalar_t_0>(),
space_size,
batch_size);
);
}
return out;
}
std::vector<at::Tensor> reduce_bn_CUDA(
const at::Tensor grad_output,
const at::Tensor input,
const at::Tensor mean,
const at::Tensor inv_std,
const at::optional<at::Tensor> weight)
{
const auto batch_size = input.size(0);
const auto feature_size = input.size(1);
auto scalar_type = promote_scalartype(input);
at::Tensor mean_dy = at::empty({feature_size}, mean.options());
at::Tensor mean_dy_xmu = at::empty({feature_size}, mean.options());
at::Tensor grad_weight;
at::Tensor grad_bias;
if (weight.has_value()) {
grad_weight = at::empty({feature_size}, weight.value().options());
grad_bias = at::empty({feature_size}, weight.value().options());
} else {
grad_weight = at::empty({0}, mean.options());
grad_bias = at::empty({0}, mean.options());
}
auto space_size = get_tensor_spatial_size(input);
int block_y = min(h_last_pow2(batch_size), int(MAX_BLOCK_SIZE/ 32));
int block_x = max(1, min(MAX_BLOCK_SIZE/ block_y, h_last_pow2(space_size)));
const dim3 block(block_x, block_y);
const dim3 grid(feature_size);
auto stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA();
if (input.scalar_type() == at::ScalarType::Half
&& weight.has_value() &&
weight.value().scalar_type() == at::ScalarType::Float) {
using namespace at;
DISPATCH_FLOAT_AND_HALF(input.scalar_type(), 0, "batchnorm_backward_reduce",
using accscalar_t = at::acc_type<scalar_t_0, true>;
hipLaunchKernelGGL(( reduce_bn_kernel<scalar_t_0, accscalar_t, accscalar_t>), dim3(grid), dim3(block), 0, stream,
input.data<scalar_t_0>(),
grad_output.data<scalar_t_0>(),
mean.data<accscalar_t>(),
inv_std.data<accscalar_t>(),
mean_dy.data<accscalar_t>(),
mean_dy_xmu.data<accscalar_t>(),
weight.has_value() ? grad_weight.data<accscalar_t>() : NULL,
weight.has_value() ? grad_bias.data<accscalar_t>() : NULL,
batch_size,
feature_size,
space_size);
);
} else {
if (weight.has_value()) {
AT_CHECK(input.scalar_type() == weight.value().scalar_type(),
"input.scalar_type() is not supported with weight.scalar_type()");
}
using namespace at;
DISPATCH_FLOAT_AND_HALF(input.scalar_type(), 0, "batchnorm_backward_reduce",
using accscalar_t = at::acc_type<scalar_t_0, true>;
hipLaunchKernelGGL(( reduce_bn_kernel<scalar_t_0, accscalar_t, scalar_t_0>), dim3(grid), dim3(block), 0, stream,
input.data<scalar_t_0>(),
grad_output.data<scalar_t_0>(),
mean.data<accscalar_t>(),
inv_std.data<accscalar_t>(),
mean_dy.data<accscalar_t>(),
mean_dy_xmu.data<accscalar_t>(),
weight.has_value() ? grad_weight.data<scalar_t_0>() : NULL,
weight.has_value() ? grad_bias.data<scalar_t_0>() : NULL,
batch_size,
feature_size,
space_size);
);
}
return {mean_dy, mean_dy_xmu, grad_weight, grad_bias};
}
at::Tensor batchnorm_backward_CUDA(
const at::Tensor grad_output,
const at::Tensor input,
const at::Tensor mean,
const at::Tensor inv_std,
const at::optional<at::Tensor> weight,
const at::Tensor mean_dy,
const at::Tensor mean_dy_xmu) {
const auto batch_size = input.size(0);
const auto feature_size = input.size(1);
at::Tensor grad_input = at::empty_like(input);
auto space_size = get_tensor_spatial_size(input);
int block_x = max(32, min(MAX_BLOCK_SIZE, h_last_pow2(space_size)/4));
int block_y = max(1, min(MAX_BLOCK_SIZE/block_x, h_last_pow2(batch_size)/4));
const dim3 block(block_x, block_y);
int grid_z = max(1, min(65535, h_last_pow2(space_size)/4/block_x));
int batch_group_size = max(1, min(65535, h_last_pow2(batch_size)/block_y));
const dim3 grid(feature_size, batch_group_size, grid_z);
auto stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA();
if (input.scalar_type() == at::ScalarType::Half
&& weight.has_value() &&
weight.value().scalar_type() == at::ScalarType::Float) {
using namespace at;
DISPATCH_FLOAT_AND_HALF(input.scalar_type(), 0, "batchnorm_backward",
using accscalar_t = at::acc_type<scalar_t_0, true>;
hipLaunchKernelGGL(( batchnorm_backward_kernel<scalar_t_0, accscalar_t, accscalar_t>), dim3(grid), dim3(block), 0, stream,
grad_output.data<scalar_t_0>(),
input.data<scalar_t_0>(),
mean.data<accscalar_t>(),
inv_std.data<accscalar_t>(),
weight.has_value() ? weight.value().data<accscalar_t>() : NULL,
mean_dy.data<accscalar_t>(),
mean_dy_xmu.data<accscalar_t>(),
grad_input.data<scalar_t_0>(),
space_size,
batch_size);
);
} else {
if (weight.has_value()) {
AT_CHECK(input.scalar_type() == weight.value().scalar_type(),
"input.scalar_type() is not supported with weight.scalar_type()");
}
using namespace at;
DISPATCH_FLOAT_AND_HALF(input.scalar_type(), 0, "batchnorm_backward",
using accscalar_t = at::acc_type<scalar_t_0, true>;
hipLaunchKernelGGL(( batchnorm_backward_kernel<scalar_t_0, accscalar_t, scalar_t_0>), dim3(grid), dim3(block), 0, stream,
grad_output.data<scalar_t_0>(),
input.data<scalar_t_0>(),
mean.data<accscalar_t>(),
inv_std.data<accscalar_t>(),
weight.has_value() ? weight.value().data<scalar_t_0>() : NULL,
mean_dy.data<accscalar_t>(),
mean_dy_xmu.data<accscalar_t>(),
grad_input.data<scalar_t_0>(),
space_size,
batch_size);
);
}
return grad_input;
}
std::vector<at::Tensor> welford_parallel_CUDA(const at::Tensor mean_feature_nodes,
const at::Tensor var_biased,
int numel,
const float eps) {
const auto world_size = mean_feature_nodes.size(0);
const auto feature_size = mean_feature_nodes.size(1);
at::Tensor out_var = at::empty({feature_size}, var_biased.options());
at::Tensor inv_std = at::empty_like(out_var);
at::Tensor out_mean = at::empty_like(out_var);
// TODO(jie): tile this for memory coalescing!
const int block = ::min(h_last_pow2(feature_size), MAX_BLOCK_SIZE);
const int grid = std::max<int>(1, feature_size / block);
auto stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA();
{
using namespace at;
DISPATCH_FLOAT_AND_HALF(mean_feature_nodes.scalar_type(), 0, "welford_parallel_kernel",
hipLaunchKernelGGL(( welford_kernel_parallel<scalar_t_0>), dim3(grid), dim3(block), 0, stream,
mean_feature_nodes.data<scalar_t_0>(),
var_biased.data<scalar_t_0>(),
out_mean.data<scalar_t_0>(),
out_var.data<scalar_t_0>(),
inv_std.data<scalar_t_0>(),
world_size,
feature_size,
eps,
numel);
);
}
return {out_mean, out_var, inv_std};
}
std::vector<at::Tensor> welford_mean_var_c_last_CUDA(const at::Tensor input) {
const auto stride = input.size(input.ndimension()-1);
const auto reduction_size = input.numel() / stride;
auto scalar_type = promote_scalartype(input);
auto option = input.options().dtype(scalar_type);
at::Tensor out_var_biased = at::empty({stride}, option);
at::Tensor out_mean = at::empty({stride}, option);
dim3 block;
dim3 grid;
flexible_launch_configs(reduction_size, stride, block, grid, true);
at::Tensor staging_data;
at::Tensor semaphores;
if (grid.y > 1) {
staging_data = at::empty({4*stride*grid.y}, option);
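    // staging layout: stride*grid.y accscalar entries each for the mean and m2n partials,
    // plus stride*grid.y int counts packed into the remaining accscalar-sized slots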
semaphores = at::zeros({grid.x}, input.options().dtype(at::kInt));
}
auto stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA();
{
using namespace at;
DISPATCH_FLOAT_AND_HALF(input.scalar_type(), 0, "welford_mean_var_c_last",
using accscalar_t = at::acc_type<scalar_t_0, true>;
accscalar_t* staging_data_ptr = grid.y > 1 ? staging_data.data<accscalar_t>() : nullptr;
int* semaphores_ptr = grid.y > 1 ? semaphores.data<int>() : nullptr;
hipLaunchKernelGGL(( welford_kernel_c_last<scalar_t_0, accscalar_t, accscalar_t, ELEMENTS_PER_ITER>)
, dim3(grid), dim3(block), 0, stream,
input.data<scalar_t_0>(),
out_mean.data<accscalar_t>(),
out_var_biased.data<accscalar_t>(),
staging_data_ptr,
semaphores_ptr,
reduction_size,
stride);
);
}
return {out_mean, out_var_biased};
}
at::Tensor batchnorm_forward_c_last_CUDA(
const at::Tensor input,
const at::Tensor mean,
const at::Tensor inv_std,
const at::optional<at::Tensor> weight,
const at::optional<at::Tensor> shift) {
const auto stride = input.size(input.ndimension()-1);
const auto reduction_size = input.numel() / stride;
at::Tensor out = at::empty_like(input);
dim3 block;
dim3 grid;
flexible_launch_configs(reduction_size, stride, block, grid);
auto stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA();
if (input.scalar_type() == at::ScalarType::Half
&& weight.has_value() && weight.value().scalar_type() == at::ScalarType::Float) {
using namespace at;
DISPATCH_FLOAT_AND_HALF(input.scalar_type(), 0, "batchnorm_forward",
using accscalar_t = at::acc_type<scalar_t_0, true>;
hipLaunchKernelGGL(( batchnorm_forward_c_last_kernel<scalar_t_0, accscalar_t, accscalar_t, ELEMENTS_PER_ITER>)
, dim3(grid), dim3(block), 0, stream,
input.data<scalar_t_0>(),
mean.data<accscalar_t>(),
inv_std.data<accscalar_t>(),
weight.has_value() ? weight.value().data<accscalar_t>() : NULL,
        shift.has_value() ? shift.value().data<accscalar_t>() : NULL,
out.data<scalar_t_0>(),
reduction_size,
stride);
);
} else {
if (weight.has_value()) {
AT_CHECK(input.scalar_type() == weight.value().scalar_type(),
"input.scalar_type() is not supported with weight.scalar_type()");
}
using namespace at;
DISPATCH_FLOAT_AND_HALF(input.scalar_type(), 0, "batchnorm_forward",
using accscalar_t = at::acc_type<scalar_t_0, true>;
hipLaunchKernelGGL(( batchnorm_forward_c_last_kernel<scalar_t_0, accscalar_t, scalar_t_0, ELEMENTS_PER_ITER>)
, dim3(grid), dim3(block), 0, stream,
input.data<scalar_t_0>(),
mean.data<accscalar_t>(),
inv_std.data<accscalar_t>(),
weight.has_value() ? weight.value().data<scalar_t_0>() : NULL,
        shift.has_value() ? shift.value().data<scalar_t_0>() : NULL,
out.data<scalar_t_0>(),
reduction_size,
stride);
);
}
return out;
}
std::vector<at::Tensor> reduce_bn_c_last_CUDA(
const at::Tensor grad_output,
const at::Tensor input,
const at::Tensor mean,
const at::Tensor inv_std,
const at::optional<at::Tensor> weight) {
const auto stride = input.size(input.ndimension()-1);
const auto reduction_size = input.numel() / stride;
at::Tensor mean_dy = at::empty({stride}, mean.options());
at::Tensor mean_dy_xmu = at::empty({stride}, mean.options());
at::Tensor grad_weight;
at::Tensor grad_bias;
if (weight.has_value()) {
grad_weight = at::empty({stride}, weight.value().options());
grad_bias = at::empty({stride}, weight.value().options());
} else {
// because I cannot return an uninitialized at::Tensor
grad_weight = at::empty({0}, mean.options());
grad_bias = at::empty({0}, mean.options());
}
dim3 block;
dim3 grid;
flexible_launch_configs(reduction_size, stride, block, grid, true);
at::Tensor staging_data;
at::Tensor semaphores;
if (grid.y > 1) {
staging_data = at::empty({2*stride*grid.y}, mean.options());
semaphores = at::zeros({grid.x}, input.options().dtype(at::kInt));
}
auto stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA();
if (input.scalar_type() == at::ScalarType::Half
&& weight.has_value()
&& weight.value().scalar_type() == at::ScalarType::Float) {
using namespace at;
DISPATCH_FLOAT_AND_HALF(input.scalar_type(), 0, "batchnorm_backward_reduce",
using accscalar_t = at::acc_type<scalar_t_0, true>;
accscalar_t* staging_data_ptr = grid.y > 1 ? staging_data.data<accscalar_t>() : nullptr;
int* semaphores_ptr = grid.y > 1 ? semaphores.data<int>() : nullptr;
hipLaunchKernelGGL(( reduce_bn_c_last_kernel<scalar_t_0, accscalar_t, accscalar_t, ELEMENTS_PER_ITER>)
, dim3(grid), dim3(block), 0, stream,
input.data<scalar_t_0>(),
grad_output.data<scalar_t_0>(),
mean.data<accscalar_t>(),
inv_std.data<accscalar_t>(),
mean_dy.data<accscalar_t>(),
mean_dy_xmu.data<accscalar_t>(),
weight.has_value() ? grad_weight.data<accscalar_t>() : NULL,
        weight.has_value() ? grad_bias.data<accscalar_t>() : NULL,
staging_data_ptr,
semaphores_ptr,
reduction_size,
stride);
);
} else {
if (weight.has_value()) {
AT_CHECK(input.scalar_type() == weight.value().scalar_type(),
"input.scalar_type() is not supported with weight.scalar_type()");
}
using namespace at;
DISPATCH_FLOAT_AND_HALF(input.scalar_type(), 0, "batchnorm_backward_reduce",
using accscalar_t = at::acc_type<scalar_t_0, true>;
accscalar_t* staging_data_ptr = grid.y > 1 ? staging_data.data<accscalar_t>() : nullptr;
int* semaphores_ptr = grid.y > 1 ? semaphores.data<int>() : nullptr;
hipLaunchKernelGGL(( reduce_bn_c_last_kernel<scalar_t_0, accscalar_t, scalar_t_0, ELEMENTS_PER_ITER>)
, dim3(grid), dim3(block), 0, stream,
input.data<scalar_t_0>(),
grad_output.data<scalar_t_0>(),
mean.data<accscalar_t>(),
inv_std.data<accscalar_t>(),
mean_dy.data<accscalar_t>(),
mean_dy_xmu.data<accscalar_t>(),
weight.has_value() ? grad_weight.data<scalar_t_0>() : NULL,
        weight.has_value() ? grad_bias.data<scalar_t_0>() : NULL,
staging_data_ptr,
semaphores_ptr,
reduction_size,
stride);
);
}
return {mean_dy, mean_dy_xmu, grad_weight, grad_bias};
}
at::Tensor batchnorm_backward_c_last_CUDA(
const at::Tensor grad_output,
const at::Tensor input,
const at::Tensor mean,
const at::Tensor inv_std,
const at::optional<at::Tensor> weight,
const at::Tensor mean_dy,
const at::Tensor mean_dy_xmu) {
const auto stride = input.size(input.ndimension()-1);
const auto reduction_size = input.numel() / stride;
at::Tensor grad_input = at::empty_like(input);
dim3 block;
dim3 grid;
flexible_launch_configs(reduction_size, stride, block, grid);
auto stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA();
if (input.scalar_type() == at::ScalarType::Half
&& weight.has_value() && weight.value().scalar_type() == at::ScalarType::Float) {
using namespace at;
    DISPATCH_FLOAT_AND_HALF(input.scalar_type(), 0, "batchnorm_backward",
using accscalar_t = at::acc_type<scalar_t_0, true>;
hipLaunchKernelGGL(( batchnorm_backward_c_last_kernel<scalar_t_0, accscalar_t, accscalar_t, ELEMENTS_PER_ITER>)
, dim3(grid), dim3(block), 0, stream,
grad_output.data<scalar_t_0>(),
input.data<scalar_t_0>(),
mean.data<accscalar_t>(),
inv_std.data<accscalar_t>(),
weight.has_value() ? weight.value().data<accscalar_t>() : NULL,
mean_dy.data<accscalar_t>(),
mean_dy_xmu.data<accscalar_t>(),
grad_input.data<scalar_t_0>(),
reduction_size,
stride);
);
} else {
if (weight.has_value()) {
AT_CHECK(input.scalar_type() == weight.value().scalar_type(),
"input.scalar_type() is not supported with weight.scalar_type()");
}
using namespace at;
    DISPATCH_FLOAT_AND_HALF(input.scalar_type(), 0, "batchnorm_backward",
using accscalar_t = at::acc_type<scalar_t_0, true>;
hipLaunchKernelGGL(( batchnorm_backward_c_last_kernel<scalar_t_0, accscalar_t, scalar_t_0, ELEMENTS_PER_ITER>)
, dim3(grid), dim3(block), 0, stream,
grad_output.data<scalar_t_0>(),
input.data<scalar_t_0>(),
mean.data<accscalar_t>(),
inv_std.data<accscalar_t>(),
weight.has_value() ? weight.value().data<scalar_t_0>() : NULL,
mean_dy.data<accscalar_t>(),
mean_dy_xmu.data<accscalar_t>(),
grad_input.data<scalar_t_0>(),
reduction_size,
stride);
);
}
return grad_input;
}
| welford.cu | #include <iostream>
#include <ATen/ATen.h>
#include <ATen/AccumulateType.h>
#include <ATen/cuda/CUDAContext.h>
#include <cuda.h>
#include <cuda_runtime.h>
#include <vector>
#include "type_shim.h"
__device__ __forceinline__ int lastpow2(int n)
{
int out = 1 << (31 - __clz(n));
if(n == out)
out >>= 1;
return out;
}
__host__ __forceinline__ int h_next_pow2(unsigned int n) {
n--;
n |= (n >> 1);
n |= (n >> 2);
n |= (n >> 4);
n |= (n >> 8);
n |= (n >> 16);
return ++n;
}
__host__ __forceinline__ int h_last_pow2(unsigned int n) {
n |= (n >> 1);
n |= (n >> 2);
n |= (n >> 4);
n |= (n >> 8);
n |= (n >> 16);
return n - (n >> 1);
}
#define WARP_SIZE 32
template<typename T>
__device__ __forceinline__ T warp_reduce_sum(T val)
{
#pragma unroll
for(int i = WARP_SIZE/2; i > 0; i >>= 1)
val = val + __shfl_down_sync(0xffffffff, val, i);
return val;
}
template<typename T>
__device__ __forceinline__ T reduce_block(T *x, T val)
{
int tid = threadIdx.y*blockDim.x + threadIdx.x;
int blockSize = blockDim.x * blockDim.y;
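  // two-stage sum: warp-level shuffle reduction first, lane 0 of each warp spills its
  // partial to shared memory x[], then the first warp reduces the per-warp partials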
if (blockSize > 32) {
val = warp_reduce_sum(val);
if (tid % WARP_SIZE == 0)
x[tid/WARP_SIZE] = val;
__syncthreads();
val = (tid < blockSize / WARP_SIZE? x[tid%WARP_SIZE] : T(0));
}
if(tid/WARP_SIZE==0) val = warp_reduce_sum(val);
return val;
}
#define ELEMENTS_PER_ITER 4 // enables concurrency within each thread to hide latency
#define ELEMENTS_PER_THREAD 16
#define OPTIMAL_TILE_W 32
#define MAX_H_BLOCK 128
#define MAX_BLOCK_SIZE 512
__host__ int div_ru(int x, int y) {
return h_last_pow2(1 + (x-1)/y);
}
__host__ void flexible_launch_configs(
const int reduction,
const int stride,
dim3 &block,
dim3 &grid,
const bool coop_flag = false) {
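  // block.x tiles the feature (stride) dimension and block.y the reduction dimension;
  // grid.y splits the reduction across blocks (capped at MAX_H_BLOCK) and is forced to 1
  // when coop_flag is set and the reduction is too small to amortize a grid-wide reduction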
int block_x = std::min(h_last_pow2(stride), OPTIMAL_TILE_W);
int block_y = std::min(h_last_pow2(div_ru(reduction , ELEMENTS_PER_THREAD)),
MAX_BLOCK_SIZE / block_x);
if (block_x * block_y != MAX_BLOCK_SIZE) {
block_x = std::min(h_last_pow2(stride), MAX_BLOCK_SIZE / block_y);
}
int grid_x = div_ru(stride, block_x);
int grid_y = std::min(div_ru(reduction, block_y * ELEMENTS_PER_THREAD), MAX_H_BLOCK);
if (coop_flag) {
// it's not worth having a grid reduction if the reduction dimension is not big enough
grid_y = grid_y < 8 ? 1 : grid_y;
}
block.x = block_x;
block.y = block_y;
block.z = 1;
grid.x = grid_x;
grid.y = grid_y;
grid.z = 1;
}
template<typename T, typename C>
__device__ __forceinline__ void welford_merge_element(C& count,
T& mean,
T& m2n,
const C& num_new,
const T& mean_new,
const T& m2n_new) {
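  // parallel (Chan et al.) merge of two Welford partials (count, mean, m2n):
  //   mean = (count*mean + num_new*mean_new) / (count + num_new)
  //   m2n  = m2n + m2n_new + delta0^2 * count * num_new / (count + num_new)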
T factor = T(1.0) / max(1, (count + num_new));
T delta0 = mean - mean_new;
mean = (mean_new * num_new + mean * count) * factor;
m2n += m2n_new + delta0 * delta0 * num_new * count * factor;
count += num_new;
}
template<typename T>
__device__ __forceinline__ void warp_reduce_mean_m2n(T &mean, T &m2n, int &num)
{
#pragma unroll
for(int i = WARP_SIZE/2; i > 0; i >>= 1) {
auto num_new = __shfl_down_sync(0xffffffff, num, i);
auto mean_new = __shfl_down_sync(0xffffffff, mean, i);
auto m2n_new = __shfl_down_sync(0xffffffff, m2n, i);
welford_merge_element(num, mean, m2n, num_new, mean_new, m2n_new);
}
}
template <typename T>
__device__ void welford_reduce_mean_m2n(
T* __restrict__ x,
int* __restrict__ count,
T &mean,
T &m2n,
int &num,
int block_size,
int thread_id)
{
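  // hierarchical Welford reduction: shuffle-merge within each warp, lane 0 stages its
  // (mean, m2n) pair and count in shared memory, then warp 0 merges the per-warp partials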
int lane = thread_id % WARP_SIZE;
int wid = thread_id / WARP_SIZE;
if (block_size > 32) {
warp_reduce_mean_m2n(mean, m2n, num);
if (lane == 0) {
x[wid*2] = mean;
x[wid*2+1] = m2n;
count[wid] = num;
}
__syncthreads();
if (wid == 0) {
mean = (thread_id < block_size / WARP_SIZE)? x[lane*2] : T(0);
m2n = (thread_id < block_size / WARP_SIZE)? x[lane*2+1] : T(0);
num = (thread_id < block_size / WARP_SIZE)? count[lane] : int(0);
}
}
if (wid==0) warp_reduce_mean_m2n(mean, m2n, num);
return;
}
// return spatial size for NC+ Tensors
__host__ int get_tensor_spatial_size(const at::Tensor& input)
{
auto space_size = input.size(2);
for (int i = 3; i < input.ndimension(); i++) {
space_size *= input.size(i);
}
return space_size;
}
// promote accumulation scalar type. promote half to float.
__host__ at::ScalarType promote_scalartype(const at::Tensor& input)
{
return input.scalar_type() == at::ScalarType::Half ?
at::ScalarType::Float : input.scalar_type();
}
// return single element size, optional accumulation type promotion.
__host__ size_t get_element_data_size(const at::Tensor& input, bool accumulation = false)
{
auto scalar_type = accumulation ? promote_scalartype(input) : input.scalar_type();
return at::elementSize(scalar_type);
}
template<typename T, typename C>
__device__ __forceinline__ void welford_merge_block_vertical(C& count,
T& mean,
T& m2n,
C* shmem_count,
T* shmem_mean,
T* shmem_m2n) {
// write to shared memory
auto address_base = threadIdx.x + threadIdx.y * blockDim.x;
shmem_mean[address_base] = mean;
shmem_m2n[address_base] = m2n;
shmem_count[address_base] = count;
#pragma unroll
for (int offset = blockDim.y/2; offset > 0; offset >>= 1) {
__syncthreads();
if (threadIdx.y < offset && threadIdx.y + offset < blockDim.y) {
auto address = address_base + offset * blockDim.x;
// read shared memory back to register for reduction
auto num_new = shmem_count[address];
auto mean_new = shmem_mean[address];
auto m2n_new = shmem_m2n[address];
welford_merge_element(count, mean, m2n, num_new, mean_new, m2n_new);
// last write is not necessary
shmem_mean[address_base] = mean;
shmem_m2n[address_base] = m2n;
shmem_count[address_base] = count;
}
}
}
template<typename T>
__device__ __forceinline__ void merge_block_vertical(T& sum_dy,
T& sum_dy_xmu,
T* shmem_sum_dy,
T* shmem_sum_dy_xmu) {
// write to shared memory
auto address_base = threadIdx.x + threadIdx.y * blockDim.x;
shmem_sum_dy[address_base] = sum_dy;
shmem_sum_dy_xmu[address_base] = sum_dy_xmu;
#pragma unroll
for (int offset = blockDim.y/2; offset > 0; offset >>= 1) {
__syncthreads();
if (threadIdx.y < offset && threadIdx.y + offset < blockDim.y) {
auto address = address_base + offset * blockDim.x;
sum_dy += shmem_sum_dy[address];
sum_dy_xmu += shmem_sum_dy_xmu[address];
// last write is not necessary
shmem_sum_dy[address_base] = sum_dy;
shmem_sum_dy_xmu[address_base] = sum_dy_xmu;
}
}
}
// welford kernel calculating mean/biased_variance/unbiased_variance
template <typename scalar_t, typename accscalar_t, typename outscalar_t>
__global__ void welford_kernel(
const scalar_t* __restrict__ input,
outscalar_t* __restrict__ out_mean,
outscalar_t* __restrict__ out_var_biased,
const int bs,
const int fs,
const int ss) {
int block_size = blockDim.x * blockDim.y;
int count = 0;
accscalar_t x_mean = accscalar_t(0);
accscalar_t m_2_n = accscalar_t(0);
int thread_id = threadIdx.y*blockDim.x + threadIdx.x;
for (int batch_id = threadIdx.y; batch_id < bs; batch_id += blockDim.y) {
int input_base = blockIdx.x*ss + batch_id*ss*fs;
// sequential welford
for (int offset = threadIdx.x; offset < ss ; offset += blockDim.x) {
count++;
auto x_n = static_cast<accscalar_t>(input[offset+input_base]);
auto d = x_n - x_mean;
x_mean += d / count;
m_2_n += d * (x_n - x_mean);
}
}
static __shared__ int s_mem[160];
accscalar_t* s_mem_ac = (accscalar_t*) &s_mem[32];
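  // shared-memory layout: s_mem[0..31] is reserved for per-warp counts; the storage starting
  // at s_mem[32] is reinterpreted as accscalar_t slots for the per-warp (mean, m2n) pairs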
welford_reduce_mean_m2n<accscalar_t>(s_mem_ac, s_mem, x_mean, m_2_n, count, block_size, thread_id);
if (thread_id == 0) {
out_mean[blockIdx.x] = static_cast<outscalar_t>(x_mean);
out_var_biased[blockIdx.x] = static_cast<outscalar_t>(m_2_n/count);
}
}
// elementwise BN kernel
template <typename scalar_t, typename accscalar_t, typename layerscalar_t>
__global__ void batchnorm_forward_kernel(
const scalar_t* __restrict__ input,
const accscalar_t* __restrict__ mean,
const accscalar_t* __restrict__ inv_std,
const layerscalar_t* __restrict__ weight,
const layerscalar_t* __restrict__ shift,
scalar_t* __restrict__ out,
const int ss,
const int bs) {
auto m_c = mean[blockIdx.x];
auto inv_std_c = inv_std[blockIdx.x];
auto w_c = weight == NULL ? accscalar_t(1.0) : static_cast<accscalar_t>(weight[blockIdx.x]);
auto s_c = shift == NULL ? accscalar_t(0.0) : static_cast<accscalar_t>(shift[blockIdx.x]);
for (int batch_offset = blockIdx.y*blockDim.y + threadIdx.y; batch_offset < bs; batch_offset += gridDim.y*blockDim.y) {
int address_base = blockIdx.x*ss + batch_offset*gridDim.x*ss;
for (int offset = threadIdx.x + blockIdx.z*blockDim.x; offset < ss ; offset+= gridDim.z*blockDim.x) {
out[address_base+offset] = static_cast<scalar_t>(w_c * (static_cast<accscalar_t>(input[address_base+offset]) - m_c ) * inv_std_c + s_c);
}
}
}
// Backward BN kernel: calculates grad_bias and grad_weight as well as the intermediate
// results needed to calculate grad_input.
// The grad_input computation is broken into two steps to support sync BN, which requires
// an all-reduce of the intermediate results across processes.
template <typename scalar_t, typename accscalar_t, typename layerscalar_t>
__global__ void reduce_bn_kernel(
const scalar_t* __restrict__ input,
const scalar_t* __restrict__ grad_output,
const accscalar_t* __restrict__ mean,
const accscalar_t* __restrict__ inv_std,
accscalar_t* __restrict__ mean_dy,
accscalar_t* __restrict__ mean_dy_xmu,
layerscalar_t* __restrict__ grad_weight,
layerscalar_t* __restrict__ grad_bias,
const int bs,
const int fs,
const int ss) {
static __shared__ int s_mem[64];
int total_item_num = bs * ss;
int thread_id = threadIdx.y*blockDim.x + threadIdx.x;
auto r_mean = mean[blockIdx.x];
auto factor = inv_std[blockIdx.x];
// Kahan sum
accscalar_t sum_dy = 0.0;
accscalar_t sum_dy_xmu = 0.0;
accscalar_t sum_dy_c = 0.0;
accscalar_t sum_dy_xmu_c = 0.0;
for (int batch_id = threadIdx.y; batch_id < bs; batch_id += blockDim.y) {
int input_base = blockIdx.x*ss + batch_id*ss*fs;
for (int offset = threadIdx.x; offset < ss ; offset += blockDim.x) {
auto e_grad = static_cast<accscalar_t>(grad_output[offset+input_base]);
auto e_input = static_cast<accscalar_t>(input[offset+input_base]);
// calculating sum_dy
auto sum_dy_y = e_grad - sum_dy_c;
auto sum_dy_t = sum_dy + sum_dy_y;
sum_dy_c = (sum_dy_t - sum_dy) - sum_dy_y;
sum_dy = sum_dy_t;
// calculating sum_dy_xmu
auto sum_dy_xmu_y = e_grad * (e_input - r_mean) - sum_dy_xmu_c;
auto sum_dy_xmu_t = sum_dy_xmu + sum_dy_xmu_y;
sum_dy_xmu_c = (sum_dy_xmu_t - sum_dy_xmu) - sum_dy_xmu_y;
sum_dy_xmu = sum_dy_xmu_t;
}
}
sum_dy = reduce_block((accscalar_t*)s_mem, sum_dy);
__syncthreads();
sum_dy_xmu = reduce_block((accscalar_t*)s_mem, sum_dy_xmu);
if (thread_id == 0) {
if (grad_bias != NULL) {
grad_bias[blockIdx.x] = static_cast<layerscalar_t>(sum_dy);
}
if (grad_weight != NULL) {
grad_weight[blockIdx.x] = static_cast<layerscalar_t>(sum_dy_xmu * factor);
}
mean_dy[blockIdx.x] = sum_dy / total_item_num;
mean_dy_xmu[blockIdx.x] = sum_dy_xmu / total_item_num;
}
}
// elementwise backward BN kernel
template <typename scalar_t, typename accscalar_t, typename layerscalar_t>
__global__ void batchnorm_backward_kernel(
const scalar_t* __restrict__ grad_output,
const scalar_t* __restrict__ input,
const accscalar_t* __restrict__ mean,
const accscalar_t* __restrict__ inv_std,
const layerscalar_t* __restrict__ weight,
const accscalar_t* __restrict__ mean_dy,
const accscalar_t* __restrict__ mean_dy_xmu,
scalar_t* __restrict__ grad_input,
const int ss,
const int bs) {
auto m_c = static_cast<accscalar_t>(mean[blockIdx.x]);
auto m_dy_c = static_cast<accscalar_t>(mean_dy[blockIdx.x]);
auto factor_1_c = inv_std[blockIdx.x];
auto factor_2_c = (weight == NULL ? accscalar_t(1.0) : static_cast<accscalar_t>(weight[blockIdx.x])) * factor_1_c;
factor_1_c = factor_1_c * factor_1_c * mean_dy_xmu[blockIdx.x];
for (int batch_offset = blockIdx.y*blockDim.y+threadIdx.y; batch_offset < bs; batch_offset += gridDim.y*blockDim.y) {
int address_base = blockIdx.x*ss + batch_offset*gridDim.x*ss;
for (int offset = threadIdx.x + blockIdx.z*blockDim.x; offset < ss ; offset+= gridDim.z*blockDim.x) {
grad_input[address_base+offset] = (static_cast<accscalar_t>(grad_output[address_base+offset]) - m_dy_c - (static_cast<accscalar_t>(input[address_base+offset]) - m_c) * factor_1_c) * factor_2_c;
}
}
}
// welford kernel for c last tensor calculating mean/biased_variance/unbiased_variance
template
<typename scalar_t,
typename accscalar_t,
typename outscalar_t,
int PARALLEL_LOADS>
__global__ void
welford_kernel_c_last(
const scalar_t* __restrict__ input,
outscalar_t* __restrict__ out_mean,
outscalar_t* __restrict__ out_var_biased,
volatile accscalar_t* staging_data,
int* semaphores,
const int reduction_size,
const int stride) {
// hide latency with concurrency
accscalar_t x_mean[PARALLEL_LOADS];
accscalar_t m_2_n[PARALLEL_LOADS];
int count[PARALLEL_LOADS];
#pragma unroll
for (int i = 0; i < PARALLEL_LOADS; i++) {
x_mean[i] = accscalar_t(0);
m_2_n[i] = accscalar_t(0);
    count[i] = 0;
}
// tensor dimension (m,c)
// loop along m dimension
int inner_loop_stride = blockDim.y * gridDim.y;
// offset along m dimension
int m_offset = blockIdx.y * blockDim.y + threadIdx.y;
int c_offset = blockIdx.x * blockDim.x + threadIdx.x;
int loop_count = 1 + (reduction_size - 1) / (inner_loop_stride * PARALLEL_LOADS);
int address_base = m_offset * stride + c_offset;
int address_increment = inner_loop_stride * stride;
for (int i = 0; i < loop_count; i++) {
accscalar_t x_math[PARALLEL_LOADS];
accscalar_t x_count_inv[PARALLEL_LOADS];
accscalar_t is_valid[PARALLEL_LOADS];
// load multiple data in
#pragma unroll
for (int j = 0; j < PARALLEL_LOADS; j++) {
if (c_offset < stride && m_offset < reduction_size) {
x_math[j] = input[address_base];
count[j]++;
x_count_inv[j] = accscalar_t(1) / count[j];
is_valid[j] = accscalar_t(1);
} else {
x_math[j] = accscalar_t(0);
x_count_inv[j] = accscalar_t(0);
is_valid[j] = accscalar_t(0);
}
m_offset += inner_loop_stride;
address_base += address_increment;
}
// calculate mean/m2n with welford
#pragma unroll
for (int j = 0; j < PARALLEL_LOADS; j++) {
accscalar_t delta0 = x_math[j] - x_mean[j];
x_mean[j] += delta0 * x_count_inv[j];
accscalar_t delta1 = x_math[j] - x_mean[j];
m_2_n[j] += delta0 * delta1 * is_valid[j];
}
}
// thread reduction to accumulate mean/m_2_n/count between PARALLEL_LOADS
#pragma unroll
for (int j = 1; j < PARALLEL_LOADS; j++) {
welford_merge_element(count[0], x_mean[0], m_2_n[0], count[j], x_mean[j], m_2_n[j]);
}
// release x_mean / m_2_n
auto mean_th = x_mean[0];
auto m2_th = m_2_n[0];
auto count_th = count[0];
// block-wise reduction with shared memory (since reduction cannot be done within a warp)
static __shared__ accscalar_t shmem_mean[MAX_BLOCK_SIZE];
static __shared__ accscalar_t shmem_m2n[MAX_BLOCK_SIZE];
static __shared__ int shmem_count[MAX_BLOCK_SIZE];
welford_merge_block_vertical(count_th, mean_th, m2_th, shmem_count, shmem_mean, shmem_m2n);
  // grid reduction if needed (a cooperative launch was used here in the first place)
if (gridDim.y > 1) {
volatile accscalar_t* staging_mean = staging_data;
volatile accscalar_t* staging_m2n = &staging_data[stride*gridDim.y];
volatile int* staging_count = reinterpret_cast<volatile int*>(&staging_m2n[stride*gridDim.y]);
address_base = c_offset + blockIdx.y * stride;
// write data to staging_data;
if (threadIdx.y == 0 && c_offset < stride) {
staging_mean[address_base] = mean_th;
staging_m2n[address_base] = m2_th;
staging_count[address_base] = count_th;
}
__threadfence();
    __syncthreads(); // ensure writes to staging_ are visible to all blocks
__shared__ bool is_last_block_done;
// mark block done
if (threadIdx.x == 0 && threadIdx.y == 0) {
int old = atomicAdd(&semaphores[blockIdx.x], 1);
is_last_block_done = (old == (gridDim.y-1));
}
__syncthreads();
// check that all data is now available in global memory
if (is_last_block_done) {
count_th = 0;
mean_th = accscalar_t(0.0);
m2_th = accscalar_t(0.0);
for (int y = threadIdx.y; y < gridDim.y; y += blockDim.y) {
address_base = c_offset + y * stride;
int num_new = c_offset < stride ? staging_count[address_base] : 0;
accscalar_t mean_new = c_offset < stride ? staging_mean[address_base] : accscalar_t(0.0);
accscalar_t m2n_new = c_offset < stride ? staging_m2n[address_base] : accscalar_t(0.0);
welford_merge_element(count_th, mean_th, m2_th, num_new, mean_new, m2n_new);
}
welford_merge_block_vertical(count_th, mean_th, m2_th, shmem_count, shmem_mean, shmem_m2n);
if (threadIdx.y == 0 && c_offset < stride) {
out_mean[c_offset] = static_cast<outscalar_t>(mean_th);
out_var_biased[c_offset] = static_cast<outscalar_t>(m2_th / count_th);
}
}
} else {
if (blockIdx.y == 0 && threadIdx.y == 0 && c_offset < stride) {
out_mean[c_offset] = static_cast<outscalar_t>(mean_th);
out_var_biased[c_offset] = static_cast<outscalar_t>(m2_th / count_th);
}
}
}
// parallel welford kernel to further reduce mean / biased_var
// into mean / unbiased_var / inv_std across multiple processes.
template <typename scalar_t>
__global__ void welford_kernel_parallel(
const scalar_t* __restrict__ mean,
const scalar_t* __restrict__ var_biased,
scalar_t* __restrict__ out_mean,
scalar_t* __restrict__ out_var,
scalar_t* __restrict__ inv_std,
const int world_size,
const int feature_size,
const float eps,
const int numel) {
for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < feature_size; i += gridDim.x * blockDim.x) {
// load data;
int address = i;
scalar_t x_mean = 0;
scalar_t m_2_n = 0;
int count = 0;
for (int j = 0; j < world_size; j++) {
welford_merge_element(count, x_mean, m_2_n, numel, mean[address], var_biased[address]*numel);
address += feature_size;
}
out_mean[i] = x_mean;
out_var[i] = m_2_n/ (count - 1);
inv_std[i] = scalar_t(1) / sqrt(m_2_n/count + eps);
}
}
// elementwise BN kernel
template <
typename scalar_t,
typename accscalar_t,
typename layerscalar_t,
int PARALLEL_LOADS>
__global__ void batchnorm_forward_c_last_kernel(
const scalar_t* __restrict__ input,
const accscalar_t* __restrict__ mean,
const accscalar_t* __restrict__ inv_std,
const layerscalar_t* __restrict__ weight,
const layerscalar_t* __restrict__ shift,
scalar_t* __restrict__ out,
const int reduction_size,
const int stride) {
// tensor dimension (m,c)
// loop along m dimension
int inner_loop_stride = blockDim.y * gridDim.y;
// offset along m dimension
int m_offset = blockIdx.y * blockDim.y + threadIdx.y;
int c_offset = blockIdx.x * blockDim.x + threadIdx.x;
auto m_c = mean[c_offset];
auto inv_std_c = static_cast<accscalar_t>(inv_std[c_offset]);
auto w_c = weight == NULL ? accscalar_t(1.0) : static_cast<accscalar_t>(weight[c_offset]);
auto s_c = shift == NULL ? accscalar_t(0.0) : static_cast<accscalar_t>(shift[c_offset]);
int loop_count = 1 + (reduction_size - 1) / (inner_loop_stride * PARALLEL_LOADS);
int address_base = m_offset * stride + c_offset;
int address_increment = inner_loop_stride * stride;
for (int i = 0; i < loop_count; i++) {
#pragma unroll
for (int j = 0; j < PARALLEL_LOADS; j++) {
if (c_offset < stride && m_offset < reduction_size) {
out[address_base] = static_cast<scalar_t>(
w_c * (static_cast<accscalar_t>(input[address_base]) - m_c ) * inv_std_c + s_c
);
}
m_offset += inner_loop_stride;
address_base += address_increment;
}
}
}
// batchnorm backward kernel for c last tensor
template
<typename scalar_t,
typename accscalar_t,
typename layerscalar_t,
int PARALLEL_LOADS>
__global__ void reduce_bn_c_last_kernel(
const scalar_t* __restrict__ input,
const scalar_t* __restrict__ grad_output,
const accscalar_t* __restrict__ mean,
const accscalar_t* __restrict__ inv_std,
accscalar_t* __restrict__ mean_dy,
accscalar_t* __restrict__ mean_dy_xmu,
layerscalar_t* __restrict__ grad_weight,
layerscalar_t* __restrict__ grad_bias,
volatile accscalar_t* staging_data,
int* semaphores,
const int reduction_size,
const int stride) {
// hide latency with concurrency
accscalar_t sum_dy[PARALLEL_LOADS];
accscalar_t sum_dy_xmu[PARALLEL_LOADS];
#pragma unroll
for (int i = 0; i < PARALLEL_LOADS; i++) {
sum_dy[i] = accscalar_t(0);
sum_dy_xmu[i] = accscalar_t(0);
}
// tensor dimension (m,c)
// loop along m dimension
int inner_loop_stride = blockDim.y * gridDim.y;
// offset along m dimension
int m_offset = blockIdx.y * blockDim.y + threadIdx.y;
int c_offset = blockIdx.x * blockDim.x + threadIdx.x;
int loop_count = 1 + (reduction_size - 1) / (inner_loop_stride * PARALLEL_LOADS);
int address_base = m_offset * stride + c_offset;
int address_increment = inner_loop_stride * stride;
auto r_mean = mean[c_offset];
auto factor = inv_std[c_offset];
for (int i = 0; i < loop_count; i++) {
accscalar_t x_input[PARALLEL_LOADS];
accscalar_t x_grad_output[PARALLEL_LOADS];
// load multiple data in
#pragma unroll
for (int j = 0; j < PARALLEL_LOADS; j++) {
if (c_offset < stride && m_offset < reduction_size) {
x_input[j] = input[address_base];
x_grad_output[j] = grad_output[address_base];
} else {
x_input[j] = accscalar_t(0);
x_grad_output[j] = accscalar_t(0);
}
m_offset += inner_loop_stride;
address_base += address_increment;
}
// calculate sum_dy / sum_dy_xmu
#pragma unroll
for (int j = 0; j < PARALLEL_LOADS; j++) {
sum_dy[j] += x_grad_output[j];
sum_dy_xmu[j] += x_grad_output[j] * (x_input[j] - r_mean);
}
}
// thread reduction to accumulate sum_dy / sum_dy_xmu between PARALLEL_LOADS
#pragma unroll
for (int j = 1; j < PARALLEL_LOADS; j++) {
sum_dy[0] += sum_dy[j];
sum_dy_xmu[0] += sum_dy_xmu[j];
}
// release array of registers
auto sum_dy_th = sum_dy[0];
auto sum_dy_xmu_th = sum_dy_xmu[0];
// block-wise reduction with shared memory (since reduction cannot be done within a warp)
static __shared__ accscalar_t shmem_sum_dy[MAX_BLOCK_SIZE];
static __shared__ accscalar_t shmem_sum_dy_xmu[MAX_BLOCK_SIZE];
merge_block_vertical(sum_dy_th, sum_dy_xmu_th, shmem_sum_dy, shmem_sum_dy_xmu);
  // grid reduction if needed (a cooperative launch was used here in the first place)
if (gridDim.y > 1) {
volatile accscalar_t* staging_sum_dy = staging_data;
volatile accscalar_t* staging_sum_dy_xmu = &staging_data[stride*gridDim.y];
address_base = c_offset + blockIdx.y * stride;
// write data to staging_data;
if (threadIdx.y == 0 && c_offset < stride) {
staging_sum_dy[address_base] = sum_dy_th;
staging_sum_dy_xmu[address_base] = sum_dy_xmu_th;
}
__threadfence();
    __syncthreads(); // ensure writes to staging_ are visible to all blocks
__shared__ bool is_last_block_done;
// mark block done
if (threadIdx.x == 0 && threadIdx.y == 0) {
int old = atomicAdd(&semaphores[blockIdx.x], 1);
is_last_block_done = (old == (gridDim.y-1));
}
__syncthreads();
// check that all data is now available in global memory
if (is_last_block_done) {
sum_dy_th = accscalar_t(0.0);
sum_dy_xmu_th = accscalar_t(0.0);
for (int y = threadIdx.y; y < gridDim.y; y += blockDim.y) {
address_base = c_offset + y * stride;
sum_dy_th += (c_offset < stride ? staging_sum_dy[address_base] : accscalar_t(0.0));
sum_dy_xmu_th += (c_offset < stride ? staging_sum_dy_xmu[address_base] : accscalar_t(0.0));
}
merge_block_vertical(sum_dy_th, sum_dy_xmu_th, shmem_sum_dy, shmem_sum_dy_xmu);
if (threadIdx.y == 0 && c_offset < stride) {
if (grad_bias != NULL) {
grad_bias[c_offset] = static_cast<layerscalar_t>(sum_dy_th);
}
if (grad_weight != NULL) {
grad_weight[c_offset] = static_cast<layerscalar_t>(sum_dy_xmu_th * factor);
}
mean_dy[c_offset] = sum_dy_th / reduction_size;
mean_dy_xmu[c_offset] = sum_dy_xmu_th / reduction_size;
}
}
} else {
if (blockIdx.y == 0 && threadIdx.y == 0 && c_offset < stride) {
if (grad_bias != NULL) {
grad_bias[c_offset] = static_cast<layerscalar_t>(sum_dy_th);
}
if (grad_weight != NULL) {
grad_weight[c_offset] = static_cast<layerscalar_t>(sum_dy_xmu_th * factor);
}
mean_dy[c_offset] = sum_dy_th / reduction_size;
mean_dy_xmu[c_offset] = sum_dy_xmu_th / reduction_size;
}
}
}
// elementwise BN kernel
template <
typename scalar_t,
typename accscalar_t,
typename layerscalar_t,
int PARALLEL_LOADS>
__global__ void batchnorm_backward_c_last_kernel(
const scalar_t* __restrict__ grad_output,
const scalar_t* __restrict__ input,
const accscalar_t* __restrict__ mean,
const accscalar_t* __restrict__ inv_std,
const layerscalar_t* __restrict__ weight,
const accscalar_t* __restrict__ mean_dy,
const accscalar_t* __restrict__ mean_dy_xmu,
scalar_t* __restrict__ grad_input,
const int reduction_size,
const int stride) {
// tensor dimension (m,c)
// loop along m dimension
int inner_loop_stride = blockDim.y * gridDim.y;
// offset along m dimension
int m_offset = blockIdx.y * blockDim.y + threadIdx.y;
int c_offset = blockIdx.x * blockDim.x + threadIdx.x;
auto m_c = mean[c_offset];
auto m_dy_c = mean_dy[c_offset];
auto factor_1_c = inv_std[c_offset];
auto factor_2_c = (weight == NULL? accscalar_t(1.0) : static_cast<accscalar_t>(weight[c_offset])) * factor_1_c;
factor_1_c = factor_1_c * factor_1_c * mean_dy_xmu[c_offset];
int loop_count = 1 + (reduction_size - 1) / (inner_loop_stride * PARALLEL_LOADS);
int address_base = m_offset * stride + c_offset;
int address_increment = inner_loop_stride * stride;
for (int i = 0; i < loop_count; i++) {
#pragma unroll
for (int j = 0; j < PARALLEL_LOADS; j++) {
if (c_offset < stride && m_offset < reduction_size) {
grad_input[address_base] = static_cast<scalar_t>(
(static_cast<accscalar_t>(grad_output[address_base]) - m_dy_c -
(static_cast<accscalar_t>(input[address_base]) - m_c) * factor_1_c)
* factor_2_c);
}
m_offset += inner_loop_stride;
address_base += address_increment;
}
}
}
std::vector<at::Tensor> welford_mean_var_CUDA(const at::Tensor input) {
const auto batch_size = input.size(0);
const auto feature_size = input.size(1);
auto space_size = get_tensor_spatial_size(input);
auto scalar_type = promote_scalartype(input);
at::Tensor out_var_biased = at::empty({feature_size}, input.options().dtype(scalar_type));
at::Tensor out_mean = at::empty({feature_size}, input.options().dtype(scalar_type));
int block_y = min(h_last_pow2(batch_size), int(MAX_BLOCK_SIZE / 32));
int block_x = max(1, min(MAX_BLOCK_SIZE / block_y, h_last_pow2(space_size)));
const dim3 block(block_x, block_y);
const dim3 grid(feature_size);
auto stream = at::cuda::getCurrentCUDAStream();
{
using namespace at;
DISPATCH_FLOAT_AND_HALF(input.scalar_type(), 0, "welford_mean_var_kernel",
using accscalar_t = at::acc_type<scalar_t_0, true>;
welford_kernel<scalar_t_0, accscalar_t, accscalar_t><<<grid, block, 0, stream>>>(
input.data<scalar_t_0>(),
out_mean.data<accscalar_t>(),
out_var_biased.data<accscalar_t>(),
batch_size,
feature_size,
space_size);
);
}
return {out_mean, out_var_biased};
}
at::Tensor batchnorm_forward_CUDA(
const at::Tensor input,
const at::Tensor mean,
const at::Tensor inv_std,
const at::optional<at::Tensor> weight,
const at::optional<at::Tensor> shift) {
const auto batch_size = input.size(0);
const auto feature_size = input.size(1);
at::Tensor out = at::empty_like(input);
auto space_size = get_tensor_spatial_size(input);
int block_x = max(32, min(MAX_BLOCK_SIZE, h_last_pow2(space_size)/4));
int block_y = max(1, min(MAX_BLOCK_SIZE/block_x, h_last_pow2(batch_size)/4));
const dim3 block(block_x, block_y);
int grid_z = max(1, min(65535, h_last_pow2(space_size)/4/block_x));
int batch_group_size = max(1, min(65535, h_last_pow2(batch_size)/block_y));
const dim3 grid(feature_size, batch_group_size, grid_z);
auto stream = at::cuda::getCurrentCUDAStream();
if (input.scalar_type() == at::ScalarType::Half
&& weight.has_value() &&
weight.value().scalar_type() == at::ScalarType::Float) {
using namespace at;
DISPATCH_FLOAT_AND_HALF(input.scalar_type(), 0, "batchnorm_forward",
using accscalar_t = at::acc_type<scalar_t_0, true>;
batchnorm_forward_kernel<scalar_t_0, accscalar_t, accscalar_t><<<grid, block, 0, stream>>>(
input.data<scalar_t_0>(),
mean.data<accscalar_t>(),
inv_std.data<accscalar_t>(),
weight.has_value() ? weight.value().data<accscalar_t>() : NULL,
shift.has_value() ? shift.value().data<accscalar_t>() : NULL,
out.data<scalar_t_0>(),
space_size,
batch_size);
);
} else {
if (weight.has_value()) {
AT_CHECK(input.scalar_type() == weight.value().scalar_type(),
"input.scalar_type() is not supported with weight.scalar_type()");
}
using namespace at;
DISPATCH_FLOAT_AND_HALF(input.scalar_type(), 0, "batchnorm_forward",
using accscalar_t = at::acc_type<scalar_t_0, true>;
batchnorm_forward_kernel<scalar_t_0, accscalar_t, scalar_t_0><<<grid, block, 0, stream>>>(
input.data<scalar_t_0>(),
mean.data<accscalar_t>(),
inv_std.data<accscalar_t>(),
weight.has_value() ? weight.value().data<scalar_t_0>() : NULL,
shift.has_value() ? shift.value().data<scalar_t_0>() : NULL,
out.data<scalar_t_0>(),
space_size,
batch_size);
);
}
return out;
}
std::vector<at::Tensor> reduce_bn_CUDA(
const at::Tensor grad_output,
const at::Tensor input,
const at::Tensor mean,
const at::Tensor inv_std,
const at::optional<at::Tensor> weight)
{
const auto batch_size = input.size(0);
const auto feature_size = input.size(1);
auto scalar_type = promote_scalartype(input);
at::Tensor mean_dy = at::empty({feature_size}, mean.options());
at::Tensor mean_dy_xmu = at::empty({feature_size}, mean.options());
at::Tensor grad_weight;
at::Tensor grad_bias;
if (weight.has_value()) {
grad_weight = at::empty({feature_size}, weight.value().options());
grad_bias = at::empty({feature_size}, weight.value().options());
} else {
grad_weight = at::empty({0}, mean.options());
grad_bias = at::empty({0}, mean.options());
}
auto space_size = get_tensor_spatial_size(input);
int block_y = min(h_last_pow2(batch_size), int(MAX_BLOCK_SIZE/ 32));
int block_x = max(1, min(MAX_BLOCK_SIZE/ block_y, h_last_pow2(space_size)));
const dim3 block(block_x, block_y);
const dim3 grid(feature_size);
auto stream = at::cuda::getCurrentCUDAStream();
if (input.scalar_type() == at::ScalarType::Half
&& weight.has_value() &&
weight.value().scalar_type() == at::ScalarType::Float) {
using namespace at;
DISPATCH_FLOAT_AND_HALF(input.scalar_type(), 0, "batchnorm_backward_reduce",
using accscalar_t = at::acc_type<scalar_t_0, true>;
reduce_bn_kernel<scalar_t_0, accscalar_t, accscalar_t><<<grid, block, 0, stream>>>(
input.data<scalar_t_0>(),
grad_output.data<scalar_t_0>(),
mean.data<accscalar_t>(),
inv_std.data<accscalar_t>(),
mean_dy.data<accscalar_t>(),
mean_dy_xmu.data<accscalar_t>(),
weight.has_value() ? grad_weight.data<accscalar_t>() : NULL,
weight.has_value() ? grad_bias.data<accscalar_t>() : NULL,
batch_size,
feature_size,
space_size);
);
} else {
if (weight.has_value()) {
AT_CHECK(input.scalar_type() == weight.value().scalar_type(),
"input.scalar_type() is not supported with weight.scalar_type()");
}
using namespace at;
DISPATCH_FLOAT_AND_HALF(input.scalar_type(), 0, "batchnorm_backward_reduce",
using accscalar_t = at::acc_type<scalar_t_0, true>;
reduce_bn_kernel<scalar_t_0, accscalar_t, scalar_t_0><<<grid, block, 0, stream>>>(
input.data<scalar_t_0>(),
grad_output.data<scalar_t_0>(),
mean.data<accscalar_t>(),
inv_std.data<accscalar_t>(),
mean_dy.data<accscalar_t>(),
mean_dy_xmu.data<accscalar_t>(),
weight.has_value() ? grad_weight.data<scalar_t_0>() : NULL,
weight.has_value() ? grad_bias.data<scalar_t_0>() : NULL,
batch_size,
feature_size,
space_size);
);
}
return {mean_dy, mean_dy_xmu, grad_weight, grad_bias};
}
at::Tensor batchnorm_backward_CUDA(
const at::Tensor grad_output,
const at::Tensor input,
const at::Tensor mean,
const at::Tensor inv_std,
const at::optional<at::Tensor> weight,
const at::Tensor mean_dy,
const at::Tensor mean_dy_xmu) {
const auto batch_size = input.size(0);
const auto feature_size = input.size(1);
at::Tensor grad_input = at::empty_like(input);
auto space_size = get_tensor_spatial_size(input);
int block_x = max(32, min(MAX_BLOCK_SIZE, h_last_pow2(space_size)/4));
int block_y = max(1, min(MAX_BLOCK_SIZE/block_x, h_last_pow2(batch_size)/4));
const dim3 block(block_x, block_y);
int grid_z = max(1, min(65535, h_last_pow2(space_size)/4/block_x));
int batch_group_size = max(1, min(65535, h_last_pow2(batch_size)/block_y));
const dim3 grid(feature_size, batch_group_size, grid_z);
auto stream = at::cuda::getCurrentCUDAStream();
if (input.scalar_type() == at::ScalarType::Half
&& weight.has_value() &&
weight.value().scalar_type() == at::ScalarType::Float) {
using namespace at;
DISPATCH_FLOAT_AND_HALF(input.scalar_type(), 0, "batchnorm_backward",
using accscalar_t = at::acc_type<scalar_t_0, true>;
batchnorm_backward_kernel<scalar_t_0, accscalar_t, accscalar_t><<<grid, block, 0, stream>>>(
grad_output.data<scalar_t_0>(),
input.data<scalar_t_0>(),
mean.data<accscalar_t>(),
inv_std.data<accscalar_t>(),
weight.has_value() ? weight.value().data<accscalar_t>() : NULL,
mean_dy.data<accscalar_t>(),
mean_dy_xmu.data<accscalar_t>(),
grad_input.data<scalar_t_0>(),
space_size,
batch_size);
);
} else {
if (weight.has_value()) {
AT_CHECK(input.scalar_type() == weight.value().scalar_type(),
"input.scalar_type() is not supported with weight.scalar_type()");
}
using namespace at;
DISPATCH_FLOAT_AND_HALF(input.scalar_type(), 0, "batchnorm_backward",
using accscalar_t = at::acc_type<scalar_t_0, true>;
batchnorm_backward_kernel<scalar_t_0, accscalar_t, scalar_t_0><<<grid, block, 0, stream>>>(
grad_output.data<scalar_t_0>(),
input.data<scalar_t_0>(),
mean.data<accscalar_t>(),
inv_std.data<accscalar_t>(),
weight.has_value() ? weight.value().data<scalar_t_0>() : NULL,
mean_dy.data<accscalar_t>(),
mean_dy_xmu.data<accscalar_t>(),
grad_input.data<scalar_t_0>(),
space_size,
batch_size);
);
}
return grad_input;
}
std::vector<at::Tensor> welford_parallel_CUDA(const at::Tensor mean_feature_nodes,
const at::Tensor var_biased,
int numel,
const float eps) {
const auto world_size = mean_feature_nodes.size(0);
const auto feature_size = mean_feature_nodes.size(1);
at::Tensor out_var = at::empty({feature_size}, var_biased.options());
at::Tensor inv_std = at::empty_like(out_var);
at::Tensor out_mean = at::empty_like(out_var);
// TODO(jie): tile this for memory coalescing!
const int block = std::min(h_last_pow2(feature_size), MAX_BLOCK_SIZE);
const int grid = std::max<int>(1, feature_size / block);
auto stream = at::cuda::getCurrentCUDAStream();
{
using namespace at;
DISPATCH_FLOAT_AND_HALF(mean_feature_nodes.scalar_type(), 0, "welford_parallel_kernel",
welford_kernel_parallel<scalar_t_0><<<grid, block, 0, stream>>>(
mean_feature_nodes.data<scalar_t_0>(),
var_biased.data<scalar_t_0>(),
out_mean.data<scalar_t_0>(),
out_var.data<scalar_t_0>(),
inv_std.data<scalar_t_0>(),
world_size,
feature_size,
eps,
numel);
);
}
return {out_mean, out_var, inv_std};
}
std::vector<at::Tensor> welford_mean_var_c_last_CUDA(const at::Tensor input) {
const auto stride = input.size(input.ndimension()-1);
const auto reduction_size = input.numel() / stride;
auto scalar_type = promote_scalartype(input);
auto option = input.options().dtype(scalar_type);
at::Tensor out_var_biased = at::empty({stride}, option);
at::Tensor out_mean = at::empty({stride}, option);
dim3 block;
dim3 grid;
flexible_launch_configs(reduction_size, stride, block, grid, true);
at::Tensor staging_data;
at::Tensor semaphores;
if (grid.y > 1) {
staging_data = at::empty({4*stride*grid.y}, option);
semaphores = at::zeros({grid.x}, input.options().dtype(at::kInt));
}
auto stream = at::cuda::getCurrentCUDAStream();
{
using namespace at;
DISPATCH_FLOAT_AND_HALF(input.scalar_type(), 0, "welford_mean_var_c_last",
using accscalar_t = at::acc_type<scalar_t_0, true>;
accscalar_t* staging_data_ptr = grid.y > 1 ? staging_data.data<accscalar_t>() : nullptr;
int* semaphores_ptr = grid.y > 1 ? semaphores.data<int>() : nullptr;
welford_kernel_c_last<scalar_t_0, accscalar_t, accscalar_t, ELEMENTS_PER_ITER>
<<<grid, block, 0, stream>>>(
input.data<scalar_t_0>(),
out_mean.data<accscalar_t>(),
out_var_biased.data<accscalar_t>(),
staging_data_ptr,
semaphores_ptr,
reduction_size,
stride);
);
}
return {out_mean, out_var_biased};
}
at::Tensor batchnorm_forward_c_last_CUDA(
const at::Tensor input,
const at::Tensor mean,
const at::Tensor inv_std,
const at::optional<at::Tensor> weight,
const at::optional<at::Tensor> shift) {
const auto stride = input.size(input.ndimension()-1);
const auto reduction_size = input.numel() / stride;
at::Tensor out = at::empty_like(input);
dim3 block;
dim3 grid;
flexible_launch_configs(reduction_size, stride, block, grid);
auto stream = at::cuda::getCurrentCUDAStream();
if (input.scalar_type() == at::ScalarType::Half
&& weight.has_value() && weight.value().scalar_type() == at::ScalarType::Float) {
using namespace at;
DISPATCH_FLOAT_AND_HALF(input.scalar_type(), 0, "batchnorm_forward",
using accscalar_t = at::acc_type<scalar_t_0, true>;
batchnorm_forward_c_last_kernel<scalar_t_0, accscalar_t, accscalar_t, ELEMENTS_PER_ITER>
<<<grid, block, 0, stream>>>(
input.data<scalar_t_0>(),
mean.data<accscalar_t>(),
inv_std.data<accscalar_t>(),
weight.has_value() ? weight.value().data<accscalar_t>() : NULL,
shift.has_value() ? shift.value().data<accscalar_t>(): NULL,
out.data<scalar_t_0>(),
reduction_size,
stride);
);
} else {
if (weight.has_value()) {
AT_CHECK(input.scalar_type() == weight.value().scalar_type(),
"input.scalar_type() is not supported with weight.scalar_type()");
}
using namespace at;
DISPATCH_FLOAT_AND_HALF(input.scalar_type(), 0, "batchnorm_forward",
using accscalar_t = at::acc_type<scalar_t_0, true>;
batchnorm_forward_c_last_kernel<scalar_t_0, accscalar_t, scalar_t_0, ELEMENTS_PER_ITER>
<<<grid, block, 0, stream>>>(
input.data<scalar_t_0>(),
mean.data<accscalar_t>(),
inv_std.data<accscalar_t>(),
weight.has_value() ? weight.value().data<scalar_t_0>() : NULL,
shift.has_value() ? shift.value().data<scalar_t_0>(): NULL,
out.data<scalar_t_0>(),
reduction_size,
stride);
);
}
return out;
}
std::vector<at::Tensor> reduce_bn_c_last_CUDA(
const at::Tensor grad_output,
const at::Tensor input,
const at::Tensor mean,
const at::Tensor inv_std,
const at::optional<at::Tensor> weight) {
const auto stride = input.size(input.ndimension()-1);
const auto reduction_size = input.numel() / stride;
at::Tensor mean_dy = at::empty({stride}, mean.options());
at::Tensor mean_dy_xmu = at::empty({stride}, mean.options());
at::Tensor grad_weight;
at::Tensor grad_bias;
if (weight.has_value()) {
grad_weight = at::empty({stride}, weight.value().options());
grad_bias = at::empty({stride}, weight.value().options());
} else {
// because I cannot return an uninitialized at::Tensor
grad_weight = at::empty({0}, mean.options());
grad_bias = at::empty({0}, mean.options());
}
dim3 block;
dim3 grid;
flexible_launch_configs(reduction_size, stride, block, grid, true);
at::Tensor staging_data;
at::Tensor semaphores;
if (grid.y > 1) {
staging_data = at::empty({2*stride*grid.y}, mean.options());
semaphores = at::zeros({grid.x}, input.options().dtype(at::kInt));
}
auto stream = at::cuda::getCurrentCUDAStream();
if (input.scalar_type() == at::ScalarType::Half
&& weight.has_value()
&& weight.value().scalar_type() == at::ScalarType::Float) {
using namespace at;
DISPATCH_FLOAT_AND_HALF(input.scalar_type(), 0, "batchnorm_backward_reduce",
using accscalar_t = at::acc_type<scalar_t_0, true>;
accscalar_t* staging_data_ptr = grid.y > 1 ? staging_data.data<accscalar_t>() : nullptr;
int* semaphores_ptr = grid.y > 1 ? semaphores.data<int>() : nullptr;
reduce_bn_c_last_kernel<scalar_t_0, accscalar_t, accscalar_t, ELEMENTS_PER_ITER>
<<<grid, block, 0, stream>>>(
input.data<scalar_t_0>(),
grad_output.data<scalar_t_0>(),
mean.data<accscalar_t>(),
inv_std.data<accscalar_t>(),
mean_dy.data<accscalar_t>(),
mean_dy_xmu.data<accscalar_t>(),
weight.has_value() ? grad_weight.data<accscalar_t>() : NULL,
      weight.has_value() ? grad_bias.data<accscalar_t>() : NULL,
staging_data_ptr,
semaphores_ptr,
reduction_size,
stride);
);
} else {
if (weight.has_value()) {
AT_CHECK(input.scalar_type() == weight.value().scalar_type(),
"input.scalar_type() is not supported with weight.scalar_type()");
}
using namespace at;
DISPATCH_FLOAT_AND_HALF(input.scalar_type(), 0, "batchnorm_backward_reduce",
using accscalar_t = at::acc_type<scalar_t_0, true>;
accscalar_t* staging_data_ptr = grid.y > 1 ? staging_data.data<accscalar_t>() : nullptr;
int* semaphores_ptr = grid.y > 1 ? semaphores.data<int>() : nullptr;
reduce_bn_c_last_kernel<scalar_t_0, accscalar_t, scalar_t_0, ELEMENTS_PER_ITER>
<<<grid, block, 0, stream>>>(
input.data<scalar_t_0>(),
grad_output.data<scalar_t_0>(),
mean.data<accscalar_t>(),
inv_std.data<accscalar_t>(),
mean_dy.data<accscalar_t>(),
mean_dy_xmu.data<accscalar_t>(),
weight.has_value() ? grad_weight.data<scalar_t_0>() : NULL,
      weight.has_value() ? grad_bias.data<scalar_t_0>() : NULL,
staging_data_ptr,
semaphores_ptr,
reduction_size,
stride);
);
}
return {mean_dy, mean_dy_xmu, grad_weight, grad_bias};
}
at::Tensor batchnorm_backward_c_last_CUDA(
const at::Tensor grad_output,
const at::Tensor input,
const at::Tensor mean,
const at::Tensor inv_std,
const at::optional<at::Tensor> weight,
const at::Tensor mean_dy,
const at::Tensor mean_dy_xmu) {
const auto stride = input.size(input.ndimension()-1);
const auto reduction_size = input.numel() / stride;
at::Tensor grad_input = at::empty_like(input);
dim3 block;
dim3 grid;
flexible_launch_configs(reduction_size, stride, block, grid);
auto stream = at::cuda::getCurrentCUDAStream();
if (input.scalar_type() == at::ScalarType::Half
&& weight.has_value() && weight.value().scalar_type() == at::ScalarType::Float) {
using namespace at;
    DISPATCH_FLOAT_AND_HALF(input.scalar_type(), 0, "batchnorm_backward",
using accscalar_t = at::acc_type<scalar_t_0, true>;
batchnorm_backward_c_last_kernel<scalar_t_0, accscalar_t, accscalar_t, ELEMENTS_PER_ITER>
<<<grid, block, 0, stream>>>(
grad_output.data<scalar_t_0>(),
input.data<scalar_t_0>(),
mean.data<accscalar_t>(),
inv_std.data<accscalar_t>(),
weight.has_value() ? weight.value().data<accscalar_t>() : NULL,
mean_dy.data<accscalar_t>(),
mean_dy_xmu.data<accscalar_t>(),
grad_input.data<scalar_t_0>(),
reduction_size,
stride);
);
} else {
if (weight.has_value()) {
AT_CHECK(input.scalar_type() == weight.value().scalar_type(),
"input.scalar_type() is not supported with weight.scalar_type()");
}
using namespace at;
    DISPATCH_FLOAT_AND_HALF(input.scalar_type(), 0, "batchnorm_backward",
using accscalar_t = at::acc_type<scalar_t_0, true>;
batchnorm_backward_c_last_kernel<scalar_t_0, accscalar_t, scalar_t_0, ELEMENTS_PER_ITER>
<<<grid, block, 0, stream>>>(
grad_output.data<scalar_t_0>(),
input.data<scalar_t_0>(),
mean.data<accscalar_t>(),
inv_std.data<accscalar_t>(),
weight.has_value() ? weight.value().data<scalar_t_0>() : NULL,
mean_dy.data<accscalar_t>(),
mean_dy_xmu.data<accscalar_t>(),
grad_input.data<scalar_t_0>(),
reduction_size,
stride);
);
}
return grad_input;
}
|
907dd0f9319d3d5f67f6b57a02dc54c31ceb46a1.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* GridTools
*
* Copyright (c) 2014-2021, ETH Zurich
* All rights reserved.
*
* Please, refer to the LICENSE file in the root directory.
* SPDX-License-Identifier: BSD-3-Clause
*/
#include <gtest/gtest.h>
#include <gridtools/storage/builder.hpp>
#include <gridtools/storage/gpu.hpp>
__global__ void check_s1(int *s) {
assert(s[0] == 10);
assert(s[1] == 20);
s[0] = 30;
s[1] = 40;
}
__global__ void check_s2(int *s) {
assert(s[0] == 100);
assert(s[1] == 200);
s[0] = 300;
s[1] = 400;
}
TEST(StorageCudaTest, Simple) {
auto builder = gridtools::storage::builder<gridtools::storage::gpu>.type<int>().dimensions(2);
// create two storages
auto s1 = builder();
auto s2 = builder();
// write some values
s1->host_view()(0) = 10;
s1->host_view()(1) = 20;
s2->host_view()(0) = 100;
s2->host_view()(1) = 200;
    // launch device kernels that assert the copied values and then overwrite them
hipLaunchKernelGGL(( check_s1), dim3(1), dim3(1), 0, 0, s1->get_target_ptr());
hipLaunchKernelGGL(( check_s2), dim3(1), dim3(1), 0, 0, s2->get_target_ptr());
// check values
EXPECT_EQ(s1->host_view()(1), 40);
EXPECT_EQ(s1->host_view()(0), 30);
EXPECT_EQ(s2->host_view()(1), 400);
EXPECT_EQ(s2->host_view()(0), 300);
}
| 907dd0f9319d3d5f67f6b57a02dc54c31ceb46a1.cu | /*
* GridTools
*
* Copyright (c) 2014-2021, ETH Zurich
* All rights reserved.
*
* Please, refer to the LICENSE file in the root directory.
* SPDX-License-Identifier: BSD-3-Clause
*/
#include <gtest/gtest.h>
#include <gridtools/storage/builder.hpp>
#include <gridtools/storage/gpu.hpp>
__global__ void check_s1(int *s) {
assert(s[0] == 10);
assert(s[1] == 20);
s[0] = 30;
s[1] = 40;
}
__global__ void check_s2(int *s) {
assert(s[0] == 100);
assert(s[1] == 200);
s[0] = 300;
s[1] = 400;
}
TEST(StorageCudaTest, Simple) {
auto builder = gridtools::storage::builder<gridtools::storage::gpu>.type<int>().dimensions(2);
// create two storages
auto s1 = builder();
auto s2 = builder();
// write some values
s1->host_view()(0) = 10;
s1->host_view()(1) = 20;
s2->host_view()(0) = 100;
s2->host_view()(1) = 200;
    // launch device kernels that assert the copied values and then overwrite them
check_s1<<<1, 1>>>(s1->get_target_ptr());
check_s2<<<1, 1>>>(s2->get_target_ptr());
// check values
EXPECT_EQ(s1->host_view()(1), 40);
EXPECT_EQ(s1->host_view()(0), 30);
EXPECT_EQ(s2->host_view()(1), 400);
EXPECT_EQ(s2->host_view()(0), 300);
}
|
00f2906a71ca98cf65acb0b25122c2ec6b9e57e2.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/**
* Copyright 2022 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <algorithm>
#include "csr_sparse_matrix_to_dense.cuh"
#include "include/hip/hip_fp16.h"
#include "plugin/device/gpu/kernel/cuda_impl/cuda_ops/util.cuh"
#include "plugin/device/gpu/kernel/cuda_impl/cuda_ops/complex.h"
template <typename T, typename S>
__global__ void CSRSparseMatrixToDenseKernel(const T *dense_shape_addr, T *batch_ptr_addr, T *row_ptr_addr,
T *col_indices_addr, S *values_addr, S *output, size_t ndim, size_t rows) {
for (size_t i = blockIdx.x * blockDim.x + threadIdx.x; i < rows; i += blockDim.x * gridDim.x) {
T cols = dense_shape_addr[ndim - 1];
T batch_rows = dense_shape_addr[ndim - 2];
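    // row_ptr_addr is expected to hold batch_rows + 1 CSR row pointers per batch, so
    // i / (batch_rows + 1) recovers the batch index and (i - batch_index) maps the
    // row-pointer index to the flattened dense row; the extra boundary entry of each
    // batch yields a non-positive nnz below and is skipped.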
int batch_index = i / (batch_rows + 1);
T nnz = row_ptr_addr[i + 1] - row_ptr_addr[i];
for (T j = 0; j < nnz; ++j) {
T index = batch_ptr_addr[batch_index] + row_ptr_addr[i] + j;
S value = values_addr[index];
T col_index = col_indices_addr[index];
T output_index = (i - batch_index) * cols + col_index;
output[output_index] += value;
}
}
}
template <typename T, typename S>
void CalCSRSparseMatrixToDense(const T *dense_shape_addr, T *batch_ptr_addr, T *row_ptr_addr, T *col_indices_addr,
S *values_addr, S *output, size_t ndim, size_t rows, size_t nums,
hipStream_t cuda_stream) {
hipMemsetAsync(output, 0, nums * sizeof(S), cuda_stream);
hipLaunchKernelGGL(( CSRSparseMatrixToDenseKernel), dim3(GET_BLOCKS(rows)), dim3(GET_THREADS), 0, cuda_stream,
dense_shape_addr, batch_ptr_addr, row_ptr_addr, col_indices_addr, values_addr, output, ndim, rows);
return;
}
template CUDA_LIB_EXPORT void CalCSRSparseMatrixToDense<int, half>(const int *dense_shape_addr, int *batch_ptr_addr,
int *row_ptr_addr, int *col_indices_addr,
half *values_addr, half *output, size_t ndim,
size_t rows, size_t nums, hipStream_t cuda_stream);
template CUDA_LIB_EXPORT void CalCSRSparseMatrixToDense<int, float>(const int *dense_shape_addr, int *batch_ptr_addr,
int *row_ptr_addr, int *col_indices_addr,
float *values_addr, float *output, size_t ndim,
size_t rows, size_t nums, hipStream_t cuda_stream);
template CUDA_LIB_EXPORT void CalCSRSparseMatrixToDense<int, double>(const int *dense_shape_addr, int *batch_ptr_addr,
int *row_ptr_addr, int *col_indices_addr,
double *values_addr, double *output, size_t ndim,
size_t rows, size_t nums,
hipStream_t cuda_stream);
template CUDA_LIB_EXPORT void CalCSRSparseMatrixToDense<int, Complex<float>>(
const int *dense_shape_addr, int *batch_ptr_addr, int *row_ptr_addr, int *col_indices_addr,
Complex<float> *values_addr, Complex<float> *output, size_t ndim, size_t rows, size_t nums, hipStream_t cuda_stream);
template CUDA_LIB_EXPORT void CalCSRSparseMatrixToDense<int, Complex<double>>(
const int *dense_shape_addr, int *batch_ptr_addr, int *row_ptr_addr, int *col_indices_addr,
Complex<double> *values_addr, Complex<double> *output, size_t ndim, size_t rows, size_t nums,
hipStream_t cuda_stream);
template CUDA_LIB_EXPORT void CalCSRSparseMatrixToDense<int64_t, half>(const int64_t *dense_shape_addr,
int64_t *batch_ptr_addr, int64_t *row_ptr_addr,
int64_t *col_indices_addr, half *values_addr,
half *output, size_t ndim, size_t rows,
size_t nums, hipStream_t cuda_stream);
template CUDA_LIB_EXPORT void CalCSRSparseMatrixToDense<int64_t, float>(const int64_t *dense_shape_addr,
int64_t *batch_ptr_addr, int64_t *row_ptr_addr,
int64_t *col_indices_addr, float *values_addr,
float *output, size_t ndim, size_t rows,
size_t nums, hipStream_t cuda_stream);
template CUDA_LIB_EXPORT void CalCSRSparseMatrixToDense<int64_t, double>(const int64_t *dense_shape_addr,
int64_t *batch_ptr_addr, int64_t *row_ptr_addr,
int64_t *col_indices_addr, double *values_addr,
double *output, size_t ndim, size_t rows,
size_t nums, hipStream_t cuda_stream);
template CUDA_LIB_EXPORT void CalCSRSparseMatrixToDense<int64_t, Complex<float>>(
const int64_t *dense_shape_addr, int64_t *batch_ptr_addr, int64_t *row_ptr_addr, int64_t *col_indices_addr,
Complex<float> *values_addr, Complex<float> *output, size_t ndim, size_t rows, size_t nums, hipStream_t cuda_stream);
template CUDA_LIB_EXPORT void CalCSRSparseMatrixToDense<int64_t, Complex<double>>(
const int64_t *dense_shape_addr, int64_t *batch_ptr_addr, int64_t *row_ptr_addr, int64_t *col_indices_addr,
Complex<double> *values_addr, Complex<double> *output, size_t ndim, size_t rows, size_t nums,
hipStream_t cuda_stream);
| 00f2906a71ca98cf65acb0b25122c2ec6b9e57e2.cu | /**
* Copyright 2022 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <algorithm>
#include "csr_sparse_matrix_to_dense.cuh"
#include "include/cuda_fp16.h"
#include "plugin/device/gpu/kernel/cuda_impl/cuda_ops/util.cuh"
#include "plugin/device/gpu/kernel/cuda_impl/cuda_ops/complex.h"
template <typename T, typename S>
__global__ void CSRSparseMatrixToDenseKernel(const T *dense_shape_addr, T *batch_ptr_addr, T *row_ptr_addr,
T *col_indices_addr, S *values_addr, S *output, size_t ndim, size_t rows) {
for (size_t i = blockIdx.x * blockDim.x + threadIdx.x; i < rows; i += blockDim.x * gridDim.x) {
T cols = dense_shape_addr[ndim - 1];
T batch_rows = dense_shape_addr[ndim - 2];
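    // row_ptr_addr is expected to hold batch_rows + 1 CSR row pointers per batch, so
    // i / (batch_rows + 1) recovers the batch index and (i - batch_index) maps the
    // row-pointer index to the flattened dense row; the extra boundary entry of each
    // batch yields a non-positive nnz below and is skipped.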
int batch_index = i / (batch_rows + 1);
T nnz = row_ptr_addr[i + 1] - row_ptr_addr[i];
for (T j = 0; j < nnz; ++j) {
T index = batch_ptr_addr[batch_index] + row_ptr_addr[i] + j;
S value = values_addr[index];
T col_index = col_indices_addr[index];
T output_index = (i - batch_index) * cols + col_index;
output[output_index] += value;
}
}
}
template <typename T, typename S>
void CalCSRSparseMatrixToDense(const T *dense_shape_addr, T *batch_ptr_addr, T *row_ptr_addr, T *col_indices_addr,
S *values_addr, S *output, size_t ndim, size_t rows, size_t nums,
cudaStream_t cuda_stream) {
cudaMemsetAsync(output, 0, nums * sizeof(S), cuda_stream);
CSRSparseMatrixToDenseKernel<<<GET_BLOCKS(rows), GET_THREADS, 0, cuda_stream>>>(
dense_shape_addr, batch_ptr_addr, row_ptr_addr, col_indices_addr, values_addr, output, ndim, rows);
return;
}
template CUDA_LIB_EXPORT void CalCSRSparseMatrixToDense<int, half>(const int *dense_shape_addr, int *batch_ptr_addr,
int *row_ptr_addr, int *col_indices_addr,
half *values_addr, half *output, size_t ndim,
size_t rows, size_t nums, cudaStream_t cuda_stream);
template CUDA_LIB_EXPORT void CalCSRSparseMatrixToDense<int, float>(const int *dense_shape_addr, int *batch_ptr_addr,
int *row_ptr_addr, int *col_indices_addr,
float *values_addr, float *output, size_t ndim,
size_t rows, size_t nums, cudaStream_t cuda_stream);
template CUDA_LIB_EXPORT void CalCSRSparseMatrixToDense<int, double>(const int *dense_shape_addr, int *batch_ptr_addr,
int *row_ptr_addr, int *col_indices_addr,
double *values_addr, double *output, size_t ndim,
size_t rows, size_t nums,
cudaStream_t cuda_stream);
template CUDA_LIB_EXPORT void CalCSRSparseMatrixToDense<int, Complex<float>>(
const int *dense_shape_addr, int *batch_ptr_addr, int *row_ptr_addr, int *col_indices_addr,
Complex<float> *values_addr, Complex<float> *output, size_t ndim, size_t rows, size_t nums, cudaStream_t cuda_stream);
template CUDA_LIB_EXPORT void CalCSRSparseMatrixToDense<int, Complex<double>>(
const int *dense_shape_addr, int *batch_ptr_addr, int *row_ptr_addr, int *col_indices_addr,
Complex<double> *values_addr, Complex<double> *output, size_t ndim, size_t rows, size_t nums,
cudaStream_t cuda_stream);
template CUDA_LIB_EXPORT void CalCSRSparseMatrixToDense<int64_t, half>(const int64_t *dense_shape_addr,
int64_t *batch_ptr_addr, int64_t *row_ptr_addr,
int64_t *col_indices_addr, half *values_addr,
half *output, size_t ndim, size_t rows,
size_t nums, cudaStream_t cuda_stream);
template CUDA_LIB_EXPORT void CalCSRSparseMatrixToDense<int64_t, float>(const int64_t *dense_shape_addr,
int64_t *batch_ptr_addr, int64_t *row_ptr_addr,
int64_t *col_indices_addr, float *values_addr,
float *output, size_t ndim, size_t rows,
size_t nums, cudaStream_t cuda_stream);
template CUDA_LIB_EXPORT void CalCSRSparseMatrixToDense<int64_t, double>(const int64_t *dense_shape_addr,
int64_t *batch_ptr_addr, int64_t *row_ptr_addr,
int64_t *col_indices_addr, double *values_addr,
double *output, size_t ndim, size_t rows,
size_t nums, cudaStream_t cuda_stream);
template CUDA_LIB_EXPORT void CalCSRSparseMatrixToDense<int64_t, Complex<float>>(
const int64_t *dense_shape_addr, int64_t *batch_ptr_addr, int64_t *row_ptr_addr, int64_t *col_indices_addr,
Complex<float> *values_addr, Complex<float> *output, size_t ndim, size_t rows, size_t nums, cudaStream_t cuda_stream);
template CUDA_LIB_EXPORT void CalCSRSparseMatrixToDense<int64_t, Complex<double>>(
const int64_t *dense_shape_addr, int64_t *batch_ptr_addr, int64_t *row_ptr_addr, int64_t *col_indices_addr,
Complex<double> *values_addr, Complex<double> *output, size_t ndim, size_t rows, size_t nums,
cudaStream_t cuda_stream);
|
3d92c30efc487e2d7aaf44b52af25b1f03cd56a4.hip | // !!! This is a file automatically generated by hipify!!!
/*!
* Copyright 2015-2019 by Contributors
* \file elementwise_metric.cc
* \brief evaluation metrics for elementwise binary or regression.
* \author Kailong Chen, Tianqi Chen
*/
#include <rabit/rabit.h>
#include <xgboost/metric.h>
#include <dmlc/registry.h>
#include <cmath>
#include "metric_common.h"
#include "../common/math.h"
#include "../common/common.h"
#if defined(XGBOOST_USE_CUDA)
#include <thrust/execution_policy.h> // thrust::hip::par
#include <thrust/functional.h> // thrust::plus<>
#include <thrust/transform_reduce.h>
#include <thrust/iterator/counting_iterator.h>
#include "../common/device_helpers.cuh"
#endif // XGBOOST_USE_CUDA
namespace xgboost {
namespace metric {
// Tag this file; used to force static linking later.
DMLC_REGISTRY_FILE_TAG(elementwise_metric);
template <typename EvalRow>
class ElementWiseMetricsReduction {
public:
explicit ElementWiseMetricsReduction(EvalRow policy) :
policy_(std::move(policy)) {}
PackedReduceResult CpuReduceMetrics(
const HostDeviceVector<bst_float>& weights,
const HostDeviceVector<bst_float>& labels,
const HostDeviceVector<bst_float>& preds) const {
size_t ndata = labels.Size();
const auto& h_labels = labels.HostVector();
const auto& h_weights = weights.HostVector();
const auto& h_preds = preds.HostVector();
bst_float residue_sum = 0;
bst_float weights_sum = 0;
#pragma omp parallel for reduction(+: residue_sum, weights_sum) schedule(static)
for (omp_ulong i = 0; i < ndata; ++i) {
const bst_float wt = h_weights.size() > 0 ? h_weights[i] : 1.0f;
residue_sum += policy_.EvalRow(h_labels[i], h_preds[i]) * wt;
weights_sum += wt;
}
PackedReduceResult res { residue_sum, weights_sum };
return res;
}
#if defined(XGBOOST_USE_CUDA)
PackedReduceResult DeviceReduceMetrics(
GPUSet::GpuIdType device_id,
size_t device_index,
const HostDeviceVector<bst_float>& weights,
const HostDeviceVector<bst_float>& labels,
const HostDeviceVector<bst_float>& preds) {
size_t n_data = preds.DeviceSize(device_id);
thrust::counting_iterator<size_t> begin(0);
thrust::counting_iterator<size_t> end = begin + n_data;
auto s_label = labels.DeviceSpan(device_id);
auto s_preds = preds.DeviceSpan(device_id);
auto s_weights = weights.DeviceSpan(device_id);
bool const is_null_weight = weights.Size() == 0;
auto d_policy = policy_;
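    // transform_reduce maps each element index to a (weighted residue, weight) pair and
    // sums the pairs with PackedReduceResult's operator+, producing the loss numerator
    // and the weight denominator for this device shard in a single pass.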
PackedReduceResult result = thrust::transform_reduce(
thrust::hip::par(allocators_.at(device_index)),
begin, end,
[=] XGBOOST_DEVICE(size_t idx) {
bst_float weight = is_null_weight ? 1.0f : s_weights[idx];
bst_float residue = d_policy.EvalRow(s_label[idx], s_preds[idx]);
residue *= weight;
return PackedReduceResult{ residue, weight };
},
PackedReduceResult(),
thrust::plus<PackedReduceResult>());
return result;
}
#endif // XGBOOST_USE_CUDA
PackedReduceResult Reduce(
GPUSet devices,
const HostDeviceVector<bst_float>& weights,
const HostDeviceVector<bst_float>& labels,
const HostDeviceVector<bst_float>& preds) {
PackedReduceResult result;
if (devices.IsEmpty()) {
result = CpuReduceMetrics(weights, labels, preds);
}
#if defined(XGBOOST_USE_CUDA)
else { // NOLINT
if (allocators_.size() != devices.Size()) {
allocators_.clear();
allocators_.resize(devices.Size());
}
preds.Shard(devices);
labels.Shard(devices);
weights.Shard(devices);
std::vector<PackedReduceResult> res_per_device(devices.Size());
#pragma omp parallel for schedule(static, 1) if (devices.Size() > 1)
for (GPUSet::GpuIdType id = *devices.begin(); id < *devices.end(); ++id) {
dh::safe_cuda(hipSetDevice(id));
size_t index = devices.Index(id);
res_per_device.at(index) =
DeviceReduceMetrics(id, index, weights, labels, preds);
}
for (auto const& res : res_per_device) {
result += res;
}
}
#endif // defined(XGBOOST_USE_CUDA)
return result;
}
private:
EvalRow policy_;
#if defined(XGBOOST_USE_CUDA)
std::vector<dh::CubMemory> allocators_;
#endif // defined(XGBOOST_USE_CUDA)
};
struct EvalRowRMSE {
char const *Name() const {
return "rmse";
}
XGBOOST_DEVICE bst_float EvalRow(bst_float label, bst_float pred) const {
bst_float diff = label - pred;
return diff * diff;
}
static bst_float GetFinal(bst_float esum, bst_float wsum) {
return std::sqrt(esum / wsum);
}
};
struct EvalRowMAE {
const char *Name() const {
return "mae";
}
XGBOOST_DEVICE bst_float EvalRow(bst_float label, bst_float pred) const {
return std::abs(label - pred);
}
static bst_float GetFinal(bst_float esum, bst_float wsum) {
return esum / wsum;
}
};
struct EvalRowLogLoss {
const char *Name() const {
return "logloss";
}
XGBOOST_DEVICE bst_float EvalRow(bst_float y, bst_float py) const {
const bst_float eps = 1e-16f;
const bst_float pneg = 1.0f - py;
if (py < eps) {
return -y * ::log(eps) - (1.0f - y) * ::log(1.0f - eps);
} else if (pneg < eps) {
return -y * ::log(1.0f - eps) - (1.0f - y) * ::log(eps);
} else {
return -y * ::log(py) - (1.0f - y) * ::log(pneg);
}
}
static bst_float GetFinal(bst_float esum, bst_float wsum) {
return esum / wsum;
}
};
struct EvalError {
explicit EvalError(const char* param) {
if (param != nullptr) {
CHECK_EQ(sscanf(param, "%f", &threshold_), 1)
<< "unable to parse the threshold value for the error metric";
has_param_ = true;
} else {
threshold_ = 0.5f;
has_param_ = false;
}
}
const char *Name() const {
static std::string name;
if (has_param_) {
std::ostringstream os;
os << "error";
if (threshold_ != 0.5f) os << '@' << threshold_;
name = os.str();
return name.c_str();
} else {
return "error";
}
}
XGBOOST_DEVICE bst_float EvalRow(
bst_float label, bst_float pred) const {
// assume label is in [0,1]
return pred > threshold_ ? 1.0f - label : label;
}
static bst_float GetFinal(bst_float esum, bst_float wsum) {
return esum / wsum;
}
private:
bst_float threshold_;
bool has_param_;
};
struct EvalPoissonNegLogLik {
const char *Name() const {
return "poisson-nloglik";
}
XGBOOST_DEVICE bst_float EvalRow(bst_float y, bst_float py) const {
const bst_float eps = 1e-16f;
if (py < eps) py = eps;
return common::LogGamma(y + 1.0f) + py - ::log(py) * y;
}
static bst_float GetFinal(bst_float esum, bst_float wsum) {
return esum / wsum;
}
};
struct EvalGammaDeviance {
const char *Name() const {
return "gamma-deviance";
}
XGBOOST_DEVICE bst_float EvalRow(bst_float label, bst_float pred) const {
bst_float epsilon = 1.0e-9;
bst_float tmp = label / (pred + epsilon);
return tmp - ::log(tmp) - 1;
}
static bst_float GetFinal(bst_float esum, bst_float wsum) {
return 2 * esum;
}
};
struct EvalGammaNLogLik {
static const char *Name() {
return "gamma-nloglik";
}
XGBOOST_DEVICE bst_float EvalRow(bst_float y, bst_float py) const {
bst_float psi = 1.0;
bst_float theta = -1. / py;
bst_float a = psi;
bst_float b = -::log(-theta);
bst_float c = 1. / psi * ::log(y/psi) - ::log(y) - common::LogGamma(1. / psi);
return -((y * theta - b) / a + c);
}
static bst_float GetFinal(bst_float esum, bst_float wsum) {
return esum / wsum;
}
};
struct EvalTweedieNLogLik {
explicit EvalTweedieNLogLik(const char* param) {
CHECK(param != nullptr)
<< "tweedie-nloglik must be in format tweedie-nloglik@rho";
rho_ = atof(param);
CHECK(rho_ < 2 && rho_ >= 1)
<< "tweedie variance power must be in interval [1, 2)";
}
const char *Name() const {
static std::string name;
std::ostringstream os;
os << "tweedie-nloglik@" << rho_;
name = os.str();
return name.c_str();
}
XGBOOST_DEVICE bst_float EvalRow(bst_float y, bst_float p) const {
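    // exp((1 - rho) * log(p)) is p^(1 - rho); the row loss is the Tweedie negative
    // log-likelihood -y * p^(1-rho)/(1-rho) + p^(2-rho)/(2-rho), dropping terms that
    // do not depend on the prediction p.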
bst_float a = y * ::exp((1 - rho_) * ::log(p)) / (1 - rho_);
bst_float b = ::exp((2 - rho_) * ::log(p)) / (2 - rho_);
return -a + b;
}
static bst_float GetFinal(bst_float esum, bst_float wsum) {
return esum / wsum;
}
protected:
bst_float rho_;
};
/*!
* \brief base class of element-wise evaluation
 * \tparam Policy the element-wise evaluation policy (per-row loss and final reduction)
*/
template<typename Policy>
struct EvalEWiseBase : public Metric {
EvalEWiseBase() : policy_{}, reducer_{policy_} {}
explicit EvalEWiseBase(char const* policy_param) :
policy_{policy_param}, reducer_{policy_} {}
bst_float Eval(const HostDeviceVector<bst_float>& preds,
const MetaInfo& info,
bool distributed) override {
CHECK_NE(info.labels_.Size(), 0U) << "label set cannot be empty";
CHECK_EQ(preds.Size(), info.labels_.Size())
<< "label and prediction size not match, "
<< "hint: use merror or mlogloss for multi-class classification";
const auto ndata = static_cast<omp_ulong>(info.labels_.Size());
// Dealing with ndata < n_gpus.
GPUSet devices = GPUSet::All(tparam_->gpu_id, tparam_->n_gpus, ndata);
auto result =
reducer_.Reduce(devices, info.weights_, info.labels_, preds);
double dat[2] { result.Residue(), result.Weights() };
if (distributed) {
rabit::Allreduce<rabit::op::Sum>(dat, 2);
}
return Policy::GetFinal(dat[0], dat[1]);
}
const char* Name() const override {
return policy_.Name();
}
private:
Policy policy_;
ElementWiseMetricsReduction<Policy> reducer_;
};
XGBOOST_REGISTER_METRIC(RMSE, "rmse")
.describe("Root mean square error.")
.set_body([](const char* param) { return new EvalEWiseBase<EvalRowRMSE>(); });
XGBOOST_REGISTER_METRIC(MAE, "mae")
.describe("Mean absolute error.")
.set_body([](const char* param) { return new EvalEWiseBase<EvalRowMAE>(); });
XGBOOST_REGISTER_METRIC(LogLoss, "logloss")
.describe("Negative loglikelihood for logistic regression.")
.set_body([](const char* param) { return new EvalEWiseBase<EvalRowLogLoss>(); });
XGBOOST_REGISTER_METRIC(PossionNegLoglik, "poisson-nloglik")
.describe("Negative loglikelihood for poisson regression.")
.set_body([](const char* param) { return new EvalEWiseBase<EvalPoissonNegLogLik>(); });
XGBOOST_REGISTER_METRIC(GammaDeviance, "gamma-deviance")
.describe("Residual deviance for gamma regression.")
.set_body([](const char* param) { return new EvalEWiseBase<EvalGammaDeviance>(); });
XGBOOST_REGISTER_METRIC(GammaNLogLik, "gamma-nloglik")
.describe("Negative log-likelihood for gamma regression.")
.set_body([](const char* param) { return new EvalEWiseBase<EvalGammaNLogLik>(); });
XGBOOST_REGISTER_METRIC(Error, "error")
.describe("Binary classification error.")
.set_body([](const char* param) { return new EvalEWiseBase<EvalError>(param); });
XGBOOST_REGISTER_METRIC(TweedieNLogLik, "tweedie-nloglik")
.describe("tweedie-nloglik@rho for tweedie regression.")
.set_body([](const char* param) {
return new EvalEWiseBase<EvalTweedieNLogLik>(param);
});
} // namespace metric
} // namespace xgboost
| 3d92c30efc487e2d7aaf44b52af25b1f03cd56a4.cu | /*!
* Copyright 2015-2019 by Contributors
* \file elementwise_metric.cc
* \brief evaluation metrics for elementwise binary or regression.
* \author Kailong Chen, Tianqi Chen
*/
#include <rabit/rabit.h>
#include <xgboost/metric.h>
#include <dmlc/registry.h>
#include <cmath>
#include "metric_common.h"
#include "../common/math.h"
#include "../common/common.h"
#if defined(XGBOOST_USE_CUDA)
#include <thrust/execution_policy.h> // thrust::cuda::par
#include <thrust/functional.h> // thrust::plus<>
#include <thrust/transform_reduce.h>
#include <thrust/iterator/counting_iterator.h>
#include "../common/device_helpers.cuh"
#endif // XGBOOST_USE_CUDA
namespace xgboost {
namespace metric {
// Tag this file; used to force static linking later.
DMLC_REGISTRY_FILE_TAG(elementwise_metric);
template <typename EvalRow>
class ElementWiseMetricsReduction {
public:
explicit ElementWiseMetricsReduction(EvalRow policy) :
policy_(std::move(policy)) {}
PackedReduceResult CpuReduceMetrics(
const HostDeviceVector<bst_float>& weights,
const HostDeviceVector<bst_float>& labels,
const HostDeviceVector<bst_float>& preds) const {
size_t ndata = labels.Size();
const auto& h_labels = labels.HostVector();
const auto& h_weights = weights.HostVector();
const auto& h_preds = preds.HostVector();
bst_float residue_sum = 0;
bst_float weights_sum = 0;
#pragma omp parallel for reduction(+: residue_sum, weights_sum) schedule(static)
for (omp_ulong i = 0; i < ndata; ++i) {
const bst_float wt = h_weights.size() > 0 ? h_weights[i] : 1.0f;
residue_sum += policy_.EvalRow(h_labels[i], h_preds[i]) * wt;
weights_sum += wt;
}
PackedReduceResult res { residue_sum, weights_sum };
return res;
}
#if defined(XGBOOST_USE_CUDA)
PackedReduceResult DeviceReduceMetrics(
GPUSet::GpuIdType device_id,
size_t device_index,
const HostDeviceVector<bst_float>& weights,
const HostDeviceVector<bst_float>& labels,
const HostDeviceVector<bst_float>& preds) {
size_t n_data = preds.DeviceSize(device_id);
thrust::counting_iterator<size_t> begin(0);
thrust::counting_iterator<size_t> end = begin + n_data;
auto s_label = labels.DeviceSpan(device_id);
auto s_preds = preds.DeviceSpan(device_id);
auto s_weights = weights.DeviceSpan(device_id);
bool const is_null_weight = weights.Size() == 0;
auto d_policy = policy_;
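    // transform_reduce maps each element index to a (weighted residue, weight) pair and
    // sums the pairs with PackedReduceResult's operator+, producing the loss numerator
    // and the weight denominator for this device shard in a single pass.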
PackedReduceResult result = thrust::transform_reduce(
thrust::cuda::par(allocators_.at(device_index)),
begin, end,
[=] XGBOOST_DEVICE(size_t idx) {
bst_float weight = is_null_weight ? 1.0f : s_weights[idx];
bst_float residue = d_policy.EvalRow(s_label[idx], s_preds[idx]);
residue *= weight;
return PackedReduceResult{ residue, weight };
},
PackedReduceResult(),
thrust::plus<PackedReduceResult>());
return result;
}
#endif // XGBOOST_USE_CUDA
PackedReduceResult Reduce(
GPUSet devices,
const HostDeviceVector<bst_float>& weights,
const HostDeviceVector<bst_float>& labels,
const HostDeviceVector<bst_float>& preds) {
PackedReduceResult result;
if (devices.IsEmpty()) {
result = CpuReduceMetrics(weights, labels, preds);
}
#if defined(XGBOOST_USE_CUDA)
else { // NOLINT
if (allocators_.size() != devices.Size()) {
allocators_.clear();
allocators_.resize(devices.Size());
}
preds.Shard(devices);
labels.Shard(devices);
weights.Shard(devices);
std::vector<PackedReduceResult> res_per_device(devices.Size());
#pragma omp parallel for schedule(static, 1) if (devices.Size() > 1)
for (GPUSet::GpuIdType id = *devices.begin(); id < *devices.end(); ++id) {
dh::safe_cuda(cudaSetDevice(id));
size_t index = devices.Index(id);
res_per_device.at(index) =
DeviceReduceMetrics(id, index, weights, labels, preds);
}
for (auto const& res : res_per_device) {
result += res;
}
}
#endif // defined(XGBOOST_USE_CUDA)
return result;
}
private:
EvalRow policy_;
#if defined(XGBOOST_USE_CUDA)
std::vector<dh::CubMemory> allocators_;
#endif // defined(XGBOOST_USE_CUDA)
};
struct EvalRowRMSE {
char const *Name() const {
return "rmse";
}
XGBOOST_DEVICE bst_float EvalRow(bst_float label, bst_float pred) const {
bst_float diff = label - pred;
return diff * diff;
}
static bst_float GetFinal(bst_float esum, bst_float wsum) {
return std::sqrt(esum / wsum);
}
};
struct EvalRowMAE {
const char *Name() const {
return "mae";
}
XGBOOST_DEVICE bst_float EvalRow(bst_float label, bst_float pred) const {
return std::abs(label - pred);
}
static bst_float GetFinal(bst_float esum, bst_float wsum) {
return esum / wsum;
}
};
struct EvalRowLogLoss {
const char *Name() const {
return "logloss";
}
XGBOOST_DEVICE bst_float EvalRow(bst_float y, bst_float py) const {
const bst_float eps = 1e-16f;
const bst_float pneg = 1.0f - py;
if (py < eps) {
return -y * std::log(eps) - (1.0f - y) * std::log(1.0f - eps);
} else if (pneg < eps) {
return -y * std::log(1.0f - eps) - (1.0f - y) * std::log(eps);
} else {
return -y * std::log(py) - (1.0f - y) * std::log(pneg);
}
}
static bst_float GetFinal(bst_float esum, bst_float wsum) {
return esum / wsum;
}
};
struct EvalError {
explicit EvalError(const char* param) {
if (param != nullptr) {
CHECK_EQ(sscanf(param, "%f", &threshold_), 1)
<< "unable to parse the threshold value for the error metric";
has_param_ = true;
} else {
threshold_ = 0.5f;
has_param_ = false;
}
}
const char *Name() const {
static std::string name;
if (has_param_) {
std::ostringstream os;
os << "error";
if (threshold_ != 0.5f) os << '@' << threshold_;
name = os.str();
return name.c_str();
} else {
return "error";
}
}
XGBOOST_DEVICE bst_float EvalRow(
bst_float label, bst_float pred) const {
// assume label is in [0,1]
return pred > threshold_ ? 1.0f - label : label;
}
static bst_float GetFinal(bst_float esum, bst_float wsum) {
return esum / wsum;
}
private:
bst_float threshold_;
bool has_param_;
};
struct EvalPoissonNegLogLik {
const char *Name() const {
return "poisson-nloglik";
}
XGBOOST_DEVICE bst_float EvalRow(bst_float y, bst_float py) const {
const bst_float eps = 1e-16f;
if (py < eps) py = eps;
return common::LogGamma(y + 1.0f) + py - std::log(py) * y;
}
static bst_float GetFinal(bst_float esum, bst_float wsum) {
return esum / wsum;
}
};
struct EvalGammaDeviance {
const char *Name() const {
return "gamma-deviance";
}
XGBOOST_DEVICE bst_float EvalRow(bst_float label, bst_float pred) const {
bst_float epsilon = 1.0e-9;
bst_float tmp = label / (pred + epsilon);
return tmp - std::log(tmp) - 1;
}
static bst_float GetFinal(bst_float esum, bst_float wsum) {
return 2 * esum;
}
};
struct EvalGammaNLogLik {
static const char *Name() {
return "gamma-nloglik";
}
XGBOOST_DEVICE bst_float EvalRow(bst_float y, bst_float py) const {
bst_float psi = 1.0;
bst_float theta = -1. / py;
bst_float a = psi;
bst_float b = -std::log(-theta);
bst_float c = 1. / psi * std::log(y/psi) - std::log(y) - common::LogGamma(1. / psi);
return -((y * theta - b) / a + c);
}
static bst_float GetFinal(bst_float esum, bst_float wsum) {
return esum / wsum;
}
};
struct EvalTweedieNLogLik {
explicit EvalTweedieNLogLik(const char* param) {
CHECK(param != nullptr)
<< "tweedie-nloglik must be in format tweedie-nloglik@rho";
rho_ = atof(param);
CHECK(rho_ < 2 && rho_ >= 1)
<< "tweedie variance power must be in interval [1, 2)";
}
const char *Name() const {
static std::string name;
std::ostringstream os;
os << "tweedie-nloglik@" << rho_;
name = os.str();
return name.c_str();
}
XGBOOST_DEVICE bst_float EvalRow(bst_float y, bst_float p) const {
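    // exp((1 - rho) * log(p)) is p^(1 - rho); the row loss is the Tweedie negative
    // log-likelihood -y * p^(1-rho)/(1-rho) + p^(2-rho)/(2-rho), dropping terms that
    // do not depend on the prediction p.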
bst_float a = y * std::exp((1 - rho_) * std::log(p)) / (1 - rho_);
bst_float b = std::exp((2 - rho_) * std::log(p)) / (2 - rho_);
return -a + b;
}
static bst_float GetFinal(bst_float esum, bst_float wsum) {
return esum / wsum;
}
protected:
bst_float rho_;
};
/*!
* \brief base class of element-wise evaluation
 * \tparam Policy the element-wise evaluation policy (per-row loss and final reduction)
*/
template<typename Policy>
struct EvalEWiseBase : public Metric {
EvalEWiseBase() : policy_{}, reducer_{policy_} {}
explicit EvalEWiseBase(char const* policy_param) :
policy_{policy_param}, reducer_{policy_} {}
bst_float Eval(const HostDeviceVector<bst_float>& preds,
const MetaInfo& info,
bool distributed) override {
CHECK_NE(info.labels_.Size(), 0U) << "label set cannot be empty";
CHECK_EQ(preds.Size(), info.labels_.Size())
<< "label and prediction size not match, "
<< "hint: use merror or mlogloss for multi-class classification";
const auto ndata = static_cast<omp_ulong>(info.labels_.Size());
// Dealing with ndata < n_gpus.
GPUSet devices = GPUSet::All(tparam_->gpu_id, tparam_->n_gpus, ndata);
auto result =
reducer_.Reduce(devices, info.weights_, info.labels_, preds);
double dat[2] { result.Residue(), result.Weights() };
if (distributed) {
rabit::Allreduce<rabit::op::Sum>(dat, 2);
}
return Policy::GetFinal(dat[0], dat[1]);
}
const char* Name() const override {
return policy_.Name();
}
private:
Policy policy_;
ElementWiseMetricsReduction<Policy> reducer_;
};
XGBOOST_REGISTER_METRIC(RMSE, "rmse")
.describe("Root mean square error.")
.set_body([](const char* param) { return new EvalEWiseBase<EvalRowRMSE>(); });
XGBOOST_REGISTER_METRIC(MAE, "mae")
.describe("Mean absolute error.")
.set_body([](const char* param) { return new EvalEWiseBase<EvalRowMAE>(); });
XGBOOST_REGISTER_METRIC(LogLoss, "logloss")
.describe("Negative loglikelihood for logistic regression.")
.set_body([](const char* param) { return new EvalEWiseBase<EvalRowLogLoss>(); });
XGBOOST_REGISTER_METRIC(PossionNegLoglik, "poisson-nloglik")
.describe("Negative loglikelihood for poisson regression.")
.set_body([](const char* param) { return new EvalEWiseBase<EvalPoissonNegLogLik>(); });
XGBOOST_REGISTER_METRIC(GammaDeviance, "gamma-deviance")
.describe("Residual deviance for gamma regression.")
.set_body([](const char* param) { return new EvalEWiseBase<EvalGammaDeviance>(); });
XGBOOST_REGISTER_METRIC(GammaNLogLik, "gamma-nloglik")
.describe("Negative log-likelihood for gamma regression.")
.set_body([](const char* param) { return new EvalEWiseBase<EvalGammaNLogLik>(); });
XGBOOST_REGISTER_METRIC(Error, "error")
.describe("Binary classification error.")
.set_body([](const char* param) { return new EvalEWiseBase<EvalError>(param); });
XGBOOST_REGISTER_METRIC(TweedieNLogLik, "tweedie-nloglik")
.describe("tweedie-nloglik@rho for tweedie regression.")
.set_body([](const char* param) {
return new EvalEWiseBase<EvalTweedieNLogLik>(param);
});
} // namespace metric
} // namespace xgboost
|
f343df0f0ebadcf696537025f9521c05b0e28bdf.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdlib.h>
#include <hiprand/hiprand.h>
#include <hiprand/hiprand_kernel.h>
#include <stdio.h>
#include <math.h>
#include <time.h>
/* Initialize random number generators */
__global__ void init_rand(hiprandState_t *state, int sd)
{
int idx = blockIdx.x;
hiprand_init(1337, idx*sd, 0, &state[idx]);
}
__global__ void gibbs_sample(hiprandState_t *state, float* alpha, float* beta, float* samples,
int N, int nsamps, int nsweeps)
{
int blk = blockIdx.x;
	int j;
int cell, samp, sweep;
float df=0.0;
float bdif=0.0;
float p1=0.0;
for(samp=0; samp < nsamps; samp++)
{
int sampstart = blk*N*nsamps + samp*N;
for(sweep=0; sweep<nsweeps; sweep++)
{
for(cell=0; cell < N; cell++)
{
bdif=0.0;
df = 0.0;
p1=0.0;
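				// beta packs the symmetric coupling matrix as its upper triangle (with diagonal):
				// entry (a, b) with a <= b sits at index a*N - a*(a+1)/2 + b. The two loops below
				// accumulate sum_j beta(cell, j) * s_j, after which the cell is resampled with
				// p(s_cell = 1 | rest) = sigmoid(-alpha[cell] - sum).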
for(j=0; j<=cell-1; j++)
{
					bdif += beta[j*(N)-j*(j+1)/2+cell]*samples[sampstart+j];
}
for(j=cell; j<=N-1; j++)
{
bdif += beta[cell*(N)-cell*(cell+1)/2 +j]*samples[sampstart+j];
}
df = -1.0*alpha[cell] - bdif;
p1 =expf(df)/(1+expf(df));
if(hiprand_uniform(&state[blk]) < p1)
{
samples[sampstart + cell] = 1.0;
}
else
{
samples[sampstart + cell] = 0.0;
}
}
}
}
}
__global__ void compute_sample_mean(float* samples, float* sample_mean, int nsamps, int N)
{
int cell = blockIdx.x;
sample_mean[cell] = 0;
for(int i = 0; i<nsamps; i++)
{
sample_mean[cell] += samples[cell + i*N];
}
sample_mean[cell] = sample_mean[cell] / nsamps;
}
__global__ void compute_sample_covariance(float* samples, float* sample_covariance, int nsamps, int N)
{
int i = blockIdx.x;
int j = threadIdx.x;
sample_covariance[i*N +j] = 0.0;
for(int samp = 0; samp<nsamps; samp++)
{
sample_covariance[i*N+j] += samples[samp*N+i]*samples[samp*N+j];
}
sample_covariance[i*N+j] = sample_covariance[i*N+j]/nsamps;
}
__global__ void update_alpha_estimate(float* alpha, float* sample_mean, float* data_mean, float eta, int N)
{
int i = blockIdx.x;
{
alpha[i] += eta*(sample_mean[i] - data_mean[i]);
}
}
__global__ void update_beta_estimate(float* beta, float* sample_covariance, float* data_covariance, float eta, int N)
{
int i = blockIdx.x;
{
		beta[i] += eta*(sample_covariance[i] - data_covariance[i]);
}
}
int write_paths_to_csv(char* fname, float* paths, int n_paths, int nt, char* mode)
{
FILE *outfile;
outfile = fopen(fname, mode);
int i, j;
if(outfile != NULL)
{
for(i = 0; i<n_paths; i++)
{
for(j = 0; j<nt; j++)
{
fprintf(outfile, "%f,", paths[i*nt + j]);
}
fprintf(outfile, "\n");
}
}
fclose(outfile);
return 0;
}
int main()
{
int N = 20; /* Number of Neurons */
int samps_per_block = 1024;
int nblocks = 1024;
int nsamps_tot = samps_per_block*nblocks;
int nsweeps = 10;
/* Compute sizes of various data structures */
size_t mean_size = N*sizeof(float);
size_t cov_size = (N*(N+1)/2)*sizeof(float);
size_t full_cov_size = N*N*sizeof(float);
size_t samples_size = N*nsamps_tot*sizeof(float);
/* allocate result memory */
float *alpha_res = (float*)malloc(mean_size);
float *beta_res = (float*)malloc(cov_size);
float *sample_mean_res = (float*)malloc(mean_size);
float *sample_covariance_res = (float*)malloc(full_cov_size);
float *samples_res = (float*)malloc(samples_size);
/* Allocate device memory */
float *sample_mean;
float *sample_covariance;
float *samples;
float *alpha;
float *beta;
dim3 cov_block(N, N);
hipMalloc(&sample_mean, mean_size);
hipMalloc(&sample_covariance, full_cov_size);
hipMalloc(&samples, samples_size);
hipMalloc(&alpha, mean_size);
hipMalloc(&beta, cov_size);
hiprandState_t *d_state;
	hipMalloc(&d_state, nblocks * sizeof(hiprandState_t));
/* generate random initial conditoins */
srand(time(NULL));
int i;
for(i=0; i<N; i++)
{
alpha_res[i] = (2*((float)rand()/(float)RAND_MAX) - 1);
/*printf("%f\n", alpha_res[i]);*/
}
for(i=0; i<(N*(N+1)/2); i++)
{
beta_res[i]= 0.1*(2*((float)rand()/(float)RAND_MAX) - 1);
}
/* copy initial conditions over to device */
hipMemcpy(alpha, alpha_res, mean_size, hipMemcpyHostToDevice);
hipMemcpy(beta, beta_res, cov_size, hipMemcpyHostToDevice);
/* sample */
printf("Sampling...\n");
hipLaunchKernelGGL(( init_rand), dim3(nblocks), dim3(1), 0, 0, d_state, time(NULL));
hipLaunchKernelGGL(( gibbs_sample), dim3(nblocks), dim3(1), 0, 0, d_state, alpha, beta, samples, N, samps_per_block, nsweeps);
printf("Finished. nsamps=%d\n", nsamps_tot);
printf("Computing Sample mean...\n");
hipLaunchKernelGGL(( compute_sample_mean), dim3(N), dim3(1), 0, 0, samples, sample_mean, nsamps_tot, N);
hipLaunchKernelGGL(( compute_sample_covariance), dim3(N), dim3(N), 0, 0, samples, sample_covariance, nsamps_tot, N);
/*copy back sample mean*/
hipMemcpy(sample_mean_res, sample_mean, mean_size, hipMemcpyDeviceToHost);
hipMemcpy(samples_res, samples, samples_size, hipMemcpyDeviceToHost);
hipMemcpy(sample_covariance_res, sample_covariance, full_cov_size, hipMemcpyDeviceToHost);
/* Display*/
for(i=0; i<N; i++)
{
printf("%f\n", sample_mean_res[i]);
}
write_paths_to_csv("maxent_samples.csv", samples_res, nsamps_tot, N, "w");
write_paths_to_csv("maxent_cov.csv", sample_covariance_res, N, N, "w");
/* free memory */
free(alpha_res);
free(beta_res);
free(sample_mean_res);
hipFree(sample_mean);
hipFree(sample_covariance);
hipFree(samples);
hipFree(alpha);
hipFree(beta);
hipFree(d_state);
return 0;
}
| f343df0f0ebadcf696537025f9521c05b0e28bdf.cu | #include <stdlib.h>
#include <curand.h>
#include <curand_kernel.h>
#include <stdio.h>
#include <math.h>
#include <time.h>
/* Initialize random number generators */
__global__ void init_rand(curandState *state, int sd)
{
int idx = blockIdx.x;
curand_init(1337, idx*sd, 0, &state[idx]);
}
__global__ void gibbs_sample(curandState *state, float* alpha, float* beta, float* samples,
int N, int nsamps, int nsweeps)
{
int blk = blockIdx.x;
	int j;
int cell, samp, sweep;
float df=0.0;
float bdif=0.0;
float p1=0.0;
for(samp=0; samp < nsamps; samp++)
{
int sampstart = blk*N*nsamps + samp*N;
for(sweep=0; sweep<nsweeps; sweep++)
{
for(cell=0; cell < N; cell++)
{
bdif=0.0;
df = 0.0;
p1=0.0;
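				// beta packs the symmetric coupling matrix as its upper triangle (with diagonal):
				// entry (a, b) with a <= b sits at index a*N - a*(a+1)/2 + b. The two loops below
				// accumulate sum_j beta(cell, j) * s_j, after which the cell is resampled with
				// p(s_cell = 1 | rest) = sigmoid(-alpha[cell] - sum).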
for(j=0; j<=cell-1; j++)
{
					bdif += beta[j*(N)-j*(j+1)/2+cell]*samples[sampstart+j];
}
for(j=cell; j<=N-1; j++)
{
bdif += beta[cell*(N)-cell*(cell+1)/2 +j]*samples[sampstart+j];
}
df = -1.0*alpha[cell] - bdif;
p1 =expf(df)/(1+expf(df));
if(curand_uniform(&state[blk]) < p1)
{
samples[sampstart + cell] = 1.0;
}
else
{
samples[sampstart + cell] = 0.0;
}
}
}
}
}
__global__ void compute_sample_mean(float* samples, float* sample_mean, int nsamps, int N)
{
int cell = blockIdx.x;
sample_mean[cell] = 0;
for(int i = 0; i<nsamps; i++)
{
sample_mean[cell] += samples[cell + i*N];
}
sample_mean[cell] = sample_mean[cell] / nsamps;
}
__global__ void compute_sample_covariance(float* samples, float* sample_covariance, int nsamps, int N)
{
int i = blockIdx.x;
int j = threadIdx.x;
sample_covariance[i*N +j] = 0.0;
for(int samp = 0; samp<nsamps; samp++)
{
sample_covariance[i*N+j] += samples[samp*N+i]*samples[samp*N+j];
}
sample_covariance[i*N+j] = sample_covariance[i*N+j]/nsamps;
}
__global__ void update_alpha_estimate(float* alpha, float* sample_mean, float* data_mean, float eta, int N)
{
int i = blockIdx.x;
{
alpha[i] += eta*(sample_mean[i] - data_mean[i]);
}
}
__global__ void update_beta_estimate(float* beta, float* sample_covariance, float* data_covariance, float eta, int N)
{
int i = blockIdx.x;
{
		beta[i] += eta*(sample_covariance[i] - data_covariance[i]);
}
}
int write_paths_to_csv(char* fname, float* paths, int n_paths, int nt, char* mode)
{
FILE *outfile;
outfile = fopen(fname, mode);
int i, j;
if(outfile != NULL)
{
for(i = 0; i<n_paths; i++)
{
for(j = 0; j<nt; j++)
{
fprintf(outfile, "%f,", paths[i*nt + j]);
}
fprintf(outfile, "\n");
}
}
fclose(outfile);
return 0;
}
int main()
{
int N = 20; /* Number of Neurons */
int samps_per_block = 1024;
int nblocks = 1024;
int nsamps_tot = samps_per_block*nblocks;
int nsweeps = 10;
/* Compute sizes of various data structures */
size_t mean_size = N*sizeof(float);
size_t cov_size = (N*(N+1)/2)*sizeof(float);
size_t full_cov_size = N*N*sizeof(float);
size_t samples_size = N*nsamps_tot*sizeof(float);
/* allocate result memory */
float *alpha_res = (float*)malloc(mean_size);
float *beta_res = (float*)malloc(cov_size);
float *sample_mean_res = (float*)malloc(mean_size);
float *sample_covariance_res = (float*)malloc(full_cov_size);
float *samples_res = (float*)malloc(samples_size);
/* Allocate device memory */
float *sample_mean;
float *sample_covariance;
float *samples;
float *alpha;
float *beta;
dim3 cov_block(N, N);
cudaMalloc(&sample_mean, mean_size);
cudaMalloc(&sample_covariance, full_cov_size);
cudaMalloc(&samples, samples_size);
cudaMalloc(&alpha, mean_size);
cudaMalloc(&beta, cov_size);
curandState *d_state;
cudaMalloc(&d_state, nblocks*sizeof(curandState));
/* generate random initial conditions */
srand(time(NULL));
int i;
for(i=0; i<N; i++)
{
alpha_res[i] = (2*((float)rand()/(float)RAND_MAX) - 1);
/*printf("%f\n", alpha_res[i]);*/
}
for(i=0; i<(N*(N+1)/2); i++)
{
beta_res[i]= 0.1*(2*((float)rand()/(float)RAND_MAX) - 1);
}
/* copy initial conditions over to device */
cudaMemcpy(alpha, alpha_res, mean_size, cudaMemcpyHostToDevice);
cudaMemcpy(beta, beta_res, cov_size, cudaMemcpyHostToDevice);
/* sample */
printf("Sampling...\n");
init_rand<<<nblocks, 1>>>(d_state, time(NULL));
gibbs_sample<<<nblocks, 1>>>(d_state, alpha, beta, samples, N, samps_per_block, nsweeps);
printf("Finished. nsamps=%d\n", nsamps_tot);
printf("Computing Sample mean...\n");
compute_sample_mean<<<N, 1>>>(samples, sample_mean, nsamps_tot, N);
compute_sample_covariance<<<N, N>>>(samples, sample_covariance, nsamps_tot, N);
/*copy back sample mean*/
cudaMemcpy(sample_mean_res, sample_mean, mean_size, cudaMemcpyDeviceToHost);
cudaMemcpy(samples_res, samples, samples_size, cudaMemcpyDeviceToHost);
cudaMemcpy(sample_covariance_res, sample_covariance, full_cov_size, cudaMemcpyDeviceToHost);
/* Display*/
for(i=0; i<N; i++)
{
printf("%f\n", sample_mean_res[i]);
}
write_paths_to_csv("maxent_samples.csv", samples_res, nsamps_tot, N, "w");
write_paths_to_csv("maxent_cov.csv", sample_covariance_res, N, N, "w");
/* free memory */
free(alpha_res);
free(beta_res);
free(sample_mean_res);
cudaFree(sample_mean);
cudaFree(sample_covariance);
cudaFree(samples);
cudaFree(alpha);
cudaFree(beta);
cudaFree(d_state);
return 0;
}
|
ca8fb20741c14680365e633d09805c51509774c5.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/**
* Copyright 2019 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "argmax_impl.cuh"
#include "device/gpu/cuda_common.h"
#include "include/hip/hip_fp16.h"
template <typename T>
__global__ void Argmax1D(const T* input, const int channel_size, int* output) {
int max_index = 0;
T max = input[0];
for (int pos = 1; pos < channel_size; pos++) {
if (max < input[pos]) {
max = input[pos];
max_index = pos;
}
}
output[0] = max_index;
return;
}
template <typename T>
__global__ void ArgmaxDefault2D(const T* input, const int batch_size, const int channel_size, int* output) {
int pos;
int max_index;
T max;
for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < batch_size; i += blockDim.x * gridDim.x) {
max = input[i * channel_size];
max_index = 0;
for (int j = 1; j < channel_size; j++) {
pos = i * channel_size + j;
if (max < input[pos]) {
max = input[pos];
max_index = j;
}
}
output[i] = max_index;
}
return;
}
template <typename T>
__global__ void ArgmaxAxis2D(const T* input, const int batch_size, const int channel_size, int* output) {
int pos;
int max_index;
T max;
for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < channel_size; i += blockDim.x * gridDim.x) {
max = input[i];
max_index = 0;
for (int j = 1; j < batch_size; j++) {
pos = j * channel_size + i;
if (max < input[pos]) {
max = input[pos];
max_index = j;
}
}
output[i] = max_index;
}
return;
}
template <typename T>
void CalArgmax(const T* input, const int batch_size, const int channel_size, const int axis, int* output,
hipStream_t cuda_stream) {
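  // Dispatch on the input shape:
  //   batch_size == 0 : the input is a single vector of channel_size elements (Argmax1D);
  //   axis == 1       : one argmax per row of the (batch_size x channel_size) matrix;
  //   otherwise       : one argmax per column, reducing over the batch_size dimension.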
if (batch_size == 0) {
hipLaunchKernelGGL(( Argmax1D), dim3(1), dim3(1), 0, cuda_stream, input, channel_size, output);
} else if (axis == 1) {
hipLaunchKernelGGL(( ArgmaxDefault2D), dim3(GET_BLOCKS(batch_size)), dim3(GET_THREADS), 0, cuda_stream, input, batch_size, channel_size, output);
} else {
hipLaunchKernelGGL(( ArgmaxAxis2D), dim3(GET_BLOCKS(channel_size)), dim3(GET_THREADS), 0, cuda_stream, input, batch_size, channel_size, output);
}
return;
}
template void CalArgmax<float>(const float* input, const int batch_size, const int channel_size, const int axis,
int* output, hipStream_t cuda_stream);
template void CalArgmax<half>(const half* input, const int batch_size, const int channel_size, const int axis,
int* output, hipStream_t cuda_stream);
| ca8fb20741c14680365e633d09805c51509774c5.cu | /**
* Copyright 2019 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "argmax_impl.cuh"
#include "device/gpu/cuda_common.h"
#include "include/cuda_fp16.h"
template <typename T>
__global__ void Argmax1D(const T* input, const int channel_size, int* output) {
int max_index = 0;
T max = input[0];
for (int pos = 1; pos < channel_size; pos++) {
if (max < input[pos]) {
max = input[pos];
max_index = pos;
}
}
output[0] = max_index;
return;
}
template <typename T>
__global__ void ArgmaxDefault2D(const T* input, const int batch_size, const int channel_size, int* output) {
int pos;
int max_index;
T max;
for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < batch_size; i += blockDim.x * gridDim.x) {
max = input[i * channel_size];
max_index = 0;
for (int j = 1; j < channel_size; j++) {
pos = i * channel_size + j;
if (max < input[pos]) {
max = input[pos];
max_index = j;
}
}
output[i] = max_index;
}
return;
}
template <typename T>
__global__ void ArgmaxAxis2D(const T* input, const int batch_size, const int channel_size, int* output) {
int pos;
int max_index;
T max;
for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < channel_size; i += blockDim.x * gridDim.x) {
max = input[i];
max_index = 0;
for (int j = 1; j < batch_size; j++) {
pos = j * channel_size + i;
if (max < input[pos]) {
max = input[pos];
max_index = j;
}
}
output[i] = max_index;
}
return;
}
template <typename T>
void CalArgmax(const T* input, const int batch_size, const int channel_size, const int axis, int* output,
cudaStream_t cuda_stream) {
if (batch_size == 0) {
Argmax1D<<<1, 1, 0, cuda_stream>>>(input, channel_size, output);
} else if (axis == 1) {
ArgmaxDefault2D<<<GET_BLOCKS(batch_size), GET_THREADS, 0, cuda_stream>>>(input, batch_size, channel_size, output);
} else {
ArgmaxAxis2D<<<GET_BLOCKS(channel_size), GET_THREADS, 0, cuda_stream>>>(input, batch_size, channel_size, output);
}
return;
}
template void CalArgmax<float>(const float* input, const int batch_size, const int channel_size, const int axis,
int* output, cudaStream_t cuda_stream);
template void CalArgmax<half>(const half* input, const int batch_size, const int channel_size, const int axis,
int* output, cudaStream_t cuda_stream);
|
2bb11d428aae5c61cbfd63a41604e7f5b97199a3.hip | // !!! This is a file automatically generated by hipify!!!
/**
* Copyright (c) Facebook, Inc. and its affiliates.
*
* This source code is licensed under the MIT license found in the
* LICENSE file in the root directory of this source tree.
*/
#include <faiss/gpu/GpuIndexBinaryFlat.h>
#include <faiss/gpu/GpuResources.h>
#include <faiss/gpu/utils/DeviceUtils.h>
#include <faiss/gpu/impl/BinaryFlatIndex.cuh>
#include <faiss/gpu/utils/ConversionOperators.cuh>
#include <faiss/gpu/utils/CopyUtils.cuh>
namespace faiss {
namespace gpu {
/// Default CPU search size for which we use paged copies
constexpr size_t kMinPageSize = (size_t)256 * 1024 * 1024;
GpuIndexBinaryFlat::GpuIndexBinaryFlat(
GpuResourcesProvider* provider,
const faiss::IndexBinaryFlat* index,
GpuIndexBinaryFlatConfig config)
: IndexBinary(index->d),
resources_(provider->getResources()),
binaryFlatConfig_(config) {
FAISS_THROW_IF_NOT_FMT(
this->d % 8 == 0,
"vector dimension (number of bits) "
"must be divisible by 8 (passed %d)",
this->d);
// Flat index doesn't need training
this->is_trained = true;
copyFrom(index);
}
GpuIndexBinaryFlat::GpuIndexBinaryFlat(
GpuResourcesProvider* provider,
int dims,
GpuIndexBinaryFlatConfig config)
: IndexBinary(dims),
resources_(provider->getResources()),
binaryFlatConfig_(std::move(config)) {
FAISS_THROW_IF_NOT_FMT(
this->d % 8 == 0,
"vector dimension (number of bits) "
"must be divisible by 8 (passed %d)",
this->d);
// Flat index doesn't need training
this->is_trained = true;
// Construct index
DeviceScope scope(binaryFlatConfig_.device);
data_.reset(new BinaryFlatIndex(
resources_.get(), this->d, binaryFlatConfig_.memorySpace));
}
GpuIndexBinaryFlat::~GpuIndexBinaryFlat() {}
int GpuIndexBinaryFlat::getDevice() const {
return binaryFlatConfig_.device;
}
std::shared_ptr<GpuResources> GpuIndexBinaryFlat::getResources() {
return resources_;
}
void GpuIndexBinaryFlat::copyFrom(const faiss::IndexBinaryFlat* index) {
DeviceScope scope(binaryFlatConfig_.device);
this->d = index->d;
// GPU code has 32 bit indices
FAISS_THROW_IF_NOT_FMT(
index->ntotal <= (Index::idx_t)std::numeric_limits<int>::max(),
"GPU index only supports up to %zu indices; "
"attempting to copy CPU index with %zu parameters",
(size_t)std::numeric_limits<int>::max(),
(size_t)index->ntotal);
this->ntotal = index->ntotal;
// destroy old first before allocating new
data_.reset();
data_.reset(new BinaryFlatIndex(
resources_.get(), this->d, binaryFlatConfig_.memorySpace));
// The index could be empty
if (index->ntotal > 0) {
data_->add(
index->xb.data(),
index->ntotal,
resources_->getDefaultStream(binaryFlatConfig_.device));
}
}
void GpuIndexBinaryFlat::copyTo(faiss::IndexBinaryFlat* index) const {
DeviceScope scope(binaryFlatConfig_.device);
index->d = this->d;
index->ntotal = this->ntotal;
FAISS_ASSERT(data_);
FAISS_ASSERT(data_->getSize() == this->ntotal);
index->xb.resize(this->ntotal * (this->d / 8));
if (this->ntotal > 0) {
fromDevice(
data_->getVectorsRef(),
index->xb.data(),
resources_->getDefaultStream(binaryFlatConfig_.device));
}
}
void GpuIndexBinaryFlat::add(faiss::IndexBinary::idx_t n, const uint8_t* x) {
DeviceScope scope(binaryFlatConfig_.device);
// To avoid multiple re-allocations, ensure we have enough storage
// available
data_->reserve(n, resources_->getDefaultStream(binaryFlatConfig_.device));
// Due to GPU indexing in int32, we can't store more than this
// number of vectors on a GPU
FAISS_THROW_IF_NOT_FMT(
this->ntotal + n <= (Index::idx_t)std::numeric_limits<int>::max(),
"GPU index only supports up to %zu indices",
(size_t)std::numeric_limits<int>::max());
data_->add(
(const unsigned char*)x,
n,
resources_->getDefaultStream(binaryFlatConfig_.device));
this->ntotal += n;
}
void GpuIndexBinaryFlat::reset() {
DeviceScope scope(binaryFlatConfig_.device);
// Free the underlying memory
data_->reset();
this->ntotal = 0;
}
void GpuIndexBinaryFlat::search(
faiss::IndexBinary::idx_t n,
const uint8_t* x,
faiss::IndexBinary::idx_t k,
int32_t* distances,
faiss::IndexBinary::idx_t* labels) const {
if (n == 0) {
return;
}
// For now, only support <= max int results
FAISS_THROW_IF_NOT_FMT(
n <= (Index::idx_t)std::numeric_limits<int>::max(),
"GPU index only supports up to %zu indices",
(size_t)std::numeric_limits<int>::max());
FAISS_THROW_IF_NOT_FMT(
k <= (Index::idx_t)getMaxKSelection(),
"GPU only supports k <= %d (requested %d)",
getMaxKSelection(),
(int)k); // select limitation
DeviceScope scope(binaryFlatConfig_.device);
auto stream = resources_->getDefaultStream(binaryFlatConfig_.device);
// The input vectors may be too large for the GPU, but we still
// assume that the output distances and labels are not.
// Go ahead and make space for output distances and labels on the
// GPU.
// If we reach a point where all inputs are too big, we can add
// another level of tiling.
auto outDistances = toDeviceTemporary<int32_t, 2>(
resources_.get(),
binaryFlatConfig_.device,
distances,
stream,
{(int)n, (int)k});
// FlatIndex only supports an interface returning int indices
DeviceTensor<int, 2, true> outIntIndices(
resources_.get(),
makeTempAlloc(AllocType::Other, stream),
{(int)n, (int)k});
bool usePaged = false;
if (getDeviceForAddress(x) == -1) {
// It is possible that the user is querying for a vector set size
// `x` that won't fit on the GPU.
// In this case, we will have to handle paging of the data from CPU
// -> GPU.
// Currently, we don't handle the case where the output data won't
// fit on the GPU (e.g., n * k is too large for the GPU memory).
size_t dataSize = (size_t)n * (this->d / 8) * sizeof(uint8_t);
if (dataSize >= kMinPageSize) {
searchFromCpuPaged_(
n, x, k, outDistances.data(), outIntIndices.data());
usePaged = true;
}
}
if (!usePaged) {
searchNonPaged_(n, x, k, outDistances.data(), outIntIndices.data());
}
// Convert and copy int indices out
auto outIndices = toDeviceTemporary<Index::idx_t, 2>(
resources_.get(),
binaryFlatConfig_.device,
labels,
stream,
{(int)n, (int)k});
// Convert int to idx_t
convertTensor<int, Index::idx_t, 2>(stream, outIntIndices, outIndices);
// Copy back if necessary
fromDevice<int32_t, 2>(outDistances, distances, stream);
fromDevice<Index::idx_t, 2>(outIndices, labels, stream);
}
void GpuIndexBinaryFlat::searchNonPaged_(
int n,
const uint8_t* x,
int k,
int32_t* outDistancesData,
int* outIndicesData) const {
Tensor<int32_t, 2, true> outDistances(outDistancesData, {n, k});
Tensor<int, 2, true> outIndices(outIndicesData, {n, k});
auto stream = resources_->getDefaultStream(binaryFlatConfig_.device);
// Make sure arguments are on the device we desire; use temporary
// memory allocations to move it if necessary
auto vecs = toDeviceTemporary<uint8_t, 2>(
resources_.get(),
binaryFlatConfig_.device,
const_cast<uint8_t*>(x),
stream,
{n, (int)(this->d / 8)});
data_->query(vecs, k, outDistances, outIndices);
}
void GpuIndexBinaryFlat::searchFromCpuPaged_(
int n,
const uint8_t* x,
int k,
int32_t* outDistancesData,
int* outIndicesData) const {
Tensor<int32_t, 2, true> outDistances(outDistancesData, {n, k});
Tensor<int, 2, true> outIndices(outIndicesData, {n, k});
auto vectorSize = sizeof(uint8_t) * (this->d / 8);
// Just page without overlapping copy with compute (as GpuIndexFlat does)
int batchSize = utils::nextHighestPowerOf2(
(int)((size_t)kMinPageSize / vectorSize));
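    // Worked example (d assumed to be 1024 bits purely for illustration): each vector
    // then occupies 1024 / 8 = 128 bytes, so kMinPageSize / vectorSize =
    // 268435456 / 128 = 2097152 (= 2^21) queries, which nextHighestPowerOf2 rounds up
    // to a power of two (already 2^21 here) and uses as the per-page batch size.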
for (int cur = 0; cur < n; cur += batchSize) {
int num = ::min(batchSize, n - cur);
auto outDistancesSlice = outDistances.narrowOutermost(cur, num);
auto outIndicesSlice = outIndices.narrowOutermost(cur, num);
searchNonPaged_(
num,
x + (size_t)cur * (this->d / 8),
k,
outDistancesSlice.data(),
outIndicesSlice.data());
}
}
void GpuIndexBinaryFlat::reconstruct(
faiss::IndexBinary::idx_t key,
uint8_t* out) const {
DeviceScope scope(binaryFlatConfig_.device);
FAISS_THROW_IF_NOT_MSG(key < this->ntotal, "index out of bounds");
auto stream = resources_->getDefaultStream(binaryFlatConfig_.device);
auto& vecs = data_->getVectorsRef();
auto vec = vecs[key];
fromDevice(vec.data(), out, vecs.getSize(1), stream);
}
} // namespace gpu
} // namespace faiss
| 2bb11d428aae5c61cbfd63a41604e7f5b97199a3.cu | /**
* Copyright (c) Facebook, Inc. and its affiliates.
*
* This source code is licensed under the MIT license found in the
* LICENSE file in the root directory of this source tree.
*/
#include <faiss/gpu/GpuIndexBinaryFlat.h>
#include <faiss/gpu/GpuResources.h>
#include <faiss/gpu/utils/DeviceUtils.h>
#include <faiss/gpu/impl/BinaryFlatIndex.cuh>
#include <faiss/gpu/utils/ConversionOperators.cuh>
#include <faiss/gpu/utils/CopyUtils.cuh>
namespace faiss {
namespace gpu {
/// Default CPU search size for which we use paged copies
constexpr size_t kMinPageSize = (size_t)256 * 1024 * 1024;
GpuIndexBinaryFlat::GpuIndexBinaryFlat(
GpuResourcesProvider* provider,
const faiss::IndexBinaryFlat* index,
GpuIndexBinaryFlatConfig config)
: IndexBinary(index->d),
resources_(provider->getResources()),
binaryFlatConfig_(config) {
FAISS_THROW_IF_NOT_FMT(
this->d % 8 == 0,
"vector dimension (number of bits) "
"must be divisible by 8 (passed %d)",
this->d);
// Flat index doesn't need training
this->is_trained = true;
copyFrom(index);
}
GpuIndexBinaryFlat::GpuIndexBinaryFlat(
GpuResourcesProvider* provider,
int dims,
GpuIndexBinaryFlatConfig config)
: IndexBinary(dims),
resources_(provider->getResources()),
binaryFlatConfig_(std::move(config)) {
FAISS_THROW_IF_NOT_FMT(
this->d % 8 == 0,
"vector dimension (number of bits) "
"must be divisible by 8 (passed %d)",
this->d);
// Flat index doesn't need training
this->is_trained = true;
// Construct index
DeviceScope scope(binaryFlatConfig_.device);
data_.reset(new BinaryFlatIndex(
resources_.get(), this->d, binaryFlatConfig_.memorySpace));
}
GpuIndexBinaryFlat::~GpuIndexBinaryFlat() {}
int GpuIndexBinaryFlat::getDevice() const {
return binaryFlatConfig_.device;
}
std::shared_ptr<GpuResources> GpuIndexBinaryFlat::getResources() {
return resources_;
}
void GpuIndexBinaryFlat::copyFrom(const faiss::IndexBinaryFlat* index) {
DeviceScope scope(binaryFlatConfig_.device);
this->d = index->d;
// GPU code has 32 bit indices
FAISS_THROW_IF_NOT_FMT(
index->ntotal <= (Index::idx_t)std::numeric_limits<int>::max(),
"GPU index only supports up to %zu indices; "
"attempting to copy CPU index with %zu parameters",
(size_t)std::numeric_limits<int>::max(),
(size_t)index->ntotal);
this->ntotal = index->ntotal;
// destroy old first before allocating new
data_.reset();
data_.reset(new BinaryFlatIndex(
resources_.get(), this->d, binaryFlatConfig_.memorySpace));
// The index could be empty
if (index->ntotal > 0) {
data_->add(
index->xb.data(),
index->ntotal,
resources_->getDefaultStream(binaryFlatConfig_.device));
}
}
void GpuIndexBinaryFlat::copyTo(faiss::IndexBinaryFlat* index) const {
DeviceScope scope(binaryFlatConfig_.device);
index->d = this->d;
index->ntotal = this->ntotal;
FAISS_ASSERT(data_);
FAISS_ASSERT(data_->getSize() == this->ntotal);
index->xb.resize(this->ntotal * (this->d / 8));
if (this->ntotal > 0) {
fromDevice(
data_->getVectorsRef(),
index->xb.data(),
resources_->getDefaultStream(binaryFlatConfig_.device));
}
}
void GpuIndexBinaryFlat::add(faiss::IndexBinary::idx_t n, const uint8_t* x) {
DeviceScope scope(binaryFlatConfig_.device);
// To avoid multiple re-allocations, ensure we have enough storage
// available
data_->reserve(n, resources_->getDefaultStream(binaryFlatConfig_.device));
// Due to GPU indexing in int32, we can't store more than this
// number of vectors on a GPU
FAISS_THROW_IF_NOT_FMT(
this->ntotal + n <= (Index::idx_t)std::numeric_limits<int>::max(),
"GPU index only supports up to %zu indices",
(size_t)std::numeric_limits<int>::max());
data_->add(
(const unsigned char*)x,
n,
resources_->getDefaultStream(binaryFlatConfig_.device));
this->ntotal += n;
}
void GpuIndexBinaryFlat::reset() {
DeviceScope scope(binaryFlatConfig_.device);
// Free the underlying memory
data_->reset();
this->ntotal = 0;
}
void GpuIndexBinaryFlat::search(
faiss::IndexBinary::idx_t n,
const uint8_t* x,
faiss::IndexBinary::idx_t k,
int32_t* distances,
faiss::IndexBinary::idx_t* labels) const {
if (n == 0) {
return;
}
// For now, only support <= max int results
FAISS_THROW_IF_NOT_FMT(
n <= (Index::idx_t)std::numeric_limits<int>::max(),
"GPU index only supports up to %zu indices",
(size_t)std::numeric_limits<int>::max());
FAISS_THROW_IF_NOT_FMT(
k <= (Index::idx_t)getMaxKSelection(),
"GPU only supports k <= %d (requested %d)",
getMaxKSelection(),
(int)k); // select limitation
DeviceScope scope(binaryFlatConfig_.device);
auto stream = resources_->getDefaultStream(binaryFlatConfig_.device);
// The input vectors may be too large for the GPU, but we still
// assume that the output distances and labels are not.
// Go ahead and make space for output distances and labels on the
// GPU.
// If we reach a point where all inputs are too big, we can add
// another level of tiling.
auto outDistances = toDeviceTemporary<int32_t, 2>(
resources_.get(),
binaryFlatConfig_.device,
distances,
stream,
{(int)n, (int)k});
// FlatIndex only supports an interface returning int indices
DeviceTensor<int, 2, true> outIntIndices(
resources_.get(),
makeTempAlloc(AllocType::Other, stream),
{(int)n, (int)k});
bool usePaged = false;
if (getDeviceForAddress(x) == -1) {
// It is possible that the user is querying for a vector set size
// `x` that won't fit on the GPU.
// In this case, we will have to handle paging of the data from CPU
// -> GPU.
// Currently, we don't handle the case where the output data won't
// fit on the GPU (e.g., n * k is too large for the GPU memory).
size_t dataSize = (size_t)n * (this->d / 8) * sizeof(uint8_t);
if (dataSize >= kMinPageSize) {
searchFromCpuPaged_(
n, x, k, outDistances.data(), outIntIndices.data());
usePaged = true;
}
}
if (!usePaged) {
searchNonPaged_(n, x, k, outDistances.data(), outIntIndices.data());
}
// Convert and copy int indices out
auto outIndices = toDeviceTemporary<Index::idx_t, 2>(
resources_.get(),
binaryFlatConfig_.device,
labels,
stream,
{(int)n, (int)k});
// Convert int to idx_t
convertTensor<int, Index::idx_t, 2>(stream, outIntIndices, outIndices);
// Copy back if necessary
fromDevice<int32_t, 2>(outDistances, distances, stream);
fromDevice<Index::idx_t, 2>(outIndices, labels, stream);
}
void GpuIndexBinaryFlat::searchNonPaged_(
int n,
const uint8_t* x,
int k,
int32_t* outDistancesData,
int* outIndicesData) const {
Tensor<int32_t, 2, true> outDistances(outDistancesData, {n, k});
Tensor<int, 2, true> outIndices(outIndicesData, {n, k});
auto stream = resources_->getDefaultStream(binaryFlatConfig_.device);
// Make sure arguments are on the device we desire; use temporary
// memory allocations to move it if necessary
auto vecs = toDeviceTemporary<uint8_t, 2>(
resources_.get(),
binaryFlatConfig_.device,
const_cast<uint8_t*>(x),
stream,
{n, (int)(this->d / 8)});
data_->query(vecs, k, outDistances, outIndices);
}
void GpuIndexBinaryFlat::searchFromCpuPaged_(
int n,
const uint8_t* x,
int k,
int32_t* outDistancesData,
int* outIndicesData) const {
Tensor<int32_t, 2, true> outDistances(outDistancesData, {n, k});
Tensor<int, 2, true> outIndices(outIndicesData, {n, k});
auto vectorSize = sizeof(uint8_t) * (this->d / 8);
// Just page without overlapping copy with compute (as GpuIndexFlat does)
int batchSize = utils::nextHighestPowerOf2(
(int)((size_t)kMinPageSize / vectorSize));
for (int cur = 0; cur < n; cur += batchSize) {
int num = std::min(batchSize, n - cur);
auto outDistancesSlice = outDistances.narrowOutermost(cur, num);
auto outIndicesSlice = outIndices.narrowOutermost(cur, num);
searchNonPaged_(
num,
x + (size_t)cur * (this->d / 8),
k,
outDistancesSlice.data(),
outIndicesSlice.data());
}
}
void GpuIndexBinaryFlat::reconstruct(
faiss::IndexBinary::idx_t key,
uint8_t* out) const {
DeviceScope scope(binaryFlatConfig_.device);
FAISS_THROW_IF_NOT_MSG(key < this->ntotal, "index out of bounds");
auto stream = resources_->getDefaultStream(binaryFlatConfig_.device);
auto& vecs = data_->getVectorsRef();
auto vec = vecs[key];
fromDevice(vec.data(), out, vecs.getSize(1), stream);
}
} // namespace gpu
} // namespace faiss
|
b29c0068e93bb8a03a4df7a01d83e18f72ed3cc9.hip | // !!! This is a file automatically generated by hipify!!!
// System includes
#include <stdio.h>
#include <assert.h>
#include <malloc.h>
#include <math.h>
#include <stdlib.h>
// CUDA runtime
#include <hip/hip_runtime.h>
// Helper functions and utilities to work with CUDA
#include "helper_functions.h"
#include "helper_cuda.h"
// setting the number of trials in the monte carlo simulation:
#ifndef NUMTRIALS
#define NUMTRIALS ( 1024*1024 )
#endif
#ifndef BLOCKSIZE
#define BLOCKSIZE 32 // number of threads per block
#endif
#define NUMBLOCKS ( NUMTRIALS / BLOCKSIZE )
// ranges for the random numbers:
const float XCMIN = 0.0;
const float XCMAX = 2.0;
const float YCMIN = 0.0;
const float YCMAX = 2.0;
const float RMIN = 0.5;
const float RMAX = 2.0;
// function prototypes:
float Ranf( float, float );
int Ranf( int, int );
void TimeOfDaySeed( );
__global__ void MonteCarlo( float *Xcs, float *Ycs, float *Rs, int *Hits )
{
unsigned int wgNumber = blockIdx.x;
unsigned int wgDimension = blockDim.x;
unsigned int threadNum = threadIdx.x;
unsigned int gid = wgNumber*wgDimension + threadNum;
// all the monte carlo stuff goes in here
// if we make it all the way through, then Hits[gid] = 1
// randomize the location and radius of the circle:
float xc = Xcs[gid];
float yc = Ycs[gid];
float r = Rs[gid];
float tn = tanf( (float)( (M_PI/180.) * 30. ) );
Hits[gid] = 0;
// solve for the intersection using the quadratic formula:
float a = 1. + tn * tn;
float b = -2. * (xc + yc * tn);
float c = xc * xc + yc * yc - r * r;
float d = b * b - 4. * a * c;
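	// How a, b, c are obtained: the beam leaves the origin along y = x * tan(30 deg),
	// i.e. the points (t, t*tn) for t >= 0.  Substituting into the circle
	// (x - xc)^2 + (y - yc)^2 = r^2 gives
	//   (1 + tn^2)*t^2 - 2*(xc + yc*tn)*t + (xc^2 + yc^2 - r^2) = 0,
	// so a, b, c above are its coefficients and d is the discriminant.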
// cascading if-statements:
// if you used "continue;" in project #1, change to this style because,
// if there is no for-loop, then there is nowhere to continue to
if (d >= 0)
{
// hits the circle:
// get the first intersection:
d = sqrt(d);
float t1 = (-b + d) / (2. * a); // time to intersect the circle
float t2 = (-b - d) / (2. * a); // time to intersect the circle
float tmin = t1 < t2 ? t1 : t2; // only care about the first intersection
//If tmin is less than 0., then the circle completely engulfs the laser pointer. (Case B)
//That trial cannot produce a hit, so only follow the bounce when tmin >= 0.
if (tmin >= 0)
{
// where does it intersect the circle?
float xcir = tmin;
float ycir = tmin * tn;
// get the unitized normal vector at the point of intersection:
float nx = xcir - xc;
float ny = ycir - yc;
float nxy = sqrt(nx * nx + ny * ny);
nx /= nxy; // unit vector
ny /= nxy; // unit vector
// get the unitized incoming vector:
float inx = xcir - 0.;
float iny = ycir - 0.;
float in = sqrt(inx * inx + iny * iny);
inx /= in; // unit vector
iny /= in; // unit vector
// get the outgoing (bounced) vector:
float dot = inx * nx + iny * ny;
float outx = inx - 2. * nx * dot; // angle of reflection = angle of incidence
float outy = iny - 2. * ny * dot; // angle of reflection = angle of incidence
//If t is less than 0., then the reflected beam went up instead of down and misses the plate.
// find out if it hits the infinite plate:
float t = ( 0. - ycir ) / outy;
if( t >= 0. )
{
Hits[gid] = 1;
}
}
}
}
// main program:
int
main( int argc, char* argv[ ] )
{
TimeOfDaySeed( );
int dev = findCudaDevice(argc, (const char **)argv);
// allocate host memory:
float *hXcs = new float[NUMTRIALS];
float *hYcs = new float[NUMTRIALS];
float * hRs = new float[NUMTRIALS];
int *hHits = new int[NUMTRIALS];
// fill the random-value arrays:
for( int n = 0; n < NUMTRIALS; n++ )
{
hXcs[n] = Ranf( XCMIN, XCMAX );
hYcs[n] = Ranf( YCMIN, YCMAX );
hRs[n] = Ranf( RMIN, RMAX );
}
// allocate device memory:
float *dXcs, *dYcs, *dRs;
int *dHits;
dim3 dimsXcs( NUMTRIALS, 1, 1 );
dim3 dimsYcs( NUMTRIALS, 1, 1 );
dim3 dimsRs( NUMTRIALS, 1, 1 );
dim3 dimsHits( NUMTRIALS, 1, 1 );
hipError_t status;
status = hipMalloc( (void **)(&dXcs), NUMTRIALS*sizeof(float) );
checkCudaErrors( status );
status = hipMalloc( (void **)(&dYcs), NUMTRIALS*sizeof(float) );
checkCudaErrors( status );
status = hipMalloc( (void **)(&dRs), NUMTRIALS*sizeof(float) );
checkCudaErrors( status );
status = hipMalloc( (void **)(&dHits), NUMTRIALS *sizeof(int) );
checkCudaErrors( status );
// copy host memory to the device:
status = hipMemcpy( dXcs, hXcs, NUMTRIALS*sizeof(float), hipMemcpyHostToDevice );
checkCudaErrors( status );
status = hipMemcpy( dYcs, hYcs, NUMTRIALS*sizeof(float), hipMemcpyHostToDevice );
checkCudaErrors( status );
status = hipMemcpy( dRs, hRs, NUMTRIALS*sizeof(float), hipMemcpyHostToDevice );
checkCudaErrors( status );
// setup the execution parameters:
dim3 threads(BLOCKSIZE, 1, 1 );
dim3 grid(NUMBLOCKS, 1, 1 );
// create and start timer
hipDeviceSynchronize( );
// allocate CUDA events that we'll use for timing:
hipEvent_t start, stop;
status = hipEventCreate( &start );
checkCudaErrors( status );
status = hipEventCreate( &stop );
checkCudaErrors( status );
// record the start event:
status = hipEventRecord( start, NULL );
checkCudaErrors( status );
// execute the kernel:
hipLaunchKernelGGL(( MonteCarlo), dim3(grid), dim3(threads) , 0, 0, dXcs, dYcs, dRs, dHits );
// record the stop event:
status = hipEventRecord( stop, NULL );
checkCudaErrors( status );
// wait for the stop event to complete:
status = hipEventSynchronize( stop );
checkCudaErrors( status );
float msecTotal = 0.0f;
status = hipEventElapsedTime( &msecTotal, start, stop );
checkCudaErrors( status );
// compute and print the performance
double secondsTotal = 0.001 * (double)msecTotal;
double trialsPerSecond = (float)NUMTRIALS / secondsTotal;
double megaTrialsPerSecond = trialsPerSecond / 1000000.;
fprintf( stderr, "Number of Trials = %10d, BlockSize= %10d MegaTrials/Second = %10.4lf\n", NUMTRIALS, BLOCKSIZE, megaTrialsPerSecond );
// copy result from the device to the host:
status = hipMemcpy( hHits, dHits, NUMTRIALS *sizeof(int), hipMemcpyDeviceToHost );
checkCudaErrors( status );
hipDeviceSynchronize( );
// compute the probability:
int numHits = 0;
for(int i = 0; i < NUMTRIALS; i++ )
{
numHits += hHits[i];
}
float probability = 100.f * (float)numHits / (float)NUMTRIALS;
fprintf(stderr, "\nProbability = %6.3f %%\n", probability );
// clean up memory:
delete [ ] hXcs;
delete [ ] hYcs;
delete [ ] hRs;
delete [ ] hHits;
status = hipFree( dXcs );
status = hipFree( dYcs );
status = hipFree( dRs );
status = hipFree( dHits );
checkCudaErrors( status );
return 0;
}
float
Ranf( float low, float high )
{
float r = (float) rand(); // 0 - RAND_MAX
float t = r / (float) RAND_MAX; // 0. - 1.
return low + t * ( high - low );
}
int
Ranf( int ilow, int ihigh )
{
float low = (float)ilow;
float high = ceil( (float)ihigh );
return (int) Ranf(low,high);
}
void
TimeOfDaySeed( )
{
struct tm y2k = { 0 };
y2k.tm_hour = 0; y2k.tm_min = 0; y2k.tm_sec = 0;
y2k.tm_year = 100; y2k.tm_mon = 0; y2k.tm_mday = 1;
time_t timer;
time( &timer );
double seconds = difftime( timer, mktime(&y2k) );
unsigned int seed = (unsigned int)( 1000.*seconds ); // milliseconds
srand( seed );
}
| b29c0068e93bb8a03a4df7a01d83e18f72ed3cc9.cu | // System includes
#include <stdio.h>
#include <assert.h>
#include <malloc.h>
#include <math.h>
#include <stdlib.h>
// CUDA runtime
#include <cuda_runtime.h>
// Helper functions and utilities to work with CUDA
#include "helper_functions.h"
#include "helper_cuda.h"
// setting the number of trials in the monte carlo simulation:
#ifndef NUMTRIALS
#define NUMTRIALS ( 1024*1024 )
#endif
#ifndef BLOCKSIZE
#define BLOCKSIZE 32 // number of threads per block
#endif
#define NUMBLOCKS ( NUMTRIALS / BLOCKSIZE )
// ranges for the random numbers:
const float XCMIN = 0.0;
const float XCMAX = 2.0;
const float YCMIN = 0.0;
const float YCMAX = 2.0;
const float RMIN = 0.5;
const float RMAX = 2.0;
// function prototypes:
float Ranf( float, float );
int Ranf( int, int );
void TimeOfDaySeed( );
__global__ void MonteCarlo( float *Xcs, float *Ycs, float *Rs, int *Hits )
{
unsigned int wgNumber = blockIdx.x;
unsigned int wgDimension = blockDim.x;
unsigned int threadNum = threadIdx.x;
unsigned int gid = wgNumber*wgDimension + threadNum;
// all the monte carlo stuff goes in here
// if we make it all the way through, then Hits[gid] = 1
// randomize the location and radius of the circle:
float xc = Xcs[gid];
float yc = Ycs[gid];
float r = Rs[gid];
float tn = tanf( (float)( (M_PI/180.) * 30. ) );
Hits[gid] = 0;
// solve for the intersection using the quadratic formula:
float a = 1. + tn * tn;
float b = -2. * (xc + yc * tn);
float c = xc * xc + yc * yc - r * r;
float d = b * b - 4. * a * c;
// cascading if-statements:
// if you used "continue;" in project #1, change to this style because,
// if there is no for-loop, then there is nowhere to continue to
if (d >= 0)
{
// hits the circle:
// get the first intersection:
d = sqrt(d);
float t1 = (-b + d) / (2. * a); // time to intersect the circle
float t2 = (-b - d) / (2. * a); // time to intersect the circle
float tmin = t1 < t2 ? t1 : t2; // only care about the first intersection
//If tmin is less than 0., then the circle completely engulfs the laser pointer. (Case B)
//That trial cannot produce a hit, so only follow the bounce when tmin >= 0.
if (tmin >= 0)
{
// where does it intersect the circle?
float xcir = tmin;
float ycir = tmin * tn;
// get the unitized normal vector at the point of intersection:
float nx = xcir - xc;
float ny = ycir - yc;
float nxy = sqrt(nx * nx + ny * ny);
nx /= nxy; // unit vector
ny /= nxy; // unit vector
// get the unitized incoming vector:
float inx = xcir - 0.;
float iny = ycir - 0.;
float in = sqrt(inx * inx + iny * iny);
inx /= in; // unit vector
iny /= in; // unit vector
// get the outgoing (bounced) vector:
float dot = inx * nx + iny * ny;
float outx = inx - 2. * nx * dot; // angle of reflection = angle of incidence
float outy = iny - 2. * ny * dot; // angle of reflection = angle of incidence
//If t is less than 0., then the reflected beam went up instead of down and misses the plate.
// find out if it hits the infinite plate:
float t = ( 0. - ycir ) / outy;
if( t >= 0. )
{
Hits[gid] = 1;
}
}
}
}
// main program:
int
main( int argc, char* argv[ ] )
{
TimeOfDaySeed( );
int dev = findCudaDevice(argc, (const char **)argv);
// allocate host memory:
float *hXcs = new float[NUMTRIALS];
float *hYcs = new float[NUMTRIALS];
float * hRs = new float[NUMTRIALS];
int *hHits = new int[NUMTRIALS];
// fill the random-value arrays:
for( int n = 0; n < NUMTRIALS; n++ )
{
hXcs[n] = Ranf( XCMIN, XCMAX );
hYcs[n] = Ranf( YCMIN, YCMAX );
hRs[n] = Ranf( RMIN, RMAX );
}
// allocate device memory:
float *dXcs, *dYcs, *dRs;
int *dHits;
dim3 dimsXcs( NUMTRIALS, 1, 1 );
dim3 dimsYcs( NUMTRIALS, 1, 1 );
dim3 dimsRs( NUMTRIALS, 1, 1 );
dim3 dimsHits( NUMTRIALS, 1, 1 );
cudaError_t status;
status = cudaMalloc( (void **)(&dXcs), NUMTRIALS*sizeof(float) );
checkCudaErrors( status );
status = cudaMalloc( (void **)(&dYcs), NUMTRIALS*sizeof(float) );
checkCudaErrors( status );
status = cudaMalloc( (void **)(&dRs), NUMTRIALS*sizeof(float) );
checkCudaErrors( status );
status = cudaMalloc( (void **)(&dHits), NUMTRIALS *sizeof(int) );
checkCudaErrors( status );
// copy host memory to the device:
status = cudaMemcpy( dXcs, hXcs, NUMTRIALS*sizeof(float), cudaMemcpyHostToDevice );
checkCudaErrors( status );
status = cudaMemcpy( dYcs, hYcs, NUMTRIALS*sizeof(float), cudaMemcpyHostToDevice );
checkCudaErrors( status );
status = cudaMemcpy( dRs, hRs, NUMTRIALS*sizeof(float), cudaMemcpyHostToDevice );
checkCudaErrors( status );
// setup the execution parameters:
dim3 threads(BLOCKSIZE, 1, 1 );
dim3 grid(NUMBLOCKS, 1, 1 );
// create and start timer
cudaDeviceSynchronize( );
// allocate CUDA events that we'll use for timing:
cudaEvent_t start, stop;
status = cudaEventCreate( &start );
checkCudaErrors( status );
status = cudaEventCreate( &stop );
checkCudaErrors( status );
// record the start event:
status = cudaEventRecord( start, NULL );
checkCudaErrors( status );
// execute the kernel:
MonteCarlo<<< grid, threads >>>( dXcs, dYcs, dRs, dHits );
// record the stop event:
status = cudaEventRecord( stop, NULL );
checkCudaErrors( status );
// wait for the stop event to complete:
status = cudaEventSynchronize( stop );
checkCudaErrors( status );
float msecTotal = 0.0f;
status = cudaEventElapsedTime( &msecTotal, start, stop );
checkCudaErrors( status );
// compute and print the performance
double secondsTotal = 0.001 * (double)msecTotal;
double trialsPerSecond = (float)NUMTRIALS / secondsTotal;
double megaTrialsPerSecond = trialsPerSecond / 1000000.;
fprintf( stderr, "Number of Trials = %10d, BlockSize= %10d MegaTrials/Second = %10.4lf\n", NUMTRIALS, BLOCKSIZE, megaTrialsPerSecond );
// copy result from the device to the host:
status = cudaMemcpy( hHits, dHits, NUMTRIALS *sizeof(int), cudaMemcpyDeviceToHost );
checkCudaErrors( status );
cudaDeviceSynchronize( );
// compute the probability:
int numHits = 0;
for(int i = 0; i < NUMTRIALS; i++ )
{
numHits += hHits[i];
}
float probability = 100.f * (float)numHits / (float)NUMTRIALS;
fprintf(stderr, "\nProbability = %6.3f %%\n", probability );
// clean up memory:
delete [ ] hXcs;
delete [ ] hYcs;
delete [ ] hRs;
delete [ ] hHits;
status = cudaFree( dXcs );
status = cudaFree( dYcs );
status = cudaFree( dRs );
status = cudaFree( dHits );
checkCudaErrors( status );
return 0;
}
float
Ranf( float low, float high )
{
float r = (float) rand(); // 0 - RAND_MAX
float t = r / (float) RAND_MAX; // 0. - 1.
return low + t * ( high - low );
}
int
Ranf( int ilow, int ihigh )
{
float low = (float)ilow;
float high = ceil( (float)ihigh );
return (int) Ranf(low,high);
}
void
TimeOfDaySeed( )
{
struct tm y2k = { 0 };
y2k.tm_hour = 0; y2k.tm_min = 0; y2k.tm_sec = 0;
y2k.tm_year = 100; y2k.tm_mon = 0; y2k.tm_mday = 1;
time_t timer;
time( &timer );
double seconds = difftime( timer, mktime(&y2k) );
unsigned int seed = (unsigned int)( 1000.*seconds ); // milliseconds
srand( seed );
}
|
02fe7a43064c9c2283dc9065a345e04efe20beca.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include "MDSystem_interface.h"
#include "common.h"
#include "BoxGeometry.h"
#include "MDSystem.h"
#include "RandomGenerator.h"
#include "Auxiliary.h"
#include "NeighborList_interface.h"
#include"Statistic.h"
#include "Integrator_interface.h"
#include "InteractionEngine_interface.h"
#include "tmp.h"
#include "Reshuffle_interface.h"
#include "Displacement_interface.h"
#include "AssignRCut.h"
#include "Topology.h"
#include "SystemBondedInteraction.h"
#include "BondInteraction.h"
#include "NonBondedInteraction.h"
#include "PressureCorrection.h"
#include <sys/types.h>
#include <sys/stat.h>
#include <fcntl.h>
#include <fftw3.h>
// #define NThreadsPerBlockCell 32
// #define NThreadsPerBlockAtom 4
#define NThreadsPerBlockCell 96
#define NThreadsPerBlockAtom 96
#include "DensityProfile.h"
int main(int argc, char * argv[])
{
IndexType nstep = 10000;
IndexType confFeq = 2000;
IndexType thermoFeq = 100;
ScalorType dt = 0.005;
ScalorType rcut = 5.0;
ScalorType nlistExten = 0.49;
ScalorType refT = 1.20;
ScalorType tauT = 1.0;
char * filename;
IndexType densityProfileSamplingFeq = 40;
IndexType rcutAssignFeq = 40;
IndexType rcutUpdateFeq = 2000;
double refh = 1.0;
double rcmin = 03.0;
double rcmax = 10.0;
double rcstep = 0.5;
double targetPrec = 0.012;
if (argc != 4){
printf ("Usage:\n%s conf.gro nstep device\n", argv[0]);
return 1;
}
if (argc != 1){
nstep = atoi(argv[2]);
filename = argv[1];
}
printf ("# setting device to %d\n", atoi(argv[3]));
hipSetDevice (atoi(argv[3]));
checkCUDAError ("set device");
MDSystem sys;
sys.initConfig(filename);
Topology::System sysTop;
Topology::Molecule mol;
mol.pushAtom (Topology::Atom (1.0, 0.0, 0));
LennardJones6_12Parameter ljparam;
ljparam.reinit (1.f, 1.f, 0.f, rcut);
sysTop.addNonBondedInteraction (Topology::NonBondedInteraction(0, 0, ljparam));
sysTop.addMolecules (mol, sys.hdata.numAtom);
sys.initTopology (sysTop);
sys.initDeviceData ();
DensityProfile_PiecewiseConst dp;
printf ("# init DensityProfile_PiecewiseConst\n");
dp.reinit (sys.box.size.x, sys.box.size.y, sys.box.size.z, refh);
AdaptRCut arc;
printf ("# init AdaptRCut\n");
arc.reinit (rcmin, rcmax, rcstep, dp);
AssignRCut assign_rcut;
printf ("# init AssignRCut\n");
assign_rcut.reinit (sys, arc, NThreadsPerBlockAtom);
assign_rcut.uniform (rcut);
assign_rcut.print_x ("rcut.x.out");
assign_rcut.assign (sys);
PressureCorrection pc (arc, dp);
ScalorType pcxx, pcyy, pczz;
pcxx = pcyy = pczz = 0.;
SystemNonBondedInteraction sysNbInter;
sysNbInter.reinit (sysTop);
ScalorType energyCorr = sysNbInter.energyCorrection ();
ScalorType pressureCorr = sysNbInter.pressureCorrection ();
ScalorType maxrcut = sysNbInter.maxRcut();
ScalorType rlist = maxrcut + nlistExten;
CellList clist (sys, rcmax+nlistExten, NThreadsPerBlockCell, NThreadsPerBlockAtom);
CellList clist_resh (sys, rcmin, NThreadsPerBlockCell, NThreadsPerBlockAtom);
NeighborList nlist (sysNbInter, sys, rlist, nlistExten, NThreadsPerBlockAtom, 4.f);
sys.normalizeDeviceData ();
clist.rebuild (sys, NULL);
clist_resh.rebuild (sys, NULL);
nlist.rebuild (sys, clist, NULL);
Displacement_max disp (sys, NThreadsPerBlockAtom);
disp.recordCoord (sys);
MDStatistic st(sys);
TranslationalFreedomRemover tfremover (sys, NThreadsPerBlockAtom);
InteractionEngine inter (sys, NThreadsPerBlockAtom);
inter.registNonBondedInteraction (sysNbInter);
MDTimer timer;
unsigned i;
ScalorType seed = 1;
RandomGenerator_MT19937::init_genrand (seed);
VelocityVerlet inte_vv (sys, NThreadsPerBlockAtom);
VelocityRescale inte_vr (sys, NThreadsPerBlockAtom, refT, 0.1);
NoseHoover_Chains2 nhc;
nhc.reinit (sys, NThreadsPerBlockAtom, refT, tauT);
Reshuffle resh (sys);
timer.tic(mdTimeTotal);
if (resh.calIndexTable (clist_resh, &timer)){
sys.reshuffle (resh.indexTable, sys.hdata.numAtom, &timer);
clist.reshuffle (resh.indexTable, sys.hdata.numAtom, &timer);
clist_resh.reshuffle (resh.indexTable, sys.hdata.numAtom, &timer);
nlist.reshuffle (resh.indexTable, sys.hdata.numAtom, &timer);
disp.reshuffle (resh.indexTable, sys.hdata.numAtom, &timer);
}
printf ("# prepare ok, start to run\n");
sys.recoverDeviceData (&timer);
sys.updateHostFromRecovered (&timer);
sys.writeHostDataGro ("confstart.gro", 0, 0.f, &timer);
dp.init_write ("density.dtj");
assign_rcut.init_write ("rcut.rtj");
printf ("# prepare ok, start to run\n");
printf ("#* 1 2 3 4 5 6 7 8 9 10 11 12 13 14\n");
printf ("#* nstep time nonBondedE kineticE temperature totalE NHC_Hamiltonian pressureXX pressureYY pressureZZ s_tension pcxx pcyy tc\n");
try{
sys.initWriteXtc ("traj.xtc");
sys.recoverDeviceData (&timer);
sys.updateHostFromRecovered (&timer);
sys.writeHostDataXtc (0, 0*dt, &timer);
for (i = 0; i < nstep; ++i){
if (i%1 == 0){
tfremover.remove (sys, &timer);
}
nhc.operator_L (0.5 * dt, sys, &timer);
inte_vv.step1 (sys, dt, &timer);
st.clearDevice();
inter.clearInteraction (sys);
ScalorType maxdr = disp.calMaxDisplacemant (sys, &timer);
if (maxdr > nlistExten * 0.5){
// printf ("# Rebuild at step %09i ... ", i+1);
// fflush(stdout);
// // rebuild
sys.normalizeDeviceData (&timer);
disp.recordCoord (sys);
clist.rebuild (sys, &timer);
clist_resh.rebuild (sys, &timer);
nlist.rebuild (sys, clist, &timer);
// printf ("done\n");
// fflush(stdout);
}
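      // Why maxdr > nlistExten * 0.5 forces a rebuild: the neighbor list was built with
      // radius rlist = maxrcut + nlistExten, so as long as every particle has moved less
      // than half the skin since the last build, no pair can have crossed from outside
      // rlist to inside the cutoff; once the largest displacement exceeds nlistExten/2
      // the list may be stale, so the coordinates are renormalized and the cell and
      // neighbor lists are rebuilt.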
inter.applyNonBondedInteraction (sys, nlist, st, NULL, &timer);
if ((i) % rcutAssignFeq == 0){
timer.tic (mdTimeAdaptRCut);
assign_rcut.assign (sys);
timer.toc (mdTimeAdaptRCut);
}
inte_vv.step2 (sys, dt, &timer);
if ((i+1) % thermoFeq == 0){
nhc.operator_L (0.5 * dt, sys, st, &timer);
}
else {
nhc.operator_L (0.5 * dt, sys, &timer);
}
if ((i+1) % thermoFeq == 0){
timer.tic (mdTimeDataIO);
st.updateHost ();
ScalorType px = st.pressureXX (sys.box);
ScalorType py = st.pressureYY (sys.box);
ScalorType pz = st.pressureZZ (sys.box);
printf ("%09d %05e %.5e %.5e %.5e %.5e %.5e %.5e %.5e %.5e %.5e %.5e %.5e %.5e %.2e\n",
(i+1),
(i+1) * dt,
st.nonBondedEnergy(),
st.kineticEnergy(),
st.kineticEnergy() * 2. / 3. / (double (sys.hdata.numAtom) - 3.),
st.nonBondedEnergy() +
st.kineticEnergy(),
st.nonBondedEnergy() +
st.kineticEnergy() +
nhc.HamiltonianContribution (),
px, py, pz,
(px - (py + pz) * 0.5) * sys.box.size.x * 0.5,
pcxx,
pcyy,
(pcxx - (pcyy + pczz) * 0.5) * sys.box.size.x * 0.5,
double (nlist.calSumNeighbor ())
);
fflush(stdout);
timer.toc (mdTimeDataIO);
}
if ((i+1) % densityProfileSamplingFeq == 0) {
timer.tic (mdTimeDensityProfile);
sys.updateHostFromDevice (NULL);
dp.deposite (sys.hdata.coord, sys.hdata.numAtom);
timer.toc (mdTimeDensityProfile);
}
if ((i+1) % rcutUpdateFeq == 0) {
// printf ("# update rcut\n");
timer.tic (mdTimeDensityProfile);
dp.calculate ();
dp.print_x ("density.x.out");
timer.toc (mdTimeDensityProfile);
timer.tic (mdTimeAdaptRCut);
arc.calError (dp);
arc.calRCut (targetPrec);
arc.print_x ("error.x.out");
assign_rcut.getRCut (arc);
assign_rcut.print_x ("rcut.x.out");
pc.correction (arc, dp);
pcxx = pc.pxx;
pcyy = pc.pyy;
pczz = pc.pzz;
timer.toc (mdTimeAdaptRCut);
if (i != nstep - 1) dp.clearData ();
}
if ((i+1) % confFeq == 0){
// printf ("write conf\n");
sys.recoverDeviceData (&timer);
sys.updateHostFromRecovered (&timer);
sys.writeHostDataXtc (i+1, (i+1)*dt, &timer);
dp.write ((i+1) * dt);
assign_rcut.write ((i+1) * dt);
}
if ((i+1) % 100 == 0){
if (resh.calIndexTable (clist_resh, &timer)){
sys.reshuffle (resh.indexTable, sys.hdata.numAtom, &timer);
clist.reshuffle (resh.indexTable, sys.hdata.numAtom, &timer);
clist_resh.reshuffle (resh.indexTable, sys.hdata.numAtom, &timer);
nlist.reshuffle (resh.indexTable, sys.hdata.numAtom, &timer);
disp.reshuffle (resh.indexTable, sys.hdata.numAtom, &timer);
}
}
}
sys.endWriteXtc();
sys.recoverDeviceData (&timer);
sys.updateHostFromRecovered (&timer);
sys.writeHostDataGro ("confout.gro", nstep, nstep*dt, &timer);
timer.toc(mdTimeTotal);
timer.printRecord (stderr);
}
catch (MDExcptCuda & e){
// resh.recoverMDDataToHost (sys, &timer);
// sys.writeHostDataXtc (i+1, (i+1)*dt, &timer);
timer.toc(mdTimeTotal);
timer.printRecord (stderr);
return 1;
}
catch (MDException &e){
fprintf (stderr, "%s\n", e.what());
return 1;
}
dp.end_write();
assign_rcut.end_write();
dp.save ("density.save");
arc.save_rc ("rcut.save");
arc.print_error_avg (dp, "a.error.x.out");
arc.print_rc_avg ("a.rcut.x.out");
return 0;
}
| 02fe7a43064c9c2283dc9065a345e04efe20beca.cu | #include <stdio.h>
#include "MDSystem_interface.h"
#include "common.h"
#include "BoxGeometry.h"
#include "MDSystem.h"
#include "RandomGenerator.h"
#include "Auxiliary.h"
#include "NeighborList_interface.h"
#include"Statistic.h"
#include "Integrator_interface.h"
#include "InteractionEngine_interface.h"
#include "tmp.h"
#include "Reshuffle_interface.h"
#include "Displacement_interface.h"
#include "AssignRCut.h"
#include "Topology.h"
#include "SystemBondedInteraction.h"
#include "BondInteraction.h"
#include "NonBondedInteraction.h"
#include "PressureCorrection.h"
#include <sys/types.h>
#include <sys/stat.h>
#include <fcntl.h>
#include <fftw3.h>
// #define NThreadsPerBlockCell 32
// #define NThreadsPerBlockAtom 4
#define NThreadsPerBlockCell 96
#define NThreadsPerBlockAtom 96
#include "DensityProfile.h"
int main(int argc, char * argv[])
{
IndexType nstep = 10000;
IndexType confFeq = 2000;
IndexType thermoFeq = 100;
ScalorType dt = 0.005;
ScalorType rcut = 5.0;
ScalorType nlistExten = 0.49;
ScalorType refT = 1.20;
ScalorType tauT = 1.0;
char * filename;
IndexType densityProfileSamplingFeq = 40;
IndexType rcutAssignFeq = 40;
IndexType rcutUpdateFeq = 2000;
double refh = 1.0;
double rcmin = 03.0;
double rcmax = 10.0;
double rcstep = 0.5;
double targetPrec = 0.012;
if (argc != 4){
printf ("Usage:\n%s conf.gro nstep device\n", argv[0]);
return 1;
}
if (argc != 1){
nstep = atoi(argv[2]);
filename = argv[1];
}
printf ("# setting device to %d\n", atoi(argv[3]));
cudaSetDevice (atoi(argv[3]));
checkCUDAError ("set device");
MDSystem sys;
sys.initConfig(filename);
Topology::System sysTop;
Topology::Molecule mol;
mol.pushAtom (Topology::Atom (1.0, 0.0, 0));
LennardJones6_12Parameter ljparam;
ljparam.reinit (1.f, 1.f, 0.f, rcut);
sysTop.addNonBondedInteraction (Topology::NonBondedInteraction(0, 0, ljparam));
sysTop.addMolecules (mol, sys.hdata.numAtom);
sys.initTopology (sysTop);
sys.initDeviceData ();
DensityProfile_PiecewiseConst dp;
printf ("# init DensityProfile_PiecewiseConst\n");
dp.reinit (sys.box.size.x, sys.box.size.y, sys.box.size.z, refh);
AdaptRCut arc;
printf ("# init AdaptRCut\n");
arc.reinit (rcmin, rcmax, rcstep, dp);
AssignRCut assign_rcut;
printf ("# init AssignRCut\n");
assign_rcut.reinit (sys, arc, NThreadsPerBlockAtom);
assign_rcut.uniform (rcut);
assign_rcut.print_x ("rcut.x.out");
assign_rcut.assign (sys);
PressureCorrection pc (arc, dp);
ScalorType pcxx, pcyy, pczz;
pcxx = pcyy = pczz = 0.;
SystemNonBondedInteraction sysNbInter;
sysNbInter.reinit (sysTop);
ScalorType energyCorr = sysNbInter.energyCorrection ();
ScalorType pressureCorr = sysNbInter.pressureCorrection ();
ScalorType maxrcut = sysNbInter.maxRcut();
ScalorType rlist = maxrcut + nlistExten;
CellList clist (sys, rcmax+nlistExten, NThreadsPerBlockCell, NThreadsPerBlockAtom);
CellList clist_resh (sys, rcmin, NThreadsPerBlockCell, NThreadsPerBlockAtom);
NeighborList nlist (sysNbInter, sys, rlist, nlistExten, NThreadsPerBlockAtom, 4.f);
sys.normalizeDeviceData ();
clist.rebuild (sys, NULL);
clist_resh.rebuild (sys, NULL);
nlist.rebuild (sys, clist, NULL);
Displacement_max disp (sys, NThreadsPerBlockAtom);
disp.recordCoord (sys);
MDStatistic st(sys);
TranslationalFreedomRemover tfremover (sys, NThreadsPerBlockAtom);
InteractionEngine inter (sys, NThreadsPerBlockAtom);
inter.registNonBondedInteraction (sysNbInter);
MDTimer timer;
unsigned i;
ScalorType seed = 1;
RandomGenerator_MT19937::init_genrand (seed);
VelocityVerlet inte_vv (sys, NThreadsPerBlockAtom);
VelocityRescale inte_vr (sys, NThreadsPerBlockAtom, refT, 0.1);
NoseHoover_Chains2 nhc;
nhc.reinit (sys, NThreadsPerBlockAtom, refT, tauT);
Reshuffle resh (sys);
timer.tic(mdTimeTotal);
if (resh.calIndexTable (clist_resh, &timer)){
sys.reshuffle (resh.indexTable, sys.hdata.numAtom, &timer);
clist.reshuffle (resh.indexTable, sys.hdata.numAtom, &timer);
clist_resh.reshuffle (resh.indexTable, sys.hdata.numAtom, &timer);
nlist.reshuffle (resh.indexTable, sys.hdata.numAtom, &timer);
disp.reshuffle (resh.indexTable, sys.hdata.numAtom, &timer);
}
printf ("# prepare ok, start to run\n");
sys.recoverDeviceData (&timer);
sys.updateHostFromRecovered (&timer);
sys.writeHostDataGro ("confstart.gro", 0, 0.f, &timer);
dp.init_write ("density.dtj");
assign_rcut.init_write ("rcut.rtj");
printf ("# prepare ok, start to run\n");
printf ("#* 1 2 3 4 5 6 7 8 9 10 11 12 13 14\n");
printf ("#* nstep time nonBondedE kineticE temperature totalE NHC_Hamiltonian pressureXX pressureYY pressureZZ s_tension pcxx pcyy tc\n");
try{
sys.initWriteXtc ("traj.xtc");
sys.recoverDeviceData (&timer);
sys.updateHostFromRecovered (&timer);
sys.writeHostDataXtc (0, 0*dt, &timer);
for (i = 0; i < nstep; ++i){
if (i%1 == 0){
tfremover.remove (sys, &timer);
}
nhc.operator_L (0.5 * dt, sys, &timer);
inte_vv.step1 (sys, dt, &timer);
st.clearDevice();
inter.clearInteraction (sys);
ScalorType maxdr = disp.calMaxDisplacemant (sys, &timer);
if (maxdr > nlistExten * 0.5){
// printf ("# Rebuild at step %09i ... ", i+1);
// fflush(stdout);
// // rebuild
sys.normalizeDeviceData (&timer);
disp.recordCoord (sys);
clist.rebuild (sys, &timer);
clist_resh.rebuild (sys, &timer);
nlist.rebuild (sys, clist, &timer);
// printf ("done\n");
// fflush(stdout);
}
inter.applyNonBondedInteraction (sys, nlist, st, NULL, &timer);
if ((i) % rcutAssignFeq == 0){
timer.tic (mdTimeAdaptRCut);
assign_rcut.assign (sys);
timer.toc (mdTimeAdaptRCut);
}
inte_vv.step2 (sys, dt, &timer);
if ((i+1) % thermoFeq == 0){
nhc.operator_L (0.5 * dt, sys, st, &timer);
}
else {
nhc.operator_L (0.5 * dt, sys, &timer);
}
if ((i+1) % thermoFeq == 0){
timer.tic (mdTimeDataIO);
st.updateHost ();
ScalorType px = st.pressureXX (sys.box);
ScalorType py = st.pressureYY (sys.box);
ScalorType pz = st.pressureZZ (sys.box);
printf ("%09d %05e %.5e %.5e %.5e %.5e %.5e %.5e %.5e %.5e %.5e %.5e %.5e %.5e %.2e\n",
(i+1),
(i+1) * dt,
st.nonBondedEnergy(),
st.kineticEnergy(),
st.kineticEnergy() * 2. / 3. / (double (sys.hdata.numAtom) - 3.),
st.nonBondedEnergy() +
st.kineticEnergy(),
st.nonBondedEnergy() +
st.kineticEnergy() +
nhc.HamiltonianContribution (),
px, py, pz,
(px - (py + pz) * 0.5) * sys.box.size.x * 0.5,
pcxx,
pcyy,
(pcxx - (pcyy + pczz) * 0.5) * sys.box.size.x * 0.5,
double (nlist.calSumNeighbor ())
);
fflush(stdout);
timer.toc (mdTimeDataIO);
}
if ((i+1) % densityProfileSamplingFeq == 0) {
timer.tic (mdTimeDensityProfile);
sys.updateHostFromDevice (NULL);
dp.deposite (sys.hdata.coord, sys.hdata.numAtom);
timer.toc (mdTimeDensityProfile);
}
if ((i+1) % rcutUpdateFeq == 0) {
// printf ("# update rcut\n");
timer.tic (mdTimeDensityProfile);
dp.calculate ();
dp.print_x ("density.x.out");
timer.toc (mdTimeDensityProfile);
timer.tic (mdTimeAdaptRCut);
arc.calError (dp);
arc.calRCut (targetPrec);
arc.print_x ("error.x.out");
assign_rcut.getRCut (arc);
assign_rcut.print_x ("rcut.x.out");
pc.correction (arc, dp);
pcxx = pc.pxx;
pcyy = pc.pyy;
pczz = pc.pzz;
timer.toc (mdTimeAdaptRCut);
if (i != nstep - 1) dp.clearData ();
}
if ((i+1) % confFeq == 0){
// printf ("write conf\n");
sys.recoverDeviceData (&timer);
sys.updateHostFromRecovered (&timer);
sys.writeHostDataXtc (i+1, (i+1)*dt, &timer);
dp.write ((i+1) * dt);
assign_rcut.write ((i+1) * dt);
}
if ((i+1) % 100 == 0){
if (resh.calIndexTable (clist_resh, &timer)){
sys.reshuffle (resh.indexTable, sys.hdata.numAtom, &timer);
clist.reshuffle (resh.indexTable, sys.hdata.numAtom, &timer);
clist_resh.reshuffle (resh.indexTable, sys.hdata.numAtom, &timer);
nlist.reshuffle (resh.indexTable, sys.hdata.numAtom, &timer);
disp.reshuffle (resh.indexTable, sys.hdata.numAtom, &timer);
}
}
}
sys.endWriteXtc();
sys.recoverDeviceData (&timer);
sys.updateHostFromRecovered (&timer);
sys.writeHostDataGro ("confout.gro", nstep, nstep*dt, &timer);
timer.toc(mdTimeTotal);
timer.printRecord (stderr);
}
catch (MDExcptCuda & e){
// resh.recoverMDDataToHost (sys, &timer);
// sys.writeHostDataXtc (i+1, (i+1)*dt, &timer);
timer.toc(mdTimeTotal);
timer.printRecord (stderr);
return 1;
}
catch (MDException &e){
fprintf (stderr, "%s\n", e.what());
return 1;
}
dp.end_write();
assign_rcut.end_write();
dp.save ("density.save");
arc.save_rc ("rcut.save");
arc.print_error_avg (dp, "a.error.x.out");
arc.print_rc_avg ("a.rcut.x.out");
return 0;
}
|
1f78bda4acc413fc54b7fecfed1d76f31c7c31cd.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include "gputimer.h"
const int N= 1024; // matrix size is NxN
const int K= 32; // tile size is KxK
// Utility functions: compare, print, and fill matrices
#define checkCudaErrors(val) check( (val), #val, __FILE__, __LINE__)
template<typename T>
void check(T err, const char* const func, const char* const file, const int line)
{
if (err != hipSuccess) {
fprintf(stderr, "CUDA error at: %s : %d\n", file,line);
fprintf(stderr, "%s %s\n", hipGetErrorString(err), func);;
exit(1);
}
}
int compare_matrices(float *gpu, float *ref)
{
int result = 0;
for(int j=0; j < N; j++)
for(int i=0; i < N; i++)
if (ref[i + j*N] != gpu[i + j*N])
{
// printf("reference(%d,%d) = %f but test(%d,%d) = %f\n",
// i,j,ref[i+j*N],i,j,test[i+j*N]);
result = 1;
}
return result;
}
void print_matrix(float *mat)
{
for(int j=0; j < N; j++)
{
for(int i=0; i < N; i++) { printf("%4.4g ", mat[i + j*N]); }
printf("\n");
}
}
// fill a matrix with sequential numbers in the range 0..N-1
void fill_matrix(float *mat)
{
for(int j=0; j < N * N; j++)
mat[j] = (float) j;
}
void
transpose_CPU(float in[], float out[])
{
for(int j=0; j < N; j++)
for(int i=0; i < N; i++)
out[j + i*N] = in[i + j*N]; // out(j,i) = in(i,j)
}
// to be launched on a single thread
__global__ void
transpose_serial(float in[], float out[])
{
for(int j=0; j < N; j++)
for(int i=0; i < N; i++)
out[j + i*N] = in[i + j*N]; // out(j,i) = in(i,j)
}
// to be launched with one thread per row of output matrix
__global__ void
transpose_parallel_per_row(float in[], float out[])
{
int i = threadIdx.x;
for(int j=0; j < N; j++)
out[j + i*N] = in[i + j*N]; // out(j,i) = in(i,j)
}
// to be launched with one thread per element, in KxK threadblocks
// thread (x,y) in grid writes element (i,j) of output matrix
__global__ void
transpose_parallel_per_element(float in[], float out[])
{
int i = blockIdx.x * K + threadIdx.x;
int j = blockIdx.y * K + threadIdx.y;
out[j + i*N] = in[i + j*N]; // out(j,i) = in(i,j)
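	// i (built from threadIdx.x) varies fastest across a warp, so the reads in[i + j*N]
	// touch consecutive addresses (coalesced) while the writes out[j + i*N] stride by N
	// elements (uncoalesced); the tiled kernels below stage the tile in shared memory so
	// that both the global reads and the global writes are coalesced.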
}
// to be launched with one thread per element, in (tilesize)x(tilesize) threadblocks
// thread blocks read & write tiles, in coalesced fashion
// adjacent threads read adjacent input elements, write adjacent output elmts
__global__ void
transpose_parallel_per_element_tiled(float in[], float out[])
{
// (i,j) locations of the tile corners for input & output matrices:
int in_corner_i = blockIdx.x * K, in_corner_j = blockIdx.y * K;
int out_corner_i = blockIdx.y * K, out_corner_j = blockIdx.x * K;
int x = threadIdx.x, y = threadIdx.y;
__shared__ float tile[K][K];
// coalesced read from global mem, TRANSPOSED write into shared mem:
tile[y][x] = in[(in_corner_i + x) + (in_corner_j + y)*N];
__syncthreads();
// read from shared mem, coalesced write to global mem:
out[(out_corner_i + x) + (out_corner_j + y)*N] = tile[x][y];
}
// to be launched with one thread per element, in (tilesize)x(tilesize) threadblocks
// thread blocks read & write tiles, in coalesced fashion
// adjacent threads read adjacent input elements, write adjacent output elmts
__global__ void
transpose_parallel_per_element_tiled16(float in[], float out[])
{
// (i,j) locations of the tile corners for input & output matrices:
int in_corner_i = blockIdx.x * 16, in_corner_j = blockIdx.y * 16;
int out_corner_i = blockIdx.y * 16, out_corner_j = blockIdx.x * 16;
int x = threadIdx.x, y = threadIdx.y;
__shared__ float tile[16][16];
// coalesced read from global mem, TRANSPOSED write into shared mem:
tile[y][x] = in[(in_corner_i + x) + (in_corner_j + y)*N];
__syncthreads();
// read from shared mem, coalesced write to global mem:
out[(out_corner_i + x) + (out_corner_j + y)*N] = tile[x][y];
}
// to be launched with one thread per element, in KxK threadblocks
// thread blocks read & write tiles, in coalesced fashion
// shared memory array padded to avoid bank conflicts
__global__ void
transpose_parallel_per_element_tiled_padded(float in[], float out[])
{
// (i,j) locations of the tile corners for input & output matrices:
int in_corner_i = blockIdx.x * K, in_corner_j = blockIdx.y * K;
int out_corner_i = blockIdx.y * K, out_corner_j = blockIdx.x * K;
int x = threadIdx.x, y = threadIdx.y;
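// The extra column of padding (K+1) skews each row across the shared-memory banks, so the
// column-order reads of tile[x][y] below land in distinct banks instead of conflicting.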
__shared__ float tile[K][K+1];
// coalesced read from global mem, TRANSPOSED write into shared mem:
tile[y][x] = in[(in_corner_i + x) + (in_corner_j + y)*N];
__syncthreads();
// read from shared mem, coalesced write to global mem:
out[(out_corner_i + x) + (out_corner_j + y)*N] = tile[x][y];
}
// to be launched with one thread per element, in KxK threadblocks
// thread blocks read & write tiles, in coalesced fashion
// shared memory array padded to avoid bank conflicts
__global__ void
transpose_parallel_per_element_tiled_padded16(float in[], float out[])
{
// (i,j) locations of the tile corners for input & output matrices:
int in_corner_i = blockIdx.x * 16, in_corner_j = blockIdx.y * 16;
int out_corner_i = blockIdx.y * 16, out_corner_j = blockIdx.x * 16;
int x = threadIdx.x, y = threadIdx.y;
__shared__ float tile[16][16+1];
// coalesced read from global mem, TRANSPOSED write into shared mem:
tile[y][x] = in[(in_corner_i + x) + (in_corner_j + y)*N];
__syncthreads();
// read from shared mem, coalesced write to global mem:
out[(out_corner_i + x) + (out_corner_j + y)*N] = tile[x][y];
}
int main(int argc, char **argv)
{
int numbytes = N * N * sizeof(float);
float *in = (float *) malloc(numbytes);
float *out = (float *) malloc(numbytes);
float *gold = (float *) malloc(numbytes);
fill_matrix(in);
transpose_CPU(in, gold);
float *d_in, *d_out;
hipMalloc(&d_in, numbytes);
hipMalloc(&d_out, numbytes);
hipMemcpy(d_in, in, numbytes, hipMemcpyHostToDevice);
GpuTimer timer;
/*
* Now time each kernel and verify that it produces the correct result.
*
 * To be really careful about benchmarking, we should run every kernel once
* to "warm" the system and avoid any compilation or code-caching effects, then run
* every kernel 10 or 100 times and average the timings to smooth out any variance.
* But this makes for messy code and our goal is teaching, not detailed benchmarking.
*/
hipMemset(d_out, 0, numbytes);
timer.Start();
hipLaunchKernelGGL(( transpose_serial), dim3(1),dim3(1), 0, 0, d_in, d_out);
timer.Stop();
hipMemcpy(out, d_out, numbytes, hipMemcpyDeviceToHost);
printf("transpose_serial: %g ms.\nVerifying transpose...%s\n",
timer.Elapsed(), compare_matrices(out, gold) ? "Failed" : "Success");
hipMemset(d_out, 0, numbytes);
timer.Start();
hipLaunchKernelGGL(( transpose_parallel_per_row), dim3(1),dim3(N), 0, 0, d_in, d_out);
timer.Stop();
hipMemcpy(out, d_out, numbytes, hipMemcpyDeviceToHost);
printf("transpose_parallel_per_row: %g ms.\nVerifying transpose...%s\n",
timer.Elapsed(), compare_matrices(out, gold) ? "Failed" : "Success");
dim3 blocks(N/K,N/K); // blocks per grid
dim3 threads(K,K); // threads per block
hipMemset(d_out, 0, numbytes);
timer.Start();
hipLaunchKernelGGL(( transpose_parallel_per_element), dim3(blocks),dim3(threads), 0, 0, d_in, d_out);
timer.Stop();
hipMemcpy(out, d_out, numbytes, hipMemcpyDeviceToHost);
printf("transpose_parallel_per_element: %g ms.\nVerifying transpose...%s\n",
timer.Elapsed(), compare_matrices(out, gold) ? "Failed" : "Success");
hipMemset(d_out, 0, numbytes);
timer.Start();
hipLaunchKernelGGL(( transpose_parallel_per_element_tiled), dim3(blocks),dim3(threads), 0, 0, d_in, d_out);
timer.Stop();
hipMemcpy(out, d_out, numbytes, hipMemcpyDeviceToHost);
printf("transpose_parallel_per_element_tiled %dx%d: %g ms.\nVerifying ...%s\n",
K, K, timer.Elapsed(), compare_matrices(out, gold) ? "Failed" : "Success");
dim3 blocks16x16(N/16,N/16); // blocks per grid
dim3 threads16x16(16,16); // threads per block
hipMemset(d_out, 0, numbytes);
timer.Start();
hipLaunchKernelGGL(( transpose_parallel_per_element_tiled16), dim3(blocks16x16),dim3(threads16x16), 0, 0, d_in, d_out);
timer.Stop();
hipMemcpy(out, d_out, numbytes, hipMemcpyDeviceToHost);
printf("transpose_parallel_per_element_tiled 16x16: %g ms.\nVerifying ...%s\n",
timer.Elapsed(), compare_matrices(out, gold) ? "Failed" : "Success");
hipMemset(d_out, 0, numbytes);
timer.Start();
hipLaunchKernelGGL(( transpose_parallel_per_element_tiled_padded16), dim3(blocks16x16),dim3(threads16x16), 0, 0, d_in, d_out);
timer.Stop();
hipMemcpy(out, d_out, numbytes, hipMemcpyDeviceToHost);
printf("transpose_parallel_per_element_tiled_padded 16x16: %g ms.\nVerifying...%s\n",
timer.Elapsed(), compare_matrices(out, gold) ? "Failed" : "Success");
hipFree(d_in);
hipFree(d_out);
}
| 1f78bda4acc413fc54b7fecfed1d76f31c7c31cd.cu | #include <stdio.h>
#include "gputimer.h"
const int N= 1024; // matrix size is NxN
const int K= 32; // tile size is KxK
// Utility functions: compare, print, and fill matrices
#define checkCudaErrors(val) check( (val), #val, __FILE__, __LINE__)
template<typename T>
void check(T err, const char* const func, const char* const file, const int line)
{
if (err != cudaSuccess) {
fprintf(stderr, "CUDA error at: %s : %d\n", file,line);
fprintf(stderr, "%s %s\n", cudaGetErrorString(err), func);;
exit(1);
}
}
int compare_matrices(float *gpu, float *ref)
{
int result = 0;
for(int j=0; j < N; j++)
for(int i=0; i < N; i++)
if (ref[i + j*N] != gpu[i + j*N])
{
// printf("reference(%d,%d) = %f but test(%d,%d) = %f\n",
// i,j,ref[i+j*N],i,j,test[i+j*N]);
result = 1;
}
return result;
}
void print_matrix(float *mat)
{
for(int j=0; j < N; j++)
{
for(int i=0; i < N; i++) { printf("%4.4g ", mat[i + j*N]); }
printf("\n");
}
}
// fill a matrix with sequential numbers in the range 0..N*N-1
void fill_matrix(float *mat)
{
for(int j=0; j < N * N; j++)
mat[j] = (float) j;
}
void
transpose_CPU(float in[], float out[])
{
for(int j=0; j < N; j++)
for(int i=0; i < N; i++)
out[j + i*N] = in[i + j*N]; // out(j,i) = in(i,j)
}
// to be launched on a single thread
__global__ void
transpose_serial(float in[], float out[])
{
for(int j=0; j < N; j++)
for(int i=0; i < N; i++)
out[j + i*N] = in[i + j*N]; // out(j,i) = in(i,j)
}
// to be launched with one thread per row of output matrix
__global__ void
transpose_parallel_per_row(float in[], float out[])
{
int i = threadIdx.x;
for(int j=0; j < N; j++)
out[j + i*N] = in[i + j*N]; // out(j,i) = in(i,j)
}
// to be launched with one thread per element, in KxK threadblocks
// thread (x,y) in grid writes element (i,j) of output matrix
__global__ void
transpose_parallel_per_element(float in[], float out[])
{
int i = blockIdx.x * K + threadIdx.x;
int j = blockIdx.y * K + threadIdx.y;
out[j + i*N] = in[i + j*N]; // out(j,i) = in(i,j)
}
// to be launched with one thread per element, in (tilesize)x(tilesize) threadblocks
// thread blocks read & write tiles, in coalesced fashion
// adjacent threads read adjacent input elements, write adjacent output elmts
__global__ void
transpose_parallel_per_element_tiled(float in[], float out[])
{
// (i,j) locations of the tile corners for input & output matrices:
int in_corner_i = blockIdx.x * K, in_corner_j = blockIdx.y * K;
int out_corner_i = blockIdx.y * K, out_corner_j = blockIdx.x * K;
int x = threadIdx.x, y = threadIdx.y;
__shared__ float tile[K][K];
// coalesced read from global mem, TRANSPOSED write into shared mem:
tile[y][x] = in[(in_corner_i + x) + (in_corner_j + y)*N];
__syncthreads();
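// Note: the tile[x][y] read below walks down a column of shared memory, so the threads
// of a warp can hit the same shared-memory bank; the padded variants further down avoid this.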
// read from shared mem, coalesced write to global mem:
out[(out_corner_i + x) + (out_corner_j + y)*N] = tile[x][y];
}
// to be launched with one thread per element, in (tilesize)x(tilesize) threadblocks
// thread blocks read & write tiles, in coalesced fashion
// adjacent threads read adjacent input elements, write adjacent output elmts
__global__ void
transpose_parallel_per_element_tiled16(float in[], float out[])
{
// (i,j) locations of the tile corners for input & output matrices:
int in_corner_i = blockIdx.x * 16, in_corner_j = blockIdx.y * 16;
int out_corner_i = blockIdx.y * 16, out_corner_j = blockIdx.x * 16;
int x = threadIdx.x, y = threadIdx.y;
__shared__ float tile[16][16];
// coalesced read from global mem, TRANSPOSED write into shared mem:
tile[y][x] = in[(in_corner_i + x) + (in_corner_j + y)*N];
__syncthreads();
// read from shared mem, coalesced write to global mem:
out[(out_corner_i + x) + (out_corner_j + y)*N] = tile[x][y];
}
// to be launched with one thread per element, in KxK threadblocks
// thread blocks read & write tiles, in coalesced fashion
// shared memory array padded to avoid bank conflicts
__global__ void
transpose_parallel_per_element_tiled_padded(float in[], float out[])
{
// (i,j) locations of the tile corners for input & output matrices:
int in_corner_i = blockIdx.x * K, in_corner_j = blockIdx.y * K;
int out_corner_i = blockIdx.y * K, out_corner_j = blockIdx.x * K;
int x = threadIdx.x, y = threadIdx.y;
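// The extra column of padding (K+1) skews each row across the shared-memory banks, so the
// column-order reads of tile[x][y] below land in distinct banks instead of conflicting.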
__shared__ float tile[K][K+1];
// coalesced read from global mem, TRANSPOSED write into shared mem:
tile[y][x] = in[(in_corner_i + x) + (in_corner_j + y)*N];
__syncthreads();
// read from shared mem, coalesced write to global mem:
out[(out_corner_i + x) + (out_corner_j + y)*N] = tile[x][y];
}
// to be launched with one thread per element, in KxK threadblocks
// thread blocks read & write tiles, in coalesced fashion
// shared memory array padded to avoid bank conflicts
__global__ void
transpose_parallel_per_element_tiled_padded16(float in[], float out[])
{
// (i,j) locations of the tile corners for input & output matrices:
int in_corner_i = blockIdx.x * 16, in_corner_j = blockIdx.y * 16;
int out_corner_i = blockIdx.y * 16, out_corner_j = blockIdx.x * 16;
int x = threadIdx.x, y = threadIdx.y;
__shared__ float tile[16][16+1];
// coalesced read from global mem, TRANSPOSED write into shared mem:
tile[y][x] = in[(in_corner_i + x) + (in_corner_j + y)*N];
__syncthreads();
// read from shared mem, coalesced write to global mem:
out[(out_corner_i + x) + (out_corner_j + y)*N] = tile[x][y];
}
int main(int argc, char **argv)
{
int numbytes = N * N * sizeof(float);
float *in = (float *) malloc(numbytes);
float *out = (float *) malloc(numbytes);
float *gold = (float *) malloc(numbytes);
fill_matrix(in);
transpose_CPU(in, gold);
float *d_in, *d_out;
cudaMalloc(&d_in, numbytes);
cudaMalloc(&d_out, numbytes);
cudaMemcpy(d_in, in, numbytes, cudaMemcpyHostToDevice);
GpuTimer timer;
/*
* Now time each kernel and verify that it produces the correct result.
*
 * To be really careful about benchmarking, we should run every kernel once
* to "warm" the system and avoid any compilation or code-caching effects, then run
* every kernel 10 or 100 times and average the timings to smooth out any variance.
* But this makes for messy code and our goal is teaching, not detailed benchmarking.
*/
cudaMemset(d_out, 0, numbytes);
timer.Start();
transpose_serial<<<1,1>>>(d_in, d_out);
timer.Stop();
cudaMemcpy(out, d_out, numbytes, cudaMemcpyDeviceToHost);
printf("transpose_serial: %g ms.\nVerifying transpose...%s\n",
timer.Elapsed(), compare_matrices(out, gold) ? "Failed" : "Success");
cudaMemset(d_out, 0, numbytes);
timer.Start();
transpose_parallel_per_row<<<1,N>>>(d_in, d_out);
timer.Stop();
cudaMemcpy(out, d_out, numbytes, cudaMemcpyDeviceToHost);
printf("transpose_parallel_per_row: %g ms.\nVerifying transpose...%s\n",
timer.Elapsed(), compare_matrices(out, gold) ? "Failed" : "Success");
dim3 blocks(N/K,N/K); // blocks per grid
dim3 threads(K,K); // threads per block
cudaMemset(d_out, 0, numbytes);
timer.Start();
transpose_parallel_per_element<<<blocks,threads>>>(d_in, d_out);
timer.Stop();
cudaMemcpy(out, d_out, numbytes, cudaMemcpyDeviceToHost);
printf("transpose_parallel_per_element: %g ms.\nVerifying transpose...%s\n",
timer.Elapsed(), compare_matrices(out, gold) ? "Failed" : "Success");
cudaMemset(d_out, 0, numbytes);
timer.Start();
transpose_parallel_per_element_tiled<<<blocks,threads>>>(d_in, d_out);
timer.Stop();
cudaMemcpy(out, d_out, numbytes, cudaMemcpyDeviceToHost);
printf("transpose_parallel_per_element_tiled %dx%d: %g ms.\nVerifying ...%s\n",
K, K, timer.Elapsed(), compare_matrices(out, gold) ? "Failed" : "Success");
dim3 blocks16x16(N/16,N/16); // blocks per grid
dim3 threads16x16(16,16); // threads per block
cudaMemset(d_out, 0, numbytes);
timer.Start();
transpose_parallel_per_element_tiled16<<<blocks16x16,threads16x16>>>(d_in, d_out);
timer.Stop();
cudaMemcpy(out, d_out, numbytes, cudaMemcpyDeviceToHost);
printf("transpose_parallel_per_element_tiled 16x16: %g ms.\nVerifying ...%s\n",
timer.Elapsed(), compare_matrices(out, gold) ? "Failed" : "Success");
cudaMemset(d_out, 0, numbytes);
timer.Start();
transpose_parallel_per_element_tiled_padded16<<<blocks16x16,threads16x16>>>(d_in, d_out);
timer.Stop();
cudaMemcpy(out, d_out, numbytes, cudaMemcpyDeviceToHost);
printf("transpose_parallel_per_element_tiled_padded 16x16: %g ms.\nVerifying...%s\n",
timer.Elapsed(), compare_matrices(out, gold) ? "Failed" : "Success");
cudaFree(d_in);
cudaFree(d_out);
}
|
111d27781eb3bd621ca89a53347eef371e940206.hip | // !!! This is a file automatically generated by hipify!!!
/**
* @file rasterize.cu
* @brief CUDA-accelerated rasterization pipeline.
* @authors Skeleton code: Yining Karl Li, Kai Ninomiya
* @date 2012-2015
* @copyright University of Pennsylvania & STUDENT
*/
#include "rasterize.h"
#include <cmath>
#include <cstdio>
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
#include <thrust/random.h>
#include <util/checkCUDAError.h>
#include "rasterizeTools.h"
#include "../stream_compaction/efficient.h"
#define VERTBLOCKSIZE 256
#define FRAGBLOCKSIZE 256
static int width = 0;
static int height = 0;
static Scene *scene = NULL;
static int *dev_bufIdx = NULL;
static VertexIn *dev_bufVertex = NULL;
static VertexOut *dev_bufVertexOut = NULL;
static Triangle *dev_primitives = NULL;
static int *dev_depth = NULL;
static Fragment *dev_depthbuffer = NULL;
static glm::vec3 *dev_framebuffer = NULL;
static int bufIdxSize = 0;
static int vertCount = 0;
static int primitiveCount = 0;
static Triangle *dev_compactionOutput = NULL;
/**
* Kernel that writes the image to the OpenGL PBO directly.
*/
__global__
void sendImageToPBO(uchar4 *pbo, int w, int h, glm::vec3 *image) {
int x = (blockIdx.x * blockDim.x) + threadIdx.x;
int y = (blockIdx.y * blockDim.y) + threadIdx.y;
int index = x + (y * w);
if (x < w && y < h) {
glm::vec3 color;
color.x = glm::clamp(image[index].x, 0.0f, 1.0f) * 255.0;
color.y = glm::clamp(image[index].y, 0.0f, 1.0f) * 255.0;
color.z = glm::clamp(image[index].z, 0.0f, 1.0f) * 255.0;
// Each thread writes one pixel location in the texture (textel)
pbo[index].w = 0;
pbo[index].x = color.x;
pbo[index].y = color.y;
pbo[index].z = color.z;
}
}
// Writes fragment colors to the framebuffer
__global__
void render(int w, int h, Fragment *depthbuffer, glm::vec3 *framebuffer) {
int x = (blockIdx.x * blockDim.x) + threadIdx.x;
int y = (blockIdx.y * blockDim.y) + threadIdx.y;
int index = x + (y * w);
if (x < w && y < h) {
framebuffer[index] = depthbuffer[index].color;
}
}
/**
 * Clears the integer depth buffer and the fragment buffer.
*/
void clearDepthBuffer() {
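// Note: hipMemset fills byte-by-byte, so only the low byte of (farPlane * 10000) is replicated
// into dev_depth; the result is a large sentinel value rather than the exact product.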
hipMemset(dev_depth, scene->farPlane * 10000, width * height * sizeof(int));
hipMemset(dev_depthbuffer, 0.0f, width * height * sizeof(Fragment));
}
/**
* Apply vertex transformations and transfer to vertex out buffer
*/
__global__
void vertexShading(int w, int h, int nearPlane, int farPlane, int vertexCount, const VertexIn *vertexBufferIn,
VertexOut *vertexBufferOut, const glm::mat4 modelView) {
int index = ((blockIdx.x * blockDim.x) + threadIdx.x) + (((blockIdx.y * blockDim.y) + threadIdx.y) * w);
if (index < vertexCount) {
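// Transform to clip space (the modelView matrix is assumed to already include the projection),
// divide by w to reach NDC, then map to window coordinates and remap z into the [near, far] range.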
glm::vec4 clipCoordinates = modelView * glm::vec4(vertexBufferIn[index].pos, 1.0f);
glm::vec3 normDeviceCoordinates = glm::vec3(clipCoordinates.x, clipCoordinates.y, clipCoordinates.z) / clipCoordinates.w;
vertexBufferOut[index].pos = glm::vec3(w * 0.5f * (normDeviceCoordinates.x + 1.0f),
h * 0.5f * (normDeviceCoordinates.y + 1.0f), 0.5f * ((farPlane - nearPlane)
* normDeviceCoordinates.z + (farPlane + nearPlane)));
vertexBufferOut[index].col = vertexBufferIn[index].col;
vertexBufferOut[index].nor = vertexBufferIn[index].nor;
vertexBufferOut[index].model_pos = vertexBufferIn[index].pos;
}
}
/**
* Assemble primitives from vertex out buffer data.
*/
__global__
void assemblePrimitives(int primitiveCount, const VertexOut *vertexBufferOut, Triangle *primitives, const int *bufIdx) {
int index = (blockIdx.x * blockDim.x) + threadIdx.x;
if (index < primitiveCount) {
for (int i = 0; i < 3; i++) {
primitives[index].v[i] = vertexBufferOut[bufIdx[3 * index + i]];
}
primitives[index].boundingBox = getAABBForTriangle(primitives[index]);
primitives[index].visible = true;
}
}
/**
* Perform scanline rasterization on a triangle
*/
__global__
void rasterization(int w, int h, int primitiveCount, Triangle *primitives, Fragment *depthbuffer, int *depth) {
int index = ((blockIdx.x * blockDim.x) + threadIdx.x) + (((blockIdx.y * blockDim.y) + threadIdx.y) * w);
if (index < primitiveCount) {
// Only doing scanline triangle atm
int minX = fmaxf(round(primitives[index].boundingBox.min.x), 0.0f), minY = fmaxf(round(primitives[index].boundingBox.min.y), 0.0f);
int maxX = fminf(round(primitives[index].boundingBox.max.x), (float)w);
// Loop through each scanline, then each pixel on the line
for (int y = fminf(round(primitives[index].boundingBox.max.y), (float)h); y >= minY; y--) {
for (int x = minX; x <= maxX; x++) {
glm::vec3 baryCentricCoordinate = calculateBarycentricCoordinate(primitives[index], glm::vec2(x, y));
if (isBarycentricCoordInBounds(baryCentricCoordinate)) {
int z = getZAtCoordinate(baryCentricCoordinate, primitives[index]) * 10000.0f;
int depthIndex = w - x + (h - y) * w;
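// atomicMin keeps the winning (minimum) depth value; the equality check below lets only the
// thread that owns that depth write its interpolated color, position and normal. The test is
// not fully race-free, since another thread may update depth[depthIndex] between the two steps.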
atomicMin(&depth[depthIndex], z);
if (depth[depthIndex] == z) {
depthbuffer[depthIndex].color = baryCentricCoordinate.x * primitives[index].v[0].col + baryCentricCoordinate.y
* primitives[index].v[1].col + baryCentricCoordinate.z * primitives[index].v[2].col;
depthbuffer[depthIndex].position = baryCentricCoordinate.x * primitives[index].v[0].pos + baryCentricCoordinate.y
* primitives[index].v[1].pos + baryCentricCoordinate.z * primitives[index].v[2].pos;
depthbuffer[depthIndex].normal = baryCentricCoordinate.x * primitives[index].v[0].nor + baryCentricCoordinate.y
* primitives[index].v[1].nor + baryCentricCoordinate.z * primitives[index].v[2].nor;
}
}
}
}
}
}
/**
* Rasterize point primitives.
*/
__global__
void pointRasterization(int w, int h, int primitiveCount, Triangle *primitives, Fragment *depthbuffer, int *depth) {
int index = ((blockIdx.x * blockDim.x) + threadIdx.x) + (((blockIdx.y * blockDim.y) + threadIdx.y) * w);
if (index < primitiveCount) {
Triangle primitive = primitives[index];
int x = round(primitive.v[1].pos.x), y = round(primitive.v[1].pos.y);
int z = primitive.v[1].pos.z * 10000.0f;
int depthIndex = w - x + (h - y) * w;
atomicMin(&depth[depthIndex], z);
if (depth[depthIndex] == z) {
Fragment fragment;
fragment.color = primitive.v[1].col;
fragment.position = primitive.v[1].pos;
fragment.normal = primitive.v[1].nor;
depthbuffer[depthIndex] = fragment;
}
}
}
/**
* Rasterize line primitives.
*/
__global__
void lineRasterization(int w, int h, int primitiveCount, Triangle *primitives, Fragment *depthbuffer, int *depth) {
int index = ((blockIdx.x * blockDim.x) + threadIdx.x) + (((blockIdx.y * blockDim.y) + threadIdx.y) * w);
if (index < primitiveCount) {
Triangle primitive = primitives[index];
glm::vec3 minPosition = primitive.v[0].pos, maxPosition = primitive.v[1].pos;
if (round(minPosition.x) == round(maxPosition.x)) {
// Get straight vertical line
int x = round(minPosition.x);
if (minPosition.y > maxPosition.y) {
// Flip
minPosition = primitive.v[1].pos;
maxPosition = primitive.v[0].pos;
}
for (int y = round(maxPosition.y); y >= round(minPosition.y); y--) {
float minMaxRatio = __fdividef(y - minPosition.y, maxPosition.y - minPosition.y);
int depthIndex = w - x + (h - y) * w;
int z = -(minMaxRatio * round(minPosition.z) + (1.0f - minMaxRatio) * round(maxPosition.z));
atomicMin(&depth[depthIndex], z);
if (depth[depthIndex] == z) {
Fragment fragment;
fragment.color = primitive.v[1].col;
fragment.position = glm::vec3(x, y, -z);
fragment.normal = glm::normalize(primitive.v[0].nor + primitive.v[1].nor);
depthbuffer[depthIndex] = fragment;
}
}
}
else {
// Otherwise walk along x and interpolate y from the slope (a simple DDA-style line)
if (round(minPosition.x) > round(maxPosition.x)) {
// Swap
minPosition = primitive.v[1].pos;
maxPosition = primitive.v[0].pos;
}
float slope = (maxPosition.y - minPosition.y) / (maxPosition.x - minPosition.x);
for (int x = round(minPosition.x); x <= round(maxPosition.x); x++) {
int y = slope * (x - round(minPosition.x)) + minPosition.y;
float minMaxRatio = __fdividef(y - minPosition.y, maxPosition.y - minPosition.y);
int depthIndex = w - x + (h - y) * w;
int z = -(minMaxRatio * minPosition.z + (1.0f - minMaxRatio) * maxPosition.z);
atomicMin(&depth[depthIndex], z);
if (depth[depthIndex] == z) {
Fragment fragment;
fragment.color = primitive.v[1].col;
fragment.position = glm::vec3(x, y, -z);
fragment.normal = glm::normalize(primitive.v[0].nor + primitive.v[1].nor);
depthbuffer[depthIndex] = fragment;
}
}
}
}
}
/**
* Fragment shader. Use light argument to color the fragment in the depth buffer.
*/
__global__
void fragmentShading(int w, int h, Fragment *depthBuffer, const Light light) {
int index = (blockIdx.x * blockDim.x) + threadIdx.x;
if (index < (w * h)) {
Fragment fragment = depthBuffer[index];
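// Lambertian diffuse term: (N . L) with L the direction toward the light, scaled by the
// fragment color and the light color.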
depthBuffer[index].color = (glm::dot(glm::normalize(light.position - fragment.position), fragment.normal)
* fragment.color * light.color);
}
}
/**
 * Perform backface culling optimization, removing unseen (back-facing) primitives.
*/
__global__
void backFaceCulling(int w, int primitiveCount, Triangle *primitives, glm::vec3 cameraPosition) {
int index = ((blockIdx.x * blockDim.x) + threadIdx.x) + (((blockIdx.y * blockDim.y) + threadIdx.y) * w);
if (index < primitiveCount) {
if (glm::dot(primitives[index].v[0].model_pos - cameraPosition, primitives[index].v[0].nor) >= 0.0f) {
primitives[index].visible = false;
}
}
}
/**
 * Perform scissor test culling, removing primitives outside of the scissor area.
*/
__global__
void scissorTest(int w, int primitiveCount, Triangle *primitives, const glm::vec2 scissorMax, const glm::vec2 scissorMin) {
int index = ((blockIdx.x * blockDim.x) + threadIdx.x) + (((blockIdx.y * blockDim.y) + threadIdx.y) * w);
if (index < primitiveCount) {
if (primitives[index].boundingBox.min.y > scissorMax.y || primitives[index].boundingBox.max.y < scissorMin.y ||
primitives[index].boundingBox.max.x > scissorMax.x || primitives[index].boundingBox.max.x < scissorMin.x) {
primitives[index].visible = false;
}
}
}
/**
* Called once at the beginning of the program to allocate memory.
*/
void rasterizeInit(int w, int h, Scene *s) {
width = w;
height = h;
scene = s;
hipFree(dev_depth);
hipMalloc(&dev_depth, width * height * sizeof(int));
hipMemset(dev_depth, scene->farPlane * 10000.0f, width * height * sizeof(int));
hipFree(dev_depthbuffer);
hipMalloc(&dev_depthbuffer, width * height * sizeof(Fragment));
hipMemset(dev_depthbuffer, 0, width * height * sizeof(Fragment));
hipFree(dev_framebuffer);
hipMalloc(&dev_framebuffer, width * height * sizeof(glm::vec3));
hipMemset(dev_framebuffer, 0, width * height * sizeof(glm::vec3));
checkCUDAError("rasterizeInit");
}
/**
* Set all of the buffers necessary for rasterization.
*/
void rasterizeSetBuffers(
int _bufIdxSize, int *bufIdx,
int _vertCount, float *bufPos, float *bufNor, float *bufCol) {
bufIdxSize = _bufIdxSize;
vertCount = _vertCount;
primitiveCount = vertCount / 3;
hipFree(dev_bufIdx);
hipMalloc(&dev_bufIdx, bufIdxSize * sizeof(int));
hipMemcpy(dev_bufIdx, bufIdx, bufIdxSize * sizeof(int), hipMemcpyHostToDevice);
VertexIn *bufVertex = new VertexIn[_vertCount];
for (int i = 0; i < vertCount; i++) {
int j = i * 3;
bufVertex[i].pos = glm::vec3(bufPos[j + 0], bufPos[j + 1], bufPos[j + 2]);
bufVertex[i].nor = glm::vec3(bufNor[j + 0], bufNor[j + 1], bufNor[j + 2]);
bufVertex[i].col = glm::vec3(bufCol[j + 0], bufCol[j + 1], bufCol[j + 2]);
}
hipFree(dev_bufVertex);
hipMalloc(&dev_bufVertex, vertCount * sizeof(VertexIn));
hipMemcpy(dev_bufVertex, bufVertex, vertCount * sizeof(VertexIn), hipMemcpyHostToDevice);
hipFree(dev_bufVertexOut);
hipMalloc(&dev_bufVertexOut, vertCount * sizeof(VertexOut));
hipMemset(dev_bufVertexOut, 0, vertCount * sizeof(VertexIn));
hipFree(dev_primitives);
hipMalloc(&dev_primitives, vertCount / 3 * sizeof(Triangle));
hipMemset(dev_primitives, 0, vertCount / 3 * sizeof(Triangle));
hipFree(dev_compactionOutput);
hipMalloc(&dev_compactionOutput, vertCount / 3 * sizeof(Triangle));
hipMemset(dev_compactionOutput, 0, vertCount / 3 * sizeof(Triangle));
checkCUDAError("rasterizeSetBuffers");
}
/**
* Perform rasterization.
*/
void rasterize(uchar4 *pbo) {
int sideLength2d = 8;
dim3 blockSize2d(sideLength2d, sideLength2d);
dim3 blockCount2d((width + blockSize2d.x - 1) / blockSize2d.x,
(height + blockSize2d.y - 1) / blockSize2d.y);
int vertexBlockSize = VERTBLOCKSIZE, fragmentBlockSize = FRAGBLOCKSIZE;
int vertexGridSize = (vertCount + VERTBLOCKSIZE - 1) / VERTBLOCKSIZE;
int fragmentGridSize = (width * height + FRAGBLOCKSIZE - 1) / FRAGBLOCKSIZE;
primitiveCount = vertCount / 3;
// Clear depth buffer
clearDepthBuffer();
// Vertex shading
hipLaunchKernelGGL(( vertexShading), dim3(vertexGridSize), dim3(vertexBlockSize), 0, 0, width, height, scene->nearPlane, scene->farPlane, vertCount, dev_bufVertex, dev_bufVertexOut, scene->modelView);
// Primitive Assembly
hipLaunchKernelGGL(( assemblePrimitives), dim3(vertexGridSize), dim3(vertexBlockSize), 0, 0, primitiveCount, dev_bufVertexOut, dev_primitives, dev_bufIdx);
// Culling after Primitive assembly
if (scene->culling) {
hipLaunchKernelGGL(( backFaceCulling), dim3(blockCount2d), dim3(blockSize2d), 0, 0, width, primitiveCount, dev_primitives, scene->camera.position);
primitiveCount = StreamCompaction::Efficient::Compact(primitiveCount, dev_compactionOutput, dev_primitives);
hipMemcpy(dev_primitives, dev_compactionOutput, primitiveCount * sizeof(Triangle), hipMemcpyDeviceToDevice);
}
// Scissor test
if (scene->scissor) {
hipLaunchKernelGGL(( scissorTest), dim3(blockCount2d), dim3(blockSize2d), 0, 0, width, primitiveCount, dev_primitives, scene->scissorMax, scene->scissorMin);
primitiveCount = StreamCompaction::Efficient::Compact(primitiveCount, dev_compactionOutput, dev_primitives);
hipMemcpy(dev_primitives, dev_compactionOutput, primitiveCount * sizeof(Triangle), hipMemcpyDeviceToDevice);
}
// rasterization
// Choose between primitive types based on scene file
if (scene->pointRasterization) {
hipLaunchKernelGGL(( pointRasterization), dim3(blockCount2d), dim3(blockSize2d), 0, 0, width, height, primitiveCount, dev_primitives, dev_depthbuffer, dev_depth);
}
else if (scene->lineRasterization) {
hipLaunchKernelGGL(( lineRasterization), dim3(blockCount2d), dim3(blockSize2d), 0, 0, width, height, primitiveCount, dev_primitives, dev_depthbuffer, dev_depth);
}
else {
// Standard triangle rasterization
hipLaunchKernelGGL(( rasterization), dim3(blockCount2d), dim3(blockSize2d), 0, 0, width, height, primitiveCount, dev_primitives, dev_depthbuffer, dev_depth);
}
// Fragment shading
hipLaunchKernelGGL(( fragmentShading), dim3(fragmentGridSize), dim3(fragmentBlockSize), 0, 0, width, height, dev_depthbuffer, scene->light);
// Copy depthbuffer colors into framebuffer
hipLaunchKernelGGL(( render), dim3(blockCount2d), dim3(blockSize2d), 0, 0, width, height, dev_depthbuffer, dev_framebuffer);
// Copy framebuffer into OpenGL buffer for OpenGL previewing
hipLaunchKernelGGL(( sendImageToPBO), dim3(blockCount2d), dim3(blockSize2d), 0, 0, pbo, width, height, dev_framebuffer);
checkCUDAError("rasterize");
}
/**
* Called once at the end of the program to free CUDA memory.
*/
void rasterizeFree() {
hipFree(dev_bufIdx);
dev_bufIdx = NULL;
hipFree(dev_bufVertex);
dev_bufVertex = NULL;
hipFree(dev_primitives);
dev_primitives = NULL;
hipFree(dev_depthbuffer);
dev_depthbuffer = NULL;
hipFree(dev_framebuffer);
dev_framebuffer = NULL;
hipFree(dev_depth);
dev_depth = NULL;
hipFree(dev_compactionOutput);
dev_compactionOutput = NULL;
checkCUDAError("rasterizeFree");
}
| 111d27781eb3bd621ca89a53347eef371e940206.cu | /**
* @file rasterize.cu
* @brief CUDA-accelerated rasterization pipeline.
* @authors Skeleton code: Yining Karl Li, Kai Ninomiya
* @date 2012-2015
* @copyright University of Pennsylvania & STUDENT
*/
#include "rasterize.h"
#include <cmath>
#include <cstdio>
#include <cuda.h>
#include <cuda_runtime.h>
#include <thrust/random.h>
#include <util/checkCUDAError.h>
#include "rasterizeTools.h"
#include "../stream_compaction/efficient.h"
#define VERTBLOCKSIZE 256
#define FRAGBLOCKSIZE 256
static int width = 0;
static int height = 0;
static Scene *scene = NULL;
static int *dev_bufIdx = NULL;
static VertexIn *dev_bufVertex = NULL;
static VertexOut *dev_bufVertexOut = NULL;
static Triangle *dev_primitives = NULL;
static int *dev_depth = NULL;
static Fragment *dev_depthbuffer = NULL;
static glm::vec3 *dev_framebuffer = NULL;
static int bufIdxSize = 0;
static int vertCount = 0;
static int primitiveCount = 0;
static Triangle *dev_compactionOutput = NULL;
/**
* Kernel that writes the image to the OpenGL PBO directly.
*/
__global__
void sendImageToPBO(uchar4 *pbo, int w, int h, glm::vec3 *image) {
int x = (blockIdx.x * blockDim.x) + threadIdx.x;
int y = (blockIdx.y * blockDim.y) + threadIdx.y;
int index = x + (y * w);
if (x < w && y < h) {
glm::vec3 color;
color.x = glm::clamp(image[index].x, 0.0f, 1.0f) * 255.0;
color.y = glm::clamp(image[index].y, 0.0f, 1.0f) * 255.0;
color.z = glm::clamp(image[index].z, 0.0f, 1.0f) * 255.0;
// Each thread writes one pixel location in the texture (textel)
pbo[index].w = 0;
pbo[index].x = color.x;
pbo[index].y = color.y;
pbo[index].z = color.z;
}
}
// Writes fragment colors to the framebuffer
__global__
void render(int w, int h, Fragment *depthbuffer, glm::vec3 *framebuffer) {
int x = (blockIdx.x * blockDim.x) + threadIdx.x;
int y = (blockIdx.y * blockDim.y) + threadIdx.y;
int index = x + (y * w);
if (x < w && y < h) {
framebuffer[index] = depthbuffer[index].color;
}
}
/**
 * Clears the integer depth buffer and the fragment buffer.
*/
void clearDepthBuffer() {
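// Note: cudaMemset fills byte-by-byte, so only the low byte of (farPlane * 10000) is replicated
// into dev_depth; the result is a large sentinel value rather than the exact product.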
cudaMemset(dev_depth, scene->farPlane * 10000, width * height * sizeof(int));
cudaMemset(dev_depthbuffer, 0.0f, width * height * sizeof(Fragment));
}
/**
* Apply vertex transformations and transfer to vertex out buffer
*/
__global__
void vertexShading(int w, int h, int nearPlane, int farPlane, int vertexCount, const VertexIn *vertexBufferIn,
VertexOut *vertexBufferOut, const glm::mat4 modelView) {
int index = ((blockIdx.x * blockDim.x) + threadIdx.x) + (((blockIdx.y * blockDim.y) + threadIdx.y) * w);
if (index < vertexCount) {
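// Transform to clip space (the modelView matrix is assumed to already include the projection),
// divide by w to reach NDC, then map to window coordinates and remap z into the [near, far] range.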
glm::vec4 clipCoordinates = modelView * glm::vec4(vertexBufferIn[index].pos, 1.0f);
glm::vec3 normDeviceCoordinates = glm::vec3(clipCoordinates.x, clipCoordinates.y, clipCoordinates.z) / clipCoordinates.w;
vertexBufferOut[index].pos = glm::vec3(w * 0.5f * (normDeviceCoordinates.x + 1.0f),
h * 0.5f * (normDeviceCoordinates.y + 1.0f), 0.5f * ((farPlane - nearPlane)
* normDeviceCoordinates.z + (farPlane + nearPlane)));
vertexBufferOut[index].col = vertexBufferIn[index].col;
vertexBufferOut[index].nor = vertexBufferIn[index].nor;
vertexBufferOut[index].model_pos = vertexBufferIn[index].pos;
}
}
/**
* Assemble primitives from vertex out buffer data.
*/
__global__
void assemblePrimitives(int primitiveCount, const VertexOut *vertexBufferOut, Triangle *primitives, const int *bufIdx) {
int index = (blockIdx.x * blockDim.x) + threadIdx.x;
if (index < primitiveCount) {
for (int i = 0; i < 3; i++) {
primitives[index].v[i] = vertexBufferOut[bufIdx[3 * index + i]];
}
primitives[index].boundingBox = getAABBForTriangle(primitives[index]);
primitives[index].visible = true;
}
}
/**
* Perform scanline rasterization on a triangle
*/
__global__
void rasterization(int w, int h, int primitiveCount, Triangle *primitives, Fragment *depthbuffer, int *depth) {
int index = ((blockIdx.x * blockDim.x) + threadIdx.x) + (((blockIdx.y * blockDim.y) + threadIdx.y) * w);
if (index < primitiveCount) {
// Only doing scanline triangle atm
int minX = fmaxf(round(primitives[index].boundingBox.min.x), 0.0f), minY = fmaxf(round(primitives[index].boundingBox.min.y), 0.0f);
int maxX = fminf(round(primitives[index].boundingBox.max.x), (float)w);
// Loop through each scanline, then each pixel on the line
for (int y = fminf(round(primitives[index].boundingBox.max.y), (float)h); y >= minY; y--) {
for (int x = minX; x <= maxX; x++) {
glm::vec3 baryCentricCoordinate = calculateBarycentricCoordinate(primitives[index], glm::vec2(x, y));
if (isBarycentricCoordInBounds(baryCentricCoordinate)) {
int z = getZAtCoordinate(baryCentricCoordinate, primitives[index]) * 10000.0f;
int depthIndex = w - x + (h - y) * w;
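// atomicMin keeps the winning (minimum) depth value; the equality check below lets only the
// thread that owns that depth write its interpolated color, position and normal. The test is
// not fully race-free, since another thread may update depth[depthIndex] between the two steps.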
atomicMin(&depth[depthIndex], z);
if (depth[depthIndex] == z) {
depthbuffer[depthIndex].color = baryCentricCoordinate.x * primitives[index].v[0].col + baryCentricCoordinate.y
* primitives[index].v[1].col + baryCentricCoordinate.z * primitives[index].v[2].col;
depthbuffer[depthIndex].position = baryCentricCoordinate.x * primitives[index].v[0].pos + baryCentricCoordinate.y
* primitives[index].v[1].pos + baryCentricCoordinate.z * primitives[index].v[2].pos;
depthbuffer[depthIndex].normal = baryCentricCoordinate.x * primitives[index].v[0].nor + baryCentricCoordinate.y
* primitives[index].v[1].nor + baryCentricCoordinate.z * primitives[index].v[2].nor;
}
}
}
}
}
}
/**
* Rasterize point primitives.
*/
__global__
void pointRasterization(int w, int h, int primitiveCount, Triangle *primitives, Fragment *depthbuffer, int *depth) {
int index = ((blockIdx.x * blockDim.x) + threadIdx.x) + (((blockIdx.y * blockDim.y) + threadIdx.y) * w);
if (index < primitiveCount) {
Triangle primitive = primitives[index];
int x = round(primitive.v[1].pos.x), y = round(primitive.v[1].pos.y);
int z = primitive.v[1].pos.z * 10000.0f;
int depthIndex = w - x + (h - y) * w;
atomicMin(&depth[depthIndex], z);
if (depth[depthIndex] == z) {
Fragment fragment;
fragment.color = primitive.v[1].col;
fragment.position = primitive.v[1].pos;
fragment.normal = primitive.v[1].nor;
depthbuffer[depthIndex] = fragment;
}
}
}
/**
* Rasterize line primitives.
*/
__global__
void lineRasterization(int w, int h, int primitiveCount, Triangle *primitives, Fragment *depthbuffer, int *depth) {
int index = ((blockIdx.x * blockDim.x) + threadIdx.x) + (((blockIdx.y * blockDim.y) + threadIdx.y) * w);
if (index < primitiveCount) {
Triangle primitive = primitives[index];
glm::vec3 minPosition = primitive.v[0].pos, maxPosition = primitive.v[1].pos;
if (round(minPosition.x) == round(maxPosition.x)) {
// Get straight vertical line
int x = round(minPosition.x);
if (minPosition.y > maxPosition.y) {
// Flip
minPosition = primitive.v[1].pos;
maxPosition = primitive.v[0].pos;
}
for (int y = round(maxPosition.y); y >= round(minPosition.y); y--) {
float minMaxRatio = __fdividef(y - minPosition.y, maxPosition.y - minPosition.y);
int depthIndex = w - x + (h - y) * w;
int z = -(minMaxRatio * round(minPosition.z) + (1.0f - minMaxRatio) * round(maxPosition.z));
atomicMin(&depth[depthIndex], z);
if (depth[depthIndex] == z) {
Fragment fragment;
fragment.color = primitive.v[1].col;
fragment.position = glm::vec3(x, y, -z);
fragment.normal = glm::normalize(primitive.v[0].nor + primitive.v[1].nor);
depthbuffer[depthIndex] = fragment;
}
}
}
else {
// Otherwise walk along x and interpolate y from the slope (a simple DDA-style line)
if (round(minPosition.x) > round(maxPosition.x)) {
// Swap
minPosition = primitive.v[1].pos;
maxPosition = primitive.v[0].pos;
}
float slope = (maxPosition.y - minPosition.y) / (maxPosition.x - minPosition.x);
for (int x = round(minPosition.x); x <= round(maxPosition.x); x++) {
int y = slope * (x - round(minPosition.x)) + minPosition.y;
float minMaxRatio = __fdividef(y - minPosition.y, maxPosition.y - minPosition.y);
int depthIndex = w - x + (h - y) * w;
int z = -(minMaxRatio * minPosition.z + (1.0f - minMaxRatio) * maxPosition.z);
atomicMin(&depth[depthIndex], z);
if (depth[depthIndex] == z) {
Fragment fragment;
fragment.color = primitive.v[1].col;
fragment.position = glm::vec3(x, y, -z);
fragment.normal = glm::normalize(primitive.v[0].nor + primitive.v[1].nor);
depthbuffer[depthIndex] = fragment;
}
}
}
}
}
/**
* Fragment shader. Use light argument to color the fragment in the depth buffer.
*/
__global__
void fragmentShading(int w, int h, Fragment *depthBuffer, const Light light) {
int index = (blockIdx.x * blockDim.x) + threadIdx.x;
if (index < (w * h)) {
Fragment fragment = depthBuffer[index];
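// Lambertian diffuse term: (N . L) with L the direction toward the light, scaled by the
// fragment color and the light color.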
depthBuffer[index].color = (glm::dot(glm::normalize(light.position - fragment.position), fragment.normal)
* fragment.color * light.color);
}
}
/**
 * Perform backface culling optimization, removing unseen (back-facing) primitives.
*/
__global__
void backFaceCulling(int w, int primitiveCount, Triangle *primitives, glm::vec3 cameraPosition) {
int index = ((blockIdx.x * blockDim.x) + threadIdx.x) + (((blockIdx.y * blockDim.y) + threadIdx.y) * w);
if (index < primitiveCount) {
if (glm::dot(primitives[index].v[0].model_pos - cameraPosition, primitives[index].v[0].nor) >= 0.0f) {
primitives[index].visible = false;
}
}
}
/**
 * Perform scissor test culling, removing primitives outside of the scissor area.
*/
__global__
void scissorTest(int w, int primitiveCount, Triangle *primitives, const glm::vec2 scissorMax, const glm::vec2 scissorMin) {
int index = ((blockIdx.x * blockDim.x) + threadIdx.x) + (((blockIdx.y * blockDim.y) + threadIdx.y) * w);
if (index < primitiveCount) {
if (primitives[index].boundingBox.min.y > scissorMax.y || primitives[index].boundingBox.max.y < scissorMin.y ||
primitives[index].boundingBox.max.x > scissorMax.x || primitives[index].boundingBox.max.x < scissorMin.x) {
primitives[index].visible = false;
}
}
}
/**
* Called once at the beginning of the program to allocate memory.
*/
void rasterizeInit(int w, int h, Scene *s) {
width = w;
height = h;
scene = s;
cudaFree(dev_depth);
cudaMalloc(&dev_depth, width * height * sizeof(int));
cudaMemset(dev_depth, scene->farPlane * 10000.0f, width * height * sizeof(int));
cudaFree(dev_depthbuffer);
cudaMalloc(&dev_depthbuffer, width * height * sizeof(Fragment));
cudaMemset(dev_depthbuffer, 0, width * height * sizeof(Fragment));
cudaFree(dev_framebuffer);
cudaMalloc(&dev_framebuffer, width * height * sizeof(glm::vec3));
cudaMemset(dev_framebuffer, 0, width * height * sizeof(glm::vec3));
checkCUDAError("rasterizeInit");
}
/**
* Set all of the buffers necessary for rasterization.
*/
void rasterizeSetBuffers(
int _bufIdxSize, int *bufIdx,
int _vertCount, float *bufPos, float *bufNor, float *bufCol) {
bufIdxSize = _bufIdxSize;
vertCount = _vertCount;
primitiveCount = vertCount / 3;
cudaFree(dev_bufIdx);
cudaMalloc(&dev_bufIdx, bufIdxSize * sizeof(int));
cudaMemcpy(dev_bufIdx, bufIdx, bufIdxSize * sizeof(int), cudaMemcpyHostToDevice);
VertexIn *bufVertex = new VertexIn[_vertCount];
for (int i = 0; i < vertCount; i++) {
int j = i * 3;
bufVertex[i].pos = glm::vec3(bufPos[j + 0], bufPos[j + 1], bufPos[j + 2]);
bufVertex[i].nor = glm::vec3(bufNor[j + 0], bufNor[j + 1], bufNor[j + 2]);
bufVertex[i].col = glm::vec3(bufCol[j + 0], bufCol[j + 1], bufCol[j + 2]);
}
cudaFree(dev_bufVertex);
cudaMalloc(&dev_bufVertex, vertCount * sizeof(VertexIn));
cudaMemcpy(dev_bufVertex, bufVertex, vertCount * sizeof(VertexIn), cudaMemcpyHostToDevice);
cudaFree(dev_bufVertexOut);
cudaMalloc(&dev_bufVertexOut, vertCount * sizeof(VertexOut));
cudaMemset(dev_bufVertexOut, 0, vertCount * sizeof(VertexIn));
cudaFree(dev_primitives);
cudaMalloc(&dev_primitives, vertCount / 3 * sizeof(Triangle));
cudaMemset(dev_primitives, 0, vertCount / 3 * sizeof(Triangle));
cudaFree(dev_compactionOutput);
cudaMalloc(&dev_compactionOutput, vertCount / 3 * sizeof(Triangle));
cudaMemset(dev_compactionOutput, 0, vertCount / 3 * sizeof(Triangle));
checkCUDAError("rasterizeSetBuffers");
}
/**
* Perform rasterization.
*/
void rasterize(uchar4 *pbo) {
int sideLength2d = 8;
dim3 blockSize2d(sideLength2d, sideLength2d);
dim3 blockCount2d((width + blockSize2d.x - 1) / blockSize2d.x,
(height + blockSize2d.y - 1) / blockSize2d.y);
int vertexBlockSize = VERTBLOCKSIZE, fragmentBlockSize = FRAGBLOCKSIZE;
int vertexGridSize = (vertCount + VERTBLOCKSIZE - 1) / VERTBLOCKSIZE;
int fragmentGridSize = (width * height + FRAGBLOCKSIZE - 1) / FRAGBLOCKSIZE;
primitiveCount = vertCount / 3;
// Clear depth buffer
clearDepthBuffer();
// Vertex shading
vertexShading<<<vertexGridSize, vertexBlockSize>>>(width, height, scene->nearPlane, scene->farPlane, vertCount, dev_bufVertex, dev_bufVertexOut, scene->modelView);
// Primitive Assembly
assemblePrimitives<<<vertexGridSize, vertexBlockSize>>>(primitiveCount, dev_bufVertexOut, dev_primitives, dev_bufIdx);
// Culling after Primitive assembly
if (scene->culling) {
backFaceCulling<<<blockCount2d, blockSize2d>>>(width, primitiveCount, dev_primitives, scene->camera.position);
primitiveCount = StreamCompaction::Efficient::Compact(primitiveCount, dev_compactionOutput, dev_primitives);
cudaMemcpy(dev_primitives, dev_compactionOutput, primitiveCount * sizeof(Triangle), cudaMemcpyDeviceToDevice);
}
// Scissor test
if (scene->scissor) {
scissorTest<<<blockCount2d, blockSize2d>>>(width, primitiveCount, dev_primitives, scene->scissorMax, scene->scissorMin);
primitiveCount = StreamCompaction::Efficient::Compact(primitiveCount, dev_compactionOutput, dev_primitives);
cudaMemcpy(dev_primitives, dev_compactionOutput, primitiveCount * sizeof(Triangle), cudaMemcpyDeviceToDevice);
}
// rasterization
// Choose between primitive types based on scene file
if (scene->pointRasterization) {
pointRasterization<<<blockCount2d, blockSize2d>>>(width, height, primitiveCount, dev_primitives, dev_depthbuffer, dev_depth);
}
else if (scene->lineRasterization) {
lineRasterization<<<blockCount2d, blockSize2d>>>(width, height, primitiveCount, dev_primitives, dev_depthbuffer, dev_depth);
}
else {
// Standard triangle rasterization
rasterization<<<blockCount2d, blockSize2d>>>(width, height, primitiveCount, dev_primitives, dev_depthbuffer, dev_depth);
}
// Fragment shading
fragmentShading<<<fragmentGridSize, fragmentBlockSize>>>(width, height, dev_depthbuffer, scene->light);
// Copy depthbuffer colors into framebuffer
render<<<blockCount2d, blockSize2d>>>(width, height, dev_depthbuffer, dev_framebuffer);
// Copy framebuffer into OpenGL buffer for OpenGL previewing
sendImageToPBO<<<blockCount2d, blockSize2d>>>(pbo, width, height, dev_framebuffer);
checkCUDAError("rasterize");
}
/**
* Called once at the end of the program to free CUDA memory.
*/
void rasterizeFree() {
cudaFree(dev_bufIdx);
dev_bufIdx = NULL;
cudaFree(dev_bufVertex);
dev_bufVertex = NULL;
cudaFree(dev_primitives);
dev_primitives = NULL;
cudaFree(dev_depthbuffer);
dev_depthbuffer = NULL;
cudaFree(dev_framebuffer);
dev_framebuffer = NULL;
cudaFree(dev_depth);
dev_depth = NULL;
cudaFree(dev_compactionOutput);
dev_compactionOutput = NULL;
checkCUDAError("rasterizeFree");
}
|
b9d5a4d6822859141d8c48cdc9cdc06ef3468cf6.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
//
// imgproc_main.cpp
//
//
// Created by Nathaniel Lewis on 3/8/12.
// Copyright (c) 2012 E1FTW Games. All rights reserved.
//
#include <iostream>
#include <string>
#include <opencv2/core/core.hpp>
#include <opencv2/imgproc/imgproc.hpp>
#include <opencv2/highgui/highgui.hpp>
#define Blur 5
// GPU constant memory to hold our kernels (extremely fast access time)
__constant__ float convolutionKernelStore[1024];
/**
* Convolution function for cuda. Destination is expected to have the same width/height as source, but there will be a border
* of floor(kWidth/2) pixels left and right and floor(kHeight/2) pixels top and bottom
*
* @param source Source image host pinned memory pointer
* @param width Source image width
* @param height Source image height
* @param paddingX source image padding along x
* @param paddingY source image padding along y
* @param kOffset offset into kernel store constant memory
* @param kWidth kernel width
* @param kHeight kernel height
* @param destination Destination image host pinned memory pointer
*/
__global__ void convolve(unsigned char *source, int width, int height, int paddingX, int paddingY, ssize_t kOffset, int kWidth, int kHeight, unsigned char *destination)
{
// Calculate our pixel's location
int x = (blockIdx.x * blockDim.x) + threadIdx.x;
int y = (blockIdx.y * blockDim.y) + threadIdx.y;
float sum = 0.0;
int pWidth = kWidth/2;
int pHeight = kHeight/2;
// Only execute for valid pixels
if(x >= pWidth+paddingX &&
y >= pHeight+paddingY &&
x < (blockDim.x * gridDim.x)-pWidth-paddingX &&
y < (blockDim.y * gridDim.y)-pHeight-paddingY)
{
for(int j = -pHeight; j <= pHeight; j++)
{
for(int i = -pWidth; i <= pWidth; i++)
{
// Sample the weight for this location
int ki = (i+pWidth);
int kj = (j+pHeight);
float w = convolutionKernelStore[(kj * kWidth) + ki + kOffset];
sum += w * float(source[((y+j) * width) + (x+i)]);
}
}
}
// Average the sum
destination[(y * width) + x] = (unsigned char) sum;
}
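// Morphological dilation: the output pixel is set to 255 if any neighbour under the
// kWidth x kHeight structuring element is non-zero, and 0 otherwise.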
__global__ void Dilate(unsigned char *source, int width, int height, int paddingX, int paddingY, ssize_t kOffset, int kWidth, int kHeight, unsigned char *destination)
{
// Calculate our pixel's location
int x = (blockIdx.x * blockDim.x) + threadIdx.x;
int y = (blockIdx.y * blockDim.y) + threadIdx.y;
bool check = false ;
float sum = 0.0;
int pWidth = kWidth/2;
int pHeight = kHeight/2;
// Only execute for valid pixels
if(x >= pWidth+paddingX &&
y >= pHeight+paddingY &&
x < (blockDim.x * gridDim.x)-pWidth-paddingX &&
y < (blockDim.y * gridDim.y)-pHeight-paddingY)
{
for(int j = -pHeight; j <= pHeight; j++)
{
for(int i = -pWidth; i <= pWidth; i++)
{
if(float(source[((y+j) * width) + (x+i)]) != 0){
sum = 255;
break;
}
else{
float w = convolutionKernelStore[(j * kWidth) + i + kOffset];
sum += w*float(source[((y+j) * width) + (x+i)]);
}
}
if(check == true){
check = false ;
break;
}
}
}
if(sum != 0){
sum = 255;
}
// Average the sum
destination[(y * width) + x] = (unsigned char) sum;
}
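// Morphological erosion: the output pixel survives (255) only if every neighbour under the
// structuring element is non-zero; a single zero neighbour erodes it to 0.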
__global__ void Erode(unsigned char *source, int width, int height, int paddingX, int paddingY, ssize_t kOffset, int kWidth, int kHeight, unsigned char *destination)
{
// Calculate our pixel's location
int x = (blockIdx.x * blockDim.x) + threadIdx.x;
int y = (blockIdx.y * blockDim.y) + threadIdx.y;
bool check = false ;
float sum = 0.0;
int pWidth = kWidth/2;
int pHeight = kHeight/2;
// Only execute for valid pixels
if(x >= pWidth+paddingX &&
y >= pHeight+paddingY &&
x < (blockDim.x * gridDim.x)-pWidth-paddingX &&
y < (blockDim.y * gridDim.y)-pHeight-paddingY)
{
for(int j = -pHeight; j <= pHeight; j++)
{
for(int i = -pWidth; i <= pWidth; i++)
{
if(float(source[((y+j) * width) + (x+i)]) == 0){
sum = 0;
check = true;
break ;
}
else{
float w = convolutionKernelStore[(j * kWidth) + i + kOffset];
sum += w*float(source[((y+j) * width) + (x+i)]);
}
}
if(check == true){
check = false ;
break;
}
}
}
if(sum != 0){
sum = 255;
}
// Average the sum
destination[(y * width) + x] = (unsigned char) sum;
}
// computes sqrt(a^2 + b^2) per element on the GPU (Pythagorean combination of the two gradients)
__global__ void pythagoras(unsigned char *a, unsigned char *b, unsigned char *c)
{
int idx = (blockIdx.x * blockDim.x) + threadIdx.x;
float af = float(a[idx]);
float bf = float(b[idx]);
c[idx] = (unsigned char) sqrtf(af*af + bf*bf);
// c[idx] = (unsigned char) abs(af*af - bf*bf);
}
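// Binary threshold kernel ('theshould'): pixels brighter than 'min' become 255, the rest 0.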
__global__ void theshould(unsigned char *a, unsigned char *c, int min)
{
int idx = (blockIdx.x * blockDim.x) + threadIdx.x;
float af = float(a[idx]);
if(af > min ){
c[idx] = 255;
}
else{
c[idx] = 0;
}
// c[idx] = (unsigned char) abs(af*af - bf*bf);
}
// create an image buffer. return host ptr, pass out device pointer through pointer to pointer
unsigned char* createImageBuffer(unsigned int bytes, unsigned char **devicePtr)
{
unsigned char *ptr = NULL;
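// Allocate zero-copy (mapped, pinned) host memory so the same buffer backs both the
// OpenCV cv::Mat on the CPU and the kernels on the GPU without explicit copies.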
hipSetDeviceFlags(hipDeviceMapHost);
hipHostMalloc(&ptr, bytes, hipHostMallocMapped);
hipHostGetDevicePointer(devicePtr, ptr, 0);
return ptr;
}
int main (int argc, char** argv)
{
// Open a webcamera
//uchar3 test ;
cv::VideoCapture camera(1);
cv::Mat frame;
if(!camera.isOpened())
return -1;
// Create the capture windows
cv::namedWindow("Source");
cv::namedWindow("Greyscale");
cv::namedWindow("Blurred");
cv::namedWindow("Sobel");
// Create the cuda event timers
hipEvent_t start, stop;
hipEventCreate(&start);
hipEventCreate(&stop);
// Create the gaussian kernel (sum = 159)
// const float gaussianKernel5x5[25] =
// {
// 2.f/159.f, 4.f/159.f, 5.f/159.f, 4.f/159.f, 2.f/159.f,
// 4.f/159.f, 9.f/159.f, 12.f/159.f, 9.f/159.f, 4.f/159.f,
// 5.f/159.f, 12.f/159.f, 15.f/159.f, 12.f/159.f, 5.f/159.f,
// 4.f/159.f, 9.f/159.f, 12.f/159.f, 9.f/159.f, 4.f/159.f,
// 2.f/159.f, 4.f/159.f, 5.f/159.f, 4.f/159.f, 2.f/159.f,
// };
const float gaussianKernel5x5[Blur*Blur] =
{
1.f/25.f, 1.f/25.f, 1.f/25.f, 1.f/25.f, 1.f/25.f,
1.f/25.f, 1.f/25.f, 1.f/25.f, 1.f/25.f, 1.f/25.f,
1.f/25.f, 1.f/25.f, 1.f/25.f, 1.f/25.f, 1.f/25.f,
1.f/25.f, 1.f/25.f, 1.f/25.f, 1.f/25.f, 1.f/25.f,
1.f/25.f, 1.f/25.f, 1.f/25.f, 1.f/25.f, 1.f/25.f,
};
hipMemcpyToSymbol(convolutionKernelStore, gaussianKernel5x5, sizeof(gaussianKernel5x5), 0);
const ssize_t gaussianKernel5x5Offset = 0;
// Sobel gradient kernels
const float sobelGradientX[9] =
{
0.f, 1.f, 0.f,
1.f, -4.f, 1.f,
0.f, 1.f, 0.f,
};
const float sobelGradientY[9] =
{
1.f, 1.f, 1.f,
1.f, -8.f, 1.f,
1.f, 1.f, 1.f,
};
// const float sobelGradientX[9] =
// {
// 0.f, -1.f, 0.f,
// -1.f, 4.f, -1.f,
// 0.f, -1.f, 0.f,
// };
// const float sobelGradientY[9] =
// {
// -1.f, -1.f, -1.f,
// -1.f, 8.f, -1.f,
// -1.f, -1.f, -1.f,
// };
// const float sobelGradientX[9] =
// {
// 0.f, -8.f, 0.f,
// -8.f, 4.f, -8.f,
// 0.f, -8.f, 0.f,
// };
// const float sobelGradientY[9] =
// {
// -2.f, 0.f, -2.f,
// -2.f, 7.f, -2.f,
// -2.f, 0.f, -2.f,
// };
// const float sobelGradientX[25] =
// {
// 2.f, 2.f, 4.f, 2.f, 2.f,
// 1.f, 1.f, 2.f, 1.f, 1.f,
// 0.f, 0.f, 0.f, 0.f, 0.f,
// -1.f, -1.f, -2.f, -1.f, -1.f,
// -2.f, -2.f, -4.f, -2.f, -2.f,
// };
// const float sobelGradientY[25] =
// {
// 2.f, 1.f, 0.f, -1.f, -2.f,
// 2.f, 1.f, 0.f, -1.f, -2.f,
// 4.f, 2.f, 0.f, -2.f, -4.f,
// 2.f, 1.f, 0.f, -1.f, -2.f,
// 2.f, 1.f, 0.f, -1.f, -2.f,
// };
// const float sobelGradientX[9] =
// {
// 5.f, 5.f, 5.f,
// -3.f, 0.f, -3.f,
// -3.f, -3.f, -3.f,
// };
// const float sobelGradientY[9] =
// {
// 5.f, -3.f, -3.f,
// 5.f, 0.f, -3.f,
// 5.f, -3.f, -3.f,
// };
const float dilate[9] =
{
1.f, 1.f, 1.f,
1.f, 1.f, 1.f,
1.f, 1.f, 1.f
};
const float erode[9] =
{
1.f, 1.f, 1.f,
1.f, 1.f, 1.f,
1.f, 1.f, 1.f
};
hipMemcpyToSymbol(convolutionKernelStore, sobelGradientX, sizeof(sobelGradientX), sizeof(gaussianKernel5x5));
hipMemcpyToSymbol(convolutionKernelStore, sobelGradientY, sizeof(sobelGradientY), sizeof(gaussianKernel5x5) + sizeof(sobelGradientX));
const ssize_t sobelGradientXOffset = sizeof(gaussianKernel5x5)/sizeof(float);
const ssize_t sobelGradientYOffset = sizeof(sobelGradientX)/sizeof(float) + sobelGradientXOffset;
const ssize_t dilateKernel = sizeof(dilate)/sizeof(float);
const ssize_t erodeKernel = sizeof(erode)/sizeof(float);
const ssize_t erodeKernel2 = sizeof(erodeKernel)/sizeof(float);
// Create CPU/GPU shared images - one for the initial and one for the result
camera >> frame;
unsigned char *sourceDataDevice, *blurredDataDevice, *edgesDataDevice, *thresholdDataDevice, *dilateDataDevice, *erodeDataDevice, *erodeDataDevice2, *dilateDataDevic2;
cv::Mat source (frame.size(), CV_8U, createImageBuffer(frame.size().width * frame.size().height, &sourceDataDevice));
cv::Mat blurred (frame.size(), CV_8U, createImageBuffer(frame.size().width * frame.size().height, &blurredDataDevice));
cv::Mat edges (frame.size(), CV_8U, createImageBuffer(frame.size().width * frame.size().height, &edgesDataDevice));
cv::Mat thes (frame.size(), CV_8U, createImageBuffer(frame.size().width * frame.size().height, &thresholdDataDevice));
cv::Mat dilates (frame.size(), CV_8U, createImageBuffer(frame.size().width * frame.size().height, &dilateDataDevice));
cv::Mat dilates2 (frame.size(), CV_8U, createImageBuffer(frame.size().width * frame.size().height, &dilateDataDevic2));
cv::Mat erodes (frame.size(), CV_8U, createImageBuffer(frame.size().width * frame.size().height, &erodeDataDevice));
cv::Mat erodes2 (frame.size(), CV_8U, createImageBuffer(frame.size().width * frame.size().height, &erodeDataDevice2));
// Create two temporary images (for holding sobel gradients)
unsigned char *deviceGradientX, *deviceGradientY;
hipMalloc(&deviceGradientX, frame.size().width * frame.size().height);
hipMalloc(&deviceGradientY, frame.size().width * frame.size().height);
// Loop while capturing images
while(1)
{
// Capture the image and store a gray conversion to the gpu
camera >> frame;
cv::cvtColor(frame, source, CV_BGR2GRAY);
// Record the time it takes to process
hipEventRecord(start);
{
// convolution kernel launch parameters
dim3 cblocks (frame.size().width / 32, frame.size().height / 32);
dim3 cthreads(32, 32);
// pythagorean kernel launch parameters
dim3 pblocks (frame.size().width * frame.size().height / 1024);
dim3 pthreads(1024, 1);
//hipLaunchKernelGGL(( convolve), dim3(cblocks),dim3(cthreads), 0, 0, sourceDataDevice, frame.size().width, frame.size().height, 0, 0, gaussianKernel5x5Offset, Blur, Blur, blurredDataDevice);
// Perform the sobel gradient convolutions (x&y padding is now 2 because there is a border of 2 around a 5x5 gaussian filtered image)
hipLaunchKernelGGL(( convolve), dim3(cblocks),dim3(cthreads), 0, 0, sourceDataDevice, frame.size().width, frame.size().height, 2, 2, sobelGradientXOffset, 3, 3, deviceGradientX);
hipLaunchKernelGGL(( convolve), dim3(cblocks),dim3(cthreads), 0, 0, sourceDataDevice, frame.size().width, frame.size().height, 2, 2, sobelGradientYOffset, 3, 3, deviceGradientY);
hipLaunchKernelGGL(( pythagoras), dim3(pblocks),dim3(pthreads), 0, 0, deviceGradientX, deviceGradientY, edgesDataDevice);
// Perform the gaussian blur (first kernel in store @ 0)
hipLaunchKernelGGL(( convolve), dim3(cblocks),dim3(cthreads), 0, 0, edgesDataDevice, frame.size().width, frame.size().height, 0, 0, gaussianKernel5x5Offset, Blur, Blur, blurredDataDevice);
hipLaunchKernelGGL(( theshould), dim3(pblocks),dim3(pthreads), 0, 0, blurredDataDevice,thresholdDataDevice,60);
hipLaunchKernelGGL(( Dilate), dim3(cblocks),dim3(cthreads), 0, 0, thresholdDataDevice, frame.size().width, frame.size().height, 2, 2, dilateKernel, 4, 4, dilateDataDevice);
hipLaunchKernelGGL(( Erode), dim3(cblocks),dim3(cthreads), 0, 0, dilateDataDevice, frame.size().width, frame.size().height, 2, 2, erodeKernel, 7, 7, erodeDataDevice);
// //
//hipLaunchKernelGGL(( Erode), dim3(cblocks),dim3(cthreads), 0, 0, dilateDataDevice, frame.size().width, frame.size().height, 2, 2, erodeKernl, 4, 4, erodeDataDevice);
//hipLaunchKernelGGL(( Dilate), dim3(cblocks),dim3(cthreads), 0, 0, erodeDataDevice, frame.size().width, frame.size().height, 2, 2, dilateKernel, 4, 4, dilateDataDevic2);
//hipLaunchKernelGGL(( Erode), dim3(cblocks),dim3(cthreads), 0, 0, dilateDataDevic2, frame.size().width, frame.size().height, 2, 2, erodeKernel2, 4, 4, erodeDataDevice2);
hipDeviceSynchronize();
}
hipEventRecord(stop);
// Display the elapsed time
float ms = 0.0f;
hipEventSynchronize(stop);
hipEventElapsedTime(&ms, start, stop);
std::cout << "Elapsed GPU time: " << ms << " milliseconds" << std::endl;
// Show the results
cv::imshow("Source", frame);
cv::imshow("Greyscale", source);
cv::imshow("Sobel", edges);
cv::imshow("Blurred", blurred);
cv::imshow("Theshould", thes);
cv::imshow("Erode", erodes);
cv::imshow("Erode2", erodes2);
cv::imshow("Dilate", dilates);
cv::imshow("Dilate2", dilates2);
// Spin
if(cv::waitKey(1) == 27) break;
}
// Exit
hipHostFree(source.data);
hipHostFree(blurred.data);
hipHostFree(edges.data);
hipFree(deviceGradientX);
hipFree(deviceGradientY);
return 0;
}
| b9d5a4d6822859141d8c48cdc9cdc06ef3468cf6.cu | //
// imgproc_main.cpp
//
//
// Created by Nathaniel Lewis on 3/8/12.
// Copyright (c) 2012 E1FTW Games. All rights reserved.
//
#include <iostream>
#include <string>
#include <opencv2/core/core.hpp>
#include <opencv2/imgproc/imgproc.hpp>
#include <opencv2/highgui/highgui.hpp>
#define Blur 5
// GPU constant memory to hold our kernels (extremely fast access time)
__constant__ float convolutionKernelStore[1024];
/**
* Convolution function for cuda. Destination is expected to have the same width/height as source, but there will be a border
* of floor(kWidth/2) pixels left and right and floor(kHeight/2) pixels top and bottom
*
* @param source Source image host pinned memory pointer
* @param width Source image width
* @param height Source image height
* @param paddingX source image padding along x
* @param paddingY source image padding along y
* @param kOffset offset into kernel store constant memory
* @param kWidth kernel width
* @param kHeight kernel height
* @param destination Destination image host pinned memory pointer
*/
__global__ void convolve(unsigned char *source, int width, int height, int paddingX, int paddingY, ssize_t kOffset, int kWidth, int kHeight, unsigned char *destination)
{
// Calculate our pixel's location
int x = (blockIdx.x * blockDim.x) + threadIdx.x;
int y = (blockIdx.y * blockDim.y) + threadIdx.y;
float sum = 0.0;
int pWidth = kWidth/2;
int pHeight = kHeight/2;
// Only execute for valid pixels
if(x >= pWidth+paddingX &&
y >= pHeight+paddingY &&
x < (blockDim.x * gridDim.x)-pWidth-paddingX &&
y < (blockDim.y * gridDim.y)-pHeight-paddingY)
{
for(int j = -pHeight; j <= pHeight; j++)
{
for(int i = -pWidth; i <= pWidth; i++)
{
// Sample the weight for this location
int ki = (i+pWidth);
int kj = (j+pHeight);
float w = convolutionKernelStore[(kj * kWidth) + ki + kOffset];
sum += w * float(source[((y+j) * width) + (x+i)]);
}
}
}
// Average the sum
destination[(y * width) + x] = (unsigned char) sum;
}
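/* Illustrative usage sketch: for a 640x480 frame and the 5x5 Gaussian kernel stored at offset 0 in
 * convolutionKernelStore, a launch like the one below blurs the image. The buffer names srcDev and
 * dstDev are hypothetical placeholders, not variables defined in this file.
 *
 *   dim3 cthreads(32, 32);
 *   dim3 cblocks(640 / 32, 480 / 32);
 *   convolve<<<cblocks, cthreads>>>(srcDev, 640, 480, 0, 0, 0, 5, 5, dstDev);
 */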
__global__ void Dilate(unsigned char *source, int width, int height, int paddingX, int paddingY, ssize_t kOffset, int kWidth, int kHeight, unsigned char *destination)
{
// Calculate our pixel's location
int x = (blockIdx.x * blockDim.x) + threadIdx.x;
int y = (blockIdx.y * blockDim.y) + threadIdx.y;
bool check = false ;
float sum = 0.0;
int pWidth = kWidth/2;
int pHeight = kHeight/2;
// Only execute for valid pixels
if(x >= pWidth+paddingX &&
y >= pHeight+paddingY &&
x < (blockDim.x * gridDim.x)-pWidth-paddingX &&
y < (blockDim.y * gridDim.y)-pHeight-paddingY)
{
for(int j = -pHeight; j <= pHeight; j++)
{
for(int i = -pWidth; i <= pWidth; i++)
{
if(float(source[((y+j) * width) + (x+i)]) != 0){
sum = 255;
break;
}
else{
float w = convolutionKernelStore[(j * kWidth) + i + kOffset];
sum += w*float(source[((y+j) * width) + (x+i)]);
}
}
if(check == true){
check = false ;
break;
}
}
}
if(sum != 0){
sum = 255;
}
// Average the sum
destination[(y * width) + x] = (unsigned char) sum;
}
__global__ void Erode(unsigned char *source, int width, int height, int paddingX, int paddingY, ssize_t kOffset, int kWidth, int kHeight, unsigned char *destination)
{
// Calculate our pixel's location
int x = (blockIdx.x * blockDim.x) + threadIdx.x;
int y = (blockIdx.y * blockDim.y) + threadIdx.y;
bool check = false ;
float sum = 0.0;
int pWidth = kWidth/2;
int pHeight = kHeight/2;
// Only execute for valid pixels
if(x >= pWidth+paddingX &&
y >= pHeight+paddingY &&
x < (blockDim.x * gridDim.x)-pWidth-paddingX &&
y < (blockDim.y * gridDim.y)-pHeight-paddingY)
{
for(int j = -pHeight; j <= pHeight; j++)
{
for(int i = -pWidth; i <= pWidth; i++)
{
if(float(source[((y+j) * width) + (x+i)]) == 0){
sum = 0;
check = true;
break ;
}
else{
float w = convolutionKernelStore[(j * kWidth) + i + kOffset];
sum += w*float(source[((y+j) * width) + (x+i)]);
}
}
if(check == true){
check = false ;
break;
}
}
}
if(sum != 0){
sum = 255;
}
// Average the sum
destination[(y * width) + x] = (unsigned char) sum;
}
// computes the Pythagorean theorem (hypotenuse of the two gradients) element-wise along a vector on the GPU
__global__ void pythagoras(unsigned char *a, unsigned char *b, unsigned char *c)
{
int idx = (blockIdx.x * blockDim.x) + threadIdx.x;
float af = float(a[idx]);
float bf = float(b[idx]);
c[idx] = (unsigned char) sqrtf(af*af + bf*bf);
// c[idx] = (unsigned char) abs(af*af - bf*bf);
}
__global__ void theshould(unsigned char *a, unsigned char *c, int min)
{
int idx = (blockIdx.x * blockDim.x) + threadIdx.x;
float af = float(a[idx]);
if(af > min ){
c[idx] = 255;
}
else{
c[idx] = 0;
}
// c[idx] = (unsigned char) abs(af*af - bf*bf);
}
// create an image buffer. return host ptr, pass out device pointer through pointer to pointer
unsigned char* createImageBuffer(unsigned int bytes, unsigned char **devicePtr)
{
unsigned char *ptr = NULL;
cudaSetDeviceFlags(cudaDeviceMapHost);
cudaHostAlloc(&ptr, bytes, cudaHostAllocMapped);
cudaHostGetDevicePointer(devicePtr, ptr, 0);
return ptr;
}
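/* The buffer returned by createImageBuffer is zero-copy (mapped pinned) memory: cudaHostAllocMapped
 * pins the host allocation and cudaHostGetDevicePointer exposes the same bytes to kernels, so a
 * cv::Mat wrapping the host pointer and a kernel writing through the device pointer share storage
 * without explicit cudaMemcpy calls. A minimal, hypothetical sketch (names not from this file):
 *
 *   unsigned char *devPtr;
 *   unsigned char *hostPtr = createImageBuffer(width * height, &devPtr);
 *   someKernel<<<grid, block>>>(devPtr);
 *   cudaDeviceSynchronize();   // make the kernel's writes visible through hostPtr
 */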
int main (int argc, char** argv)
{
// Open a webcamera
//uchar3 test ;
cv::VideoCapture camera(1);
cv::Mat frame;
if(!camera.isOpened())
return -1;
// Create the capture windows
cv::namedWindow("Source");
cv::namedWindow("Greyscale");
cv::namedWindow("Blurred");
cv::namedWindow("Sobel");
// Create the cuda event timers
cudaEvent_t start, stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);
// Create the gaussian kernel (sum = 159)
// const float gaussianKernel5x5[25] =
// {
// 2.f/159.f, 4.f/159.f, 5.f/159.f, 4.f/159.f, 2.f/159.f,
// 4.f/159.f, 9.f/159.f, 12.f/159.f, 9.f/159.f, 4.f/159.f,
// 5.f/159.f, 12.f/159.f, 15.f/159.f, 12.f/159.f, 5.f/159.f,
// 4.f/159.f, 9.f/159.f, 12.f/159.f, 9.f/159.f, 4.f/159.f,
// 2.f/159.f, 4.f/159.f, 5.f/159.f, 4.f/159.f, 2.f/159.f,
// };
const float gaussianKernel5x5[Blur*Blur] =
{
1.f/25.f, 1.f/25.f, 1.f/25.f, 1.f/25.f, 1.f/25.f,
1.f/25.f, 1.f/25.f, 1.f/25.f, 1.f/25.f, 1.f/25.f,
1.f/25.f, 1.f/25.f, 1.f/25.f, 1.f/25.f, 1.f/25.f,
1.f/25.f, 1.f/25.f, 1.f/25.f, 1.f/25.f, 1.f/25.f,
1.f/25.f, 1.f/25.f, 1.f/25.f, 1.f/25.f, 1.f/25.f,
};
cudaMemcpyToSymbol(convolutionKernelStore, gaussianKernel5x5, sizeof(gaussianKernel5x5), 0);
const ssize_t gaussianKernel5x5Offset = 0;
// Sobel gradient kernels
const float sobelGradientX[9] =
{
0.f, 1.f, 0.f,
1.f, -4.f, 1.f,
0.f, 1.f, 0.f,
};
const float sobelGradientY[9] =
{
1.f, 1.f, 1.f,
1.f, -8.f, 1.f,
1.f, 1.f, 1.f,
};
// const float sobelGradientX[9] =
// {
// 0.f, -1.f, 0.f,
// -1.f, 4.f, -1.f,
// 0.f, -1.f, 0.f,
// };
// const float sobelGradientY[9] =
// {
// -1.f, -1.f, -1.f,
// -1.f, 8.f, -1.f,
// -1.f, -1.f, -1.f,
// };
// const float sobelGradientX[9] =
// {
// 0.f, -8.f, 0.f,
// -8.f, 4.f, -8.f,
// 0.f, -8.f, 0.f,
// };
// const float sobelGradientY[9] =
// {
// -2.f, 0.f, -2.f,
// -2.f, 7.f, -2.f,
// -2.f, 0.f, -2.f,
// };
// const float sobelGradientX[25] =
// {
// 2.f, 2.f, 4.f, 2.f, 2.f,
// 1.f, 1.f, 2.f, 1.f, 1.f,
// 0.f, 0.f, 0.f, 0.f, 0.f,
// -1.f, -1.f, -2.f, -1.f, -1.f,
// -2.f, -2.f, -4.f, -2.f, -2.f,
// };
// const float sobelGradientY[25] =
// {
// 2.f, 1.f, 0.f, -1.f, -2.f,
// 2.f, 1.f, 0.f, -1.f, -2.f,
// 4.f, 2.f, 0.f, -2.f, -4.f,
// 2.f, 1.f, 0.f, -1.f, -2.f,
// 2.f, 1.f, 0.f, -1.f, -2.f,
// };
// const float sobelGradientX[9] =
// {
// 5.f, 5.f, 5.f,
// -3.f, 0.f, -3.f,
// -3.f, -3.f, -3.f,
// };
// const float sobelGradientY[9] =
// {
// 5.f, -3.f, -3.f,
// 5.f, 0.f, -3.f,
// 5.f, -3.f, -3.f,
// };
const float dilate[9] =
{
1.f, 1.f, 1.f,
1.f, 1.f, 1.f,
1.f, 1.f, 1.f
};
const float erode[9] =
{
1.f, 1.f, 1.f,
1.f, 1.f, 1.f,
1.f, 1.f, 1.f
};
cudaMemcpyToSymbol(convolutionKernelStore, sobelGradientX, sizeof(sobelGradientX), sizeof(gaussianKernel5x5));
cudaMemcpyToSymbol(convolutionKernelStore, sobelGradientY, sizeof(sobelGradientY), sizeof(gaussianKernel5x5) + sizeof(sobelGradientX));
const ssize_t sobelGradientXOffset = sizeof(gaussianKernel5x5)/sizeof(float);
const ssize_t sobelGradientYOffset = sizeof(sobelGradientX)/sizeof(float) + sobelGradientXOffset;
const ssize_t dilateKernel = sizeof(dilate)/sizeof(float);
const ssize_t erodeKernel = sizeof(erode)/sizeof(float);
const ssize_t erodeKernel2 = sizeof(erodeKernel)/sizeof(float);
// Create CPU/GPU shared images - one for the initial and one for the result
camera >> frame;
unsigned char *sourceDataDevice, *blurredDataDevice, *edgesDataDevice, *thresholdDataDevice, *dilateDataDevice, *erodeDataDevice, *erodeDataDevice2, *dilateDataDevic2;
cv::Mat source (frame.size(), CV_8U, createImageBuffer(frame.size().width * frame.size().height, &sourceDataDevice));
cv::Mat blurred (frame.size(), CV_8U, createImageBuffer(frame.size().width * frame.size().height, &blurredDataDevice));
cv::Mat edges (frame.size(), CV_8U, createImageBuffer(frame.size().width * frame.size().height, &edgesDataDevice));
cv::Mat thes (frame.size(), CV_8U, createImageBuffer(frame.size().width * frame.size().height, &thresholdDataDevice));
cv::Mat dilates (frame.size(), CV_8U, createImageBuffer(frame.size().width * frame.size().height, &dilateDataDevice));
cv::Mat dilates2 (frame.size(), CV_8U, createImageBuffer(frame.size().width * frame.size().height, &dilateDataDevic2));
cv::Mat erodes (frame.size(), CV_8U, createImageBuffer(frame.size().width * frame.size().height, &erodeDataDevice));
cv::Mat erodes2 (frame.size(), CV_8U, createImageBuffer(frame.size().width * frame.size().height, &erodeDataDevice2));
// Create two temporary images (for holding sobel gradients)
unsigned char *deviceGradientX, *deviceGradientY;
cudaMalloc(&deviceGradientX, frame.size().width * frame.size().height);
cudaMalloc(&deviceGradientY, frame.size().width * frame.size().height);
// Loop while capturing images
while(1)
{
// Capture the image and store a gray conversion to the gpu
camera >> frame;
cv::cvtColor(frame, source, CV_BGR2GRAY);
// Record the time it takes to process
cudaEventRecord(start);
{
// convolution kernel launch parameters
dim3 cblocks (frame.size().width / 32, frame.size().height / 32);
dim3 cthreads(32, 32);
            // Pythagorean kernel launch parameters
dim3 pblocks (frame.size().width * frame.size().height / 1024);
dim3 pthreads(1024, 1);
// convolve<<<cblocks,cthreads>>>(sourceDataDevice, frame.size().width, frame.size().height, 0, 0, gaussianKernel5x5Offset, Blur, Blur, blurredDataDevice);
// Perform the sobel gradient convolutions (x&y padding is now 2 because there is a border of 2 around a 5x5 gaussian filtered image)
convolve<<<cblocks,cthreads>>>(sourceDataDevice, frame.size().width, frame.size().height, 2, 2, sobelGradientXOffset, 3, 3, deviceGradientX);
convolve<<<cblocks,cthreads>>>(sourceDataDevice, frame.size().width, frame.size().height, 2, 2, sobelGradientYOffset, 3, 3, deviceGradientY);
pythagoras<<<pblocks,pthreads>>>(deviceGradientX, deviceGradientY, edgesDataDevice);
// Perform the gaussian blur (first kernel in store @ 0)
convolve<<<cblocks,cthreads>>>(edgesDataDevice, frame.size().width, frame.size().height, 0, 0, gaussianKernel5x5Offset, Blur, Blur, blurredDataDevice);
theshould<<<pblocks,pthreads>>>(blurredDataDevice,thresholdDataDevice,60);
Dilate<<<cblocks,cthreads>>>(thresholdDataDevice, frame.size().width, frame.size().height, 2, 2, dilateKernel, 4, 4, dilateDataDevice);
Erode<<<cblocks,cthreads>>>(dilateDataDevice, frame.size().width, frame.size().height, 2, 2, erodeKernel, 7, 7, erodeDataDevice);
// //
// Erode<<<cblocks,cthreads>>>(dilateDataDevice, frame.size().width, frame.size().height, 2, 2, erodeKernl, 4, 4, erodeDataDevice);
// Dilate<<<cblocks,cthreads>>>(erodeDataDevice, frame.size().width, frame.size().height, 2, 2, dilateKernel, 4, 4, dilateDataDevic2);
// Erode<<<cblocks,cthreads>>>(dilateDataDevic2, frame.size().width, frame.size().height, 2, 2, erodeKernel2, 4, 4, erodeDataDevice2);
cudaThreadSynchronize();
}
cudaEventRecord(stop);
// Display the elapsed time
float ms = 0.0f;
cudaEventSynchronize(stop);
cudaEventElapsedTime(&ms, start, stop);
std::cout << "Elapsed GPU time: " << ms << " milliseconds" << std::endl;
// Show the results
cv::imshow("Source", frame);
cv::imshow("Greyscale", source);
cv::imshow("Sobel", edges);
cv::imshow("Blurred", blurred);
cv::imshow("Theshould", thes);
cv::imshow("Erode", erodes);
cv::imshow("Erode2", erodes2);
cv::imshow("Dilate", dilates);
cv::imshow("Dilate2", dilates2);
// Spin
if(cv::waitKey(1) == 27) break;
}
// Exit
cudaFreeHost(source.data);
cudaFreeHost(blurred.data);
cudaFreeHost(edges.data);
cudaFree(deviceGradientX);
cudaFree(deviceGradientY);
return 0;
}
|
7c9af095a7497b633ea7df8ddbe709fbd03eff31.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <vector>
#include <iostream>
using std::cout;
using std::endl;
using std::vector;
#include "helper.h"
#include "parameter.h"
#include "globals.h"
#include "poly_arithmetic.cuh"
#include "fips202.h"
#include "pack_unpack.h"
#include <stdlib.h>
#include <time.h>
//srand ((unsigned int) time (NULL));
void random_bytes(uint8_t seed[], size_t num_bytes)
{
size_t i;
for (i = 0; i < num_bytes; i++)
{
seed[i] = rand ();
}
}
void gen_matrix(uint16_t A[L][L][N], uint8_t seed[SABER_SEEDBYTES]) {
uint8_t buf[L * L * 32 * EQ];
int i;
shake128(buf, sizeof(buf), seed, SABER_SEEDBYTES);
for (i = 0; i < L; i++)
{
BS2POLVECq(buf + i * L * 32 * EQ, A[i]);
}
}
void gen_secret(uint16_t s[L][N], const uint8_t seed[SABER_NOISE_SEEDBYTES])
{
uint8_t buf[L * SABER_POLYCOINBYTES];
size_t i;
shake128(buf, sizeof(buf), seed, SABER_NOISE_SEEDBYTES);
for (i = 0; i < L; i++)
{
cbd(s[i], buf + i * SABER_POLYCOINBYTES);
}
}
__global__ void poly_mul(uint16_t c[], uint16_t a[], uint16_t b[], uint16_t q)
{
register int i = blockIdx.x * 1 + threadIdx.x;
uint16_t rc = a[i] * b[i];
c[i] = rc & (q-1);
}
__global__ void vector_mul(uint16_t **c, uint16_t **a, uint16_t **b, uint16_t q)
{
register int i = blockIdx.x * 1 + threadIdx.x;
hipLaunchKernelGGL(( poly_mul), dim3(1),dim3(N),0,0, c[i],a[i],b[i],q);
}
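// Note: poly_mul above is a coefficient-wise product reduced with the mask (q-1), which is a valid
// reduction mod q only when q is a power of two (matrix_vector_mul below passes q = 1<<13), and
// vector_mul launches one poly_mul grid per polynomial from device code, which relies on HIP/CUDA
// dynamic parallelism (relocatable device code must be enabled when compiling).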
void matrix_vector_mul(uint16_t **b, uint16_t ***A, uint16_t **s) {
for (int i = 0; i < L; i++){
hipLaunchKernelGGL(( vector_mul), dim3(1),dim3(L),0,0, b, A[i], s, 1<<13);
}
}
void KeyGen(uint8_t pk[SABER_INDCPA_PUBLICKEYBYTES], uint8_t sk[SABER_INDCPA_SECRETKEYBYTES]) {
uint16_t A[L][L][N];
uint16_t s[L][N];
uint16_t b[L][N] = {0};
uint8_t seed_A[SABER_SEEDBYTES];
uint8_t seed_s[SABER_NOISE_SEEDBYTES];
random_bytes(seed_A,SABER_SEEDBYTES);
random_bytes(seed_s,SABER_NOISE_SEEDBYTES);
gen_matrix(A, seed_A);
gen_secret(s, seed_s);
uint16_t ***A_device = (uint16_t***)malloc(sizeof (uint16_t**) * L);
for (int i = 0; i<L;i++)
A_device[i] = (uint16_t**)malloc(sizeof (uint16_t*) * L);
for (int i = 0; i<L;i++){
for (int j = 0; j<L;j++)
hipMalloc(&A_device[i][j], sizeof (uint16_t) * N);
}
    // A_device is a host-side table of device pointers, so each polynomial of A is copied separately.
    for (int i = 0; i<L;i++){
        for (int j = 0; j<L;j++)
            hipMemcpy(A_device[i][j], A[i][j], sizeof (uint16_t) * N, hipMemcpyHostToDevice);
    }
uint16_t **s_device = (uint16_t**)malloc(sizeof (uint16_t*) * L);
for (int i = 0; i<L;i++)
hipMalloc(&s_device[i], sizeof (uint16_t) * N);
uint16_t **b_device = (uint16_t**)malloc(sizeof (uint16_t*) * L);
for (int i = 0; i<L;i++)
hipMalloc(&b_device[i], sizeof (uint16_t) * N);
    // Copy the secret vector s into its per-polynomial device buffers before the multiplication.
    for (int i = 0; i<L;i++)
        hipMemcpy(s_device[i], s[i], sizeof (uint16_t) * N, hipMemcpyHostToDevice);
matrix_vector_mul(b_device, A_device, s_device);
    // A and s live on the stack; release only the device buffers and the heap-allocated pointer table.
    for (int i = 0; i<L;i++){
        for (int j = 0; j<L;j++)
            hipFree(A_device[i][j]);
        free(A_device[i]);
    }
    free(A_device);
}
int main() {
uint8_t pk[SABER_INDCPA_PUBLICKEYBYTES];
uint8_t sk[SABER_INDCPA_SECRETKEYBYTES];
KeyGen(pk,sk);
return 0;
}
| 7c9af095a7497b633ea7df8ddbe709fbd03eff31.cu | #include <vector>
#include <iostream>
using std::cout;
using std::endl;
using std::vector;
#include "helper.h"
#include "parameter.h"
#include "globals.h"
#include "poly_arithmetic.cuh"
#include "fips202.h"
#include "pack_unpack.h"
#include <stdlib.h>
#include <time.h>
//srand ((unsigned int) time (NULL));
void random_bytes(uint8_t seed[], size_t num_bytes)
{
size_t i;
for (i = 0; i < num_bytes; i++)
{
seed[i] = rand ();
}
}
void gen_matrix(uint16_t A[L][L][N], uint8_t seed[SABER_SEEDBYTES]) {
uint8_t buf[L * L * 32 * EQ];
int i;
shake128(buf, sizeof(buf), seed, SABER_SEEDBYTES);
for (i = 0; i < L; i++)
{
BS2POLVECq(buf + i * L * 32 * EQ, A[i]);
}
}
void gen_secret(uint16_t s[L][N], const uint8_t seed[SABER_NOISE_SEEDBYTES])
{
uint8_t buf[L * SABER_POLYCOINBYTES];
size_t i;
shake128(buf, sizeof(buf), seed, SABER_NOISE_SEEDBYTES);
for (i = 0; i < L; i++)
{
cbd(s[i], buf + i * SABER_POLYCOINBYTES);
}
}
__global__ void poly_mul(uint16_t c[], uint16_t a[], uint16_t b[], uint16_t q)
{
register int i = blockIdx.x * 1 + threadIdx.x;
uint16_t rc = a[i] * b[i];
c[i] = rc & (q-1);
}
__global__ void vector_mul(uint16_t **c, uint16_t **a, uint16_t **b, uint16_t q)
{
register int i = blockIdx.x * 1 + threadIdx.x;
poly_mul<<<1,N,0,0>>>(c[i],a[i],b[i],q);
}
void matrix_vector_mul(uint16_t **b, uint16_t ***A, uint16_t **s) {
for (int i = 0; i < L; i++){
vector_mul<<<1,L,0,0>>>(b, A[i], s, 1<<13);
}
}
void KeyGen(uint8_t pk[SABER_INDCPA_PUBLICKEYBYTES], uint8_t sk[SABER_INDCPA_SECRETKEYBYTES]) {
uint16_t A[L][L][N];
uint16_t s[L][N];
uint16_t b[L][N] = {0};
uint8_t seed_A[SABER_SEEDBYTES];
uint8_t seed_s[SABER_NOISE_SEEDBYTES];
random_bytes(seed_A,SABER_SEEDBYTES);
random_bytes(seed_s,SABER_NOISE_SEEDBYTES);
gen_matrix(A, seed_A);
gen_secret(s, seed_s);
uint16_t ***A_device = (uint16_t***)malloc(sizeof (uint16_t**) * L);
for (int i = 0; i<L;i++)
A_device[i] = (uint16_t**)malloc(sizeof (uint16_t*) * L);
for (int i = 0; i<L;i++){
for (int j = 0; j<L;j++)
cudaMalloc(&A_device[i][j], sizeof (uint16_t) * N);
}
    // A_device is a host-side table of device pointers, so each polynomial of A is copied separately.
    for (int i = 0; i<L;i++){
        for (int j = 0; j<L;j++)
            cudaMemcpy(A_device[i][j], A[i][j], sizeof (uint16_t) * N, cudaMemcpyHostToDevice);
    }
uint16_t **s_device = (uint16_t**)malloc(sizeof (uint16_t*) * L);
for (int i = 0; i<L;i++)
cudaMalloc(&s_device[i], sizeof (uint16_t) * N);
uint16_t **b_device = (uint16_t**)malloc(sizeof (uint16_t*) * L);
for (int i = 0; i<L;i++)
cudaMalloc(&b_device[i], sizeof (uint16_t) * N);
    // Copy the secret vector s into its per-polynomial device buffers before the multiplication.
    for (int i = 0; i<L;i++)
        cudaMemcpy(s_device[i], s[i], sizeof (uint16_t) * N, cudaMemcpyHostToDevice);
matrix_vector_mul(b_device, A_device, s_device);
    // A and s live on the stack; release only the device buffers and the heap-allocated pointer table.
    for (int i = 0; i<L;i++){
        for (int j = 0; j<L;j++)
            cudaFree(A_device[i][j]);
        free(A_device[i]);
    }
    free(A_device);
}
int main() {
uint8_t pk[SABER_INDCPA_PUBLICKEYBYTES];
uint8_t sk[SABER_INDCPA_SECRETKEYBYTES];
KeyGen(pk,sk);
return 0;
}
|
7d1a1fc01dbf1dc150b7a738d31ec5a5a2125409.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
__device__ float logsumexp(float a, float b)
{
if(a <= -1e20f)
{
return b;
}
else
if(b <= -1e20f)
{
return a;
}
/*float diff = a-b;
if (diff < -20.0f)
{
return b;
}
else
if (diff > 20.0f)
{
return a;
}*/
if(a > b)
{
return a + log(1.0f+exp(b-a));
}
else
{
return b + log(1.0f+exp(a-b));
}
}
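// Worked example: logsumexp(log(2), log(3)) = log(3) + log(1 + exp(log(2) - log(3)))
// = log(3) + log(1 + 2/3) = log(5), i.e. it adds two probabilities stored in log space
// while keeping the exponent argument non-positive to avoid overflow.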
__device__ float safeadd(float a, float b)
{
if(a <= -1e20f)
{
return b;
}
else
if(b <= -1e20f)
{
return a;
}
return a+b;
}
/*
push!(rules, Rule('S', "LS",0.868534, 3))
push!(rules, Rule('S', "s",0.117609877998, 1))
push!(rules, Rule('S', "dFd",0.013856122002, 2))
push!(rules, Rule('F', "dFd",0.787640, 2))
push!(rules, Rule('F', "LS",0.21236, 3))
push!(rules, Rule('L', "s",0.894603, 1))
push!(rules, Rule('L', "dFd",0.105397, 2))
type1rules = Rule[rules[2],rules[6]]
type2rules = Rule[rules[3],rules[4],rules[7]]
type3rules = Rule[rules[1],rules[5]]
ruleindex = Dict('S' => 1, 'F' => 2, 'L' => 3)
*/
__constant__ int S = 0;
__constant__ int F = 1;
__constant__ int L = 2;
__constant__ float r1logprob = -0.14094854611f;
__constant__ float r2logprob = -2.14038225046f;
__constant__ float r3logprob = -4.27902812221f;
__constant__ float r4logprob = -0.2387141463f;
__constant__ float r5logprob = -1.549472331f;
__constant__ float r6logprob = -0.11137523453f;
__constant__ float r7logprob = -2.25002110628f;
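// The constants above are the natural logarithms of the rule probabilities listed in the comment
// block, e.g. r1logprob = ln(0.868534) ≈ -0.140949 and r2logprob = ln(0.117609877998) ≈ -2.140382,
// with S, F and L indexing the grammar's three nonterminals.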
/*
__global__ void initialiseinside(float *inside, const float* unpairedlogprobs, int len)
{
int i = blockIdx.x*blockDim.x + threadIdx.x;
if(i < len)
{
for(int j=0 ; j < len ; j++)
{
if(i == j)
{
inside[S*len*len + i*len + j] = unpairedlogprobs[j] + r2logprob;
inside[L*len*len + i*len + j] = unpairedlogprobs[j] + r6logprob;
inside[F*len*len + i*len + j] = -1e20f;
}
else
{
inside[S*len*len + i*len + j] = -1e20f;
inside[L*len*len + i*len + j] = -1e20f;
inside[F*len*len + i*len + j] = -1e20f;
}
}
}
}
__global__ void insidealgorithm(float* inside, const float* pairedlogprobs, const float* unpairedlogprobs, const int b, const int len, const float BT)
{
int j = blockIdx.x*blockDim.x + threadIdx.x;
if (j < len-b)
{
int index = 0;
// type 3 rules
// rule 1
float tmp = -1e20f;
for(int h=j ; h < j+b ; h++)
{
tmp = logsumexp(tmp, inside[L*len*len + j*len + h] + inside[S*len*len + (h+1)*len + (j+b)]);
}
index = S*len*len + j*len + j+b;
inside[index] = logsumexp(inside[index], r1logprob + tmp);
// rule 5
index = F*len*len + j*len + j+b;
inside[index] = logsumexp(inside[index], r5logprob + tmp);
// type 2 rules
float v = pairedlogprobs[j*len+j+b]*BT + inside[F*len*len+(j+1)*len+ (j+b-1)];
// rule 3
index = S*len*len + j*len + j+b;
inside[index] = logsumexp(inside[index], r3logprob + v);
// rule 4
index = F*len*len + j*len + j+b;
inside[index] = logsumexp(inside[index], r4logprob + v);
// rule 7
index = L*len*len + j*len + j+b;
inside[index] = logsumexp(inside[index], r7logprob + v);
}
}
__global__ void insidez(const float* inside, float* Z, const int len)
{
int j = blockIdx.x*blockDim.x + threadIdx.x;
if(j == 0)
{
Z[j] = inside[len-1];
}
}*/
/*
__global__ void initialiseinside(float *inside, const float* unpairedlogprobs, int len)
{
int i = blockIdx.x*blockDim.x + threadIdx.x;
if(i < len)
{
for(int j=0 ; j < len ; j++)
{
if(i == j)
{
inside[i*len*3 + j*3 + S] = unpairedlogprobs[j] + r2logprob;
inside[i*len*3 + j*3 + L] = unpairedlogprobs[j] + r6logprob;
inside[i*len*3 + j*3 + F] = -1e20f;
}
else
{
inside[i*len*3 + j*3 + S] = -1e20f;
inside[i*len*3 + j*3 + L] = -1e20f;
inside[i*len*3 + j*3 + F] = -1e20f;
}
}
}
}
__global__ void insidealgorithm(float* inside, const float* pairedlogprobs, const float* unpairedlogprobs, const int b, const int len, const float BT)
{
int j = blockIdx.x*blockDim.x + threadIdx.x;
if (j < len-b)
{
int index = 0;
// type 3 rules
// rule 1
float tmp = -1e20f;
for(int h=j ; h < j+b ; h++)
{
tmp = logsumexp(tmp, inside[j*len*3 + h*3 + L] + inside[(h+1)*len*3 + (j+b)*3 + S]);
}
index = j*len*3 + (j+b)*3 + S;
inside[index] = logsumexp(inside[index], r1logprob + tmp);
// rule 5
index = j*len*3 + (j+b)*3 + F;
inside[index] = logsumexp(inside[index], r5logprob + tmp);
// type 2 rules
float v = pairedlogprobs[j*len+j+b] + inside[(j+1)*len*3 + (j+b-1)*3 + F];
// rule 3
index = j*len*3 + (j+b)*3 + S;
inside[index] = logsumexp(inside[index], r3logprob + v);
// rule 4
index = j*len*3 + (j+b)*3 + F;
inside[index] = logsumexp(inside[index], r4logprob + v);
// rule 7
index = j*len*3 + (j+b)*3 + L;
inside[index] = logsumexp(inside[index], r7logprob + v);
}
}
__global__ void insidez(const float* inside, float* Z, const int len)
{
int j = blockIdx.x*blockDim.x + threadIdx.x;
if(j == 0)
{
Z[j] = inside[(len-1)*3];
}
}*/
__global__ void initialiseinside(float* insideS, float* insideL, float* insideF, const float* unpairedlogprobs, int len)
{
int i = blockIdx.x*blockDim.x + threadIdx.x;
if(i < len)
{
for(int j=0 ; j < len ; j++)
{
if(i == j)
{
insideS[i*len + j] = unpairedlogprobs[j] + r2logprob;
insideL[i*len + j] = unpairedlogprobs[j] + r6logprob;
insideF[i*len + j] = -1e20f;
}
else
{
insideS[i*len + j] = -1e20f;
insideL[i*len + j] = -1e20f;
insideF[i*len + j] = -1e20f;
}
}
}
}
__global__ void insidealgorithm(float* insideS, float* insideL, float* insideF, const float* pairedlogprobs, const float* unpairedlogprobs, const int b, const int len, const float BT)
{
int j = blockIdx.x*blockDim.x + threadIdx.x;
if (j < len-b)
{
int index = j*len + j+b;
// type 3 rules
// rule 1
float tmp = -1e20f;
for(int h=j ; h < j+b ; h++)
{
tmp = logsumexp(tmp, insideL[j*len + h] + insideS[(h+1)*len + (j+b)]);
}
insideS[index] = logsumexp(insideS[index], r1logprob + tmp);
// rule 5
insideF[index] = logsumexp(insideF[index], r5logprob + tmp);
// type 2 rules
float v = pairedlogprobs[index]*BT + insideF[(j+1)*len+ (j+b-1)];
// rule 3
insideS[index] = logsumexp(insideS[index], r3logprob + v);
// rule 4
insideF[index] = logsumexp(insideF[index], r4logprob + v);
// rule 7
insideL[index] = logsumexp(insideL[index], r7logprob + v);
}
}
__global__ void posteriordecoding(float* ematrix, int* smatrix, const float* pairprobs, const float* singleprobs, const int datalen, const int diag, const float alpha)
{
int i = blockIdx.x*blockDim.x + threadIdx.x;
if (i < datalen-diag)
{
int j = i + diag;
float e1 = singleprobs[i] + ematrix[(i+1)*datalen + j];
float e2 = alpha*pairprobs[i*datalen+j] + ematrix[(i+1)*datalen + j-1];
float maxe3 = -1e10;
int maxk = 0;
for(int k=i+1 ; k <= j-1 ; k++)
{
float v = alpha*pairprobs[i*datalen + k] + ematrix[(i+1)*datalen + k-1] + ematrix[(k+1)*datalen + j];
if(v > maxe3)
{
maxe3 = v;
maxk = k;
}
}
float maxval = e1;
smatrix[i*datalen + j] = -1;
if(e2 > maxval)
{
maxval = e2;
smatrix[i*datalen + j] = -2;
}
if(maxe3 > maxval)
{
maxval = maxe3;
smatrix[i*datalen + j] = maxk+1;
}
ematrix[i*datalen + j] = maxval;
}
}
__global__ void insidez(const float* insideS, float* Z, const int len)
{
int j = blockIdx.x*blockDim.x + threadIdx.x;
if(j == 0)
{
Z[j] = insideS[len-1];
}
}
/*
__global__ void initialiseinside(float* insideS, float* insideL, float* insideF, const float* unpairedlogprobs, int len, const int stride)
{
int i = blockIdx.x*blockDim.x + threadIdx.x;
if(i < len)
{
for(int j=0 ; j < len ; j++)
{
if(i == j)
{
insideS[i*stride + j] = unpairedlogprobs[j] + r2logprob;
insideL[i*stride + j] = unpairedlogprobs[j] + r6logprob;
insideF[i*stride + j] = -1e20f;
}
else
{
insideS[i*stride + j] = -1e20f;
insideL[i*stride + j] = -1e20f;
insideF[i*stride + j] = -1e20f;
}
}
}
}
__global__ void insidealgorithm(float* insideS, float* insideL, float* insideF, const float* pairedlogprobs, const float* unpairedlogprobs, const int b, const int len, const int stride, const float BT)
{
int j = blockIdx.x*blockDim.x + threadIdx.x;
if (j < len-b)
{
int index = j*stride + j+b;
// type 3 rules
// rule 1
float tmp = -1e20f;
for(int h=j ; h < j+b ; h++)
{
tmp = logsumexp(tmp, insideL[j*stride + h] + insideS[(h+1)*stride + (j+b)]);
}
insideS[index] = logsumexp(insideS[index], r1logprob + tmp);
// rule 5
insideF[index] = logsumexp(insideF[index], r5logprob + tmp);
// type 2 rules
float v = pairedlogprobs[j*len + j+b]*BT + insideF[(j+1)*stride + (j+b-1)];
// rule 3
insideS[index] = logsumexp(insideS[index], r3logprob + v);
// rule 4
insideF[index] = logsumexp(insideF[index], r4logprob + v);
// rule 7
insideL[index] = logsumexp(insideL[index], r7logprob + v);
}
}
__global__ void insidez(const float* insideS, float* Z, const int len)
{
int j = blockIdx.x*blockDim.x + threadIdx.x;
if(j == 0)
{
Z[j] = insideS[len-1];
}
}*/
/*
push!(rules, Rule('S', "LS",0.868534, 3))
push!(rules, Rule('S', "s",0.117609877998, 1))
push!(rules, Rule('S', "dFd",0.013856122002, 2))
push!(rules, Rule('F', "dFd",0.787640, 2))
push!(rules, Rule('F', "LS",0.21236, 3))
push!(rules, Rule('L', "s",0.894603, 1))
push!(rules, Rule('L', "dFd",0.105397, 2))
type1rules = Rule[rules[2],rules[6]]
type2rules = Rule[rules[3],rules[4],rules[7]]
type3rules = Rule[rules[1],rules[5]]
ruleindex = Dict('S' => 1, 'F' => 2, 'L' => 3)
*/
__global__ void outsidealgorithm(float* outside, const float* inside, const float* pairedlogprobs, const float* unpairedlogprobs, const int b, const int len, const float BT)
{
int j = blockIdx.x*blockDim.x + threadIdx.x;
if (j < len - b)
{
int index = 0;
// type 3 rules
// rule 1 Rule('S', "LS",0.868534, 3))
float tmp = -1e20f;
for (int k = j + b + 1; k < len; k++)
{
tmp = logsumexp(tmp, outside[S*len*len + j*len + k] + inside[S*len*len + (j+b+1)*len + k]);
}
index = L*len*len + j*len + j+b;
outside[index] = logsumexp(outside[index], r1logprob*BT + tmp);
tmp = -1e20f;
for (int k = 0 ; k < j ; k++)
{
tmp = logsumexp(tmp, outside[S*len*len + k*len + j+b] + inside[L*len*len + k*len + j-1]);
}
index = S*len*len + j*len + j+b;
outside[index] = logsumexp(outside[index], r1logprob*BT + tmp);
// rule 5 Rule('F', "LS",0.21236, 3)
tmp = -1e20f;
for (int k = j + b + 1; k < len; k++)
{
tmp = logsumexp(tmp, outside[F*len*len + j*len + k] + inside[S*len*len + (j+b+1)*len + k]);
}
index = L*len*len + j*len + j+b;
outside[index] = logsumexp(outside[index], r5logprob*BT + tmp);
tmp = -1e20f;
for (int k = 0 ; k < j ; k++)
{
tmp = logsumexp(tmp, outside[F*len*len + k*len + j+b] + inside[L*len*len + k*len + j-1]);
}
index = S*len*len + j*len + j+b;
outside[index] = logsumexp(outside[index], r5logprob*BT + tmp);
// type 2 rules
if ((j>=1) && (j+b+1<len))
{
float v = pairedlogprobs[(j-1)*len+(j+b+1)]*BT;
index = F*len*len + j*len + j+b;
// rule 3 Rule('S', "dFd",0.013856122002, 2)
outside[index] = logsumexp(outside[index], r3logprob*BT + outside[S*len*len + (j-1)*len + j+b+1] + v);
// rule 4 Rule('F', "dFd",0.787640, 2)
outside[index] = logsumexp(outside[index], r4logprob*BT + outside[F*len*len + (j-1)*len + j+b+1] + v);
// rule 7 Rule('L', "dFd",0.105397, 2)
outside[index] = logsumexp(outside[index], r7logprob*BT + outside[L*len*len + (j-1)*len + j+b+1] + v);
}
}
}
/*
int main()
{
int len = 8000;
int N = len;
float* inside = (float*)malloc(3*len*len*sizeof(float));
float* pairedlogprobs = (float*)malloc(len*len*sizeof(float));
float* unpairedlogprobs = (float*)malloc(len*sizeof(float));
float* d_inside;
float* d_pairedlogprobs;
float* d_unpairedlogprobs;
hipMalloc(&d_inside, 3*len*len*sizeof(float));
hipMalloc(&d_pairedlogprobs, len*len*sizeof(float));
hipMalloc(&d_unpairedlogprobs, len*len*sizeof(float));
hipMemcpy(d_inside, inside, 3*len*len*sizeof(float), hipMemcpyHostToDevice);
hipMemcpy(d_pairedlogprobs, pairedlogprobs, len*len*sizeof(float), hipMemcpyHostToDevice);
hipMemcpy(d_unpairedlogprobs, unpairedlogprobs, len*sizeof(float), hipMemcpyHostToDevice);
for(int b=1 ; b < len ; b++)
{
insidealgorithm<<<(N+511)/512, 512>>>(d_inside, d_pairedlogprobs, d_unpairedlogprobs, b, len, 1.0);
}
int code = hipMemcpy(inside, d_inside, 3*len*len*sizeof(float), hipMemcpyDeviceToHost);
printf("exitcode %d\n", code);
printf("Z %f\n", inside[len-1]);
}*/
| 7d1a1fc01dbf1dc150b7a738d31ec5a5a2125409.cu | #include <stdio.h>
#include <stdlib.h>
#include <time.h>
__device__ float logsumexp(float a, float b)
{
if(a <= -1e20f)
{
return b;
}
else
if(b <= -1e20f)
{
return a;
}
/*float diff = a-b;
if (diff < -20.0f)
{
return b;
}
else
if (diff > 20.0f)
{
return a;
}*/
if(a > b)
{
return a + log(1.0f+exp(b-a));
}
else
{
return b + log(1.0f+exp(a-b));
}
}
__device__ float safeadd(float a, float b)
{
if(a <= -1e20f)
{
return b;
}
else
if(b <= -1e20f)
{
return a;
}
return a+b;
}
/*
push!(rules, Rule('S', "LS",0.868534, 3))
push!(rules, Rule('S', "s",0.117609877998, 1))
push!(rules, Rule('S', "dFd",0.013856122002, 2))
push!(rules, Rule('F', "dFd",0.787640, 2))
push!(rules, Rule('F', "LS",0.21236, 3))
push!(rules, Rule('L', "s",0.894603, 1))
push!(rules, Rule('L', "dFd",0.105397, 2))
type1rules = Rule[rules[2],rules[6]]
type2rules = Rule[rules[3],rules[4],rules[7]]
type3rules = Rule[rules[1],rules[5]]
ruleindex = Dict('S' => 1, 'F' => 2, 'L' => 3)
*/
__constant__ int S = 0;
__constant__ int F = 1;
__constant__ int L = 2;
__constant__ float r1logprob = -0.14094854611f;
__constant__ float r2logprob = -2.14038225046f;
__constant__ float r3logprob = -4.27902812221f;
__constant__ float r4logprob = -0.2387141463f;
__constant__ float r5logprob = -1.549472331f;
__constant__ float r6logprob = -0.11137523453f;
__constant__ float r7logprob = -2.25002110628f;
/*
__global__ void initialiseinside(float *inside, const float* unpairedlogprobs, int len)
{
int i = blockIdx.x*blockDim.x + threadIdx.x;
if(i < len)
{
for(int j=0 ; j < len ; j++)
{
if(i == j)
{
inside[S*len*len + i*len + j] = unpairedlogprobs[j] + r2logprob;
inside[L*len*len + i*len + j] = unpairedlogprobs[j] + r6logprob;
inside[F*len*len + i*len + j] = -1e20f;
}
else
{
inside[S*len*len + i*len + j] = -1e20f;
inside[L*len*len + i*len + j] = -1e20f;
inside[F*len*len + i*len + j] = -1e20f;
}
}
}
}
__global__ void insidealgorithm(float* inside, const float* pairedlogprobs, const float* unpairedlogprobs, const int b, const int len, const float BT)
{
int j = blockIdx.x*blockDim.x + threadIdx.x;
if (j < len-b)
{
int index = 0;
// type 3 rules
// rule 1
float tmp = -1e20f;
for(int h=j ; h < j+b ; h++)
{
tmp = logsumexp(tmp, inside[L*len*len + j*len + h] + inside[S*len*len + (h+1)*len + (j+b)]);
}
index = S*len*len + j*len + j+b;
inside[index] = logsumexp(inside[index], r1logprob + tmp);
// rule 5
index = F*len*len + j*len + j+b;
inside[index] = logsumexp(inside[index], r5logprob + tmp);
// type 2 rules
float v = pairedlogprobs[j*len+j+b]*BT + inside[F*len*len+(j+1)*len+ (j+b-1)];
// rule 3
index = S*len*len + j*len + j+b;
inside[index] = logsumexp(inside[index], r3logprob + v);
// rule 4
index = F*len*len + j*len + j+b;
inside[index] = logsumexp(inside[index], r4logprob + v);
// rule 7
index = L*len*len + j*len + j+b;
inside[index] = logsumexp(inside[index], r7logprob + v);
}
}
__global__ void insidez(const float* inside, float* Z, const int len)
{
int j = blockIdx.x*blockDim.x + threadIdx.x;
if(j == 0)
{
Z[j] = inside[len-1];
}
}*/
/*
__global__ void initialiseinside(float *inside, const float* unpairedlogprobs, int len)
{
int i = blockIdx.x*blockDim.x + threadIdx.x;
if(i < len)
{
for(int j=0 ; j < len ; j++)
{
if(i == j)
{
inside[i*len*3 + j*3 + S] = unpairedlogprobs[j] + r2logprob;
inside[i*len*3 + j*3 + L] = unpairedlogprobs[j] + r6logprob;
inside[i*len*3 + j*3 + F] = -1e20f;
}
else
{
inside[i*len*3 + j*3 + S] = -1e20f;
inside[i*len*3 + j*3 + L] = -1e20f;
inside[i*len*3 + j*3 + F] = -1e20f;
}
}
}
}
__global__ void insidealgorithm(float* inside, const float* pairedlogprobs, const float* unpairedlogprobs, const int b, const int len, const float BT)
{
int j = blockIdx.x*blockDim.x + threadIdx.x;
if (j < len-b)
{
int index = 0;
// type 3 rules
// rule 1
float tmp = -1e20f;
for(int h=j ; h < j+b ; h++)
{
tmp = logsumexp(tmp, inside[j*len*3 + h*3 + L] + inside[(h+1)*len*3 + (j+b)*3 + S]);
}
index = j*len*3 + (j+b)*3 + S;
inside[index] = logsumexp(inside[index], r1logprob + tmp);
// rule 5
index = j*len*3 + (j+b)*3 + F;
inside[index] = logsumexp(inside[index], r5logprob + tmp);
// type 2 rules
float v = pairedlogprobs[j*len+j+b] + inside[(j+1)*len*3 + (j+b-1)*3 + F];
// rule 3
index = j*len*3 + (j+b)*3 + S;
inside[index] = logsumexp(inside[index], r3logprob + v);
// rule 4
index = j*len*3 + (j+b)*3 + F;
inside[index] = logsumexp(inside[index], r4logprob + v);
// rule 7
index = j*len*3 + (j+b)*3 + L;
inside[index] = logsumexp(inside[index], r7logprob + v);
}
}
__global__ void insidez(const float* inside, float* Z, const int len)
{
int j = blockIdx.x*blockDim.x + threadIdx.x;
if(j == 0)
{
Z[j] = inside[(len-1)*3];
}
}*/
__global__ void initialiseinside(float* insideS, float* insideL, float* insideF, const float* unpairedlogprobs, int len)
{
int i = blockIdx.x*blockDim.x + threadIdx.x;
if(i < len)
{
for(int j=0 ; j < len ; j++)
{
if(i == j)
{
insideS[i*len + j] = unpairedlogprobs[j] + r2logprob;
insideL[i*len + j] = unpairedlogprobs[j] + r6logprob;
insideF[i*len + j] = -1e20f;
}
else
{
insideS[i*len + j] = -1e20f;
insideL[i*len + j] = -1e20f;
insideF[i*len + j] = -1e20f;
}
}
}
}
__global__ void insidealgorithm(float* insideS, float* insideL, float* insideF, const float* pairedlogprobs, const float* unpairedlogprobs, const int b, const int len, const float BT)
{
int j = blockIdx.x*blockDim.x + threadIdx.x;
if (j < len-b)
{
int index = j*len + j+b;
// type 3 rules
// rule 1
float tmp = -1e20f;
for(int h=j ; h < j+b ; h++)
{
tmp = logsumexp(tmp, insideL[j*len + h] + insideS[(h+1)*len + (j+b)]);
}
insideS[index] = logsumexp(insideS[index], r1logprob + tmp);
// rule 5
insideF[index] = logsumexp(insideF[index], r5logprob + tmp);
// type 2 rules
float v = pairedlogprobs[index]*BT + insideF[(j+1)*len+ (j+b-1)];
// rule 3
insideS[index] = logsumexp(insideS[index], r3logprob + v);
// rule 4
insideF[index] = logsumexp(insideF[index], r4logprob + v);
// rule 7
insideL[index] = logsumexp(insideL[index], r7logprob + v);
}
}
__global__ void posteriordecoding(float* ematrix, int* smatrix, const float* pairprobs, const float* singleprobs, const int datalen, const int diag, const float alpha)
{
int i = blockIdx.x*blockDim.x + threadIdx.x;
if (i < datalen-diag)
{
int j = i + diag;
float e1 = singleprobs[i] + ematrix[(i+1)*datalen + j];
float e2 = alpha*pairprobs[i*datalen+j] + ematrix[(i+1)*datalen + j-1];
float maxe3 = -1e10;
int maxk = 0;
for(int k=i+1 ; k <= j-1 ; k++)
{
float v = alpha*pairprobs[i*datalen + k] + ematrix[(i+1)*datalen + k-1] + ematrix[(k+1)*datalen + j];
if(v > maxe3)
{
maxe3 = v;
maxk = k;
}
}
float maxval = e1;
smatrix[i*datalen + j] = -1;
if(e2 > maxval)
{
maxval = e2;
smatrix[i*datalen + j] = -2;
}
if(maxe3 > maxval)
{
maxval = maxe3;
smatrix[i*datalen + j] = maxk+1;
}
ematrix[i*datalen + j] = maxval;
}
}
__global__ void insidez(const float* insideS, float* Z, const int len)
{
int j = blockIdx.x*blockDim.x + threadIdx.x;
if(j == 0)
{
Z[j] = insideS[len-1];
}
}
/*
__global__ void initialiseinside(float* insideS, float* insideL, float* insideF, const float* unpairedlogprobs, int len, const int stride)
{
int i = blockIdx.x*blockDim.x + threadIdx.x;
if(i < len)
{
for(int j=0 ; j < len ; j++)
{
if(i == j)
{
insideS[i*stride + j] = unpairedlogprobs[j] + r2logprob;
insideL[i*stride + j] = unpairedlogprobs[j] + r6logprob;
insideF[i*stride + j] = -1e20f;
}
else
{
insideS[i*stride + j] = -1e20f;
insideL[i*stride + j] = -1e20f;
insideF[i*stride + j] = -1e20f;
}
}
}
}
__global__ void insidealgorithm(float* insideS, float* insideL, float* insideF, const float* pairedlogprobs, const float* unpairedlogprobs, const int b, const int len, const int stride, const float BT)
{
int j = blockIdx.x*blockDim.x + threadIdx.x;
if (j < len-b)
{
int index = j*stride + j+b;
// type 3 rules
// rule 1
float tmp = -1e20f;
for(int h=j ; h < j+b ; h++)
{
tmp = logsumexp(tmp, insideL[j*stride + h] + insideS[(h+1)*stride + (j+b)]);
}
insideS[index] = logsumexp(insideS[index], r1logprob + tmp);
// rule 5
insideF[index] = logsumexp(insideF[index], r5logprob + tmp);
// type 2 rules
float v = pairedlogprobs[j*len + j+b]*BT + insideF[(j+1)*stride + (j+b-1)];
// rule 3
insideS[index] = logsumexp(insideS[index], r3logprob + v);
// rule 4
insideF[index] = logsumexp(insideF[index], r4logprob + v);
// rule 7
insideL[index] = logsumexp(insideL[index], r7logprob + v);
}
}
__global__ void insidez(const float* insideS, float* Z, const int len)
{
int j = blockIdx.x*blockDim.x + threadIdx.x;
if(j == 0)
{
Z[j] = insideS[len-1];
}
}*/
/*
push!(rules, Rule('S', "LS",0.868534, 3))
push!(rules, Rule('S', "s",0.117609877998, 1))
push!(rules, Rule('S', "dFd",0.013856122002, 2))
push!(rules, Rule('F', "dFd",0.787640, 2))
push!(rules, Rule('F', "LS",0.21236, 3))
push!(rules, Rule('L', "s",0.894603, 1))
push!(rules, Rule('L', "dFd",0.105397, 2))
type1rules = Rule[rules[2],rules[6]]
type2rules = Rule[rules[3],rules[4],rules[7]]
type3rules = Rule[rules[1],rules[5]]
ruleindex = Dict('S' => 1, 'F' => 2, 'L' => 3)
*/
__global__ void outsidealgorithm(float* outside, const float* inside, const float* pairedlogprobs, const float* unpairedlogprobs, const int b, const int len, const float BT)
{
int j = blockIdx.x*blockDim.x + threadIdx.x;
if (j < len - b)
{
int index = 0;
// type 3 rules
// rule 1 Rule('S', "LS",0.868534, 3))
float tmp = -1e20f;
for (int k = j + b + 1; k < len; k++)
{
tmp = logsumexp(tmp, outside[S*len*len + j*len + k] + inside[S*len*len + (j+b+1)*len + k]);
}
index = L*len*len + j*len + j+b;
outside[index] = logsumexp(outside[index], r1logprob*BT + tmp);
tmp = -1e20f;
for (int k = 0 ; k < j ; k++)
{
tmp = logsumexp(tmp, outside[S*len*len + k*len + j+b] + inside[L*len*len + k*len + j-1]);
}
index = S*len*len + j*len + j+b;
outside[index] = logsumexp(outside[index], r1logprob*BT + tmp);
// rule 5 Rule('F', "LS",0.21236, 3)
tmp = -1e20f;
for (int k = j + b + 1; k < len; k++)
{
tmp = logsumexp(tmp, outside[F*len*len + j*len + k] + inside[S*len*len + (j+b+1)*len + k]);
}
index = L*len*len + j*len + j+b;
outside[index] = logsumexp(outside[index], r5logprob*BT + tmp);
tmp = -1e20f;
for (int k = 0 ; k < j ; k++)
{
tmp = logsumexp(tmp, outside[F*len*len + k*len + j+b] + inside[L*len*len + k*len + j-1]);
}
index = S*len*len + j*len + j+b;
outside[index] = logsumexp(outside[index], r5logprob*BT + tmp);
// type 2 rules
if ((j>=1) && (j+b+1<len))
{
float v = pairedlogprobs[(j-1)*len+(j+b+1)]*BT;
index = F*len*len + j*len + j+b;
// rule 3 Rule('S', "dFd",0.013856122002, 2)
outside[index] = logsumexp(outside[index], r3logprob*BT + outside[S*len*len + (j-1)*len + j+b+1] + v);
// rule 4 Rule('F', "dFd",0.787640, 2)
outside[index] = logsumexp(outside[index], r4logprob*BT + outside[F*len*len + (j-1)*len + j+b+1] + v);
// rule 7 Rule('L', "dFd",0.105397, 2)
outside[index] = logsumexp(outside[index], r7logprob*BT + outside[L*len*len + (j-1)*len + j+b+1] + v);
}
}
}
/*
int main()
{
int len = 8000;
int N = len;
float* inside = (float*)malloc(3*len*len*sizeof(float));
float* pairedlogprobs = (float*)malloc(len*len*sizeof(float));
float* unpairedlogprobs = (float*)malloc(len*sizeof(float));
float* d_inside;
float* d_pairedlogprobs;
float* d_unpairedlogprobs;
cudaMalloc(&d_inside, 3*len*len*sizeof(float));
cudaMalloc(&d_pairedlogprobs, len*len*sizeof(float));
cudaMalloc(&d_unpairedlogprobs, len*len*sizeof(float));
cudaMemcpy(d_inside, inside, 3*len*len*sizeof(float), cudaMemcpyHostToDevice);
cudaMemcpy(d_pairedlogprobs, pairedlogprobs, len*len*sizeof(float), cudaMemcpyHostToDevice);
cudaMemcpy(d_unpairedlogprobs, unpairedlogprobs, len*sizeof(float), cudaMemcpyHostToDevice);
for(int b=1 ; b < len ; b++)
{
insidealgorithm<<<(N+511)/512, 512>>>(d_inside, d_pairedlogprobs, d_unpairedlogprobs, b, len, 1.0);
}
int code = cudaMemcpy(inside, d_inside, 3*len*len*sizeof(float), cudaMemcpyDeviceToHost);
printf("exitcode %d\n", code);
printf("Z %f\n", inside[len-1]);
}*/
|
96b7c4aef1ba80494fd4b170dbc73f3a7b334ce1.hip | // !!! This is a file automatically generated by hipify!!!
#include "sphere_hip.cuh"
namespace ray_tracer {
RT_DEVICE Sphere::Sphere(const Point3& origin, const float radius, Material* material) :
center_(origin), radius_(radius), radius_inv_(1.0f / radius), radius_2_(radius* radius)
{
material_ = material;
}
RT_DEVICE bool Sphere::hit(const Ray& ray, const float t_min, const float t_max, HitRecord& rec) const
{
const auto oc = ray.origin() - center_;
const auto a = ray.direction().length_squared();
const auto b_half = dot(ray.direction(), oc);
const auto c = oc.length_squared() - radius_2_;
const auto disc = b_half * b_half - a * c;
if (disc < 0.0f)
return false;
const auto root = std::sqrt(disc);
const auto a_inv = 1.0f / a;
auto t = (-b_half - root) * a_inv;
for (size_t i = 0; i < 2; ++i, t = (-b_half + root) * a_inv)
{
if (t < t_max && t > t_min)
{
rec.t = t;
rec.hit_point = ray.at(t);
const auto outward_normal = (rec.hit_point - center_) * radius_inv_;
rec.set_face_normal(ray, outward_normal);
get_uv_coordinates(outward_normal, rec.u, rec.v);
rec.material = material_;
return true;
}
}
// Out of bounds
return false;
}
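// Math note: hit() solves |origin + t*direction - center|^2 = radius^2 with the reduced quadratic.
// Writing the full coefficient as b = 2*b_half, the discriminant b*b - 4*a*c equals
// 4*(b_half*b_half - a*c), so testing `disc` and using t = (-b_half ± sqrt(disc)) / a is
// equivalent to the textbook quadratic formula.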
RT_DEVICE bool Sphere::bounding_box(const float ti, const float tf, AABB& box_out) const
{
box_out = AABB{
center_ - Vec3{ radius_, radius_, radius_ },
center_ + Vec3{ radius_, radius_, radius_ }
};
return true;
}
RT_DEVICE void Sphere::get_uv_coordinates(const Point3& point, float& u, float& v)
{
const auto theta = std::acosf(-point.y());
const auto phi = std::atan2f(-point.z(), point.x()) + kPi;
u = phi * k1by2Pi;
v = theta * k1byPi;
}
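// Worked example (assuming k1by2Pi = 1/(2*pi) and k1byPi = 1/pi): the unit point (1, 0, 0) gives
// theta = acos(0) = pi/2 and phi = atan2(0, 1) + pi = pi, so (u, v) = (0.5, 0.5); the pole (0, 1, 0)
// gives theta = acos(-1) = pi and therefore v = 1.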
} // namespace ray_tracer
| 96b7c4aef1ba80494fd4b170dbc73f3a7b334ce1.cu | #include "sphere.cuh"
namespace ray_tracer {
RT_DEVICE Sphere::Sphere(const Point3& origin, const float radius, Material* material) :
center_(origin), radius_(radius), radius_inv_(1.0f / radius), radius_2_(radius* radius)
{
material_ = material;
}
RT_DEVICE bool Sphere::hit(const Ray& ray, const float t_min, const float t_max, HitRecord& rec) const
{
const auto oc = ray.origin() - center_;
const auto a = ray.direction().length_squared();
const auto b_half = dot(ray.direction(), oc);
const auto c = oc.length_squared() - radius_2_;
const auto disc = b_half * b_half - a * c;
if (disc < 0.0f)
return false;
const auto root = std::sqrt(disc);
const auto a_inv = 1.0f / a;
auto t = (-b_half - root) * a_inv;
for (size_t i = 0; i < 2; ++i, t = (-b_half + root) * a_inv)
{
if (t < t_max && t > t_min)
{
rec.t = t;
rec.hit_point = ray.at(t);
const auto outward_normal = (rec.hit_point - center_) * radius_inv_;
rec.set_face_normal(ray, outward_normal);
get_uv_coordinates(outward_normal, rec.u, rec.v);
rec.material = material_;
return true;
}
}
// Out of bounds
return false;
}
RT_DEVICE bool Sphere::bounding_box(const float ti, const float tf, AABB& box_out) const
{
box_out = AABB{
center_ - Vec3{ radius_, radius_, radius_ },
center_ + Vec3{ radius_, radius_, radius_ }
};
return true;
}
RT_DEVICE void Sphere::get_uv_coordinates(const Point3& point, float& u, float& v)
{
const auto theta = std::acosf(-point.y());
const auto phi = std::atan2f(-point.z(), point.x()) + kPi;
u = phi * k1by2Pi;
v = theta * k1byPi;
}
} // namespace ray_tracer
|
b89f41f90ce610c79c3f0b94d46529ae5ddfc462.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*******************
* npCase2: Selective Matrix Addition using nested parallelism.
* Author : Fanny Nina-Paravecino
* Date : October 2016
*/
#include <stdio.h>
#include <iostream>
#include <time.h>
#include <sys/time.h>
using namespace std;
__global__ void childKernelSync(int* A, int *B, int *C, int parentIdxVar)
{
int idx = blockIdx.x * blockDim.x + threadIdx.x;
C[parentIdxVar+idx] = A[parentIdxVar+idx] + B[parentIdxVar+idx];
}
__global__ void parentKernelSync(int* A, int *B, int *C, int *npId, int rows, int cols)
{
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if(A[idx*cols] == 1)
{
npId[idx] = idx*cols;
if (cols > 1024){
hipLaunchKernelGGL(( childKernelSync), dim3(cols/1024), dim3(1024), 0, 0, A, B, C, npId[idx]);
hipDeviceSynchronize();
}
else{
hipLaunchKernelGGL(( childKernelSync), dim3(1), dim3(cols), 0, 0, A, B, C, npId[idx]);
hipDeviceSynchronize();
}
}
}
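// Launch-shape example (illustrative, derived from the grid built in main below): with
// ROWS = COLS = 2048 the parent grid is 2 blocks x 1024 threads (one thread per row), and every
// thread whose row starts with 1 launches a child grid of 2048/1024 = 2 blocks x 1024 threads to
// add its row element-wise. These device-side launches require dynamic parallelism support when
// building (e.g. relocatable device code).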
__global__ void singleKernel(int* A, int *B, int *C, int rows, int cols)
{
int idx = blockIdx.x *blockDim.x + threadIdx.x;
if(A[idx*cols] == 1)
{
for(int i=0; i < cols; i++)
C[idx*cols+i] = A[idx*cols+i]+B[idx*cols+i];
}
}
void printOutput(int *A, int rows, int cols)
{
for(int i=0; i < rows; i++)
{
for(int j=0; j < cols; j++){
printf("%d ", A[i*cols+j]);
}
printf("\n");
}
}
bool check(int *c1, int *c2, int rows, int cols){
bool same = true;
for(int i=0; i < rows; i++)
{
for(int j=0; j < cols; j++){
if(c1[i*cols+j] != c2[i*cols+j]){
printf("ERROR...[%d %d] ", i, j);
same = false;
break;
}
}
if (!same)
break;
}
return same;
}
double getWallTime(){
struct timeval time;
if(gettimeofday(&time,NULL)){
printf("Error getting time\n");
return 0;
}
return (double)time.tv_sec + (double)time.tv_usec * .000001;
}
int getTotalCores(hipDeviceProp_t devProp)
{
int cores = 0;
int mp = devProp.multiProcessorCount;
int fixCores = 0;
switch (devProp.major){
case 2: // Fermi
if (devProp.minor == 1) cores = mp * 48;
else cores = mp * 32;
break;
case 3: // Kepler
fixCores = 192;
printf("Number of cores per SM: %d\n", fixCores);
cores = mp * fixCores;
break;
case 5: // Maxwell
fixCores = 128;
printf("Number of cores per SM: %d\n", fixCores);
cores = mp * fixCores;
break;
case 6: // Pascal
if (devProp.minor == 1) {
fixCores = 128;
printf("Number of cores per SM: %d\n",fixCores);
cores = mp * fixCores;
}
else if (devProp.minor == 0){
fixCores = 64;
printf("Number of cores per SM: %d\n",fixCores);
cores = mp * fixCores;
}
else printf("Unknown device type\n");
break;
default:
printf("Unknown device type\n");
break;
}
return cores;
}
void printDevProp(hipDeviceProp_t devProp)
{
printf("Name: %s\n", devProp.name);
printf("Capability: (%d, %d)\n", devProp.major, devProp.minor);
printf("# SM: %d\n", devProp.multiProcessorCount);
printf("Total Cores: %d\n", getTotalCores(devProp));
printf("Clock rate: %d\n", devProp.clockRate);
printf("=================================\n");
return;
}
int main(int argC, char** argV)
{
// Number of CUDA devices
int devCount;
hipGetDeviceCount(&devCount);
printf("CUDA Device Query...\n");
printf("There are %d CUDA devices.\n", devCount);
// Iterate through devices
int gpu = 0;
hipDeviceProp_t devProp;
for (int i = 0; i < devCount; ++i)
{
// Get device properties
printf("\nCUDA Device #%d\n", i);
hipGetDeviceProperties(&devProp, i);
printDevProp(devProp);
}
hipSetDevice(gpu);
///*******************************
float div = 75.0f;
int ROWS = 1024, COLS = 1024;
for(int i=1; i<argC; i=i+2)
{
if(strcmp(argV[i], "--size") == 0)
{
if(i+1 < argC)
{
ROWS = atoi(argV[i+1]);
COLS = ROWS;
if(ROWS < 1)
{
cerr << "Size must be greater than 0." << endl;
exit(1);
}
}
else
{
printf("Error...\n");
exit(1);
}
}
else if(strcmp(argV[i], "--div") == 0){
if(i+1 < argC)
{
div = atof(argV[i+1]);
if(div <= 0)
{
cerr << "Divergence must be greater than 0." << endl;
exit(1);
}
}
else
{
printf("Error...\n");
exit(1);
}
}
else if(strcmp(argV[i], "--gpu") == 0){
if(i+1 < argC)
{
gpu = atoi(argV[i+1]);
if(gpu < 0)
{
cerr << "GPU index should be a positive index of the array of GPU." << endl;
exit(1);
}
break;
}
else
{
printf("Error...\n");
exit(1);
}
}
else if(strcmp(argV[i], "-h") == 0 || strcmp(argV[i], "--help") == 0)
{
cout << "Usage: " << argV[0] << " [OPTIONS] --size <number> --div <number> --gpu <number> " << endl;
cout << " -h, --help Display this information and exit." << endl;
exit(0);
}
else
{
cerr << "Did not recognize '" << argV[i] << "'. Try '" << argV[0]
<< " --help' for additional information." << endl;
exit(1);
}
}
printf("NP - Characterization: %f percentage of divergence\n", div);
printf("NP Case2 Matrix Addition: [%d x %d]\n", ROWS, COLS);
hipSetDevice(gpu);
hipGetDeviceProperties(&devProp, gpu);
printf("GPU: %s\n", devProp.name);
    // Zero-initialize so the a[row*COLS] == 1 selection test and the later result comparison read defined values.
    int *a = (int*) calloc(ROWS*COLS, sizeof(int));
    int *b = (int*) calloc(ROWS*COLS, sizeof(int));
    int *c = (int*) calloc(ROWS*COLS, sizeof(int));
int nroChildKernels = 0;
int rndValue = 0;
while (nroChildKernels < (ROWS*(div/100.0f))){
rndValue = rand()%ROWS;
nroChildKernels++;
for(int j=0; j<COLS; j++){
a[rndValue*COLS+j] = 1;
b[rndValue*COLS+j] = 2;
}
}
printf("Number of child kernels: %d\n", nroChildKernels);
// Sequential
double wallS0, wallS1;
wallS0 = getWallTime();
    int *cHost = (int*)calloc(ROWS*COLS, sizeof(int));
for(int i=0; i<ROWS; i++){
if(a[i*COLS] == 1)
for(int j=0; j<COLS; j++){
cHost[i*COLS+j] = a[i*COLS+j] + b[i*COLS+j];
}
}
wallS1 = getWallTime();
printf("\tSequential Job Time: %f ms\n", (wallS1-wallS0)*1000);
// Time variables
hipEvent_t start, stop;
float time;
hipEventCreate(&start);
hipEventCreate(&stop);
int *devA;
int *devB;
hipMalloc((void**)&devA, ROWS*COLS*sizeof(int));
hipMalloc((void**)&devB, ROWS*COLS*sizeof(int));
//Copying [A] and [B] from host memory to device memory.
hipMemcpy(devA, a, ROWS*COLS*sizeof(int), hipMemcpyHostToDevice);
hipMemcpy(devB, b, ROWS*COLS*sizeof(int), hipMemcpyHostToDevice);
// Grid configuration
dim3 threads, blocks;
if (ROWS >1024){
threads.x = 1024; threads.y = 1; threads.z = 1;
blocks.x = ROWS/threads.x; blocks.y = 1; blocks.z = 1;
}
else{
threads.x = ROWS; threads.y = 1; threads.z = 1;
blocks.x = 1; blocks.y = 1; blocks.z = 1;
}
// NP Sync Case ****************************************************************
    int *cNpSync = (int*)calloc(ROWS*COLS, sizeof(int));
    int *npId = (int*)calloc(ROWS*COLS, sizeof(int));
int *devCSync, *devNpId;
hipMalloc((void**)&devCSync, ROWS*COLS*sizeof(int));
hipMalloc((void**)&devNpId, ROWS*COLS*sizeof(int));
hipMemcpy(devCSync, cNpSync, ROWS*COLS*sizeof(int), hipMemcpyHostToDevice);
hipMemcpy(devNpId, npId, ROWS*COLS*sizeof(int), hipMemcpyHostToDevice);
hipEventRecord(start, 0);
hipLaunchKernelGGL(( parentKernelSync), dim3(blocks), dim3(threads), 0, 0, devA, devB, devCSync, devNpId, ROWS, COLS);
hipDeviceSynchronize();
hipEventRecord(stop, 0);
hipEventSynchronize(stop);
//Display time
hipEventElapsedTime(&time, start, stop);
printf("\tParallel NP Sync Job time: %.2f ms\n", time);
//Retrieve results from device
hipMemcpy(cNpSync, devCSync, ROWS*COLS*sizeof(int), hipMemcpyDeviceToHost);
//Verify correctness
check(cNpSync, cHost, ROWS, COLS) ? printf("Results are correct.\n") : printf("Results are not correct.\n");
hipFree(devA);
hipFree(devB);
hipFree(devCSync);
hipFree(devNpId);
}
| b89f41f90ce610c79c3f0b94d46529ae5ddfc462.cu | /*******************
* npCase2: Selective Matrix Addition using nested parallelism.
* Author : Fanny Nina-Paravecino
* Date : October 2016
*/
#include <stdio.h>
#include <iostream>
#include <time.h>
#include <sys/time.h>
using namespace std;
__global__ void childKernelSync(int* A, int *B, int *C, int parentIdxVar)
{
int idx = blockIdx.x * blockDim.x + threadIdx.x;
C[parentIdxVar+idx] = A[parentIdxVar+idx] + B[parentIdxVar+idx];
}
__global__ void parentKernelSync(int* A, int *B, int *C, int *npId, int rows, int cols)
{
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if(A[idx*cols] == 1)
{
npId[idx] = idx*cols;
if (cols > 1024){
childKernelSync<<<cols/1024, 1024>>>(A, B, C, npId[idx]);
cudaDeviceSynchronize();
}
else{
childKernelSync<<<1, cols>>>(A, B, C, npId[idx]);
cudaDeviceSynchronize();
}
}
}
__global__ void singleKernel(int* A, int *B, int *C, int rows, int cols)
{
int idx = blockIdx.x *blockDim.x + threadIdx.x;
if(A[idx*cols] == 1)
{
for(int i=0; i < cols; i++)
C[idx*cols+i] = A[idx*cols+i]+B[idx*cols+i];
}
}
void printOutput(int *A, int rows, int cols)
{
for(int i=0; i < rows; i++)
{
for(int j=0; j < cols; j++){
printf("%d ", A[i*cols+j]);
}
printf("\n");
}
}
bool check(int *c1, int *c2, int rows, int cols){
bool same = true;
for(int i=0; i < rows; i++)
{
for(int j=0; j < cols; j++){
if(c1[i*cols+j] != c2[i*cols+j]){
printf("ERROR...[%d %d] ", i, j);
same = false;
break;
}
}
if (!same)
break;
}
return same;
}
double getWallTime(){
struct timeval time;
if(gettimeofday(&time,NULL)){
printf("Error getting time\n");
return 0;
}
return (double)time.tv_sec + (double)time.tv_usec * .000001;
}
int getTotalCores(cudaDeviceProp devProp)
{
int cores = 0;
int mp = devProp.multiProcessorCount;
int fixCores = 0;
switch (devProp.major){
case 2: // Fermi
if (devProp.minor == 1) cores = mp * 48;
else cores = mp * 32;
break;
case 3: // Kepler
fixCores = 192;
printf("Number of cores per SM: %d\n", fixCores);
cores = mp * fixCores;
break;
case 5: // Maxwell
fixCores = 128;
printf("Number of cores per SM: %d\n", fixCores);
cores = mp * fixCores;
break;
case 6: // Pascal
if (devProp.minor == 1) {
fixCores = 128;
printf("Number of cores per SM: %d\n",fixCores);
cores = mp * fixCores;
}
else if (devProp.minor == 0){
fixCores = 64;
printf("Number of cores per SM: %d\n",fixCores);
cores = mp * fixCores;
}
else printf("Unknown device type\n");
break;
default:
printf("Unknown device type\n");
break;
}
return cores;
}
void printDevProp(cudaDeviceProp devProp)
{
printf("Name: %s\n", devProp.name);
printf("Capability: (%d, %d)\n", devProp.major, devProp.minor);
printf("# SM: %d\n", devProp.multiProcessorCount);
printf("Total Cores: %d\n", getTotalCores(devProp));
printf("Clock rate: %d\n", devProp.clockRate);
printf("=================================\n");
return;
}
int main(int argC, char** argV)
{
// Number of CUDA devices
int devCount;
cudaGetDeviceCount(&devCount);
printf("CUDA Device Query...\n");
printf("There are %d CUDA devices.\n", devCount);
// Iterate through devices
int gpu = 0;
cudaDeviceProp devProp;
for (int i = 0; i < devCount; ++i)
{
// Get device properties
printf("\nCUDA Device #%d\n", i);
cudaGetDeviceProperties(&devProp, i);
printDevProp(devProp);
}
cudaSetDevice(gpu);
///*******************************
float div = 75.0f;
int ROWS = 1024, COLS = 1024;
for(int i=1; i<argC; i=i+2)
{
if(strcmp(argV[i], "--size") == 0)
{
if(i+1 < argC)
{
ROWS = atoi(argV[i+1]);
COLS = ROWS;
if(ROWS < 1)
{
cerr << "Size must be greater than 0." << endl;
exit(1);
}
}
else
{
printf("Error...\n");
exit(1);
}
}
else if(strcmp(argV[i], "--div") == 0){
if(i+1 < argC)
{
div = atof(argV[i+1]);
if(div <= 0)
{
cerr << "Divergence must be greater than 0." << endl;
exit(1);
}
}
else
{
printf("Error...\n");
exit(1);
}
}
else if(strcmp(argV[i], "--gpu") == 0){
if(i+1 < argC)
{
gpu = atoi(argV[i+1]);
if(gpu < 0)
{
cerr << "GPU index should be a positive index of the array of GPU." << endl;
exit(1);
}
break;
}
else
{
printf("Error...\n");
exit(1);
}
}
else if(strcmp(argV[i], "-h") == 0 || strcmp(argV[i], "--help") == 0)
{
cout << "Usage: " << argV[0] << " [OPTIONS] --size <number> --div <number> --gpu <number> " << endl;
cout << " -h, --help Display this information and exit." << endl;
exit(0);
}
else
{
cerr << "Did not recognize '" << argV[i] << "'. Try '" << argV[0]
<< " --help' for additional information." << endl;
exit(1);
}
}
printf("NP - Characterization: %f percentage of divergence\n", div);
printf("NP Case2 Matrix Addition: [%d x %d]\n", ROWS, COLS);
cudaSetDevice(gpu);
cudaGetDeviceProperties(&devProp, gpu);
printf("GPU: %s\n", devProp.name);
	// zero-initialise so rows that are never marked as divergent compare equal later
	int *a = (int*) calloc(ROWS*COLS, sizeof(int));
	int *b = (int*) calloc(ROWS*COLS, sizeof(int));
	int *c = (int*) calloc(ROWS*COLS, sizeof(int));
int nroChildKernels = 0;
int rndValue = 0;
	while (nroChildKernels < (ROWS*(div/100.0f))){
		rndValue = rand()%ROWS;
		if (a[rndValue*COLS] == 1) continue; // row already marked divergent, pick another
		nroChildKernels++;
		for(int j=0; j<COLS; j++){
			a[rndValue*COLS+j] = 1;
			b[rndValue*COLS+j] = 2;
		}
	}
printf("Number of child kernels: %d\n", nroChildKernels);
// Sequential
double wallS0, wallS1;
wallS0 = getWallTime();
	int *cHost = (int*)calloc(ROWS*COLS, sizeof(int)); // zeroed: rows without work must stay 0
for(int i=0; i<ROWS; i++){
if(a[i*COLS] == 1)
for(int j=0; j<COLS; j++){
cHost[i*COLS+j] = a[i*COLS+j] + b[i*COLS+j];
}
}
wallS1 = getWallTime();
printf("\tSequential Job Time: %f ms\n", (wallS1-wallS0)*1000);
// Time variables
cudaEvent_t start, stop;
float time;
cudaEventCreate(&start);
cudaEventCreate(&stop);
int *devA;
int *devB;
cudaMalloc((void**)&devA, ROWS*COLS*sizeof(int));
cudaMalloc((void**)&devB, ROWS*COLS*sizeof(int));
//Copying [A] and [B] from host memory to device memory.
cudaMemcpy(devA, a, ROWS*COLS*sizeof(int), cudaMemcpyHostToDevice);
cudaMemcpy(devB, b, ROWS*COLS*sizeof(int), cudaMemcpyHostToDevice);
// Grid configuration
dim3 threads, blocks;
if (ROWS >1024){
threads.x = 1024; threads.y = 1; threads.z = 1;
blocks.x = ROWS/threads.x; blocks.y = 1; blocks.z = 1;
}
else{
threads.x = ROWS; threads.y = 1; threads.z = 1;
blocks.x = 1; blocks.y = 1; blocks.z = 1;
}
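	// One thread is launched per row; the integer division above assumes that
	// ROWS is a multiple of 1024 whenever ROWS > 1024, otherwise the last
	// ROWS % 1024 rows are not covered by any thread.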
// NP Sync Case ****************************************************************
	int *cNpSync = (int*)calloc(ROWS*COLS, sizeof(int)); // zeroed so untouched rows match the host result
	int *npId = (int*)calloc(ROWS*COLS, sizeof(int));
int *devCSync, *devNpId;
cudaMalloc((void**)&devCSync, ROWS*COLS*sizeof(int));
cudaMalloc((void**)&devNpId, ROWS*COLS*sizeof(int));
cudaMemcpy(devCSync, cNpSync, ROWS*COLS*sizeof(int), cudaMemcpyHostToDevice);
cudaMemcpy(devNpId, npId, ROWS*COLS*sizeof(int), cudaMemcpyHostToDevice);
cudaEventRecord(start, 0);
parentKernelSync<<<blocks, threads>>>(devA, devB, devCSync, devNpId, ROWS, COLS);
cudaDeviceSynchronize();
cudaEventRecord(stop, 0);
cudaEventSynchronize(stop);
//Display time
cudaEventElapsedTime(&time, start, stop);
printf("\tParallel NP Sync Job time: %.2f ms\n", time);
//Retrieve results from device
cudaMemcpy(cNpSync, devCSync, ROWS*COLS*sizeof(int), cudaMemcpyDeviceToHost);
//Verify correctness
check(cNpSync, cHost, ROWS, COLS) ? printf("Results are correct.\n") : printf("Results are not correct.\n");
cudaFree(devA);
cudaFree(devB);
cudaFree(devCSync);
cudaFree(devNpId);
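	// Release the remaining CUDA events and host buffers before exit.
	cudaEventDestroy(start);
	cudaEventDestroy(stop);
	free(a); free(b); free(c);
	free(cHost); free(cNpSync); free(npId);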
}
|
97f5447d8003adece1d2760d443d5554d6784e72.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/* Copyright 2021 NVIDIA Corporation
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
#include "cuda_help.h"
#include "fill.cuh"
#include "prod.h"
#include "proj.h"
using namespace Legion;
namespace legate {
namespace numpy {
template<typename T>
__global__ void __launch_bounds__(THREADS_PER_BLOCK, MIN_CTAS_PER_SM)
legate_prod_2d(const AccessorRW<T, 2> inout, const AccessorRO<T, 2> in, const Rect<2> bounds, const T identity,
const int axis) {
coord_t y = bounds.lo[1] + blockIdx.x * blockDim.x + threadIdx.x;
coord_t x = bounds.lo[0] + (blockIdx.z * gridDim.y + blockIdx.y) * blockDim.y + threadIdx.y;
const Point<2> p(x, y);
if (!bounds.contains(p)) return;
T value = identity;
if (axis == 0) {
while (x <= bounds.hi[0]) {
ProdReduction<T>::template fold<true /*exclusive*/>(value, in[x][y]);
x += gridDim.z * gridDim.y * blockDim.y;
}
} else {
while (y <= bounds.hi[1]) {
ProdReduction<T>::template fold<true /*exclusive*/>(value, in[x][y]);
y += gridDim.x * blockDim.x;
}
#if __CUDA_ARCH__ >= 700
__shared__ T trampoline[THREADS_PER_BLOCK];
// Check for the case where all the threads in the same warp have
// the same x value in which case they're all going to conflict
// so instead we do a warp-level reduction so just one thread ends
// up doing the full atomic
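    // Worked example: if every lane of the warp shares the same threadIdx.y,
    // __match_any_sync returns same_mask == 0xffffffff on all lanes, so
    // active_mask is non-zero and the shared-memory combine below runs.
    // The lowest lane in the mask (lane 0 here) folds in the other 31 partial
    // products from the trampoline while every other lane resets its value to
    // the identity, so only one fold per matching group reaches global memory.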
const int same_mask = __match_any_sync(0xffffffff, threadIdx.y);
int laneid;
asm volatile("mov.s32 %0, %laneid;" : "=r"(laneid));
const int active_mask = __ballot_sync(0xffffffff, same_mask - (1 << laneid));
if (active_mask) {
// Store our data into shared
const int tid = threadIdx.y * blockDim.x + threadIdx.x;
trampoline[tid] = value;
// Make sure all the threads in the warp are done writing
__syncwarp(active_mask);
// Have the lowest thread in each mask pull in the values
int lowest_index = -1;
for (int i = 0; i < warpSize; i++)
if (same_mask & (1 << i)) {
if (lowest_index == -1) {
if (i != laneid) {
// We're not the lowest thread in the warp for
// this value so we're done, set the value back
// to identity to ensure that we don't try to
// perform the reduction out to memory
value = identity;
break;
} else // Make sure we don't do this test again
lowest_index = i;
// It was already our value, so just keep going
} else {
// Pull in the value from shared memory
const int index = tid + i - laneid;
ProdReduction<T>::template fold<true /*exclusive*/>(value, trampoline[index]);
}
}
}
#endif
}
if (value != identity) ProdReduction<T>::template fold<false /*exclusive*/>(inout[p], value);
}
template<typename T>
__global__ void __launch_bounds__(THREADS_PER_BLOCK, MIN_CTAS_PER_SM)
legate_prod_3d(const AccessorRW<T, 3> inout, const AccessorRO<T, 3> in, const Rect<3> bounds, const T identity,
const int axis) {
coord_t z = bounds.lo[2] + blockIdx.x * blockDim.x + threadIdx.x;
coord_t y = bounds.lo[1] + blockIdx.y * blockDim.y + threadIdx.y;
coord_t x = bounds.lo[0] + blockIdx.z * blockDim.z + threadIdx.z;
const Point<3> p(x, y, z);
if (!bounds.contains(p)) return;
T value = identity;
if (axis == 0) {
while (x <= bounds.hi[0]) {
ProdReduction<T>::template fold<true /*exclusive*/>(value, in[x][y][z]);
x += gridDim.z * blockDim.z;
}
} else if (axis == 1) {
while (y <= bounds.hi[1]) {
ProdReduction<T>::template fold<true /*exclusive*/>(value, in[x][y][z]);
y += gridDim.y * blockDim.y;
}
} else {
while (z <= bounds.hi[2]) {
ProdReduction<T>::template fold<true /*exclusive*/>(value, in[x][y][z]);
z += gridDim.x * blockDim.x;
}
#if __CUDA_ARCH__ >= 700
__shared__ T trampoline[THREADS_PER_BLOCK];
// Check for the case where all the threads in the same warp have
// the same x value in which case they're all going to conflict
// so instead we do a warp-level reduction so just one thread ends
// up doing the full atomic
const int same_mask = __match_any_sync(0xffffffff, threadIdx.z * blockDim.y + threadIdx.y);
int laneid;
asm volatile("mov.s32 %0, %laneid;" : "=r"(laneid));
const int active_mask = __ballot_sync(0xffffffff, same_mask - (1 << laneid));
if (active_mask) {
// Store our data into shared
const int tid = (threadIdx.z * blockDim.y + threadIdx.y) * blockDim.x + threadIdx.x;
trampoline[tid] = value;
// Make sure all the threads in the warp are done writing
__syncwarp(active_mask);
// Have the lowest thread in each mask pull in the values
int lowest_index = -1;
for (int i = 0; i < warpSize; i++)
if (same_mask & (1 << i)) {
if (lowest_index == -1) {
if (i != laneid) {
// We're not the lowest thread in the warp for
// this value so we're done, set the value back
// to identity to ensure that we don't try to
// perform the reduction out to memory
value = identity;
break;
} else // Make sure we don't do this test again
lowest_index = i;
// It was already our value, so just keep going
} else {
// Pull in the value from shared memory
const int index = tid + i - laneid;
ProdReduction<T>::template fold<true /*exclusive*/>(value, trampoline[index]);
}
}
}
#endif
}
if (value != identity) ProdReduction<T>::template fold<false /*exclusive*/>(inout[p], value);
}
template<typename T>
/*static*/ void ProdTask<T>::gpu_variant(const Task* task, const std::vector<PhysicalRegion>& regions, Context ctx,
Runtime* runtime) {
LegateDeserializer derez(task->args, task->arglen);
const int axis = derez.unpack_dimension();
const int collapse_dim = derez.unpack_dimension();
const int init_dim = derez.unpack_dimension();
const T initial_value = (task->futures.size() == 1) ? task->futures[0].get_result<T>() : ProdReduction<T>::identity;
switch (init_dim) {
case 1: {
const Rect<1> rect = NumPyProjectionFunctor::unpack_shape<1>(task, derez);
if (rect.empty()) return;
const AccessorWO<T, 1> out =
(collapse_dim >= 0) ? derez.unpack_accessor_WO<T, 1>(regions[0], rect, collapse_dim, task->index_point[collapse_dim])
: derez.unpack_accessor_WO<T, 1>(regions[0], rect);
const size_t volume = rect.volume();
const size_t blocks = (volume + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK;
hipLaunchKernelGGL(( legate_fill_1d<T>), dim3(blocks), dim3(THREADS_PER_BLOCK), 0, 0, out, initial_value, rect.lo, volume);
break;
}
case 2: {
const Rect<2> rect = NumPyProjectionFunctor::unpack_shape<2>(task, derez);
if (rect.empty()) return;
const AccessorWO<T, 2> out =
(collapse_dim >= 0) ? derez.unpack_accessor_WO<T, 2>(regions[0], rect, collapse_dim, task->index_point[collapse_dim])
: derez.unpack_accessor_WO<T, 2>(regions[0], rect);
const size_t volume = rect.volume();
const size_t blocks = (volume + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK;
const coord_t pitch = rect.hi[1] - rect.lo[1] + 1;
hipLaunchKernelGGL(( legate_fill_2d<T>), dim3(blocks), dim3(THREADS_PER_BLOCK), 0, 0, out, initial_value, rect.lo, Point<1>(pitch), volume);
break;
}
default:
assert(false); // shouldn't see any other cases
}
const int dim = derez.unpack_dimension();
switch (dim) {
// Should never get the case of 1 as this would just be a copy since
// reducing our only dimension should have called ProdReducTask
case 2: {
const Rect<2> rect = NumPyProjectionFunctor::unpack_shape<2>(task, derez);
if (rect.empty()) return;
const AccessorRW<T, 2> inout =
(collapse_dim >= 0) ? derez.unpack_accessor_RW<T, 2, 1>(regions[0], rect, collapse_dim, task->index_point[collapse_dim])
: derez.unpack_accessor_RW<T, 2>(regions[0], rect);
const AccessorRO<T, 2> in = derez.unpack_accessor_RO<T, 2>(regions[1], rect);
// Figure out how many blocks and threads we need
dim3 threads(1, 1, 1);
dim3 blocks(1, 1, 1);
raster_2d_reduction(blocks, threads, rect, axis, (const void*)legate_prod_2d<T>);
hipLaunchKernelGGL(( legate_prod_2d<T>), dim3(blocks), dim3(threads), 0, 0, inout, in, rect, ProdReduction<T>::identity, axis);
break;
}
case 3: {
const Rect<3> rect = NumPyProjectionFunctor::unpack_shape<3>(task, derez);
if (rect.empty()) return;
const AccessorRW<T, 3> inout =
(collapse_dim >= 0) ? derez.unpack_accessor_RW<T, 3, 2>(regions[0], rect, collapse_dim, task->index_point[collapse_dim])
: derez.unpack_accessor_RW<T, 3>(regions[0], rect);
const AccessorRO<T, 3> in = derez.unpack_accessor_RO<T, 3>(regions[1], rect);
// Figure out how many blocks and threads we need
dim3 threads(1, 1, 1);
dim3 blocks(1, 1, 1);
raster_3d_reduction(blocks, threads, rect, axis, (const void*)legate_prod_3d<T>);
hipLaunchKernelGGL(( legate_prod_3d<T>), dim3(blocks), dim3(threads), 0, 0, inout, in, rect, ProdReduction<T>::identity, axis);
break;
}
default:
assert(false);
}
}
INSTANTIATE_TASK_VARIANT(ProdTask, gpu_variant)
template<typename T>
__global__ void __launch_bounds__(THREADS_PER_BLOCK, MIN_CTAS_PER_SM)
legate_prod_reduce_1d(const DeferredBuffer<T, 1> buffer, const AccessorRO<T, 1> in, const Point<1> origin, const size_t max,
const T identity) {
T value = identity;
const size_t offset = blockIdx.x * blockDim.x + threadIdx.x;
if (offset < max) {
const coord_t x = origin[0] + offset;
value = in[x];
}
fold_output(buffer, value, ProdReduction<T>{});
}
template<typename T>
__global__ void __launch_bounds__(THREADS_PER_BLOCK, MIN_CTAS_PER_SM)
legate_prod_reduce_2d(const DeferredBuffer<T, 1> buffer, const AccessorRO<T, 2> in, const Point<2> origin, const Point<1> pitch,
const size_t max, const T identity) {
T value = identity;
const size_t offset = blockIdx.x * blockDim.x + threadIdx.x;
if (offset < max) {
const coord_t x = origin[0] + offset / pitch[0];
const coord_t y = origin[1] + offset % pitch[0];
value = in[x][y];
}
fold_output(buffer, value, ProdReduction<T>{});
}
template<typename T>
__global__ void __launch_bounds__(THREADS_PER_BLOCK, MIN_CTAS_PER_SM)
legate_prod_reduce_3d(const DeferredBuffer<T, 1> buffer, const AccessorRO<T, 3> in, const Point<3> origin, const Point<2> pitch,
const size_t max, const T identity) {
T value = identity;
const size_t offset = blockIdx.x * blockDim.x + threadIdx.x;
if (offset < max) {
const coord_t x = origin[0] + offset / pitch[0];
const coord_t y = origin[1] + (offset % pitch[0]) / pitch[1];
const coord_t z = origin[2] + (offset % pitch[0]) % pitch[1];
value = in[x][y][z];
}
fold_output(buffer, value, ProdReduction<T>{});
}
template<typename T>
__global__ void __launch_bounds__(THREADS_PER_BLOCK, MIN_CTAS_PER_SM)
legate_buffer_prod_reduce(const DeferredBuffer<T, 1> in, const DeferredBuffer<T, 1> out, const size_t max, const T identity) {
T value = identity;
const size_t offset = blockIdx.x * blockDim.x + threadIdx.x;
if (offset < max) value = in.read(offset);
fold_output(out, value, ProdReduction<T>{});
}
template<typename T>
__global__ void __launch_bounds__(THREADS_PER_BLOCK, MIN_CTAS_PER_SM)
legate_final_prod_reduce(const DeferredBuffer<T, 1> in, const DeferredReduction<ProdReduction<T>> out, const size_t max,
const T identity) {
T value = identity;
const size_t offset = blockIdx.x * blockDim.x + threadIdx.x;
if (offset < max) value = in.read(offset);
reduce_output(out, value);
}
template<typename T>
/*static*/ DeferredReduction<ProdReduction<T>>
ProdReducTask<T>::gpu_variant(const Task* task, const std::vector<PhysicalRegion>& regions, Context ctx, Runtime* runtime) {
LegateDeserializer derez(task->args, task->arglen);
const int dim = derez.unpack_dimension();
DeferredBuffer<T, 1> bufferA;
size_t volume = 0, blocks = 0;
switch (dim) {
case 1: {
const Rect<1> rect = NumPyProjectionFunctor::unpack_shape<1>(task, derez);
if (rect.empty()) break;
const AccessorRO<T, 1> in = derez.unpack_accessor_RO<T, 1>(regions[0], rect);
volume = rect.volume();
blocks = (volume + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK;
Rect<1> bounds(Point<1>(0), Point<1>(blocks - 1));
bufferA = DeferredBuffer<T, 1>(Memory::GPU_FB_MEM, Domain(bounds));
hipLaunchKernelGGL(( legate_prod_reduce_1d<T>), dim3(blocks), dim3(THREADS_PER_BLOCK), 0, 0, bufferA, in, rect.lo, volume, ProdReduction<T>::identity);
volume = blocks;
break;
}
case 2: {
const Rect<2> rect = NumPyProjectionFunctor::unpack_shape<2>(task, derez);
if (rect.empty()) break;
const AccessorRO<T, 2> in = derez.unpack_accessor_RO<T, 2>(regions[0], rect);
volume = rect.volume();
blocks = (volume + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK;
Rect<1> bounds(Point<1>(0), Point<1>(blocks - 1));
bufferA = DeferredBuffer<T, 1>(Memory::GPU_FB_MEM, Domain(bounds));
const coord_t pitch = rect.hi[1] - rect.lo[1] + 1;
hipLaunchKernelGGL(( legate_prod_reduce_2d<T>)
, dim3(blocks), dim3(THREADS_PER_BLOCK), 0, 0, bufferA, in, rect.lo, Point<1>(pitch), volume, ProdReduction<T>::identity);
volume = blocks;
break;
}
case 3: {
const Rect<3> rect = NumPyProjectionFunctor::unpack_shape<3>(task, derez);
if (rect.empty()) break;
const AccessorRO<T, 3> in = derez.unpack_accessor_RO<T, 3>(regions[0], rect);
volume = rect.volume();
blocks = (volume + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK;
Rect<1> bounds(Point<1>(0), Point<1>(blocks - 1));
bufferA = DeferredBuffer<T, 1>(Memory::GPU_FB_MEM, Domain(bounds));
const coord_t diffy = rect.hi[1] - rect.lo[1] + 1;
const coord_t diffz = rect.hi[2] - rect.lo[2] + 1;
const coord_t pitch[2] = {diffy * diffz, diffz};
hipLaunchKernelGGL(( legate_prod_reduce_3d<T>)
, dim3(blocks), dim3(THREADS_PER_BLOCK), 0, 0, bufferA, in, rect.lo, Point<2>(pitch), volume, ProdReduction<T>::identity);
volume = blocks;
break;
}
default:
assert(false);
}
// Continue reducing buffers until we get down to one small enough that
// it can be handled by a single CTA and then we can do the final launch
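  // Example (assuming THREADS_PER_BLOCK == 1024): 4,000,000 input elements
  // leave 3907 partial products after the first pass above; one trip through
  // the ping-pong loop below reduces that to 4, and the final single-CTA
  // launch collapses those into the scalar result.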
DeferredBuffer<T, 1> last = bufferA;
if (volume > THREADS_PER_BLOCK) {
DeferredBuffer<T, 1> bufferB;
bool b_initialized = false;
bool forward = true;
while (volume > THREADS_PER_BLOCK) {
blocks = (volume + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK;
if (!b_initialized) {
Rect<1> bounds = Rect<1>(Point<1>(0), Point<1>(blocks - 1));
bufferB = DeferredBuffer<T, 1>(Memory::GPU_FB_MEM, Domain(bounds));
b_initialized = true;
}
if (forward) {
hipLaunchKernelGGL(( legate_buffer_prod_reduce<T>), dim3(blocks), dim3(THREADS_PER_BLOCK), 0, 0, bufferA, bufferB, volume, ProdReduction<T>::identity);
forward = false;
} else {
hipLaunchKernelGGL(( legate_buffer_prod_reduce<T>), dim3(blocks), dim3(THREADS_PER_BLOCK), 0, 0, bufferB, bufferA, volume, ProdReduction<T>::identity);
forward = true;
}
volume = blocks;
}
if (!forward) last = bufferB;
}
DeferredReduction<ProdReduction<T>> result;
// One last kernel launch to do the final reduction to a single value
  if (volume > 0)
    hipLaunchKernelGGL(( legate_final_prod_reduce<T>), dim3(1), dim3(THREADS_PER_BLOCK), 0, 0, last, result, volume, ProdReduction<T>::identity);
return result;
}
INSTANTIATE_DEFERRED_REDUCTION_TASK_VARIANT(ProdReducTask, ProdReduction, gpu_variant)
template<typename T, int DIM>
struct ProdRadixArgs {
AccessorRO<T, DIM> in[MAX_REDUCTION_RADIX];
};
template<typename T>
__global__ void __launch_bounds__(THREADS_PER_BLOCK, MIN_CTAS_PER_SM)
legate_prod_radix_1d(const AccessorWO<T, 1> out, const ProdRadixArgs<T, 1> args, const size_t argmax, const Point<1> origin,
const size_t max) {
const size_t offset = blockIdx.x * blockDim.x + threadIdx.x;
if (offset >= max) return;
const coord_t x = origin[0] + offset;
T val = args.in[0][x];
for (unsigned idx = 1; idx < argmax; idx++)
ProdReduction<T>::template fold<true /*exclusive*/>(val, args.in[idx][x]);
out[x] = val;
}
template<typename T>
__global__ void __launch_bounds__(THREADS_PER_BLOCK, MIN_CTAS_PER_SM)
legate_prod_radix_2d(const AccessorWO<T, 2> out, const ProdRadixArgs<T, 2> args, const size_t argmax, const Point<2> origin,
const Point<1> pitch, const size_t max) {
const size_t offset = blockIdx.x * blockDim.x + threadIdx.x;
if (offset >= max) return;
const coord_t x = origin[0] + offset / pitch[0];
const coord_t y = origin[1] + offset % pitch[0];
T val = args.in[0][x][y];
for (unsigned idx = 1; idx < argmax; idx++)
ProdReduction<T>::template fold<true /*exclusive*/>(val, args.in[idx][x][y]);
out[x][y] = val;
}
template<typename T>
__global__ void __launch_bounds__(THREADS_PER_BLOCK, MIN_CTAS_PER_SM)
legate_prod_radix_3d(const AccessorWO<T, 3> out, const ProdRadixArgs<T, 3> args, const size_t argmax, const Point<3> origin,
const Point<2> pitch, const size_t max) {
const size_t offset = blockIdx.x * blockDim.x + threadIdx.x;
if (offset >= max) return;
const coord_t x = origin[0] + offset / pitch[0];
const coord_t y = origin[1] + (offset % pitch[0]) / pitch[1];
const coord_t z = origin[2] + (offset % pitch[0]) % pitch[1];
T val = args.in[0][x][y][z];
for (unsigned idx = 1; idx < argmax; idx++)
ProdReduction<T>::template fold<true /*exclusive*/>(val, args.in[idx][x][y][z]);
out[x][y][z] = val;
}
template<typename T>
/*static*/ void ProdRadixTask<T>::gpu_variant(const Task* task, const std::vector<PhysicalRegion>& regions, Context ctx,
Runtime* runtime) {
LegateDeserializer derez(task->args, task->arglen);
assert(task->regions.size() <= MAX_REDUCTION_RADIX);
const int radix = derez.unpack_dimension();
const int extra_dim_out = derez.unpack_dimension();
const int extra_dim_in = derez.unpack_dimension();
const int dim = derez.unpack_dimension();
const coord_t offset = (extra_dim_in >= 0) ? task->index_point[extra_dim_in] * radix : 0;
switch (dim) {
case 1: {
const Rect<1> rect = NumPyProjectionFunctor::unpack_shape<1>(task, derez);
if (rect.empty()) break;
const AccessorWO<T, 1> out =
(extra_dim_out >= 0) ? derez.unpack_accessor_WO<T, 1>(regions[0], rect, extra_dim_out, task->index_point[extra_dim_out])
: derez.unpack_accessor_WO<T, 1>(regions[0], rect);
ProdRadixArgs<T, 1> args;
unsigned num_inputs = 0;
for (unsigned idx = 1; idx < task->regions.size(); idx++)
if (task->regions[idx].region.exists())
args.in[num_inputs++] = derez.unpack_accessor_RO<T, 1>(regions[idx], rect, extra_dim_in, offset + idx - 1);
const size_t volume = rect.volume();
const size_t blocks = (volume + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK;
hipLaunchKernelGGL(( legate_prod_radix_1d<T>), dim3(blocks), dim3(THREADS_PER_BLOCK), 0, 0, out, args, num_inputs, rect.lo, volume);
break;
}
case 2: {
const Rect<2> rect = NumPyProjectionFunctor::unpack_shape<2>(task, derez);
if (rect.empty()) break;
const AccessorWO<T, 2> out =
(extra_dim_out >= 0) ? derez.unpack_accessor_WO<T, 2>(regions[0], rect, extra_dim_out, task->index_point[extra_dim_out])
: derez.unpack_accessor_WO<T, 2>(regions[0], rect);
ProdRadixArgs<T, 2> args;
unsigned num_inputs = 0;
for (unsigned idx = 1; idx < task->regions.size(); idx++)
if (task->regions[idx].region.exists())
args.in[num_inputs++] = derez.unpack_accessor_RO<T, 2>(regions[idx], rect, extra_dim_in, offset + idx - 1);
const size_t volume = rect.volume();
const size_t blocks = (volume + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK;
const coord_t pitch = rect.hi[1] - rect.lo[1] + 1;
hipLaunchKernelGGL(( legate_prod_radix_2d<T>), dim3(blocks), dim3(THREADS_PER_BLOCK), 0, 0, out, args, num_inputs, rect.lo, Point<1>(pitch), volume);
break;
}
case 3: {
const Rect<3> rect = NumPyProjectionFunctor::unpack_shape<3>(task, derez);
if (rect.empty()) break;
const AccessorWO<T, 3> out =
(extra_dim_out >= 0) ? derez.unpack_accessor_WO<T, 3>(regions[0], rect, extra_dim_out, task->index_point[extra_dim_out])
: derez.unpack_accessor_WO<T, 3>(regions[0], rect);
ProdRadixArgs<T, 3> args;
unsigned num_inputs = 0;
for (unsigned idx = 1; idx < task->regions.size(); idx++)
args.in[num_inputs++] = derez.unpack_accessor_RO<T, 3>(regions[idx], rect, extra_dim_in, offset + idx - 1);
const size_t volume = rect.volume();
const size_t blocks = (volume + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK;
const coord_t diffy = rect.hi[1] - rect.lo[1] + 1;
const coord_t diffz = rect.hi[2] - rect.lo[2] + 1;
const coord_t pitch[2] = {diffy * diffz, diffz};
hipLaunchKernelGGL(( legate_prod_radix_3d<T>), dim3(blocks), dim3(THREADS_PER_BLOCK), 0, 0, out, args, num_inputs, rect.lo, Point<2>(pitch), volume);
break;
}
default:
assert(false);
}
}
INSTANTIATE_TASK_VARIANT(ProdRadixTask, gpu_variant)
} // namespace numpy
} // namespace legate
| 97f5447d8003adece1d2760d443d5554d6784e72.cu | /* Copyright 2021 NVIDIA Corporation
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
#include "cuda_help.h"
#include "fill.cuh"
#include "prod.h"
#include "proj.h"
using namespace Legion;
namespace legate {
namespace numpy {
template<typename T>
__global__ void __launch_bounds__(THREADS_PER_BLOCK, MIN_CTAS_PER_SM)
legate_prod_2d(const AccessorRW<T, 2> inout, const AccessorRO<T, 2> in, const Rect<2> bounds, const T identity,
const int axis) {
coord_t y = bounds.lo[1] + blockIdx.x * blockDim.x + threadIdx.x;
coord_t x = bounds.lo[0] + (blockIdx.z * gridDim.y + blockIdx.y) * blockDim.y + threadIdx.y;
const Point<2> p(x, y);
if (!bounds.contains(p)) return;
T value = identity;
if (axis == 0) {
while (x <= bounds.hi[0]) {
ProdReduction<T>::template fold<true /*exclusive*/>(value, in[x][y]);
x += gridDim.z * gridDim.y * blockDim.y;
}
} else {
while (y <= bounds.hi[1]) {
ProdReduction<T>::template fold<true /*exclusive*/>(value, in[x][y]);
y += gridDim.x * blockDim.x;
}
#if __CUDA_ARCH__ >= 700
__shared__ T trampoline[THREADS_PER_BLOCK];
// Check for the case where all the threads in the same warp have
// the same x value in which case they're all going to conflict
// so instead we do a warp-level reduction so just one thread ends
// up doing the full atomic
const int same_mask = __match_any_sync(0xffffffff, threadIdx.y);
int laneid;
asm volatile("mov.s32 %0, %laneid;" : "=r"(laneid));
const int active_mask = __ballot_sync(0xffffffff, same_mask - (1 << laneid));
if (active_mask) {
// Store our data into shared
const int tid = threadIdx.y * blockDim.x + threadIdx.x;
trampoline[tid] = value;
// Make sure all the threads in the warp are done writing
__syncwarp(active_mask);
// Have the lowest thread in each mask pull in the values
int lowest_index = -1;
for (int i = 0; i < warpSize; i++)
if (same_mask & (1 << i)) {
if (lowest_index == -1) {
if (i != laneid) {
// We're not the lowest thread in the warp for
// this value so we're done, set the value back
// to identity to ensure that we don't try to
// perform the reduction out to memory
value = identity;
break;
} else // Make sure we don't do this test again
lowest_index = i;
// It was already our value, so just keep going
} else {
// Pull in the value from shared memory
const int index = tid + i - laneid;
ProdReduction<T>::template fold<true /*exclusive*/>(value, trampoline[index]);
}
}
}
#endif
}
if (value != identity) ProdReduction<T>::template fold<false /*exclusive*/>(inout[p], value);
}
template<typename T>
__global__ void __launch_bounds__(THREADS_PER_BLOCK, MIN_CTAS_PER_SM)
legate_prod_3d(const AccessorRW<T, 3> inout, const AccessorRO<T, 3> in, const Rect<3> bounds, const T identity,
const int axis) {
coord_t z = bounds.lo[2] + blockIdx.x * blockDim.x + threadIdx.x;
coord_t y = bounds.lo[1] + blockIdx.y * blockDim.y + threadIdx.y;
coord_t x = bounds.lo[0] + blockIdx.z * blockDim.z + threadIdx.z;
const Point<3> p(x, y, z);
if (!bounds.contains(p)) return;
T value = identity;
if (axis == 0) {
while (x <= bounds.hi[0]) {
ProdReduction<T>::template fold<true /*exclusive*/>(value, in[x][y][z]);
x += gridDim.z * blockDim.z;
}
} else if (axis == 1) {
while (y <= bounds.hi[1]) {
ProdReduction<T>::template fold<true /*exclusive*/>(value, in[x][y][z]);
y += gridDim.y * blockDim.y;
}
} else {
while (z <= bounds.hi[2]) {
ProdReduction<T>::template fold<true /*exclusive*/>(value, in[x][y][z]);
z += gridDim.x * blockDim.x;
}
#if __CUDA_ARCH__ >= 700
__shared__ T trampoline[THREADS_PER_BLOCK];
// Check for the case where all the threads in the same warp have
// the same x value in which case they're all going to conflict
// so instead we do a warp-level reduction so just one thread ends
// up doing the full atomic
const int same_mask = __match_any_sync(0xffffffff, threadIdx.z * blockDim.y + threadIdx.y);
int laneid;
asm volatile("mov.s32 %0, %laneid;" : "=r"(laneid));
const int active_mask = __ballot_sync(0xffffffff, same_mask - (1 << laneid));
if (active_mask) {
// Store our data into shared
const int tid = (threadIdx.z * blockDim.y + threadIdx.y) * blockDim.x + threadIdx.x;
trampoline[tid] = value;
// Make sure all the threads in the warp are done writing
__syncwarp(active_mask);
// Have the lowest thread in each mask pull in the values
int lowest_index = -1;
for (int i = 0; i < warpSize; i++)
if (same_mask & (1 << i)) {
if (lowest_index == -1) {
if (i != laneid) {
// We're not the lowest thread in the warp for
// this value so we're done, set the value back
// to identity to ensure that we don't try to
// perform the reduction out to memory
value = identity;
break;
} else // Make sure we don't do this test again
lowest_index = i;
// It was already our value, so just keep going
} else {
// Pull in the value from shared memory
const int index = tid + i - laneid;
ProdReduction<T>::template fold<true /*exclusive*/>(value, trampoline[index]);
}
}
}
#endif
}
if (value != identity) ProdReduction<T>::template fold<false /*exclusive*/>(inout[p], value);
}
template<typename T>
/*static*/ void ProdTask<T>::gpu_variant(const Task* task, const std::vector<PhysicalRegion>& regions, Context ctx,
Runtime* runtime) {
LegateDeserializer derez(task->args, task->arglen);
const int axis = derez.unpack_dimension();
const int collapse_dim = derez.unpack_dimension();
const int init_dim = derez.unpack_dimension();
const T initial_value = (task->futures.size() == 1) ? task->futures[0].get_result<T>() : ProdReduction<T>::identity;
switch (init_dim) {
case 1: {
const Rect<1> rect = NumPyProjectionFunctor::unpack_shape<1>(task, derez);
if (rect.empty()) return;
const AccessorWO<T, 1> out =
(collapse_dim >= 0) ? derez.unpack_accessor_WO<T, 1>(regions[0], rect, collapse_dim, task->index_point[collapse_dim])
: derez.unpack_accessor_WO<T, 1>(regions[0], rect);
const size_t volume = rect.volume();
const size_t blocks = (volume + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK;
legate_fill_1d<T><<<blocks, THREADS_PER_BLOCK>>>(out, initial_value, rect.lo, volume);
break;
}
case 2: {
const Rect<2> rect = NumPyProjectionFunctor::unpack_shape<2>(task, derez);
if (rect.empty()) return;
const AccessorWO<T, 2> out =
(collapse_dim >= 0) ? derez.unpack_accessor_WO<T, 2>(regions[0], rect, collapse_dim, task->index_point[collapse_dim])
: derez.unpack_accessor_WO<T, 2>(regions[0], rect);
const size_t volume = rect.volume();
const size_t blocks = (volume + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK;
const coord_t pitch = rect.hi[1] - rect.lo[1] + 1;
legate_fill_2d<T><<<blocks, THREADS_PER_BLOCK>>>(out, initial_value, rect.lo, Point<1>(pitch), volume);
break;
}
default:
assert(false); // shouldn't see any other cases
}
const int dim = derez.unpack_dimension();
switch (dim) {
// Should never get the case of 1 as this would just be a copy since
// reducing our only dimension should have called ProdReducTask
case 2: {
const Rect<2> rect = NumPyProjectionFunctor::unpack_shape<2>(task, derez);
if (rect.empty()) return;
const AccessorRW<T, 2> inout =
(collapse_dim >= 0) ? derez.unpack_accessor_RW<T, 2, 1>(regions[0], rect, collapse_dim, task->index_point[collapse_dim])
: derez.unpack_accessor_RW<T, 2>(regions[0], rect);
const AccessorRO<T, 2> in = derez.unpack_accessor_RO<T, 2>(regions[1], rect);
// Figure out how many blocks and threads we need
dim3 threads(1, 1, 1);
dim3 blocks(1, 1, 1);
raster_2d_reduction(blocks, threads, rect, axis, (const void*)legate_prod_2d<T>);
legate_prod_2d<T><<<blocks, threads>>>(inout, in, rect, ProdReduction<T>::identity, axis);
break;
}
case 3: {
const Rect<3> rect = NumPyProjectionFunctor::unpack_shape<3>(task, derez);
if (rect.empty()) return;
const AccessorRW<T, 3> inout =
(collapse_dim >= 0) ? derez.unpack_accessor_RW<T, 3, 2>(regions[0], rect, collapse_dim, task->index_point[collapse_dim])
: derez.unpack_accessor_RW<T, 3>(regions[0], rect);
const AccessorRO<T, 3> in = derez.unpack_accessor_RO<T, 3>(regions[1], rect);
// Figure out how many blocks and threads we need
dim3 threads(1, 1, 1);
dim3 blocks(1, 1, 1);
raster_3d_reduction(blocks, threads, rect, axis, (const void*)legate_prod_3d<T>);
legate_prod_3d<T><<<blocks, threads>>>(inout, in, rect, ProdReduction<T>::identity, axis);
break;
}
default:
assert(false);
}
}
INSTANTIATE_TASK_VARIANT(ProdTask, gpu_variant)
template<typename T>
__global__ void __launch_bounds__(THREADS_PER_BLOCK, MIN_CTAS_PER_SM)
legate_prod_reduce_1d(const DeferredBuffer<T, 1> buffer, const AccessorRO<T, 1> in, const Point<1> origin, const size_t max,
const T identity) {
T value = identity;
const size_t offset = blockIdx.x * blockDim.x + threadIdx.x;
if (offset < max) {
const coord_t x = origin[0] + offset;
value = in[x];
}
fold_output(buffer, value, ProdReduction<T>{});
}
template<typename T>
__global__ void __launch_bounds__(THREADS_PER_BLOCK, MIN_CTAS_PER_SM)
legate_prod_reduce_2d(const DeferredBuffer<T, 1> buffer, const AccessorRO<T, 2> in, const Point<2> origin, const Point<1> pitch,
const size_t max, const T identity) {
T value = identity;
const size_t offset = blockIdx.x * blockDim.x + threadIdx.x;
if (offset < max) {
const coord_t x = origin[0] + offset / pitch[0];
const coord_t y = origin[1] + offset % pitch[0];
value = in[x][y];
}
fold_output(buffer, value, ProdReduction<T>{});
}
template<typename T>
__global__ void __launch_bounds__(THREADS_PER_BLOCK, MIN_CTAS_PER_SM)
legate_prod_reduce_3d(const DeferredBuffer<T, 1> buffer, const AccessorRO<T, 3> in, const Point<3> origin, const Point<2> pitch,
const size_t max, const T identity) {
T value = identity;
const size_t offset = blockIdx.x * blockDim.x + threadIdx.x;
if (offset < max) {
const coord_t x = origin[0] + offset / pitch[0];
const coord_t y = origin[1] + (offset % pitch[0]) / pitch[1];
const coord_t z = origin[2] + (offset % pitch[0]) % pitch[1];
value = in[x][y][z];
}
fold_output(buffer, value, ProdReduction<T>{});
}
template<typename T>
__global__ void __launch_bounds__(THREADS_PER_BLOCK, MIN_CTAS_PER_SM)
legate_buffer_prod_reduce(const DeferredBuffer<T, 1> in, const DeferredBuffer<T, 1> out, const size_t max, const T identity) {
T value = identity;
const size_t offset = blockIdx.x * blockDim.x + threadIdx.x;
if (offset < max) value = in.read(offset);
fold_output(out, value, ProdReduction<T>{});
}
template<typename T>
__global__ void __launch_bounds__(THREADS_PER_BLOCK, MIN_CTAS_PER_SM)
legate_final_prod_reduce(const DeferredBuffer<T, 1> in, const DeferredReduction<ProdReduction<T>> out, const size_t max,
const T identity) {
T value = identity;
const size_t offset = blockIdx.x * blockDim.x + threadIdx.x;
if (offset < max) value = in.read(offset);
reduce_output(out, value);
}
template<typename T>
/*static*/ DeferredReduction<ProdReduction<T>>
ProdReducTask<T>::gpu_variant(const Task* task, const std::vector<PhysicalRegion>& regions, Context ctx, Runtime* runtime) {
LegateDeserializer derez(task->args, task->arglen);
const int dim = derez.unpack_dimension();
DeferredBuffer<T, 1> bufferA;
size_t volume = 0, blocks = 0;
switch (dim) {
case 1: {
const Rect<1> rect = NumPyProjectionFunctor::unpack_shape<1>(task, derez);
if (rect.empty()) break;
const AccessorRO<T, 1> in = derez.unpack_accessor_RO<T, 1>(regions[0], rect);
volume = rect.volume();
blocks = (volume + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK;
Rect<1> bounds(Point<1>(0), Point<1>(blocks - 1));
bufferA = DeferredBuffer<T, 1>(Memory::GPU_FB_MEM, Domain(bounds));
legate_prod_reduce_1d<T><<<blocks, THREADS_PER_BLOCK>>>(bufferA, in, rect.lo, volume, ProdReduction<T>::identity);
volume = blocks;
break;
}
case 2: {
const Rect<2> rect = NumPyProjectionFunctor::unpack_shape<2>(task, derez);
if (rect.empty()) break;
const AccessorRO<T, 2> in = derez.unpack_accessor_RO<T, 2>(regions[0], rect);
volume = rect.volume();
blocks = (volume + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK;
Rect<1> bounds(Point<1>(0), Point<1>(blocks - 1));
bufferA = DeferredBuffer<T, 1>(Memory::GPU_FB_MEM, Domain(bounds));
const coord_t pitch = rect.hi[1] - rect.lo[1] + 1;
legate_prod_reduce_2d<T>
<<<blocks, THREADS_PER_BLOCK>>>(bufferA, in, rect.lo, Point<1>(pitch), volume, ProdReduction<T>::identity);
volume = blocks;
break;
}
case 3: {
const Rect<3> rect = NumPyProjectionFunctor::unpack_shape<3>(task, derez);
if (rect.empty()) break;
const AccessorRO<T, 3> in = derez.unpack_accessor_RO<T, 3>(regions[0], rect);
volume = rect.volume();
blocks = (volume + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK;
Rect<1> bounds(Point<1>(0), Point<1>(blocks - 1));
bufferA = DeferredBuffer<T, 1>(Memory::GPU_FB_MEM, Domain(bounds));
const coord_t diffy = rect.hi[1] - rect.lo[1] + 1;
const coord_t diffz = rect.hi[2] - rect.lo[2] + 1;
const coord_t pitch[2] = {diffy * diffz, diffz};
legate_prod_reduce_3d<T>
<<<blocks, THREADS_PER_BLOCK>>>(bufferA, in, rect.lo, Point<2>(pitch), volume, ProdReduction<T>::identity);
volume = blocks;
break;
}
default:
assert(false);
}
// Continue reducing buffers until we get down to one small enough that
// it can be handled by a single CTA and then we can do the final launch
DeferredBuffer<T, 1> last = bufferA;
if (volume > THREADS_PER_BLOCK) {
DeferredBuffer<T, 1> bufferB;
bool b_initialized = false;
bool forward = true;
while (volume > THREADS_PER_BLOCK) {
blocks = (volume + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK;
if (!b_initialized) {
Rect<1> bounds = Rect<1>(Point<1>(0), Point<1>(blocks - 1));
bufferB = DeferredBuffer<T, 1>(Memory::GPU_FB_MEM, Domain(bounds));
b_initialized = true;
}
if (forward) {
legate_buffer_prod_reduce<T><<<blocks, THREADS_PER_BLOCK>>>(bufferA, bufferB, volume, ProdReduction<T>::identity);
forward = false;
} else {
legate_buffer_prod_reduce<T><<<blocks, THREADS_PER_BLOCK>>>(bufferB, bufferA, volume, ProdReduction<T>::identity);
forward = true;
}
volume = blocks;
}
if (!forward) last = bufferB;
}
DeferredReduction<ProdReduction<T>> result;
// One last kernel launch to do the final reduction to a single value
if (volume > 0) legate_final_prod_reduce<T><<<1, THREADS_PER_BLOCK>>>(last, result, volume, ProdReduction<T>::identity);
return result;
}
INSTANTIATE_DEFERRED_REDUCTION_TASK_VARIANT(ProdReducTask, ProdReduction, gpu_variant)
template<typename T, int DIM>
struct ProdRadixArgs {
AccessorRO<T, DIM> in[MAX_REDUCTION_RADIX];
};
template<typename T>
__global__ void __launch_bounds__(THREADS_PER_BLOCK, MIN_CTAS_PER_SM)
legate_prod_radix_1d(const AccessorWO<T, 1> out, const ProdRadixArgs<T, 1> args, const size_t argmax, const Point<1> origin,
const size_t max) {
const size_t offset = blockIdx.x * blockDim.x + threadIdx.x;
if (offset >= max) return;
const coord_t x = origin[0] + offset;
T val = args.in[0][x];
for (unsigned idx = 1; idx < argmax; idx++)
ProdReduction<T>::template fold<true /*exclusive*/>(val, args.in[idx][x]);
out[x] = val;
}
template<typename T>
__global__ void __launch_bounds__(THREADS_PER_BLOCK, MIN_CTAS_PER_SM)
legate_prod_radix_2d(const AccessorWO<T, 2> out, const ProdRadixArgs<T, 2> args, const size_t argmax, const Point<2> origin,
const Point<1> pitch, const size_t max) {
const size_t offset = blockIdx.x * blockDim.x + threadIdx.x;
if (offset >= max) return;
const coord_t x = origin[0] + offset / pitch[0];
const coord_t y = origin[1] + offset % pitch[0];
T val = args.in[0][x][y];
for (unsigned idx = 1; idx < argmax; idx++)
ProdReduction<T>::template fold<true /*exclusive*/>(val, args.in[idx][x][y]);
out[x][y] = val;
}
template<typename T>
__global__ void __launch_bounds__(THREADS_PER_BLOCK, MIN_CTAS_PER_SM)
legate_prod_radix_3d(const AccessorWO<T, 3> out, const ProdRadixArgs<T, 3> args, const size_t argmax, const Point<3> origin,
const Point<2> pitch, const size_t max) {
const size_t offset = blockIdx.x * blockDim.x + threadIdx.x;
if (offset >= max) return;
const coord_t x = origin[0] + offset / pitch[0];
const coord_t y = origin[1] + (offset % pitch[0]) / pitch[1];
const coord_t z = origin[2] + (offset % pitch[0]) % pitch[1];
T val = args.in[0][x][y][z];
for (unsigned idx = 1; idx < argmax; idx++)
ProdReduction<T>::template fold<true /*exclusive*/>(val, args.in[idx][x][y][z]);
out[x][y][z] = val;
}
template<typename T>
/*static*/ void ProdRadixTask<T>::gpu_variant(const Task* task, const std::vector<PhysicalRegion>& regions, Context ctx,
Runtime* runtime) {
LegateDeserializer derez(task->args, task->arglen);
assert(task->regions.size() <= MAX_REDUCTION_RADIX);
const int radix = derez.unpack_dimension();
const int extra_dim_out = derez.unpack_dimension();
const int extra_dim_in = derez.unpack_dimension();
const int dim = derez.unpack_dimension();
const coord_t offset = (extra_dim_in >= 0) ? task->index_point[extra_dim_in] * radix : 0;
switch (dim) {
case 1: {
const Rect<1> rect = NumPyProjectionFunctor::unpack_shape<1>(task, derez);
if (rect.empty()) break;
const AccessorWO<T, 1> out =
(extra_dim_out >= 0) ? derez.unpack_accessor_WO<T, 1>(regions[0], rect, extra_dim_out, task->index_point[extra_dim_out])
: derez.unpack_accessor_WO<T, 1>(regions[0], rect);
ProdRadixArgs<T, 1> args;
unsigned num_inputs = 0;
for (unsigned idx = 1; idx < task->regions.size(); idx++)
if (task->regions[idx].region.exists())
args.in[num_inputs++] = derez.unpack_accessor_RO<T, 1>(regions[idx], rect, extra_dim_in, offset + idx - 1);
const size_t volume = rect.volume();
const size_t blocks = (volume + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK;
legate_prod_radix_1d<T><<<blocks, THREADS_PER_BLOCK>>>(out, args, num_inputs, rect.lo, volume);
break;
}
case 2: {
const Rect<2> rect = NumPyProjectionFunctor::unpack_shape<2>(task, derez);
if (rect.empty()) break;
const AccessorWO<T, 2> out =
(extra_dim_out >= 0) ? derez.unpack_accessor_WO<T, 2>(regions[0], rect, extra_dim_out, task->index_point[extra_dim_out])
: derez.unpack_accessor_WO<T, 2>(regions[0], rect);
ProdRadixArgs<T, 2> args;
unsigned num_inputs = 0;
for (unsigned idx = 1; idx < task->regions.size(); idx++)
if (task->regions[idx].region.exists())
args.in[num_inputs++] = derez.unpack_accessor_RO<T, 2>(regions[idx], rect, extra_dim_in, offset + idx - 1);
const size_t volume = rect.volume();
const size_t blocks = (volume + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK;
const coord_t pitch = rect.hi[1] - rect.lo[1] + 1;
legate_prod_radix_2d<T><<<blocks, THREADS_PER_BLOCK>>>(out, args, num_inputs, rect.lo, Point<1>(pitch), volume);
break;
}
case 3: {
const Rect<3> rect = NumPyProjectionFunctor::unpack_shape<3>(task, derez);
if (rect.empty()) break;
const AccessorWO<T, 3> out =
(extra_dim_out >= 0) ? derez.unpack_accessor_WO<T, 3>(regions[0], rect, extra_dim_out, task->index_point[extra_dim_out])
: derez.unpack_accessor_WO<T, 3>(regions[0], rect);
ProdRadixArgs<T, 3> args;
unsigned num_inputs = 0;
for (unsigned idx = 1; idx < task->regions.size(); idx++)
args.in[num_inputs++] = derez.unpack_accessor_RO<T, 3>(regions[idx], rect, extra_dim_in, offset + idx - 1);
const size_t volume = rect.volume();
const size_t blocks = (volume + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK;
const coord_t diffy = rect.hi[1] - rect.lo[1] + 1;
const coord_t diffz = rect.hi[2] - rect.lo[2] + 1;
const coord_t pitch[2] = {diffy * diffz, diffz};
legate_prod_radix_3d<T><<<blocks, THREADS_PER_BLOCK>>>(out, args, num_inputs, rect.lo, Point<2>(pitch), volume);
break;
}
default:
assert(false);
}
}
INSTANTIATE_TASK_VARIANT(ProdRadixTask, gpu_variant)
} // namespace numpy
} // namespace legate
|
b00b8cd611ba3919db8898b84bc7fc08c4b4bdd6.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Copyright 1993-2010 NVIDIA Corporation. All rights reserved.
*
* Please refer to the NVIDIA end user license agreement (EULA) associated
* with this source code for terms and conditions that govern your use of
* this software. Any use, reproduction, disclosure, or distribution of
* this software and related documentation outside the terms of the EULA
* is strictly prohibited.
*
* Tridiagonal solvers.
* Device code for sweep solver (one-system-per-thread).
*
* NVIDIA, Nikolai Sakharnykh, 2009
*/
// solves a bunch of tridiagonal linear systems
// much better performance when doing data reordering before
// so that all memory accesses are coalesced
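// Layout note: with reorder == true, system i stores its k-th coefficient at
// index i + k * num_systems, so at every step of the sweep adjacent threads
// read adjacent addresses (coalesced). Without reordering, the k-th
// coefficient of system i sits at i * system_size + k and consecutive
// threads are system_size elements apart.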
__global__ void sweep_small_systems_local_kernel(
const float*__restrict__ a_d,
const float*__restrict__ b_d,
const float*__restrict__ c_d,
const float*__restrict__ d_d,
float*__restrict__ x_d,
const int system_size,
const int num_systems,
const bool reorder)
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
// need to check for in-bounds because of the thread block size
if (i >= num_systems) return;
int stride = reorder ? num_systems: 1;
int base_idx = reorder ? i : i * system_size;
	// local memory (per-thread scratch for the elimination coefficients;
	// the fixed size assumes system_size <= 128)
	float a[128];
float c1, c2, c3;
float f_i, x_prev, x_next;
// solving next system:
// c1 * u_i+1 + c2 * u_i + c3 * u_i-1 = f_i
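	// Forward elimination computes, for each row k,
	//   q      = c3 * a[k] + c2
	//   x[k]   = (f_k - c3 * x[k-1]) / q
	//   a[k+1] = -c1 / q
	// and the backward pass then adds a[k+1] * x[k+1] to each stored x[k]
	// (the classic Thomas / sweep algorithm for tridiagonal systems).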
c1 = c_d[base_idx];
c2 = b_d[base_idx];
f_i = d_d[base_idx];
#ifndef NATIVE_DIVIDE
a[1] = - c1 / c2;
x_prev = f_i / c2;
#else
a[1] = - __fdiv_rn(c1, c2);
x_prev = __fdiv_rn(f_i, c2);
#endif
// forward trace
int idx = base_idx;
x_d[base_idx] = x_prev;
for (int k = 1; k < system_size-1; k++)
{
idx += stride;
c1 = c_d[idx];
c2 = b_d[idx];
c3 = a_d[idx];
f_i = d_d[idx];
float q = (c3 * a[k] + c2);
#ifndef NATIVE_DIVIDE
float t = 1 / q;
#else
float t = __frcp_rn(q);
#endif
x_next = (f_i - c3 * x_prev) * t;
x_d[idx] = x_prev = x_next;
a[k+1] = - c1 * t;
}
idx += stride;
c2 = b_d[idx];
c3 = a_d[idx];
f_i = d_d[idx];
float q = (c3 * a[system_size-1] + c2);
#ifndef NATIVE_DIVIDE
float t = 1 / q;
#else
float t = __frcp_rn(q);
#endif
x_next = (f_i - c3 * x_prev) * t;
x_d[idx] = x_prev = x_next;
// backward trace
for (int k = system_size-2; k >= 0; k--)
{
idx -= stride;
x_next = x_d[idx];
x_next += x_prev * a[k+1];
x_d[idx] = x_prev = x_next;
}
}
__device__
inline int getLocalIdx(int i, int k, int num_systems)
{
return i + num_systems * k;
	// alternative, uncoalesced layout: k + system_size * i
	// (system_size would need to be passed in for that variant)
}
__global__ void sweep_small_systems_global_kernel(
const float*__restrict__ a_d,
const float*__restrict__ b_d,
const float*__restrict__ c_d,
const float*__restrict__ d_d,
float*__restrict__ x_d,
float*__restrict__ w_d,
const int system_size,
const int num_systems,
const bool reorder)
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
// need to check for in-bounds because of the thread block size
if (i >= num_systems) return;
int stride = reorder ? num_systems: 1;
int base_idx = reorder ? i : i * system_size;
float c1, c2, c3;
float f_i, x_prev, x_next;
// solving next system:
// c1 * u_i+1 + c2 * u_i + c3 * u_i-1 = f_i
c1 = c_d[base_idx];
c2 = b_d[base_idx];
f_i = d_d[base_idx];
#ifndef NATIVE_DIVIDE
w_d[getLocalIdx(i, 1, num_systems)] = - c1 / c2;
x_prev = f_i / c2;
#else
w_d[getLocalIdx(i, 1, num_systems)] = __fdiv_rn(-c1, c2);
x_prev = __fdiv_rn(f_i, c2);
#endif
// forward trace
int idx = base_idx;
x_d[base_idx] = x_prev;
for (int k = 1; k < system_size-1; k++)
{
idx += stride;
c1 = c_d[idx];
c2 = b_d[idx];
c3 = a_d[idx];
f_i = d_d[idx];
float q = (c3 * w_d[getLocalIdx(i, k, num_systems)] + c2);
#ifndef NATIVE_DIVIDE
float t = 1 / q;
#else
float t = __frcp_rn(q);
#endif
x_next = (f_i - c3 * x_prev) * t;
x_d[idx] = x_prev = x_next;
w_d[getLocalIdx(i, k+1, num_systems)] = - c1 * t;
}
idx += stride;
c2 = b_d[idx];
c3 = a_d[idx];
f_i = d_d[idx];
float q = (c3 * w_d[getLocalIdx(i, system_size-1, num_systems)] + c2);
#ifndef NATIVE_DIVIDE
float t = 1 / q;
#else
float t = __frcp_rn(q);
#endif
x_next = (f_i - c3 * x_prev) * t;
x_d[idx] = x_prev = x_next;
// backward trace
for (int k = system_size-2; k >= 0; k--)
{
idx -= stride;
x_next = x_d[idx];
x_next += x_prev * w_d[getLocalIdx(i, k+1, num_systems)];
x_d[idx] = x_prev = x_next;
}
}
__device__
inline float4 load(const float* a, int i)
{
return make_float4(a[i], a[i+1], a[i+2], a[i+3]);
}
__device__
inline void store(float* a, int i, float4 v)
{
a[i] = v.x;
a[i+1] = v.y;
a[i+2] = v.z;
a[i+3] = v.w;
}
inline __device__ float4 operator*(float4 a, float4 b)
{
return make_float4(a.x * b.x, a.y * b.y, a.z * b.z, a.w * b.w);
}
inline __device__ float4 operator/(float4 a, float4 b)
{
return make_float4(a.x / b.x, a.y / b.y, a.z / b.z, a.w / b.w);
}
inline __device__ float4 operator+(float4 a, float4 b)
{
return make_float4(a.x + b.x, a.y + b.y, a.z + b.z, a.w + b.w);
}
inline __device__ float4 operator-(float4 a, float4 b)
{
return make_float4(a.x - b.x, a.y - b.y, a.z - b.z, a.w - b.w);
}
inline __device__ float4 native_divide(float4 a, float4 b)
{
return make_float4(
__fdiv_rn(a.x , b.x),
__fdiv_rn(a.y , b.y),
__fdiv_rn(a.z , b.z),
__fdiv_rn(a.w , b.w));
}
inline __device__ float4 native_recip(float4 a)
{
return make_float4(
__frcp_rn(a.x),
__frcp_rn(a.y),
__frcp_rn(a.z),
__frcp_rn(a.w));
}
inline __device__ float4 operator-(float4 &a)
{
return make_float4(-a.x, -a.y, -a.z, -a.w);
}
inline __device__ void operator+=(float4 &a, float4 b)
{
a.x += b.x;
a.y += b.y;
a.z += b.z;
a.w += b.w;
}
__global__ void sweep_small_systems_global_vec4_kernel(
const float*__restrict__ a_d,
const float*__restrict__ b_d,
const float*__restrict__ c_d,
const float*__restrict__ d_d,
float*__restrict__ x_d,
float*__restrict__ w_d,
const int system_size,
const int num_systems,
const bool reorder)
{
int j = blockIdx.x * blockDim.x + threadIdx.x;
int i = j << 2;
// need to check for in-bounds because of the thread block size
if (i >= num_systems) return;
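	// Each thread handles four consecutive systems (i .. i+3), so this
	// vectorized path implicitly assumes num_systems is a multiple of 4;
	// otherwise the last partial group would be read and written out of bounds.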
int stride = reorder ? num_systems: 4;
int base_idx = reorder ? i : i * system_size;
float4 c1, c2, c3;
float4 f_i, x_prev, x_next;
// solving next system:
// c1 * u_i+1 + c2 * u_i + c3 * u_i-1 = f_i
c1 = load(c_d, base_idx);
c2 = load(b_d, base_idx);
f_i = load(d_d, base_idx);
#ifndef NATIVE_DIVIDE
store(w_d, getLocalIdx(i, 1, num_systems), - c1 / c2);
x_prev = f_i / c2;
#else
store(w_d, getLocalIdx(i, 1, num_systems), native_divide(-c1, c2));
x_prev = native_divide(f_i, c2);
#endif
// forward trace
int idx = base_idx;
store(x_d, base_idx, x_prev);
for (int k = 1; k < system_size-1; k++)
{
idx += stride;
c1 = load(c_d, idx);
c2 = load(b_d, idx);
c3 = load(a_d, idx);
f_i = load(d_d, idx);
float4 q = (c3 * load(w_d, getLocalIdx(i, k, num_systems)) + c2);
#ifndef NATIVE_DIVIDE
float4 t = make_float4(1,1,1,1) / q;
#else
float4 t = native_recip(q);
#endif
x_next = (f_i - c3 * x_prev) * t;
x_prev = x_next;
store(x_d, idx, x_prev);
store(w_d, getLocalIdx(i, k+1, num_systems), - c1 * t);
}
idx += stride;
c2 = load(b_d, idx);
c3 = load(a_d, idx);
f_i = load(d_d, idx);
float4 q = (c3 * load(w_d, getLocalIdx(i, system_size-1, num_systems)) + c2);
#ifndef NATIVE_DIVIDE
float4 t = make_float4(1,1,1,1) / q;
#else
float4 t = native_recip(q);
#endif
x_next = (f_i - c3 * x_prev) * t;
x_prev = x_next;
store(x_d, idx, x_prev);
// backward trace
for (int k = system_size-2; k >= 0; k--)
{
idx -= stride;
x_next = load(x_d, idx);
x_next += x_prev * load(w_d, getLocalIdx(i, k+1, num_systems));
x_prev = x_next;
store(x_d, idx, x_prev);
}
}
// This kernel is optimized to ensure all global reads and writes are coalesced,
// and to avoid bank conflicts in shared memory. This kernel is up to 11x faster
// than a naive element-wise transpose. Note that the shared memory array is sized to
// (BLOCK_DIM+1)*BLOCK_DIM. This pads each row of the 2D block in shared memory
// so that bank conflicts do not occur when threads address the array column-wise.
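// Why the +1 padding helps: with the usual BLOCK_DIM of 16, a column-wise
// access below walks shared memory with a stride of BLOCK_DIM+1 = 17 floats;
// 17 is coprime to the number of shared-memory banks (16 on the hardware this
// sample targeted, 32 on later GPUs), so the elements of a tile column land in
// distinct banks instead of all mapping to one bank as a stride of 16 would.
// The kernel also assumes TRANSPOSE_BLOCK_DIM == BLOCK_DIM (the tile is
// declared with one macro and indexed with the other) and that width and
// height are multiples of BLOCK_DIM, since threads failing the bounds checks
// return before reaching __syncthreads().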
__global__ void transpose(
float*__restrict__ odata,
const float*__restrict__ idata,
const int width,
const int height)
{
__shared__ float block[TRANSPOSE_BLOCK_DIM * (TRANSPOSE_BLOCK_DIM+1)];
int blockIdxx = blockIdx.x;
int blockIdxy = blockIdx.y;
int threadIdxx = threadIdx.x;
int threadIdxy = threadIdx.y;
// evaluate coordinates and check bounds
int i0 = __mul24(blockIdxx, BLOCK_DIM) + threadIdxx;
int j0 = __mul24(blockIdxy, BLOCK_DIM) + threadIdxy;
if (i0 >= width || j0 >= height) return;
int i1 = __mul24(blockIdxy, BLOCK_DIM) + threadIdxx;
int j1 = __mul24(blockIdxx, BLOCK_DIM) + threadIdxy;
if (i1 >= height || j1 >= width) return;
int idx_a = i0 + __mul24(j0, width);
int idx_b = i1 + __mul24(j1, height);
// read the tile from global memory into shared memory
block[threadIdxy * (BLOCK_DIM+1) + threadIdxx] = idata[idx_a];
__syncthreads();
// write back to transposed array
odata[idx_b] = block[threadIdxx * (BLOCK_DIM+1) + threadIdxy];
}
| b00b8cd611ba3919db8898b84bc7fc08c4b4bdd6.cu | /*
* Copyright 1993-2010 NVIDIA Corporation. All rights reserved.
*
* Please refer to the NVIDIA end user license agreement (EULA) associated
* with this source code for terms and conditions that govern your use of
* this software. Any use, reproduction, disclosure, or distribution of
* this software and related documentation outside the terms of the EULA
* is strictly prohibited.
*
* Tridiagonal solvers.
* Device code for sweep solver (one-system-per-thread).
*
* NVIDIA, Nikolai Sakharnykh, 2009
*/
// solves a bunch of tridiagonal linear systems
// much better performance when doing data reordering before
// so that all memory accesses are coalesced
__global__ void sweep_small_systems_local_kernel(
const float*__restrict__ a_d,
const float*__restrict__ b_d,
const float*__restrict__ c_d,
const float*__restrict__ d_d,
float*__restrict__ x_d,
const int system_size,
const int num_systems,
const bool reorder)
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
// need to check for in-bounds because of the thread block size
if (i >= num_systems) return;
int stride = reorder ? num_systems: 1;
int base_idx = reorder ? i : i * system_size;
// local memory
float a[128];
float c1, c2, c3;
float f_i, x_prev, x_next;
// solving next system:
// c1 * u_i+1 + c2 * u_i + c3 * u_i-1 = f_i
c1 = c_d[base_idx];
c2 = b_d[base_idx];
f_i = d_d[base_idx];
#ifndef NATIVE_DIVIDE
a[1] = - c1 / c2;
x_prev = f_i / c2;
#else
a[1] = - __fdiv_rn(c1, c2);
x_prev = __fdiv_rn(f_i, c2);
#endif
// forward trace
int idx = base_idx;
x_d[base_idx] = x_prev;
for (int k = 1; k < system_size-1; k++)
{
idx += stride;
c1 = c_d[idx];
c2 = b_d[idx];
c3 = a_d[idx];
f_i = d_d[idx];
float q = (c3 * a[k] + c2);
#ifndef NATIVE_DIVIDE
float t = 1 / q;
#else
float t = __frcp_rn(q);
#endif
x_next = (f_i - c3 * x_prev) * t;
x_d[idx] = x_prev = x_next;
a[k+1] = - c1 * t;
}
idx += stride;
c2 = b_d[idx];
c3 = a_d[idx];
f_i = d_d[idx];
float q = (c3 * a[system_size-1] + c2);
#ifndef NATIVE_DIVIDE
float t = 1 / q;
#else
float t = __frcp_rn(q);
#endif
x_next = (f_i - c3 * x_prev) * t;
x_d[idx] = x_prev = x_next;
// backward trace
for (int k = system_size-2; k >= 0; k--)
{
idx -= stride;
x_next = x_d[idx];
x_next += x_prev * a[k+1];
x_d[idx] = x_prev = x_next;
}
}
__device__
inline int getLocalIdx(int i, int k, int num_systems)
{
return i + num_systems * k;
// uncomment for uncoalesced mem access
// return k + system_size * i;
}
__global__ void sweep_small_systems_global_kernel(
const float*__restrict__ a_d,
const float*__restrict__ b_d,
const float*__restrict__ c_d,
const float*__restrict__ d_d,
float*__restrict__ x_d,
float*__restrict__ w_d,
const int system_size,
const int num_systems,
const bool reorder)
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
// need to check for in-bounds because of the thread block size
if (i >= num_systems) return;
int stride = reorder ? num_systems: 1;
int base_idx = reorder ? i : i * system_size;
float c1, c2, c3;
float f_i, x_prev, x_next;
// solving next system:
// c1 * u_i+1 + c2 * u_i + c3 * u_i-1 = f_i
c1 = c_d[base_idx];
c2 = b_d[base_idx];
f_i = d_d[base_idx];
#ifndef NATIVE_DIVIDE
w_d[getLocalIdx(i, 1, num_systems)] = - c1 / c2;
x_prev = f_i / c2;
#else
w_d[getLocalIdx(i, 1, num_systems)] = __fdiv_rn(-c1, c2);
x_prev = __fdiv_rn(f_i, c2);
#endif
// forward trace
int idx = base_idx;
x_d[base_idx] = x_prev;
for (int k = 1; k < system_size-1; k++)
{
idx += stride;
c1 = c_d[idx];
c2 = b_d[idx];
c3 = a_d[idx];
f_i = d_d[idx];
float q = (c3 * w_d[getLocalIdx(i, k, num_systems)] + c2);
#ifndef NATIVE_DIVIDE
float t = 1 / q;
#else
float t = __frcp_rn(q);
#endif
x_next = (f_i - c3 * x_prev) * t;
x_d[idx] = x_prev = x_next;
w_d[getLocalIdx(i, k+1, num_systems)] = - c1 * t;
}
idx += stride;
c2 = b_d[idx];
c3 = a_d[idx];
f_i = d_d[idx];
float q = (c3 * w_d[getLocalIdx(i, system_size-1, num_systems)] + c2);
#ifndef NATIVE_DIVIDE
float t = 1 / q;
#else
float t = __frcp_rn(q);
#endif
x_next = (f_i - c3 * x_prev) * t;
x_d[idx] = x_prev = x_next;
// backward trace
for (int k = system_size-2; k >= 0; k--)
{
idx -= stride;
x_next = x_d[idx];
x_next += x_prev * w_d[getLocalIdx(i, k+1, num_systems)];
x_d[idx] = x_prev = x_next;
}
}
__device__
inline float4 load(const float* a, int i)
{
return make_float4(a[i], a[i+1], a[i+2], a[i+3]);
}
__device__
inline void store(float* a, int i, float4 v)
{
a[i] = v.x;
a[i+1] = v.y;
a[i+2] = v.z;
a[i+3] = v.w;
}
inline __device__ float4 operator*(float4 a, float4 b)
{
return make_float4(a.x * b.x, a.y * b.y, a.z * b.z, a.w * b.w);
}
inline __device__ float4 operator/(float4 a, float4 b)
{
return make_float4(a.x / b.x, a.y / b.y, a.z / b.z, a.w / b.w);
}
inline __device__ float4 operator+(float4 a, float4 b)
{
return make_float4(a.x + b.x, a.y + b.y, a.z + b.z, a.w + b.w);
}
inline __device__ float4 operator-(float4 a, float4 b)
{
return make_float4(a.x - b.x, a.y - b.y, a.z - b.z, a.w - b.w);
}
inline __device__ float4 native_divide(float4 a, float4 b)
{
return make_float4(
__fdiv_rn(a.x , b.x),
__fdiv_rn(a.y , b.y),
__fdiv_rn(a.z , b.z),
__fdiv_rn(a.w , b.w));
}
inline __device__ float4 native_recip(float4 a)
{
return make_float4(
__frcp_rn(a.x),
__frcp_rn(a.y),
__frcp_rn(a.z),
__frcp_rn(a.w));
}
inline __device__ float4 operator-(float4 &a)
{
return make_float4(-a.x, -a.y, -a.z, -a.w);
}
inline __device__ void operator+=(float4 &a, float4 b)
{
a.x += b.x;
a.y += b.y;
a.z += b.z;
a.w += b.w;
}
__global__ void sweep_small_systems_global_vec4_kernel(
const float*__restrict__ a_d,
const float*__restrict__ b_d,
const float*__restrict__ c_d,
const float*__restrict__ d_d,
float*__restrict__ x_d,
float*__restrict__ w_d,
const int system_size,
const int num_systems,
const bool reorder)
{
int j = blockIdx.x * blockDim.x + threadIdx.x;
int i = j << 2;
// need to check for in-bounds because of the thread block size
if (i >= num_systems) return;
int stride = reorder ? num_systems: 4;
int base_idx = reorder ? i : i * system_size;
float4 c1, c2, c3;
float4 f_i, x_prev, x_next;
// solving next system:
// c1 * u_i+1 + c2 * u_i + c3 * u_i-1 = f_i
c1 = load(c_d, base_idx);
c2 = load(b_d, base_idx);
f_i = load(d_d, base_idx);
#ifndef NATIVE_DIVIDE
store(w_d, getLocalIdx(i, 1, num_systems), - c1 / c2);
x_prev = f_i / c2;
#else
store(w_d, getLocalIdx(i, 1, num_systems), native_divide(-c1, c2));
x_prev = native_divide(f_i, c2);
#endif
// forward trace
int idx = base_idx;
store(x_d, base_idx, x_prev);
for (int k = 1; k < system_size-1; k++)
{
idx += stride;
c1 = load(c_d, idx);
c2 = load(b_d, idx);
c3 = load(a_d, idx);
f_i = load(d_d, idx);
float4 q = (c3 * load(w_d, getLocalIdx(i, k, num_systems)) + c2);
#ifndef NATIVE_DIVIDE
float4 t = make_float4(1,1,1,1) / q;
#else
float4 t = native_recip(q);
#endif
x_next = (f_i - c3 * x_prev) * t;
x_prev = x_next;
store(x_d, idx, x_prev);
store(w_d, getLocalIdx(i, k+1, num_systems), - c1 * t);
}
idx += stride;
c2 = load(b_d, idx);
c3 = load(a_d, idx);
f_i = load(d_d, idx);
float4 q = (c3 * load(w_d, getLocalIdx(i, system_size-1, num_systems)) + c2);
#ifndef NATIVE_DIVIDE
float4 t = make_float4(1,1,1,1) / q;
#else
float4 t = native_recip(q);
#endif
x_next = (f_i - c3 * x_prev) * t;
x_prev = x_next;
store(x_d, idx, x_prev);
// backward trace
for (int k = system_size-2; k >= 0; k--)
{
idx -= stride;
x_next = load(x_d, idx);
x_next += x_prev * load(w_d, getLocalIdx(i, k+1, num_systems));
x_prev = x_next;
store(x_d, idx, x_prev);
}
}
// This kernel is optimized to ensure all global reads and writes are coalesced,
// and to avoid bank conflicts in shared memory. This kernel is up to 11x faster
// than a naive transpose kernel. Note that the shared memory array is sized to
// (BLOCK_DIM+1)*BLOCK_DIM. This pads each row of the 2D block in shared memory
// so that bank conflicts do not occur when threads address the array column-wise.
__global__ void transpose(
float*__restrict__ odata,
const float*__restrict__ idata,
const int width,
const int height)
{
__shared__ float block[TRANSPOSE_BLOCK_DIM * (TRANSPOSE_BLOCK_DIM+1)];
int blockIdxx = blockIdx.x;
int blockIdxy = blockIdx.y;
int threadIdxx = threadIdx.x;
int threadIdxy = threadIdx.y;
// evaluate coordinates and check bounds
int i0 = __mul24(blockIdxx, BLOCK_DIM) + threadIdxx;
int j0 = __mul24(blockIdxy, BLOCK_DIM) + threadIdxy;
if (i0 >= width || j0 >= height) return;
int i1 = __mul24(blockIdxy, BLOCK_DIM) + threadIdxx;
int j1 = __mul24(blockIdxx, BLOCK_DIM) + threadIdxy;
if (i1 >= height || j1 >= width) return;
int idx_a = i0 + __mul24(j0, width);
int idx_b = i1 + __mul24(j1, height);
// read the tile from global memory into shared memory
block[threadIdxy * (BLOCK_DIM+1) + threadIdxx] = idata[idx_a];
__syncthreads();
// write back to transposed array
odata[idx_b] = block[threadIdxx * (BLOCK_DIM+1) + threadIdxy];
}
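// Illustrative host-side launch sketch (not part of the original file): a 2D
// grid of BLOCK_DIM x BLOCK_DIM thread blocks tiling the width x height
// matrix. BLOCK_DIM is assumed to be the tile-size macro used by the kernel
// above; the wrapper name is an assumption made for this example only.
static void launch_transpose(float* odata, const float* idata, int width, int height)
{
dim3 block(BLOCK_DIM, BLOCK_DIM);
dim3 grid((width + BLOCK_DIM - 1) / BLOCK_DIM,
(height + BLOCK_DIM - 1) / BLOCK_DIM);
transpose<<<grid, block>>>(odata, idata, width, height);
}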
|
78c8f5764834478802429ac3aa88e19a769b751c.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdlib.h>
#include <math.h>
// #include <stdio.h>
#define MAXTHREADS MAX_THREADS
#define PI 3.1415926536
__device__ float normalDistribution(float* x, float* mu,
float* diagonalCovariance, unsigned int dim){
/*
x: individual point being evaluated, x[dim]
mu: mean of the normal distribution being evaluated mu[dim]
diagonalCovariance: for the norm dist diagonalCovariance[dim]
dim: dimensionality of the distribution, also
equal to length of the previous vectors
Evaluates the normalDistribution on the GPU, for
diagonal Covariance Matrices only.
*/
float total = 0;
float det = 1;
float finval = 0;
float denom = 0;
float temp = 0;
for (int i = 0; i < dim; ++i)
{
temp = (x[i]-mu[i]);
temp *= temp; // Square it
total += temp / diagonalCovariance[i];
//Note this is the stuff that goes inside the normal
det *= diagonalCovariance[i];
//TODO: replace with memory implementation instead?
}
// printf("temp = %f, det = %f, total = %f\n", temp, det, total);
total*=-1/2.0;
finval = expf(total);
denom = powf(2*PI, dim) * det;
return (rsqrtf(denom) * finval);
}
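/*
* Added reference comment (not in the original source): the value returned
* above is the diagonal-covariance Gaussian density
* N(x; mu, diag(s)) = exp(-0.5 * sum_i (x_i - mu_i)^2 / s_i)
* / sqrt((2*pi)^dim * prod_i s_i),
* where s_i are the entries of diagonalCovariance; rsqrtf(denom) supplies the
* 1/sqrt of the normalisation constant.
*/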
__global__ void likelihoodKernel(float *Xpoints, float *means, float *diagCovs,
float *weights,
unsigned int dim, unsigned int numPoints, unsigned int numMixtures,
float* finalLikelihood)
{
/*
All 2d arrays are passed in as row major
Xpoints - 2d array of points, numPoints rows of vectors of dim length
Xpoints[numPoints][dim]
Means - 2d array of means, numMixtures rows of vectors of dim
Means[numMixtures][dim]
diagCovs - 2d array of cov diagonals, ditto
diagCovs[numMixtures][dim]
weights - 1d array of length numMixtures
weights[numMixtures]
numPoints is the actual number of points being evaluated
This is likely to be a subset of what actually needs to be processed
GridDim*BlockDim > numPoints
finalLikelihood: Likelihood value that we want to return
finalLikelihood[blockIdx.x]
Since threads are usually a power of 2, we have to check if we're out of bounds
with regards to the data.
*/
__shared__ float sarray[MAXTHREADS];
//Should be consistently at the max allowed and easier than dynamic allocation
int index = blockIdx.x * blockDim.x + threadIdx.x;
int threadIndex = threadIdx.x;
sarray[threadIndex] = 0;
__syncthreads();
//Following CUDA guidelines here for quick reduction
//TODO: Speed up computation by having a block per mixture?
// If possible, also allows for marginal updates
if (index<numPoints) //Check that we're in bounds!
{
// Just make sure we have stuff to compute
// value accumulates the weighted mixture density for the point at index
float value = 0;
for (int i = 0; i < numMixtures; ++i)
{
value += weights[i] * normalDistribution(Xpoints+(index*dim), means+(i*dim),
diagCovs+(i*dim), dim);
}
sarray[threadIndex] = logf(value); //Log Likelihood
}
else
{
sarray[threadIndex] = 0.0f; // i.e. this thread contributes nothing to the sum
}
// finalLikelihood[threadIndex] = sarray[threadIndex];
// Reduction
__syncthreads();
for (int s = blockDim.x/2; s > 0; s>>=1)
{
//Only works for powers of 2
if (threadIndex<s)
{
sarray[threadIndex] += sarray[threadIndex+s];
}
__syncthreads();
}
if (threadIndex==0)
//Since everything has been synced, sarray[0] now holds our result
{
finalLikelihood[blockIdx.x] = sarray[0];
}
} | 78c8f5764834478802429ac3aa88e19a769b751c.cu | #include <stdlib.h>
#include <math.h>
// #include <stdio.h>
#define MAXTHREADS MAX_THREADS
#define PI 3.1415926536
__device__ float normalDistribution(float* x, float* mu,
float* diagonalCovariance, unsigned int dim){
/*
x: individual point being evaluated, x[dim]
mu: mean of the normal distribution being evaluated mu[dim]
diagonalCovariance: for the norm dist diagonalCovariance[dim]
dim: dimensionality of the distribution, also
equal to length of the previous vectors
Evaluates the normalDistribution on the GPU, for
diagonal Covariance Matrices only.
*/
float total = 0;
float det = 1;
float finval = 0;
float denom = 0;
float temp = 0;
for (int i = 0; i < dim; ++i)
{
temp = (x[i]-mu[i]);
temp *= temp; // Square it
total += temp / diagonalCovariance[i];
//Note this is the stuff that goes inside the normal
det *= diagonalCovariance[i];
//TODO: replace with memory implementation instead?
}
// printf("temp = %f, det = %f, total = %f\n", temp, det, total);
total*=-1/2.0;
finval = expf(total);
denom = powf(2*PI, dim) * det;
return (rsqrtf(denom) * finval);
}
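/*
* Illustrative host-side usage sketch (added comment, not in the original
* source): the kernel below leaves one partial log-likelihood per block in
* finalLikelihood[], so the host finishes the reduction with a plain sum.
* Buffer names and the use of MAXTHREADS as the block size are assumptions
* made for this example only (the in-kernel reduction needs a power-of-two
* block size).
*
* int blocks = (numPoints + MAXTHREADS - 1) / MAXTHREADS;
* likelihoodKernel<<<blocks, MAXTHREADS>>>(d_x, d_means, d_covs, d_weights,
* dim, numPoints, numMixtures, d_partial);
* cudaMemcpy(h_partial, d_partial, blocks * sizeof(float), cudaMemcpyDeviceToHost);
* float logLik = 0.0f;
* for (int b = 0; b < blocks; ++b) logLik += h_partial[b];
*/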
__global__ void likelihoodKernel(float *Xpoints, float *means, float *diagCovs,
float *weights,
unsigned int dim, unsigned int numPoints, unsigned int numMixtures,
float* finalLikelihood)
{
/*
All 2d arrays are passed in as row major
Xpoints - 2d array of points, numPoints rows of vectors of dim length
Xpoints[numPoints][dim]
Means - 2d array of means, numMixtures rows of vectors of dim
Means[numMixtures][dim]
diagCovs - 2d array of cov diagonals, ditto
diagCovs[numMixtures][dim]
weights - 1d array of length numMixtures
weights[numMixtures]
numPoints is the actual number of points being evaluated
This is likely to be a subset of what actually needs to be processed
GridDim*BlockDim > numPoints
finalLikelihood: Likelihood value that we want to return
finalLikelihood[blockIdx.x]
Since threads are usually a power of 2, we have to check if we're out of bounds
with regards to the data.
*/
__shared__ float sarray[MAXTHREADS];
//Should be consistently at the max allowed and easier than dynamic allocation
int index = blockIdx.x * blockDim.x + threadIdx.x;
int threadIndex = threadIdx.x;
sarray[threadIndex] = 0;
__syncthreads();
//Following CUDA guidelines here for quick reduction
//TODO: Speed up computation by having a block per mixture?
// If possible, also allows for marginal updates
if (index<numPoints) //Check that we're in bounds!
{
// Just make sure we have stuff to compute
// value accumulates the weighted mixture density for the point at index
float value = 0;
for (int i = 0; i < numMixtures; ++i)
{
value += weights[i] * normalDistribution(Xpoints+(index*dim), means+(i*dim),
diagCovs+(i*dim), dim);
}
sarray[threadIndex] = logf(value); //Log Likelihood
}
else
{
sarray[threadIndex] = 0.0f; // i.e. this thread contributes nothing to the sum
}
// finalLikelihood[threadIndex] = sarray[threadIndex];
// Reduction
__syncthreads();
for (int s = blockDim.x/2; s > 0; s>>=1)
{
//Only works for powers of 2
if (threadIndex<s)
{
sarray[threadIndex] += sarray[threadIndex+s];
}
__syncthreads();
}
if (threadIndex==0)
//Since everything has been synced, sarray[0] now holds our result
{
finalLikelihood[blockIdx.x] = sarray[0];
}
} |
487d5e2200d6d22aa73da095fde2a7ac1a84b979.hip | // !!! This is a file automatically generated by hipify!!!
/*
* @author Connie Shi
* Lab 3: Write a reduction program in CUDA that finds the maximum
* of an array of M integers.
* Part 3 (Improved):
* Write a CUDA version that makes use of shared memory,
* prefetching, and different granularities. Performs better
* than original cudashared.cu version, because it does not
* divide the data into subsets to search sequentially.
*
* Should be run on cuda1 machine with 1024 max threads per block.
*/
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <hip/hip_runtime.h>
#define THREADS_PER_BLOCK 1024
#define WARP 32
#define MAX(a, b) ((a) > (b) ? (a) : (b))
/* Function Declarations */
void generate_random(int random[], int num_elements);
__global__ void max_in_blocks(int random[], int num_elements);
__device__ void sequential(int random[], int num_elements);
/* Generates M random numbers in the range [0, 100000] */
void generate_random(int random[], int num_elements) {
int i;
time_t t;
srand((unsigned)time(&t)); //randomizes seed
for (i = 0; i < num_elements; i++) {
random[i] = (int)(((double)rand()/RAND_MAX)*100000);
}
}
/* Kernel launched from the host and executed on the device.
* Uses a tree-like structure to do parallel max reduction.
* Avoids branch divergence, uses prefetching and shared memory.
*/
__global__
void max_in_blocks(int random[], int num_elements) {
__shared__ int sdata[THREADS_PER_BLOCK];
unsigned int tid = threadIdx.x;
unsigned int index = blockIdx.x * blockDim.x + threadIdx.x;
int stride;
// Take data from global memory to shared memory for faster access
sdata[tid] = random[index];
__syncthreads();
for (stride = blockDim.x/2; stride >= 32; stride >>= 1) {
if (tid < stride && tid + stride < num_elements) {
int current = sdata[tid + stride];
if (sdata[tid] < current) {
sdata[tid] = current;
}
}
__syncthreads();
}
// Prevents branch divergence: the reduction loop above stops
// once the stride reaches the warp size, at which point the block
// maximum lies somewhere in the first 32 positions.
// Sequentially searching those 32 elements is very fast.
if (tid < 32) {
sequential(sdata, num_elements);
random[blockIdx.x] = sdata[0];
}
}
/* Sequentially searches through the first 32 positions of the block
* to prevent further divvying up of the warp into different tasks.
*/
__device__
void sequential(int sdata[], int num_elements) {
int i;
int max = 0;
int tid = threadIdx.x;
for (i = tid; i < tid + WARP && i < num_elements; i++) {
if (max < sdata[i]) {
max = sdata[i];
}
}
// Put in index position, first element of the block
sdata[0] = max;
}
/**************************************************************/
int main(int argc, char*argv[]) {
int* h_random;
int* d_random;
int i;
int largest = 0;
clock_t start, end;
if (argc != 2) {
printf("Invalid number of commands: usage ./cudadivshared M\n");
exit(1);
}
// Generate array of random elements
int num_elements = atoi(argv[1]);
h_random = (int*)malloc(sizeof(int) * num_elements);
generate_random(h_random, num_elements);
start = clock();
// Calculation for grid dimensions
int leftover = num_elements % WARP;
int d_elements = num_elements - leftover;
int n_blocks = (int)ceil((double)d_elements/THREADS_PER_BLOCK);
int n_threads = (d_elements > THREADS_PER_BLOCK) ? THREADS_PER_BLOCK : d_elements;
// Allocate space on device and copy over elements
hipError_t err = hipMalloc((void**)&d_random, sizeof(int) * d_elements);
if (err != hipSuccess) {
printf("hipMalloc failure\n");
}
err = hipMemcpy(d_random, h_random, sizeof(int) * d_elements, hipMemcpyHostToDevice);
if (err != hipSuccess) {
printf("hipMemcpy failure\n");
}
// Execute kernel
hipLaunchKernelGGL(( max_in_blocks), dim3(n_blocks), dim3(n_threads), 0, 0, d_random, d_elements);
// While kernel is executing, find the max in leftover elements
for (i = d_elements; i < num_elements; i++) {
if (largest < h_random[i]) {
largest = h_random[i];
}
}
// Retrieve reduction results, only the first n_blocks elements
hipMemcpy(h_random, d_random, sizeof(int) * n_blocks, hipMemcpyDeviceToHost);
// Check through n_blocks elements for the max
for (i = 0; i < n_blocks; i ++) {
if (largest < h_random[i]) {
largest = h_random[i];
}
}
end = clock();
printf("Time to find max %f\n", (double)(end-start)/CLOCKS_PER_SEC);
printf("Largest: %d\n", largest);
// Clean up resources
hipFree(d_random);
free(h_random);
}
| 487d5e2200d6d22aa73da095fde2a7ac1a84b979.cu | /*
* @author Connie Shi
* Lab 3: Write a reduction program in CUDA that finds the maximum
* of an array of M integers.
* Part 3 (Improved):
* Write a CUDA version that makes use of shared memory,
* prefetching, and different granularities. Performs better
* than original cudashared.cu version, because it does not
* divide the data into subsets to search sequentially.
*
* Should be run on cuda1 machine with 1024 max threads per block.
*/
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <cuda.h>
#define THREADS_PER_BLOCK 1024
#define WARP 32
#define MAX(a, b) ((a) > (b) ? (a) : (b))
/* Function Declarations */
void generate_random(int random[], int num_elements);
__global__ void max_in_blocks(int random[], int num_elements);
__device__ void sequential(int random[], int num_elements);
/* Generates M random numbers in the range [0, 100000] */
void generate_random(int random[], int num_elements) {
int i;
time_t t;
srand((unsigned)time(&t)); //randomizes seed
for (i = 0; i < num_elements; i++) {
random[i] = (int)(((double)rand()/RAND_MAX)*100000);
}
}
/* Kernel launched from the host and executed on the device.
* Uses a tree-like structure to do parallel max reduction.
* Avoids branch divergence, uses prefetching and shared memory.
*/
__global__
void max_in_blocks(int random[], int num_elements) {
__shared__ int sdata[THREADS_PER_BLOCK];
unsigned int tid = threadIdx.x;
unsigned int index = blockIdx.x * blockDim.x + threadIdx.x;
int stride;
// Take data from global memory to shared memory for faster access
sdata[tid] = random[index];
__syncthreads();
for (stride = blockDim.x/2; stride >= 32; stride >>= 1) {
if (tid < stride && tid + stride < num_elements) {
int current = sdata[tid + stride];
if (sdata[tid] < current) {
sdata[tid] = current;
}
}
__syncthreads();
}
// Prevents branch divergence: the reduction loop above stops
// once the stride reaches the warp size, at which point the block
// maximum lies somewhere in the first 32 positions.
// Sequentially searching those 32 elements is very fast.
if (tid < 32) {
sequential(sdata, num_elements);
random[blockIdx.x] = sdata[0];
}
}
/* Sequentially searches through the first 32 positions of the block
* to prevent further divvying up of the warp into different tasks.
*/
__device__
void sequential(int sdata[], int num_elements) {
int i;
int max = 0;
int tid = threadIdx.x;
for (i = tid; i < tid + WARP && i < num_elements; i++) {
if (max < sdata[i]) {
max = sdata[i];
}
}
// Put in index position, first element of the block
sdata[0] = max;
}
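/* Alternative final stage (illustrative sketch, not used by this lab's code):
* on CUDA 9+ the 32 surviving partial maxima could instead be combined with
* warp shuffles, avoiding the extra shared-memory traffic in sequential().
*/
__device__ int warp_max(int v) {
for (int offset = WARP / 2; offset > 0; offset >>= 1)
v = max(v, __shfl_down_sync(0xffffffffu, v, offset));
return v; // lane 0 ends up holding the warp-wide maximum
}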
/**************************************************************/
int main(int argc, char*argv[]) {
int* h_random;
int* d_random;
int i;
int largest = 0;
clock_t start, end;
if (argc != 2) {
printf("Invalid number of commands: usage ./cudadivshared M\n");
exit(1);
}
// Generate array of random elements
int num_elements = atoi(argv[1]);
h_random = (int*)malloc(sizeof(int) * num_elements);
generate_random(h_random, num_elements);
start = clock();
// Calculation for grid dimensions
int leftover = num_elements % WARP;
int d_elements = num_elements - leftover;
int n_blocks = (int)ceil((double)d_elements/THREADS_PER_BLOCK);
int n_threads = (d_elements > THREADS_PER_BLOCK) ? THREADS_PER_BLOCK : d_elements;
// Allocate space on device and copy over elements
cudaError_t err = cudaMalloc((void**)&d_random, sizeof(int) * d_elements);
if (err != cudaSuccess) {
printf("cudaMalloc failure\n");
}
err = cudaMemcpy(d_random, h_random, sizeof(int) * d_elements, cudaMemcpyHostToDevice);
if (err != cudaSuccess) {
printf("cudaMemcpy failure\n");
}
// Execute kernel
max_in_blocks<<<n_blocks, n_threads>>>(d_random, d_elements);
// While kernel is executing, find the max in leftover elements
for (i = d_elements; i < num_elements; i++) {
if (largest < h_random[i]) {
largest = h_random[i];
}
}
// Retrieve reduction results, only the first n_blocks elements
cudaMemcpy(h_random, d_random, sizeof(int) * n_blocks, cudaMemcpyDeviceToHost);
// Check through n_blocks elements for the max
for (i = 0; i < n_blocks; i ++) {
if (largest < h_random[i]) {
largest = h_random[i];
}
}
end = clock();
printf("Time to find max %f\n", (double)(end-start)/CLOCKS_PER_SEC);
printf("Largest: %d\n", largest);
// Clean up resources
cudaFree(d_random);
free(h_random);
}
|
f4c76f4e2dd567847b29dbfb2576318321230e54.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
/*!
* \file multi_lans.cu
* \brief multi-tensor LANS optimizer
* \author Shuai Zheng
*/
#include "./multi_lans-inl.h"
namespace mxnet {
namespace op {
#define BLOCK_SIZE_LAMB 512
#define ILP_LAMB 4
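// Added summary comment (not in the original source): the optimizer update is
// split into two passes over all tensor chunks. KernelStep1 normalises each
// gradient by its tensor-wide L2 norm, updates the Adam-style moments
// (mean/var), and writes two candidate update directions to temp_m
// (moment based) and temp_g (gradient based), each with weight decay folded
// in. KernelStep2 scales them by the layer-wise trust ratios r1/r2_m and
// r1/r2_g, where r1 is the weight norm (optionally clamped to
// [lower_bound, upper_bound]), weights them by beta1 and 1-beta1, and
// subtracts the result from the weights.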
template <bool has_mixed_precision, typename MPDType, typename DType>
__global__ void KernelStep1(const MultiLANSKernelParam<DType, MPDType> kernel_params,
const float beta1,
const float beta2,
const MPDType beta3,
const MPDType beta4,
const float epsilon,
const float clip_gradient,
const float rescale_grad,
float* g_sq_norm,
float* temp_m,
float* temp_g,
int* block_to_tensor,
int* block_to_chunk) {
const int tensor_id = block_to_tensor[blockIdx.x];
const int chunck_id = block_to_chunk[blockIdx.x];
const int start_pos = chunck_id * kernel_params.chunk_size + threadIdx.x;
const int stop_pos = chunck_id * kernel_params.chunk_size + kernel_params.chunk_size;
MPDType g_norm = sqrtf(g_sq_norm[tensor_id]);
MPDType biascorrection1, biascorrection2;
biascorrection1 = 1.0 - static_cast<MPDType>(
pow(beta1, static_cast<float>(kernel_params.step_count[tensor_id])));
biascorrection2 = 1.0 - static_cast<MPDType>(
pow(beta2, static_cast<float>(kernel_params.step_count[tensor_id])));
MPDType r_weight[ILP_LAMB];
MPDType r_grad[ILP_LAMB];
MPDType r_mean[ILP_LAMB];
MPDType r_var[ILP_LAMB];
MPDType r_m[ILP_LAMB];
MPDType r_g[ILP_LAMB];
for (size_t i = start_pos; i < stop_pos && i < kernel_params.sizes[tensor_id];
i += blockDim.x * ILP_LAMB) {
#pragma unroll
for (int ii = 0; ii < ILP_LAMB; ii++) {
int load_pos = i + ii * blockDim.x;
if (load_pos < stop_pos && load_pos < kernel_params.sizes[tensor_id]) {
r_weight[ii] = has_mixed_precision ?
kernel_params.weights32[tensor_id][load_pos] :
static_cast<MPDType>(kernel_params.weights[tensor_id][load_pos]);
r_grad[ii] = static_cast<MPDType>(kernel_params.grads[tensor_id][load_pos]);
r_mean[ii] = kernel_params.mean[tensor_id][load_pos];
r_var[ii] = kernel_params.var[tensor_id][load_pos];
} else {
r_weight[ii] = static_cast<MPDType>(0);
r_grad[ii] = static_cast<MPDType>(0);
r_mean[ii] = static_cast<MPDType>(0);
r_var[ii] = static_cast<MPDType>(0);
}
}
#pragma unroll
for (int ii = 0; ii < ILP_LAMB; ii++) {
r_grad[ii] = (r_grad[ii] * rescale_grad) / g_norm;
if (clip_gradient >= 0.0f)
r_grad[ii] = max(min(r_grad[ii], clip_gradient), -clip_gradient);
r_mean[ii] = static_cast<MPDType>(beta1) * r_mean[ii] + beta3 * r_grad[ii];
r_var[ii] = static_cast<MPDType>(beta2) * r_var[ii] + beta4 * r_grad[ii] * r_grad[ii];
MPDType r_var_hat = sqrt(r_var[ii] / biascorrection2) + static_cast<MPDType>(epsilon);
r_m[ii] = (r_mean[ii] / biascorrection1) / r_var_hat;
r_g[ii] = r_grad[ii] / r_var_hat;
r_m[ii] = __fmaf_rn(kernel_params.wds[tensor_id], r_weight[ii], r_m[ii]);
r_g[ii] = __fmaf_rn(kernel_params.wds[tensor_id], r_weight[ii], r_g[ii]);
}
#pragma unroll
for (int ii = 0; ii < ILP_LAMB; ii++) {
int store_pos = i + ii * blockDim.x;
if (store_pos < stop_pos && store_pos < kernel_params.sizes[tensor_id]) {
kernel_params.mean[tensor_id][store_pos] = r_mean[ii];
kernel_params.var[tensor_id][store_pos] = r_var[ii];
temp_m[kernel_params.tensor2temp_g[tensor_id] + store_pos] = r_m[ii];
temp_g[kernel_params.tensor2temp_g[tensor_id] + store_pos] = r_g[ii];
}
}
}
}
template <bool has_mixed_precision, typename MPDType, typename DType>
__global__ void KernelStep2(const MultiLANSKernelParam<DType, MPDType> kernel_params,
const float beta1,
const MPDType beta3,
const float* sum_sq_weigths,
const float* sum_sq_temp_m,
const float* sum_sq_temp_g,
const float* temp_m,
const float* temp_g,
const float lower_bound,
const float upper_bound,
int* block_to_tensor,
int* block_to_chunk,
const OpReqType req) {
const int tensor_id = block_to_tensor[blockIdx.x];
const int chunck_id = block_to_chunk[blockIdx.x];
const int start_pos = chunck_id * kernel_params.chunk_size + threadIdx.x;
const int stop_pos = chunck_id * kernel_params.chunk_size + kernel_params.chunk_size;
MPDType r1 = sqrtf(sum_sq_weigths[tensor_id]);
MPDType r2_m = sqrtf(sum_sq_temp_m[tensor_id]);
MPDType r2_g = sqrtf(sum_sq_temp_g[tensor_id]);
if (lower_bound >= 0)
r1 = max(r1, lower_bound);
if (upper_bound >= 0)
r1 = min(r1, upper_bound);
MPDType lr_adjusted_m, lr_adjusted_g;
if (r1 == 0.0f || r2_m == 0.0f)
lr_adjusted_m = kernel_params.learning_rates[tensor_id];
else
lr_adjusted_m = kernel_params.learning_rates[tensor_id] * r1 / r2_m;
if (r1 == 0.0f || r2_g == 0.0f)
lr_adjusted_g = kernel_params.learning_rates[tensor_id];
else
lr_adjusted_g = kernel_params.learning_rates[tensor_id] * r1 / r2_g;
lr_adjusted_m *= static_cast<MPDType>(beta1);
lr_adjusted_g *= beta3;
MPDType r_weight[ILP_LAMB];
MPDType r_m[ILP_LAMB];
MPDType r_g[ILP_LAMB];
for (size_t i = start_pos; i < stop_pos && i < kernel_params.sizes[tensor_id];
i += blockDim.x * ILP_LAMB) {
#pragma unroll
for (int ii = 0; ii < ILP_LAMB; ii++) {
int load_pos = i + ii * blockDim.x;
if (load_pos < stop_pos && load_pos < kernel_params.sizes[tensor_id]) {
r_weight[ii] = has_mixed_precision ?
kernel_params.weights32[tensor_id][load_pos] :
static_cast<MPDType>(kernel_params.weights[tensor_id][load_pos]);
r_m[ii] = temp_m[kernel_params.tensor2temp_g[tensor_id] + load_pos];
r_g[ii] = temp_g[kernel_params.tensor2temp_g[tensor_id] + load_pos];
}
}
#pragma unroll
for (int ii = 0; ii < ILP_LAMB; ii++) {
r_weight[ii] -= lr_adjusted_m * r_m[ii] + lr_adjusted_g * r_g[ii];
}
#pragma unroll
for (int ii = 0; ii < ILP_LAMB; ii++) {
int store_pos = i + ii * blockDim.x;
if (store_pos < stop_pos && store_pos < kernel_params.sizes[tensor_id]) {
if (has_mixed_precision)
kernel_params.weights32[tensor_id][store_pos] = r_weight[ii];
KERNEL_ASSIGN(kernel_params.out_data[tensor_id][store_pos], req, r_weight[ii]);
}
}
}
}
template <typename MPDType, typename DType>
void CallKernel1(Stream<gpu>* s,
const MultiLANSKernelParam<DType, MPDType>& kernel_params,
const MultiLANSParam& param,
float* g_sq_norm,
float* temp_m,
float* temp_g,
int* block_to_tensor,
int* block_to_chunk) {
int nblocks = kernel_params.nchunks;
int* host_block2tensor = reinterpret_cast<int*>(malloc(kernel_params.nchunks * sizeof(int)));
int* host_block2chunk = reinterpret_cast<int*>(malloc(kernel_params.nchunks * sizeof(int)));
int chunk_id = 0;
for (size_t index = 0; index < kernel_params.ntensors; ++index) {
int current_chunk = 0;
for (size_t j = 0; j < kernel_params.sizes[index]; j += kernel_params.chunk_size) {
host_block2tensor[chunk_id] = index;
host_block2chunk[chunk_id] = current_chunk;
current_chunk++;
chunk_id++;
}
}
hipMemcpyAsync(block_to_tensor,
host_block2tensor,
kernel_params.nchunks * sizeof(int),
hipMemcpyHostToDevice,
Stream<gpu>::GetStream(s));
hipMemcpyAsync(block_to_chunk,
host_block2chunk,
kernel_params.nchunks * sizeof(int),
hipMemcpyHostToDevice,
Stream<gpu>::GetStream(s));
bool has_mixed_precision = !std::is_same<DType, MPDType>::value;
MPDType beta3 = 1.0 - param.beta1;
MPDType beta4 = 1.0 - param.beta2;
if (has_mixed_precision)
hipLaunchKernelGGL(( KernelStep1<true>)
, dim3(nblocks), dim3(BLOCK_SIZE_LAMB), 0, Stream<gpu>::GetStream(s), kernel_params,
param.beta1,
param.beta2,
beta3,
beta4,
param.epsilon,
param.clip_gradient,
param.rescale_grad,
g_sq_norm,
temp_m,
temp_g,
block_to_tensor,
block_to_chunk);
else
hipLaunchKernelGGL(( KernelStep1<false>)
, dim3(nblocks), dim3(BLOCK_SIZE_LAMB), 0, Stream<gpu>::GetStream(s), kernel_params,
param.beta1,
param.beta2,
beta3,
beta4,
param.epsilon,
param.clip_gradient,
param.rescale_grad,
g_sq_norm,
temp_m,
temp_g,
block_to_tensor,
block_to_chunk);
}
template <typename MPDType, typename DType>
void CallKernel2(Stream<gpu>* s,
const MultiLANSKernelParam<DType, MPDType>& kernel_params,
const MultiLANSParam& param,
float* r1,
float* r2_m,
float* r2_g,
float* temp_m,
float* temp_g,
int* block_to_tensor,
int* block_to_chunk,
const OpReqType req) {
size_t nblocks = kernel_params.nchunks;
bool has_mixed_precision = !std::is_same<DType, MPDType>::value;
MPDType beta3 = 1.0 - param.beta1;
if (has_mixed_precision)
hipLaunchKernelGGL(( KernelStep2<true>), dim3(nblocks), dim3(BLOCK_SIZE_LAMB), 0, Stream<gpu>::GetStream(s), kernel_params,
param.beta1,
beta3,
r1,
r2_m,
r2_g,
temp_m,
temp_g,
param.lower_bound,
param.upper_bound,
block_to_tensor,
block_to_chunk,
req);
else
hipLaunchKernelGGL(( KernelStep2<false>)
, dim3(nblocks), dim3(BLOCK_SIZE_LAMB), 0, Stream<gpu>::GetStream(s), kernel_params,
param.beta1,
beta3,
r1,
r2_m,
r2_g,
temp_m,
temp_g,
param.lower_bound,
param.upper_bound,
block_to_tensor,
block_to_chunk,
req);
}
NNVM_REGISTER_OP(_multi_lans_update)
.set_attr<FCompute>("FCompute<gpu>", MultiLANSUpdate<gpu, false>);
NNVM_REGISTER_OP(_multi_mp_lans_update)
.set_attr<FCompute>("FCompute<gpu>", MultiLANSUpdate<gpu, true>);
} // namespace op
} // namespace mxnet
| f4c76f4e2dd567847b29dbfb2576318321230e54.cu | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
/*!
* \file multi_lans.cu
* \brief multi-tensor LANS optimizer
* \author Shuai Zheng
*/
#include "./multi_lans-inl.h"
namespace mxnet {
namespace op {
#define BLOCK_SIZE_LAMB 512
#define ILP_LAMB 4
template <bool has_mixed_precision, typename MPDType, typename DType>
__global__ void KernelStep1(const MultiLANSKernelParam<DType, MPDType> kernel_params,
const float beta1,
const float beta2,
const MPDType beta3,
const MPDType beta4,
const float epsilon,
const float clip_gradient,
const float rescale_grad,
float* g_sq_norm,
float* temp_m,
float* temp_g,
int* block_to_tensor,
int* block_to_chunk) {
const int tensor_id = block_to_tensor[blockIdx.x];
const int chunck_id = block_to_chunk[blockIdx.x];
const int start_pos = chunck_id * kernel_params.chunk_size + threadIdx.x;
const int stop_pos = chunck_id * kernel_params.chunk_size + kernel_params.chunk_size;
MPDType g_norm = sqrtf(g_sq_norm[tensor_id]);
MPDType biascorrection1, biascorrection2;
biascorrection1 = 1.0 - static_cast<MPDType>(
pow(beta1, static_cast<float>(kernel_params.step_count[tensor_id])));
biascorrection2 = 1.0 - static_cast<MPDType>(
pow(beta2, static_cast<float>(kernel_params.step_count[tensor_id])));
MPDType r_weight[ILP_LAMB];
MPDType r_grad[ILP_LAMB];
MPDType r_mean[ILP_LAMB];
MPDType r_var[ILP_LAMB];
MPDType r_m[ILP_LAMB];
MPDType r_g[ILP_LAMB];
for (size_t i = start_pos; i < stop_pos && i < kernel_params.sizes[tensor_id];
i += blockDim.x * ILP_LAMB) {
#pragma unroll
for (int ii = 0; ii < ILP_LAMB; ii++) {
int load_pos = i + ii * blockDim.x;
if (load_pos < stop_pos && load_pos < kernel_params.sizes[tensor_id]) {
r_weight[ii] = has_mixed_precision ?
kernel_params.weights32[tensor_id][load_pos] :
static_cast<MPDType>(kernel_params.weights[tensor_id][load_pos]);
r_grad[ii] = static_cast<MPDType>(kernel_params.grads[tensor_id][load_pos]);
r_mean[ii] = kernel_params.mean[tensor_id][load_pos];
r_var[ii] = kernel_params.var[tensor_id][load_pos];
} else {
r_weight[ii] = static_cast<MPDType>(0);
r_grad[ii] = static_cast<MPDType>(0);
r_mean[ii] = static_cast<MPDType>(0);
r_var[ii] = static_cast<MPDType>(0);
}
}
#pragma unroll
for (int ii = 0; ii < ILP_LAMB; ii++) {
r_grad[ii] = (r_grad[ii] * rescale_grad) / g_norm;
if (clip_gradient >= 0.0f)
r_grad[ii] = max(min(r_grad[ii], clip_gradient), -clip_gradient);
r_mean[ii] = static_cast<MPDType>(beta1) * r_mean[ii] + beta3 * r_grad[ii];
r_var[ii] = static_cast<MPDType>(beta2) * r_var[ii] + beta4 * r_grad[ii] * r_grad[ii];
MPDType r_var_hat = sqrt(r_var[ii] / biascorrection2) + static_cast<MPDType>(epsilon);
r_m[ii] = (r_mean[ii] / biascorrection1) / r_var_hat;
r_g[ii] = r_grad[ii] / r_var_hat;
r_m[ii] = __fmaf_rn(kernel_params.wds[tensor_id], r_weight[ii], r_m[ii]);
r_g[ii] = __fmaf_rn(kernel_params.wds[tensor_id], r_weight[ii], r_g[ii]);
}
#pragma unroll
for (int ii = 0; ii < ILP_LAMB; ii++) {
int store_pos = i + ii * blockDim.x;
if (store_pos < stop_pos && store_pos < kernel_params.sizes[tensor_id]) {
kernel_params.mean[tensor_id][store_pos] = r_mean[ii];
kernel_params.var[tensor_id][store_pos] = r_var[ii];
temp_m[kernel_params.tensor2temp_g[tensor_id] + store_pos] = r_m[ii];
temp_g[kernel_params.tensor2temp_g[tensor_id] + store_pos] = r_g[ii];
}
}
}
}
template <bool has_mixed_precision, typename MPDType, typename DType>
__global__ void KernelStep2(const MultiLANSKernelParam<DType, MPDType> kernel_params,
const float beta1,
const MPDType beta3,
const float* sum_sq_weigths,
const float* sum_sq_temp_m,
const float* sum_sq_temp_g,
const float* temp_m,
const float* temp_g,
const float lower_bound,
const float upper_bound,
int* block_to_tensor,
int* block_to_chunk,
const OpReqType req) {
const int tensor_id = block_to_tensor[blockIdx.x];
const int chunck_id = block_to_chunk[blockIdx.x];
const int start_pos = chunck_id * kernel_params.chunk_size + threadIdx.x;
const int stop_pos = chunck_id * kernel_params.chunk_size + kernel_params.chunk_size;
MPDType r1 = sqrtf(sum_sq_weigths[tensor_id]);
MPDType r2_m = sqrtf(sum_sq_temp_m[tensor_id]);
MPDType r2_g = sqrtf(sum_sq_temp_g[tensor_id]);
if (lower_bound >= 0)
r1 = max(r1, lower_bound);
if (upper_bound >= 0)
r1 = min(r1, upper_bound);
MPDType lr_adjusted_m, lr_adjusted_g;
if (r1 == 0.0f || r2_m == 0.0f)
lr_adjusted_m = kernel_params.learning_rates[tensor_id];
else
lr_adjusted_m = kernel_params.learning_rates[tensor_id] * r1 / r2_m;
if (r1 == 0.0f || r2_g == 0.0f)
lr_adjusted_g = kernel_params.learning_rates[tensor_id];
else
lr_adjusted_g = kernel_params.learning_rates[tensor_id] * r1 / r2_g;
lr_adjusted_m *= static_cast<MPDType>(beta1);
lr_adjusted_g *= beta3;
MPDType r_weight[ILP_LAMB];
MPDType r_m[ILP_LAMB];
MPDType r_g[ILP_LAMB];
for (size_t i = start_pos; i < stop_pos && i < kernel_params.sizes[tensor_id];
i += blockDim.x * ILP_LAMB) {
#pragma unroll
for (int ii = 0; ii < ILP_LAMB; ii++) {
int load_pos = i + ii * blockDim.x;
if (load_pos < stop_pos && load_pos < kernel_params.sizes[tensor_id]) {
r_weight[ii] = has_mixed_precision ?
kernel_params.weights32[tensor_id][load_pos] :
static_cast<MPDType>(kernel_params.weights[tensor_id][load_pos]);
r_m[ii] = temp_m[kernel_params.tensor2temp_g[tensor_id] + load_pos];
r_g[ii] = temp_g[kernel_params.tensor2temp_g[tensor_id] + load_pos];
}
}
#pragma unroll
for (int ii = 0; ii < ILP_LAMB; ii++) {
r_weight[ii] -= lr_adjusted_m * r_m[ii] + lr_adjusted_g * r_g[ii];
}
#pragma unroll
for (int ii = 0; ii < ILP_LAMB; ii++) {
int store_pos = i + ii * blockDim.x;
if (store_pos < stop_pos && store_pos < kernel_params.sizes[tensor_id]) {
if (has_mixed_precision)
kernel_params.weights32[tensor_id][store_pos] = r_weight[ii];
KERNEL_ASSIGN(kernel_params.out_data[tensor_id][store_pos], req, r_weight[ii]);
}
}
}
}
template <typename MPDType, typename DType>
void CallKernel1(Stream<gpu>* s,
const MultiLANSKernelParam<DType, MPDType>& kernel_params,
const MultiLANSParam& param,
float* g_sq_norm,
float* temp_m,
float* temp_g,
int* block_to_tensor,
int* block_to_chunk) {
int nblocks = kernel_params.nchunks;
int* host_block2tensor = reinterpret_cast<int*>(malloc(kernel_params.nchunks * sizeof(int)));
int* host_block2chunk = reinterpret_cast<int*>(malloc(kernel_params.nchunks * sizeof(int)));
int chunk_id = 0;
for (size_t index = 0; index < kernel_params.ntensors; ++index) {
int current_chunk = 0;
for (size_t j = 0; j < kernel_params.sizes[index]; j += kernel_params.chunk_size) {
host_block2tensor[chunk_id] = index;
host_block2chunk[chunk_id] = current_chunk;
current_chunk++;
chunk_id++;
}
}
cudaMemcpyAsync(block_to_tensor,
host_block2tensor,
kernel_params.nchunks * sizeof(int),
cudaMemcpyHostToDevice,
Stream<gpu>::GetStream(s));
cudaMemcpyAsync(block_to_chunk,
host_block2chunk,
kernel_params.nchunks * sizeof(int),
cudaMemcpyHostToDevice,
Stream<gpu>::GetStream(s));
bool has_mixed_precision = !std::is_same<DType, MPDType>::value;
MPDType beta3 = 1.0 - param.beta1;
MPDType beta4 = 1.0 - param.beta2;
if (has_mixed_precision)
KernelStep1<true>
<<<nblocks, BLOCK_SIZE_LAMB, 0, Stream<gpu>::GetStream(s)>>>(kernel_params,
param.beta1,
param.beta2,
beta3,
beta4,
param.epsilon,
param.clip_gradient,
param.rescale_grad,
g_sq_norm,
temp_m,
temp_g,
block_to_tensor,
block_to_chunk);
else
KernelStep1<false>
<<<nblocks, BLOCK_SIZE_LAMB, 0, Stream<gpu>::GetStream(s)>>>(kernel_params,
param.beta1,
param.beta2,
beta3,
beta4,
param.epsilon,
param.clip_gradient,
param.rescale_grad,
g_sq_norm,
temp_m,
temp_g,
block_to_tensor,
block_to_chunk);
}
template <typename MPDType, typename DType>
void CallKernel2(Stream<gpu>* s,
const MultiLANSKernelParam<DType, MPDType>& kernel_params,
const MultiLANSParam& param,
float* r1,
float* r2_m,
float* r2_g,
float* temp_m,
float* temp_g,
int* block_to_tensor,
int* block_to_chunk,
const OpReqType req) {
size_t nblocks = kernel_params.nchunks;
bool has_mixed_precision = !std::is_same<DType, MPDType>::value;
MPDType beta3 = 1.0 - param.beta1;
if (has_mixed_precision)
KernelStep2<true><<<nblocks, BLOCK_SIZE_LAMB, 0, Stream<gpu>::GetStream(s)>>>(kernel_params,
param.beta1,
beta3,
r1,
r2_m,
r2_g,
temp_m,
temp_g,
param.lower_bound,
param.upper_bound,
block_to_tensor,
block_to_chunk,
req);
else
KernelStep2<false>
<<<nblocks, BLOCK_SIZE_LAMB, 0, Stream<gpu>::GetStream(s)>>>(kernel_params,
param.beta1,
beta3,
r1,
r2_m,
r2_g,
temp_m,
temp_g,
param.lower_bound,
param.upper_bound,
block_to_tensor,
block_to_chunk,
req);
}
NNVM_REGISTER_OP(_multi_lans_update)
.set_attr<FCompute>("FCompute<gpu>", MultiLANSUpdate<gpu, false>);
NNVM_REGISTER_OP(_multi_mp_lans_update)
.set_attr<FCompute>("FCompute<gpu>", MultiLANSUpdate<gpu, true>);
} // namespace op
} // namespace mxnet
|
b026f981424e387519eb94197fbc9b1a305ff59f.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*****************************************************************************
This file is part of the XLQC program.
Copyright (C) 2015 Xin Li <[email protected]>
Filename: cuda_rys_sp.cu
License: BSD 3-Clause License
* The implementation of Rys quadrature routines in C is taken from the
* PyQuante quantum chemistry program, Copyright (c) 2004, Richard P. Muller.
* PyQuante version 1.2 and later is covered by the modified BSD license.
* Please see int_lib/LICENSE.
This software is provided by the copyright holders and contributors "as is"
and any express or implied warranties, including, but not limited to, the
implied warranties of merchantability and fitness for a particular purpose are
disclaimed. In no event shall the copyright holder or contributors be liable
for any direct, indirect, incidental, special, exemplary, or consequential
damages (including, but not limited to, procurement of substitute goods or
services; loss of use, data, or profits; or business interruption) however
caused and on any theory of liability, whether in contract, strict liability,
or tort (including negligence or otherwise) arising in any way out of the use
of this software, even if advised of the possibility of such damage.
*****************************************************************************/
#include <string>
#include "typedef.h"
#include "cuda_rys_sp.h"
void my_cuda_safe(hipError_t err, std::string word)
{
if(err != hipSuccess)
{
fprintf(stderr, "Error during %s: ", word.c_str());
// check for error
hipDeviceSynchronize();
hipError_t error = hipGetLastError();
if(error != hipSuccess)
{
// print the CUDA error message and exit
fprintf(stderr, "CUDA error: %s\n", hipGetErrorString(error));
exit(-1);
}
}
}
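// Typical usage (illustrative example, not from the original source):
// my_cuda_safe(hipMalloc((void**)&d_buf, bytes), "hipMalloc d_buf");
// my_cuda_safe(hipMemcpy(d_buf, h_buf, bytes, hipMemcpyHostToDevice), "memcpy H2D");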
__device__ int cuda_ij2intindex(int i, int j)
{
if (i < j) {
int t = i; i = j; j = t;
}
return i * (i + 1) / 2 + j;
}
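// Added example: the packed lower-triangular index maps (i,j) = (3,1) and
// (1,3) to the same slot, 3*(3+1)/2 + 1 = 7.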
__device__ int cuda_fact(int n){
int result = 1;
for (int i = 2; i <= n; i++) result *= i;
return result;
}
__device__ int cuda_binomial(int a, int b){
return cuda_fact(a)/(cuda_fact(b)*cuda_fact(a-b));
}
__device__ void cuda_Roots(int n, float X, float roots[], float weights[]){
if (n <= 3)
cuda_Root123(n,X, roots,weights);
else if (n==4)
cuda_Root4(X, roots,weights);
else if (n==5)
cuda_Root5(X, roots,weights);
else
cuda_Root6(n,X, roots,weights);
return;
}
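// Added usage note: an n-point call fills the first n entries of each output
// array, e.g. float r[3], w[3]; cuda_Roots(3, X, r, w); leaves the quadrature
// roots in r[0..2] and the weights in w[0..2].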
__device__ void cuda_Root123(int n, float X, float roots[], float weights[]){
float R12, PIE4, R22, W22, R13, R23, W23, R33, W33;
float RT1=0,RT2=0,RT3=0,WW1=0,WW2=0,WW3=0;
float F1,F2,E,T1,T2,T3,A1,A2,Y;
R12 = 2.75255128608411E-01f;
PIE4 = 7.85398163397448E-01f;
R22 = 2.72474487139158E+00f;
W22 = 9.17517095361369E-02f;
R13 = 1.90163509193487E-01f;
R23 = 1.78449274854325E+00f;
W23 = 1.77231492083829E-01f;
R33 = 5.52534374226326E+00f;
W33 = 5.11156880411248E-03f;
if (X < 3.e-7f){
if (n == 1){
RT1 = 0.5E+00f -X/5.0E+00f;
WW1 = 1.0E+00f -X/3.0E+00f;
} else if (n == 2) {
RT1 = 1.30693606237085E-01f -2.90430236082028E-02f *X;
RT2 = 2.86930639376291E+00f -6.37623643058102E-01f *X;
WW1 = 6.52145154862545E-01f -1.22713621927067E-01f *X;
WW2 = 3.47854845137453E-01f -2.10619711404725E-01f *X;
} else if (n == 3) {
RT1 = 6.03769246832797E-02f -9.28875764357368E-03f *X;
RT2 = 7.76823355931043E-01f -1.19511285527878E-01f *X;
RT3 = 6.66279971938567E+00f -1.02504611068957E+00f *X;
WW1 = 4.67913934572691E-01f -5.64876917232519E-02f *X;
WW2 = 3.60761573048137E-01f -1.49077186455208E-01f *X;
WW3 = 1.71324492379169E-01f -1.27768455150979E-01f *X;
}
} else if (X < 1.f) {
if (n == 1){
F1 = ((((((((-8.36313918003957E-08f*X+1.21222603512827E-06f )*X-
1.15662609053481E-05f )*X+9.25197374512647E-05f )*X-
6.40994113129432E-04f )*X+3.78787044215009E-03f )*X-
1.85185172458485E-02f )*X+7.14285713298222E-02f )*X-
1.99999999997023E-01f )*X+3.33333333333318E-01f;
WW1 = (X+X)*F1+expf(-X);
RT1 = F1/(WW1-F1);
} else if (n == 2) {
F1 = ((((((((-8.36313918003957E-08f*X+1.21222603512827E-06f )*X-
1.15662609053481E-05f )*X+9.25197374512647E-05f )*X-
6.40994113129432E-04f )*X+3.78787044215009E-03f )*X-
1.85185172458485E-02f )*X+7.14285713298222E-02f )*X-
1.99999999997023E-01f )*X+3.33333333333318E-01f;
WW1 = (X+X)*F1+expf(-X);
RT1 = (((((((-2.35234358048491E-09f*X+2.49173650389842E-08f)*X-
4.558315364581E-08f)*X-2.447252174587E-06f)*X+
4.743292959463E-05f)*X-5.33184749432408E-04f )*X+
4.44654947116579E-03f )*X-2.90430236084697E-02f )*X+
1.30693606237085E-01f;
RT2 = (((((((-2.47404902329170E-08f*X+2.36809910635906E-07f)*X+
1.835367736310E-06f)*X-2.066168802076E-05f)*X-
1.345693393936E-04f)*X-5.88154362858038E-05f )*X+
5.32735082098139E-02f )*X-6.37623643056745E-01f )*X+
2.86930639376289E+00f;
WW2 = ((F1-WW1)*RT1+F1)*(1.0E+00f+RT2)/(RT2-RT1);
WW1 = WW1-WW2;
} else if (n==3){
RT1 = ((((((-5.10186691538870E-10f*X+2.40134415703450E-08f)*X-
5.01081057744427E-07f )*X+7.58291285499256E-06f )*X-
9.55085533670919E-05f )*X+1.02893039315878E-03f )*X-
9.28875764374337E-03f )*X+6.03769246832810E-02f;
RT2 = ((((((-1.29646524960555E-08f*X+7.74602292865683E-08f)*X+
1.56022811158727E-06f )*X-1.58051990661661E-05f )*X-
3.30447806384059E-04f )*X+9.74266885190267E-03f )*X-
1.19511285526388E-01f )*X+7.76823355931033E-01f;
RT3 = ((((((-9.28536484109606E-09f*X-3.02786290067014E-07f)*X-
2.50734477064200E-06f )*X-7.32728109752881E-06f )*X+
2.44217481700129E-04f )*X+4.94758452357327E-02f )*X-
1.02504611065774E+00f )*X+6.66279971938553E+00f;
F2 = ((((((((-7.60911486098850E-08f*X+1.09552870123182E-06f )*X-
1.03463270693454E-05f )*X+8.16324851790106E-05f )*X-
5.55526624875562E-04f )*X+3.20512054753924E-03f )*X-
1.51515139838540E-02f )*X+5.55555554649585E-02f )*X-
1.42857142854412E-01f )*X+1.99999999999986E-01f;
E = expf(-X);
F1 = ((X+X)*F2+E)/3.0E+00f;
WW1 = (X+X)*F1+E;
T1 = RT1/(RT1+1.0E+00f);
T2 = RT2/(RT2+1.0E+00f);
T3 = RT3/(RT3+1.0E+00f);
A2 = F2-T1*F1;
A1 = F1-T1*WW1;
WW3 = (A2-T2*A1)/((T3-T2)*(T3-T1));
WW2 = (T3*A1-A2)/((T3-T2)*(T2-T1));
WW1 = WW1-WW2-WW3;
}
} else if (X < 3.f) {
Y = X-2.0E+00f;
if (n == 1) {
F1 = ((((((((((-1.61702782425558E-10f*Y+1.96215250865776E-09f )*Y-
2.14234468198419E-08f )*Y+2.17216556336318E-07f )*Y-
1.98850171329371E-06f )*Y+1.62429321438911E-05f )*Y-
1.16740298039895E-04f )*Y+7.24888732052332E-04f )*Y-
3.79490003707156E-03f )*Y+1.61723488664661E-02f )*Y-
5.29428148329736E-02f )*Y+1.15702180856167E-01f;
WW1 = (X+X)*F1+expf(-X);
RT1 = F1/(WW1-F1);
} else if (n == 2) {
F1 = ((((((((((-1.61702782425558E-10f*Y+1.96215250865776E-09f )*Y-
2.14234468198419E-08f )*Y+2.17216556336318E-07f )*Y-
1.98850171329371E-06f )*Y+1.62429321438911E-05f )*Y-
1.16740298039895E-04f )*Y+7.24888732052332E-04f )*Y-
3.79490003707156E-03f )*Y+1.61723488664661E-02f )*Y-
5.29428148329736E-02f )*Y+1.15702180856167E-01f;
WW1 = (X+X)*F1+expf(-X);
RT1 = (((((((((-6.36859636616415E-12f*Y+8.47417064776270E-11f)*Y-
5.152207846962E-10f)*Y-3.846389873308E-10f)*Y+
8.472253388380E-08f)*Y-1.85306035634293E-06f )*Y+
2.47191693238413E-05f )*Y-2.49018321709815E-04f )*Y+
2.19173220020161E-03f )*Y-1.63329339286794E-02f )*Y+
8.68085688285261E-02f;
RT2 = ((((((((( 1.45331350488343E-10f*Y+2.07111465297976E-09f)*Y-
1.878920917404E-08f)*Y-1.725838516261E-07f)*Y+
2.247389642339E-06f)*Y+9.76783813082564E-06f )*Y-
1.93160765581969E-04f )*Y-1.58064140671893E-03f )*Y+
4.85928174507904E-02f )*Y-4.30761584997596E-01f )*Y+
1.80400974537950E+00f;
WW2 = ((F1-WW1)*RT1+F1)*(1.0E+00f+RT2)/(RT2-RT1);
WW1 = WW1-WW2;
} else if (n == 3) {
RT1 = (((((((( 1.44687969563318E-12f*Y+4.85300143926755E-12f)*Y-
6.55098264095516E-10f )*Y+1.56592951656828E-08f )*Y-
2.60122498274734E-07f )*Y+3.86118485517386E-06f )*Y-
5.13430986707889E-05f )*Y+6.03194524398109E-04f )*Y-
6.11219349825090E-03f )*Y+4.52578254679079E-02f;
RT2 = ((((((( 6.95964248788138E-10f*Y-5.35281831445517E-09f)*Y-
6.745205954533E-08f)*Y+1.502366784525E-06f)*Y+
9.923326947376E-07f)*Y-3.89147469249594E-04f )*Y+
7.51549330892401E-03f )*Y-8.48778120363400E-02f )*Y+
5.73928229597613E-01f;
RT3 = ((((((((-2.81496588401439E-10f*Y+3.61058041895031E-09f)*Y+
4.53631789436255E-08f )*Y-1.40971837780847E-07f )*Y-
6.05865557561067E-06f )*Y-5.15964042227127E-05f )*Y+
3.34761560498171E-05f )*Y+5.04871005319119E-02f )*Y-
8.24708946991557E-01f )*Y+4.81234667357205E+00f;
F2 = ((((((((((-1.48044231072140E-10f*Y+1.78157031325097E-09f )*Y-
1.92514145088973E-08f )*Y+1.92804632038796E-07f )*Y-
1.73806555021045E-06f )*Y+1.39195169625425E-05f )*Y-
9.74574633246452E-05f )*Y+5.83701488646511E-04f )*Y-
2.89955494844975E-03f )*Y+1.13847001113810E-02f )*Y-
3.23446977320647E-02f )*Y+5.29428148329709E-02f;
E = expf(-X);
F1 = ((X+X)*F2+E)/3.0E+00f;
WW1 = (X+X)*F1+E;
T1 = RT1/(RT1+1.0E+00f);
T2 = RT2/(RT2+1.0E+00f);
T3 = RT3/(RT3+1.0E+00f);
A2 = F2-T1*F1;
A1 = F1-T1*WW1;
WW3 = (A2-T2*A1)/((T3-T2)*(T3-T1));
WW2 = (T3*A1-A2)/((T3-T2)*(T2-T1));
WW1 = WW1-WW2-WW3;
}
} else if (X < 5.f){
Y = X-4.0E+00f;
if (n == 1){
F1 = ((((((((((-2.62453564772299E-11f*Y+3.24031041623823E-10f )*Y-
3.614965656163E-09f)*Y+3.760256799971E-08f)*Y-
3.553558319675E-07f)*Y+3.022556449731E-06f)*Y-
2.290098979647E-05f)*Y+1.526537461148E-04f)*Y-
8.81947375894379E-04f )*Y+4.33207949514611E-03f )*Y-
1.75257821619926E-02f )*Y+5.28406320615584E-02f;
WW1 = (X+X)*F1+expf(-X);
RT1 = F1/(WW1-F1);
} else if (n == 2) {
F1 = ((((((((((-2.62453564772299E-11f*Y+3.24031041623823E-10f )*Y-
3.614965656163E-09f)*Y+3.760256799971E-08f)*Y-
3.553558319675E-07f)*Y+3.022556449731E-06f)*Y-
2.290098979647E-05f)*Y+1.526537461148E-04f)*Y-
8.81947375894379E-04f )*Y+4.33207949514611E-03f )*Y-
1.75257821619926E-02f )*Y+5.28406320615584E-02f;
WW1 = (X+X)*F1+expf(-X);
RT1 = ((((((((-4.11560117487296E-12f*Y+7.10910223886747E-11f)*Y-
1.73508862390291E-09f )*Y+5.93066856324744E-08f )*Y-
9.76085576741771E-07f )*Y+1.08484384385679E-05f )*Y-
1.12608004981982E-04f )*Y+1.16210907653515E-03f )*Y-
9.89572595720351E-03f )*Y+6.12589701086408E-02f;
RT2 = (((((((((-1.80555625241001E-10f*Y+5.44072475994123E-10f)*Y+
1.603498045240E-08f)*Y-1.497986283037E-07f)*Y-
7.017002532106E-07f)*Y+1.85882653064034E-05f )*Y-
2.04685420150802E-05f )*Y-2.49327728643089E-03f )*Y+
3.56550690684281E-02f )*Y-2.60417417692375E-01f )*Y+
1.12155283108289E+00f;
WW2 = ((F1-WW1)*RT1+F1)*(1.0E+00f+RT2)/(RT2-RT1);
WW1 = WW1-WW2;
} else if (n == 3) {
RT1 = ((((((( 1.44265709189601E-11f*Y-4.66622033006074E-10f)*Y+
7.649155832025E-09f)*Y-1.229940017368E-07f)*Y+
2.026002142457E-06f)*Y-2.87048671521677E-05f )*Y+
3.70326938096287E-04f )*Y-4.21006346373634E-03f )*Y+
3.50898470729044E-02f;
RT2 = ((((((((-2.65526039155651E-11f*Y+1.97549041402552E-10f)*Y+
2.15971131403034E-09f )*Y-7.95045680685193E-08f )*Y+
5.15021914287057E-07f )*Y+1.11788717230514E-05f )*Y-
3.33739312603632E-04f )*Y+5.30601428208358E-03f )*Y-
5.93483267268959E-02f )*Y+4.31180523260239E-01f;
RT3 = ((((((((-3.92833750584041E-10f*Y-4.16423229782280E-09f)*Y+
4.42413039572867E-08f )*Y+6.40574545989551E-07f )*Y-
3.05512456576552E-06f )*Y-1.05296443527943E-04f )*Y-
6.14120969315617E-04f )*Y+4.89665802767005E-02f )*Y-
6.24498381002855E-01f )*Y+3.36412312243724E+00f;
F2 = ((((((((((-2.36788772599074E-11f*Y+2.89147476459092E-10f )*Y-
3.18111322308846E-09f )*Y+3.25336816562485E-08f )*Y-
3.00873821471489E-07f )*Y+2.48749160874431E-06f )*Y-
1.81353179793672E-05f )*Y+1.14504948737066E-04f )*Y-
6.10614987696677E-04f )*Y+2.64584212770942E-03f )*Y-
8.66415899015349E-03f )*Y+1.75257821619922E-02f;
E = expf(-X);
F1 = ((X+X)*F2+E)/3.0E+00f;
WW1 = (X+X)*F1+E;
T1 = RT1/(RT1+1.0E+00f);
T2 = RT2/(RT2+1.0E+00f);
T3 = RT3/(RT3+1.0E+00f);
A2 = F2-T1*F1;
A1 = F1-T1*WW1;
WW3 = (A2-T2*A1)/((T3-T2)*(T3-T1));
WW2 = (T3*A1-A2)/((T3-T2)*(T2-T1));
WW1 = WW1-WW2-WW3;
}
} else if (X < 10.f) {
E = expf(-X);
WW1 = (((((( 4.6897511375022E-01f/X-6.9955602298985E-01f)/X +
5.3689283271887E-01f)/X-3.2883030418398E-01f)/X +
2.4645596956002E-01f)/X-4.9984072848436E-01f)/X -
3.1501078774085E-06f)*E + sqrtf(PIE4/X);
F1 = (WW1-E)/(X+X);
if (n == 1)
RT1 = F1/(WW1-F1);
else if (n == 2){
Y = X-7.5E+00f;
RT1 = (((((((((((((-1.43632730148572E-16f*Y+2.38198922570405E-16f)*
Y+1.358319618800E-14f)*Y-7.064522786879E-14f)*Y-
7.719300212748E-13f)*Y+7.802544789997E-12f)*Y+
6.628721099436E-11f)*Y-1.775564159743E-09f)*Y+
1.713828823990E-08f)*Y-1.497500187053E-07f)*Y+
2.283485114279E-06f)*Y-3.76953869614706E-05f )*Y+
4.74791204651451E-04f )*Y-4.60448960876139E-03f )*Y+
3.72458587837249E-02f;
RT2 = (((((((((((( 2.48791622798900E-14f*Y-1.36113510175724E-13f)*Y-
2.224334349799E-12f)*Y+4.190559455515E-11f)*Y-
2.222722579924E-10f)*Y-2.624183464275E-09f)*Y+
6.128153450169E-08f)*Y-4.383376014528E-07f)*Y-
2.49952200232910E-06f )*Y+1.03236647888320E-04f )*Y-
1.44614664924989E-03f )*Y+1.35094294917224E-02f )*Y-
9.53478510453887E-02f )*Y+5.44765245686790E-01f;
WW2 = ((F1-WW1)*RT1+F1)*(1.0E+00f+RT2)/(RT2-RT1);
WW1 = WW1-WW2;
} else if (n == 3) {
F2 = (F1+F1+F1-E)/(X+X);
Y = X-7.5E+00f;
RT1 = ((((((((((( 5.74429401360115E-16f*Y+7.11884203790984E-16f)*Y-
6.736701449826E-14f)*Y-6.264613873998E-13f)*Y+
1.315418927040E-11f)*Y-4.23879635610964E-11f )*Y+
1.39032379769474E-09f )*Y-4.65449552856856E-08f )*Y+
7.34609900170759E-07f )*Y-1.08656008854077E-05f )*Y+
1.77930381549953E-04f )*Y-2.39864911618015E-03f )*Y+
2.39112249488821E-02f;
RT2 = ((((((((((( 1.13464096209120E-14f*Y+6.99375313934242E-15f)*Y-
8.595618132088E-13f)*Y-5.293620408757E-12f)*Y-
2.492175211635E-11f)*Y+2.73681574882729E-09f )*Y-
1.06656985608482E-08f )*Y-4.40252529648056E-07f )*Y+
9.68100917793911E-06f )*Y-1.68211091755327E-04f )*Y+
2.69443611274173E-03f )*Y-3.23845035189063E-02f )*Y+
2.75969447451882E-01f;
RT3 = (((((((((((( 6.66339416996191E-15f*Y+1.84955640200794E-13f)*Y-
1.985141104444E-12f)*Y-2.309293727603E-11f)*Y+
3.917984522103E-10f)*Y+1.663165279876E-09f)*Y-
6.205591993923E-08f)*Y+8.769581622041E-09f)*Y+
8.97224398620038E-06f )*Y-3.14232666170796E-05f )*Y-
1.83917335649633E-03f )*Y+3.51246831672571E-02f )*Y-
3.22335051270860E-01f )*Y+1.73582831755430E+00f;
T1 = RT1/(RT1+1.0E+00f);
T2 = RT2/(RT2+1.0E+00f);
T3 = RT3/(RT3+1.0E+00f);
A2 = F2-T1*F1;
A1 = F1-T1*WW1;
WW3 = (A2-T2*A1)/((T3-T2)*(T3-T1));
WW2 = (T3*A1-A2)/((T3-T2)*(T2-T1));
WW1 = WW1-WW2-WW3;
}
} else if (X < 15.f) {
E = expf(-X);
WW1 = (((-1.8784686463512E-01f/X+2.2991849164985E-01f)/X -
4.9893752514047E-01f)/X-2.1916512131607E-05f)*E
+ sqrtf(PIE4/X);
F1 = (WW1-E)/(X+X);
if (n == 1)
RT1 = F1/(WW1-F1);
else if (n == 2) {
RT1 = ((((-1.01041157064226E-05f*X+1.19483054115173E-03f)*X -
6.73760231824074E-02f)*X+1.25705571069895E+00f)*X +
(((-8.57609422987199E+03f/X+5.91005939591842E+03f)/X -
1.70807677109425E+03f)/X+2.64536689959503E+02f)/X -
2.38570496490846E+01f)*E + R12/(X-R12);
RT2 = ((( 3.39024225137123E-04f*X-9.34976436343509E-02f)*X -
4.22216483306320E+00f)*X +
(((-2.08457050986847E+03f/X -
1.04999071905664E+03f)/X+3.39891508992661E+02f)/X -
1.56184800325063E+02f)/X+8.00839033297501E+00f)*E + R22/(X-R22);
WW2 = ((F1-WW1)*RT1+F1)*(1.0E+00f+RT2)/(RT2-RT1);
WW1 = WW1-WW2;
} else if (n == 3) {
F2 = (F1+F1+F1-E)/(X+X);
Y = X-12.5E+00f;
RT1 = ((((((((((( 4.42133001283090E-16f*Y-2.77189767070441E-15f)*Y-
4.084026087887E-14f)*Y+5.379885121517E-13f)*Y+
1.882093066702E-12f)*Y-8.67286219861085E-11f )*Y+
7.11372337079797E-10f )*Y-3.55578027040563E-09f )*Y+
1.29454702851936E-07f )*Y-4.14222202791434E-06f )*Y+
8.04427643593792E-05f )*Y-1.18587782909876E-03f )*Y+
1.53435577063174E-02f;
RT2 = ((((((((((( 6.85146742119357E-15f*Y-1.08257654410279E-14f)*Y-
8.579165965128E-13f)*Y+6.642452485783E-12f)*Y+
4.798806828724E-11f)*Y-1.13413908163831E-09f )*Y+
7.08558457182751E-09f )*Y-5.59678576054633E-08f )*Y+
2.51020389884249E-06f )*Y-6.63678914608681E-05f )*Y+
1.11888323089714E-03f )*Y-1.45361636398178E-02f )*Y+
1.65077877454402E-01f;
RT3 = (((((((((((( 3.20622388697743E-15f*Y-2.73458804864628E-14f)*Y-
3.157134329361E-13f)*Y+8.654129268056E-12f)*Y-
5.625235879301E-11f)*Y-7.718080513708E-10f)*Y+
2.064664199164E-08f)*Y-1.567725007761E-07f)*Y-
1.57938204115055E-06f )*Y+6.27436306915967E-05f )*Y-
1.01308723606946E-03f )*Y+1.13901881430697E-02f )*Y-
1.01449652899450E-01f )*Y+7.77203937334739E-01f;
T1 = RT1/(RT1+1.0E+00f);
T2 = RT2/(RT2+1.0E+00f);
T3 = RT3/(RT3+1.0E+00f);
A2 = F2-T1*F1;
A1 = F1-T1*WW1;
WW3 = (A2-T2*A1)/((T3-T2)*(T3-T1));
WW2 = (T3*A1-A2)/((T3-T2)*(T2-T1));
WW1 = WW1-WW2-WW3;
}
} else if (X < 33.f) {
E = expf(-X);
WW1 = (( 1.9623264149430E-01f/X-4.9695241464490E-01f)/X -
6.0156581186481E-05f)*E + sqrtf(PIE4/X);
F1 = (WW1-E)/(X+X);
if (n == 1)
RT1 = F1/(WW1-F1);
else if (n == 2){
RT1 = ((((-1.14906395546354E-06f*X+1.76003409708332E-04f)*X -
1.71984023644904E-02f)*X-1.37292644149838E-01f)*X +
(-4.75742064274859E+01f/X+9.21005186542857E+00f)/X -
2.31080873898939E-02f)*E + R12/(X-R12);
RT2 = ((( 3.64921633404158E-04f*X-9.71850973831558E-02f)*X -
4.02886174850252E+00f)*X +
(-1.35831002139173E+02f/X -
8.66891724287962E+01f)/X+2.98011277766958E+00f)*E + R22/(X-R22);
WW2 = ((F1-WW1)*RT1+F1)*(1.0E+00f+RT2)/(RT2-RT1);
WW1 = WW1-WW2;
} else if (n == 3) {
F2 = (F1+F1+F1-E)/(X+X);
if (X < 20.f) {
RT1 = ((((((-2.43270989903742E-06f*X+3.57901398988359E-04f)*X -
2.34112415981143E-02f)*X+7.81425144913975E-01f)*X -
1.73209218219175E+01f)*X+2.43517435690398E+02f)*X +
(-1.97611541576986E+04f/X+9.82441363463929E+03f)/X -
2.07970687843258E+03f)*E + R13/(X-R13);
RT2 = (((((-2.62627010965435E-04f*X+3.49187925428138E-02f)*X -
3.09337618731880E+00f)*X+1.07037141010778E+02f)*X -
2.36659637247087E+03f)*X +
((-2.91669113681020E+06f/X +
1.41129505262758E+06f)/X-2.91532335433779E+05f)/X +
3.35202872835409E+04f)*E + R23/(X-R23);
RT3 = ((((( 9.31856404738601E-05f*X-2.87029400759565E-02f)*X -
7.83503697918455E-01f)*X-1.84338896480695E+01f)*X +
4.04996712650414E+02f)*X +
(-1.89829509315154E+05f/X +
5.11498390849158E+04f)/X-6.88145821789955E+03f)*E
+ R33/(X-R33);
} else {
RT1 = ((((-4.97561537069643E-04f*X-5.00929599665316E-02f)*X +
1.31099142238996E+00f)*X-1.88336409225481E+01f)*X -
6.60344754467191E+02f /X+1.64931462413877E+02f)*E
+ R13/(X-R13);
RT2 = ((((-4.48218898474906E-03f*X-5.17373211334924E-01f)*X +
1.13691058739678E+01f)*X-1.65426392885291E+02f)*X -
6.30909125686731E+03f /X+1.52231757709236E+03f)*E
+ R23/(X-R23);
RT3 = ((((-1.38368602394293E-02f*X-1.77293428863008E+00f)*X +
1.73639054044562E+01f)*X-3.57615122086961E+02f)*X -
1.45734701095912E+04f /X+2.69831813951849E+03f)*E
+ R33/(X-R33);
}
T1 = RT1/(RT1+1.0E+00f);
T2 = RT2/(RT2+1.0E+00f);
T3 = RT3/(RT3+1.0E+00f);
A2 = F2-T1*F1;
A1 = F1-T1*WW1;
WW3 = (A2-T2*A1)/((T3-T2)*(T3-T1));
WW2 = (T3*A1-A2)/((T3-T2)*(T2-T1));
WW1 = WW1-WW2-WW3;
}
} else {
WW1 = sqrtf(PIE4/X);
if (n == 1)
RT1 = 0.5E+00f/(X-0.5E+00f);
else if (n == 2) {
if (X < 40.f) {
E = expf(-X);
RT1 = (-8.78947307498880E-01f*X+1.09243702330261E+01f)*E
+ R12/(X-R12);
RT2 = (-9.28903924275977E+00f*X+8.10642367843811E+01f)*E
+ R22/(X-R22);
WW2 = ( 4.46857389308400E+00f*X-7.79250653461045E+01f)*E + W22*WW1;
WW1 = WW1-WW2;
} else {
RT1 = R12/(X-R12);
RT2 = R22/(X-R22);
WW2 = W22*WW1;
WW1 = WW1-WW2;
}
} else if (n == 3) {
if (X < 47.f) {
E = expf(-X);
RT1 = ((-7.39058467995275E+00f*X+3.21318352526305E+02f)*X -
3.99433696473658E+03f)*E + R13/(X-R13);
RT2 = ((-7.38726243906513E+01f*X+3.13569966333873E+03f)*X -
3.86862867311321E+04f)*E + R23/(X-R23);
RT3 = ((-2.63750565461336E+02f*X+1.04412168692352E+04f)*X -
1.28094577915394E+05f)*E + R33/(X-R33);
WW3 = ((( 1.52258947224714E-01f*X-8.30661900042651E+00f)*X +
1.92977367967984E+02f)*X-1.67787926005344E+03f)*E
+ W33*WW1;
WW2 = (( 6.15072615497811E+01f*X-2.91980647450269E+03f)*X +
3.80794303087338E+04f)*E + W23*WW1;
WW1 = WW1-WW2-WW3;
} else {
RT1 = R13/(X-R13);
RT2 = R23/(X-R23);
RT3 = R33/(X-R33);
WW2 = W23*WW1;
WW3 = W33*WW1;
WW1 = WW1-WW2-WW3;
}
}
}
roots[0] = RT1;
weights[0] = WW1;
if (n > 1){
roots[1] = RT2;
weights[1] = WW2;
}
if (n > 2) {
roots[2] = RT3;
weights[2] = WW3;
}
return;
}
__device__ void cuda_Root4(float X, float roots[], float weights[]){
float R14,PIE4,R24,W24,R34,W34,R44,W44;
float RT1=0,RT2=0,RT3=0,RT4=0,WW1=0,WW2=0,WW3=0,WW4=0;
float Y,E;
R14 = 1.45303521503316E-01f;
PIE4 = 7.85398163397448E-01f;
R24 = 1.33909728812636E+00f;
W24 = 2.34479815323517E-01f;
R34 = 3.92696350135829E+00f;
W34 = 1.92704402415764E-02f;
R44 = 8.58863568901199E+00f;
W44 = 2.25229076750736E-04f;
if (X <= 3.0E-7f) {
RT1 = 3.48198973061471E-02f -4.09645850660395E-03f *X;
RT2 = 3.81567185080042E-01f -4.48902570656719E-02f *X;
RT3 = 1.73730726945891E+00f -2.04389090547327E-01f *X;
RT4 = 1.18463056481549E+01f -1.39368301742312E+00f *X;
WW1 = 3.62683783378362E-01f -3.13844305713928E-02f *X;
WW2 = 3.13706645877886E-01f -8.98046242557724E-02f *X;
WW3 = 2.22381034453372E-01f -1.29314370958973E-01f *X;
WW4 = 1.01228536290376E-01f -8.28299075414321E-02f *X;
} else if (X <= 1.f) {
RT1 = ((((((-1.95309614628539E-10f*X+5.19765728707592E-09f)*X-
1.01756452250573E-07f )*X+1.72365935872131E-06f )*X-
2.61203523522184E-05f )*X+3.52921308769880E-04f )*X-
4.09645850658433E-03f )*X+3.48198973061469E-02f;
RT2 = (((((-1.89554881382342E-08f*X+3.07583114342365E-07f)*X+
1.270981734393E-06f)*X-1.417298563884E-04f)*X+
3.226979163176E-03f)*X-4.48902570678178E-02f )*X+
3.81567185080039E-01f;
RT3 = (((((( 1.77280535300416E-09f*X+3.36524958870615E-08f)*X-
2.58341529013893E-07f )*X-1.13644895662320E-05f )*X-
7.91549618884063E-05f )*X+1.03825827346828E-02f )*X-
2.04389090525137E-01f )*X+1.73730726945889E+00f;
RT4 = (((((-5.61188882415248E-08f*X-2.49480733072460E-07f)*X+
3.428685057114E-06f)*X+1.679007454539E-04f)*X+
4.722855585715E-02f)*X-1.39368301737828E+00f )*X+
1.18463056481543E+01f;
WW1 = ((((((-1.14649303201279E-08f*X+1.88015570196787E-07f)*X-
2.33305875372323E-06f )*X+2.68880044371597E-05f )*X-
2.94268428977387E-04f )*X+3.06548909776613E-03f )*X-
3.13844305680096E-02f )*X+3.62683783378335E-01f;
WW2 = ((((((((-4.11720483772634E-09f*X+6.54963481852134E-08f)*X-
7.20045285129626E-07f )*X+6.93779646721723E-06f )*X-
6.05367572016373E-05f )*X+4.74241566251899E-04f )*X-
3.26956188125316E-03f )*X+1.91883866626681E-02f )*X-
8.98046242565811E-02f )*X+3.13706645877886E-01f;
WW3 = ((((((((-3.41688436990215E-08f*X+5.07238960340773E-07f)*X-
5.01675628408220E-06f )*X+4.20363420922845E-05f )*X-
3.08040221166823E-04f )*X+1.94431864731239E-03f )*X-
1.02477820460278E-02f )*X+4.28670143840073E-02f )*X-
1.29314370962569E-01f )*X+2.22381034453369E-01f;
WW4 = ((((((((( 4.99660550769508E-09f*X-7.94585963310120E-08f)*X+
8.359072409485E-07f)*X-7.422369210610E-06f)*X+
5.763374308160E-05f)*X-3.86645606718233E-04f )*X+
2.18417516259781E-03f )*X-9.99791027771119E-03f )*X+
3.48791097377370E-02f )*X-8.28299075413889E-02f )*X+
1.01228536290376E-01f;
} else if (X <= 5.f) {
Y = X-3.0E+00f;
RT1 = (((((((((-1.48570633747284E-15f*Y-1.33273068108777E-13f)*Y+
4.068543696670E-12f)*Y-9.163164161821E-11f)*Y+
2.046819017845E-09f)*Y-4.03076426299031E-08f )*Y+
7.29407420660149E-07f )*Y-1.23118059980833E-05f )*Y+
1.88796581246938E-04f )*Y-2.53262912046853E-03f )*Y+
2.51198234505021E-02f;
RT2 = ((((((((( 1.35830583483312E-13f*Y-2.29772605964836E-12f)*Y-
3.821500128045E-12f)*Y+6.844424214735E-10f)*Y-
1.048063352259E-08f)*Y+1.50083186233363E-08f )*Y+
3.48848942324454E-06f )*Y-1.08694174399193E-04f )*Y+
2.08048885251999E-03f )*Y-2.91205805373793E-02f )*Y+
2.72276489515713E-01f;
RT3 = ((((((((( 5.02799392850289E-13f*Y+1.07461812944084E-11f)*Y-
1.482277886411E-10f)*Y-2.153585661215E-09f)*Y+
3.654087802817E-08f)*Y+5.15929575830120E-07f )*Y-
9.52388379435709E-06f )*Y-2.16552440036426E-04f )*Y+
9.03551469568320E-03f )*Y-1.45505469175613E-01f )*Y+
1.21449092319186E+00f;
RT4 = (((((((((-1.08510370291979E-12f*Y+6.41492397277798E-11f)*Y+
7.542387436125E-10f)*Y-2.213111836647E-09f)*Y-
1.448228963549E-07f)*Y-1.95670833237101E-06f )*Y-
1.07481314670844E-05f )*Y+1.49335941252765E-04f )*Y+
4.87791531990593E-02f )*Y-1.10559909038653E+00f )*Y+
8.09502028611780E+00f;
WW1 = ((((((((((-4.65801912689961E-14f*Y+7.58669507106800E-13f)*Y-
1.186387548048E-11f)*Y+1.862334710665E-10f)*Y-
2.799399389539E-09f)*Y+4.148972684255E-08f)*Y-
5.933568079600E-07f)*Y+8.168349266115E-06f)*Y-
1.08989176177409E-04f )*Y+1.41357961729531E-03f )*Y-
1.87588361833659E-02f )*Y+2.89898651436026E-01f;
WW2 = ((((((((((((-1.46345073267549E-14f*Y+2.25644205432182E-13f)*Y-
3.116258693847E-12f)*Y+4.321908756610E-11f)*Y-
5.673270062669E-10f)*Y+7.006295962960E-09f)*Y-
8.120186517000E-08f)*Y+8.775294645770E-07f)*Y-
8.77829235749024E-06f )*Y+8.04372147732379E-05f )*Y-
6.64149238804153E-04f )*Y+4.81181506827225E-03f )*Y-
2.88982669486183E-02f )*Y+1.56247249979288E-01f;
WW3 = ((((((((((((( 9.06812118895365E-15f*Y-1.40541322766087E-13f)*
Y+1.919270015269E-12f)*Y-2.605135739010E-11f)*Y+
3.299685839012E-10f)*Y-3.86354139348735E-09f )*Y+
4.16265847927498E-08f )*Y-4.09462835471470E-07f )*Y+
3.64018881086111E-06f )*Y-2.88665153269386E-05f )*Y+
2.00515819789028E-04f )*Y-1.18791896897934E-03f )*Y+
5.75223633388589E-03f )*Y-2.09400418772687E-02f )*Y+
4.85368861938873E-02f;
WW4 = ((((((((((((((-9.74835552342257E-16f*Y+1.57857099317175E-14f)*
Y-2.249993780112E-13f)*Y+3.173422008953E-12f)*Y-
4.161159459680E-11f)*Y+5.021343560166E-10f)*Y-
5.545047534808E-09f)*Y+5.554146993491E-08f)*Y-
4.99048696190133E-07f )*Y+3.96650392371311E-06f )*Y-
2.73816413291214E-05f )*Y+1.60106988333186E-04f )*Y-
7.64560567879592E-04f )*Y+2.81330044426892E-03f )*Y-
7.16227030134947E-03f )*Y+9.66077262223353E-03f;
} else if (X <= 10.f) {
Y = X-7.5E+00f;
RT1 = ((((((((( 4.64217329776215E-15f*Y-6.27892383644164E-15f)*Y+
3.462236347446E-13f)*Y-2.927229355350E-11f)*Y+
5.090355371676E-10f)*Y-9.97272656345253E-09f )*Y+
2.37835295639281E-07f )*Y-4.60301761310921E-06f )*Y+
8.42824204233222E-05f )*Y-1.37983082233081E-03f )*Y+
1.66630865869375E-02f;
RT2 = ((((((((( 2.93981127919047E-14f*Y+8.47635639065744E-13f)*Y-
1.446314544774E-11f)*Y-6.149155555753E-12f)*Y+
8.484275604612E-10f)*Y-6.10898827887652E-08f )*Y+
2.39156093611106E-06f )*Y-5.35837089462592E-05f )*Y+
1.00967602595557E-03f )*Y-1.57769317127372E-02f )*Y+
1.74853819464285E-01f;
RT3 = (((((((((( 2.93523563363000E-14f*Y-6.40041776667020E-14f)*Y-
2.695740446312E-12f)*Y+1.027082960169E-10f)*Y-
5.822038656780E-10f)*Y-3.159991002539E-08f)*Y+
4.327249251331E-07f)*Y+4.856768455119E-06f)*Y-
2.54617989427762E-04f )*Y+5.54843378106589E-03f )*Y-
7.95013029486684E-02f )*Y+7.20206142703162E-01f;
RT4 = (((((((((((-1.62212382394553E-14f*Y+7.68943641360593E-13f)*Y+
5.764015756615E-12f)*Y-1.380635298784E-10f)*Y-
1.476849808675E-09f)*Y+1.84347052385605E-08f )*Y+
3.34382940759405E-07f )*Y-1.39428366421645E-06f )*Y-
7.50249313713996E-05f )*Y-6.26495899187507E-04f )*Y+
4.69716410901162E-02f )*Y-6.66871297428209E-01f )*Y+
4.11207530217806E+00f;
WW1 = ((((((((((-1.65995045235997E-15f*Y+6.91838935879598E-14f)*Y-
9.131223418888E-13f)*Y+1.403341829454E-11f)*Y-
3.672235069444E-10f)*Y+6.366962546990E-09f)*Y-
1.039220021671E-07f)*Y+1.959098751715E-06f)*Y-
3.33474893152939E-05f )*Y+5.72164211151013E-04f )*Y-
1.05583210553392E-02f )*Y+2.26696066029591E-01f;
WW2 = ((((((((((((-3.57248951192047E-16f*Y+6.25708409149331E-15f)*Y-
9.657033089714E-14f)*Y+1.507864898748E-12f)*Y-
2.332522256110E-11f)*Y+3.428545616603E-10f)*Y-
4.698730937661E-09f)*Y+6.219977635130E-08f)*Y-
7.83008889613661E-07f )*Y+9.08621687041567E-06f )*Y-
9.86368311253873E-05f )*Y+9.69632496710088E-04f )*Y-
8.14594214284187E-03f )*Y+8.50218447733457E-02f;
WW3 = ((((((((((((( 1.64742458534277E-16f*Y-2.68512265928410E-15f)*
Y+3.788890667676E-14f)*Y-5.508918529823E-13f)*Y+
7.555896810069E-12f)*Y-9.69039768312637E-11f )*Y+
1.16034263529672E-09f )*Y-1.28771698573873E-08f )*Y+
1.31949431805798E-07f )*Y-1.23673915616005E-06f )*Y+
1.04189803544936E-05f )*Y-7.79566003744742E-05f )*Y+
5.03162624754434E-04f )*Y-2.55138844587555E-03f )*Y+
1.13250730954014E-02f;
WW4 = ((((((((((((((-1.55714130075679E-17f*Y+2.57193722698891E-16f)*
Y-3.626606654097E-15f)*Y+5.234734676175E-14f)*Y-
7.067105402134E-13f)*Y+8.793512664890E-12f)*Y-
1.006088923498E-10f)*Y+1.050565098393E-09f)*Y-
9.91517881772662E-09f )*Y+8.35835975882941E-08f )*Y-
6.19785782240693E-07f )*Y+3.95841149373135E-06f )*Y-
2.11366761402403E-05f )*Y+9.00474771229507E-05f )*Y-
2.78777909813289E-04f )*Y+5.26543779837487E-04f;
} else if (X <= 15.f) {
Y = X-12.5E+00f;
RT1 = ((((((((((( 4.94869622744119E-17f*Y+8.03568805739160E-16f)*Y-
5.599125915431E-15f)*Y-1.378685560217E-13f)*Y+
7.006511663249E-13f)*Y+1.30391406991118E-11f )*Y+
8.06987313467541E-11f )*Y-5.20644072732933E-09f )*Y+
7.72794187755457E-08f )*Y-1.61512612564194E-06f )*Y+
4.15083811185831E-05f )*Y-7.87855975560199E-04f )*Y+
1.14189319050009E-02f;
RT2 = ((((((((((( 4.89224285522336E-16f*Y+1.06390248099712E-14f)*Y-
5.446260182933E-14f)*Y-1.613630106295E-12f)*Y+
3.910179118937E-12f)*Y+1.90712434258806E-10f )*Y+
8.78470199094761E-10f )*Y-5.97332993206797E-08f )*Y+
9.25750831481589E-07f )*Y-2.02362185197088E-05f )*Y+
4.92341968336776E-04f )*Y-8.68438439874703E-03f )*Y+
1.15825965127958E-01f;
RT3 = (((((((((( 6.12419396208408E-14f*Y+1.12328861406073E-13f)*Y-
9.051094103059E-12f)*Y-4.781797525341E-11f)*Y+
1.660828868694E-09f)*Y+4.499058798868E-10f)*Y-
2.519549641933E-07f)*Y+4.977444040180E-06f)*Y-
1.25858350034589E-04f )*Y+2.70279176970044E-03f )*Y-
3.99327850801083E-02f )*Y+4.33467200855434E-01f;
RT4 = ((((((((((( 4.63414725924048E-14f*Y-4.72757262693062E-14f)*Y-
1.001926833832E-11f)*Y+6.074107718414E-11f)*Y+
1.576976911942E-09f)*Y-2.01186401974027E-08f )*Y-
1.84530195217118E-07f )*Y+5.02333087806827E-06f )*Y+
9.66961790843006E-06f )*Y-1.58522208889528E-03f )*Y+
2.80539673938339E-02f )*Y-2.78953904330072E-01f )*Y+
1.82835655238235E+00f;
WW4 = ((((((((((((( 2.90401781000996E-18f*Y-4.63389683098251E-17f)*
Y+6.274018198326E-16f)*Y-8.936002188168E-15f)*Y+
1.194719074934E-13f)*Y-1.45501321259466E-12f )*Y+
1.64090830181013E-11f )*Y-1.71987745310181E-10f )*Y+
1.63738403295718E-09f )*Y-1.39237504892842E-08f )*Y+
1.06527318142151E-07f )*Y-7.27634957230524E-07f )*Y+
4.12159381310339E-06f )*Y-1.74648169719173E-05f )*Y+
8.50290130067818E-05f;
WW3 = ((((((((((((-4.19569145459480E-17f*Y+5.94344180261644E-16f)*Y-
1.148797566469E-14f)*Y+1.881303962576E-13f)*Y-
2.413554618391E-12f)*Y+3.372127423047E-11f)*Y-
4.933988617784E-10f)*Y+6.116545396281E-09f)*Y-
6.69965691739299E-08f )*Y+7.52380085447161E-07f )*Y-
8.08708393262321E-06f )*Y+6.88603417296672E-05f )*Y-
4.67067112993427E-04f )*Y+5.42313365864597E-03f;
WW2 = ((((((((((-6.22272689880615E-15f*Y+1.04126809657554E-13f)*Y-
6.842418230913E-13f)*Y+1.576841731919E-11f)*Y-
4.203948834175E-10f)*Y+6.287255934781E-09f)*Y-
8.307159819228E-08f)*Y+1.356478091922E-06f)*Y-
2.08065576105639E-05f )*Y+2.52396730332340E-04f )*Y-
2.94484050194539E-03f )*Y+6.01396183129168E-02f;
WW1 = (((-1.8784686463512E-01f/X+2.2991849164985E-01f)/X -
4.9893752514047E-01f)/X-2.1916512131607E-05f)*expf(-X) +
sqrtf(PIE4/X)-WW4-WW3-WW2;
} else if (X <= 20.f) {
WW1 = sqrtf(PIE4/X);
Y = X-17.5E+00f;
RT1 = ((((((((((( 4.36701759531398E-17f*Y-1.12860600219889E-16f)*Y-
6.149849164164E-15f)*Y+5.820231579541E-14f)*Y+
4.396602872143E-13f)*Y-1.24330365320172E-11f )*Y+
6.71083474044549E-11f )*Y+2.43865205376067E-10f )*Y+
1.67559587099969E-08f )*Y-9.32738632357572E-07f )*Y+
2.39030487004977E-05f )*Y-4.68648206591515E-04f )*Y+
8.34977776583956E-03f;
RT2 = ((((((((((( 4.98913142288158E-16f*Y-2.60732537093612E-16f)*Y-
7.775156445127E-14f)*Y+5.766105220086E-13f)*Y+
6.432696729600E-12f)*Y-1.39571683725792E-10f )*Y+
5.95451479522191E-10f )*Y+2.42471442836205E-09f )*Y+
2.47485710143120E-07f )*Y-1.14710398652091E-05f )*Y+
2.71252453754519E-04f )*Y-4.96812745851408E-03f )*Y+
8.26020602026780E-02f;
RT3 = ((((((((((( 1.91498302509009E-15f*Y+1.48840394311115E-14f)*Y-
4.316925145767E-13f)*Y+1.186495793471E-12f)*Y+
4.615806713055E-11f)*Y-5.54336148667141E-10f )*Y+
3.48789978951367E-10f )*Y-2.79188977451042E-09f )*Y+
2.09563208958551E-06f )*Y-6.76512715080324E-05f )*Y+
1.32129867629062E-03f )*Y-2.05062147771513E-02f )*Y+
2.88068671894324E-01f;
RT4 = (((((((((((-5.43697691672942E-15f*Y-1.12483395714468E-13f)*Y+
2.826607936174E-12f)*Y-1.266734493280E-11f)*Y-
4.258722866437E-10f)*Y+9.45486578503261E-09f )*Y-
5.86635622821309E-08f )*Y-1.28835028104639E-06f )*Y+
4.41413815691885E-05f )*Y-7.61738385590776E-04f )*Y+
9.66090902985550E-03f )*Y-1.01410568057649E-01f )*Y+
9.54714798156712E-01f;
WW4 = ((((((((((((-7.56882223582704E-19f*Y+7.53541779268175E-18f)*Y-
1.157318032236E-16f)*Y+2.411195002314E-15f)*Y-
3.601794386996E-14f)*Y+4.082150659615E-13f)*Y-
4.289542980767E-12f)*Y+5.086829642731E-11f)*Y-
6.35435561050807E-10f )*Y+6.82309323251123E-09f )*Y-
5.63374555753167E-08f )*Y+3.57005361100431E-07f )*Y-
2.40050045173721E-06f )*Y+4.94171300536397E-05f;
WW3 = (((((((((((-5.54451040921657E-17f*Y+2.68748367250999E-16f)*Y+
1.349020069254E-14f)*Y-2.507452792892E-13f)*Y+
1.944339743818E-12f)*Y-1.29816917658823E-11f )*Y+
3.49977768819641E-10f )*Y-8.67270669346398E-09f )*Y+
1.31381116840118E-07f )*Y-1.36790720600822E-06f )*Y+
1.19210697673160E-05f )*Y-1.42181943986587E-04f )*Y+
4.12615396191829E-03f;
WW2 = (((((((((((-1.86506057729700E-16f*Y+1.16661114435809E-15f)*Y+
2.563712856363E-14f)*Y-4.498350984631E-13f)*Y+
1.765194089338E-12f)*Y+9.04483676345625E-12f )*Y+
4.98930345609785E-10f )*Y-2.11964170928181E-08f )*Y+
3.98295476005614E-07f )*Y-5.49390160829409E-06f )*Y+
7.74065155353262E-05f )*Y-1.48201933009105E-03f )*Y+
4.97836392625268E-02f;
WW1 = (( 1.9623264149430E-01f/X-4.9695241464490E-01f)/X -
6.0156581186481E-05f)*expf(-X)+WW1-WW2-WW3-WW4;
} else if (X <= 35.f) {
WW1 = sqrtf(PIE4/X);
E = expf(-X);
RT1 = ((((((-4.45711399441838E-05f*X+1.27267770241379E-03f)*X -
2.36954961381262E-01f)*X+1.54330657903756E+01f)*X -
5.22799159267808E+02f)*X+1.05951216669313E+04f)*X +
(-2.51177235556236E+06f/X+8.72975373557709E+05f)/X -
1.29194382386499E+05f)*E + R14/(X-R14);
RT2 = (((((-7.85617372254488E-02f*X+6.35653573484868E+00f)*X -
3.38296938763990E+02f)*X+1.25120495802096E+04f)*X -
3.16847570511637E+05f)*X +
((-1.02427466127427E+09f/X +
3.70104713293016E+08f)/X-5.87119005093822E+07f)/X +
5.38614211391604E+06f)*E + R24/(X-R24);
RT3 = (((((-2.37900485051067E-01f*X+1.84122184400896E+01f)*X -
1.00200731304146E+03f)*X+3.75151841595736E+04f)*X -
9.50626663390130E+05f)*X +
((-2.88139014651985E+09f/X +
1.06625915044526E+09f)/X-1.72465289687396E+08f)/X +
1.60419390230055E+07f)*E + R34/(X-R34);
RT4 = ((((((-6.00691586407385E-04f*X-3.64479545338439E-01f)*X +
1.57496131755179E+01f)*X-6.54944248734901E+02f)*X +
1.70830039597097E+04f)*X-2.90517939780207E+05f)*X +
(3.49059698304732E+07f/X-1.64944522586065E+07f)/X +
2.96817940164703E+06f)*E + R44/(X-R44);
if (X <= 25.f)
WW4 = ((((((( 2.33766206773151E-07f*X-
3.81542906607063E-05f)*X +3.51416601267000E-03f)*X-
1.66538571864728E-01f)*X +4.80006136831847E+00f)*X-
8.73165934223603E+01f)*X +9.77683627474638E+02f)*X +
1.66000945117640E+04f/X -6.14479071209961E+03f)*E + W44*WW1;
else
WW4 = (((((( 5.74245945342286E-06f*X-
7.58735928102351E-05f)*X +2.35072857922892E-04f)*X-
3.78812134013125E-03f)*X +3.09871652785805E-01f)*X-
7.11108633061306E+00f)*X +5.55297573149528E+01f)*E + W44*WW1;
WW3 = (((((( 2.36392855180768E-04f*X-9.16785337967013E-03f)*X +
4.62186525041313E-01f)*X-1.96943786006540E+01f)*X +
4.99169195295559E+02f)*X-6.21419845845090E+03f)*X +
((+5.21445053212414E+07f/X-1.34113464389309E+07f)/X +
1.13673298305631E+06f)/X-2.81501182042707E+03f)*E + W34*WW1;
WW2 = (((((( 7.29841848989391E-04f*X-3.53899555749875E-02f)*X +
2.07797425718513E+00f)*X-1.00464709786287E+02f)*X +
3.15206108877819E+03f)*X-6.27054715090012E+04f)*X +
(+1.54721246264919E+07f/X-5.26074391316381E+06f)/X +
7.67135400969617E+05f)*E + W24*WW1;
WW1 = (( 1.9623264149430E-01f/X-4.9695241464490E-01f)/X -
6.0156581186481E-05f)*E + WW1-WW2-WW3-WW4;
} else if (X <= 53.f) {
WW1 = sqrtf(PIE4/X);
E = expf(-X)*powf(X,4.f);
RT4 = ((-2.19135070169653E-03f*X-1.19108256987623E-01f)*X -
7.50238795695573E-01f)*E + R44/(X-R44);
RT3 = ((-9.65842534508637E-04f*X-4.49822013469279E-02f)*X +
6.08784033347757E-01f)*E + R34/(X-R34);
RT2 = ((-3.62569791162153E-04f*X-9.09231717268466E-03f)*X +
1.84336760556262E-01f)*E + R24/(X-R24);
RT1 = ((-4.07557525914600E-05f*X-6.88846864931685E-04f)*X +
1.74725309199384E-02f)*E + R14/(X-R14);
WW4 = (( 5.76631982000990E-06f*X-7.89187283804890E-05f)*X +
3.28297971853126E-04f)*E + W44*WW1;
WW3 = (( 2.08294969857230E-04f*X-3.77489954837361E-03f)*X +
2.09857151617436E-02f)*E + W34*WW1;
WW2 = (( 6.16374517326469E-04f*X-1.26711744680092E-02f)*X +
8.14504890732155E-02f)*E + W24*WW1;
WW1 = WW1-WW2-WW3-WW4;
} else {
WW1 = sqrtf(PIE4/X);
RT1 = R14/(X-R14);
RT2 = R24/(X-R24);
RT3 = R34/(X-R34);
RT4 = R44/(X-R44);
WW4 = W44*WW1;
WW3 = W34*WW1;
WW2 = W24*WW1;
WW1 = WW1-WW2-WW3-WW4;
}
roots[0] = RT1;
weights[0] = WW1;
roots[1] = RT2;
weights[1] = WW2;
roots[2] = RT3;
weights[2] = WW3;
roots[3] = RT4;
weights[3] = WW4;
return;
}
__device__ void cuda_Root5(float X, float roots[], float weights[]){
float R15,PIE4,R25,W25,R35,W35,R45,W45,R55,W55;
float RT1=0,RT2=0,RT3=0,RT4=0,RT5=0,
WW1=0,WW2=0,WW3=0,WW4=0,WW5=0;
float Y,E=0,XXX;
R15 = 1.17581320211778E-01f;
PIE4 = 7.85398163397448E-01f;
R25 = 1.07456201243690E+00f;
W25 = 2.70967405960535E-01f;
R35 = 3.08593744371754E+00f;
W35 = 3.82231610015404E-02f;
R45 = 6.41472973366203E+00f;
W45 = 1.51614186862443E-03f;
R55 = 1.18071894899717E+01f;
W55 = 8.62130526143657E-06f;
if (X < 3.e-7f){
RT1 = 2.26659266316985E-02f -2.15865967920897E-03f *X;
RT2 = 2.31271692140903E-01f -2.20258754389745E-02f *X;
RT3 = 8.57346024118836E-01f -8.16520023025515E-02f *X;
RT4 = 2.97353038120346E+00f -2.83193369647137E-01f *X;
RT5 = 1.84151859759051E+01f -1.75382723579439E+00f *X;
WW1 = 2.95524224714752E-01f -1.96867576909777E-02f *X;
WW2 = 2.69266719309995E-01f -5.61737590184721E-02f *X;
WW3 = 2.19086362515981E-01f -9.71152726793658E-02f *X;
WW4 = 1.49451349150580E-01f -1.02979262193565E-01f *X;
WW5 = 6.66713443086877E-02f -5.73782817488315E-02f *X;
} else if (X < 1.f){
RT1 = ((((((-4.46679165328413E-11f*X+1.21879111988031E-09f)*X-
2.62975022612104E-08f )*X+5.15106194905897E-07f )*X-
9.27933625824749E-06f )*X+1.51794097682482E-04f )*X-
2.15865967920301E-03f )*X+2.26659266316985E-02f;
RT2 = (((((( 1.93117331714174E-10f*X-4.57267589660699E-09f)*X+
2.48339908218932E-08f )*X+1.50716729438474E-06f )*X-
6.07268757707381E-05f )*X+1.37506939145643E-03f )*X-
2.20258754419939E-02f )*X+2.31271692140905E-01f;
RT3 = ((((( 4.84989776180094E-09f*X+1.31538893944284E-07f)*X-
2.766753852879E-06f)*X-7.651163510626E-05f)*X+
4.033058545972E-03f)*X-8.16520022916145E-02f )*X+
8.57346024118779E-01f;
RT4 = ((((-2.48581772214623E-07f*X-4.34482635782585E-06f)*X-
7.46018257987630E-07f )*X+1.01210776517279E-02f )*X-
2.83193369640005E-01f )*X+2.97353038120345E+00f;
RT5 = (((((-8.92432153868554E-09f*X+1.77288899268988E-08f)*X+
3.040754680666E-06f)*X+1.058229325071E-04f)*X+
4.596379534985E-02f)*X-1.75382723579114E+00f )*X+
1.84151859759049E+01f;
WW1 = ((((((-2.03822632771791E-09f*X+3.89110229133810E-08f)*X-
5.84914787904823E-07f )*X+8.30316168666696E-06f )*X-
1.13218402310546E-04f )*X+1.49128888586790E-03f )*X-
1.96867576904816E-02f )*X+2.95524224714749E-01f;
WW2 = ((((((( 8.62848118397570E-09f*X-1.38975551148989E-07f)*X+
1.602894068228E-06f)*X-1.646364300836E-05f)*X+
1.538445806778E-04f)*X-1.28848868034502E-03f )*X+
9.38866933338584E-03f )*X-5.61737590178812E-02f )*X+
2.69266719309991E-01f;
WW3 = ((((((((-9.41953204205665E-09f*X+1.47452251067755E-07f)*X-
1.57456991199322E-06f )*X+1.45098401798393E-05f )*X-
1.18858834181513E-04f )*X+8.53697675984210E-04f )*X-
5.22877807397165E-03f )*X+2.60854524809786E-02f )*X-
9.71152726809059E-02f )*X+2.19086362515979E-01f;
WW4 = ((((((((-3.84961617022042E-08f*X+5.66595396544470E-07f)*X-
5.52351805403748E-06f )*X+4.53160377546073E-05f )*X-
3.22542784865557E-04f )*X+1.95682017370967E-03f )*X-
9.77232537679229E-03f )*X+3.79455945268632E-02f )*X-
1.02979262192227E-01f )*X+1.49451349150573E-01f;
WW5 = ((((((((( 4.09594812521430E-09f*X-6.47097874264417E-08f)*X+
6.743541482689E-07f)*X-5.917993920224E-06f)*X+
4.531969237381E-05f)*X-2.99102856679638E-04f )*X+
1.65695765202643E-03f )*X-7.40671222520653E-03f )*X+
2.50889946832192E-02f )*X-5.73782817487958E-02f )*X+
6.66713443086877E-02f;
} else if (X < 5.f) {
Y = X-3.0E+00f;
RT1 = ((((((((-2.58163897135138E-14f*Y+8.14127461488273E-13f)*Y-
2.11414838976129E-11f )*Y+5.09822003260014E-10f )*Y-
1.16002134438663E-08f )*Y+2.46810694414540E-07f )*Y-
4.92556826124502E-06f )*Y+9.02580687971053E-05f )*Y-
1.45190025120726E-03f )*Y+1.73416786387475E-02f;
RT2 = ((((((((( 1.04525287289788E-14f*Y+5.44611782010773E-14f)*Y-
4.831059411392E-12f)*Y+1.136643908832E-10f)*Y-
1.104373076913E-09f)*Y-2.35346740649916E-08f )*Y+
1.43772622028764E-06f )*Y-4.23405023015273E-05f )*Y+
9.12034574793379E-04f )*Y-1.52479441718739E-02f )*Y+
1.76055265928744E-01f;
RT3 = (((((((((-6.89693150857911E-14f*Y+5.92064260918861E-13f)*Y+
1.847170956043E-11f)*Y-3.390752744265E-10f)*Y-
2.995532064116E-09f)*Y+1.57456141058535E-07f )*Y-
3.95859409711346E-07f )*Y-9.58924580919747E-05f )*Y+
3.23551502557785E-03f )*Y-5.97587007636479E-02f )*Y+
6.46432853383057E-01f;
RT4 = ((((((((-3.61293809667763E-12f*Y-2.70803518291085E-11f)*Y+
8.83758848468769E-10f )*Y+1.59166632851267E-08f )*Y-
1.32581997983422E-07f )*Y-7.60223407443995E-06f )*Y-
7.41019244900952E-05f )*Y+9.81432631743423E-03f )*Y-
2.23055570487771E-01f )*Y+2.21460798080643E+00f;
RT5 = ((((((((( 7.12332088345321E-13f*Y+3.16578501501894E-12f)*Y-
8.776668218053E-11f)*Y-2.342817613343E-09f)*Y-
3.496962018025E-08f)*Y-3.03172870136802E-07f )*Y+
1.50511293969805E-06f )*Y+1.37704919387696E-04f )*Y+
4.70723869619745E-02f )*Y-1.47486623003693E+00f )*Y+
1.35704792175847E+01f;
WW1 = ((((((((( 1.04348658616398E-13f*Y-1.94147461891055E-12f)*Y+
3.485512360993E-11f)*Y-6.277497362235E-10f)*Y+
1.100758247388E-08f)*Y-1.88329804969573E-07f )*Y+
3.12338120839468E-06f )*Y-5.04404167403568E-05f )*Y+
8.00338056610995E-04f )*Y-1.30892406559521E-02f )*Y+
2.47383140241103E-01f;
WW2 = ((((((((((( 3.23496149760478E-14f*Y-5.24314473469311E-13f)*Y+
7.743219385056E-12f)*Y-1.146022750992E-10f)*Y+
1.615238462197E-09f)*Y-2.15479017572233E-08f )*Y+
2.70933462557631E-07f )*Y-3.18750295288531E-06f )*Y+
3.47425221210099E-05f )*Y-3.45558237388223E-04f )*Y+
3.05779768191621E-03f )*Y-2.29118251223003E-02f )*Y+
1.59834227924213E-01f;
WW3 = ((((((((((((-3.42790561802876E-14f*Y+5.26475736681542E-13f)*Y-
7.184330797139E-12f)*Y+9.763932908544E-11f)*Y-
1.244014559219E-09f)*Y+1.472744068942E-08f)*Y-
1.611749975234E-07f)*Y+1.616487851917E-06f)*Y-
1.46852359124154E-05f )*Y+1.18900349101069E-04f )*Y-
8.37562373221756E-04f )*Y+4.93752683045845E-03f )*Y-
2.25514728915673E-02f )*Y+6.95211812453929E-02f;
WW4 = ((((((((((((( 1.04072340345039E-14f*Y-1.60808044529211E-13f)*
Y+2.183534866798E-12f)*Y-2.939403008391E-11f)*Y+
3.679254029085E-10f)*Y-4.23775673047899E-09f )*Y+
4.46559231067006E-08f )*Y-4.26488836563267E-07f )*Y+
3.64721335274973E-06f )*Y-2.74868382777722E-05f )*Y+
1.78586118867488E-04f )*Y-9.68428981886534E-04f )*Y+
4.16002324339929E-03f )*Y-1.28290192663141E-02f )*Y+
2.22353727685016E-02f;
WW5 = ((((((((((((((-8.16770412525963E-16f*Y+1.31376515047977E-14f)*
Y-1.856950818865E-13f)*Y+2.596836515749E-12f)*Y-
3.372639523006E-11f)*Y+4.025371849467E-10f)*Y-
4.389453269417E-09f)*Y+4.332753856271E-08f)*Y-
3.82673275931962E-07f )*Y+2.98006900751543E-06f )*Y-
2.00718990300052E-05f )*Y+1.13876001386361E-04f )*Y-
5.23627942443563E-04f )*Y+1.83524565118203E-03f )*Y-
4.37785737450783E-03f )*Y+5.36963805223095E-03f;
} else if (X < 10.f) {
Y = X-7.5E+00f;
RT1 = ((((((((-1.13825201010775E-14f*Y+1.89737681670375E-13f)*Y-
4.81561201185876E-12f )*Y+1.56666512163407E-10f )*Y-
3.73782213255083E-09f )*Y+9.15858355075147E-08f )*Y-
2.13775073585629E-06f )*Y+4.56547356365536E-05f )*Y-
8.68003909323740E-04f )*Y+1.22703754069176E-02f;
RT2 = (((((((((-3.67160504428358E-15f*Y+1.27876280158297E-14f)*Y-
1.296476623788E-12f)*Y+1.477175434354E-11f)*Y+
5.464102147892E-10f)*Y-2.42538340602723E-08f )*Y+
8.20460740637617E-07f )*Y-2.20379304598661E-05f )*Y+
4.90295372978785E-04f )*Y-9.14294111576119E-03f )*Y+
1.22590403403690E-01f;
RT3 = ((((((((( 1.39017367502123E-14f*Y-6.96391385426890E-13f)*Y+
1.176946020731E-12f)*Y+1.725627235645E-10f)*Y-
3.686383856300E-09f)*Y+2.87495324207095E-08f )*Y+
1.71307311000282E-06f )*Y-7.94273603184629E-05f )*Y+
2.00938064965897E-03f )*Y-3.63329491677178E-02f )*Y+
4.34393683888443E-01f;
RT4 = ((((((((((-1.27815158195209E-14f*Y+1.99910415869821E-14f)*Y+
3.753542914426E-12f)*Y-2.708018219579E-11f)*Y-
1.190574776587E-09f)*Y+1.106696436509E-08f)*Y+
3.954955671326E-07f)*Y-4.398596059588E-06f)*Y-
2.01087998907735E-04f )*Y+7.89092425542937E-03f )*Y-
1.42056749162695E-01f )*Y+1.39964149420683E+00f;
RT5 = ((((((((((-1.19442341030461E-13f*Y-2.34074833275956E-12f)*Y+
6.861649627426E-12f)*Y+6.082671496226E-10f)*Y+
5.381160105420E-09f)*Y-6.253297138700E-08f)*Y-
2.135966835050E-06f)*Y-2.373394341886E-05f)*Y+
2.88711171412814E-06f )*Y+4.85221195290753E-02f )*Y-
1.04346091985269E+00f )*Y+7.89901551676692E+00f;
WW1 = ((((((((( 7.95526040108997E-15f*Y-2.48593096128045E-13f)*Y+
4.761246208720E-12f)*Y-9.535763686605E-11f)*Y+
2.225273630974E-09f)*Y-4.49796778054865E-08f )*Y+
9.17812870287386E-07f )*Y-1.86764236490502E-05f )*Y+
3.76807779068053E-04f )*Y-8.10456360143408E-03f )*Y+
2.01097936411496E-01f;
WW2 = ((((((((((( 1.25678686624734E-15f*Y-2.34266248891173E-14f)*Y+
3.973252415832E-13f)*Y-6.830539401049E-12f)*Y+
1.140771033372E-10f)*Y-1.82546185762009E-09f )*Y+
2.77209637550134E-08f )*Y-4.01726946190383E-07f )*Y+
5.48227244014763E-06f )*Y-6.95676245982121E-05f )*Y+
8.05193921815776E-04f )*Y-8.15528438784469E-03f )*Y+
9.71769901268114E-02f;
WW3 = ((((((((((((-8.20929494859896E-16f*Y+1.37356038393016E-14f)*Y-
2.022863065220E-13f)*Y+3.058055403795E-12f)*Y-
4.387890955243E-11f)*Y+5.923946274445E-10f)*Y-
7.503659964159E-09f)*Y+8.851599803902E-08f)*Y-
9.65561998415038E-07f )*Y+9.60884622778092E-06f )*Y-
8.56551787594404E-05f )*Y+6.66057194311179E-04f )*Y-
4.17753183902198E-03f )*Y+2.25443826852447E-02f;
WW4 = ((((((((((((((-1.08764612488790E-17f*Y+1.85299909689937E-16f)*
Y-2.730195628655E-15f)*Y+4.127368817265E-14f)*Y-
5.881379088074E-13f)*Y+7.805245193391E-12f)*Y-
9.632707991704E-11f)*Y+1.099047050624E-09f)*Y-
1.15042731790748E-08f )*Y+1.09415155268932E-07f )*Y-
9.33687124875935E-07f )*Y+7.02338477986218E-06f )*Y-
4.53759748787756E-05f )*Y+2.41722511389146E-04f )*Y-
9.75935943447037E-04f )*Y+2.57520532789644E-03f;
WW5 = ((((((((((((((( 7.28996979748849E-19f*Y-1.26518146195173E-17f)
*Y+1.886145834486E-16f)*Y-2.876728287383E-15f)*Y+
4.114588668138E-14f)*Y-5.44436631413933E-13f )*Y+
6.64976446790959E-12f )*Y-7.44560069974940E-11f )*Y+
7.57553198166848E-10f )*Y-6.92956101109829E-09f )*Y+
5.62222859033624E-08f )*Y-3.97500114084351E-07f )*Y+
2.39039126138140E-06f )*Y-1.18023950002105E-05f )*Y+
4.52254031046244E-05f )*Y-1.21113782150370E-04f )*Y+
1.75013126731224E-04f;
} else if (X < 15.f) {
Y = X-12.5E+00f;
RT1 = ((((((((((-4.16387977337393E-17f*Y+7.20872997373860E-16f)*Y+
1.395993802064E-14f)*Y+3.660484641252E-14f)*Y-
4.154857548139E-12f)*Y+2.301379846544E-11f)*Y-
1.033307012866E-09f)*Y+3.997777641049E-08f)*Y-
9.35118186333939E-07f )*Y+2.38589932752937E-05f )*Y-
5.35185183652937E-04f )*Y+8.85218988709735E-03f;
RT2 = ((((((((((-4.56279214732217E-16f*Y+6.24941647247927E-15f)*Y+
1.737896339191E-13f)*Y+8.964205979517E-14f)*Y-
3.538906780633E-11f)*Y+9.561341254948E-11f)*Y-
9.772831891310E-09f)*Y+4.240340194620E-07f)*Y-
1.02384302866534E-05f )*Y+2.57987709704822E-04f )*Y-
5.54735977651677E-03f )*Y+8.68245143991948E-02f;
RT3 = ((((((((((-2.52879337929239E-15f*Y+2.13925810087833E-14f)*Y+
7.884307667104E-13f)*Y-9.023398159510E-13f)*Y-
5.814101544957E-11f)*Y-1.333480437968E-09f)*Y-
2.217064940373E-08f)*Y+1.643290788086E-06f)*Y-
4.39602147345028E-05f )*Y+1.08648982748911E-03f )*Y-
2.13014521653498E-02f )*Y+2.94150684465425E-01f;
RT4 = ((((((((((-6.42391438038888E-15f*Y+5.37848223438815E-15f)*Y+
8.960828117859E-13f)*Y+5.214153461337E-11f)*Y-
1.106601744067E-10f)*Y-2.007890743962E-08f)*Y+
1.543764346501E-07f)*Y+4.520749076914E-06f)*Y-
1.88893338587047E-04f )*Y+4.73264487389288E-03f )*Y-
7.91197893350253E-02f )*Y+8.60057928514554E-01f;
RT5 = (((((((((((-2.24366166957225E-14f*Y+4.87224967526081E-14f)*Y+
5.587369053655E-12f)*Y-3.045253104617E-12f)*Y-
1.223983883080E-09f)*Y-2.05603889396319E-09f )*Y+
2.58604071603561E-07f )*Y+1.34240904266268E-06f )*Y-
5.72877569731162E-05f )*Y-9.56275105032191E-04f )*Y+
4.23367010370921E-02f )*Y-5.76800927133412E-01f )*Y+
3.87328263873381E+00f;
WW1 = ((((((((( 8.98007931950169E-15f*Y+7.25673623859497E-14f)*Y+
5.851494250405E-14f)*Y-4.234204823846E-11f)*Y+
3.911507312679E-10f)*Y-9.65094802088511E-09f )*Y+
3.42197444235714E-07f )*Y-7.51821178144509E-06f )*Y+
1.94218051498662E-04f )*Y-5.38533819142287E-03f )*Y+
1.68122596736809E-01f;
WW2 = ((((((((((-1.05490525395105E-15f*Y+1.96855386549388E-14f)*Y-
5.500330153548E-13f)*Y+1.003849567976E-11f)*Y-
1.720997242621E-10f)*Y+3.533277061402E-09f)*Y-
6.389171736029E-08f)*Y+1.046236652393E-06f)*Y-
1.73148206795827E-05f )*Y+2.57820531617185E-04f )*Y-
3.46188265338350E-03f )*Y+7.03302497508176E-02f;
WW3 = ((((((((((( 3.60020423754545E-16f*Y-6.24245825017148E-15f)*Y+
9.945311467434E-14f)*Y-1.749051512721E-12f)*Y+
2.768503957853E-11f)*Y-4.08688551136506E-10f )*Y+
6.04189063303610E-09f )*Y-8.23540111024147E-08f )*Y+
1.01503783870262E-06f )*Y-1.20490761741576E-05f )*Y+
1.26928442448148E-04f )*Y-1.05539461930597E-03f )*Y+
1.15543698537013E-02f;
WW4 = ((((((((((((( 2.51163533058925E-18f*Y-4.31723745510697E-17f)*
Y+6.557620865832E-16f)*Y-1.016528519495E-14f)*Y+
1.491302084832E-13f)*Y-2.06638666222265E-12f )*Y+
2.67958697789258E-11f )*Y-3.23322654638336E-10f )*Y+
3.63722952167779E-09f )*Y-3.75484943783021E-08f )*Y+
3.49164261987184E-07f )*Y-2.92658670674908E-06f )*Y+
2.12937256719543E-05f )*Y-1.19434130620929E-04f )*Y+
6.45524336158384E-04f;
WW5 = ((((((((((((((-1.29043630202811E-19f*Y+2.16234952241296E-18f)*
Y-3.107631557965E-17f)*Y+4.570804313173E-16f)*Y-
6.301348858104E-15f)*Y+8.031304476153E-14f)*Y-
9.446196472547E-13f)*Y+1.018245804339E-11f)*Y-
9.96995451348129E-11f )*Y+8.77489010276305E-10f )*Y-
6.84655877575364E-09f )*Y+4.64460857084983E-08f )*Y-
2.66924538268397E-07f )*Y+1.24621276265907E-06f )*Y-
4.30868944351523E-06f )*Y+9.94307982432868E-06f;
} else if (X < 20.f){
Y = X-17.5E+00f;
RT1 = (((((((((( 1.91875764545740E-16f*Y+7.8357401095707E-16f)*Y-
3.260875931644E-14f)*Y-1.186752035569E-13f)*Y+
4.275180095653E-12f)*Y+3.357056136731E-11f)*Y-
1.123776903884E-09f)*Y+1.231203269887E-08f)*Y-
3.99851421361031E-07f )*Y+1.45418822817771E-05f )*Y-
3.49912254976317E-04f )*Y+6.67768703938812E-03f;
RT2 = (((((((((( 2.02778478673555E-15f*Y+1.01640716785099E-14f)*Y-
3.385363492036E-13f)*Y-1.615655871159E-12f)*Y+
4.527419140333E-11f)*Y+3.853670706486E-10f)*Y-
1.184607130107E-08f)*Y+1.347873288827E-07f)*Y-
4.47788241748377E-06f )*Y+1.54942754358273E-04f )*Y-
3.55524254280266E-03f )*Y+6.44912219301603E-02f;
RT3 = (((((((((( 7.79850771456444E-15f*Y+6.00464406395001E-14f)*Y-
1.249779730869E-12f)*Y-1.020720636353E-11f)*Y+
1.814709816693E-10f)*Y+1.766397336977E-09f)*Y-
4.603559449010E-08f)*Y+5.863956443581E-07f)*Y-
2.03797212506691E-05f )*Y+6.31405161185185E-04f )*Y-
1.30102750145071E-02f )*Y+2.10244289044705E-01f;
RT4 = (((((((((((-2.92397030777912E-15f*Y+1.94152129078465E-14f)*Y+
4.859447665850E-13f)*Y-3.217227223463E-12f)*Y-
7.484522135512E-11f)*Y+7.19101516047753E-10f )*Y+
6.88409355245582E-09f )*Y-1.44374545515769E-07f )*Y+
2.74941013315834E-06f )*Y-1.02790452049013E-04f )*Y+
2.59924221372643E-03f )*Y-4.35712368303551E-02f )*Y+
5.62170709585029E-01f;
RT5 = ((((((((((( 1.17976126840060E-14f*Y+1.24156229350669E-13f)*Y-
3.892741622280E-12f)*Y-7.755793199043E-12f)*Y+
9.492190032313E-10f)*Y-4.98680128123353E-09f )*Y-
1.81502268782664E-07f )*Y+2.69463269394888E-06f )*Y+
2.50032154421640E-05f )*Y-1.33684303917681E-03f )*Y+
2.29121951862538E-02f )*Y-2.45653725061323E-01f )*Y+
1.89999883453047E+00f;
WW1 = (((((((((( 1.74841995087592E-15f*Y-6.95671892641256E-16f)*Y-
3.000659497257E-13f)*Y+2.021279817961E-13f)*Y+
3.853596935400E-11f)*Y+1.461418533652E-10f)*Y-
1.014517563435E-08f)*Y+1.132736008979E-07f)*Y-
2.86605475073259E-06f )*Y+1.21958354908768E-04f )*Y-
3.86293751153466E-03f )*Y+1.45298342081522E-01f;
WW2 = ((((((((((-1.11199320525573E-15f*Y+1.85007587796671E-15f)*Y+
1.220613939709E-13f)*Y+1.275068098526E-12f)*Y-
5.341838883262E-11f)*Y+6.161037256669E-10f)*Y-
1.009147879750E-08f)*Y+2.907862965346E-07f)*Y-
6.12300038720919E-06f )*Y+1.00104454489518E-04f )*Y-
1.80677298502757E-03f )*Y+5.78009914536630E-02f;
WW3 = ((((((((((-9.49816486853687E-16f*Y+6.67922080354234E-15f)*Y+
2.606163540537E-15f)*Y+1.983799950150E-12f)*Y-
5.400548574357E-11f)*Y+6.638043374114E-10f)*Y-
8.799518866802E-09f)*Y+1.791418482685E-07f)*Y-
2.96075397351101E-06f )*Y+3.38028206156144E-05f )*Y-
3.58426847857878E-04f )*Y+8.39213709428516E-03f;
WW4 = ((((((((((( 1.33829971060180E-17f*Y-3.44841877844140E-16f)*Y+
4.745009557656E-15f)*Y-6.033814209875E-14f)*Y+
1.049256040808E-12f)*Y-1.70859789556117E-11f )*Y+
2.15219425727959E-10f )*Y-2.52746574206884E-09f )*Y+
3.27761714422960E-08f )*Y-3.90387662925193E-07f )*Y+
3.46340204593870E-06f )*Y-2.43236345136782E-05f )*Y+
3.54846978585226E-04f;
WW5 = ((((((((((((( 2.69412277020887E-20f*Y-4.24837886165685E-19f)*
Y+6.030500065438E-18f)*Y-9.069722758289E-17f)*Y+
1.246599177672E-15f)*Y-1.56872999797549E-14f )*Y+
1.87305099552692E-13f )*Y-2.09498886675861E-12f )*Y+
2.11630022068394E-11f )*Y-1.92566242323525E-10f )*Y+
1.62012436344069E-09f )*Y-1.23621614171556E-08f )*Y+
7.72165684563049E-08f )*Y-3.59858901591047E-07f )*Y+
2.43682618601000E-06f;
} else if (X < 25.f) {
Y = X-22.5E+00f;
RT1 = (((((((((-1.13927848238726E-15f*Y+7.39404133595713E-15f)*Y+
1.445982921243E-13f)*Y-2.676703245252E-12f)*Y+
5.823521627177E-12f)*Y+2.17264723874381E-10f )*Y+
3.56242145897468E-09f )*Y-3.03763737404491E-07f )*Y+
9.46859114120901E-06f )*Y-2.30896753853196E-04f )*Y+
5.24663913001114E-03f;
RT2 = (((((((((( 2.89872355524581E-16f*Y-1.22296292045864E-14f)*Y+
6.184065097200E-14f)*Y+1.649846591230E-12f)*Y-
2.729713905266E-11f)*Y+3.709913790650E-11f)*Y+
2.216486288382E-09f)*Y+4.616160236414E-08f)*Y-
3.32380270861364E-06f )*Y+9.84635072633776E-05f )*Y-
2.30092118015697E-03f )*Y+5.00845183695073E-02f;
RT3 = (((((((((( 1.97068646590923E-15f*Y-4.89419270626800E-14f)*Y+
1.136466605916E-13f)*Y+7.546203883874E-12f)*Y-
9.635646767455E-11f)*Y-8.295965491209E-11f)*Y+
7.534109114453E-09f)*Y+2.699970652707E-07f)*Y-
1.42982334217081E-05f )*Y+3.78290946669264E-04f )*Y-
8.03133015084373E-03f )*Y+1.58689469640791E-01f;
RT4 = (((((((((( 1.33642069941389E-14f*Y-1.55850612605745E-13f)*Y-
7.522712577474E-13f)*Y+3.209520801187E-11f)*Y-
2.075594313618E-10f)*Y-2.070575894402E-09f)*Y+
7.323046997451E-09f)*Y+1.851491550417E-06f)*Y-
6.37524802411383E-05f )*Y+1.36795464918785E-03f )*Y-
2.42051126993146E-02f )*Y+3.97847167557815E-01f;
RT5 = ((((((((((-6.07053986130526E-14f*Y+1.04447493138843E-12f)*Y-
4.286617818951E-13f)*Y-2.632066100073E-10f)*Y+
4.804518986559E-09f)*Y-1.835675889421E-08f)*Y-
1.068175391334E-06f)*Y+3.292234974141E-05f)*Y-
5.94805357558251E-04f )*Y+8.29382168612791E-03f )*Y-
9.93122509049447E-02f )*Y+1.09857804755042E+00f;
WW1 = (((((((((-9.10338640266542E-15f*Y+1.00438927627833E-13f)*Y+
7.817349237071E-13f)*Y-2.547619474232E-11f)*Y+
1.479321506529E-10f)*Y+1.52314028857627E-09f )*Y+
9.20072040917242E-09f )*Y-2.19427111221848E-06f )*Y+
8.65797782880311E-05f )*Y-2.82718629312875E-03f )*Y+
1.28718310443295E-01f;
WW2 = ((((((((( 5.52380927618760E-15f*Y-6.43424400204124E-14f)*Y-
2.358734508092E-13f)*Y+8.261326648131E-12f)*Y+
9.229645304956E-11f)*Y-5.68108973828949E-09f )*Y+
1.22477891136278E-07f )*Y-2.11919643127927E-06f )*Y+
4.23605032368922E-05f )*Y-1.14423444576221E-03f )*Y+
5.06607252890186E-02f;
WW3 = ((((((((( 3.99457454087556E-15f*Y-5.11826702824182E-14f)*Y-
4.157593182747E-14f)*Y+4.214670817758E-12f)*Y+
6.705582751532E-11f)*Y-3.36086411698418E-09f )*Y+
6.07453633298986E-08f )*Y-7.40736211041247E-07f )*Y+
8.84176371665149E-06f )*Y-1.72559275066834E-04f )*Y+
7.16639814253567E-03f;
WW4 = (((((((((((-2.14649508112234E-18f*Y-2.45525846412281E-18f)*Y+
6.126212599772E-16f)*Y-8.526651626939E-15f)*Y+
4.826636065733E-14f)*Y-3.39554163649740E-13f )*Y+
1.67070784862985E-11f )*Y-4.42671979311163E-10f )*Y+
6.77368055908400E-09f )*Y-7.03520999708859E-08f )*Y+
6.04993294708874E-07f )*Y-7.80555094280483E-06f )*Y+
2.85954806605017E-04f;
WW5 = ((((((((((((-5.63938733073804E-21f*Y+6.92182516324628E-20f)*Y-
1.586937691507E-18f)*Y+3.357639744582E-17f)*Y-
4.810285046442E-16f)*Y+5.386312669975E-15f)*Y-
6.117895297439E-14f)*Y+8.441808227634E-13f)*Y-
1.18527596836592E-11f )*Y+1.36296870441445E-10f )*Y-
1.17842611094141E-09f )*Y+7.80430641995926E-09f )*Y-
5.97767417400540E-08f )*Y+1.65186146094969E-06f;
} else if (X < 40.f) {
WW1 = sqrtf(PIE4/X);
E = expf(-X);
RT1 = ((((((((-1.73363958895356E-06f*X+1.19921331441483E-04f)*X -
1.59437614121125E-02f)*X+1.13467897349442E+00f)*X -
4.47216460864586E+01f)*X+1.06251216612604E+03f)*X -
1.52073917378512E+04f)*X+1.20662887111273E+05f)*X -
4.07186366852475E+05f)*E + R15/(X-R15);
RT2 = ((((((((-1.60102542621710E-05f*X+1.10331262112395E-03f)*X -
1.50043662589017E-01f)*X+1.05563640866077E+01f)*X -
4.10468817024806E+02f)*X+9.62604416506819E+03f)*X -
1.35888069838270E+05f)*X+1.06107577038340E+06f)*X -
3.51190792816119E+06f)*E + R25/(X-R25);
RT3 = ((((((((-4.48880032128422E-05f*X+2.69025112122177E-03f)*X -
4.01048115525954E-01f)*X+2.78360021977405E+01f)*X -
1.04891729356965E+03f)*X+2.36985942687423E+04f)*X -
3.19504627257548E+05f)*X+2.34879693563358E+06f)*X -
7.16341568174085E+06f)*E + R35/(X-R35);
RT4 = ((((((((-6.38526371092582E-05f*X-2.29263585792626E-03f)*X -
7.65735935499627E-02f)*X+9.12692349152792E+00f)*X -
2.32077034386717E+02f)*X+2.81839578728845E+02f)*X +
9.59529683876419E+04f)*X-1.77638956809518E+06f)*X +
1.02489759645410E+07f)*E + R45/(X-R45);
RT5 = ((((((((-3.59049364231569E-05f*X-2.25963977930044E-02f)*X +
1.12594870794668E+00f)*X-4.56752462103909E+01f)*X +
1.05804526830637E+03f)*X-1.16003199605875E+04f)*X -
4.07297627297272E+04f)*X+2.22215528319857E+06f)*X -
1.61196455032613E+07f)*E + R55/(X-R55);
WW5 = (((((((((-4.61100906133970E-10f*X+1.43069932644286E-07f)*X -
1.63960915431080E-05f)*X+1.15791154612838E-03f)*X -
5.30573476742071E-02f)*X+1.61156533367153E+00f)*X -
3.23248143316007E+01f)*X+4.12007318109157E+02f)*X -
3.02260070158372E+03f)*X+9.71575094154768E+03f)*E + W55*WW1;
WW4 = (((((((((-2.40799435809950E-08f*X+8.12621667601546E-06f)*X -
9.04491430884113E-04f)*X+6.37686375770059E-02f)*X -
2.96135703135647E+00f)*X+9.15142356996330E+01f)*X -
1.86971865249111E+03f)*X+2.42945528916947E+04f)*X -
1.81852473229081E+05f)*X+5.96854758661427E+05f)*E + W45*WW1;
WW3 = (((((((( 1.83574464457207E-05f*X-1.54837969489927E-03f)*X +
1.18520453711586E-01f)*X-6.69649981309161E+00f)*X +
2.44789386487321E+02f)*X-5.68832664556359E+03f)*X +
8.14507604229357E+04f)*X-6.55181056671474E+05f)*X +
2.26410896607237E+06f)*E + W35*WW1;
WW2 = (((((((( 2.77778345870650E-05f*X-2.22835017655890E-03f)*X +
1.61077633475573E-01f)*X-8.96743743396132E+00f)*X +
3.28062687293374E+02f)*X-7.65722701219557E+03f)*X +
1.10255055017664E+05f)*X-8.92528122219324E+05f)*X +
3.10638627744347E+06f)*E + W25*WW1;
WW1 = WW1-0.01962E+00f*E-WW2-WW3-WW4-WW5;
} else if (X < 59.f) {
WW1 = sqrtf(PIE4/X);
XXX = powf(X,3.f);
E = XXX*expf(-X);
RT1 = (((-2.43758528330205E-02f*X+2.07301567989771E+00f)*X -
6.45964225381113E+01f)*X+7.14160088655470E+02f)*E + R15/(X-R15);
RT2 = (((-2.28861955413636E-01f*X+1.93190784733691E+01f)*X -
5.99774730340912E+02f)*X+6.61844165304871E+03f)*E + R25/(X-R25);
RT3 = (((-6.95053039285586E-01f*X+5.76874090316016E+01f)*X -
1.77704143225520E+03f)*X+1.95366082947811E+04f)*E + R35/(X-R35);
RT4 = (((-1.58072809087018E+00f*X+1.27050801091948E+02f)*X -
3.86687350914280E+03f)*X+4.23024828121420E+04f)*E + R45/(X-R45);
RT5 = (((-3.33963830405396E+00f*X+2.51830424600204E+02f)*X -
7.57728527654961E+03f)*X+8.21966816595690E+04f)*E + R55/(X-R55);
E = XXX*E;
WW5 = (( 1.35482430510942E-08f*X-3.27722199212781E-07f)*X +
2.41522703684296E-06f)*E + W55*WW1;
WW4 = (( 1.23464092261605E-06f*X-3.55224564275590E-05f)*X +
3.03274662192286E-04f)*E + W45*WW1;
WW3 = (( 1.34547929260279E-05f*X-4.19389884772726E-04f)*X +
3.87706687610809E-03f)*E + W35*WW1;
WW2 = (( 2.09539509123135E-05f*X-6.87646614786982E-04f)*X +
6.68743788585688E-03f)*E + W25*WW1;
WW1 = WW1-WW2-WW3-WW4-WW5;
} else {
WW1 = sqrtf(PIE4/X);
RT1 = R15/(X-R15);
RT2 = R25/(X-R25);
RT3 = R35/(X-R35);
RT4 = R45/(X-R45);
RT5 = R55/(X-R55);
WW2 = W25*WW1;
WW3 = W35*WW1;
WW4 = W45*WW1;
WW5 = W55*WW1;
WW1 = WW1-WW2-WW3-WW4-WW5;
}
roots[0] = RT1;
weights[0] = WW1;
roots[1] = RT2;
weights[1] = WW2;
roots[2] = RT3;
weights[2] = WW3;
roots[3] = RT4;
weights[3] = WW4;
roots[4] = RT5;
weights[4] = WW5;
return;
}
__device__ void cuda_Root6(int n,float X, float roots[], float weights[]){
  // Root6 not implemented yet: for n >= 6 the roots/weights arrays are left unmodified
return;
}
__device__ float cuda_Int1d(int i, int j, int k, int l,
float xi, float xj, float xk, float xl,
float alpha_ij_A, float alpha_kl_B, float sqrt_AB,
float A, float B, float Px, float Qx,
float inv_t1, float B00, float B1, float B1p,
float G[][MAXROOTS])
{
// Form G(n,m)=I(n,0,m,0) intermediate values for a Rys polynomial
int n = i+j;
int m = k+l;
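    // n and m are the total angular momentum carried along this Cartesian
    // direction by the bra (i+j) and ket (k+l); the table G[a][b] will hold
    // the auxiliary integrals I(a,0,b,0) for 0 <= a <= n, 0 <= b <= m.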
float xij = xi-xj;
float xkl = xk-xl;
// RecurFactorsGamess
  float C  = (Px-xi) * inv_t1 + (B*(Qx-xi)+A*(Px-xi))*B00*2.0f;
  float Cp = (Qx-xk) * inv_t1 + (B*(Qx-xk)+A*(Px-xk))*B00*2.0f;
// ABD eq 11.
G[0][0] = (float)M_PI * expf(-alpha_ij_A*xij*xij -alpha_kl_B*xkl*xkl) / sqrt_AB;
if (n > 0) { G[1][0] = C *G[0][0]; } // ABD eq 15
if (m > 0) { G[0][1] = Cp*G[0][0]; } // ABD eq 16
for (int a = 2; a < n+1; ++ a) { G[a][0] = B1 *(a-1)*G[a-2][0] + C *G[a-1][0]; }
for (int b = 2; b < m+1; ++ b) { G[0][b] = B1p*(b-1)*G[0][b-2] + Cp*G[0][b-1]; }
if ((m>0) && (n>0)){
for (int a=1; a<n+1; ++a){
G[a][1] = a*B00*G[a-1][0] + Cp*G[a][0];
for (int b=2; b<m+1; ++b)
G[a][b] = B1p*(b-1)*G[a][b-2] + a*B00*G[a-1][b-1] + Cp*G[a][b-1];
}
}
// Compute and output I(i,j,k,l) from I(i+j,0,k+l,0) (G)
float ijkl = 0.0;
for (int m=0; m<l+1; ++m){
float ijm0 = 0.0;
for (int n=0; n<j+1; ++n) // I(i,j,m,0)<-I(n,0,m,0)
ijm0 += cuda_binomial(j,n)*powf(xij,(float)(j-n))*G[n+i][m+k];
ijkl += cuda_binomial(l,m)*powf(xkl,(float)(l-m))*ijm0; // I(i,j,k,l)<-I(i,j,m,0)
}
return ijkl;
}
// calculate ERI over 4 primitive basis functions
__device__ float cuda_rys_pbf(const double *ptr_i, const double *ptr_j,
const double *ptr_k, const double *ptr_l)
{
  // unpack primitive data: each 8-double record holds x, y, z, l, m, n, exponent, coef*norm
float xa = (float)ptr_i[0];
float ya = (float)ptr_i[1];
float za = (float)ptr_i[2];
int la = (int)ptr_i[3];
int ma = (int)ptr_i[4];
int na = (int)ptr_i[5];
float alphaa = (float)ptr_i[6];
float norma = (float)ptr_i[7];
float xb = (float)ptr_j[0];
float yb = (float)ptr_j[1];
float zb = (float)ptr_j[2];
int lb = (int)ptr_j[3];
int mb = (int)ptr_j[4];
int nb = (int)ptr_j[5];
float alphab = (float)ptr_j[6];
float normb = (float)ptr_j[7];
float xc = (float)ptr_k[0];
float yc = (float)ptr_k[1];
float zc = (float)ptr_k[2];
int lc = (int)ptr_k[3];
int mc = (int)ptr_k[4];
int nc = (int)ptr_k[5];
float alphac = (float)ptr_k[6];
float normc = (float)ptr_k[7];
float xd = (float)ptr_l[0];
float yd = (float)ptr_l[1];
float zd = (float)ptr_l[2];
int ld = (int)ptr_l[3];
int md = (int)ptr_l[4];
int nd = (int)ptr_l[5];
float alphad = (float)ptr_l[6];
float normd = (float)ptr_l[7];
// calculate primitive integral [ij|kl]
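  // Rys quadrature: the primitive ERI is evaluated as a sum over norder
  // quadrature roots, each contribution factorizing into one-dimensional
  // integrals Ix*Iy*Iz (ABD eq 5 & 9, see the loop over roots below).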
int norder,i;
float A,B,xp,yp,zp,xq,yq,zq,X,rho,sum,t,Ix,Iy,Iz;
norder = (la+ma+na+lb+nb+mb+lc+mc+nc+ld+md+nd)/2 + 1;
A = alphaa+alphab;
B = alphac+alphad;
xp = (alphaa*xa+alphab*xb)/A;
yp = (alphaa*ya+alphab*yb)/A;
zp = (alphaa*za+alphab*zb)/A;
xq = (alphac*xc+alphad*xd)/B;
yq = (alphac*yc+alphad*yd)/B;
zq = (alphac*zc+alphad*zd)/B;
rho = A*B/(A+B);
X = rho * ((xp-xq)*(xp-xq)+(yp-yq)*(yp-yq)+(zp-zq)*(zp-zq));
float alpha_ab_A = alphaa * alphab / A;
float alpha_cd_B = alphac * alphad / B;
float sqrt_AB = sqrtf(A * B);
float roots[MAXROOTS],weights[MAXROOTS];
float G[MAXROOTS][MAXROOTS];
  cuda_Roots(norder,X,roots,weights); // get correct roots/weights
sum = 0.;
for (i=0; i<norder; ++i){
t = roots[i];
float inv_t1, B00, B1, B1p;
inv_t1 = 1.f / (1.f + t);
B00 = 0.5f * t/(A+B) * inv_t1;
B1 = 0.5f / A * inv_t1 + B00;
B1p = 0.5f / B * inv_t1 + B00;
Ix = cuda_Int1d(la,lb,lc,ld, xa,xb,xc,xd,
alpha_ab_A,alpha_cd_B,sqrt_AB, A,B,xp,xq, inv_t1,B00,B1,B1p, G);
Iy = cuda_Int1d(ma,mb,mc,md, ya,yb,yc,yd,
alpha_ab_A,alpha_cd_B,sqrt_AB, A,B,yp,yq, inv_t1,B00,B1,B1p, G);
Iz = cuda_Int1d(na,nb,nc,nd, za,zb,zc,zd,
alpha_ab_A,alpha_cd_B,sqrt_AB, A,B,zp,zq, inv_t1,B00,B1,B1p, G);
sum = sum + Ix*Iy*Iz*weights[i]; /* ABD eq 5 & 9 */
}
// inv_sqrt_pi_2: 2.0*sqrtf(1.0/M_PI) = 1.12837916709551255856
return 1.12837916709551255856f * sqrtf(rho)*norma*normb*normc*normd*sum; /* ABD eq 5 & 9 */
}
// calculate J matrix using 1-thread-1-primitive-integral scheme
__global__ void cuda_mat_J_PI(
const double *__restrict pbf_xlec,
const int *__restrict pbf_to_cbf,
int n_pbf,
const double *__restrict mat_D,
double *__restrict mat_J_PI,
const double *__restrict mat_Q)
{
__shared__ double elem_J_PI[BLOCKSIZE * BLOCKSIZE];
  // each block scans over [ij|??] and sums the contributions into one primitive J matrix element
int i = blockIdx.x;
int j = blockIdx.y;
// avoid accessing out of bounds elements and make use of i<=>j symmetry
if (i >= n_pbf || j > i) { return; }
int ij = cuda_ij2intindex(i,j);
const double *ptr_i = &pbf_xlec[i * 8];
const double *ptr_j = &pbf_xlec[j * 8];
int a = pbf_to_cbf[i];
int b = pbf_to_cbf[j];
int ab = cuda_ij2intindex(a,b);
// initialize shared array
elem_J_PI[threadIdx.x * BLOCKSIZE + threadIdx.y] = 0.0;
for (int k = threadIdx.x; k < n_pbf; k += BLOCKSIZE)
{
int c = pbf_to_cbf[k];
const double *ptr_k = &pbf_xlec[k * 8];
// NOTE: make use of k<=>l symmetry
for (int l = threadIdx.y; l <= k; l += BLOCKSIZE)
{
int d = pbf_to_cbf[l];
int cd = cuda_ij2intindex(c,d);
        // Schwarz screening
if (fabs(mat_Q[ab] * mat_Q[cd] * mat_D[cd]) < SCREEN_THR) { continue; }
const double *ptr_l = &pbf_xlec[l * 8];
// calculate ERI
double this_eri = cuda_rys_pbf(ptr_i, ptr_j, ptr_k, ptr_l);
// NOTE: doubling for off-diagonal elements of D due to k<=>l symmetry
elem_J_PI[threadIdx.x *BLOCKSIZE + threadIdx.y] += this_eri * mat_D[cd] * (k == l ? 1.0 : 2.0);
}
}
__syncthreads();
// only update mat_J_PI on one thread of the block
if (0 == threadIdx.x && 0 == threadIdx.y)
{
mat_J_PI[ij] = 0.0;
for (int t1 = 0; t1 < BLOCKSIZE; ++ t1) {
for (int t2 = 0; t2 < BLOCKSIZE; ++ t2) {
mat_J_PI[ij] += elem_J_PI[t1 * BLOCKSIZE + t2];
}
}
}
}
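// Hypothetical host-side launch sketch (not part of the original file; the
// d_* device pointer names are assumptions): one block per (i,j) primitive
// pair with a BLOCKSIZE x BLOCKSIZE thread tile, matching the blockIdx /
// threadIdx usage above. cuda_mat_K_PI below is launched the same way, with
// blockIdx.y indexing k instead of j.
//
//   dim3 grid(n_pbf, n_pbf);
//   dim3 block(BLOCKSIZE, BLOCKSIZE);
//   cuda_mat_J_PI<<<grid, block>>>(d_pbf_xlec, d_pbf_to_cbf, n_pbf,
//                                  d_mat_D, d_mat_J_PI, d_mat_Q);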
// calculate K matrix using 1-thread-1-primitive-integral scheme
__global__ void cuda_mat_K_PI(
const double *__restrict pbf_xlec,
const int *__restrict pbf_to_cbf,
int n_pbf,
const double *__restrict mat_D,
double *__restrict mat_K_PI,
const double *__restrict mat_Q)
{
__shared__ double elem_K_PI[BLOCKSIZE * BLOCKSIZE];
  // each block scans over [i?|k?] and sums the contributions into one primitive K matrix element
int i = blockIdx.x;
int k = blockIdx.y;
// avoid accessing out of bounds elements and make use of ij<=>kl symmetry
if (i >= n_pbf || k > i) { return; }
int ik = cuda_ij2intindex(i,k);
const double *ptr_i = &pbf_xlec[i * 8];
const double *ptr_k = &pbf_xlec[k * 8];
int a = pbf_to_cbf[i];
int c = pbf_to_cbf[k];
// initialize shared array
elem_K_PI[threadIdx.x * BLOCKSIZE + threadIdx.y] = 0.0;
for (int j = threadIdx.x; j < n_pbf; j += BLOCKSIZE)
{
int b = pbf_to_cbf[j];
int ab = cuda_ij2intindex(a,b);
const double *ptr_j = &pbf_xlec[j * 8];
for (int l = threadIdx.y; l < n_pbf; l += BLOCKSIZE)
{
int d = pbf_to_cbf[l];
int cd = cuda_ij2intindex(c,d);
int bd = cuda_ij2intindex(b,d);
      // Schwarz screening
if (fabs(mat_Q[ab] * mat_Q[cd] * mat_D[bd]) < SCREEN_THR) { continue; }
const double *ptr_l = &pbf_xlec[l * 8];
// calculate ERI
double this_eri = cuda_rys_pbf(ptr_i, ptr_j, ptr_k, ptr_l);
// NOTE: no doubling for off-diagonal elements of D
elem_K_PI[threadIdx.x * BLOCKSIZE + threadIdx.y] += this_eri * mat_D[bd];
}
}
__syncthreads();
// only update mat_K_PI on one thread of the block
if (0 == threadIdx.x && 0 == threadIdx.y)
{
mat_K_PI[ik] = 0.0;
for (int t1 = 0; t1 < BLOCKSIZE; ++ t1) {
for (int t2 = 0; t2 < BLOCKSIZE; ++ t2) {
mat_K_PI[ik] += elem_K_PI[t1 * BLOCKSIZE + t2];
}
}
}
}
b026f981424e387519eb94197fbc9b1a305ff59f.cu
/*****************************************************************************
This file is part of the XLQC program.
Copyright (C) 2015 Xin Li <[email protected]>
Filename: cuda_rys_sp.cu
License: BSD 3-Clause License
* The implementation of Rys quadrature routines in C is taken from the
* PyQuante quantum chemistry program, Copyright (c) 2004, Richard P. Muller.
* PyQuante version 1.2 and later is covered by the modified BSD license.
* Please see int_lib/LICENSE.
This software is provided by the copyright holders and contributors "as is"
and any express or implied warranties, including, but not limited to, the
implied warranties of merchantability and fitness for a particular purpose are
disclaimed. In no event shall the copyright holder or contributors be liable
for any direct, indirect, incidental, special, exemplary, or consequential
damages (including, but not limited to, procurement of substitute goods or
services; loss of use, data, or profits; or business interruption) however
caused and on any theory of liability, whether in contract, strict liability,
or tort (including negligence or otherwise) arising in any way out of the use
of this software, even if advised of the possibility of such damage.
*****************************************************************************/
#include <string>
#include "typedef.h"
#include "cuda_rys_sp.h"
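// Error-checking helper; a hypothetical call site (not from this file) would be:
//   my_cuda_safe(cudaMemcpy(dev_D, host_D, n_bytes, cudaMemcpyHostToDevice), "copy D to device");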
void my_cuda_safe(cudaError_t err, std::string word)
{
    if(err != cudaSuccess)
    {
        // report the failing call together with its error string
        fprintf(stderr, "Error during %s: %s\n", word.c_str(), cudaGetErrorString(err));
        // also pick up any pending asynchronous error from earlier launches
        cudaDeviceSynchronize();
        cudaError_t error = cudaGetLastError();
        if(error != cudaSuccess)
        {
            fprintf(stderr, "CUDA error: %s\n", cudaGetErrorString(error));
        }
        exit(-1);
    }
}
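// Map an unordered pair (i,j) to a packed lower-triangle offset:
// (0,0)->0, (1,0)->1, (1,1)->2, (2,0)->3, (2,1)->4, ...
// Used to index the symmetric D, Q, J and K matrices stored as 1-D arrays.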
__device__ int cuda_ij2intindex(int i, int j)
{
if (i < j) {
int t = i; i = j; j = t;
}
return i * (i + 1) / 2 + j;
}
__device__ int cuda_fact(int n){
int result = 1;
for (int i = 2; i <= n; i++) result *= i;
return result;
}
__device__ int cuda_binomial(int a, int b){
return cuda_fact(a)/(cuda_fact(b)*cuda_fact(a-b));
}
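// Dispatch on the number of Rys quadrature points: n <= 3, n == 4 and n == 5
// use dedicated polynomial fits below; larger n is delegated to cuda_Root6.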
__device__ void cuda_Roots(int n, float X, float roots[], float weights[]){
if (n <= 3)
cuda_Root123(n,X, roots,weights);
else if (n==4)
cuda_Root4(X, roots,weights);
else if (n==5)
cuda_Root5(X, roots,weights);
else
cuda_Root6(n,X, roots,weights);
return;
}
__device__ void cuda_Root123(int n, float X, float roots[], float weights[]){
float R12, PIE4, R22, W22, R13, R23, W23, R33, W33;
float RT1=0,RT2=0,RT3=0,WW1=0,WW2=0,WW3=0;
float F1,F2,E,T1,T2,T3,A1,A2,Y;
R12 = 2.75255128608411E-01f;
PIE4 = 7.85398163397448E-01f;
R22 = 2.72474487139158E+00f;
W22 = 9.17517095361369E-02f;
R13 = 1.90163509193487E-01f;
R23 = 1.78449274854325E+00f;
W23 = 1.77231492083829E-01f;
R33 = 5.52534374226326E+00f;
W33 = 5.11156880411248E-03f;
if (X < 3.e-7f){
if (n == 1){
RT1 = 0.5E+00f -X/5.0E+00f;
WW1 = 1.0E+00f -X/3.0E+00f;
} else if (n == 2) {
RT1 = 1.30693606237085E-01f -2.90430236082028E-02f *X;
RT2 = 2.86930639376291E+00f -6.37623643058102E-01f *X;
WW1 = 6.52145154862545E-01f -1.22713621927067E-01f *X;
WW2 = 3.47854845137453E-01f -2.10619711404725E-01f *X;
} else if (n == 3) {
RT1 = 6.03769246832797E-02f -9.28875764357368E-03f *X;
RT2 = 7.76823355931043E-01f -1.19511285527878E-01f *X;
RT3 = 6.66279971938567E+00f -1.02504611068957E+00f *X;
WW1 = 4.67913934572691E-01f -5.64876917232519E-02f *X;
WW2 = 3.60761573048137E-01f -1.49077186455208E-01f *X;
WW3 = 1.71324492379169E-01f -1.27768455150979E-01f *X;
}
} else if (X < 1.f) {
if (n == 1){
F1 = ((((((((-8.36313918003957E-08f*X+1.21222603512827E-06f )*X-
1.15662609053481E-05f )*X+9.25197374512647E-05f )*X-
6.40994113129432E-04f )*X+3.78787044215009E-03f )*X-
1.85185172458485E-02f )*X+7.14285713298222E-02f )*X-
1.99999999997023E-01f )*X+3.33333333333318E-01f;
WW1 = (X+X)*F1+expf(-X);
RT1 = F1/(WW1-F1);
} else if (n == 2) {
F1 = ((((((((-8.36313918003957E-08f*X+1.21222603512827E-06f )*X-
1.15662609053481E-05f )*X+9.25197374512647E-05f )*X-
6.40994113129432E-04f )*X+3.78787044215009E-03f )*X-
1.85185172458485E-02f )*X+7.14285713298222E-02f )*X-
1.99999999997023E-01f )*X+3.33333333333318E-01f;
WW1 = (X+X)*F1+expf(-X);
RT1 = (((((((-2.35234358048491E-09f*X+2.49173650389842E-08f)*X-
4.558315364581E-08f)*X-2.447252174587E-06f)*X+
4.743292959463E-05f)*X-5.33184749432408E-04f )*X+
4.44654947116579E-03f )*X-2.90430236084697E-02f )*X+
1.30693606237085E-01f;
RT2 = (((((((-2.47404902329170E-08f*X+2.36809910635906E-07f)*X+
1.835367736310E-06f)*X-2.066168802076E-05f)*X-
1.345693393936E-04f)*X-5.88154362858038E-05f )*X+
5.32735082098139E-02f )*X-6.37623643056745E-01f )*X+
2.86930639376289E+00f;
WW2 = ((F1-WW1)*RT1+F1)*(1.0E+00f+RT2)/(RT2-RT1);
WW1 = WW1-WW2;
} else if (n==3){
RT1 = ((((((-5.10186691538870E-10f*X+2.40134415703450E-08f)*X-
5.01081057744427E-07f )*X+7.58291285499256E-06f )*X-
9.55085533670919E-05f )*X+1.02893039315878E-03f )*X-
9.28875764374337E-03f )*X+6.03769246832810E-02f;
RT2 = ((((((-1.29646524960555E-08f*X+7.74602292865683E-08f)*X+
1.56022811158727E-06f )*X-1.58051990661661E-05f )*X-
3.30447806384059E-04f )*X+9.74266885190267E-03f )*X-
1.19511285526388E-01f )*X+7.76823355931033E-01f;
RT3 = ((((((-9.28536484109606E-09f*X-3.02786290067014E-07f)*X-
2.50734477064200E-06f )*X-7.32728109752881E-06f )*X+
2.44217481700129E-04f )*X+4.94758452357327E-02f )*X-
1.02504611065774E+00f )*X+6.66279971938553E+00f;
F2 = ((((((((-7.60911486098850E-08f*X+1.09552870123182E-06f )*X-
1.03463270693454E-05f )*X+8.16324851790106E-05f )*X-
5.55526624875562E-04f )*X+3.20512054753924E-03f )*X-
1.51515139838540E-02f )*X+5.55555554649585E-02f )*X-
1.42857142854412E-01f )*X+1.99999999999986E-01f;
E = expf(-X);
F1 = ((X+X)*F2+E)/3.0E+00f;
WW1 = (X+X)*F1+E;
T1 = RT1/(RT1+1.0E+00f);
T2 = RT2/(RT2+1.0E+00f);
T3 = RT3/(RT3+1.0E+00f);
A2 = F2-T1*F1;
A1 = F1-T1*WW1;
WW3 = (A2-T2*A1)/((T3-T2)*(T3-T1));
WW2 = (T3*A1-A2)/((T3-T2)*(T2-T1));
WW1 = WW1-WW2-WW3;
}
} else if (X < 3.f) {
Y = X-2.0E+00f;
if (n == 1) {
F1 = ((((((((((-1.61702782425558E-10f*Y+1.96215250865776E-09f )*Y-
2.14234468198419E-08f )*Y+2.17216556336318E-07f )*Y-
1.98850171329371E-06f )*Y+1.62429321438911E-05f )*Y-
1.16740298039895E-04f )*Y+7.24888732052332E-04f )*Y-
3.79490003707156E-03f )*Y+1.61723488664661E-02f )*Y-
5.29428148329736E-02f )*Y+1.15702180856167E-01f;
WW1 = (X+X)*F1+expf(-X);
RT1 = F1/(WW1-F1);
} else if (n == 2) {
F1 = ((((((((((-1.61702782425558E-10f*Y+1.96215250865776E-09f )*Y-
2.14234468198419E-08f )*Y+2.17216556336318E-07f )*Y-
1.98850171329371E-06f )*Y+1.62429321438911E-05f )*Y-
1.16740298039895E-04f )*Y+7.24888732052332E-04f )*Y-
3.79490003707156E-03f )*Y+1.61723488664661E-02f )*Y-
5.29428148329736E-02f )*Y+1.15702180856167E-01f;
WW1 = (X+X)*F1+expf(-X);
RT1 = (((((((((-6.36859636616415E-12f*Y+8.47417064776270E-11f)*Y-
5.152207846962E-10f)*Y-3.846389873308E-10f)*Y+
8.472253388380E-08f)*Y-1.85306035634293E-06f )*Y+
2.47191693238413E-05f )*Y-2.49018321709815E-04f )*Y+
2.19173220020161E-03f )*Y-1.63329339286794E-02f )*Y+
8.68085688285261E-02f;
RT2 = ((((((((( 1.45331350488343E-10f*Y+2.07111465297976E-09f)*Y-
1.878920917404E-08f)*Y-1.725838516261E-07f)*Y+
2.247389642339E-06f)*Y+9.76783813082564E-06f )*Y-
1.93160765581969E-04f )*Y-1.58064140671893E-03f )*Y+
4.85928174507904E-02f )*Y-4.30761584997596E-01f )*Y+
1.80400974537950E+00f;
WW2 = ((F1-WW1)*RT1+F1)*(1.0E+00f+RT2)/(RT2-RT1);
WW1 = WW1-WW2;
} else if (n == 3) {
RT1 = (((((((( 1.44687969563318E-12f*Y+4.85300143926755E-12f)*Y-
6.55098264095516E-10f )*Y+1.56592951656828E-08f )*Y-
2.60122498274734E-07f )*Y+3.86118485517386E-06f )*Y-
5.13430986707889E-05f )*Y+6.03194524398109E-04f )*Y-
6.11219349825090E-03f )*Y+4.52578254679079E-02f;
RT2 = ((((((( 6.95964248788138E-10f*Y-5.35281831445517E-09f)*Y-
6.745205954533E-08f)*Y+1.502366784525E-06f)*Y+
9.923326947376E-07f)*Y-3.89147469249594E-04f )*Y+
7.51549330892401E-03f )*Y-8.48778120363400E-02f )*Y+
5.73928229597613E-01f;
RT3 = ((((((((-2.81496588401439E-10f*Y+3.61058041895031E-09f)*Y+
4.53631789436255E-08f )*Y-1.40971837780847E-07f )*Y-
6.05865557561067E-06f )*Y-5.15964042227127E-05f )*Y+
3.34761560498171E-05f )*Y+5.04871005319119E-02f )*Y-
8.24708946991557E-01f )*Y+4.81234667357205E+00f;
F2 = ((((((((((-1.48044231072140E-10f*Y+1.78157031325097E-09f )*Y-
1.92514145088973E-08f )*Y+1.92804632038796E-07f )*Y-
1.73806555021045E-06f )*Y+1.39195169625425E-05f )*Y-
9.74574633246452E-05f )*Y+5.83701488646511E-04f )*Y-
2.89955494844975E-03f )*Y+1.13847001113810E-02f )*Y-
3.23446977320647E-02f )*Y+5.29428148329709E-02f;
E = expf(-X);
F1 = ((X+X)*F2+E)/3.0E+00f;
WW1 = (X+X)*F1+E;
T1 = RT1/(RT1+1.0E+00f);
T2 = RT2/(RT2+1.0E+00f);
T3 = RT3/(RT3+1.0E+00f);
A2 = F2-T1*F1;
A1 = F1-T1*WW1;
WW3 = (A2-T2*A1)/((T3-T2)*(T3-T1));
WW2 = (T3*A1-A2)/((T3-T2)*(T2-T1));
WW1 = WW1-WW2-WW3;
}
} else if (X < 5.f){
Y = X-4.0E+00f;
if (n == 1){
F1 = ((((((((((-2.62453564772299E-11f*Y+3.24031041623823E-10f )*Y-
3.614965656163E-09f)*Y+3.760256799971E-08f)*Y-
3.553558319675E-07f)*Y+3.022556449731E-06f)*Y-
2.290098979647E-05f)*Y+1.526537461148E-04f)*Y-
8.81947375894379E-04f )*Y+4.33207949514611E-03f )*Y-
1.75257821619926E-02f )*Y+5.28406320615584E-02f;
WW1 = (X+X)*F1+expf(-X);
RT1 = F1/(WW1-F1);
} else if (n == 2) {
F1 = ((((((((((-2.62453564772299E-11f*Y+3.24031041623823E-10f )*Y-
3.614965656163E-09f)*Y+3.760256799971E-08f)*Y-
3.553558319675E-07f)*Y+3.022556449731E-06f)*Y-
2.290098979647E-05f)*Y+1.526537461148E-04f)*Y-
8.81947375894379E-04f )*Y+4.33207949514611E-03f )*Y-
1.75257821619926E-02f )*Y+5.28406320615584E-02f;
WW1 = (X+X)*F1+expf(-X);
RT1 = ((((((((-4.11560117487296E-12f*Y+7.10910223886747E-11f)*Y-
1.73508862390291E-09f )*Y+5.93066856324744E-08f )*Y-
9.76085576741771E-07f )*Y+1.08484384385679E-05f )*Y-
1.12608004981982E-04f )*Y+1.16210907653515E-03f )*Y-
9.89572595720351E-03f )*Y+6.12589701086408E-02f;
RT2 = (((((((((-1.80555625241001E-10f*Y+5.44072475994123E-10f)*Y+
1.603498045240E-08f)*Y-1.497986283037E-07f)*Y-
7.017002532106E-07f)*Y+1.85882653064034E-05f )*Y-
2.04685420150802E-05f )*Y-2.49327728643089E-03f )*Y+
3.56550690684281E-02f )*Y-2.60417417692375E-01f )*Y+
1.12155283108289E+00f;
WW2 = ((F1-WW1)*RT1+F1)*(1.0E+00f+RT2)/(RT2-RT1);
WW1 = WW1-WW2;
} else if (n == 3) {
RT1 = ((((((( 1.44265709189601E-11f*Y-4.66622033006074E-10f)*Y+
7.649155832025E-09f)*Y-1.229940017368E-07f)*Y+
2.026002142457E-06f)*Y-2.87048671521677E-05f )*Y+
3.70326938096287E-04f )*Y-4.21006346373634E-03f )*Y+
3.50898470729044E-02f;
RT2 = ((((((((-2.65526039155651E-11f*Y+1.97549041402552E-10f)*Y+
2.15971131403034E-09f )*Y-7.95045680685193E-08f )*Y+
5.15021914287057E-07f )*Y+1.11788717230514E-05f )*Y-
3.33739312603632E-04f )*Y+5.30601428208358E-03f )*Y-
5.93483267268959E-02f )*Y+4.31180523260239E-01f;
RT3 = ((((((((-3.92833750584041E-10f*Y-4.16423229782280E-09f)*Y+
4.42413039572867E-08f )*Y+6.40574545989551E-07f )*Y-
3.05512456576552E-06f )*Y-1.05296443527943E-04f )*Y-
6.14120969315617E-04f )*Y+4.89665802767005E-02f )*Y-
6.24498381002855E-01f )*Y+3.36412312243724E+00f;
F2 = ((((((((((-2.36788772599074E-11f*Y+2.89147476459092E-10f )*Y-
3.18111322308846E-09f )*Y+3.25336816562485E-08f )*Y-
3.00873821471489E-07f )*Y+2.48749160874431E-06f )*Y-
1.81353179793672E-05f )*Y+1.14504948737066E-04f )*Y-
6.10614987696677E-04f )*Y+2.64584212770942E-03f )*Y-
8.66415899015349E-03f )*Y+1.75257821619922E-02f;
E = expf(-X);
F1 = ((X+X)*F2+E)/3.0E+00f;
WW1 = (X+X)*F1+E;
T1 = RT1/(RT1+1.0E+00f);
T2 = RT2/(RT2+1.0E+00f);
T3 = RT3/(RT3+1.0E+00f);
A2 = F2-T1*F1;
A1 = F1-T1*WW1;
WW3 = (A2-T2*A1)/((T3-T2)*(T3-T1));
WW2 = (T3*A1-A2)/((T3-T2)*(T2-T1));
WW1 = WW1-WW2-WW3;
}
} else if (X < 10.f) {
E = expf(-X);
WW1 = (((((( 4.6897511375022E-01f/X-6.9955602298985E-01f)/X +
5.3689283271887E-01f)/X-3.2883030418398E-01f)/X +
2.4645596956002E-01f)/X-4.9984072848436E-01f)/X -
3.1501078774085E-06f)*E + sqrtf(PIE4/X);
F1 = (WW1-E)/(X+X);
if (n == 1)
RT1 = F1/(WW1-F1);
else if (n == 2){
Y = X-7.5E+00f;
RT1 = (((((((((((((-1.43632730148572E-16f*Y+2.38198922570405E-16f)*
Y+1.358319618800E-14f)*Y-7.064522786879E-14f)*Y-
7.719300212748E-13f)*Y+7.802544789997E-12f)*Y+
6.628721099436E-11f)*Y-1.775564159743E-09f)*Y+
1.713828823990E-08f)*Y-1.497500187053E-07f)*Y+
2.283485114279E-06f)*Y-3.76953869614706E-05f )*Y+
4.74791204651451E-04f )*Y-4.60448960876139E-03f )*Y+
3.72458587837249E-02f;
RT2 = (((((((((((( 2.48791622798900E-14f*Y-1.36113510175724E-13f)*Y-
2.224334349799E-12f)*Y+4.190559455515E-11f)*Y-
2.222722579924E-10f)*Y-2.624183464275E-09f)*Y+
6.128153450169E-08f)*Y-4.383376014528E-07f)*Y-
2.49952200232910E-06f )*Y+1.03236647888320E-04f )*Y-
1.44614664924989E-03f )*Y+1.35094294917224E-02f )*Y-
9.53478510453887E-02f )*Y+5.44765245686790E-01f;
WW2 = ((F1-WW1)*RT1+F1)*(1.0E+00f+RT2)/(RT2-RT1);
WW1 = WW1-WW2;
} else if (n == 3) {
F2 = (F1+F1+F1-E)/(X+X);
Y = X-7.5E+00f;
RT1 = ((((((((((( 5.74429401360115E-16f*Y+7.11884203790984E-16f)*Y-
6.736701449826E-14f)*Y-6.264613873998E-13f)*Y+
1.315418927040E-11f)*Y-4.23879635610964E-11f )*Y+
1.39032379769474E-09f )*Y-4.65449552856856E-08f )*Y+
7.34609900170759E-07f )*Y-1.08656008854077E-05f )*Y+
1.77930381549953E-04f )*Y-2.39864911618015E-03f )*Y+
2.39112249488821E-02f;
RT2 = ((((((((((( 1.13464096209120E-14f*Y+6.99375313934242E-15f)*Y-
8.595618132088E-13f)*Y-5.293620408757E-12f)*Y-
2.492175211635E-11f)*Y+2.73681574882729E-09f )*Y-
1.06656985608482E-08f )*Y-4.40252529648056E-07f )*Y+
9.68100917793911E-06f )*Y-1.68211091755327E-04f )*Y+
2.69443611274173E-03f )*Y-3.23845035189063E-02f )*Y+
2.75969447451882E-01f;
RT3 = (((((((((((( 6.66339416996191E-15f*Y+1.84955640200794E-13f)*Y-
1.985141104444E-12f)*Y-2.309293727603E-11f)*Y+
3.917984522103E-10f)*Y+1.663165279876E-09f)*Y-
6.205591993923E-08f)*Y+8.769581622041E-09f)*Y+
8.97224398620038E-06f )*Y-3.14232666170796E-05f )*Y-
1.83917335649633E-03f )*Y+3.51246831672571E-02f )*Y-
3.22335051270860E-01f )*Y+1.73582831755430E+00f;
T1 = RT1/(RT1+1.0E+00f);
T2 = RT2/(RT2+1.0E+00f);
T3 = RT3/(RT3+1.0E+00f);
A2 = F2-T1*F1;
A1 = F1-T1*WW1;
WW3 = (A2-T2*A1)/((T3-T2)*(T3-T1));
WW2 = (T3*A1-A2)/((T3-T2)*(T2-T1));
WW1 = WW1-WW2-WW3;
}
} else if (X < 15.f) {
E = expf(-X);
WW1 = (((-1.8784686463512E-01f/X+2.2991849164985E-01f)/X -
4.9893752514047E-01f)/X-2.1916512131607E-05f)*E
+ sqrtf(PIE4/X);
F1 = (WW1-E)/(X+X);
if (n == 1)
RT1 = F1/(WW1-F1);
else if (n == 2) {
RT1 = ((((-1.01041157064226E-05f*X+1.19483054115173E-03f)*X -
6.73760231824074E-02f)*X+1.25705571069895E+00f)*X +
(((-8.57609422987199E+03f/X+5.91005939591842E+03f)/X -
1.70807677109425E+03f)/X+2.64536689959503E+02f)/X -
2.38570496490846E+01f)*E + R12/(X-R12);
RT2 = ((( 3.39024225137123E-04f*X-9.34976436343509E-02f)*X -
4.22216483306320E+00f)*X +
(((-2.08457050986847E+03f/X -
1.04999071905664E+03f)/X+3.39891508992661E+02f)/X -
1.56184800325063E+02f)/X+8.00839033297501E+00f)*E + R22/(X-R22);
WW2 = ((F1-WW1)*RT1+F1)*(1.0E+00f+RT2)/(RT2-RT1);
WW1 = WW1-WW2;
} else if (n == 3) {
F2 = (F1+F1+F1-E)/(X+X);
Y = X-12.5E+00f;
RT1 = ((((((((((( 4.42133001283090E-16f*Y-2.77189767070441E-15f)*Y-
4.084026087887E-14f)*Y+5.379885121517E-13f)*Y+
1.882093066702E-12f)*Y-8.67286219861085E-11f )*Y+
7.11372337079797E-10f )*Y-3.55578027040563E-09f )*Y+
1.29454702851936E-07f )*Y-4.14222202791434E-06f )*Y+
8.04427643593792E-05f )*Y-1.18587782909876E-03f )*Y+
1.53435577063174E-02f;
RT2 = ((((((((((( 6.85146742119357E-15f*Y-1.08257654410279E-14f)*Y-
8.579165965128E-13f)*Y+6.642452485783E-12f)*Y+
4.798806828724E-11f)*Y-1.13413908163831E-09f )*Y+
7.08558457182751E-09f )*Y-5.59678576054633E-08f )*Y+
2.51020389884249E-06f )*Y-6.63678914608681E-05f )*Y+
1.11888323089714E-03f )*Y-1.45361636398178E-02f )*Y+
1.65077877454402E-01f;
RT3 = (((((((((((( 3.20622388697743E-15f*Y-2.73458804864628E-14f)*Y-
3.157134329361E-13f)*Y+8.654129268056E-12f)*Y-
5.625235879301E-11f)*Y-7.718080513708E-10f)*Y+
2.064664199164E-08f)*Y-1.567725007761E-07f)*Y-
1.57938204115055E-06f )*Y+6.27436306915967E-05f )*Y-
1.01308723606946E-03f )*Y+1.13901881430697E-02f )*Y-
1.01449652899450E-01f )*Y+7.77203937334739E-01f;
T1 = RT1/(RT1+1.0E+00f);
T2 = RT2/(RT2+1.0E+00f);
T3 = RT3/(RT3+1.0E+00f);
A2 = F2-T1*F1;
A1 = F1-T1*WW1;
WW3 = (A2-T2*A1)/((T3-T2)*(T3-T1));
WW2 = (T3*A1-A2)/((T3-T2)*(T2-T1));
WW1 = WW1-WW2-WW3;
}
} else if (X < 33.f) {
E = expf(-X);
WW1 = (( 1.9623264149430E-01f/X-4.9695241464490E-01f)/X -
6.0156581186481E-05f)*E + sqrtf(PIE4/X);
F1 = (WW1-E)/(X+X);
if (n == 1)
RT1 = F1/(WW1-F1);
else if (n == 2){
RT1 = ((((-1.14906395546354E-06f*X+1.76003409708332E-04f)*X -
1.71984023644904E-02f)*X-1.37292644149838E-01f)*X +
(-4.75742064274859E+01f/X+9.21005186542857E+00f)/X -
2.31080873898939E-02f)*E + R12/(X-R12);
RT2 = ((( 3.64921633404158E-04f*X-9.71850973831558E-02f)*X -
4.02886174850252E+00f)*X +
(-1.35831002139173E+02f/X -
8.66891724287962E+01f)/X+2.98011277766958E+00f)*E + R22/(X-R22);
WW2 = ((F1-WW1)*RT1+F1)*(1.0E+00f+RT2)/(RT2-RT1);
WW1 = WW1-WW2;
} else if (n == 3) {
F2 = (F1+F1+F1-E)/(X+X);
if (X < 20.f) {
RT1 = ((((((-2.43270989903742E-06f*X+3.57901398988359E-04f)*X -
2.34112415981143E-02f)*X+7.81425144913975E-01f)*X -
1.73209218219175E+01f)*X+2.43517435690398E+02f)*X +
(-1.97611541576986E+04f/X+9.82441363463929E+03f)/X -
2.07970687843258E+03f)*E + R13/(X-R13);
RT2 = (((((-2.62627010965435E-04f*X+3.49187925428138E-02f)*X -
3.09337618731880E+00f)*X+1.07037141010778E+02f)*X -
2.36659637247087E+03f)*X +
((-2.91669113681020E+06f/X +
1.41129505262758E+06f)/X-2.91532335433779E+05f)/X +
3.35202872835409E+04f)*E + R23/(X-R23);
RT3 = ((((( 9.31856404738601E-05f*X-2.87029400759565E-02f)*X -
7.83503697918455E-01f)*X-1.84338896480695E+01f)*X +
4.04996712650414E+02f)*X +
(-1.89829509315154E+05f/X +
5.11498390849158E+04f)/X-6.88145821789955E+03f)*E
+ R33/(X-R33);
} else {
RT1 = ((((-4.97561537069643E-04f*X-5.00929599665316E-02f)*X +
1.31099142238996E+00f)*X-1.88336409225481E+01f)*X -
6.60344754467191E+02f /X+1.64931462413877E+02f)*E
+ R13/(X-R13);
RT2 = ((((-4.48218898474906E-03f*X-5.17373211334924E-01f)*X +
1.13691058739678E+01f)*X-1.65426392885291E+02f)*X -
6.30909125686731E+03f /X+1.52231757709236E+03f)*E
+ R23/(X-R23);
RT3 = ((((-1.38368602394293E-02f*X-1.77293428863008E+00f)*X +
1.73639054044562E+01f)*X-3.57615122086961E+02f)*X -
1.45734701095912E+04f /X+2.69831813951849E+03f)*E
+ R33/(X-R33);
}
T1 = RT1/(RT1+1.0E+00f);
T2 = RT2/(RT2+1.0E+00f);
T3 = RT3/(RT3+1.0E+00f);
A2 = F2-T1*F1;
A1 = F1-T1*WW1;
WW3 = (A2-T2*A1)/((T3-T2)*(T3-T1));
WW2 = (T3*A1-A2)/((T3-T2)*(T2-T1));
WW1 = WW1-WW2-WW3;
}
} else {
WW1 = sqrtf(PIE4/X);
if (n == 1)
RT1 = 0.5E+00f/(X-0.5E+00f);
else if (n == 2) {
if (X < 40.f) {
E = expf(-X);
RT1 = (-8.78947307498880E-01f*X+1.09243702330261E+01f)*E
+ R12/(X-R12);
RT2 = (-9.28903924275977E+00f*X+8.10642367843811E+01f)*E
+ R22/(X-R22);
WW2 = ( 4.46857389308400E+00f*X-7.79250653461045E+01f)*E + W22*WW1;
WW1 = WW1-WW2;
} else {
RT1 = R12/(X-R12);
RT2 = R22/(X-R22);
WW2 = W22*WW1;
WW1 = WW1-WW2;
}
} else if (n == 3) {
if (X < 47.f) {
E = expf(-X);
RT1 = ((-7.39058467995275E+00f*X+3.21318352526305E+02f)*X -
3.99433696473658E+03f)*E + R13/(X-R13);
RT2 = ((-7.38726243906513E+01f*X+3.13569966333873E+03f)*X -
3.86862867311321E+04f)*E + R23/(X-R23);
RT3 = ((-2.63750565461336E+02f*X+1.04412168692352E+04f)*X -
1.28094577915394E+05f)*E + R33/(X-R33);
WW3 = ((( 1.52258947224714E-01f*X-8.30661900042651E+00f)*X +
1.92977367967984E+02f)*X-1.67787926005344E+03f)*E
+ W33*WW1;
WW2 = (( 6.15072615497811E+01f*X-2.91980647450269E+03f)*X +
3.80794303087338E+04f)*E + W23*WW1;
WW1 = WW1-WW2-WW3;
} else {
RT1 = R13/(X-R13);
RT2 = R23/(X-R23);
RT3 = R33/(X-R33);
WW2 = W23*WW1;
WW3 = W33*WW1;
WW1 = WW1-WW2-WW3;
}
}
}
roots[0] = RT1;
weights[0] = WW1;
if (n > 1){
roots[1] = RT2;
weights[1] = WW2;
}
if (n > 2) {
roots[2] = RT3;
weights[2] = WW3;
}
return;
}
__device__ void cuda_Root4(float X, float roots[], float weights[]){
float R14,PIE4,R24,W24,R34,W34,R44,W44;
float RT1=0,RT2=0,RT3=0,RT4=0,WW1=0,WW2=0,WW3=0,WW4=0;
float Y,E;
R14 = 1.45303521503316E-01f;
PIE4 = 7.85398163397448E-01f;
R24 = 1.33909728812636E+00f;
W24 = 2.34479815323517E-01f;
R34 = 3.92696350135829E+00f;
W34 = 1.92704402415764E-02f;
R44 = 8.58863568901199E+00f;
W44 = 2.25229076750736E-04f;
if (X <= 3.0E-7f) {
RT1 = 3.48198973061471E-02f -4.09645850660395E-03f *X;
RT2 = 3.81567185080042E-01f -4.48902570656719E-02f *X;
RT3 = 1.73730726945891E+00f -2.04389090547327E-01f *X;
RT4 = 1.18463056481549E+01f -1.39368301742312E+00f *X;
WW1 = 3.62683783378362E-01f -3.13844305713928E-02f *X;
WW2 = 3.13706645877886E-01f -8.98046242557724E-02f *X;
WW3 = 2.22381034453372E-01f -1.29314370958973E-01f *X;
WW4 = 1.01228536290376E-01f -8.28299075414321E-02f *X;
} else if (X <= 1.f) {
RT1 = ((((((-1.95309614628539E-10f*X+5.19765728707592E-09f)*X-
1.01756452250573E-07f )*X+1.72365935872131E-06f )*X-
2.61203523522184E-05f )*X+3.52921308769880E-04f )*X-
4.09645850658433E-03f )*X+3.48198973061469E-02f;
RT2 = (((((-1.89554881382342E-08f*X+3.07583114342365E-07f)*X+
1.270981734393E-06f)*X-1.417298563884E-04f)*X+
3.226979163176E-03f)*X-4.48902570678178E-02f )*X+
3.81567185080039E-01f;
RT3 = (((((( 1.77280535300416E-09f*X+3.36524958870615E-08f)*X-
2.58341529013893E-07f )*X-1.13644895662320E-05f )*X-
7.91549618884063E-05f )*X+1.03825827346828E-02f )*X-
2.04389090525137E-01f )*X+1.73730726945889E+00f;
RT4 = (((((-5.61188882415248E-08f*X-2.49480733072460E-07f)*X+
3.428685057114E-06f)*X+1.679007454539E-04f)*X+
4.722855585715E-02f)*X-1.39368301737828E+00f )*X+
1.18463056481543E+01f;
WW1 = ((((((-1.14649303201279E-08f*X+1.88015570196787E-07f)*X-
2.33305875372323E-06f )*X+2.68880044371597E-05f )*X-
2.94268428977387E-04f )*X+3.06548909776613E-03f )*X-
3.13844305680096E-02f )*X+3.62683783378335E-01f;
WW2 = ((((((((-4.11720483772634E-09f*X+6.54963481852134E-08f)*X-
7.20045285129626E-07f )*X+6.93779646721723E-06f )*X-
6.05367572016373E-05f )*X+4.74241566251899E-04f )*X-
3.26956188125316E-03f )*X+1.91883866626681E-02f )*X-
8.98046242565811E-02f )*X+3.13706645877886E-01f;
WW3 = ((((((((-3.41688436990215E-08f*X+5.07238960340773E-07f)*X-
5.01675628408220E-06f )*X+4.20363420922845E-05f )*X-
3.08040221166823E-04f )*X+1.94431864731239E-03f )*X-
1.02477820460278E-02f )*X+4.28670143840073E-02f )*X-
1.29314370962569E-01f )*X+2.22381034453369E-01f;
WW4 = ((((((((( 4.99660550769508E-09f*X-7.94585963310120E-08f)*X+
8.359072409485E-07f)*X-7.422369210610E-06f)*X+
5.763374308160E-05f)*X-3.86645606718233E-04f )*X+
2.18417516259781E-03f )*X-9.99791027771119E-03f )*X+
3.48791097377370E-02f )*X-8.28299075413889E-02f )*X+
1.01228536290376E-01f;
} else if (X <= 5.f) {
Y = X-3.0E+00f;
RT1 = (((((((((-1.48570633747284E-15f*Y-1.33273068108777E-13f)*Y+
4.068543696670E-12f)*Y-9.163164161821E-11f)*Y+
2.046819017845E-09f)*Y-4.03076426299031E-08f )*Y+
7.29407420660149E-07f )*Y-1.23118059980833E-05f )*Y+
1.88796581246938E-04f )*Y-2.53262912046853E-03f )*Y+
2.51198234505021E-02f;
RT2 = ((((((((( 1.35830583483312E-13f*Y-2.29772605964836E-12f)*Y-
3.821500128045E-12f)*Y+6.844424214735E-10f)*Y-
1.048063352259E-08f)*Y+1.50083186233363E-08f )*Y+
3.48848942324454E-06f )*Y-1.08694174399193E-04f )*Y+
2.08048885251999E-03f )*Y-2.91205805373793E-02f )*Y+
2.72276489515713E-01f;
RT3 = ((((((((( 5.02799392850289E-13f*Y+1.07461812944084E-11f)*Y-
1.482277886411E-10f)*Y-2.153585661215E-09f)*Y+
3.654087802817E-08f)*Y+5.15929575830120E-07f )*Y-
9.52388379435709E-06f )*Y-2.16552440036426E-04f )*Y+
9.03551469568320E-03f )*Y-1.45505469175613E-01f )*Y+
1.21449092319186E+00f;
RT4 = (((((((((-1.08510370291979E-12f*Y+6.41492397277798E-11f)*Y+
7.542387436125E-10f)*Y-2.213111836647E-09f)*Y-
1.448228963549E-07f)*Y-1.95670833237101E-06f )*Y-
1.07481314670844E-05f )*Y+1.49335941252765E-04f )*Y+
4.87791531990593E-02f )*Y-1.10559909038653E+00f )*Y+
8.09502028611780E+00f;
WW1 = ((((((((((-4.65801912689961E-14f*Y+7.58669507106800E-13f)*Y-
1.186387548048E-11f)*Y+1.862334710665E-10f)*Y-
2.799399389539E-09f)*Y+4.148972684255E-08f)*Y-
5.933568079600E-07f)*Y+8.168349266115E-06f)*Y-
1.08989176177409E-04f )*Y+1.41357961729531E-03f )*Y-
1.87588361833659E-02f )*Y+2.89898651436026E-01f;
WW2 = ((((((((((((-1.46345073267549E-14f*Y+2.25644205432182E-13f)*Y-
3.116258693847E-12f)*Y+4.321908756610E-11f)*Y-
5.673270062669E-10f)*Y+7.006295962960E-09f)*Y-
8.120186517000E-08f)*Y+8.775294645770E-07f)*Y-
8.77829235749024E-06f )*Y+8.04372147732379E-05f )*Y-
6.64149238804153E-04f )*Y+4.81181506827225E-03f )*Y-
2.88982669486183E-02f )*Y+1.56247249979288E-01f;
WW3 = ((((((((((((( 9.06812118895365E-15f*Y-1.40541322766087E-13f)*
Y+1.919270015269E-12f)*Y-2.605135739010E-11f)*Y+
3.299685839012E-10f)*Y-3.86354139348735E-09f )*Y+
4.16265847927498E-08f )*Y-4.09462835471470E-07f )*Y+
3.64018881086111E-06f )*Y-2.88665153269386E-05f )*Y+
2.00515819789028E-04f )*Y-1.18791896897934E-03f )*Y+
5.75223633388589E-03f )*Y-2.09400418772687E-02f )*Y+
4.85368861938873E-02f;
WW4 = ((((((((((((((-9.74835552342257E-16f*Y+1.57857099317175E-14f)*
Y-2.249993780112E-13f)*Y+3.173422008953E-12f)*Y-
4.161159459680E-11f)*Y+5.021343560166E-10f)*Y-
5.545047534808E-09f)*Y+5.554146993491E-08f)*Y-
4.99048696190133E-07f )*Y+3.96650392371311E-06f )*Y-
2.73816413291214E-05f )*Y+1.60106988333186E-04f )*Y-
7.64560567879592E-04f )*Y+2.81330044426892E-03f )*Y-
7.16227030134947E-03f )*Y+9.66077262223353E-03f;
} else if (X <= 10.f) {
Y = X-7.5E+00f;
RT1 = ((((((((( 4.64217329776215E-15f*Y-6.27892383644164E-15f)*Y+
3.462236347446E-13f)*Y-2.927229355350E-11f)*Y+
5.090355371676E-10f)*Y-9.97272656345253E-09f )*Y+
2.37835295639281E-07f )*Y-4.60301761310921E-06f )*Y+
8.42824204233222E-05f )*Y-1.37983082233081E-03f )*Y+
1.66630865869375E-02f;
RT2 = ((((((((( 2.93981127919047E-14f*Y+8.47635639065744E-13f)*Y-
1.446314544774E-11f)*Y-6.149155555753E-12f)*Y+
8.484275604612E-10f)*Y-6.10898827887652E-08f )*Y+
2.39156093611106E-06f )*Y-5.35837089462592E-05f )*Y+
1.00967602595557E-03f )*Y-1.57769317127372E-02f )*Y+
1.74853819464285E-01f;
RT3 = (((((((((( 2.93523563363000E-14f*Y-6.40041776667020E-14f)*Y-
2.695740446312E-12f)*Y+1.027082960169E-10f)*Y-
5.822038656780E-10f)*Y-3.159991002539E-08f)*Y+
4.327249251331E-07f)*Y+4.856768455119E-06f)*Y-
2.54617989427762E-04f )*Y+5.54843378106589E-03f )*Y-
7.95013029486684E-02f )*Y+7.20206142703162E-01f;
RT4 = (((((((((((-1.62212382394553E-14f*Y+7.68943641360593E-13f)*Y+
5.764015756615E-12f)*Y-1.380635298784E-10f)*Y-
1.476849808675E-09f)*Y+1.84347052385605E-08f )*Y+
3.34382940759405E-07f )*Y-1.39428366421645E-06f )*Y-
7.50249313713996E-05f )*Y-6.26495899187507E-04f )*Y+
4.69716410901162E-02f )*Y-6.66871297428209E-01f )*Y+
4.11207530217806E+00f;
WW1 = ((((((((((-1.65995045235997E-15f*Y+6.91838935879598E-14f)*Y-
9.131223418888E-13f)*Y+1.403341829454E-11f)*Y-
3.672235069444E-10f)*Y+6.366962546990E-09f)*Y-
1.039220021671E-07f)*Y+1.959098751715E-06f)*Y-
3.33474893152939E-05f )*Y+5.72164211151013E-04f )*Y-
1.05583210553392E-02f )*Y+2.26696066029591E-01f;
WW2 = ((((((((((((-3.57248951192047E-16f*Y+6.25708409149331E-15f)*Y-
9.657033089714E-14f)*Y+1.507864898748E-12f)*Y-
2.332522256110E-11f)*Y+3.428545616603E-10f)*Y-
4.698730937661E-09f)*Y+6.219977635130E-08f)*Y-
7.83008889613661E-07f )*Y+9.08621687041567E-06f )*Y-
9.86368311253873E-05f )*Y+9.69632496710088E-04f )*Y-
8.14594214284187E-03f )*Y+8.50218447733457E-02f;
WW3 = ((((((((((((( 1.64742458534277E-16f*Y-2.68512265928410E-15f)*
Y+3.788890667676E-14f)*Y-5.508918529823E-13f)*Y+
7.555896810069E-12f)*Y-9.69039768312637E-11f )*Y+
1.16034263529672E-09f )*Y-1.28771698573873E-08f )*Y+
1.31949431805798E-07f )*Y-1.23673915616005E-06f )*Y+
1.04189803544936E-05f )*Y-7.79566003744742E-05f )*Y+
5.03162624754434E-04f )*Y-2.55138844587555E-03f )*Y+
1.13250730954014E-02f;
WW4 = ((((((((((((((-1.55714130075679E-17f*Y+2.57193722698891E-16f)*
Y-3.626606654097E-15f)*Y+5.234734676175E-14f)*Y-
7.067105402134E-13f)*Y+8.793512664890E-12f)*Y-
1.006088923498E-10f)*Y+1.050565098393E-09f)*Y-
9.91517881772662E-09f )*Y+8.35835975882941E-08f )*Y-
6.19785782240693E-07f )*Y+3.95841149373135E-06f )*Y-
2.11366761402403E-05f )*Y+9.00474771229507E-05f )*Y-
2.78777909813289E-04f )*Y+5.26543779837487E-04f;
} else if (X <= 15.f) {
Y = X-12.5E+00f;
RT1 = ((((((((((( 4.94869622744119E-17f*Y+8.03568805739160E-16f)*Y-
5.599125915431E-15f)*Y-1.378685560217E-13f)*Y+
7.006511663249E-13f)*Y+1.30391406991118E-11f )*Y+
8.06987313467541E-11f )*Y-5.20644072732933E-09f )*Y+
7.72794187755457E-08f )*Y-1.61512612564194E-06f )*Y+
4.15083811185831E-05f )*Y-7.87855975560199E-04f )*Y+
1.14189319050009E-02f;
RT2 = ((((((((((( 4.89224285522336E-16f*Y+1.06390248099712E-14f)*Y-
5.446260182933E-14f)*Y-1.613630106295E-12f)*Y+
3.910179118937E-12f)*Y+1.90712434258806E-10f )*Y+
8.78470199094761E-10f )*Y-5.97332993206797E-08f )*Y+
9.25750831481589E-07f )*Y-2.02362185197088E-05f )*Y+
4.92341968336776E-04f )*Y-8.68438439874703E-03f )*Y+
1.15825965127958E-01f;
RT3 = (((((((((( 6.12419396208408E-14f*Y+1.12328861406073E-13f)*Y-
9.051094103059E-12f)*Y-4.781797525341E-11f)*Y+
1.660828868694E-09f)*Y+4.499058798868E-10f)*Y-
2.519549641933E-07f)*Y+4.977444040180E-06f)*Y-
1.25858350034589E-04f )*Y+2.70279176970044E-03f )*Y-
3.99327850801083E-02f )*Y+4.33467200855434E-01f;
RT4 = ((((((((((( 4.63414725924048E-14f*Y-4.72757262693062E-14f)*Y-
1.001926833832E-11f)*Y+6.074107718414E-11f)*Y+
1.576976911942E-09f)*Y-2.01186401974027E-08f )*Y-
1.84530195217118E-07f )*Y+5.02333087806827E-06f )*Y+
9.66961790843006E-06f )*Y-1.58522208889528E-03f )*Y+
2.80539673938339E-02f )*Y-2.78953904330072E-01f )*Y+
1.82835655238235E+00f;
WW4 = ((((((((((((( 2.90401781000996E-18f*Y-4.63389683098251E-17f)*
Y+6.274018198326E-16f)*Y-8.936002188168E-15f)*Y+
1.194719074934E-13f)*Y-1.45501321259466E-12f )*Y+
1.64090830181013E-11f )*Y-1.71987745310181E-10f )*Y+
1.63738403295718E-09f )*Y-1.39237504892842E-08f )*Y+
1.06527318142151E-07f )*Y-7.27634957230524E-07f )*Y+
4.12159381310339E-06f )*Y-1.74648169719173E-05f )*Y+
8.50290130067818E-05f;
WW3 = ((((((((((((-4.19569145459480E-17f*Y+5.94344180261644E-16f)*Y-
1.148797566469E-14f)*Y+1.881303962576E-13f)*Y-
2.413554618391E-12f)*Y+3.372127423047E-11f)*Y-
4.933988617784E-10f)*Y+6.116545396281E-09f)*Y-
6.69965691739299E-08f )*Y+7.52380085447161E-07f )*Y-
8.08708393262321E-06f )*Y+6.88603417296672E-05f )*Y-
4.67067112993427E-04f )*Y+5.42313365864597E-03f;
WW2 = ((((((((((-6.22272689880615E-15f*Y+1.04126809657554E-13f)*Y-
6.842418230913E-13f)*Y+1.576841731919E-11f)*Y-
4.203948834175E-10f)*Y+6.287255934781E-09f)*Y-
8.307159819228E-08f)*Y+1.356478091922E-06f)*Y-
2.08065576105639E-05f )*Y+2.52396730332340E-04f )*Y-
2.94484050194539E-03f )*Y+6.01396183129168E-02f;
WW1 = (((-1.8784686463512E-01f/X+2.2991849164985E-01f)/X -
4.9893752514047E-01f)/X-2.1916512131607E-05f)*expf(-X) +
sqrtf(PIE4/X)-WW4-WW3-WW2;
} else if (X <= 20.f) {
WW1 = sqrtf(PIE4/X);
Y = X-17.5E+00f;
RT1 = ((((((((((( 4.36701759531398E-17f*Y-1.12860600219889E-16f)*Y-
6.149849164164E-15f)*Y+5.820231579541E-14f)*Y+
4.396602872143E-13f)*Y-1.24330365320172E-11f )*Y+
6.71083474044549E-11f )*Y+2.43865205376067E-10f )*Y+
1.67559587099969E-08f )*Y-9.32738632357572E-07f )*Y+
2.39030487004977E-05f )*Y-4.68648206591515E-04f )*Y+
8.34977776583956E-03f;
RT2 = ((((((((((( 4.98913142288158E-16f*Y-2.60732537093612E-16f)*Y-
7.775156445127E-14f)*Y+5.766105220086E-13f)*Y+
6.432696729600E-12f)*Y-1.39571683725792E-10f )*Y+
5.95451479522191E-10f )*Y+2.42471442836205E-09f )*Y+
2.47485710143120E-07f )*Y-1.14710398652091E-05f )*Y+
2.71252453754519E-04f )*Y-4.96812745851408E-03f )*Y+
8.26020602026780E-02f;
RT3 = ((((((((((( 1.91498302509009E-15f*Y+1.48840394311115E-14f)*Y-
4.316925145767E-13f)*Y+1.186495793471E-12f)*Y+
4.615806713055E-11f)*Y-5.54336148667141E-10f )*Y+
3.48789978951367E-10f )*Y-2.79188977451042E-09f )*Y+
2.09563208958551E-06f )*Y-6.76512715080324E-05f )*Y+
1.32129867629062E-03f )*Y-2.05062147771513E-02f )*Y+
2.88068671894324E-01f;
RT4 = (((((((((((-5.43697691672942E-15f*Y-1.12483395714468E-13f)*Y+
2.826607936174E-12f)*Y-1.266734493280E-11f)*Y-
4.258722866437E-10f)*Y+9.45486578503261E-09f )*Y-
5.86635622821309E-08f )*Y-1.28835028104639E-06f )*Y+
4.41413815691885E-05f )*Y-7.61738385590776E-04f )*Y+
9.66090902985550E-03f )*Y-1.01410568057649E-01f )*Y+
9.54714798156712E-01f;
WW4 = ((((((((((((-7.56882223582704E-19f*Y+7.53541779268175E-18f)*Y-
1.157318032236E-16f)*Y+2.411195002314E-15f)*Y-
3.601794386996E-14f)*Y+4.082150659615E-13f)*Y-
4.289542980767E-12f)*Y+5.086829642731E-11f)*Y-
6.35435561050807E-10f )*Y+6.82309323251123E-09f )*Y-
5.63374555753167E-08f )*Y+3.57005361100431E-07f )*Y-
2.40050045173721E-06f )*Y+4.94171300536397E-05f;
WW3 = (((((((((((-5.54451040921657E-17f*Y+2.68748367250999E-16f)*Y+
1.349020069254E-14f)*Y-2.507452792892E-13f)*Y+
1.944339743818E-12f)*Y-1.29816917658823E-11f )*Y+
3.49977768819641E-10f )*Y-8.67270669346398E-09f )*Y+
1.31381116840118E-07f )*Y-1.36790720600822E-06f )*Y+
1.19210697673160E-05f )*Y-1.42181943986587E-04f )*Y+
4.12615396191829E-03f;
WW2 = (((((((((((-1.86506057729700E-16f*Y+1.16661114435809E-15f)*Y+
2.563712856363E-14f)*Y-4.498350984631E-13f)*Y+
1.765194089338E-12f)*Y+9.04483676345625E-12f )*Y+
4.98930345609785E-10f )*Y-2.11964170928181E-08f )*Y+
3.98295476005614E-07f )*Y-5.49390160829409E-06f )*Y+
7.74065155353262E-05f )*Y-1.48201933009105E-03f )*Y+
4.97836392625268E-02f;
WW1 = (( 1.9623264149430E-01f/X-4.9695241464490E-01f)/X -
6.0156581186481E-05f)*expf(-X)+WW1-WW2-WW3-WW4;
} else if (X <= 35.f) {
WW1 = sqrtf(PIE4/X);
E = expf(-X);
RT1 = ((((((-4.45711399441838E-05f*X+1.27267770241379E-03f)*X -
2.36954961381262E-01f)*X+1.54330657903756E+01f)*X -
5.22799159267808E+02f)*X+1.05951216669313E+04f)*X +
(-2.51177235556236E+06f/X+8.72975373557709E+05f)/X -
1.29194382386499E+05f)*E + R14/(X-R14);
RT2 = (((((-7.85617372254488E-02f*X+6.35653573484868E+00f)*X -
3.38296938763990E+02f)*X+1.25120495802096E+04f)*X -
3.16847570511637E+05f)*X +
((-1.02427466127427E+09f/X +
3.70104713293016E+08f)/X-5.87119005093822E+07f)/X +
5.38614211391604E+06f)*E + R24/(X-R24);
RT3 = (((((-2.37900485051067E-01f*X+1.84122184400896E+01f)*X -
1.00200731304146E+03f)*X+3.75151841595736E+04f)*X -
9.50626663390130E+05f)*X +
((-2.88139014651985E+09f/X +
1.06625915044526E+09f)/X-1.72465289687396E+08f)/X +
1.60419390230055E+07f)*E + R34/(X-R34);
RT4 = ((((((-6.00691586407385E-04f*X-3.64479545338439E-01f)*X +
1.57496131755179E+01f)*X-6.54944248734901E+02f)*X +
1.70830039597097E+04f)*X-2.90517939780207E+05f)*X +
(3.49059698304732E+07f/X-1.64944522586065E+07f)/X +
2.96817940164703E+06f)*E + R44/(X-R44);
if (X <= 25.f)
WW4 = ((((((( 2.33766206773151E-07f*X-
3.81542906607063E-05f)*X +3.51416601267000E-03f)*X-
1.66538571864728E-01f)*X +4.80006136831847E+00f)*X-
8.73165934223603E+01f)*X +9.77683627474638E+02f)*X +
1.66000945117640E+04f/X -6.14479071209961E+03f)*E + W44*WW1;
else
WW4 = (((((( 5.74245945342286E-06f*X-
7.58735928102351E-05f)*X +2.35072857922892E-04f)*X-
3.78812134013125E-03f)*X +3.09871652785805E-01f)*X-
7.11108633061306E+00f)*X +5.55297573149528E+01f)*E + W44*WW1;
WW3 = (((((( 2.36392855180768E-04f*X-9.16785337967013E-03f)*X +
4.62186525041313E-01f)*X-1.96943786006540E+01f)*X +
4.99169195295559E+02f)*X-6.21419845845090E+03f)*X +
((+5.21445053212414E+07f/X-1.34113464389309E+07f)/X +
1.13673298305631E+06f)/X-2.81501182042707E+03f)*E + W34*WW1;
WW2 = (((((( 7.29841848989391E-04f*X-3.53899555749875E-02f)*X +
2.07797425718513E+00f)*X-1.00464709786287E+02f)*X +
3.15206108877819E+03f)*X-6.27054715090012E+04f)*X +
(+1.54721246264919E+07f/X-5.26074391316381E+06f)/X +
7.67135400969617E+05f)*E + W24*WW1;
WW1 = (( 1.9623264149430E-01f/X-4.9695241464490E-01f)/X -
6.0156581186481E-05f)*E + WW1-WW2-WW3-WW4;
} else if (X <= 53.f) {
WW1 = sqrtf(PIE4/X);
E = expf(-X)*powf(X,4.f);
RT4 = ((-2.19135070169653E-03f*X-1.19108256987623E-01f)*X -
7.50238795695573E-01f)*E + R44/(X-R44);
RT3 = ((-9.65842534508637E-04f*X-4.49822013469279E-02f)*X +
6.08784033347757E-01f)*E + R34/(X-R34);
RT2 = ((-3.62569791162153E-04f*X-9.09231717268466E-03f)*X +
1.84336760556262E-01f)*E + R24/(X-R24);
RT1 = ((-4.07557525914600E-05f*X-6.88846864931685E-04f)*X +
1.74725309199384E-02f)*E + R14/(X-R14);
WW4 = (( 5.76631982000990E-06f*X-7.89187283804890E-05f)*X +
3.28297971853126E-04f)*E + W44*WW1;
WW3 = (( 2.08294969857230E-04f*X-3.77489954837361E-03f)*X +
2.09857151617436E-02f)*E + W34*WW1;
WW2 = (( 6.16374517326469E-04f*X-1.26711744680092E-02f)*X +
8.14504890732155E-02f)*E + W24*WW1;
WW1 = WW1-WW2-WW3-WW4;
} else {
WW1 = sqrtf(PIE4/X);
RT1 = R14/(X-R14);
RT2 = R24/(X-R24);
RT3 = R34/(X-R34);
RT4 = R44/(X-R44);
WW4 = W44*WW1;
WW3 = W34*WW1;
WW2 = W24*WW1;
WW1 = WW1-WW2-WW3-WW4;
}
roots[0] = RT1;
weights[0] = WW1;
roots[1] = RT2;
weights[1] = WW2;
roots[2] = RT3;
weights[2] = WW3;
roots[3] = RT4;
weights[3] = WW4;
return;
}
__device__ void cuda_Root5(float X, float roots[], float weights[]){
float R15,PIE4,R25,W25,R35,W35,R45,W45,R55,W55;
float RT1=0,RT2=0,RT3=0,RT4=0,RT5=0,
WW1=0,WW2=0,WW3=0,WW4=0,WW5=0;
float Y,E=0,XXX;
R15 = 1.17581320211778E-01f;
PIE4 = 7.85398163397448E-01f;
R25 = 1.07456201243690E+00f;
W25 = 2.70967405960535E-01f;
R35 = 3.08593744371754E+00f;
W35 = 3.82231610015404E-02f;
R45 = 6.41472973366203E+00f;
W45 = 1.51614186862443E-03f;
R55 = 1.18071894899717E+01f;
W55 = 8.62130526143657E-06f;
if (X < 3.e-7f){
RT1 = 2.26659266316985E-02f -2.15865967920897E-03f *X;
RT2 = 2.31271692140903E-01f -2.20258754389745E-02f *X;
RT3 = 8.57346024118836E-01f -8.16520023025515E-02f *X;
RT4 = 2.97353038120346E+00f -2.83193369647137E-01f *X;
RT5 = 1.84151859759051E+01f -1.75382723579439E+00f *X;
WW1 = 2.95524224714752E-01f -1.96867576909777E-02f *X;
WW2 = 2.69266719309995E-01f -5.61737590184721E-02f *X;
WW3 = 2.19086362515981E-01f -9.71152726793658E-02f *X;
WW4 = 1.49451349150580E-01f -1.02979262193565E-01f *X;
WW5 = 6.66713443086877E-02f -5.73782817488315E-02f *X;
} else if (X < 1.f){
RT1 = ((((((-4.46679165328413E-11f*X+1.21879111988031E-09f)*X-
2.62975022612104E-08f )*X+5.15106194905897E-07f )*X-
9.27933625824749E-06f )*X+1.51794097682482E-04f )*X-
2.15865967920301E-03f )*X+2.26659266316985E-02f;
RT2 = (((((( 1.93117331714174E-10f*X-4.57267589660699E-09f)*X+
2.48339908218932E-08f )*X+1.50716729438474E-06f )*X-
6.07268757707381E-05f )*X+1.37506939145643E-03f )*X-
2.20258754419939E-02f )*X+2.31271692140905E-01f;
RT3 = ((((( 4.84989776180094E-09f*X+1.31538893944284E-07f)*X-
2.766753852879E-06f)*X-7.651163510626E-05f)*X+
4.033058545972E-03f)*X-8.16520022916145E-02f )*X+
8.57346024118779E-01f;
RT4 = ((((-2.48581772214623E-07f*X-4.34482635782585E-06f)*X-
7.46018257987630E-07f )*X+1.01210776517279E-02f )*X-
2.83193369640005E-01f )*X+2.97353038120345E+00f;
RT5 = (((((-8.92432153868554E-09f*X+1.77288899268988E-08f)*X+
3.040754680666E-06f)*X+1.058229325071E-04f)*X+
4.596379534985E-02f)*X-1.75382723579114E+00f )*X+
1.84151859759049E+01f;
WW1 = ((((((-2.03822632771791E-09f*X+3.89110229133810E-08f)*X-
5.84914787904823E-07f )*X+8.30316168666696E-06f )*X-
1.13218402310546E-04f )*X+1.49128888586790E-03f )*X-
1.96867576904816E-02f )*X+2.95524224714749E-01f;
WW2 = ((((((( 8.62848118397570E-09f*X-1.38975551148989E-07f)*X+
1.602894068228E-06f)*X-1.646364300836E-05f)*X+
1.538445806778E-04f)*X-1.28848868034502E-03f )*X+
9.38866933338584E-03f )*X-5.61737590178812E-02f )*X+
2.69266719309991E-01f;
WW3 = ((((((((-9.41953204205665E-09f*X+1.47452251067755E-07f)*X-
1.57456991199322E-06f )*X+1.45098401798393E-05f )*X-
1.18858834181513E-04f )*X+8.53697675984210E-04f )*X-
5.22877807397165E-03f )*X+2.60854524809786E-02f )*X-
9.71152726809059E-02f )*X+2.19086362515979E-01f;
WW4 = ((((((((-3.84961617022042E-08f*X+5.66595396544470E-07f)*X-
5.52351805403748E-06f )*X+4.53160377546073E-05f )*X-
3.22542784865557E-04f )*X+1.95682017370967E-03f )*X-
9.77232537679229E-03f )*X+3.79455945268632E-02f )*X-
1.02979262192227E-01f )*X+1.49451349150573E-01f;
WW5 = ((((((((( 4.09594812521430E-09f*X-6.47097874264417E-08f)*X+
6.743541482689E-07f)*X-5.917993920224E-06f)*X+
4.531969237381E-05f)*X-2.99102856679638E-04f )*X+
1.65695765202643E-03f )*X-7.40671222520653E-03f )*X+
2.50889946832192E-02f )*X-5.73782817487958E-02f )*X+
6.66713443086877E-02f;
} else if (X < 5.f) {
Y = X-3.0E+00f;
RT1 = ((((((((-2.58163897135138E-14f*Y+8.14127461488273E-13f)*Y-
2.11414838976129E-11f )*Y+5.09822003260014E-10f )*Y-
1.16002134438663E-08f )*Y+2.46810694414540E-07f )*Y-
4.92556826124502E-06f )*Y+9.02580687971053E-05f )*Y-
1.45190025120726E-03f )*Y+1.73416786387475E-02f;
RT2 = ((((((((( 1.04525287289788E-14f*Y+5.44611782010773E-14f)*Y-
4.831059411392E-12f)*Y+1.136643908832E-10f)*Y-
1.104373076913E-09f)*Y-2.35346740649916E-08f )*Y+
1.43772622028764E-06f )*Y-4.23405023015273E-05f )*Y+
9.12034574793379E-04f )*Y-1.52479441718739E-02f )*Y+
1.76055265928744E-01f;
RT3 = (((((((((-6.89693150857911E-14f*Y+5.92064260918861E-13f)*Y+
1.847170956043E-11f)*Y-3.390752744265E-10f)*Y-
2.995532064116E-09f)*Y+1.57456141058535E-07f )*Y-
3.95859409711346E-07f )*Y-9.58924580919747E-05f )*Y+
3.23551502557785E-03f )*Y-5.97587007636479E-02f )*Y+
6.46432853383057E-01f;
RT4 = ((((((((-3.61293809667763E-12f*Y-2.70803518291085E-11f)*Y+
8.83758848468769E-10f )*Y+1.59166632851267E-08f )*Y-
1.32581997983422E-07f )*Y-7.60223407443995E-06f )*Y-
7.41019244900952E-05f )*Y+9.81432631743423E-03f )*Y-
2.23055570487771E-01f )*Y+2.21460798080643E+00f;
RT5 = ((((((((( 7.12332088345321E-13f*Y+3.16578501501894E-12f)*Y-
8.776668218053E-11f)*Y-2.342817613343E-09f)*Y-
3.496962018025E-08f)*Y-3.03172870136802E-07f )*Y+
1.50511293969805E-06f )*Y+1.37704919387696E-04f )*Y+
4.70723869619745E-02f )*Y-1.47486623003693E+00f )*Y+
1.35704792175847E+01f;
WW1 = ((((((((( 1.04348658616398E-13f*Y-1.94147461891055E-12f)*Y+
3.485512360993E-11f)*Y-6.277497362235E-10f)*Y+
1.100758247388E-08f)*Y-1.88329804969573E-07f )*Y+
3.12338120839468E-06f )*Y-5.04404167403568E-05f )*Y+
8.00338056610995E-04f )*Y-1.30892406559521E-02f )*Y+
2.47383140241103E-01f;
WW2 = ((((((((((( 3.23496149760478E-14f*Y-5.24314473469311E-13f)*Y+
7.743219385056E-12f)*Y-1.146022750992E-10f)*Y+
1.615238462197E-09f)*Y-2.15479017572233E-08f )*Y+
2.70933462557631E-07f )*Y-3.18750295288531E-06f )*Y+
3.47425221210099E-05f )*Y-3.45558237388223E-04f )*Y+
3.05779768191621E-03f )*Y-2.29118251223003E-02f )*Y+
1.59834227924213E-01f;
WW3 = ((((((((((((-3.42790561802876E-14f*Y+5.26475736681542E-13f)*Y-
7.184330797139E-12f)*Y+9.763932908544E-11f)*Y-
1.244014559219E-09f)*Y+1.472744068942E-08f)*Y-
1.611749975234E-07f)*Y+1.616487851917E-06f)*Y-
1.46852359124154E-05f )*Y+1.18900349101069E-04f )*Y-
8.37562373221756E-04f )*Y+4.93752683045845E-03f )*Y-
2.25514728915673E-02f )*Y+6.95211812453929E-02f;
WW4 = ((((((((((((( 1.04072340345039E-14f*Y-1.60808044529211E-13f)*
Y+2.183534866798E-12f)*Y-2.939403008391E-11f)*Y+
3.679254029085E-10f)*Y-4.23775673047899E-09f )*Y+
4.46559231067006E-08f )*Y-4.26488836563267E-07f )*Y+
3.64721335274973E-06f )*Y-2.74868382777722E-05f )*Y+
1.78586118867488E-04f )*Y-9.68428981886534E-04f )*Y+
4.16002324339929E-03f )*Y-1.28290192663141E-02f )*Y+
2.22353727685016E-02f;
WW5 = ((((((((((((((-8.16770412525963E-16f*Y+1.31376515047977E-14f)*
Y-1.856950818865E-13f)*Y+2.596836515749E-12f)*Y-
3.372639523006E-11f)*Y+4.025371849467E-10f)*Y-
4.389453269417E-09f)*Y+4.332753856271E-08f)*Y-
3.82673275931962E-07f )*Y+2.98006900751543E-06f )*Y-
2.00718990300052E-05f )*Y+1.13876001386361E-04f )*Y-
5.23627942443563E-04f )*Y+1.83524565118203E-03f )*Y-
4.37785737450783E-03f )*Y+5.36963805223095E-03f;
} else if (X < 10.f) {
Y = X-7.5E+00f;
RT1 = ((((((((-1.13825201010775E-14f*Y+1.89737681670375E-13f)*Y-
4.81561201185876E-12f )*Y+1.56666512163407E-10f )*Y-
3.73782213255083E-09f )*Y+9.15858355075147E-08f )*Y-
2.13775073585629E-06f )*Y+4.56547356365536E-05f )*Y-
8.68003909323740E-04f )*Y+1.22703754069176E-02f;
RT2 = (((((((((-3.67160504428358E-15f*Y+1.27876280158297E-14f)*Y-
1.296476623788E-12f)*Y+1.477175434354E-11f)*Y+
5.464102147892E-10f)*Y-2.42538340602723E-08f )*Y+
8.20460740637617E-07f )*Y-2.20379304598661E-05f )*Y+
4.90295372978785E-04f )*Y-9.14294111576119E-03f )*Y+
1.22590403403690E-01f;
RT3 = ((((((((( 1.39017367502123E-14f*Y-6.96391385426890E-13f)*Y+
1.176946020731E-12f)*Y+1.725627235645E-10f)*Y-
3.686383856300E-09f)*Y+2.87495324207095E-08f )*Y+
1.71307311000282E-06f )*Y-7.94273603184629E-05f )*Y+
2.00938064965897E-03f )*Y-3.63329491677178E-02f )*Y+
4.34393683888443E-01f;
RT4 = ((((((((((-1.27815158195209E-14f*Y+1.99910415869821E-14f)*Y+
3.753542914426E-12f)*Y-2.708018219579E-11f)*Y-
1.190574776587E-09f)*Y+1.106696436509E-08f)*Y+
3.954955671326E-07f)*Y-4.398596059588E-06f)*Y-
2.01087998907735E-04f )*Y+7.89092425542937E-03f )*Y-
1.42056749162695E-01f )*Y+1.39964149420683E+00f;
RT5 = ((((((((((-1.19442341030461E-13f*Y-2.34074833275956E-12f)*Y+
6.861649627426E-12f)*Y+6.082671496226E-10f)*Y+
5.381160105420E-09f)*Y-6.253297138700E-08f)*Y-
2.135966835050E-06f)*Y-2.373394341886E-05f)*Y+
2.88711171412814E-06f )*Y+4.85221195290753E-02f )*Y-
1.04346091985269E+00f )*Y+7.89901551676692E+00f;
WW1 = ((((((((( 7.95526040108997E-15f*Y-2.48593096128045E-13f)*Y+
4.761246208720E-12f)*Y-9.535763686605E-11f)*Y+
2.225273630974E-09f)*Y-4.49796778054865E-08f )*Y+
9.17812870287386E-07f )*Y-1.86764236490502E-05f )*Y+
3.76807779068053E-04f )*Y-8.10456360143408E-03f )*Y+
2.01097936411496E-01f;
WW2 = ((((((((((( 1.25678686624734E-15f*Y-2.34266248891173E-14f)*Y+
3.973252415832E-13f)*Y-6.830539401049E-12f)*Y+
1.140771033372E-10f)*Y-1.82546185762009E-09f )*Y+
2.77209637550134E-08f )*Y-4.01726946190383E-07f )*Y+
5.48227244014763E-06f )*Y-6.95676245982121E-05f )*Y+
8.05193921815776E-04f )*Y-8.15528438784469E-03f )*Y+
9.71769901268114E-02f;
WW3 = ((((((((((((-8.20929494859896E-16f*Y+1.37356038393016E-14f)*Y-
2.022863065220E-13f)*Y+3.058055403795E-12f)*Y-
4.387890955243E-11f)*Y+5.923946274445E-10f)*Y-
7.503659964159E-09f)*Y+8.851599803902E-08f)*Y-
9.65561998415038E-07f )*Y+9.60884622778092E-06f )*Y-
8.56551787594404E-05f )*Y+6.66057194311179E-04f )*Y-
4.17753183902198E-03f )*Y+2.25443826852447E-02f;
WW4 = ((((((((((((((-1.08764612488790E-17f*Y+1.85299909689937E-16f)*
Y-2.730195628655E-15f)*Y+4.127368817265E-14f)*Y-
5.881379088074E-13f)*Y+7.805245193391E-12f)*Y-
9.632707991704E-11f)*Y+1.099047050624E-09f)*Y-
1.15042731790748E-08f )*Y+1.09415155268932E-07f )*Y-
9.33687124875935E-07f )*Y+7.02338477986218E-06f )*Y-
4.53759748787756E-05f )*Y+2.41722511389146E-04f )*Y-
9.75935943447037E-04f )*Y+2.57520532789644E-03f;
WW5 = ((((((((((((((( 7.28996979748849E-19f*Y-1.26518146195173E-17f)
*Y+1.886145834486E-16f)*Y-2.876728287383E-15f)*Y+
4.114588668138E-14f)*Y-5.44436631413933E-13f )*Y+
6.64976446790959E-12f )*Y-7.44560069974940E-11f )*Y+
7.57553198166848E-10f )*Y-6.92956101109829E-09f )*Y+
5.62222859033624E-08f )*Y-3.97500114084351E-07f )*Y+
2.39039126138140E-06f )*Y-1.18023950002105E-05f )*Y+
4.52254031046244E-05f )*Y-1.21113782150370E-04f )*Y+
1.75013126731224E-04f;
} else if (X < 15.f) {
Y = X-12.5E+00f;
RT1 = ((((((((((-4.16387977337393E-17f*Y+7.20872997373860E-16f)*Y+
1.395993802064E-14f)*Y+3.660484641252E-14f)*Y-
4.154857548139E-12f)*Y+2.301379846544E-11f)*Y-
1.033307012866E-09f)*Y+3.997777641049E-08f)*Y-
9.35118186333939E-07f )*Y+2.38589932752937E-05f )*Y-
5.35185183652937E-04f )*Y+8.85218988709735E-03f;
RT2 = ((((((((((-4.56279214732217E-16f*Y+6.24941647247927E-15f)*Y+
1.737896339191E-13f)*Y+8.964205979517E-14f)*Y-
3.538906780633E-11f)*Y+9.561341254948E-11f)*Y-
9.772831891310E-09f)*Y+4.240340194620E-07f)*Y-
1.02384302866534E-05f )*Y+2.57987709704822E-04f )*Y-
5.54735977651677E-03f )*Y+8.68245143991948E-02f;
RT3 = ((((((((((-2.52879337929239E-15f*Y+2.13925810087833E-14f)*Y+
7.884307667104E-13f)*Y-9.023398159510E-13f)*Y-
5.814101544957E-11f)*Y-1.333480437968E-09f)*Y-
2.217064940373E-08f)*Y+1.643290788086E-06f)*Y-
4.39602147345028E-05f )*Y+1.08648982748911E-03f )*Y-
2.13014521653498E-02f )*Y+2.94150684465425E-01f;
RT4 = ((((((((((-6.42391438038888E-15f*Y+5.37848223438815E-15f)*Y+
8.960828117859E-13f)*Y+5.214153461337E-11f)*Y-
1.106601744067E-10f)*Y-2.007890743962E-08f)*Y+
1.543764346501E-07f)*Y+4.520749076914E-06f)*Y-
1.88893338587047E-04f )*Y+4.73264487389288E-03f )*Y-
7.91197893350253E-02f )*Y+8.60057928514554E-01f;
RT5 = (((((((((((-2.24366166957225E-14f*Y+4.87224967526081E-14f)*Y+
5.587369053655E-12f)*Y-3.045253104617E-12f)*Y-
1.223983883080E-09f)*Y-2.05603889396319E-09f )*Y+
2.58604071603561E-07f )*Y+1.34240904266268E-06f )*Y-
5.72877569731162E-05f )*Y-9.56275105032191E-04f )*Y+
4.23367010370921E-02f )*Y-5.76800927133412E-01f )*Y+
3.87328263873381E+00f;
WW1 = ((((((((( 8.98007931950169E-15f*Y+7.25673623859497E-14f)*Y+
5.851494250405E-14f)*Y-4.234204823846E-11f)*Y+
3.911507312679E-10f)*Y-9.65094802088511E-09f )*Y+
3.42197444235714E-07f )*Y-7.51821178144509E-06f )*Y+
1.94218051498662E-04f )*Y-5.38533819142287E-03f )*Y+
1.68122596736809E-01f;
WW2 = ((((((((((-1.05490525395105E-15f*Y+1.96855386549388E-14f)*Y-
5.500330153548E-13f)*Y+1.003849567976E-11f)*Y-
1.720997242621E-10f)*Y+3.533277061402E-09f)*Y-
6.389171736029E-08f)*Y+1.046236652393E-06f)*Y-
1.73148206795827E-05f )*Y+2.57820531617185E-04f )*Y-
3.46188265338350E-03f )*Y+7.03302497508176E-02f;
WW3 = ((((((((((( 3.60020423754545E-16f*Y-6.24245825017148E-15f)*Y+
9.945311467434E-14f)*Y-1.749051512721E-12f)*Y+
2.768503957853E-11f)*Y-4.08688551136506E-10f )*Y+
6.04189063303610E-09f )*Y-8.23540111024147E-08f )*Y+
1.01503783870262E-06f )*Y-1.20490761741576E-05f )*Y+
1.26928442448148E-04f )*Y-1.05539461930597E-03f )*Y+
1.15543698537013E-02f;
WW4 = ((((((((((((( 2.51163533058925E-18f*Y-4.31723745510697E-17f)*
Y+6.557620865832E-16f)*Y-1.016528519495E-14f)*Y+
1.491302084832E-13f)*Y-2.06638666222265E-12f )*Y+
2.67958697789258E-11f )*Y-3.23322654638336E-10f )*Y+
3.63722952167779E-09f )*Y-3.75484943783021E-08f )*Y+
3.49164261987184E-07f )*Y-2.92658670674908E-06f )*Y+
2.12937256719543E-05f )*Y-1.19434130620929E-04f )*Y+
6.45524336158384E-04f;
WW5 = ((((((((((((((-1.29043630202811E-19f*Y+2.16234952241296E-18f)*
Y-3.107631557965E-17f)*Y+4.570804313173E-16f)*Y-
6.301348858104E-15f)*Y+8.031304476153E-14f)*Y-
9.446196472547E-13f)*Y+1.018245804339E-11f)*Y-
9.96995451348129E-11f )*Y+8.77489010276305E-10f )*Y-
6.84655877575364E-09f )*Y+4.64460857084983E-08f )*Y-
2.66924538268397E-07f )*Y+1.24621276265907E-06f )*Y-
4.30868944351523E-06f )*Y+9.94307982432868E-06f;
} else if (X < 20.f){
Y = X-17.5E+00f;
RT1 = (((((((((( 1.91875764545740E-16f*Y+7.8357401095707E-16f)*Y-
3.260875931644E-14f)*Y-1.186752035569E-13f)*Y+
4.275180095653E-12f)*Y+3.357056136731E-11f)*Y-
1.123776903884E-09f)*Y+1.231203269887E-08f)*Y-
3.99851421361031E-07f )*Y+1.45418822817771E-05f )*Y-
3.49912254976317E-04f )*Y+6.67768703938812E-03f;
RT2 = (((((((((( 2.02778478673555E-15f*Y+1.01640716785099E-14f)*Y-
3.385363492036E-13f)*Y-1.615655871159E-12f)*Y+
4.527419140333E-11f)*Y+3.853670706486E-10f)*Y-
1.184607130107E-08f)*Y+1.347873288827E-07f)*Y-
4.47788241748377E-06f )*Y+1.54942754358273E-04f )*Y-
3.55524254280266E-03f )*Y+6.44912219301603E-02f;
RT3 = (((((((((( 7.79850771456444E-15f*Y+6.00464406395001E-14f)*Y-
1.249779730869E-12f)*Y-1.020720636353E-11f)*Y+
1.814709816693E-10f)*Y+1.766397336977E-09f)*Y-
4.603559449010E-08f)*Y+5.863956443581E-07f)*Y-
2.03797212506691E-05f )*Y+6.31405161185185E-04f )*Y-
1.30102750145071E-02f )*Y+2.10244289044705E-01f;
RT4 = (((((((((((-2.92397030777912E-15f*Y+1.94152129078465E-14f)*Y+
4.859447665850E-13f)*Y-3.217227223463E-12f)*Y-
7.484522135512E-11f)*Y+7.19101516047753E-10f )*Y+
6.88409355245582E-09f )*Y-1.44374545515769E-07f )*Y+
2.74941013315834E-06f )*Y-1.02790452049013E-04f )*Y+
2.59924221372643E-03f )*Y-4.35712368303551E-02f )*Y+
5.62170709585029E-01f;
RT5 = ((((((((((( 1.17976126840060E-14f*Y+1.24156229350669E-13f)*Y-
3.892741622280E-12f)*Y-7.755793199043E-12f)*Y+
9.492190032313E-10f)*Y-4.98680128123353E-09f )*Y-
1.81502268782664E-07f )*Y+2.69463269394888E-06f )*Y+
2.50032154421640E-05f )*Y-1.33684303917681E-03f )*Y+
2.29121951862538E-02f )*Y-2.45653725061323E-01f )*Y+
1.89999883453047E+00f;
WW1 = (((((((((( 1.74841995087592E-15f*Y-6.95671892641256E-16f)*Y-
3.000659497257E-13f)*Y+2.021279817961E-13f)*Y+
3.853596935400E-11f)*Y+1.461418533652E-10f)*Y-
1.014517563435E-08f)*Y+1.132736008979E-07f)*Y-
2.86605475073259E-06f )*Y+1.21958354908768E-04f )*Y-
3.86293751153466E-03f )*Y+1.45298342081522E-01f;
WW2 = ((((((((((-1.11199320525573E-15f*Y+1.85007587796671E-15f)*Y+
1.220613939709E-13f)*Y+1.275068098526E-12f)*Y-
5.341838883262E-11f)*Y+6.161037256669E-10f)*Y-
1.009147879750E-08f)*Y+2.907862965346E-07f)*Y-
6.12300038720919E-06f )*Y+1.00104454489518E-04f )*Y-
1.80677298502757E-03f )*Y+5.78009914536630E-02f;
WW3 = ((((((((((-9.49816486853687E-16f*Y+6.67922080354234E-15f)*Y+
2.606163540537E-15f)*Y+1.983799950150E-12f)*Y-
5.400548574357E-11f)*Y+6.638043374114E-10f)*Y-
8.799518866802E-09f)*Y+1.791418482685E-07f)*Y-
2.96075397351101E-06f )*Y+3.38028206156144E-05f )*Y-
3.58426847857878E-04f )*Y+8.39213709428516E-03f;
WW4 = ((((((((((( 1.33829971060180E-17f*Y-3.44841877844140E-16f)*Y+
4.745009557656E-15f)*Y-6.033814209875E-14f)*Y+
1.049256040808E-12f)*Y-1.70859789556117E-11f )*Y+
2.15219425727959E-10f )*Y-2.52746574206884E-09f )*Y+
3.27761714422960E-08f )*Y-3.90387662925193E-07f )*Y+
3.46340204593870E-06f )*Y-2.43236345136782E-05f )*Y+
3.54846978585226E-04f;
WW5 = ((((((((((((( 2.69412277020887E-20f*Y-4.24837886165685E-19f)*
Y+6.030500065438E-18f)*Y-9.069722758289E-17f)*Y+
1.246599177672E-15f)*Y-1.56872999797549E-14f )*Y+
1.87305099552692E-13f )*Y-2.09498886675861E-12f )*Y+
2.11630022068394E-11f )*Y-1.92566242323525E-10f )*Y+
1.62012436344069E-09f )*Y-1.23621614171556E-08f )*Y+
7.72165684563049E-08f )*Y-3.59858901591047E-07f )*Y+
2.43682618601000E-06f;
} else if (X < 25.f) {
Y = X-22.5E+00f;
RT1 = (((((((((-1.13927848238726E-15f*Y+7.39404133595713E-15f)*Y+
1.445982921243E-13f)*Y-2.676703245252E-12f)*Y+
5.823521627177E-12f)*Y+2.17264723874381E-10f )*Y+
3.56242145897468E-09f )*Y-3.03763737404491E-07f )*Y+
9.46859114120901E-06f )*Y-2.30896753853196E-04f )*Y+
5.24663913001114E-03f;
RT2 = (((((((((( 2.89872355524581E-16f*Y-1.22296292045864E-14f)*Y+
6.184065097200E-14f)*Y+1.649846591230E-12f)*Y-
2.729713905266E-11f)*Y+3.709913790650E-11f)*Y+
2.216486288382E-09f)*Y+4.616160236414E-08f)*Y-
3.32380270861364E-06f )*Y+9.84635072633776E-05f )*Y-
2.30092118015697E-03f )*Y+5.00845183695073E-02f;
RT3 = (((((((((( 1.97068646590923E-15f*Y-4.89419270626800E-14f)*Y+
1.136466605916E-13f)*Y+7.546203883874E-12f)*Y-
9.635646767455E-11f)*Y-8.295965491209E-11f)*Y+
7.534109114453E-09f)*Y+2.699970652707E-07f)*Y-
1.42982334217081E-05f )*Y+3.78290946669264E-04f )*Y-
8.03133015084373E-03f )*Y+1.58689469640791E-01f;
RT4 = (((((((((( 1.33642069941389E-14f*Y-1.55850612605745E-13f)*Y-
7.522712577474E-13f)*Y+3.209520801187E-11f)*Y-
2.075594313618E-10f)*Y-2.070575894402E-09f)*Y+
7.323046997451E-09f)*Y+1.851491550417E-06f)*Y-
6.37524802411383E-05f )*Y+1.36795464918785E-03f )*Y-
2.42051126993146E-02f )*Y+3.97847167557815E-01f;
RT5 = ((((((((((-6.07053986130526E-14f*Y+1.04447493138843E-12f)*Y-
4.286617818951E-13f)*Y-2.632066100073E-10f)*Y+
4.804518986559E-09f)*Y-1.835675889421E-08f)*Y-
1.068175391334E-06f)*Y+3.292234974141E-05f)*Y-
5.94805357558251E-04f )*Y+8.29382168612791E-03f )*Y-
9.93122509049447E-02f )*Y+1.09857804755042E+00f;
WW1 = (((((((((-9.10338640266542E-15f*Y+1.00438927627833E-13f)*Y+
7.817349237071E-13f)*Y-2.547619474232E-11f)*Y+
1.479321506529E-10f)*Y+1.52314028857627E-09f )*Y+
9.20072040917242E-09f )*Y-2.19427111221848E-06f )*Y+
8.65797782880311E-05f )*Y-2.82718629312875E-03f )*Y+
1.28718310443295E-01f;
WW2 = ((((((((( 5.52380927618760E-15f*Y-6.43424400204124E-14f)*Y-
2.358734508092E-13f)*Y+8.261326648131E-12f)*Y+
9.229645304956E-11f)*Y-5.68108973828949E-09f )*Y+
1.22477891136278E-07f )*Y-2.11919643127927E-06f )*Y+
4.23605032368922E-05f )*Y-1.14423444576221E-03f )*Y+
5.06607252890186E-02f;
WW3 = ((((((((( 3.99457454087556E-15f*Y-5.11826702824182E-14f)*Y-
4.157593182747E-14f)*Y+4.214670817758E-12f)*Y+
6.705582751532E-11f)*Y-3.36086411698418E-09f )*Y+
6.07453633298986E-08f )*Y-7.40736211041247E-07f )*Y+
8.84176371665149E-06f )*Y-1.72559275066834E-04f )*Y+
7.16639814253567E-03f;
WW4 = (((((((((((-2.14649508112234E-18f*Y-2.45525846412281E-18f)*Y+
6.126212599772E-16f)*Y-8.526651626939E-15f)*Y+
4.826636065733E-14f)*Y-3.39554163649740E-13f )*Y+
1.67070784862985E-11f )*Y-4.42671979311163E-10f )*Y+
6.77368055908400E-09f )*Y-7.03520999708859E-08f )*Y+
6.04993294708874E-07f )*Y-7.80555094280483E-06f )*Y+
2.85954806605017E-04f;
WW5 = ((((((((((((-5.63938733073804E-21f*Y+6.92182516324628E-20f)*Y-
1.586937691507E-18f)*Y+3.357639744582E-17f)*Y-
4.810285046442E-16f)*Y+5.386312669975E-15f)*Y-
6.117895297439E-14f)*Y+8.441808227634E-13f)*Y-
1.18527596836592E-11f )*Y+1.36296870441445E-10f )*Y-
1.17842611094141E-09f )*Y+7.80430641995926E-09f )*Y-
5.97767417400540E-08f )*Y+1.65186146094969E-06f;
} else if (X < 40.f) {
WW1 = sqrtf(PIE4/X);
E = expf(-X);
RT1 = ((((((((-1.73363958895356E-06f*X+1.19921331441483E-04f)*X -
1.59437614121125E-02f)*X+1.13467897349442E+00f)*X -
4.47216460864586E+01f)*X+1.06251216612604E+03f)*X -
1.52073917378512E+04f)*X+1.20662887111273E+05f)*X -
4.07186366852475E+05f)*E + R15/(X-R15);
RT2 = ((((((((-1.60102542621710E-05f*X+1.10331262112395E-03f)*X -
1.50043662589017E-01f)*X+1.05563640866077E+01f)*X -
4.10468817024806E+02f)*X+9.62604416506819E+03f)*X -
1.35888069838270E+05f)*X+1.06107577038340E+06f)*X -
3.51190792816119E+06f)*E + R25/(X-R25);
RT3 = ((((((((-4.48880032128422E-05f*X+2.69025112122177E-03f)*X -
4.01048115525954E-01f)*X+2.78360021977405E+01f)*X -
1.04891729356965E+03f)*X+2.36985942687423E+04f)*X -
3.19504627257548E+05f)*X+2.34879693563358E+06f)*X -
7.16341568174085E+06f)*E + R35/(X-R35);
RT4 = ((((((((-6.38526371092582E-05f*X-2.29263585792626E-03f)*X -
7.65735935499627E-02f)*X+9.12692349152792E+00f)*X -
2.32077034386717E+02f)*X+2.81839578728845E+02f)*X +
9.59529683876419E+04f)*X-1.77638956809518E+06f)*X +
1.02489759645410E+07f)*E + R45/(X-R45);
RT5 = ((((((((-3.59049364231569E-05f*X-2.25963977930044E-02f)*X +
1.12594870794668E+00f)*X-4.56752462103909E+01f)*X +
1.05804526830637E+03f)*X-1.16003199605875E+04f)*X -
4.07297627297272E+04f)*X+2.22215528319857E+06f)*X -
1.61196455032613E+07f)*E + R55/(X-R55);
WW5 = (((((((((-4.61100906133970E-10f*X+1.43069932644286E-07f)*X -
1.63960915431080E-05f)*X+1.15791154612838E-03f)*X -
5.30573476742071E-02f)*X+1.61156533367153E+00f)*X -
3.23248143316007E+01f)*X+4.12007318109157E+02f)*X -
3.02260070158372E+03f)*X+9.71575094154768E+03f)*E + W55*WW1;
WW4 = (((((((((-2.40799435809950E-08f*X+8.12621667601546E-06f)*X -
9.04491430884113E-04f)*X+6.37686375770059E-02f)*X -
2.96135703135647E+00f)*X+9.15142356996330E+01f)*X -
1.86971865249111E+03f)*X+2.42945528916947E+04f)*X -
1.81852473229081E+05f)*X+5.96854758661427E+05f)*E + W45*WW1;
WW3 = (((((((( 1.83574464457207E-05f*X-1.54837969489927E-03f)*X +
1.18520453711586E-01f)*X-6.69649981309161E+00f)*X +
2.44789386487321E+02f)*X-5.68832664556359E+03f)*X +
8.14507604229357E+04f)*X-6.55181056671474E+05f)*X +
2.26410896607237E+06f)*E + W35*WW1;
WW2 = (((((((( 2.77778345870650E-05f*X-2.22835017655890E-03f)*X +
1.61077633475573E-01f)*X-8.96743743396132E+00f)*X +
3.28062687293374E+02f)*X-7.65722701219557E+03f)*X +
1.10255055017664E+05f)*X-8.92528122219324E+05f)*X +
3.10638627744347E+06f)*E + W25*WW1;
WW1 = WW1-0.01962E+00f*E-WW2-WW3-WW4-WW5;
} else if (X < 59.f) {
WW1 = sqrtf(PIE4/X);
XXX = powf(X,3.f);
E = XXX*expf(-X);
RT1 = (((-2.43758528330205E-02f*X+2.07301567989771E+00f)*X -
6.45964225381113E+01f)*X+7.14160088655470E+02f)*E + R15/(X-R15);
RT2 = (((-2.28861955413636E-01f*X+1.93190784733691E+01f)*X -
5.99774730340912E+02f)*X+6.61844165304871E+03f)*E + R25/(X-R25);
RT3 = (((-6.95053039285586E-01f*X+5.76874090316016E+01f)*X -
1.77704143225520E+03f)*X+1.95366082947811E+04f)*E + R35/(X-R35);
RT4 = (((-1.58072809087018E+00f*X+1.27050801091948E+02f)*X -
3.86687350914280E+03f)*X+4.23024828121420E+04f)*E + R45/(X-R45);
RT5 = (((-3.33963830405396E+00f*X+2.51830424600204E+02f)*X -
7.57728527654961E+03f)*X+8.21966816595690E+04f)*E + R55/(X-R55);
E = XXX*E;
WW5 = (( 1.35482430510942E-08f*X-3.27722199212781E-07f)*X +
2.41522703684296E-06f)*E + W55*WW1;
WW4 = (( 1.23464092261605E-06f*X-3.55224564275590E-05f)*X +
3.03274662192286E-04f)*E + W45*WW1;
WW3 = (( 1.34547929260279E-05f*X-4.19389884772726E-04f)*X +
3.87706687610809E-03f)*E + W35*WW1;
WW2 = (( 2.09539509123135E-05f*X-6.87646614786982E-04f)*X +
6.68743788585688E-03f)*E + W25*WW1;
WW1 = WW1-WW2-WW3-WW4-WW5;
} else {
WW1 = sqrtf(PIE4/X);
RT1 = R15/(X-R15);
RT2 = R25/(X-R25);
RT3 = R35/(X-R35);
RT4 = R45/(X-R45);
RT5 = R55/(X-R55);
WW2 = W25*WW1;
WW3 = W35*WW1;
WW4 = W45*WW1;
WW5 = W55*WW1;
WW1 = WW1-WW2-WW3-WW4-WW5;
}
roots[0] = RT1;
weights[0] = WW1;
roots[1] = RT2;
weights[1] = WW2;
roots[2] = RT3;
weights[2] = WW3;
roots[3] = RT4;
weights[3] = WW4;
roots[4] = RT5;
weights[4] = WW5;
return;
}
__device__ void cuda_Root6(int n,float X, float roots[], float weights[]){
// Root6 not implemented yet
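  // NOTE: the parameter n is unused and roots[]/weights[] are left unwritten,
  // so a caller requesting norder >= 6 currently gets back whatever values
  // were already in those arrays.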
return;
}
__device__ float cuda_Int1d(int i, int j, int k, int l,
float xi, float xj, float xk, float xl,
float alpha_ij_A, float alpha_kl_B, float sqrt_AB,
float A, float B, float Px, float Qx,
float inv_t1, float B00, float B1, float B1p,
float G[][MAXROOTS])
{
// Form G(n,m)=I(n,0,m,0) intermediate values for a Rys polynomial
int n = i+j;
int m = k+l;
float xij = xi-xj;
float xkl = xk-xl;
// RecurFactorsGamess
  float C  = (Px-xi) * inv_t1 + (B*(Qx-xi)+A*(Px-xi))*B00*2.0f;
  float Cp = (Qx-xk) * inv_t1 + (B*(Qx-xk)+A*(Px-xk))*B00*2.0f;
// ABD eq 11.
G[0][0] = (float)M_PI * expf(-alpha_ij_A*xij*xij -alpha_kl_B*xkl*xkl) / sqrt_AB;
if (n > 0) { G[1][0] = C *G[0][0]; } // ABD eq 15
if (m > 0) { G[0][1] = Cp*G[0][0]; } // ABD eq 16
for (int a = 2; a < n+1; ++ a) { G[a][0] = B1 *(a-1)*G[a-2][0] + C *G[a-1][0]; }
for (int b = 2; b < m+1; ++ b) { G[0][b] = B1p*(b-1)*G[0][b-2] + Cp*G[0][b-1]; }
if ((m>0) && (n>0)){
for (int a=1; a<n+1; ++a){
G[a][1] = a*B00*G[a-1][0] + Cp*G[a][0];
for (int b=2; b<m+1; ++b)
G[a][b] = B1p*(b-1)*G[a][b-2] + a*B00*G[a-1][b-1] + Cp*G[a][b-1];
}
}
// Compute and output I(i,j,k,l) from I(i+j,0,k+l,0) (G)
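  // The two nested loops below perform the horizontal transfer
  //   I(i,j,k,l) = sum_{m=0..l} binom(l,m) * xkl^(l-m)
  //                * [ sum_{n=0..j} binom(j,n) * xij^(j-n) * G[n+i][m+k] ]
  // so only the I(i+j,0,k+l,0) intermediates built above are required.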
float ijkl = 0.0;
for (int m=0; m<l+1; ++m){
float ijm0 = 0.0;
for (int n=0; n<j+1; ++n) // I(i,j,m,0)<-I(n,0,m,0)
ijm0 += cuda_binomial(j,n)*powf(xij,(float)(j-n))*G[n+i][m+k];
ijkl += cuda_binomial(l,m)*powf(xkl,(float)(l-m))*ijm0; // I(i,j,k,l)<-I(i,j,m,0)
}
return ijkl;
}
// calculate ERI over 4 primitive basis functions
__device__ float cuda_rys_pbf(const double *ptr_i, const double *ptr_j,
const double *ptr_k, const double *ptr_l)
{
// download xyz, lmn, expon, and coef*norm
float xa = (float)ptr_i[0];
float ya = (float)ptr_i[1];
float za = (float)ptr_i[2];
int la = (int)ptr_i[3];
int ma = (int)ptr_i[4];
int na = (int)ptr_i[5];
float alphaa = (float)ptr_i[6];
float norma = (float)ptr_i[7];
float xb = (float)ptr_j[0];
float yb = (float)ptr_j[1];
float zb = (float)ptr_j[2];
int lb = (int)ptr_j[3];
int mb = (int)ptr_j[4];
int nb = (int)ptr_j[5];
float alphab = (float)ptr_j[6];
float normb = (float)ptr_j[7];
float xc = (float)ptr_k[0];
float yc = (float)ptr_k[1];
float zc = (float)ptr_k[2];
int lc = (int)ptr_k[3];
int mc = (int)ptr_k[4];
int nc = (int)ptr_k[5];
float alphac = (float)ptr_k[6];
float normc = (float)ptr_k[7];
float xd = (float)ptr_l[0];
float yd = (float)ptr_l[1];
float zd = (float)ptr_l[2];
int ld = (int)ptr_l[3];
int md = (int)ptr_l[4];
int nd = (int)ptr_l[5];
float alphad = (float)ptr_l[6];
float normd = (float)ptr_l[7];
// calculate primitive integral [ij|kl]
int norder,i;
float A,B,xp,yp,zp,xq,yq,zq,X,rho,sum,t,Ix,Iy,Iz;
norder = (la+ma+na+lb+nb+mb+lc+mc+nc+ld+md+nd)/2 + 1;
A = alphaa+alphab;
B = alphac+alphad;
xp = (alphaa*xa+alphab*xb)/A;
yp = (alphaa*ya+alphab*yb)/A;
zp = (alphaa*za+alphab*zb)/A;
xq = (alphac*xc+alphad*xd)/B;
yq = (alphac*yc+alphad*yd)/B;
zq = (alphac*zc+alphad*zd)/B;
rho = A*B/(A+B);
X = rho * ((xp-xq)*(xp-xq)+(yp-yq)*(yp-yq)+(zp-zq)*(zp-zq));
float alpha_ab_A = alphaa * alphab / A;
float alpha_cd_B = alphac * alphad / B;
float sqrt_AB = sqrtf(A * B);
float roots[MAXROOTS],weights[MAXROOTS];
float G[MAXROOTS][MAXROOTS];
  cuda_Roots(norder,X,roots,weights); // get correct roots/weights
sum = 0.;
for (i=0; i<norder; ++i){
t = roots[i];
float inv_t1, B00, B1, B1p;
inv_t1 = 1.f / (1.f + t);
B00 = 0.5f * t/(A+B) * inv_t1;
B1 = 0.5f / A * inv_t1 + B00;
B1p = 0.5f / B * inv_t1 + B00;
Ix = cuda_Int1d(la,lb,lc,ld, xa,xb,xc,xd,
alpha_ab_A,alpha_cd_B,sqrt_AB, A,B,xp,xq, inv_t1,B00,B1,B1p, G);
Iy = cuda_Int1d(ma,mb,mc,md, ya,yb,yc,yd,
alpha_ab_A,alpha_cd_B,sqrt_AB, A,B,yp,yq, inv_t1,B00,B1,B1p, G);
Iz = cuda_Int1d(na,nb,nc,nd, za,zb,zc,zd,
alpha_ab_A,alpha_cd_B,sqrt_AB, A,B,zp,zq, inv_t1,B00,B1,B1p, G);
sum = sum + Ix*Iy*Iz*weights[i]; /* ABD eq 5 & 9 */
}
// inv_sqrt_pi_2: 2.0*sqrtf(1.0/M_PI) = 1.12837916709551255856
return 1.12837916709551255856f * sqrtf(rho)*norma*normb*normc*normd*sum; /* ABD eq 5 & 9 */
}
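// Layout of each 8-double record in pbf_xlec, as unpacked by cuda_rys_pbf
// above and indexed as &pbf_xlec[i * 8] by the kernels below:
//   [0..2] x, y, z centre of the primitive Gaussian
//   [3..5] angular momentum exponents l, m, n (stored as doubles, cast to int)
//   [6]    orbital exponent alpha
//   [7]    contraction coefficient times normalization constant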
// calculate J matrix using 1-thread-1-primitive-integral scheme
__global__ void cuda_mat_J_PI(
const double *__restrict pbf_xlec,
const int *__restrict pbf_to_cbf,
int n_pbf,
const double *__restrict mat_D,
double *__restrict mat_J_PI,
const double *__restrict mat_Q)
{
__shared__ double elem_J_PI[BLOCKSIZE * BLOCKSIZE];
  // each block scans over [ij|??] and sums the contributions into one primitive J matrix element
int i = blockIdx.x;
int j = blockIdx.y;
// avoid accessing out of bounds elements and make use of i<=>j symmetry
if (i >= n_pbf || j > i) { return; }
int ij = cuda_ij2intindex(i,j);
const double *ptr_i = &pbf_xlec[i * 8];
const double *ptr_j = &pbf_xlec[j * 8];
int a = pbf_to_cbf[i];
int b = pbf_to_cbf[j];
int ab = cuda_ij2intindex(a,b);
// initialize shared array
elem_J_PI[threadIdx.x * BLOCKSIZE + threadIdx.y] = 0.0;
for (int k = threadIdx.x; k < n_pbf; k += BLOCKSIZE)
{
int c = pbf_to_cbf[k];
const double *ptr_k = &pbf_xlec[k * 8];
// NOTE: make use of k<=>l symmetry
for (int l = threadIdx.y; l <= k; l += BLOCKSIZE)
{
int d = pbf_to_cbf[l];
int cd = cuda_ij2intindex(c,d);
// Schwartz screening
if (fabs(mat_Q[ab] * mat_Q[cd] * mat_D[cd]) < SCREEN_THR) { continue; }
const double *ptr_l = &pbf_xlec[l * 8];
// calculate ERI
double this_eri = cuda_rys_pbf(ptr_i, ptr_j, ptr_k, ptr_l);
// NOTE: doubling for off-diagonal elements of D due to k<=>l symmetry
elem_J_PI[threadIdx.x *BLOCKSIZE + threadIdx.y] += this_eri * mat_D[cd] * (k == l ? 1.0 : 2.0);
}
}
__syncthreads();
// only update mat_J_PI on one thread of the block
if (0 == threadIdx.x && 0 == threadIdx.y)
{
mat_J_PI[ij] = 0.0;
for (int t1 = 0; t1 < BLOCKSIZE; ++ t1) {
for (int t2 = 0; t2 < BLOCKSIZE; ++ t2) {
mat_J_PI[ij] += elem_J_PI[t1 * BLOCKSIZE + t2];
}
}
}
}
// calculate K matrix using 1-thread-1-primitive-integral scheme
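// Same blocking idea as the J kernel, but with one block per (i,k) pair and threads striding over (j,l);
// the exchange contraction uses D[bd] directly, so no off-diagonal doubling is applied.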
__global__ void cuda_mat_K_PI(
const double *__restrict pbf_xlec,
const int *__restrict pbf_to_cbf,
int n_pbf,
const double *__restrict mat_D,
double *__restrict mat_K_PI,
const double *__restrict mat_Q)
{
__shared__ double elem_K_PI[BLOCKSIZE * BLOCKSIZE];
// each block scans over [i?|k?] and sums the contributions into a primitive K matrix element
int i = blockIdx.x;
int k = blockIdx.y;
// avoid accessing out of bounds elements and make use of ij<=>kl symmetry
if (i >= n_pbf || k > i) { return; }
int ik = cuda_ij2intindex(i,k);
const double *ptr_i = &pbf_xlec[i * 8];
const double *ptr_k = &pbf_xlec[k * 8];
int a = pbf_to_cbf[i];
int c = pbf_to_cbf[k];
// initialize shared array
elem_K_PI[threadIdx.x * BLOCKSIZE + threadIdx.y] = 0.0;
for (int j = threadIdx.x; j < n_pbf; j += BLOCKSIZE)
{
int b = pbf_to_cbf[j];
int ab = cuda_ij2intindex(a,b);
const double *ptr_j = &pbf_xlec[j * 8];
for (int l = threadIdx.y; l < n_pbf; l += BLOCKSIZE)
{
int d = pbf_to_cbf[l];
int cd = cuda_ij2intindex(c,d);
int bd = cuda_ij2intindex(b,d);
// Schwartz screening
if (fabs(mat_Q[ab] * mat_Q[cd] * mat_D[bd]) < SCREEN_THR) { continue; }
const double *ptr_l = &pbf_xlec[l * 8];
// calculate ERI
double this_eri = cuda_rys_pbf(ptr_i, ptr_j, ptr_k, ptr_l);
// NOTE: no doubling for off-diagonal elements of D
elem_K_PI[threadIdx.x * BLOCKSIZE + threadIdx.y] += this_eri * mat_D[bd];
}
}
__syncthreads();
// only update mat_K_PI on one thread of the block
if (0 == threadIdx.x && 0 == threadIdx.y)
{
mat_K_PI[ik] = 0.0;
for (int t1 = 0; t1 < BLOCKSIZE; ++ t1) {
for (int t2 = 0; t2 < BLOCKSIZE; ++ t2) {
mat_K_PI[ik] += elem_K_PI[t1 * BLOCKSIZE + t2];
}
}
}
}
|
7c632beec8f94ebbeb2e87e3e100b5be80280d21.hip | // !!! This is a file automatically generated by hipify!!!
/**
* Copyright (c) Facebook, Inc. and its affiliates.
*
* This source code is licensed under the MIT license found in the
* LICENSE file in the root directory of this source tree.
*/
#include <faiss/gpu/impl/IVFFlat.cuh>
#include <faiss/gpu/GpuResources.h>
#include <faiss/gpu/impl/FlatIndex.cuh>
#include <faiss/gpu/impl/IVFAppend.cuh>
#include <faiss/gpu/impl/IVFFlatScan.cuh>
#include <faiss/gpu/impl/RemapIndices.h>
#include <faiss/gpu/utils/ConversionOperators.cuh>
#include <faiss/gpu/utils/CopyUtils.cuh>
#include <faiss/gpu/utils/DeviceDefs.cuh>
#include <faiss/gpu/utils/DeviceUtils.h>
#include <faiss/gpu/utils/Float16.cuh>
#include <faiss/gpu/utils/HostTensor.cuh>
#include <faiss/gpu/utils/Transpose.cuh>
#include <faiss/utils/utils.h>
#include <limits>
#include <thrust/host_vector.h>
#include <unordered_map>
#include <numeric>
namespace faiss { namespace gpu {
IVFFlat::IVFFlat(GpuResources* resources,
FlatIndex* quantizer,
faiss::MetricType metric,
bool useResidual,
faiss::ScalarQuantizer* scalarQ,
IndicesOptions indicesOptions,
MemorySpace space) :
IVFBase(resources,
quantizer,
scalarQ ? scalarQ->code_size :
sizeof(float) * quantizer->getDim(),
indicesOptions,
space),
metric_(metric),
useResidual_(useResidual),
scalarQ_(scalarQ ? new GpuScalarQuantizer(*scalarQ) : nullptr) {
}
IVFFlat::~IVFFlat() {
}
void
IVFFlat::copyCodeVectorsFromCpu(const float* vecs,
const long* indices,
const std::vector<size_t>& list_length) {
FAISS_ASSERT_FMT(list_length.size() == this->getNumLists(), "Expect list size %zu but %zu received!",
this->getNumLists(), list_length.size());
int64_t numVecs = std::accumulate(list_length.begin(), list_length.end(), 0);
if (numVecs == 0) {
return;
}
auto stream = resources_->getDefaultStreamCurrentDevice();
deviceListLengths_ = list_length;
int64_t lengthInBytes = numVecs * bytesPerVector_;
// We only have int32 length representations on the GPU per each
// list; the length is in sizeof(char)
FAISS_ASSERT(deviceData_->size() + lengthInBytes <= std::numeric_limits<int64_t>::max());
deviceData_->append((unsigned char*) vecs,
lengthInBytes,
stream,
true /* exact reserved size */);
copyIndicesFromCpu_(indices, list_length);
maxListLength_ = 0;
size_t listId = 0;
size_t pos = 0;
size_t size = 0;
thrust::host_vector<void*> hostPointers(deviceListData_.size(), nullptr);
for (auto& device_data : deviceListData_) {
auto data = deviceData_->data() + pos;
size = list_length[listId] * bytesPerVector_;
device_data->reset(data, size, size);
hostPointers[listId] = device_data->data();
maxListLength_ = std::max(maxListLength_, (int)list_length[listId]);
pos += size;
++ listId;
}
deviceListDataPointers_ = hostPointers;
// device_vector add is potentially happening on a different stream
// than our default stream
if (stream != 0) {
streamWait({stream}, {0});
}
}
void
IVFFlat::addCodeVectorsFromCpu(int listId,
const unsigned char* vecs,
const long* indices,
size_t numVecs) {
// This list must already exist
FAISS_ASSERT(listId < deviceListData_.size());
auto stream = resources_->getDefaultStreamCurrentDevice();
// If there's nothing to add, then there's nothing we have to do
if (numVecs == 0) {
return;
}
size_t lengthInBytes = numVecs * bytesPerVector_;
auto& listData = deviceListData_[listId];
auto prevData = listData->data();
// We only have int32 length representations on the GPU per each
// list; the length is in sizeof(char)
FAISS_ASSERT(listData->size() + lengthInBytes <=
(size_t) std::numeric_limits<int>::max());
listData->append(vecs,
lengthInBytes,
stream,
true /* exact reserved size */);
// Handle the indices as well
addIndicesFromCpu_(listId, indices, numVecs);
// This list address may have changed due to vector resizing, but
// only bother updating it on the device if it has changed
if (prevData != listData->data()) {
deviceListDataPointers_[listId] = listData->data();
}
// And our size has changed too
int listLength = listData->size() / bytesPerVector_;
deviceListLengths_[listId] = listLength;
// We update this as well, since the multi-pass algorithm uses it
maxListLength_ = std::max(maxListLength_, listLength);
// device_vector add is potentially happening on a different stream
// than our default stream
if (stream != 0) {
streamWait({stream}, {0});
}
}
int
IVFFlat::classifyAndAddVectors(Tensor<float, 2, true>& vecs,
Tensor<long, 1, true>& indices) {
FAISS_ASSERT(vecs.getSize(0) == indices.getSize(0));
FAISS_ASSERT(vecs.getSize(1) == dim_);
auto& mem = resources_->getMemoryManagerCurrentDevice();
auto stream = resources_->getDefaultStreamCurrentDevice();
// Number of valid vectors that we actually add; we return this
int numAdded = 0;
DeviceTensor<float, 2, true>
listDistance2d(mem, {vecs.getSize(0), 1}, stream);
DeviceTensor<int, 2, true>
listIds2d(mem, {vecs.getSize(0), 1}, stream);
auto listIds = listIds2d.view<1>({vecs.getSize(0)});
quantizer_->query(vecs, 1, listDistance2d, listIds2d, false);
// Calculate residuals for these vectors, if needed
DeviceTensor<float, 2, true>
residuals(mem, {vecs.getSize(0), dim_}, stream);
if (useResidual_) {
quantizer_->computeResidual(vecs, listIds, residuals);
}
// Copy the lists that we wish to append to back to the CPU
// FIXME: really this can be into pinned memory and a true async
// copy on a different stream; we can start the copy early, but it's
// tiny
HostTensor<int, 1, true> listIdsHost(listIds, stream);
// Now we add the encoded vectors to the individual lists
// First, make sure that there is space available for adding the new
// encoded vectors and indices
// list id -> # being added
std::unordered_map<int, int> assignCounts;
// vector id -> offset in list
// (we already have vector id -> list id in listIds)
HostTensor<int, 1, true> listOffsetHost({listIdsHost.getSize(0)});
for (int i = 0; i < listIds.getSize(0); ++i) {
int listId = listIdsHost[i];
// Add vector could be invalid (contains NaNs etc)
if (listId < 0) {
listOffsetHost[i] = -1;
continue;
}
FAISS_ASSERT(listId < numLists_);
++numAdded;
int offset = deviceListData_[listId]->size() / bytesPerVector_;
auto it = assignCounts.find(listId);
if (it != assignCounts.end()) {
offset += it->second;
it->second++;
} else {
assignCounts[listId] = 1;
}
listOffsetHost[i] = offset;
}
// If we didn't add anything (all invalid vectors), no need to
// continue
if (numAdded == 0) {
return 0;
}
// We need to resize the data structures for the inverted lists on
// the GPUs, which means that they might need reallocation, which
// means that their base address may change. Figure out the new base
// addresses, and update those in a batch on the device
{
for (auto& counts : assignCounts) {
auto& data = deviceListData_[counts.first];
data->resize(data->size() + counts.second * bytesPerVector_,
stream);
int newNumVecs = (int) (data->size() / bytesPerVector_);
auto& indices = deviceListIndices_[counts.first];
if ((indicesOptions_ == INDICES_32_BIT) ||
(indicesOptions_ == INDICES_64_BIT)) {
size_t indexSize =
(indicesOptions_ == INDICES_32_BIT) ? sizeof(int) : sizeof(long);
indices->resize(indices->size() + counts.second * indexSize, stream);
} else if (indicesOptions_ == INDICES_CPU) {
// indices are stored on the CPU side
FAISS_ASSERT(counts.first < listOffsetToUserIndex_.size());
auto& userIndices = listOffsetToUserIndex_[counts.first];
userIndices.resize(newNumVecs);
} else {
// indices are not stored on the GPU or CPU side
FAISS_ASSERT(indicesOptions_ == INDICES_IVF);
}
// This is used by the multi-pass query to decide how much scratch
// space to allocate for intermediate results
maxListLength_ = std::max(maxListLength_, newNumVecs);
}
// Update all pointers to the lists on the device that may have
// changed
{
std::vector<int> listIds(assignCounts.size());
int i = 0;
for (auto& counts : assignCounts) {
listIds[i++] = counts.first;
}
updateDeviceListInfo_(listIds, stream);
}
}
// If we're maintaining the indices on the CPU side, update our
// map. We already resized our map above.
if (indicesOptions_ == INDICES_CPU) {
// We need to maintain the indices on the CPU side
HostTensor<long, 1, true> hostIndices(indices, stream);
for (int i = 0; i < hostIndices.getSize(0); ++i) {
int listId = listIdsHost[i];
// Add vector could be invalid (contains NaNs etc)
if (listId < 0) {
continue;
}
int offset = listOffsetHost[i];
FAISS_ASSERT(listId < listOffsetToUserIndex_.size());
auto& userIndices = listOffsetToUserIndex_[listId];
FAISS_ASSERT(offset < userIndices.size());
userIndices[offset] = hostIndices[i];
}
}
// We similarly need to actually append the new vectors
{
DeviceTensor<int, 1, true> listOffset(mem, listOffsetHost, stream);
// Now, for each list to which a vector is being assigned, write it
runIVFFlatInvertedListAppend(listIds,
listOffset,
vecs,
indices,
useResidual_,
residuals,
scalarQ_.get(),
deviceListDataPointers_,
deviceListIndexPointers_,
indicesOptions_,
stream);
}
return numAdded;
}
void
IVFFlat::query(Tensor<float, 2, true>& queries,
int nprobe,
int k,
Tensor<float, 2, true>& outDistances,
Tensor<long, 2, true>& outIndices) {
auto& mem = resources_->getMemoryManagerCurrentDevice();
auto stream = resources_->getDefaultStreamCurrentDevice();
// These are caught at a higher level
FAISS_ASSERT(nprobe <= GPU_MAX_SELECTION_K);
FAISS_ASSERT(k <= GPU_MAX_SELECTION_K);
nprobe = std::min(nprobe, quantizer_->getSize());
FAISS_ASSERT(queries.getSize(1) == dim_);
FAISS_ASSERT(outDistances.getSize(0) == queries.getSize(0));
FAISS_ASSERT(outIndices.getSize(0) == queries.getSize(0));
// Reserve space for the quantized information
DeviceTensor<float, 2, true>
coarseDistances(mem, {queries.getSize(0), nprobe}, stream);
DeviceTensor<int, 2, true>
coarseIndices(mem, {queries.getSize(0), nprobe}, stream);
// Find the `nprobe` closest lists; we can use int indices both
// internally and externally
quantizer_->query(queries,
nprobe,
coarseDistances,
coarseIndices,
false);
DeviceTensor<float, 3, true>
residualBase(mem, {queries.getSize(0), nprobe, dim_}, stream);
if (useResidual_) {
// Reconstruct vectors from the quantizer
quantizer_->reconstruct(coarseIndices, residualBase);
}
runIVFFlatScan(queries,
coarseIndices,
deviceListDataPointers_,
deviceListIndexPointers_,
indicesOptions_,
deviceListLengths_,
maxListLength_,
k,
metric_,
useResidual_,
residualBase,
scalarQ_.get(),
outDistances,
outIndices,
resources_);
// If the GPU isn't storing indices (they are on the CPU side), we
// need to perform the re-mapping here
// FIXME: we might ultimately be calling this function with inputs
// from the CPU, these are unnecessary copies
if (indicesOptions_ == INDICES_CPU) {
HostTensor<long, 2, true> hostOutIndices(outIndices, stream);
ivfOffsetToUserIndex(hostOutIndices.data(),
numLists_,
hostOutIndices.getSize(0),
hostOutIndices.getSize(1),
listOffsetToUserIndex_);
// Copy back to GPU, since the input to this function is on the
// GPU
outIndices.copyFrom(hostOutIndices, stream);
}
}
} } // namespace
| 7c632beec8f94ebbeb2e87e3e100b5be80280d21.cu | /**
* Copyright (c) Facebook, Inc. and its affiliates.
*
* This source code is licensed under the MIT license found in the
* LICENSE file in the root directory of this source tree.
*/
#include <faiss/gpu/impl/IVFFlat.cuh>
#include <faiss/gpu/GpuResources.h>
#include <faiss/gpu/impl/FlatIndex.cuh>
#include <faiss/gpu/impl/IVFAppend.cuh>
#include <faiss/gpu/impl/IVFFlatScan.cuh>
#include <faiss/gpu/impl/RemapIndices.h>
#include <faiss/gpu/utils/ConversionOperators.cuh>
#include <faiss/gpu/utils/CopyUtils.cuh>
#include <faiss/gpu/utils/DeviceDefs.cuh>
#include <faiss/gpu/utils/DeviceUtils.h>
#include <faiss/gpu/utils/Float16.cuh>
#include <faiss/gpu/utils/HostTensor.cuh>
#include <faiss/gpu/utils/Transpose.cuh>
#include <faiss/utils/utils.h>
#include <limits>
#include <thrust/host_vector.h>
#include <unordered_map>
#include <numeric>
namespace faiss { namespace gpu {
IVFFlat::IVFFlat(GpuResources* resources,
FlatIndex* quantizer,
faiss::MetricType metric,
bool useResidual,
faiss::ScalarQuantizer* scalarQ,
IndicesOptions indicesOptions,
MemorySpace space) :
IVFBase(resources,
quantizer,
scalarQ ? scalarQ->code_size :
sizeof(float) * quantizer->getDim(),
indicesOptions,
space),
metric_(metric),
useResidual_(useResidual),
scalarQ_(scalarQ ? new GpuScalarQuantizer(*scalarQ) : nullptr) {
}
IVFFlat::~IVFFlat() {
}
void
IVFFlat::copyCodeVectorsFromCpu(const float* vecs,
const long* indices,
const std::vector<size_t>& list_length) {
FAISS_ASSERT_FMT(list_length.size() == this->getNumLists(), "Expect list size %zu but %zu received!",
this->getNumLists(), list_length.size());
int64_t numVecs = std::accumulate(list_length.begin(), list_length.end(), 0);
if (numVecs == 0) {
return;
}
auto stream = resources_->getDefaultStreamCurrentDevice();
deviceListLengths_ = list_length;
int64_t lengthInBytes = numVecs * bytesPerVector_;
// We only have int32 length representations on the GPU per each
// list; the length is in sizeof(char)
FAISS_ASSERT(deviceData_->size() + lengthInBytes <= std::numeric_limits<int64_t>::max());
deviceData_->append((unsigned char*) vecs,
lengthInBytes,
stream,
true /* exact reserved size */);
copyIndicesFromCpu_(indices, list_length);
maxListLength_ = 0;
size_t listId = 0;
size_t pos = 0;
size_t size = 0;
thrust::host_vector<void*> hostPointers(deviceListData_.size(), nullptr);
for (auto& device_data : deviceListData_) {
auto data = deviceData_->data() + pos;
size = list_length[listId] * bytesPerVector_;
device_data->reset(data, size, size);
hostPointers[listId] = device_data->data();
maxListLength_ = std::max(maxListLength_, (int)list_length[listId]);
pos += size;
++ listId;
}
deviceListDataPointers_ = hostPointers;
// device_vector add is potentially happening on a different stream
// than our default stream
if (stream != 0) {
streamWait({stream}, {0});
}
}
void
IVFFlat::addCodeVectorsFromCpu(int listId,
const unsigned char* vecs,
const long* indices,
size_t numVecs) {
// This list must already exist
FAISS_ASSERT(listId < deviceListData_.size());
auto stream = resources_->getDefaultStreamCurrentDevice();
// If there's nothing to add, then there's nothing we have to do
if (numVecs == 0) {
return;
}
size_t lengthInBytes = numVecs * bytesPerVector_;
auto& listData = deviceListData_[listId];
auto prevData = listData->data();
// We only have int32 length representations on the GPU per each
// list; the length is in sizeof(char)
FAISS_ASSERT(listData->size() + lengthInBytes <=
(size_t) std::numeric_limits<int>::max());
listData->append(vecs,
lengthInBytes,
stream,
true /* exact reserved size */);
// Handle the indices as well
addIndicesFromCpu_(listId, indices, numVecs);
// This list address may have changed due to vector resizing, but
// only bother updating it on the device if it has changed
if (prevData != listData->data()) {
deviceListDataPointers_[listId] = listData->data();
}
// And our size has changed too
int listLength = listData->size() / bytesPerVector_;
deviceListLengths_[listId] = listLength;
// We update this as well, since the multi-pass algorithm uses it
maxListLength_ = std::max(maxListLength_, listLength);
// device_vector add is potentially happening on a different stream
// than our default stream
if (stream != 0) {
streamWait({stream}, {0});
}
}
int
IVFFlat::classifyAndAddVectors(Tensor<float, 2, true>& vecs,
Tensor<long, 1, true>& indices) {
FAISS_ASSERT(vecs.getSize(0) == indices.getSize(0));
FAISS_ASSERT(vecs.getSize(1) == dim_);
auto& mem = resources_->getMemoryManagerCurrentDevice();
auto stream = resources_->getDefaultStreamCurrentDevice();
// Number of valid vectors that we actually add; we return this
int numAdded = 0;
DeviceTensor<float, 2, true>
listDistance2d(mem, {vecs.getSize(0), 1}, stream);
DeviceTensor<int, 2, true>
listIds2d(mem, {vecs.getSize(0), 1}, stream);
auto listIds = listIds2d.view<1>({vecs.getSize(0)});
quantizer_->query(vecs, 1, listDistance2d, listIds2d, false);
// Calculate residuals for these vectors, if needed
DeviceTensor<float, 2, true>
residuals(mem, {vecs.getSize(0), dim_}, stream);
if (useResidual_) {
quantizer_->computeResidual(vecs, listIds, residuals);
}
// Copy the lists that we wish to append to back to the CPU
// FIXME: really this can be into pinned memory and a true async
// copy on a different stream; we can start the copy early, but it's
// tiny
HostTensor<int, 1, true> listIdsHost(listIds, stream);
// Now we add the encoded vectors to the individual lists
// First, make sure that there is space available for adding the new
// encoded vectors and indices
// list id -> # being added
std::unordered_map<int, int> assignCounts;
// vector id -> offset in list
// (we already have vector id -> list id in listIds)
HostTensor<int, 1, true> listOffsetHost({listIdsHost.getSize(0)});
for (int i = 0; i < listIds.getSize(0); ++i) {
int listId = listIdsHost[i];
// Add vector could be invalid (contains NaNs etc)
if (listId < 0) {
listOffsetHost[i] = -1;
continue;
}
FAISS_ASSERT(listId < numLists_);
++numAdded;
int offset = deviceListData_[listId]->size() / bytesPerVector_;
auto it = assignCounts.find(listId);
if (it != assignCounts.end()) {
offset += it->second;
it->second++;
} else {
assignCounts[listId] = 1;
}
listOffsetHost[i] = offset;
}
// If we didn't add anything (all invalid vectors), no need to
// continue
if (numAdded == 0) {
return 0;
}
// We need to resize the data structures for the inverted lists on
// the GPUs, which means that they might need reallocation, which
// means that their base address may change. Figure out the new base
// addresses, and update those in a batch on the device
{
for (auto& counts : assignCounts) {
auto& data = deviceListData_[counts.first];
data->resize(data->size() + counts.second * bytesPerVector_,
stream);
int newNumVecs = (int) (data->size() / bytesPerVector_);
auto& indices = deviceListIndices_[counts.first];
if ((indicesOptions_ == INDICES_32_BIT) ||
(indicesOptions_ == INDICES_64_BIT)) {
size_t indexSize =
(indicesOptions_ == INDICES_32_BIT) ? sizeof(int) : sizeof(long);
indices->resize(indices->size() + counts.second * indexSize, stream);
} else if (indicesOptions_ == INDICES_CPU) {
// indices are stored on the CPU side
FAISS_ASSERT(counts.first < listOffsetToUserIndex_.size());
auto& userIndices = listOffsetToUserIndex_[counts.first];
userIndices.resize(newNumVecs);
} else {
// indices are not stored on the GPU or CPU side
FAISS_ASSERT(indicesOptions_ == INDICES_IVF);
}
// This is used by the multi-pass query to decide how much scratch
// space to allocate for intermediate results
maxListLength_ = std::max(maxListLength_, newNumVecs);
}
// Update all pointers to the lists on the device that may have
// changed
{
std::vector<int> listIds(assignCounts.size());
int i = 0;
for (auto& counts : assignCounts) {
listIds[i++] = counts.first;
}
updateDeviceListInfo_(listIds, stream);
}
}
// If we're maintaining the indices on the CPU side, update our
// map. We already resized our map above.
if (indicesOptions_ == INDICES_CPU) {
// We need to maintain the indices on the CPU side
HostTensor<long, 1, true> hostIndices(indices, stream);
for (int i = 0; i < hostIndices.getSize(0); ++i) {
int listId = listIdsHost[i];
// Add vector could be invalid (contains NaNs etc)
if (listId < 0) {
continue;
}
int offset = listOffsetHost[i];
FAISS_ASSERT(listId < listOffsetToUserIndex_.size());
auto& userIndices = listOffsetToUserIndex_[listId];
FAISS_ASSERT(offset < userIndices.size());
userIndices[offset] = hostIndices[i];
}
}
// We similarly need to actually append the new vectors
{
DeviceTensor<int, 1, true> listOffset(mem, listOffsetHost, stream);
// Now, for each list to which a vector is being assigned, write it
runIVFFlatInvertedListAppend(listIds,
listOffset,
vecs,
indices,
useResidual_,
residuals,
scalarQ_.get(),
deviceListDataPointers_,
deviceListIndexPointers_,
indicesOptions_,
stream);
}
return numAdded;
}
void
IVFFlat::query(Tensor<float, 2, true>& queries,
int nprobe,
int k,
Tensor<float, 2, true>& outDistances,
Tensor<long, 2, true>& outIndices) {
auto& mem = resources_->getMemoryManagerCurrentDevice();
auto stream = resources_->getDefaultStreamCurrentDevice();
// These are caught at a higher level
FAISS_ASSERT(nprobe <= GPU_MAX_SELECTION_K);
FAISS_ASSERT(k <= GPU_MAX_SELECTION_K);
nprobe = std::min(nprobe, quantizer_->getSize());
FAISS_ASSERT(queries.getSize(1) == dim_);
FAISS_ASSERT(outDistances.getSize(0) == queries.getSize(0));
FAISS_ASSERT(outIndices.getSize(0) == queries.getSize(0));
// Reserve space for the quantized information
DeviceTensor<float, 2, true>
coarseDistances(mem, {queries.getSize(0), nprobe}, stream);
DeviceTensor<int, 2, true>
coarseIndices(mem, {queries.getSize(0), nprobe}, stream);
// Find the `nprobe` closest lists; we can use int indices both
// internally and externally
quantizer_->query(queries,
nprobe,
coarseDistances,
coarseIndices,
false);
DeviceTensor<float, 3, true>
residualBase(mem, {queries.getSize(0), nprobe, dim_}, stream);
if (useResidual_) {
// Reconstruct vectors from the quantizer
quantizer_->reconstruct(coarseIndices, residualBase);
}
runIVFFlatScan(queries,
coarseIndices,
deviceListDataPointers_,
deviceListIndexPointers_,
indicesOptions_,
deviceListLengths_,
maxListLength_,
k,
metric_,
useResidual_,
residualBase,
scalarQ_.get(),
outDistances,
outIndices,
resources_);
// If the GPU isn't storing indices (they are on the CPU side), we
// need to perform the re-mapping here
// FIXME: we might ultimately be calling this function with inputs
// from the CPU, these are unnecessary copies
if (indicesOptions_ == INDICES_CPU) {
HostTensor<long, 2, true> hostOutIndices(outIndices, stream);
ivfOffsetToUserIndex(hostOutIndices.data(),
numLists_,
hostOutIndices.getSize(0),
hostOutIndices.getSize(1),
listOffsetToUserIndex_);
// Copy back to GPU, since the input to this function is on the
// GPU
outIndices.copyFrom(hostOutIndices, stream);
}
}
} } // namespace
|
fb8ff568a178731b2460fac711cd19057ee67e92.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <iostream>
#include <cstdio>
#include <string>
#include <sstream>
#include <vector>
#include <thrust/host_vector.h>
#include <thrust/device_vector.h>
#define DocNum 10
#define Doc_Size 9
#define Classes 2
#define BLOCK_SIZE 512
#define DocWords 20
#define DocClass_0 6
#define DocClass_1 4
using namespace std;
using namespace thrust;
__host__ int isin(host_vector<string> vocab, string f) // just check that is string f in vector vocab?
{
// cout << "debug " << f << endl;
if (!vocab.empty())
{
// cout << "vocab not empty" << endl;
for (int i = 0; i < vocab.size(); i++)
{
if (vocab[i].compare(f) == 0)
{
return i;
}
}
}
return -1;
};
__host__ void translateDoc( host_vector<string> vocabList,host_vector<string> docs, int* docWord_arr) {
int index = 0;
for (int i = 0; i < docs.size(); i++) {
stringstream ssin(docs[i]);
string word;
while (ssin >> word)
{
docWord_arr[index] = isin(vocabList, word);
index++;
}
}
}
__host__ void getVocab(host_vector<string> &docList, host_vector<string> &vocabList) {
for (int i = 0; i < docList.size(); i++) {
stringstream ssin(docList[i]);
string word;
// printf("%s\n", word);
while (ssin >> word) {
if (isin(vocabList, word) == -1){
vocabList.push_back(word);
}
}
}
// for (int i = 0; i < DocNum; i++)
// {
// stringstream ssin(docList[i]);
// string word;
// printf("%s\n", word);
// while (ssin >> word)
// {
// if (isin(vocabList, word) == -1)
// {
// vocabList.push_back(word);
// }
// }
// }
}
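// One thread per vocabulary word: each thread scans every word slot of the class's documents and counts
// how often its term occurs.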
__global__ void term_ClassN(int * doc, int * termInClass, int nDoc) {
int tid = threadIdx.x;
// printf("this is from term_ClassN thread %d\n", tid);
for (int j = 0; j < nDoc*DocWords; j++) {
if (tid == doc[j]) {
// printf("thread id %d and doc word is %d\n",tid, doc[j]);
termInClass[tid] = termInClass[tid] + 1;
}
}
}
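// Add-one (Laplace-style) smoothing: the posterior for each term is (term count in class + 1) / (docs in class + 2).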
__global__ void find_posterior(int * termInClass, int * nDoc_class, double * posteriorProb) {
int tid = threadIdx.x;
double pos = ((termInClass[tid] + 1) * 1.0) / ((*nDoc_class + 2) * 1.0);
// printf("this is thread %d and pos is %lf add arr index %d\n",tid,pos,tid * (*cur_class));
posteriorProb[tid] = pos;
}
int main() {
// class 0 is ads class 1 is not ads
host_vector<string> c_0;
host_vector<string> c_1;
c_0.push_back("eligator hosting server we have hosting that can serve you Just paid 20 dollars per month for hosting your web");
c_0.push_back("explore our selection of local favorites with 0 dollars delivery fee for your first month 10 dollars order minimum terms");
c_0.push_back("need graphic design help in just a few clicks you can scale your creative output by hiring our pro designer");
c_0.push_back("so your business is up and running now what grow with a marketing crm that gets smarter as you go");
c_0.push_back("start and grow your business with shopify turn what you love into what you sell try shopify for free today");
c_0.push_back("looking for new glasses answer a few quick questions and we will suggest some great looking frames for you free");
c_1.push_back("today I feel like I want to sleep all day I just wanna lay in my bed and go sleep");
c_1.push_back("this week is rainy everyday I have to take my umbrella everyday it make me annoy sometimes when I walk");
c_1.push_back("I am so tired I just want to rest in my vacation time go see outside not sit in table");
c_1.push_back("she go to market to buy some pills but when she went out she forgot her wallet at her home");
host_vector<string> vocabList;
double priorProb[Classes];
hipEvent_t start, stop;
hipEventCreate(&start);
hipEventCreate(&stop);
priorProb[0] = ((DocClass_0 + 1) * 1.0) / (((DocClass_0 + DocClass_1) + 2) * 1.0);
priorProb[1] = ((DocClass_1 + 1) * 1.0) / (((DocClass_0 + DocClass_1) + 2) * 1.0);
getVocab(c_0, vocabList);
getVocab(c_1, vocabList);
int class_0_arr[DocClass_0*DocWords];
int class_1_arr[DocClass_1*DocWords];
int termInClass_0[DocNum*DocWords];
int termInClass_1[DocNum*DocWords];
for (int t = 0; t < DocNum*DocWords; t++) { // set value in termInClass to 0 for count in function
termInClass_0[t] = 0;
termInClass_1[t] = 0;
}
translateDoc(vocabList, c_0, class_0_arr);
translateDoc(vocabList, c_1, class_1_arr);
// kernel ---------------------------------------------------
int * d_doc_array, *d_termInClass_0,*d_termInClass_1 ;
// class 0
hipMalloc((void **) &d_doc_array, DocClass_0*DocWords*sizeof(int));
hipMalloc((void **) &d_termInClass_0, DocNum*DocWords*sizeof(int));
hipMemcpy(d_doc_array, &class_0_arr, DocClass_0*DocWords*sizeof(int), hipMemcpyHostToDevice);
hipMemcpy(d_termInClass_0, &termInClass_0, DocNum*DocWords*sizeof(int), hipMemcpyHostToDevice);
hipEventRecord(start);
hipLaunchKernelGGL(( term_ClassN), dim3(1),dim3(vocabList.size()), 0, 0, d_doc_array, d_termInClass_0,DocClass_0);
hipMemcpy(&termInClass_0, d_termInClass_0, DocNum*DocWords*sizeof(int), hipMemcpyDeviceToHost);
hipFree(d_doc_array);
hipFree(d_termInClass_0);
// ---------------
// class 1
hipMalloc((void **) &d_doc_array, DocClass_1*DocWords*sizeof(int));
hipMalloc((void **) &d_termInClass_1, DocNum*DocWords*sizeof(int));
hipMemcpy(d_doc_array, &class_1_arr, DocClass_1*DocWords*sizeof(int), hipMemcpyHostToDevice);
hipMemcpy(d_termInClass_1, &termInClass_1, DocNum*DocWords*sizeof(int), hipMemcpyHostToDevice);
hipLaunchKernelGGL(( term_ClassN), dim3(1),dim3(vocabList.size()), 0, 0, d_doc_array, d_termInClass_1,DocClass_1);
hipMemcpy(&termInClass_1, d_termInClass_1, DocNum*DocWords*sizeof(int), hipMemcpyDeviceToHost);
hipFree(d_doc_array);
hipFree(d_termInClass_1);
int * d_nDoc_class ;
double * d_posteriorProb_class0, *d_posteriorProb_class1;
double posteriorProb_class0[DocWords*DocNum];
double posteriorProb_class1[DocWords*DocNum];
// posteriorProb class 0 ---------------------
int size_of_docClass = DocClass_0;
hipMalloc((void **) &d_termInClass_0, DocNum*DocWords*sizeof(int));
hipMalloc((void **) &d_nDoc_class, sizeof(int));
hipMalloc((void **) &d_posteriorProb_class0, (DocWords*DocNum)*sizeof(double));
hipMemcpy(d_termInClass_0, &termInClass_0, DocNum*DocWords*sizeof(int), hipMemcpyHostToDevice);
hipMemcpy(d_nDoc_class, &size_of_docClass, sizeof(int), hipMemcpyHostToDevice);
hipMemcpy(d_posteriorProb_class0, &posteriorProb_class0, (DocWords*DocNum)*sizeof(double), hipMemcpyHostToDevice); // copy size matches the (DocWords*DocNum) host array and device allocation
hipLaunchKernelGGL(( find_posterior), dim3(1),dim3(vocabList.size()), 0, 0, d_termInClass_0, d_nDoc_class, d_posteriorProb_class0);
hipMemcpy(&posteriorProb_class0, d_posteriorProb_class0, (DocWords*DocNum)*sizeof(double), hipMemcpyDeviceToHost);
hipFree(d_termInClass_0);
hipFree(d_nDoc_class);
hipFree(d_posteriorProb_class0);
// -------------------------------------------
// cout << "----------" << endl;
// class 1 -----------------------------------
size_of_docClass = DocClass_1;
hipMalloc((void **) &d_termInClass_1, DocNum*DocWords*sizeof(int));
hipMalloc((void **) &d_nDoc_class, sizeof(int));
hipMalloc((void **) &d_posteriorProb_class1, (DocWords*DocNum)*sizeof(double));
hipMemcpy(d_termInClass_1, &termInClass_1, DocNum*DocWords*sizeof(int), hipMemcpyHostToDevice);
hipMemcpy(d_nDoc_class, &size_of_docClass, sizeof(int), hipMemcpyHostToDevice);
hipMemcpy(d_posteriorProb_class1, &posteriorProb_class1, (DocWords*DocNum)*sizeof(double), hipMemcpyHostToDevice);
hipLaunchKernelGGL(( find_posterior), dim3(1),dim3(vocabList.size()), 0, 0, d_termInClass_1, d_nDoc_class, d_posteriorProb_class1);
hipEventRecord(stop);
hipEventSynchronize(stop);
float milliseconds = 0;
hipEventElapsedTime(&milliseconds, start, stop);
hipMemcpy(&posteriorProb_class1, d_posteriorProb_class1, (DocWords*DocNum)*sizeof(double), hipMemcpyDeviceToHost);
hipFree(d_termInClass_1);
hipFree(d_nDoc_class);
hipFree(d_posteriorProb_class1);
// --------------------------------------------
// show value of priorProb and posteriorProb
cout << endl <<"This is priorProb" << endl << endl;
for (int pp = 0 ; pp < Classes; pp++) {
cout << priorProb[pp] << endl;
}
cout << endl << "this is posteriorProb" << endl << endl;
cout << "Class 0" << endl << endl;
for (int pp0 = 0; pp0 < vocabList.size(); pp0++) {
cout << posteriorProb_class0[pp0] << endl;
}
cout << endl << "Class 1" << endl << endl;
for (int pp1 = 0; pp1 < vocabList.size(); pp1++) {
cout << posteriorProb_class1[pp1] << endl;
}
// -----------------------------------------
cout << endl << "Time used: " << milliseconds << " milliseconds\n" << endl;;
} | fb8ff568a178731b2460fac711cd19057ee67e92.cu | #include <iostream>
#include <cstdio>
#include <string>
#include <sstream>
#include <vector>
#include <thrust/host_vector.h>
#include <thrust/device_vector.h>
#define DocNum 10
#define Doc_Size 9
#define Classes 2
#define BLOCK_SIZE 512
#define DocWords 20
#define DocClass_0 6
#define DocClass_1 4
using namespace std;
using namespace thrust;
__host__ int isin(host_vector<string> vocab, string f) // just check that is string f in vector vocab?
{
// cout << "debug " << f << endl;
if (!vocab.empty())
{
// cout << "vocab not empty" << endl;
for (int i = 0; i < vocab.size(); i++)
{
if (vocab[i].compare(f) == 0)
{
return i;
}
}
}
return -1;
};
__host__ void translateDoc( host_vector<string> vocabList,host_vector<string> docs, int* docWord_arr) {
int index = 0;
for (int i = 0; i < docs.size(); i++) {
stringstream ssin(docs[i]);
string word;
while (ssin >> word)
{
docWord_arr[index] = isin(vocabList, word);
index++;
}
}
}
__host__ void getVocab(host_vector<string> &docList, host_vector<string> &vocabList) {
for (int i = 0; i < docList.size(); i++) {
stringstream ssin(docList[i]);
string word;
// printf("%s\n", word);
while (ssin >> word) {
if (isin(vocabList, word) == -1){
vocabList.push_back(word);
}
}
}
// for (int i = 0; i < DocNum; i++)
// {
// stringstream ssin(docList[i]);
// string word;
// printf("%s\n", word);
// while (ssin >> word)
// {
// if (isin(vocabList, word) == -1)
// {
// vocabList.push_back(word);
// }
// }
// }
}
__global__ void term_ClassN(int * doc, int * termInClass, int nDoc) {
int tid = threadIdx.x;
// printf("this is from term_ClassN thread %d\n", tid);
for (int j = 0; j < nDoc*DocWords; j++) {
if (tid == doc[j]) {
// printf("thread id %d and doc word is %d\n",tid, doc[j]);
termInClass[tid] = termInClass[tid] + 1;
}
}
}
__global__ void find_posterior(int * termInClass, int * nDoc_class, double * posteriorProb) {
int tid = threadIdx.x;
double pos = ((termInClass[tid] + 1) * 1.0) / ((*nDoc_class + 2) * 1.0);
// printf("this is thread %d and pos is %lf add arr index %d\n",tid,pos,tid * (*cur_class));
posteriorProb[tid] = pos;
}
int main() {
// class 0 is ads class 1 is not ads
host_vector<string> c_0;
host_vector<string> c_1;
c_0.push_back("eligator hosting server we have hosting that can serve you Just paid 20 dollars per month for hosting your web");
c_0.push_back("explore our selection of local favorites with 0 dollars delivery fee for your first month 10 dollars order minimum terms");
c_0.push_back("need graphic design help in just a few clicks you can scale your creative output by hiring our pro designer");
c_0.push_back("so your business is up and running now what grow with a marketing crm that gets smarter as you go");
c_0.push_back("start and grow your business with shopify turn what you love into what you sell try shopify for free today");
c_0.push_back("looking for new glasses answer a few quick questions and we will suggest some great looking frames for you free");
c_1.push_back("today I feel like I want to sleep all day I just wanna lay in my bed and go sleep");
c_1.push_back("this week is rainy everyday I have to take my umbrella everyday it make me annoy sometimes when I walk");
c_1.push_back("I am so tired I just want to rest in my vacation time go see outside not sit in table");
c_1.push_back("she go to market to buy some pills but when she went out she forgot her wallet at her home");
host_vector<string> vocabList;
double priorProb[Classes];
cudaEvent_t start, stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);
priorProb[0] = ((DocClass_0 + 1) * 1.0) / (((DocClass_0 + DocClass_1) + 2) * 1.0);
priorProb[1] = ((DocClass_1 + 1) * 1.0) / (((DocClass_0 + DocClass_1) + 2) * 1.0);
getVocab(c_0, vocabList);
getVocab(c_1, vocabList);
int class_0_arr[DocClass_0*DocWords];
int class_1_arr[DocClass_1*DocWords];
int termInClass_0[DocNum*DocWords];
int termInClass_1[DocNum*DocWords];
for (int t = 0; t < DocNum*DocWords; t++) { // set value in termInClass to 0 for count in function
termInClass_0[t] = 0;
termInClass_1[t] = 0;
}
translateDoc(vocabList, c_0, class_0_arr);
translateDoc(vocabList, c_1, class_1_arr);
// kernel ---------------------------------------------------
int * d_doc_array, *d_termInClass_0,*d_termInClass_1 ;
// class 0
cudaMalloc((void **) &d_doc_array, DocClass_0*DocWords*sizeof(int));
cudaMalloc((void **) &d_termInClass_0, DocNum*DocWords*sizeof(int));
cudaMemcpy(d_doc_array, &class_0_arr, DocClass_0*DocWords*sizeof(int), cudaMemcpyHostToDevice);
cudaMemcpy(d_termInClass_0, &termInClass_0, DocNum*DocWords*sizeof(int), cudaMemcpyHostToDevice);
cudaEventRecord(start);
term_ClassN<<<1,vocabList.size()>>>(d_doc_array, d_termInClass_0,DocClass_0);
cudaMemcpy(&termInClass_0, d_termInClass_0, DocNum*DocWords*sizeof(int), cudaMemcpyDeviceToHost);
cudaFree(d_doc_array);
cudaFree(d_termInClass_0);
// ---------------
// class 1
cudaMalloc((void **) &d_doc_array, DocClass_1*DocWords*sizeof(int));
cudaMalloc((void **) &d_termInClass_1, DocNum*DocWords*sizeof(int));
cudaMemcpy(d_doc_array, &class_1_arr, DocClass_1*DocWords*sizeof(int), cudaMemcpyHostToDevice);
cudaMemcpy(d_termInClass_1, &termInClass_1, DocNum*DocWords*sizeof(int), cudaMemcpyHostToDevice);
term_ClassN<<<1,vocabList.size()>>>(d_doc_array, d_termInClass_1,DocClass_1);
cudaMemcpy(&termInClass_1, d_termInClass_1, DocNum*DocWords*sizeof(int), cudaMemcpyDeviceToHost);
cudaFree(d_doc_array);
cudaFree(d_termInClass_1);
int * d_nDoc_class ;
double * d_posteriorProb_class0, *d_posteriorProb_class1;
double posteriorProb_class0[DocWords*DocNum];
double posteriorProb_class1[DocWords*DocNum];
// posteriorProb class 0 ---------------------
int size_of_docClass = DocClass_0;
cudaMalloc((void **) &d_termInClass_0, DocNum*DocWords*sizeof(int));
cudaMalloc((void **) &d_nDoc_class, sizeof(int));
cudaMalloc((void **) &d_posteriorProb_class0, (DocWords*DocNum)*sizeof(double));
cudaMemcpy(d_termInClass_0, &termInClass_0, DocNum*DocWords*sizeof(int), cudaMemcpyHostToDevice);
cudaMemcpy(d_nDoc_class, &size_of_docClass, sizeof(int), cudaMemcpyHostToDevice);
cudaMemcpy(d_posteriorProb_class0, &posteriorProb_class0, (DocWords*DocNum)*sizeof(double), cudaMemcpyHostToDevice); // copy size matches the (DocWords*DocNum) host array and device allocation
find_posterior<<<1,vocabList.size()>>>(d_termInClass_0, d_nDoc_class, d_posteriorProb_class0);
cudaMemcpy(&posteriorProb_class0, d_posteriorProb_class0, (DocWords*DocNum)*sizeof(double), cudaMemcpyDeviceToHost);
cudaFree(d_termInClass_0);
cudaFree(d_nDoc_class);
cudaFree(d_posteriorProb_class0);
// -------------------------------------------
// cout << "----------" << endl;
// class 1 -----------------------------------
size_of_docClass = DocClass_1;
cudaMalloc((void **) &d_termInClass_1, DocNum*DocWords*sizeof(int));
cudaMalloc((void **) &d_nDoc_class, sizeof(int));
cudaMalloc((void **) &d_posteriorProb_class1, (DocWords*DocNum)*sizeof(double));
cudaMemcpy(d_termInClass_1, &termInClass_1, DocNum*DocWords*sizeof(int), cudaMemcpyHostToDevice);
cudaMemcpy(d_nDoc_class, &size_of_docClass, sizeof(int), cudaMemcpyHostToDevice);
cudaMemcpy(d_posteriorProb_class1, &posteriorProb_class1, (DocWords*DocNum)*sizeof(double), cudaMemcpyHostToDevice);
find_posterior<<<1,vocabList.size()>>>(d_termInClass_1, d_nDoc_class, d_posteriorProb_class1);
cudaEventRecord(stop);
cudaEventSynchronize(stop);
float milliseconds = 0;
cudaEventElapsedTime(&milliseconds, start, stop);
cudaMemcpy(&posteriorProb_class1, d_posteriorProb_class1, (DocWords*DocNum)*sizeof(double), cudaMemcpyDeviceToHost);
cudaFree(d_termInClass_1);
cudaFree(d_nDoc_class);
cudaFree(d_posteriorProb_class1);
// --------------------------------------------
// show value of priorProb and posteriorProb
cout << endl <<"This is priorProb" << endl << endl;
for (int pp = 0 ; pp < Classes; pp++) {
cout << priorProb[pp] << endl;
}
cout << endl << "this is posteriorProb" << endl << endl;
cout << "Class 0" << endl << endl;
for (int pp0 = 0; pp0 < vocabList.size(); pp0++) {
cout << posteriorProb_class0[pp0] << endl;
}
cout << endl << "Class 1" << endl << endl;
for (int pp1 = 0; pp1 < vocabList.size(); pp1++) {
cout << posteriorProb_class1[pp1] << endl;
}
// -----------------------------------------
cout << endl << "Time used: " << milliseconds << " milliseconds\n" << endl;;
} |
4f7dc6c623be7cfb589465ffaca65eb624d35752.hip | // !!! This is a file automatically generated by hipify!!!
#include <CM/gpu.h>
#include <CM/utils.h>
#include <iostream>
#include <hip/hip_runtime.h>
#define LOG2 0.301029996
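// One thread per pixel: iterate z <- z^2 + c (escape when |z|^2 > 10000, at most 64 iterations) while also
// tracking the derivative dz/dc, which colour modes 4 and 5 use for slope/normal-style shading.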
__global__ void compute(unsigned char* out, Helper helper) {
double threadRowID = blockIdx.x * blockDim.x + threadIdx.x;
double threadColID = blockIdx.y * blockDim.y + threadIdx.y;
int idx = threadRowID * helper.width + threadColID;
double C_x = helper.lowerX + threadColID / double(helper.width) * helper.rangeX;
double C_y = helper.lowerY + threadRowID / double(helper.height) * helper.rangeY;
// do mandel comp. here
double Z_x = 0.0;
double Z_y = 0.0;
double abs_ = 0.0;
char n = 0;
double dc_x = 1;
double dc_y = 0;
double der_x = dc_x;
double der_y = dc_y;
double Znew_x;
double Znew_y;
double dernew_x;
double dernew_y;
// check for abs squared
while(abs_ <= 10000 && n < 64) {
Znew_x = Z_x * Z_x - Z_y * Z_y + C_x;
Znew_y = 2. * Z_x * Z_y + C_y;
dernew_x = (der_x * Z_x - der_y * Z_y)*2 + dc_x;
dernew_y = (der_y * Z_x + der_x * Z_y)*2 + dc_y;
Z_x = Znew_x;
Z_y = Znew_y;
der_x = dernew_x;
der_y = dernew_y;
abs_ = Z_x * Z_x + Z_y * Z_y;
n++;
}
if(helper.color == 1) {
out[3*idx + 0] = n*4;
out[3*idx + 1] = n*4;
out[3*idx + 2] = n*4;
}
else if(helper.color == 2) {
double k = LOG2;
double x = ::log(::log(abs_) / ::pow(2, n)) / k;
out[3*idx+0] = 255 * (1-std::cos(1.0 / LOG2 * x * 1.0)) * 0.5;
out[3*idx+1] = 255 * (1-std::cos(1.0 / LOG2 * x * (1.0 / (3.0 / 1.41421356))));
out[3*idx+2] = 255 * (1-std::cos(2.0 / LOG2 * x * 0.12452650612));
} else if(helper.color == 3) {
double x = ::log(::log(abs_) / ::pow(2, n)) / LOG2;
unsigned char val = 255 * (1 + std::cos(2 * 3.141592653589 * x))*0.5;
out[3*idx + 0] = val;
out[3*idx + 1] = val;
out[3*idx + 2] = val;
} else if(helper.color == 4) {
if(n == 64) {
// not enough iteratores
// inside
out[3*idx + 0] = 0;
out[3*idx + 1] = 0;
out[3*idx + 2] = 255;
} else {
// z / der
double u_x = (Z_x * der_x + Z_y * der_y);
double u_y = (der_y * Z_x - der_x * Z_y);
double u_norm = sqrt(u_x * u_x + u_y * u_y);
u_x = u_x / u_norm;
u_y = u_y / u_norm;
double t = u_x / 1.41421356 + u_y / 1.41421356 + 1.5;
t = t/ (1. + 1.5);
if(t < 0) t = 0;
out[3*idx + 0] = 255 * t;
out[3*idx + 1] = 255 * t;
out[3*idx + 2] = 255 * t;
}
} else if(helper.color == 5) {
if(n == 64) {
// not enough iteratores
// inside
out[3*idx + 0] = 0;
out[3*idx + 1] = 0;
out[3*idx + 2] = 255;
} else {
double u_x = (Z_x * der_x + Z_y * der_y);
double u_y = (der_y * Z_x - der_x * Z_y);
double u_norm = sqrt(u_x * u_x + u_y * u_y);
u_x = u_x / u_norm;
u_y = u_y / u_norm;
double t = u_x * helper.V_x + u_y * helper.V_y + 1.5;
t = t/ (1. + 1.5);
if(t < 0) t = 0;
out[3*idx + 0] = 255 * t;
out[3*idx + 1] = 255 * t;
out[3*idx + 2] = 255 * t;
}
} else {
out[3*idx + 0] = n;
out[3*idx + 1] = n;
out[3*idx + 2] = n;
}
}
void doCalc(unsigned char* out, Helper helper) {
dim3 block(helper.height, 1);
dim3 grid(1, helper.width);
hipLaunchKernelGGL(( compute), dim3(grid), dim3(block), 0, 0, out, helper);
}
void checkCuda()
{
std::cout << "CUDA Compiled version: " << __CUDACC_VER_MAJOR__ << "." << __CUDACC_VER_MINOR__ << std::endl;
int runtime_ver;
hipRuntimeGetVersion(&runtime_ver);
std::cout << "CUDA Runtime version: " << runtime_ver << std::endl;
int driver_ver;
hipDriverGetVersion(&driver_ver);
std::cout << "CUDA Driver version: " << driver_ver << std::endl;
} | 4f7dc6c623be7cfb589465ffaca65eb624d35752.cu | #include <CM/gpu.h>
#include <CM/utils.h>
#include <iostream>
#include <cuda_runtime.h>
#define LOG2 0.301029996
__global__ void compute(unsigned char* out, Helper helper) {
double threadRowID = blockIdx.x * blockDim.x + threadIdx.x;
double threadColID = blockIdx.y * blockDim.y + threadIdx.y;
int idx = threadRowID * helper.width + threadColID;
double C_x = helper.lowerX + threadColID / double(helper.width) * helper.rangeX;
double C_y = helper.lowerY + threadRowID / double(helper.height) * helper.rangeY;
// do mandel comp. here
double Z_x = 0.0;
double Z_y = 0.0;
double abs_ = 0.0;
char n = 0;
double dc_x = 1;
double dc_y = 0;
double der_x = dc_x;
double der_y = dc_y;
double Znew_x;
double Znew_y;
double dernew_x;
double dernew_y;
// check for abs squared
while(abs_ <= 10000 && n < 64) {
Znew_x = Z_x * Z_x - Z_y * Z_y + C_x;
Znew_y = 2. * Z_x * Z_y + C_y;
dernew_x = (der_x * Z_x - der_y * Z_y)*2 + dc_x;
dernew_y = (der_y * Z_x + der_x * Z_y)*2 + dc_y;
Z_x = Znew_x;
Z_y = Znew_y;
der_x = dernew_x;
der_y = dernew_y;
abs_ = Z_x * Z_x + Z_y * Z_y;
n++;
}
if(helper.color == 1) {
out[3*idx + 0] = n*4;
out[3*idx + 1] = n*4;
out[3*idx + 2] = n*4;
}
else if(helper.color == 2) {
double k = LOG2;
double x = std::log(std::log(abs_) / std::pow(2, n)) / k;
out[3*idx+0] = 255 * (1-std::cos(1.0 / LOG2 * x * 1.0)) * 0.5;
out[3*idx+1] = 255 * (1-std::cos(1.0 / LOG2 * x * (1.0 / (3.0 / 1.41421356))));
out[3*idx+2] = 255 * (1-std::cos(2.0 / LOG2 * x * 0.12452650612));
} else if(helper.color == 3) {
double x = std::log(std::log(abs_) / std::pow(2, n)) / LOG2;
unsigned char val = 255 * (1 + std::cos(2 * 3.141592653589 * x))*0.5;
out[3*idx + 0] = val;
out[3*idx + 1] = val;
out[3*idx + 2] = val;
} else if(helper.color == 4) {
if(n == 64) {
// not enough iteratores
// inside
out[3*idx + 0] = 0;
out[3*idx + 1] = 0;
out[3*idx + 2] = 255;
} else {
// z / der
double u_x = (Z_x * der_x + Z_y * der_y);
double u_y = (der_y * Z_x - der_x * Z_y);
double u_norm = sqrt(u_x * u_x + u_y * u_y);
u_x = u_x / u_norm;
u_y = u_y / u_norm;
double t = u_x / 1.41421356 + u_y / 1.41421356 + 1.5;
t = t/ (1. + 1.5);
if(t < 0) t = 0;
out[3*idx + 0] = 255 * t;
out[3*idx + 1] = 255 * t;
out[3*idx + 2] = 255 * t;
}
} else if(helper.color == 5) {
if(n == 64) {
// not enough iteratores
// inside
out[3*idx + 0] = 0;
out[3*idx + 1] = 0;
out[3*idx + 2] = 255;
} else {
double u_x = (Z_x * der_x + Z_y * der_y);
double u_y = (der_y * Z_x - der_x * Z_y);
double u_norm = sqrt(u_x * u_x + u_y * u_y);
u_x = u_x / u_norm;
u_y = u_y / u_norm;
double t = u_x * helper.V_x + u_y * helper.V_y + 1.5;
t = t/ (1. + 1.5);
if(t < 0) t = 0;
out[3*idx + 0] = 255 * t;
out[3*idx + 1] = 255 * t;
out[3*idx + 2] = 255 * t;
}
} else {
out[3*idx + 0] = n;
out[3*idx + 1] = n;
out[3*idx + 2] = n;
}
}
void doCalc(unsigned char* out, Helper helper) {
dim3 block(helper.height, 1);
dim3 grid(1, helper.width);
compute<<<grid, block>>>(out, helper);
}
void checkCuda()
{
std::cout << "CUDA Compiled version: " << __CUDACC_VER_MAJOR__ << "." << __CUDACC_VER_MINOR__ << std::endl;
int runtime_ver;
cudaRuntimeGetVersion(&runtime_ver);
std::cout << "CUDA Runtime version: " << runtime_ver << std::endl;
int driver_ver;
cudaDriverGetVersion(&driver_ver);
std::cout << "CUDA Driver version: " << driver_ver << std::endl;
} |
336fa505bb1b662227cb88f09ff0d93a26f61af0.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// User: [email protected]
// ExecutionRequest[P:'despacito.cu',P:1,T:1,args:'',q:'cudalb']
// May 15 2019 21:49:36
#include "cputils.h" // Added by tablon
/*
* Simplified simulation of fire extinguishing
*
* Computacion Paralela, Grado en Informatica (Universidad de Valladolid)
* 2018/2019
*
* v1.4
*
* (c) 2019 Arturo Gonzalez Escribano
*/
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <math.h>
#include <float.h>
#include <cputils.h>
#define RADIUS_TYPE_1 3
#define RADIUS_TYPE_2_3 9
#define THRESHOLD 0.1f
/* Structure to store data of an extinguishing team */
typedef struct {
int x,y;
int type;
int target;
} Team;
/* Structure to store data of a fire focal point */
typedef struct {
int x,y;
int start;
int heat;
int active; // States: 0 Not yet activated; 1 Active; 2 Deactivated by a team
} FocalPoint;
/* Macro function to simplify accessing with two coordinates to a flattened array */
#define accessMat( arr, exp1, exp2 ) arr[ (exp1) * columns + (exp2) ]
/*Kernels CUDA*************************************************************************************************/
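// Initialises (zeroes) one array element per thread.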
__global__ void icicializa(float *array){
array[ blockIdx.x *blockDim.x + threadIdx.x ] = 0;
}
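// Computes the per-cell absolute difference |surface - surfaceCopy| (the per-cell residual).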
__global__ void calculaGlobal(float *arrayDevice, const float *surface, const float *surfaceCopy){
//arrayDevice[ blockIdx.x *blockDim.x + threadIdx.x ] = 0;
int gid = blockIdx.x *blockDim.x + threadIdx.x;
//arrayDevice[ gid ]; //= /*fabs(*/ surface[gid]; //- surfaceCopy[blockIdx.x * blockDim.x + threadIdx.x]);
arrayDevice[ gid ]=fabs(surface[gid]-surfaceCopy[gid]);
//printf("%lf\n", arrayDevice[ gid ]);
}
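// Unrolled warp-level max reduction on volatile shared memory (no __syncthreads() needed within one warp).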
__device__ void warpReduce(volatile float* sdata, int tid) {
sdata[tid]=sdata[tid] > sdata[tid + 32] ? sdata[tid]: sdata[tid + 32];
sdata[tid]=sdata[tid] > sdata[tid + 16] ? sdata[tid]: sdata[tid + 16];
sdata[tid]=sdata[tid] > sdata[tid + 8] ? sdata[tid]: sdata[tid + 8];
sdata[tid]=sdata[tid] > sdata[tid + 4] ? sdata[tid]: sdata[tid + 4];
sdata[tid]=sdata[tid] > sdata[tid + 2] ? sdata[tid]: sdata[tid + 2];
sdata[tid]=sdata[tid] > sdata[tid + 1] ? sdata[tid]: sdata[tid + 1];
}
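// Block-level max reduction: each block loads 2*blockDim.x elements, reduces them in shared memory and
// writes its maximum to g_odata[blockIdx.x]; an odd trailing element (when size is odd) is handled separately.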
__global__ void reduce0(float *g_idata, float *g_odata, int size) {
extern __shared__ float sdata[];
// each thread loads one element from global to shared mem
/*unsigned int tid = threadIdx.x;
unsigned int i = blockIdx.x*blockDim.x + threadIdx.x;
sdata[tid] = g_idata[i];
__syncthreads();*/
unsigned int tid = threadIdx.x;
unsigned int i = blockIdx.x*(blockDim.x*2) + threadIdx.x;
sdata[tid] = g_idata[i] > g_idata[i+blockDim.x] ? g_idata[i] : g_idata[i+blockDim.x];
__syncthreads();
// do reduction in shared mem
//for(unsigned int s=1; s < blockDim.x; s *= 2) {
//if (tid % (2*s) == 0) {
for (unsigned int s=blockDim.x/2; s>32; s>>=1) {
if (tid < s)
if(sdata[tid]<sdata[tid + s]){
sdata[tid] = sdata[tid + s];
}
// Extra reduction step if there is an unpaired (odd) element
if ( size%2 != 0 && i == 0 ){ // Which thread takes care of this extra reduction?
if(g_idata[ i ] < g_idata[ size-1 ]){ // Where is the unpaired element?
g_odata[ i ] = g_idata[ size-1 ];
}
}
__syncthreads();
}
// Extra reduction step if there is an unpaired (odd) element
if ( size%2 != 0 && i == 0 ){ // Which thread takes care of this extra reduction?
if(g_idata[ i ] < g_idata[ size-1 ]){ // Where is the unpaired element?
g_odata[ i ] = g_idata[ size-1 ];
}
}
__syncthreads();
if (tid <= 32) warpReduce(sdata, tid);
__syncthreads();
//}
// write result for this block to global mem
if (tid == 0) g_odata[blockIdx.x] = sdata[0];
}
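// Same block-level max reduction as reduce0, except the shared-memory loop runs all the way down to s = 1.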
__global__ void reduce1(float *g_idata, float *g_odata, int size) {
extern __shared__ float sdata[];
// each thread loads one element from global to shared mem
/*unsigned int tid = threadIdx.x;
unsigned int i = blockIdx.x*blockDim.x + threadIdx.x;
sdata[tid] = g_idata[i];
__syncthreads();*/
unsigned int tid = threadIdx.x;
unsigned int i = blockIdx.x*(blockDim.x*2) + threadIdx.x;
sdata[tid] = g_idata[i] > g_idata[i+blockDim.x] ? g_idata[i] : g_idata[i+blockDim.x];
__syncthreads();
// do reduction in shared mem
//for(unsigned int s=1; s < blockDim.x; s *= 2) {
//if (tid % (2*s) == 0) {
for (unsigned int s=blockDim.x/2; s>0; s>>=1) {
if (tid < s)
if(sdata[tid]<sdata[tid + s]){
sdata[tid] = sdata[tid + s];
}
// Extra reduction step if there is an unpaired (odd) element
if ( size%2 != 0 && i == 0 ){ // Which thread takes care of this extra reduction?
if(g_idata[ i ] < g_idata[ size-1 ]){ // Where is the unpaired element?
g_odata[ i ] = g_idata[ size-1 ];
}
}
__syncthreads();
}
// Extra reduction step if there is an unpaired (odd) element
if ( size%2 != 0 && i == 0 ){ // Which thread takes care of this extra reduction?
if(g_idata[ i ] < g_idata[ size-1 ]){ // Where is the unpaired element?
g_odata[ i ] = g_idata[ size-1 ];
}
}
__syncthreads();
if (tid <= 32) warpReduce(sdata, tid);
__syncthreads();
//}
// write result for this block to global mem
if (tid == 0) g_odata[blockIdx.x] = sdata[0];
}
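// Jacobi-style stencil update: one block per row, one thread per column; interior cells take the average of
// their four neighbours from surfaceCopy, boundary cells are written as 0.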
__global__ void actualiza(float *actualizar, const float *surface, const float *surfaceCopy, int columns, int rows) {
int gid = blockIdx.x *blockDim.x + threadIdx.x;
/*int col= gid/columns;
int fila=(blockIdx.x*blockDim.x/columns)+(threadIdx.x/columns);*/
//int col= threadIdx.x;
//int fila= blockIdx.x;
if( blockIdx.x>0 && blockIdx.x<rows-1 && threadIdx.x>0 && threadIdx.x<columns-1){
accessMat(actualizar,blockIdx.x,threadIdx.x)= (accessMat( surfaceCopy, blockIdx.x-1, threadIdx.x ) +
accessMat( surfaceCopy, blockIdx.x+1, threadIdx.x ) +
accessMat( surfaceCopy, blockIdx.x, threadIdx.x-1 ) +
accessMat( surfaceCopy, blockIdx.x, threadIdx.x+1 ) ) / 4;
//(surfaceCopy[gid-blockDim.x]+surfaceCopy[gid+blockDim.x]+surfaceCopy[gid-1]+surfaceCopy[gid+1])/4;
//if(gid==59){printf("columna %d, fila %d\n",col, fila);}
}else{
actualizar[gid]=0.0f;
}
}
//__global__ void actualiza2(float *surface, const float *surfaceCopy2, const float *surfaceCopy) {
//}
/****************************************************************************************************************/
/*
* Function: Print usage line in stderr
*/
void show_usage( char *program_name ) {
fprintf(stderr,"Usage: %s <config_file> | <command_line_args>\n", program_name );
fprintf(stderr,"\t<config_file> ::= -f <file_name>\n");
fprintf(stderr,"\t<command_line_args> ::= <rows> <columns> <maxIter> <numTeams> [ <teamX> <teamY> <teamType> ... ] <numFocalPoints> [ <focalX> <focalY> <focalStart> <focalTemperature> ... ]\n");
fprintf(stderr,"\n");
}
#ifdef DEBUG
/*
* Function: Print the current state of the simulation
*/
void print_status( int iteration, int rows, int columns, float *surface, int num_teams, Team *teams, int num_focal, FocalPoint *focal, float global_residual ) {
/*
* You don't need to optimize this function, it is only for pretty printing and debugging purposes.
* It is not compiled in the production versions of the program.
* Thus, it is never used when measuring times in the leaderboard
*/
int i,j;
printf("Iteration: %d\n", iteration );
printf("+");
for( j=0; j<columns; j++ ) printf("---");
printf("+\n");
for( i=0; i<rows; i++ ) {
printf("|");
for( j=0; j<columns; j++ ) {
char symbol;
if ( accessMat( surface, i, j ) >= 1000 ) symbol = '*';
else if ( accessMat( surface, i, j ) >= 100 ) symbol = '0' + (int)(accessMat( surface, i, j )/100);
else if ( accessMat( surface, i, j ) >= 50 ) symbol = '+';
else if ( accessMat( surface, i, j ) >= 25 ) symbol = '.';
else symbol = '0';
int t;
int flag_team = 0;
for( t=0; t<num_teams; t++ )
if ( teams[t].x == i && teams[t].y == j ) { flag_team = 1; break; }
if ( flag_team ) printf("[%c]", symbol );
else {
int f;
int flag_focal = 0;
for( f=0; f<num_focal; f++ )
if ( focal[f].x == i && focal[f].y == j && focal[f].active == 1 ) { flag_focal = 1; break; }
if ( flag_focal ) printf("(%c)", symbol );
else printf(" %c ", symbol );
}
}
printf("|\n");
}
printf("+");
for( j=0; j<columns; j++ ) printf("---");
printf("+\n");
printf("Global residual: %f\n\n", global_residual);
}
#endif
/*
* MAIN PROGRAM
*/
int main(int argc, char *argv[]) {
int i,j,t;
// Simulation data
int rows, columns, max_iter;
float *surface, *surfaceCopy;
int num_teams, num_focal;
Team *teams;
FocalPoint *focal;
/* 1. Read simulation arguments */
/* 1.1. Check minimum number of arguments */
if (argc<2) {
fprintf(stderr,"-- Error in arguments: No arguments\n");
show_usage( argv[0] );
exit( EXIT_FAILURE );
}
int read_from_file = ! strcmp( argv[1], "-f" );
/* 1.2. Read configuration from file */
if ( read_from_file ) {
/* 1.2.1. Open file */
if (argc<3) {
fprintf(stderr,"-- Error in arguments: file-name argument missing\n");
show_usage( argv[0] );
exit( EXIT_FAILURE );
}
FILE *args = cp_abrir_fichero( argv[2] );
if ( args == NULL ) {
fprintf(stderr,"-- Error in file: not found: %s\n", argv[1]);
exit( EXIT_FAILURE );
}
/* 1.2.2. Read surface and maximum number of iterations */
int ok;
ok = fscanf(args, "%d %d %d", &rows, &columns, &max_iter);
if ( ok != 3 ) {
fprintf(stderr,"-- Error in file: reading rows, columns, max_iter from file: %s\n", argv[1]);
exit( EXIT_FAILURE );
}
surface = (float *)malloc( sizeof(float) * (size_t)rows * (size_t)columns );
surfaceCopy = (float *)malloc( sizeof(float) * (size_t)rows * (size_t)columns );
if ( surface == NULL || surfaceCopy == NULL ) {
fprintf(stderr,"-- Error allocating: surface structures\n");
exit( EXIT_FAILURE );
}
/* 1.2.3. Teams information */
ok = fscanf(args, "%d", &num_teams );
if ( ok != 1 ) {
fprintf(stderr,"-- Error file, reading num_teams from file: %s\n", argv[1]);
exit( EXIT_FAILURE );
}
teams = (Team *)malloc( sizeof(Team) * (size_t)num_teams );
if ( teams == NULL ) {
fprintf(stderr,"-- Error allocating: %d teams\n", num_teams );
exit( EXIT_FAILURE );
}
for( i=0; i<num_teams; i++ ) {
ok = fscanf(args, "%d %d %d", &teams[i].x, &teams[i].y, &teams[i].type);
if ( ok != 3 ) {
fprintf(stderr,"-- Error in file: reading team %d from file: %s\n", i, argv[1]);
exit( EXIT_FAILURE );
}
}
/* 1.2.4. Focal points information */
ok = fscanf(args, "%d", &num_focal );
if ( ok != 1 ) {
fprintf(stderr,"-- Error in file: reading num_focal from file: %s\n", argv[1]);
exit( EXIT_FAILURE );
}
focal = (FocalPoint *)malloc( sizeof(FocalPoint) * (size_t)num_focal );
if ( focal == NULL ) {
fprintf(stderr,"-- Error allocating: %d focal points\n", num_focal );
exit( EXIT_FAILURE );
}
for( i=0; i<num_focal; i++ ) {
ok = fscanf(args, "%d %d %d %d", &focal[i].x, &focal[i].y, &focal[i].start, &focal[i].heat);
if ( ok != 4 ) {
fprintf(stderr,"-- Error in file: reading focal point %d from file: %s\n", i, argv[1]);
exit( EXIT_FAILURE );
}
focal[i].active = 0;
}
}
/* 1.3. Read configuration from arguments */
else {
/* 1.3.1. Check minimum number of arguments */
if (argc<6) {
fprintf(stderr, "-- Error in arguments: not enough arguments when reading configuration from the command line\n");
show_usage( argv[0] );
exit( EXIT_FAILURE );
}
/* 1.3.2. Surface and maximum number of iterations */
rows = atoi( argv[1] );
columns = atoi( argv[2] );
max_iter = atoi( argv[3] );
surface = (float *)malloc( sizeof(float) * (size_t)rows * (size_t)columns );
surfaceCopy = (float *)malloc( sizeof(float) * (size_t)rows * (size_t)columns );
/* 1.3.3. Teams information */
num_teams = atoi( argv[4] );
teams = (Team *)malloc( sizeof(Team) * (size_t)num_teams );
if ( teams == NULL ) {
fprintf(stderr,"-- Error allocating: %d teams\n", num_teams );
exit( EXIT_FAILURE );
}
if ( argc < num_teams*3 + 5 ) {
fprintf(stderr,"-- Error in arguments: not enough arguments for %d teams\n", num_teams );
exit( EXIT_FAILURE );
}
for( i=0; i<num_teams; i++ ) {
teams[i].x = atoi( argv[5+i*3] );
teams[i].y = atoi( argv[6+i*3] );
teams[i].type = atoi( argv[7+i*3] );
}
/* 1.3.4. Focal points information */
int focal_args = 5 + i*3;
if ( argc < focal_args+1 ) {
fprintf(stderr,"-- Error in arguments: not enough arguments for the number of focal points\n");
show_usage( argv[0] );
exit( EXIT_FAILURE );
}
num_focal = atoi( argv[focal_args] );
focal = (FocalPoint *)malloc( sizeof(FocalPoint) * (size_t)num_focal );
if ( focal == NULL ) {
fprintf(stderr,"-- Error allocating: %d focal points\n", num_focal );
exit( EXIT_FAILURE );
}
if ( argc < focal_args + 1 + num_focal*4 ) {
fprintf(stderr,"-- Error in arguments: not enough arguments for %d focal points\n", num_focal );
exit( EXIT_FAILURE );
}
for( i=0; i<num_focal; i++ ) {
focal[i].x = atoi( argv[focal_args+i*4+1] );
focal[i].y = atoi( argv[focal_args+i*4+2] );
focal[i].start = atoi( argv[focal_args+i*4+3] );
focal[i].heat = atoi( argv[focal_args+i*4+4] );
focal[i].active = 0;
}
/* 1.3.5. Sanity check: No extra arguments at the end of line */
if ( argc > focal_args+i*4+1 ) {
fprintf(stderr,"-- Error in arguments: extra arguments at the end of the command line\n");
show_usage( argv[0] );
exit( EXIT_FAILURE );
}
}
#ifdef DEBUG
/* 1.4. Print arguments */
printf("Arguments, Rows: %d, Columns: %d, max_iter: %d\n", rows, columns, max_iter);
printf("Arguments, Teams: %d, Focal points: %d\n", num_teams, num_focal );
for( i=0; i<num_teams; i++ ) {
printf("\tTeam %d, position (%d,%d), type: %d\n", i, teams[i].x, teams[i].y, teams[i].type );
}
for( i=0; i<num_focal; i++ ) {
printf("\tFocal_point %d, position (%d,%d), start time: %d, temperature: %d\n", i,
focal[i].x,
focal[i].y,
focal[i].start,
focal[i].heat );
}
#endif // DEBUG
/* 2. Select GPU and start global timer */
hipSetDevice(0);
hipDeviceSynchronize();
double ttotal = cp_Wtime();
/*
*
* START HERE: DO NOT CHANGE THE CODE ABOVE THIS POINT
********************************************************* 1.94 s (cuda queue) / 1 m 21 s (cudalb queue)
*/
#define BLOCK_SIZE 128
#define CUDA_CHECK() { \
hipError_t check = hipGetLastError(); \
if ( check != hipSuccess ) { \
printf("Error.... %s \n", hipGetErrorString( check ) ); \
exit( EXIT_FAILURE ); \
} }
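/* Usage note (hedged): hipGetLastError only reports errors already recorded,
e.g. bad launch configurations. Errors raised while a kernel is running show up
asynchronously, so a debug-only pattern would be:
    some_kernel<<<grid_size, block_size>>>(...);   // hypothetical kernel name
    CUDA_CHECK();              // launch-time errors
    hipDeviceSynchronize();
    CUDA_CHECK();              // execution-time errors
Synchronizing after every launch would distort the timing runs, so this is a
debugging aid only. */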
unsigned int grid_size = rows*columns / BLOCK_SIZE + (rows*columns % BLOCK_SIZE ? 1 : 0);
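/* Worked example: grid_size is a ceiling division; with rows*columns = 1000 and
BLOCK_SIZE = 128, 1000/128 = 7 with remainder 104, so grid_size = 8 blocks (the
last block is only partially used). */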
unsigned int block_size = BLOCK_SIZE;
unsigned int aux_grid;
float *arrayDevice;
hipMalloc( (void**) &arrayDevice, sizeof(float) * (size_t)rows * (size_t)columns);
float *arrayCopyDevice;
hipMalloc( (void**) &arrayCopyDevice, sizeof(float) * (size_t)rows * (size_t)columns);
float *globalSurface = (float *)malloc( sizeof(float) * (size_t)rows * (size_t)columns );
float *globalDevice;
hipMalloc( (void**) &globalDevice, sizeof(float) * (size_t)rows * (size_t)columns);
float *actualizaDevice;
hipMalloc( (void**) &actualizaDevice, sizeof(float) * (size_t)rows * (size_t)columns);
//CUDA_CHECK();
/* 3. Initialize surface */
//icicializa<<<grid_size, block_size>>>( arrayDevice );
//hipMemcpy(surface,arrayDevice, sizeof(float) * (size_t)rows * (size_t)columns, hipMemcpyDeviceToHost);
for( i=0; i<rows; i++ )
for( j=0; j<columns; j++ )
accessMat( surface, i, j ) = 0.0;
/* 4. Simulation */
int iter;
int flag_stability = 0;
int first_activation = 0;
for( iter=0; iter<max_iter && ! flag_stability; iter++ ) {
/* 4.1. Activate focal points */
int num_deactivated = 0;
for( i=0; i<num_focal; i++ ) {
if ( focal[i].start == iter ) {
focal[i].active = 1;
if ( ! first_activation ) first_activation = 1;
}
// Count focal points already deactivated by a team
if ( focal[i].active == 2 ) num_deactivated++;
}
float global_residual = 0.0f;
if(first_activation){
/* 4.2. Propagate heat (10 steps per each team movement) */
int step;
for( step=0; step<10; step++ ) {
/* 4.2.1. Update heat on active focal points */
for( i=0; i<num_focal; i++ ) {
if ( focal[i].active != 1 ) continue;
//int x = focal[i].x;
//int y = focal[i].y;
accessMat( surface, focal[i].x, focal[i].y ) = focal[i].heat;
}
// Optimized copy: swap the surface pointers instead of copying the whole matrix
float *aux=surface;
surface=surfaceCopy;
surfaceCopy=aux;
/* 4.2.2. Copy values of the surface in ancillary structure (Skip borders) */
/*for( i=1; i<rows-1; i++ )
for( j=1; j<columns-1; j++ )
accessMat( surfaceCopy, i, j ) = accessMat( surface, i, j );*/
//hipMemcpy(globalDevice, surface, sizeof(float) * (size_t)rows * (size_t)columns,hipMemcpyHostToDevice);
//hipMemcpy(arrayDevice, surface, sizeof(float) * (size_t)rows * (size_t)columns,hipMemcpyHostToDevice);
/* Commented-out GPU update path, kept here for reference:
hipMemcpy(arrayCopyDevice, surfaceCopy, sizeof(float) * (size_t)rows * (size_t)columns,hipMemcpyHostToDevice);
actualiza<<<columns, rows>>>( globalDevice, arrayDevice, arrayCopyDevice, columns, rows);
hipMemcpy(surface,globalDevice, sizeof(float) * (size_t)rows * (size_t)columns, hipMemcpyDeviceToHost);
//CUDA_CHECK();
*/
/* 4.2.3. Update surface values (skip borders) */
for( i=1; i<rows-1; i++ )
for( j=1; j<columns-1; j++ )
accessMat( surface, i, j ) = (
accessMat( surfaceCopy, i-1, j ) +
accessMat( surfaceCopy, i+1, j ) +
accessMat( surfaceCopy, i, j-1 ) +
accessMat( surfaceCopy, i, j+1 ) ) / 4;
/* 4.2.4. Compute the maximum residual difference (absolute value) */
if(step==0){// try again later with step<1 as well
//Work inside the kernels with device-resident variables
/* hipMemcpy(arrayDevice, surface, sizeof(float) * (size_t)rows * (size_t)columns,hipMemcpyHostToDevice);
//hipMemcpy(globalDevice, surface, sizeof(float) * (size_t)rows * (size_t)columns,hipMemcpyHostToDevice);
hipMemcpy(arrayCopyDevice, surfaceCopy, sizeof(float) * (size_t)rows * (size_t)columns,hipMemcpyHostToDevice);
//Calcular global en cada posicion
calculaGlobal<<<grid_size, block_size>>>( arrayDevice,globalDevice, arrayCopyDevice);
hipMemcpy(globalSurface,arrayDevice, sizeof(float) * (size_t)rows * (size_t)columns, hipMemcpyDeviceToHost);
CUDA_CHECK();*/
//printf("Calculos hechos\n");
/*for( i=1; i<rows-1; i++ )
for( j=1; j<columns-1; j++ )
accessMat( arrayDevice, i, j ) = fabs( accessMat( surface, i, j ) - accessMat( surfaceCopy, i, j ) );*/
//for (int redSize = rows*columns; redSize>1; redSize /= 2) {
// Level-by-level reduction on the GPU
//reduceGlobal<<< grid_size, block_size >>>( arrayDevice, arrayDevice, redSize );
/*reduce0<<< grid_size, block_size >>>( arrayDevice, arrayDevice );
CUDA_CHECK();
// Is it necessary to synchronize the kernels explicitly between levels?
//}
hipMemcpy(&global_residual, arrayDevice, sizeof(float), hipMemcpyDeviceToHost);
CUDA_CHECK();
*/
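/* Illustrative sketch (hypothetical kernel name, not the code above): a
multi-level GPU max-reduction is usually driven from the host like this,
assuming a kernel reduceMax(const float *in, float *out, int n) that writes one
partial maximum per block while each thread loads two elements:
    int n = rows * columns;
    while (n > 1) {
        int blocks = (n + 2*block_size - 1) / (2*block_size);
        reduceMax<<<blocks, block_size, block_size*sizeof(float)>>>(arrayDevice, arrayDevice, n);
        n = blocks;
    }
    hipMemcpy(&global_residual, arrayDevice, sizeof(float), hipMemcpyDeviceToHost);
Kernel launches issued to the same stream execute in order, so no explicit
synchronization is needed between levels (the question in the comment above);
the blocking hipMemcpy at the end is the only synchronization point. */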
/*float abc=0.0f;
printf("%lf\n", globalSurface[4] );
for(int a=0;a<sizeof(globalSurface);a++){
if(globalSurface[a]>abc){
abc=globalSurface[a];
}
// }*/
//printf("Valor calculado con kernel %lf\n", abc );
for( i=1; i<rows-1; i++ )
for( j=1; j<columns-1; j++ )
if ( fabs( accessMat( surface, i, j ) - accessMat( surfaceCopy, i, j ) ) > global_residual ) {
global_residual = fabs( accessMat( surface, i, j ) - accessMat( surfaceCopy, i, j ) );
}
}
}
/* If the global residual is lower than THRESHOLD, we have reached enough stability, stop simulation at the end of this iteration */
if( !(num_deactivated == num_focal && global_residual < THRESHOLD) ){ //flag_stability = 1;
/* 4.3. Move teams */
for( t=0; t<num_teams; t++ ) {
/* 4.3.1. Choose nearest focal point */
float distance = FLT_MAX;
int target = -1;
for( j=0; j<num_focal; j++ ) {
if ( focal[j].active != 1 ) continue; // Skip non-active focal points
float dx = focal[j].x - teams[t].x;
float dy = focal[j].y - teams[t].y;
float local_distance = sqrtf( dx*dx + dy*dy );
if ( local_distance < distance ) {
distance = local_distance;
target = j;
}
}
/* 4.3.2. Annotate target for the next stage */
teams[t].target = target;
/* 4.3.3. No active focal point to choose, no movement */
if ( target == -1 ) continue;
/* 4.3.4. Move in the focal point direction */
if ( teams[t].type == 1 ) {
// Type 1: Can move in diagonal
if ( focal[target].x < teams[t].x ) teams[t].x--;
if ( focal[target].x > teams[t].x ) teams[t].x++;
if ( focal[target].y < teams[t].y ) teams[t].y--;
if ( focal[target].y > teams[t].y ) teams[t].y++;
}
else if ( teams[t].type == 2 ) {
// Type 2: First in horizontal direction, then in vertical direction
if ( focal[target].y < teams[t].y ) teams[t].y--;
else if ( focal[target].y > teams[t].y ) teams[t].y++;
else if ( focal[target].x < teams[t].x ) teams[t].x--;
else if ( focal[target].x > teams[t].x ) teams[t].x++;
}
else {
// Type 3: First in vertical direction, then in horizontal direction
if ( focal[target].x < teams[t].x ) teams[t].x--;
else if ( focal[target].x > teams[t].x ) teams[t].x++;
else if ( focal[target].y < teams[t].y ) teams[t].y--;
else if ( focal[target].y > teams[t].y ) teams[t].y++;
}
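/* Worked example (illustrative coordinates): a type-2 team at (x=10, y=10)
chasing a focal point at (x=12, y=7) first closes the horizontal gap one step
per movement phase (y: 10 -> 9 -> 8 -> 7) and only when the columns match does
it start moving vertically (x: 10 -> 11 -> 12). Type 3 uses the opposite order;
type 1 can shorten both coordinates in the same step. */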
}
/* 4.4. Team actions */
for( t=0; t<num_teams; t++ ) {
/* 4.4.1. Deactivate the target focal point when it is reached */
int target = teams[t].target;
if ( target != -1 && focal[target].x == teams[t].x && focal[target].y == teams[t].y
&& focal[target].active == 1 )
focal[target].active = 2;
/* 4.4.2. Reduce heat in a circle around the team */
int radius;
// Influence area of fixed radius depending on type
if ( teams[t].type == 1 ) radius = RADIUS_TYPE_1;
else radius = RADIUS_TYPE_2_3;
for( i=teams[t].x-radius; i<=teams[t].x+radius; i++ ) {
for( j=teams[t].y-radius; j<=teams[t].y+radius; j++ ) {
if ( i<1 || i>=rows-1 || j<1 || j>=columns-1 ) continue; // Out of the heated surface
float dx = teams[t].x - i;
float dy = teams[t].y - j;
float distance = sqrtf( dx*dx + dy*dy );
if ( distance <= radius ) {
accessMat( surface, i, j ) = accessMat( surface, i, j ) * 0.75; // Team efficiency factor
}
}
}
}
}else{
flag_stability = 1;
/* 4.4. Team actions */
for( t=0; t<num_teams; t++ ) {
/* 4.4.1. Deactivate the target focal point when it is reached */
int target = teams[t].target;
if ( target != -1 && focal[target].x == teams[t].x && focal[target].y == teams[t].y
&& focal[target].active == 1 )
focal[target].active = 2;
/* 4.4.2. Reduce heat in a circle around the team */
int radius;
// Influence area of fixed radius depending on type
if ( teams[t].type == 1 ) radius = RADIUS_TYPE_1;
else radius = RADIUS_TYPE_2_3;
for( i=teams[t].x-radius; i<=teams[t].x+radius; i++ ) {
for( j=teams[t].y-radius; j<=teams[t].y+radius; j++ ) {
if ( i<1 || i>=rows-1 || j<1 || j>=columns-1 ) continue; // Out of the heated surface
float dx = teams[t].x - i;
float dy = teams[t].y - j;
float distance = sqrtf( dx*dx + dy*dy );
if ( distance <= radius ) {
accessMat( surface, i, j ) = accessMat( surface, i, j ) * 0.75; // Team efficiency factor
}
}
}
}
}
}
#ifdef DEBUG
/* 4.5. DEBUG: Print the current state of the simulation at the end of each iteration */
print_status( iter, rows, columns, surface, num_teams, teams, num_focal, focal, global_residual );
#endif // DEBUG
}
/*
*
* STOP HERE: DO NOT CHANGE THE CODE BELOW THIS POINT
*
*/
/* 5. Stop global time */
hipDeviceSynchronize();
ttotal = cp_Wtime() - ttotal;
/* 6. Output for leaderboard */
printf("\n");
/* 6.1. Total computation time */
printf("Time: %lf\n", ttotal );
/* 6.2. Results: Number of iterations, position of teams, residual heat on the focal points */
printf("Result: %d", iter);
/*
for (i=0; i<num_teams; i++)
printf(" %d %d", teams[i].x, teams[i].y );
*/
for (i=0; i<num_focal; i++)
printf(" %.6f", accessMat( surface, focal[i].x, focal[i].y ) );
printf("\n");
/* 7. Free resources */
free( teams );
free( focal );
free( surface );
free( surfaceCopy );
/* 8. End */
return 0;
}
| 336fa505bb1b662227cb88f09ff0d93a26f61af0.cu | // User: [email protected]
// ExecutionRequest[P:'despacito.cu',P:1,T:1,args:'',q:'cudalb']
// May 15 2019 21:49:36
#include "cputils.h" // Added by tablon
/*
* Simplified simulation of fire extinguishing
*
* Computacion Paralela, Grado en Informatica (Universidad de Valladolid)
* 2018/2019
*
* v1.4
*
* (c) 2019 Arturo Gonzalez Escribano
*/
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <math.h>
#include <float.h>
#include <cputils.h>
#define RADIUS_TYPE_1 3
#define RADIUS_TYPE_2_3 9
#define THRESHOLD 0.1f
/* Structure to store data of an extinguishing team */
typedef struct {
int x,y;
int type;
int target;
} Team;
/* Structure to store data of a fire focal point */
typedef struct {
int x,y;
int start;
int heat;
int active; // States: 0 Not yet activated; 1 Active; 2 Deactivated by a team
} FocalPoint;
/* Macro function to simplify accessing with two coordinates to a flattened array */
#define accessMat( arr, exp1, exp2 ) arr[ (exp1) * columns + (exp2) ]
/*Kernels CUDA*************************************************************************************************/
__global__ void icicializa(float *array){
array[ blockIdx.x *blockDim.x + threadIdx.x ] = 0;
}
__global__ void calculaGlobal(float *arrayDevice, const float *surface, const float *surfaceCopy){
//arrayDevice[ blockIdx.x *blockDim.x + threadIdx.x ] = 0;
int gid = blockIdx.x *blockDim.x + threadIdx.x;
//arrayDevice[ gid ]; //= /*fabs(*/ surface[gid]; //- surfaceCopy[blockIdx.x * blockDim.x + threadIdx.x]);
arrayDevice[ gid ]=fabs(surface[gid]-surfaceCopy[gid]);
//printf("%lf\n", arrayDevice[ gid ]);
}
__device__ void warpReduce(volatile float* sdata, int tid) {
sdata[tid]=sdata[tid] > sdata[tid + 32] ? sdata[tid]: sdata[tid + 32];
sdata[tid]=sdata[tid] > sdata[tid + 16] ? sdata[tid]: sdata[tid + 16];
sdata[tid]=sdata[tid] > sdata[tid + 8] ? sdata[tid]: sdata[tid + 8];
sdata[tid]=sdata[tid] > sdata[tid + 4] ? sdata[tid]: sdata[tid + 4];
sdata[tid]=sdata[tid] > sdata[tid + 2] ? sdata[tid]: sdata[tid + 2];
sdata[tid]=sdata[tid] > sdata[tid + 1] ? sdata[tid]: sdata[tid + 1];
}
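// Illustrative alternative (addition, not used by the code below): a warp-level
// max reduction with shuffle intrinsics instead of the volatile shared-memory
// trick above; assumes a full 32-thread warp and a CUDA version that provides
// __shfl_down_sync (CUDA 9 or later).
__inline__ __device__ float warpReduceMaxShfl(float val) {
	for (int offset = 16; offset > 0; offset >>= 1) {
		float other = __shfl_down_sync(0xffffffffu, val, offset);
		val = val > other ? val : other;
	}
	return val; // lane 0 ends up holding the warp-wide maximum
}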
__global__ void reduce0(float *g_idata, float *g_odata, int size) {
extern __shared__ float sdata[];
// each thread loads one element from global to shared mem
/*unsigned int tid = threadIdx.x;
unsigned int i = blockIdx.x*blockDim.x + threadIdx.x;
sdata[tid] = g_idata[i];
__syncthreads();*/
unsigned int tid = threadIdx.x;
unsigned int i = blockIdx.x*(blockDim.x*2) + threadIdx.x;
sdata[tid] = g_idata[i] > g_idata[i+blockDim.x] ? g_idata[i] : g_idata[i+blockDim.x];
__syncthreads();
// do reduction in shared mem
//for(unsigned int s=1; s < blockDim.x; s *= 2) {
//if (tid % (2*s) == 0) {
for (unsigned int s=blockDim.x/2; s>32; s>>=1) {
if (tid < s)
if(sdata[tid]<sdata[tid + s]){
sdata[tid] = sdata[tid + s];
}
// Supplementary reduction if there is an unpaired element
if ( size%2 != 0 && i == 0 ){ // which thread takes care of this extra reduction?
if(g_idata[ i ] < g_idata[ size-1 ]){ // where is the unpaired element?
g_odata[ i ] = g_idata[ size-1 ];
}
}
__syncthreads();
}
// Supplementary reduction if there is an unpaired element
if ( size%2 != 0 && i == 0 ){ // which thread takes care of this extra reduction?
if(g_idata[ i ] < g_idata[ size-1 ]){ // where is the unpaired element?
g_odata[ i ] = g_idata[ size-1 ];
}
}
__syncthreads();
if (tid <= 32) warpReduce(sdata, tid);
__syncthreads();
//}
// write result for this block to global mem
if (tid == 0) g_odata[blockIdx.x] = sdata[0];
}
__global__ void reduce1(float *g_idata, float *g_odata, int size) {
extern __shared__ float sdata[];
// each thread loads one element from global to shared mem
/*unsigned int tid = threadIdx.x;
unsigned int i = blockIdx.x*blockDim.x + threadIdx.x;
sdata[tid] = g_idata[i];
__syncthreads();*/
unsigned int tid = threadIdx.x;
unsigned int i = blockIdx.x*(blockDim.x*2) + threadIdx.x;
sdata[tid] = g_idata[i] > g_idata[i+blockDim.x] ? g_idata[i] : g_idata[i+blockDim.x];
__syncthreads();
// do reduction in shared mem
//for(unsigned int s=1; s < blockDim.x; s *= 2) {
//if (tid % (2*s) == 0) {
for (unsigned int s=blockDim.x/2; s>0; s>>=1) {
if (tid < s)
if(sdata[tid]<sdata[tid + s]){
sdata[tid] = sdata[tid + s];
}
// Supplementary reduction if there is an unpaired element
if ( size%2 != 0 && i == 0 ){ // which thread takes care of this extra reduction?
if(g_idata[ i ] < g_idata[ size-1 ]){ // where is the unpaired element?
g_odata[ i ] = g_idata[ size-1 ];
}
}
__syncthreads();
}
// Supplementary reduction if there is an unpaired element
if ( size%2 != 0 && i == 0 ){ // which thread takes care of this extra reduction?
if(g_idata[ i ] < g_idata[ size-1 ]){ // where is the unpaired element?
g_odata[ i ] = g_idata[ size-1 ];
}
}
__syncthreads();
if (tid <= 32) warpReduce(sdata, tid);
__syncthreads();
//}
// write result for this block to global mem
if (tid == 0) g_odata[blockIdx.x] = sdata[0];
}
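/* Illustrative sketch (hedged): how kernels shaped like reduce0/reduce1 are
typically invoked from the host. The third launch parameter is the dynamic
shared-memory size required by "extern __shared__ float sdata[]", and each
block consumes 2*blockDim.x input elements because of the max-during-load:
    int n = rows * columns;
    while (n > 1) {
        int blocks = (n + 2*BLOCK_SIZE - 1) / (2*BLOCK_SIZE);
        reduce1<<<blocks, BLOCK_SIZE, BLOCK_SIZE*sizeof(float)>>>(d_data, d_data, n);
        n = blocks;
    }
    cudaMemcpy(&result, d_data, sizeof(float), cudaMemcpyDeviceToHost);
BLOCK_SIZE, d_data and result are placeholders (BLOCK_SIZE is defined later, in
main); this only illustrates the launch pattern, not a claim that the kernels
above are correct for arbitrary sizes. */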
__global__ void actualiza(float *actualizar, const float *surface, const float *surfaceCopy, int columns, int rows) {
int gid = blockIdx.x *blockDim.x + threadIdx.x;
/*int col= gid/columns;
int fila=(blockIdx.x*blockDim.x/columns)+(threadIdx.x/columns);*/
//int col= threadIdx.x;
//int fila= blockIdx.x;
if( blockIdx.x>0 && blockIdx.x<rows-1 && threadIdx.x>0 && threadIdx.x<columns-1){
accessMat(actualizar,blockIdx.x,threadIdx.x)= (accessMat( surfaceCopy, blockIdx.x-1, threadIdx.x ) +
accessMat( surfaceCopy, blockIdx.x+1, threadIdx.x ) +
accessMat( surfaceCopy, blockIdx.x, threadIdx.x-1 ) +
accessMat( surfaceCopy, blockIdx.x, threadIdx.x+1 ) ) / 4;
//(surfaceCopy[gid-blockDim.x]+surfaceCopy[gid+blockDim.x]+surfaceCopy[gid-1]+surfaceCopy[gid+1])/4;
//if(gid==59){printf("columna %d, fila %d\n",col, fila);}
}else{
actualizar[gid]=0.0f;
}
}
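/* Launch-geometry note (assumption, not original code): launching this kernel
as <<<rows, columns>>> caps it at columns <= 1024 threads per block. A 2D launch
removes that limit, e.g.
    dim3 block(16, 16);
    dim3 grid((columns + block.x - 1) / block.x, (rows + block.y - 1) / block.y);
with the row/column then derived from blockIdx*blockDim + threadIdx in y and x;
the kernel body above would need its indexing adapted accordingly. */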
//__global__ void actualiza2(float *surface, const float *surfaceCopy2, const float *surfaceCopy) {
//}
/****************************************************************************************************************/
/*
* Function: Print usage line in stderr
*/
void show_usage( char *program_name ) {
fprintf(stderr,"Usage: %s <config_file> | <command_line_args>\n", program_name );
fprintf(stderr,"\t<config_file> ::= -f <file_name>\n");
fprintf(stderr,"\t<command_line_args> ::= <rows> <columns> <maxIter> <numTeams> [ <teamX> <teamY> <teamType> ... ] <numFocalPoints> [ <focalX> <focalY> <focalStart> <focalTemperature> ... ]\n");
fprintf(stderr,"\n");
}
#ifdef DEBUG
/*
* Function: Print the current state of the simulation
*/
void print_status( int iteration, int rows, int columns, float *surface, int num_teams, Team *teams, int num_focal, FocalPoint *focal, float global_residual ) {
/*
* You don't need to optimize this function, it is only for pretty printing and debugging purposes.
* It is not compiled in the production versions of the program.
* Thus, it is never used when measuring times in the leaderboard
*/
int i,j;
printf("Iteration: %d\n", iteration );
printf("+");
for( j=0; j<columns; j++ ) printf("---");
printf("+\n");
for( i=0; i<rows; i++ ) {
printf("|");
for( j=0; j<columns; j++ ) {
char symbol;
if ( accessMat( surface, i, j ) >= 1000 ) symbol = '*';
else if ( accessMat( surface, i, j ) >= 100 ) symbol = '0' + (int)(accessMat( surface, i, j )/100);
else if ( accessMat( surface, i, j ) >= 50 ) symbol = '+';
else if ( accessMat( surface, i, j ) >= 25 ) symbol = '.';
else symbol = '0';
int t;
int flag_team = 0;
for( t=0; t<num_teams; t++ )
if ( teams[t].x == i && teams[t].y == j ) { flag_team = 1; break; }
if ( flag_team ) printf("[%c]", symbol );
else {
int f;
int flag_focal = 0;
for( f=0; f<num_focal; f++ )
if ( focal[f].x == i && focal[f].y == j && focal[f].active == 1 ) { flag_focal = 1; break; }
if ( flag_focal ) printf("(%c)", symbol );
else printf(" %c ", symbol );
}
}
printf("|\n");
}
printf("+");
for( j=0; j<columns; j++ ) printf("---");
printf("+\n");
printf("Global residual: %f\n\n", global_residual);
}
#endif
/*
* MAIN PROGRAM
*/
int main(int argc, char *argv[]) {
int i,j,t;
// Simulation data
int rows, columns, max_iter;
float *surface, *surfaceCopy;
int num_teams, num_focal;
Team *teams;
FocalPoint *focal;
/* 1. Read simulation arguments */
/* 1.1. Check minimum number of arguments */
if (argc<2) {
fprintf(stderr,"-- Error in arguments: No arguments\n");
show_usage( argv[0] );
exit( EXIT_FAILURE );
}
int read_from_file = ! strcmp( argv[1], "-f" );
/* 1.2. Read configuration from file */
if ( read_from_file ) {
/* 1.2.1. Open file */
if (argc<3) {
fprintf(stderr,"-- Error in arguments: file-name argument missing\n");
show_usage( argv[0] );
exit( EXIT_FAILURE );
}
FILE *args = cp_abrir_fichero( argv[2] );
if ( args == NULL ) {
fprintf(stderr,"-- Error in file: not found: %s\n", argv[1]);
exit( EXIT_FAILURE );
}
/* 1.2.2. Read surface and maximum number of iterations */
int ok;
ok = fscanf(args, "%d %d %d", &rows, &columns, &max_iter);
if ( ok != 3 ) {
fprintf(stderr,"-- Error in file: reading rows, columns, max_iter from file: %s\n", argv[1]);
exit( EXIT_FAILURE );
}
surface = (float *)malloc( sizeof(float) * (size_t)rows * (size_t)columns );
surfaceCopy = (float *)malloc( sizeof(float) * (size_t)rows * (size_t)columns );
if ( surface == NULL || surfaceCopy == NULL ) {
fprintf(stderr,"-- Error allocating: surface structures\n");
exit( EXIT_FAILURE );
}
/* 1.2.3. Teams information */
ok = fscanf(args, "%d", &num_teams );
if ( ok != 1 ) {
fprintf(stderr,"-- Error file, reading num_teams from file: %s\n", argv[1]);
exit( EXIT_FAILURE );
}
teams = (Team *)malloc( sizeof(Team) * (size_t)num_teams );
if ( teams == NULL ) {
fprintf(stderr,"-- Error allocating: %d teams\n", num_teams );
exit( EXIT_FAILURE );
}
for( i=0; i<num_teams; i++ ) {
ok = fscanf(args, "%d %d %d", &teams[i].x, &teams[i].y, &teams[i].type);
if ( ok != 3 ) {
fprintf(stderr,"-- Error in file: reading team %d from file: %s\n", i, argv[1]);
exit( EXIT_FAILURE );
}
}
/* 1.2.4. Focal points information */
ok = fscanf(args, "%d", &num_focal );
if ( ok != 1 ) {
fprintf(stderr,"-- Error in file: reading num_focal from file: %s\n", argv[1]);
exit( EXIT_FAILURE );
}
focal = (FocalPoint *)malloc( sizeof(FocalPoint) * (size_t)num_focal );
if ( focal == NULL ) {
fprintf(stderr,"-- Error allocating: %d focal points\n", num_focal );
exit( EXIT_FAILURE );
}
for( i=0; i<num_focal; i++ ) {
ok = fscanf(args, "%d %d %d %d", &focal[i].x, &focal[i].y, &focal[i].start, &focal[i].heat);
if ( ok != 4 ) {
fprintf(stderr,"-- Error in file: reading focal point %d from file: %s\n", i, argv[1]);
exit( EXIT_FAILURE );
}
focal[i].active = 0;
}
}
/* 1.3. Read configuration from arguments */
else {
/* 1.3.1. Check minimum number of arguments */
if (argc<6) {
fprintf(stderr, "-- Error in arguments: not enough arguments when reading configuration from the command line\n");
show_usage( argv[0] );
exit( EXIT_FAILURE );
}
/* 1.3.2. Surface and maximum number of iterations */
rows = atoi( argv[1] );
columns = atoi( argv[2] );
max_iter = atoi( argv[3] );
surface = (float *)malloc( sizeof(float) * (size_t)rows * (size_t)columns );
surfaceCopy = (float *)malloc( sizeof(float) * (size_t)rows * (size_t)columns );
/* 1.3.3. Teams information */
num_teams = atoi( argv[4] );
teams = (Team *)malloc( sizeof(Team) * (size_t)num_teams );
if ( teams == NULL ) {
fprintf(stderr,"-- Error allocating: %d teams\n", num_teams );
exit( EXIT_FAILURE );
}
if ( argc < num_teams*3 + 5 ) {
fprintf(stderr,"-- Error in arguments: not enough arguments for %d teams\n", num_teams );
exit( EXIT_FAILURE );
}
for( i=0; i<num_teams; i++ ) {
teams[i].x = atoi( argv[5+i*3] );
teams[i].y = atoi( argv[6+i*3] );
teams[i].type = atoi( argv[7+i*3] );
}
/* 1.3.4. Focal points information */
int focal_args = 5 + i*3;
if ( argc < focal_args+1 ) {
fprintf(stderr,"-- Error in arguments: not enough arguments for the number of focal points\n");
show_usage( argv[0] );
exit( EXIT_FAILURE );
}
num_focal = atoi( argv[focal_args] );
focal = (FocalPoint *)malloc( sizeof(FocalPoint) * (size_t)num_focal );
if ( focal == NULL ) {
fprintf(stderr,"-- Error allocating: %d focal points\n", num_focal );
exit( EXIT_FAILURE );
}
if ( argc < focal_args + 1 + num_focal*4 ) {
fprintf(stderr,"-- Error in arguments: not enough arguments for %d focal points\n", num_focal );
exit( EXIT_FAILURE );
}
for( i=0; i<num_focal; i++ ) {
focal[i].x = atoi( argv[focal_args+i*4+1] );
focal[i].y = atoi( argv[focal_args+i*4+2] );
focal[i].start = atoi( argv[focal_args+i*4+3] );
focal[i].heat = atoi( argv[focal_args+i*4+4] );
focal[i].active = 0;
}
/* 1.3.5. Sanity check: No extra arguments at the end of line */
if ( argc > focal_args+i*4+1 ) {
fprintf(stderr,"-- Error in arguments: extra arguments at the end of the command line\n");
show_usage( argv[0] );
exit( EXIT_FAILURE );
}
}
#ifdef DEBUG
/* 1.4. Print arguments */
printf("Arguments, Rows: %d, Columns: %d, max_iter: %d\n", rows, columns, max_iter);
printf("Arguments, Teams: %d, Focal points: %d\n", num_teams, num_focal );
for( i=0; i<num_teams; i++ ) {
printf("\tTeam %d, position (%d,%d), type: %d\n", i, teams[i].x, teams[i].y, teams[i].type );
}
for( i=0; i<num_focal; i++ ) {
printf("\tFocal_point %d, position (%d,%d), start time: %d, temperature: %d\n", i,
focal[i].x,
focal[i].y,
focal[i].start,
focal[i].heat );
}
#endif // DEBUG
/* 2. Select GPU and start global timer */
cudaSetDevice(0);
cudaDeviceSynchronize();
double ttotal = cp_Wtime();
/*
*
* START HERE: DO NOT CHANGE THE CODE ABOVE THIS POINT
********************************************************* 1.94 s (cuda queue) / 1 m 21 s (cudalb queue)
*/
#define BLOCK_SIZE 128
#define CUDA_CHECK() { \
cudaError_t check = cudaGetLastError(); \
if ( check != cudaSuccess ) { \
printf("Error.... %s \n", cudaGetErrorString( check ) ); \
exit( EXIT_FAILURE ); \
} }
unsigned int grid_size = rows*columns / BLOCK_SIZE + (rows*columns % BLOCK_SIZE ? 1 : 0);
unsigned int block_size = BLOCK_SIZE;
unsigned int aux_grid;
float *arrayDevice;
cudaMalloc( (void**) &arrayDevice, sizeof(float) * (size_t)rows * (size_t)columns);
float *arrayCopyDevice;
cudaMalloc( (void**) &arrayCopyDevice, sizeof(float) * (size_t)rows * (size_t)columns);
float *globalSurface = (float *)malloc( sizeof(float) * (size_t)rows * (size_t)columns );
float *globalDevice;
cudaMalloc( (void**) &globalDevice, sizeof(float) * (size_t)rows * (size_t)columns);
float *actualizaDevice;
cudaMalloc( (void**) &actualizaDevice, sizeof(float) * (size_t)rows * (size_t)columns);
//CUDA_CHECK();
/* 3. Initialize surface */
//icicializa<<<grid_size, block_size>>>( arrayDevice );
//cudaMemcpy(surface,arrayDevice, sizeof(float) * (size_t)rows * (size_t)columns, cudaMemcpyDeviceToHost);
for( i=0; i<rows; i++ )
for( j=0; j<columns; j++ )
accessMat( surface, i, j ) = 0.0;
/* 4. Simulation */
int iter;
int flag_stability = 0;
int first_activation = 0;
for( iter=0; iter<max_iter && ! flag_stability; iter++ ) {
/* 4.1. Activate focal points */
int num_deactivated = 0;
for( i=0; i<num_focal; i++ ) {
if ( focal[i].start == iter ) {
focal[i].active = 1;
if ( ! first_activation ) first_activation = 1;
}
// Count focal points already deactivated by a team
if ( focal[i].active == 2 ) num_deactivated++;
}
float global_residual = 0.0f;
if(first_activation){
/* 4.2. Propagate heat (10 steps per each team movement) */
int step;
for( step=0; step<10; step++ ) {
/* 4.2.1. Update heat on active focal points */
for( i=0; i<num_focal; i++ ) {
if ( focal[i].active != 1 ) continue;
//int x = focal[i].x;
//int y = focal[i].y;
accessMat( surface, focal[i].x, focal[i].y ) = focal[i].heat;
}
// Optimized copy: swap the surface pointers instead of copying the whole matrix
float *aux=surface;
surface=surfaceCopy;
surfaceCopy=aux;
/* 4.2.2. Copy values of the surface in ancillary structure (Skip borders) */
/*for( i=1; i<rows-1; i++ )
for( j=1; j<columns-1; j++ )
accessMat( surfaceCopy, i, j ) = accessMat( surface, i, j );*/
//cudaMemcpy(globalDevice, surface, sizeof(float) * (size_t)rows * (size_t)columns,cudaMemcpyHostToDevice);
//cudaMemcpy(arrayDevice, surface, sizeof(float) * (size_t)rows * (size_t)columns,cudaMemcpyHostToDevice);
/* Commented-out GPU update path, kept here for reference:
cudaMemcpy(arrayCopyDevice, surfaceCopy, sizeof(float) * (size_t)rows * (size_t)columns,cudaMemcpyHostToDevice);
actualiza<<<columns, rows>>>( globalDevice, arrayDevice, arrayCopyDevice, columns, rows);
cudaMemcpy(surface,globalDevice, sizeof(float) * (size_t)rows * (size_t)columns, cudaMemcpyDeviceToHost);
//CUDA_CHECK();
*/
/* 4.2.3. Update surface values (skip borders) */
for( i=1; i<rows-1; i++ )
for( j=1; j<columns-1; j++ )
accessMat( surface, i, j ) = (
accessMat( surfaceCopy, i-1, j ) +
accessMat( surfaceCopy, i+1, j ) +
accessMat( surfaceCopy, i, j-1 ) +
accessMat( surfaceCopy, i, j+1 ) ) / 4;
/* 4.2.4. Compute the maximum residual difference (absolute value) */
if(step==0){// try again later with step<1 as well
//Work inside the kernels with device-resident variables
/* cudaMemcpy(arrayDevice, surface, sizeof(float) * (size_t)rows * (size_t)columns,cudaMemcpyHostToDevice);
//cudaMemcpy(globalDevice, surface, sizeof(float) * (size_t)rows * (size_t)columns,cudaMemcpyHostToDevice);
cudaMemcpy(arrayCopyDevice, surfaceCopy, sizeof(float) * (size_t)rows * (size_t)columns,cudaMemcpyHostToDevice);
//Calcular global en cada posicion
calculaGlobal<<<grid_size, block_size>>>( arrayDevice,globalDevice, arrayCopyDevice);
cudaMemcpy(globalSurface,arrayDevice, sizeof(float) * (size_t)rows * (size_t)columns, cudaMemcpyDeviceToHost);
CUDA_CHECK();*/
//printf("Calculos hechos\n");
/*for( i=1; i<rows-1; i++ )
for( j=1; j<columns-1; j++ )
accessMat( arrayDevice, i, j ) = fabs( accessMat( surface, i, j ) - accessMat( surfaceCopy, i, j ) );*/
//for (int redSize = rows*columns; redSize>1; redSize /= 2) {
// Level-by-level reduction on the GPU
//reduceGlobal<<< grid_size, block_size >>>( arrayDevice, arrayDevice, redSize );
/*reduce0<<< grid_size, block_size >>>( arrayDevice, arrayDevice );
CUDA_CHECK();
// Is it necessary to synchronize the kernels explicitly between levels?
//}
cudaMemcpy(&global_residual, arrayDevice, sizeof(float), cudaMemcpyDeviceToHost);
CUDA_CHECK();
*/
/*float abc=0.0f;
printf("%lf\n", globalSurface[4] );
for(int a=0;a<sizeof(globalSurface);a++){
if(globalSurface[a]>abc){
abc=globalSurface[a];
}
// }*/
//printf("Valor calculado con kernel %lf\n", abc );
for( i=1; i<rows-1; i++ )
for( j=1; j<columns-1; j++ )
if ( fabs( accessMat( surface, i, j ) - accessMat( surfaceCopy, i, j ) ) > global_residual ) {
global_residual = fabs( accessMat( surface, i, j ) - accessMat( surfaceCopy, i, j ) );
}
}
}
/* If the global residual is lower than THRESHOLD, we have reached enough stability, stop simulation at the end of this iteration */
if( !(num_deactivated == num_focal && global_residual < THRESHOLD) ){ //flag_stability = 1;
/* 4.3. Move teams */
for( t=0; t<num_teams; t++ ) {
/* 4.3.1. Choose nearest focal point */
float distance = FLT_MAX;
int target = -1;
for( j=0; j<num_focal; j++ ) {
if ( focal[j].active != 1 ) continue; // Skip non-active focal points
float dx = focal[j].x - teams[t].x;
float dy = focal[j].y - teams[t].y;
float local_distance = sqrtf( dx*dx + dy*dy );
if ( local_distance < distance ) {
distance = local_distance;
target = j;
}
}
/* 4.3.2. Annotate target for the next stage */
teams[t].target = target;
/* 4.3.3. No active focal point to choose, no movement */
if ( target == -1 ) continue;
/* 4.3.4. Move in the focal point direction */
if ( teams[t].type == 1 ) {
// Type 1: Can move in diagonal
if ( focal[target].x < teams[t].x ) teams[t].x--;
if ( focal[target].x > teams[t].x ) teams[t].x++;
if ( focal[target].y < teams[t].y ) teams[t].y--;
if ( focal[target].y > teams[t].y ) teams[t].y++;
}
else if ( teams[t].type == 2 ) {
// Type 2: First in horizontal direction, then in vertical direction
if ( focal[target].y < teams[t].y ) teams[t].y--;
else if ( focal[target].y > teams[t].y ) teams[t].y++;
else if ( focal[target].x < teams[t].x ) teams[t].x--;
else if ( focal[target].x > teams[t].x ) teams[t].x++;
}
else {
// Type 3: First in vertical direction, then in horizontal direction
if ( focal[target].x < teams[t].x ) teams[t].x--;
else if ( focal[target].x > teams[t].x ) teams[t].x++;
else if ( focal[target].y < teams[t].y ) teams[t].y--;
else if ( focal[target].y > teams[t].y ) teams[t].y++;
}
}
/* 4.4. Team actions */
for( t=0; t<num_teams; t++ ) {
/* 4.4.1. Deactivate the target focal point when it is reached */
int target = teams[t].target;
if ( target != -1 && focal[target].x == teams[t].x && focal[target].y == teams[t].y
&& focal[target].active == 1 )
focal[target].active = 2;
/* 4.4.2. Reduce heat in a circle around the team */
int radius;
// Influence area of fixed radius depending on type
if ( teams[t].type == 1 ) radius = RADIUS_TYPE_1;
else radius = RADIUS_TYPE_2_3;
for( i=teams[t].x-radius; i<=teams[t].x+radius; i++ ) {
for( j=teams[t].y-radius; j<=teams[t].y+radius; j++ ) {
if ( i<1 || i>=rows-1 || j<1 || j>=columns-1 ) continue; // Out of the heated surface
float dx = teams[t].x - i;
float dy = teams[t].y - j;
float distance = sqrtf( dx*dx + dy*dy );
if ( distance <= radius ) {
accessMat( surface, i, j ) = accessMat( surface, i, j ) * 0.75; // Team efficiency factor
}
}
}
}
}else{
flag_stability = 1;
/* 4.4. Team actions */
for( t=0; t<num_teams; t++ ) {
/* 4.4.1. Deactivate the target focal point when it is reached */
int target = teams[t].target;
if ( target != -1 && focal[target].x == teams[t].x && focal[target].y == teams[t].y
&& focal[target].active == 1 )
focal[target].active = 2;
/* 4.4.2. Reduce heat in a circle around the team */
int radius;
// Influence area of fixed radius depending on type
if ( teams[t].type == 1 ) radius = RADIUS_TYPE_1;
else radius = RADIUS_TYPE_2_3;
for( i=teams[t].x-radius; i<=teams[t].x+radius; i++ ) {
for( j=teams[t].y-radius; j<=teams[t].y+radius; j++ ) {
if ( i<1 || i>=rows-1 || j<1 || j>=columns-1 ) continue; // Out of the heated surface
float dx = teams[t].x - i;
float dy = teams[t].y - j;
float distance = sqrtf( dx*dx + dy*dy );
if ( distance <= radius ) {
accessMat( surface, i, j ) = accessMat( surface, i, j ) * 0.75; // Team efficiency factor
}
}
}
}
}
}
#ifdef DEBUG
/* 4.5. DEBUG: Print the current state of the simulation at the end of each iteration */
print_status( iter, rows, columns, surface, num_teams, teams, num_focal, focal, global_residual );
#endif // DEBUG
}
/*
*
* STOP HERE: DO NOT CHANGE THE CODE BELOW THIS POINT
*
*/
/* 5. Stop global time */
cudaDeviceSynchronize();
ttotal = cp_Wtime() - ttotal;
/* 6. Output for leaderboard */
printf("\n");
/* 6.1. Total computation time */
printf("Time: %lf\n", ttotal );
/* 6.2. Results: Number of iterations, position of teams, residual heat on the focal points */
printf("Result: %d", iter);
/*
for (i=0; i<num_teams; i++)
printf(" %d %d", teams[i].x, teams[i].y );
*/
for (i=0; i<num_focal; i++)
printf(" %.6f", accessMat( surface, focal[i].x, focal[i].y ) );
printf("\n");
/* 7. Free resources */
free( teams );
free( focal );
free( surface );
free( surfaceCopy );
/* 8. End */
return 0;
}
|
7df9a502a79ad114a4cefcb7b3dcd5de5a406bb1.hip | // !!! This is a file automatically generated by hipify!!!
//bondsEngine.cu
//Scott Grauer-Gray [email protected]
//Contains main function for running bonds application on a GPU
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#include "bondsStructs.h"
#include "bondsKernelsGpu.hip"
#include "bondsKernelsCpu.cu"
#define MIN(a, b) (((a) < (b)) ? (a) : (b))
#define MAX(a, b) (((a) > (b)) ? (a) : (b))
int monthLengthCpu(int month, bool leapYear)
{
int MonthLength[] = {
31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31
};
int MonthLeapLength[] = {
31, 29, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31
};
return (leapYear? MonthLeapLength[month-1] : MonthLength[month-1]);
}
int monthOffsetCpu(int m, bool leapYear)
{
int MonthOffset[] = {
0, 31, 59, 90, 120, 151, // Jan - Jun
181, 212, 243, 273, 304, 334, // Jun - Dec
365 // used in dayOfMonth to bracket day
};
int MonthLeapOffset[] = {
0, 31, 60, 91, 121, 152, // Jan - Jun
182, 213, 244, 274, 305, 335, // Jun - Dec
366 // used in dayOfMonth to bracket day
};
return (leapYear? MonthLeapOffset[m-1] : MonthOffset[m-1]);
}
int yearOffsetCpu(int y)
{
// the list of all December 31st in the preceding year
// e.g. for 1901 yearOffset[1] is 366, that is, December 31 1900
int YearOffset[] = {
// 1900-1909
0, 366, 731, 1096, 1461, 1827, 2192, 2557, 2922, 3288,
// 1910-1919
3653, 4018, 4383, 4749, 5114, 5479, 5844, 6210, 6575, 6940,
// 1920-1929
7305, 7671, 8036, 8401, 8766, 9132, 9497, 9862,10227,10593,
// 1930-1939
10958,11323,11688,12054,12419,12784,13149,13515,13880,14245,
// 1940-1949
14610,14976,15341,15706,16071,16437,16802,17167,17532,17898,
// 1950-1959
18263,18628,18993,19359,19724,20089,20454,20820,21185,21550,
// 1960-1969
21915,22281,22646,23011,23376,23742,24107,24472,24837,25203,
// 1970-1979
25568,25933,26298,26664,27029,27394,27759,28125,28490,28855,
// 1980-1989
29220,29586,29951,30316,30681,31047,31412,31777,32142,32508,
// 1990-1999
32873,33238,33603,33969,34334,34699,35064,35430,35795,36160,
// 2000-2009
36525,36891,37256,37621,37986,38352,38717,39082,39447,39813,
// 2010-2019
40178,40543,40908,41274,41639,42004,42369,42735,43100,43465,
// 2020-2029
43830,44196,44561,44926,45291,45657,46022,46387,46752,47118,
// 2030-2039
47483,47848,48213,48579,48944,49309,49674,50040,50405,50770,
// 2040-2049
51135,51501,51866,52231,52596,52962,53327,53692,54057,54423,
// 2050-2059
54788,55153,55518,55884,56249,56614,56979,57345,57710,58075,
// 2060-2069
58440,58806,59171,59536,59901,60267,60632,60997,61362,61728,
// 2070-2079
62093,62458,62823,63189,63554,63919,64284,64650,65015,65380,
// 2080-2089
65745,66111,66476,66841,67206,67572,67937,68302,68667,69033,
// 2090-2099
69398,69763,70128,70494,70859,71224,71589,71955,72320,72685,
// 2100-2109
73050,73415,73780,74145,74510,74876,75241,75606,75971,76337,
// 2110-2119
76702,77067,77432,77798,78163,78528,78893,79259,79624,79989,
// 2120-2129
80354,80720,81085,81450,81815,82181,82546,82911,83276,83642,
// 2130-2139
84007,84372,84737,85103,85468,85833,86198,86564,86929,87294,
// 2140-2149
87659,88025,88390,88755,89120,89486,89851,90216,90581,90947,
// 2150-2159
91312,91677,92042,92408,92773,93138,93503,93869,94234,94599,
// 2160-2169
94964,95330,95695,96060,96425,96791,97156,97521,97886,98252,
// 2170-2179
98617,98982,99347,99713,100078,100443,100808,101174,101539,101904,
// 2180-2189
102269,102635,103000,103365,103730,104096,104461,104826,105191,105557,
// 2190-2199
105922,106287,106652,107018,107383,107748,108113,108479,108844,109209,
// 2200
109574
};
return YearOffset[y-1900];
}
bool isLeapCpu(int y)
{
bool YearIsLeap[] = {
// 1900 is leap in agreement with Excel's bug
// 1900 is out of valid date range anyway
// 1900-1909
true,false,false,false, true,false,false,false, true,false,
// 1910-1919
false,false, true,false,false,false, true,false,false,false,
// 1920-1929
true,false,false,false, true,false,false,false, true,false,
// 1930-1939
false,false, true,false,false,false, true,false,false,false,
// 1940-1949
true,false,false,false, true,false,false,false, true,false,
// 1950-1959
false,false, true,false,false,false, true,false,false,false,
// 1960-1969
true,false,false,false, true,false,false,false, true,false,
// 1970-1979
false,false, true,false,false,false, true,false,false,false,
// 1980-1989
true,false,false,false, true,false,false,false, true,false,
// 1990-1999
false,false, true,false,false,false, true,false,false,false,
// 2000-2009
true,false,false,false, true,false,false,false, true,false,
// 2010-2019
false,false, true,false,false,false, true,false,false,false,
// 2020-2029
true,false,false,false, true,false,false,false, true,false,
// 2030-2039
false,false, true,false,false,false, true,false,false,false,
// 2040-2049
true,false,false,false, true,false,false,false, true,false,
// 2050-2059
false,false, true,false,false,false, true,false,false,false,
// 2060-2069
true,false,false,false, true,false,false,false, true,false,
// 2070-2079
false,false, true,false,false,false, true,false,false,false,
// 2080-2089
true,false,false,false, true,false,false,false, true,false,
// 2090-2099
false,false, true,false,false,false, true,false,false,false,
// 2100-2109
false,false,false,false, true,false,false,false, true,false,
// 2110-2119
false,false, true,false,false,false, true,false,false,false,
// 2120-2129
true,false,false,false, true,false,false,false, true,false,
// 2130-2139
false,false, true,false,false,false, true,false,false,false,
// 2140-2149
true,false,false,false, true,false,false,false, true,false,
// 2150-2159
false,false, true,false,false,false, true,false,false,false,
// 2160-2169
true,false,false,false, true,false,false,false, true,false,
// 2170-2179
false,false, true,false,false,false, true,false,false,false,
// 2180-2189
true,false,false,false, true,false,false,false, true,false,
// 2190-2199
false,false, true,false,false,false, true,false,false,false,
// 2200
false
};
return YearIsLeap[y-1900];
}
bondsDateStruct intializeDateCpu(int d, int m, int y)
{
bondsDateStruct currDate;
currDate.day = d;
currDate.month = m;
currDate.year = y;
bool leap = isLeapCpu(y);
int offset = monthOffsetCpu(m,leap);
currDate.dateSerialNum = d + offset + yearOffsetCpu(y);
return currDate;
}
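/* Worked example (added for clarity): for 15 March 2000, isLeapCpu(2000) = true,
monthOffsetCpu(3, true) = 60 and yearOffsetCpu(2000) = 36525, so
dateSerialNum = 15 + 60 + 36525 = 36600 under the 1900-based serial-day
convention encoded in the lookup tables above. */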
void runBoundsEngine(const int repeat)
{
//can run multiple problem sizes by adding more entries to nBondsArray and raising the loop bound below
int nBondsArray[] = {1000000};
for (int numTime=0; numTime < 1; numTime++)
{
int numBonds = nBondsArray[numTime];
printf("\nNumber of Bonds: %d\n\n", numBonds);
inArgsStruct inArgsHost;
inArgsHost.discountCurve = (bondsYieldTermStruct*)malloc(numBonds*sizeof(bondsYieldTermStruct));
inArgsHost.repoCurve = (bondsYieldTermStruct*)malloc(numBonds*sizeof(bondsYieldTermStruct));
inArgsHost.currDate = (bondsDateStruct*)malloc(numBonds*sizeof(bondsDateStruct));
inArgsHost.maturityDate = (bondsDateStruct*)malloc(numBonds*sizeof(bondsDateStruct));
inArgsHost.bondCleanPrice = (dataType*)malloc(numBonds*sizeof(dataType));
inArgsHost.bond = (bondStruct*)malloc(numBonds*sizeof(bondStruct));
inArgsHost.dummyStrike = (dataType*)malloc(numBonds*sizeof(dataType));
srand (123);
int numBond;
for (numBond = 0; numBond < numBonds; numBond++)
{
dataType repoRate = 0.07;
//int repoSettlementDays = 0;
int repoCompounding = SIMPLE_INTEREST;
dataType repoCompoundFreq = 1;
// assume roughly a ten-year bond; the exact term is irrelevant here
bondsDateStruct bondIssueDate = intializeDateCpu(rand() % 28 + 1, rand() % 12 + 1, 1999 - (rand() % 2));
bondsDateStruct bondMaturityDate = intializeDateCpu(rand() % 28 + 1, rand() % 12 + 1, 2000 + (rand() % 2));
bondsDateStruct todaysDate = intializeDateCpu(bondMaturityDate.day-1,bondMaturityDate.month,bondMaturityDate.year);
bondStruct bond;
bond.startDate = bondIssueDate;
bond.maturityDate = bondMaturityDate;
bond.rate = 0.08 + ((float)rand()/(float)RAND_MAX - 0.5)*0.1;
dataType bondCouponFrequency = 2;
dataType bondCleanPrice = 89.97693786;
bondsYieldTermStruct bondCurve;
bondCurve.refDate = todaysDate;
bondCurve.calDate = todaysDate;
bondCurve.forward = -0.1f; // dummy rate
bondCurve.compounding = COMPOUNDED_INTEREST;
bondCurve.frequency = bondCouponFrequency;
bondCurve.dayCounter = USE_EXACT_DAY;
bondCurve.refDate = todaysDate;
bondCurve.calDate = todaysDate;
bondCurve.compounding = COMPOUNDED_INTEREST;
bondCurve.frequency = bondCouponFrequency;
dataType dummyStrike = 91.5745;
bondsYieldTermStruct repoCurve;
repoCurve.refDate = todaysDate;
repoCurve.calDate = todaysDate;
repoCurve.forward = repoRate;
repoCurve.compounding = repoCompounding;
repoCurve.frequency = repoCompoundFreq;
repoCurve.dayCounter = USE_SERIAL_NUMS;
inArgsHost.discountCurve[numBond] = bondCurve;
inArgsHost.repoCurve[numBond] = repoCurve;
inArgsHost.currDate[numBond] = todaysDate;
inArgsHost.maturityDate[numBond] = bondMaturityDate;
inArgsHost.bondCleanPrice[numBond] = bondCleanPrice;
inArgsHost.bond[numBond] = bond;
inArgsHost.dummyStrike[numBond] = dummyStrike;
}
printf("Inputs for bond with index %d\n", numBonds/2);
printf("Bond Issue Date: %d-%d-%d\n", inArgsHost.bond[numBonds/2].startDate.month,
inArgsHost.bond[numBonds/2].startDate.day,
inArgsHost.bond[numBonds/2].startDate.year);
printf("Bond Maturity Date: %d-%d-%d\n", inArgsHost.bond[numBonds/2].maturityDate.month,
inArgsHost.bond[numBonds/2].maturityDate.day,
inArgsHost.bond[numBonds/2].maturityDate.year);
printf("Bond rate: %f\n\n", inArgsHost.bond[numBonds/2].rate);
resultsStruct resultsHost;
resultsStruct resultsFromGpu;
resultsHost.dirtyPrice = (dataType*)malloc(numBonds*sizeof(dataType));
resultsHost.accruedAmountCurrDate = (dataType*)malloc(numBonds*sizeof(dataType));
resultsHost.cleanPrice = (dataType*)malloc(numBonds*sizeof(dataType));
resultsHost.bondForwardVal = (dataType*)malloc(numBonds*sizeof(dataType));
resultsFromGpu.dirtyPrice = (dataType*)malloc(numBonds*sizeof(dataType));
resultsFromGpu.accruedAmountCurrDate = (dataType*)malloc(numBonds*sizeof(dataType));
resultsFromGpu.cleanPrice = (dataType*)malloc(numBonds*sizeof(dataType));
resultsFromGpu.bondForwardVal = (dataType*)malloc(numBonds*sizeof(dataType));
long ktimeGpu = 0;
double timeCpu;
double timeGpu;
struct timeval start;
struct timeval end;
gettimeofday(&start, NULL);
for (int i = 0; i < repeat; i++)
ktimeGpu += getBondsResultsGpu(inArgsHost, resultsFromGpu, numBonds);
gettimeofday(&end, NULL);
timeGpu = (end.tv_sec - start.tv_sec) * 1e6 + end.tv_usec - start.tv_usec;
printf("Run on GPU\n");
printf("Average kernel execution time on GPU: %lf (ms) \n\n", ktimeGpu * 1e-3 / repeat);
printf("Average processing time on GPU: %f (ms) \n\n", timeGpu * 1e-3 / repeat);
double totPrice = 0.0;
int numBond1;
for (numBond1= 0; numBond1< numBonds; numBond1++)
{
totPrice += resultsFromGpu.dirtyPrice[numBond1];
}
printf("Sum of output dirty prices on GPU: %f\n", totPrice);
printf("Outputs on GPU for bond with index %d: \n", numBonds/2);
printf("Dirty Price: %f\n", resultsFromGpu.dirtyPrice[numBonds/2]);
printf("Accrued Amount: %f\n", resultsFromGpu.accruedAmountCurrDate[numBonds/2]);
printf("Clean Price: %f\n", resultsFromGpu.cleanPrice[numBonds/2]);
printf("Bond Forward Val: %f\n\n", resultsFromGpu.bondForwardVal[numBonds/2]);
gettimeofday(&start, NULL);
for (int i = 0; i < 2; i++)
getBondsResultsCpu(inArgsHost, resultsHost, numBonds);
gettimeofday(&end, NULL);
timeCpu = (end.tv_sec - start.tv_sec) * 1e6 + end.tv_usec - start.tv_usec;
printf("Run on CPU\n");
printf("Average processing time on CPU: %lf (ms) \n\n", timeCpu * 1e-3 / 2);
totPrice = 0.0;
for (numBond1= 0; numBond1< numBonds; numBond1++)
{
totPrice += resultsHost.dirtyPrice[numBond1];
}
printf("Sum of output dirty prices on CPU: %f\n", totPrice);
printf("Outputs on CPU for bond with index %d: \n", numBonds/2);
printf("Dirty Price: %f\n", resultsHost.dirtyPrice[numBonds/2]);
printf("Accrued Amount: %f\n", resultsHost.accruedAmountCurrDate[numBonds/2]);
printf("Clean Price: %f\n", resultsHost.cleanPrice[numBonds/2]);
printf("Bond Forward Val: %f\n\n", resultsHost.bondForwardVal[numBonds/2]);
printf("Speedup using GPU: %f\n", (timeCpu / 2) / (timeGpu / repeat) );
free(resultsHost.dirtyPrice);
free(resultsHost.accruedAmountCurrDate);
free(resultsHost.cleanPrice);
free(resultsHost.bondForwardVal);
free(resultsFromGpu.dirtyPrice);
free(resultsFromGpu.accruedAmountCurrDate);
free(resultsFromGpu.cleanPrice);
free(resultsFromGpu.bondForwardVal);
free(inArgsHost.discountCurve);
free(inArgsHost.repoCurve);
free(inArgsHost.currDate);
free(inArgsHost.maturityDate);
free(inArgsHost.bondCleanPrice);
free(inArgsHost.bond);
free(inArgsHost.dummyStrike);
}
}
int main(int argc, char* argv[])
{
if (argc != 2) {
printf("Usage: %s <repeat>\n", argv[0]);
return 1;
}
const int repeat = atoi(argv[1]);
runBoundsEngine(repeat);
return 0;
}
| 7df9a502a79ad114a4cefcb7b3dcd5de5a406bb1.cu | //bondsEngine.cu
//Scott Grauer-Gray [email protected]
//Contains main function for running bonds application on a GPU
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#include "bondsStructs.h"
#include "bondsKernelsGpu.cu"
#include "bondsKernelsCpu.cu"
#define MIN(a, b) (((a) < (b)) ? (a) : (b))
#define MAX(a, b) (((a) > (b)) ? (a) : (b))
int monthLengthCpu(int month, bool leapYear)
{
int MonthLength[] = {
31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31
};
int MonthLeapLength[] = {
31, 29, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31
};
return (leapYear? MonthLeapLength[month-1] : MonthLength[month-1]);
}
int monthOffsetCpu(int m, bool leapYear)
{
int MonthOffset[] = {
0, 31, 59, 90, 120, 151, // Jan - Jun
181, 212, 243, 273, 304, 334, // Jun - Dec
365 // used in dayOfMonth to bracket day
};
int MonthLeapOffset[] = {
0, 31, 60, 91, 121, 152, // Jan - Jun
182, 213, 244, 274, 305, 335, // Jun - Dec
366 // used in dayOfMonth to bracket day
};
return (leapYear? MonthLeapOffset[m-1] : MonthOffset[m-1]);
}
int yearOffsetCpu(int y)
{
// the list of all December 31st in the preceding year
// e.g. for 1901 yearOffset[1] is 366, that is, December 31 1900
int YearOffset[] = {
// 1900-1909
0, 366, 731, 1096, 1461, 1827, 2192, 2557, 2922, 3288,
// 1910-1919
3653, 4018, 4383, 4749, 5114, 5479, 5844, 6210, 6575, 6940,
// 1920-1929
7305, 7671, 8036, 8401, 8766, 9132, 9497, 9862,10227,10593,
// 1930-1939
10958,11323,11688,12054,12419,12784,13149,13515,13880,14245,
// 1940-1949
14610,14976,15341,15706,16071,16437,16802,17167,17532,17898,
// 1950-1959
18263,18628,18993,19359,19724,20089,20454,20820,21185,21550,
// 1960-1969
21915,22281,22646,23011,23376,23742,24107,24472,24837,25203,
// 1970-1979
25568,25933,26298,26664,27029,27394,27759,28125,28490,28855,
// 1980-1989
29220,29586,29951,30316,30681,31047,31412,31777,32142,32508,
// 1990-1999
32873,33238,33603,33969,34334,34699,35064,35430,35795,36160,
// 2000-2009
36525,36891,37256,37621,37986,38352,38717,39082,39447,39813,
// 2010-2019
40178,40543,40908,41274,41639,42004,42369,42735,43100,43465,
// 2020-2029
43830,44196,44561,44926,45291,45657,46022,46387,46752,47118,
// 2030-2039
47483,47848,48213,48579,48944,49309,49674,50040,50405,50770,
// 2040-2049
51135,51501,51866,52231,52596,52962,53327,53692,54057,54423,
// 2050-2059
54788,55153,55518,55884,56249,56614,56979,57345,57710,58075,
// 2060-2069
58440,58806,59171,59536,59901,60267,60632,60997,61362,61728,
// 2070-2079
62093,62458,62823,63189,63554,63919,64284,64650,65015,65380,
// 2080-2089
65745,66111,66476,66841,67206,67572,67937,68302,68667,69033,
// 2090-2099
69398,69763,70128,70494,70859,71224,71589,71955,72320,72685,
// 2100-2109
73050,73415,73780,74145,74510,74876,75241,75606,75971,76337,
// 2110-2119
76702,77067,77432,77798,78163,78528,78893,79259,79624,79989,
// 2120-2129
80354,80720,81085,81450,81815,82181,82546,82911,83276,83642,
// 2130-2139
84007,84372,84737,85103,85468,85833,86198,86564,86929,87294,
// 2140-2149
87659,88025,88390,88755,89120,89486,89851,90216,90581,90947,
// 2150-2159
91312,91677,92042,92408,92773,93138,93503,93869,94234,94599,
// 2160-2169
94964,95330,95695,96060,96425,96791,97156,97521,97886,98252,
// 2170-2179
98617,98982,99347,99713,100078,100443,100808,101174,101539,101904,
// 2180-2189
102269,102635,103000,103365,103730,104096,104461,104826,105191,105557,
// 2190-2199
105922,106287,106652,107018,107383,107748,108113,108479,108844,109209,
// 2200
109574
};
return YearOffset[y-1900];
}
bool isLeapCpu(int y)
{
bool YearIsLeap[] = {
// 1900 is leap in agreement with Excel's bug
// 1900 is out of valid date range anyway
// 1900-1909
true,false,false,false, true,false,false,false, true,false,
// 1910-1919
false,false, true,false,false,false, true,false,false,false,
// 1920-1929
true,false,false,false, true,false,false,false, true,false,
// 1930-1939
false,false, true,false,false,false, true,false,false,false,
// 1940-1949
true,false,false,false, true,false,false,false, true,false,
// 1950-1959
false,false, true,false,false,false, true,false,false,false,
// 1960-1969
true,false,false,false, true,false,false,false, true,false,
// 1970-1979
false,false, true,false,false,false, true,false,false,false,
// 1980-1989
true,false,false,false, true,false,false,false, true,false,
// 1990-1999
false,false, true,false,false,false, true,false,false,false,
// 2000-2009
true,false,false,false, true,false,false,false, true,false,
// 2010-2019
false,false, true,false,false,false, true,false,false,false,
// 2020-2029
true,false,false,false, true,false,false,false, true,false,
// 2030-2039
false,false, true,false,false,false, true,false,false,false,
// 2040-2049
true,false,false,false, true,false,false,false, true,false,
// 2050-2059
false,false, true,false,false,false, true,false,false,false,
// 2060-2069
true,false,false,false, true,false,false,false, true,false,
// 2070-2079
false,false, true,false,false,false, true,false,false,false,
// 2080-2089
true,false,false,false, true,false,false,false, true,false,
// 2090-2099
false,false, true,false,false,false, true,false,false,false,
// 2100-2109
false,false,false,false, true,false,false,false, true,false,
// 2110-2119
false,false, true,false,false,false, true,false,false,false,
// 2120-2129
true,false,false,false, true,false,false,false, true,false,
// 2130-2139
false,false, true,false,false,false, true,false,false,false,
// 2140-2149
true,false,false,false, true,false,false,false, true,false,
// 2150-2159
false,false, true,false,false,false, true,false,false,false,
// 2160-2169
true,false,false,false, true,false,false,false, true,false,
// 2170-2179
false,false, true,false,false,false, true,false,false,false,
// 2180-2189
true,false,false,false, true,false,false,false, true,false,
// 2190-2199
false,false, true,false,false,false, true,false,false,false,
// 2200
false
};
return YearIsLeap[y-1900];
}
bondsDateStruct intializeDateCpu(int d, int m, int y)
{
bondsDateStruct currDate;
currDate.day = d;
currDate.month = m;
currDate.year = y;
bool leap = isLeapCpu(y);
int offset = monthOffsetCpu(m,leap);
currDate.dateSerialNum = d + offset + yearOffsetCpu(y);
return currDate;
}
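/* Worked example (same construction as the HIP copy of this file earlier in the
document): 15 March 2000 gives 15 + monthOffsetCpu(3, true) + yearOffsetCpu(2000)
= 15 + 60 + 36525 = 36600. */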
void runBoundsEngine(const int repeat)
{
//can run multiple problem sizes by adding more entries to nBondsArray and raising the loop bound below
int nBondsArray[] = {1000000};
for (int numTime=0; numTime < 1; numTime++)
{
int numBonds = nBondsArray[numTime];
printf("\nNumber of Bonds: %d\n\n", numBonds);
inArgsStruct inArgsHost;
inArgsHost.discountCurve = (bondsYieldTermStruct*)malloc(numBonds*sizeof(bondsYieldTermStruct));
inArgsHost.repoCurve = (bondsYieldTermStruct*)malloc(numBonds*sizeof(bondsYieldTermStruct));
inArgsHost.currDate = (bondsDateStruct*)malloc(numBonds*sizeof(bondsDateStruct));
inArgsHost.maturityDate = (bondsDateStruct*)malloc(numBonds*sizeof(bondsDateStruct));
inArgsHost.bondCleanPrice = (dataType*)malloc(numBonds*sizeof(dataType));
inArgsHost.bond = (bondStruct*)malloc(numBonds*sizeof(bondStruct));
inArgsHost.dummyStrike = (dataType*)malloc(numBonds*sizeof(dataType));
srand (123);
int numBond;
for (numBond = 0; numBond < numBonds; numBond++)
{
dataType repoRate = 0.07;
//int repoSettlementDays = 0;
int repoCompounding = SIMPLE_INTEREST;
dataType repoCompoundFreq = 1;
// assume a ten year bond- this is irrelevant
bondsDateStruct bondIssueDate = intializeDateCpu(rand() % 28 + 1, rand() % 12 + 1, 1999 - (rand() % 2));
bondsDateStruct bondMaturityDate = intializeDateCpu(rand() % 28 + 1, rand() % 12 + 1, 2000 + (rand() % 2));
bondsDateStruct todaysDate = intializeDateCpu(bondMaturityDate.day-1,bondMaturityDate.month,bondMaturityDate.year);
bondStruct bond;
bond.startDate = bondIssueDate;
bond.maturityDate = bondMaturityDate;
bond.rate = 0.08 + ((float)rand()/(float)RAND_MAX - 0.5)*0.1;
dataType bondCouponFrequency = 2;
dataType bondCleanPrice = 89.97693786;
bondsYieldTermStruct bondCurve;
bondCurve.refDate = todaysDate;
bondCurve.calDate = todaysDate;
bondCurve.forward = -0.1f; // dummy rate
bondCurve.compounding = COMPOUNDED_INTEREST;
bondCurve.frequency = bondCouponFrequency;
bondCurve.dayCounter = USE_EXACT_DAY;
bondCurve.refDate = todaysDate;
bondCurve.calDate = todaysDate;
bondCurve.compounding = COMPOUNDED_INTEREST;
bondCurve.frequency = bondCouponFrequency;
dataType dummyStrike = 91.5745;
bondsYieldTermStruct repoCurve;
repoCurve.refDate = todaysDate;
repoCurve.calDate = todaysDate;
repoCurve.forward = repoRate;
repoCurve.compounding = repoCompounding;
repoCurve.frequency = repoCompoundFreq;
repoCurve.dayCounter = USE_SERIAL_NUMS;
inArgsHost.discountCurve[numBond] = bondCurve;
inArgsHost.repoCurve[numBond] = repoCurve;
inArgsHost.currDate[numBond] = todaysDate;
inArgsHost.maturityDate[numBond] = bondMaturityDate;
inArgsHost.bondCleanPrice[numBond] = bondCleanPrice;
inArgsHost.bond[numBond] = bond;
inArgsHost.dummyStrike[numBond] = dummyStrike;
}
printf("Inputs for bond with index %d\n", numBonds/2);
printf("Bond Issue Date: %d-%d-%d\n", inArgsHost.bond[numBonds/2].startDate.month,
inArgsHost.bond[numBonds/2].startDate.day,
inArgsHost.bond[numBonds/2].startDate.year);
printf("Bond Maturity Date: %d-%d-%d\n", inArgsHost.bond[numBonds/2].maturityDate.month,
inArgsHost.bond[numBonds/2].maturityDate.day,
inArgsHost.bond[numBonds/2].maturityDate.year);
printf("Bond rate: %f\n\n", inArgsHost.bond[numBonds/2].rate);
resultsStruct resultsHost;
resultsStruct resultsFromGpu;
resultsHost.dirtyPrice = (dataType*)malloc(numBonds*sizeof(dataType));
resultsHost.accruedAmountCurrDate = (dataType*)malloc(numBonds*sizeof(dataType));;
resultsHost.cleanPrice = (dataType*)malloc(numBonds*sizeof(dataType));;
resultsHost.bondForwardVal = (dataType*)malloc(numBonds*sizeof(dataType));;
resultsFromGpu.dirtyPrice = (dataType*)malloc(numBonds*sizeof(dataType));
resultsFromGpu.accruedAmountCurrDate = (dataType*)malloc(numBonds*sizeof(dataType));;
resultsFromGpu.cleanPrice = (dataType*)malloc(numBonds*sizeof(dataType));;
resultsFromGpu.bondForwardVal = (dataType*)malloc(numBonds*sizeof(dataType));;
long ktimeGpu = 0;
double timeCpu;
double timeGpu;
struct timeval start;
struct timeval end;
gettimeofday(&start, NULL);
for (int i = 0; i < repeat; i++)
ktimeGpu += getBondsResultsGpu(inArgsHost, resultsFromGpu, numBonds);
gettimeofday(&end, NULL);
timeGpu = (end.tv_sec - start.tv_sec) * 1e6 + end.tv_usec - start.tv_usec;
printf("Run on GPU\n");
printf("Average kernel execution time on GPU: %lf (ms) \n\n", ktimeGpu * 1e-3 / repeat);
printf("Average processing time on GPU: %f (ms) \n\n", timeGpu * 1e-3 / repeat);
double totPrice = 0.0;
int numBond1;
for (numBond1= 0; numBond1< numBonds; numBond1++)
{
totPrice += resultsFromGpu.dirtyPrice[numBond1];
}
printf("Sum of output dirty prices on GPU: %f\n", totPrice);
printf("Outputs on GPU for bond with index %d: \n", numBonds/2);
printf("Dirty Price: %f\n", resultsFromGpu.dirtyPrice[numBonds/2]);
printf("Accrued Amount: %f\n", resultsFromGpu.accruedAmountCurrDate[numBonds/2]);
printf("Clean Price: %f\n", resultsFromGpu.cleanPrice[numBonds/2]);
printf("Bond Forward Val: %f\n\n", resultsFromGpu.bondForwardVal[numBonds/2]);
gettimeofday(&start, NULL);
for (int i = 0; i < 2; i++)
getBondsResultsCpu(inArgsHost, resultsHost, numBonds);
gettimeofday(&end, NULL);
timeCpu = (end.tv_sec - start.tv_sec) * 1e6 + end.tv_usec - start.tv_usec;
printf("Run on CPU\n");
printf("Average processing time on CPU: %lf (ms) \n\n", timeCpu * 1e-3 / 2);
totPrice = 0.0;
for (numBond1= 0; numBond1< numBonds; numBond1++)
{
totPrice += resultsHost.dirtyPrice[numBond1];
}
printf("Sum of output dirty prices on CPU: %f\n", totPrice);
printf("Outputs on CPU for bond with index %d: \n", numBonds/2);
printf("Dirty Price: %f\n", resultsHost.dirtyPrice[numBonds/2]);
printf("Accrued Amount: %f\n", resultsHost.accruedAmountCurrDate[numBonds/2]);
printf("Clean Price: %f\n", resultsHost.cleanPrice[numBonds/2]);
printf("Bond Forward Val: %f\n\n", resultsHost.bondForwardVal[numBonds/2]);
printf("Speedup using GPU: %f\n", (timeCpu / 2) / (timeGpu / repeat) );
free(resultsHost.dirtyPrice);
free(resultsHost.accruedAmountCurrDate);;
free(resultsHost.cleanPrice);;
free(resultsHost.bondForwardVal);;
free(resultsFromGpu.dirtyPrice);
free(resultsFromGpu.accruedAmountCurrDate);;
free(resultsFromGpu.cleanPrice);;
free(resultsFromGpu.bondForwardVal);
free(inArgsHost.discountCurve);
free(inArgsHost.repoCurve);
free(inArgsHost.currDate);
free(inArgsHost.maturityDate);
free(inArgsHost.bondCleanPrice);
free(inArgsHost.bond);
free(inArgsHost.dummyStrike);
}
}
int main(int argc, char* argv[])
{
if (argc != 2) {
printf("Usage: %s <repeat>\n", argv[0]);
return 1;
}
const int repeat = atoi(argv[1]);
runBoundsEngine(repeat);
return 0;
}
|
89d1344445a2078a5d59bd68059ec593429c60e1.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
__global__ void modify_i_j( int width, int height, int pitch, float *d_array, int i, int j, float change_to ){
//we want to change the [i,j]-th element of the 2-dim array
int idx = blockIdx.x; //row
int idy = threadIdx.x; //column
//we can do index by pointer:
//if ((idx == i) && (idy == j)){
//float* row = (float *)((char*)d_array + idx*pitch);
// row[idy] = change_to;
//}
//or, more conveniently, we can index using just idx and idy
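//note: both forms address the same element, since pitch is given in bytes:
// ((float*)((char*)d_array + idx*pitch))[idy] == d_array[idx*(pitch/sizeof(float)) + idy]
//(this assumes pitch is a multiple of sizeof(float), which hipMallocPitch-style pitched allocations satisfy)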
if ((idx==i)&&(idy==j))
{
d_array[idx*(pitch/sizeof(float)) + idy] = change_to;
}
} | 89d1344445a2078a5d59bd68059ec593429c60e1.cu | #include "includes.h"
__global__ void modify_i_j( int width, int height, int pitch, float *d_array, int i, int j, float change_to ){
//we want to change the [i,j]-th element of the 2-dim array
int idx = blockIdx.x; //row
int idy = threadIdx.x; //column
//we can do index by pointer:
//if ((idx == i) && (idy == j)){
//float* row = (float *)((char*)d_array + idx*pitch);
// row[idy] = change_to;
//}
//or, more conveniently, we can index using just idx and idy
if ((idx==i)&&(idy==j))
{
d_array[idx*(pitch/sizeof(float)) + idy] = change_to;
}
} |
33f093fb09e3ceb91ad2f8aebed09696ef32fc4e.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/**
* @file matrixMult.cu
* @details This file describes the functions belonging to MatrixMult class.
* @author Antonio Jose Lazaro Munoz.
* @date 20/02/2016
*/
#include "matrixMult.h"
#include "matrixMult_kernel.cu"
MatrixMult::MatrixMult(int *sizes)
{
valB = 0.01f;
h_A_MM = NULL;
h_B_MM = NULL;
h_C_MM = NULL;
d_A_MM = NULL;
d_B_MM = NULL;
d_C_MM = NULL;
uiWA_MM = sizes[0];
uiHA_MM = sizes[1];
uiWB_MM = sizes[2];
uiHB_MM = sizes[3];
uiWC_MM = uiWB_MM;
uiHC_MM = uiHA_MM;
mem_size_A_MM = uiWA_MM * uiHA_MM;
mem_size_B_MM = uiWB_MM * uiHB_MM;
mem_size_C_MM = uiWC_MM * uiHC_MM;
blockSize = 16;
}
MatrixMult::~MatrixMult()
{
if(h_A_MM!=NULL) hipHostFree(h_A_MM);
if(h_B_MM!=NULL) hipHostFree(h_B_MM);
if(h_C_MM!=NULL) hipHostFree(h_C_MM);
}
void MatrixMult::allocHostMemory(void)
{
hipHostMalloc((void **)&h_A_MM, mem_size_A_MM * sizeof(float));
hipHostMalloc((void **)&h_B_MM, mem_size_B_MM * sizeof(float));
hipHostMalloc((void **)&h_C_MM, mem_size_C_MM * sizeof(float));
}
void MatrixMult::freeHostMemory(void)
{
if(h_A_MM!=NULL) hipHostFree(h_A_MM);
if(h_B_MM!=NULL) hipHostFree(h_B_MM);
if(h_C_MM!=NULL) hipHostFree(h_C_MM);
}
void MatrixMult::allocDeviceMemory(void)
{
hipMalloc((void**) &d_A_MM, mem_size_A_MM * sizeof(float));
hipMalloc((void**) &d_B_MM, mem_size_B_MM * sizeof(float));
hipMalloc((void**) &d_C_MM, mem_size_C_MM * sizeof(float));
}
void MatrixMult::freeDeviceMemory(void)
{
if(d_A_MM!=NULL) hipFree(d_A_MM);
if(d_B_MM!=NULL) hipFree(d_B_MM);
if(d_C_MM!=NULL) hipFree(d_C_MM);
}
void MatrixMult::constantInit(float *data, int size, float val)
{
for (int i = 0; i < size; ++i)
{
data[i] = val;
}
}
void MatrixMult::generatingData(void)
{
constantInit(h_A_MM, mem_size_A_MM, 1.0f);
constantInit(h_B_MM, mem_size_B_MM, valB);
hipMemset(d_C_MM, 0, mem_size_C_MM * sizeof(float));
}
void MatrixMult::memHostToDeviceAsync(hipStream_t stream)
{
hipMemcpyAsync(d_A_MM, h_A_MM, mem_size_A_MM*sizeof(float), hipMemcpyHostToDevice, stream);
hipMemcpyAsync(d_B_MM, h_B_MM, mem_size_B_MM*sizeof(float), hipMemcpyHostToDevice, stream);
}
void MatrixMult::memHostToDevice(void)
{
hipMemcpy(d_A_MM, h_A_MM, mem_size_A_MM*sizeof(float), hipMemcpyHostToDevice);
hipMemcpy(d_B_MM, h_B_MM, mem_size_B_MM*sizeof(float), hipMemcpyHostToDevice);
}
void MatrixMult::memDeviceToHostAsync(hipStream_t stream)
{
hipMemcpyAsync(h_C_MM, d_C_MM, mem_size_C_MM*sizeof(float), hipMemcpyDeviceToHost, stream);
}
void MatrixMult::memDeviceToHost(void)
{
hipMemcpy(h_C_MM, d_C_MM, mem_size_C_MM*sizeof(float), hipMemcpyDeviceToHost);
}
void MatrixMult::launch_kernel_Async(hipStream_t stream)
{
dim3 threadsS(blockSize, blockSize);
dim3 blocksS(uiWC_MM/ threadsS.x, uiHC_MM / threadsS.y);
hipLaunchKernelGGL(( matrixMul), dim3(blocksS), dim3(threadsS), 0, stream , d_C_MM,
d_A_MM,
d_B_MM, uiWA_MM, uiWB_MM);
}
void MatrixMult::launch_kernel(void)
{
dim3 threadsS(blockSize, blockSize);
dim3 blocksS(uiWC_MM/ threadsS.x, uiHC_MM / threadsS.y);
hipLaunchKernelGGL(( matrixMul), dim3(blocksS), dim3(threadsS), 0, 0, d_C_MM,
d_A_MM,
d_B_MM, uiWA_MM, uiWB_MM);
}
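// Illustrative usage sketch (hypothetical helper, not part of the original benchmark class):
// the typical synchronous call order for one run of this object. Error checking is omitted,
// as it is in the methods above.
static void runMatrixMultOnce(int *sizes)
{
    MatrixMult mm(sizes);
    mm.allocHostMemory();
    mm.allocDeviceMemory();
    mm.generatingData();
    mm.memHostToDevice();
    mm.launch_kernel();
    mm.memDeviceToHost();
    mm.checkResults();
    mm.freeDeviceMemory();
    mm.freeHostMemory();
}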
void MatrixMult::checkResults(void)
{
bool correct = true;
//for (int i = 0; i < size_C_MM; i++)
for (int i = 0; i < mem_size_C_MM; i++)
{
if (fabs(h_C_MM[i] - (uiWA_MM * valB)) > 0.1)//1e-5)
{
printf("Error! Matrix[%05d]=%.8f, ref=%.8f error term is > 1e-5\n", i, h_C_MM[i], uiWA_MM*valB);
correct = false;
}
}
if(correct == false)
{
printf("Error Matrix Multiplication\n");
}
}
void MatrixMult::getBytesHTD(int *bytes_htd)
{
*bytes_htd = mem_size_A_MM*sizeof(float) + mem_size_B_MM*sizeof(float);
}
void MatrixMult::getBytesDTH(int *bytes_dth)
{
*bytes_dth = mem_size_C_MM * sizeof(float);
}
void MatrixMult::getTimeEstimations_HTD_DTH(int gpu, float *estimated_time_HTD, float *estimated_time_DTH,
float *estimated_overlapped_time_HTD, float *estimated_overlapped_time_DTH,
float LoHTD, float LoDTH, float GHTD, float GDTH, float overlappedGHTD, float overlappedGDTH)
{
hipDeviceProp_t props;
hipGetDeviceProperties(&props, gpu);
int bytes_HTD;
int bytes_DTH;
getBytesHTD(&bytes_HTD);
getBytesDTH(&bytes_DTH);
*estimated_time_HTD = LoHTD + (bytes_HTD) * GHTD;
*estimated_overlapped_time_HTD = 0.0;
if(props.asyncEngineCount == 2)
*estimated_overlapped_time_HTD = LoHTD + (bytes_HTD) * overlappedGHTD;
*estimated_time_DTH = LoDTH + (bytes_DTH) * GDTH;
*estimated_overlapped_time_DTH= 0.0;
if(props.asyncEngineCount == 2)
*estimated_overlapped_time_DTH= LoDTH + (bytes_DTH) * overlappedGDTH;
} | 33f093fb09e3ceb91ad2f8aebed09696ef32fc4e.cu | /**
* @file matrixMult.cu
* @details This file describes the functions belonging to MatrixMult class.
* @author Antonio Jose Lazaro Munoz.
* @date 20/02/2016
*/
#include "matrixMult.h"
#include "matrixMult_kernel.cu"
MatrixMult::MatrixMult(int *sizes)
{
valB = 0.01f;
h_A_MM = NULL;
h_B_MM = NULL;
h_C_MM = NULL;
d_A_MM = NULL;
d_B_MM = NULL;
d_C_MM = NULL;
uiWA_MM = sizes[0];
uiHA_MM = sizes[1];
uiWB_MM = sizes[2];
uiHB_MM = sizes[3];
uiWC_MM = uiWB_MM;
uiHC_MM = uiHA_MM;
mem_size_A_MM = uiWA_MM * uiHA_MM;
mem_size_B_MM = uiWB_MM * uiHB_MM;
mem_size_C_MM = uiWC_MM * uiHC_MM;
blockSize = 16;
}
MatrixMult::~MatrixMult()
{
if(h_A_MM!=NULL) cudaFreeHost(h_A_MM);
if(h_B_MM!=NULL) cudaFreeHost(h_B_MM);
if(h_C_MM!=NULL) cudaFreeHost(h_C_MM);
}
void MatrixMult::allocHostMemory(void)
{
cudaMallocHost((void **)&h_A_MM, mem_size_A_MM * sizeof(float));
cudaMallocHost((void **)&h_B_MM, mem_size_B_MM * sizeof(float));
cudaMallocHost((void **)&h_C_MM, mem_size_C_MM * sizeof(float));
}
void MatrixMult::freeHostMemory(void)
{
if(h_A_MM!=NULL) cudaFreeHost(h_A_MM);
if(h_B_MM!=NULL) cudaFreeHost(h_B_MM);
if(h_C_MM!=NULL) cudaFreeHost(h_C_MM);
}
void MatrixMult::allocDeviceMemory(void)
{
cudaMalloc((void**) &d_A_MM, mem_size_A_MM * sizeof(float));
cudaMalloc((void**) &d_B_MM, mem_size_B_MM * sizeof(float));
cudaMalloc((void**) &d_C_MM, mem_size_C_MM * sizeof(float));
}
void MatrixMult::freeDeviceMemory(void)
{
if(d_A_MM!=NULL) cudaFree(d_A_MM);
if(d_B_MM!=NULL) cudaFree(d_B_MM);
if(d_C_MM!=NULL) cudaFree(d_C_MM);
}
void MatrixMult::constantInit(float *data, int size, float val)
{
for (int i = 0; i < size; ++i)
{
data[i] = val;
}
}
void MatrixMult::generatingData(void)
{
constantInit(h_A_MM, mem_size_A_MM, 1.0f);
constantInit(h_B_MM, mem_size_B_MM, valB);
cudaMemset(d_C_MM, 0, mem_size_C_MM * sizeof(float));
}
void MatrixMult::memHostToDeviceAsync(cudaStream_t stream)
{
cudaMemcpyAsync(d_A_MM, h_A_MM, mem_size_A_MM*sizeof(float), cudaMemcpyHostToDevice, stream);
cudaMemcpyAsync(d_B_MM, h_B_MM, mem_size_B_MM*sizeof(float), cudaMemcpyHostToDevice, stream);
}
void MatrixMult::memHostToDevice(void)
{
cudaMemcpy(d_A_MM, h_A_MM, mem_size_A_MM*sizeof(float), cudaMemcpyHostToDevice);
cudaMemcpy(d_B_MM, h_B_MM, mem_size_B_MM*sizeof(float), cudaMemcpyHostToDevice);
}
void MatrixMult::memDeviceToHostAsync(cudaStream_t stream)
{
cudaMemcpyAsync(h_C_MM, d_C_MM, mem_size_C_MM*sizeof(float), cudaMemcpyDeviceToHost, stream);
}
void MatrixMult::memDeviceToHost(void)
{
cudaMemcpy(h_C_MM, d_C_MM, mem_size_C_MM*sizeof(float), cudaMemcpyDeviceToHost);
}
void MatrixMult::launch_kernel_Async(cudaStream_t stream)
{
dim3 threadsS(blockSize, blockSize);
dim3 blocksS(uiWC_MM/ threadsS.x, uiHC_MM / threadsS.y);
matrixMul<<< blocksS, threadsS, 0, stream >>>(d_C_MM,
d_A_MM,
d_B_MM, uiWA_MM, uiWB_MM);
}
void MatrixMult::launch_kernel(void)
{
dim3 threadsS(blockSize, blockSize);
dim3 blocksS(uiWC_MM/ threadsS.x, uiHC_MM / threadsS.y);
matrixMul<<< blocksS, threadsS>>>(d_C_MM,
d_A_MM,
d_B_MM, uiWA_MM, uiWB_MM);
}
void MatrixMult::checkResults(void)
{
bool correct = true;
//for (int i = 0; i < size_C_MM; i++)
for (int i = 0; i < mem_size_C_MM; i++)
{
if (fabs(h_C_MM[i] - (uiWA_MM * valB)) > 0.1)//1e-5)
{
printf("Error! Matrix[%05d]=%.8f, ref=%.8f error term is > 1e-5\n", i, h_C_MM[i], uiWA_MM*valB);
correct = false;
}
}
if(correct == false)
{
printf("Error Matrix Multiplication\n");
}
}
void MatrixMult::getBytesHTD(int *bytes_htd)
{
*bytes_htd = mem_size_A_MM*sizeof(float) + mem_size_B_MM*sizeof(float);
}
void MatrixMult::getBytesDTH(int *bytes_dth)
{
*bytes_dth = mem_size_C_MM * sizeof(float);
}
void MatrixMult::getTimeEstimations_HTD_DTH(int gpu, float *estimated_time_HTD, float *estimated_time_DTH,
float *estimated_overlapped_time_HTD, float *estimated_overlapped_time_DTH,
float LoHTD, float LoDTH, float GHTD, float GDTH, float overlappedGHTD, float overlappedGDTH)
{
cudaDeviceProp props;
cudaGetDeviceProperties(&props, gpu);
int bytes_HTD;
int bytes_DTH;
getBytesHTD(&bytes_HTD);
getBytesDTH(&bytes_DTH);
*estimated_time_HTD = LoHTD + (bytes_HTD) * GHTD;
*estimated_overlapped_time_HTD = 0.0;
if(props.asyncEngineCount == 2)
*estimated_overlapped_time_HTD = LoHTD + (bytes_HTD) * overlappedGHTD;
*estimated_time_DTH = LoDTH + (bytes_DTH) * GDTH;
*estimated_overlapped_time_DTH= 0.0;
if(props.asyncEngineCount == 2)
*estimated_overlapped_time_DTH= LoDTH + (bytes_DTH) * overlappedGDTH;
} |
Exercise_1.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
#define REPEAT 1
__global__ void arrayFunc(float* d_idata, float* d_jdata, float* d_odata, int size)
{
int tid = blockDim.x * blockIdx.x + threadIdx.x;
if (tid < size) {
for(int i=0; i < REPEAT; i++)
d_odata[tid] = d_idata[tid] * __expf(d_jdata[tid]);
}
}
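// Illustrative addition (not part of the original exercise): the calls below skip error
// checking for brevity; a minimal sketch of a check macro one might wrap around them.
// The macro name HIP_CHECK is hypothetical.
#define HIP_CHECK(call) \
  do { \
    hipError_t err_ = (call); \
    if (err_ != hipSuccess) { \
      fprintf(stderr, "HIP error %s at %s:%d\n", hipGetErrorString(err_), __FILE__, __LINE__); \
      exit(EXIT_FAILURE); \
    } \
  } while (0)
// e.g. HIP_CHECK(hipMalloc((void**)&d_a, nsize * sizeof(float)));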
void initArrayData(float * array, float alpha, int size);
void arrayFuncCPU(const float* h_idata, const float* h_jdata, float* h_odata, int size);
#define NSIZE 2097152
int
main (void) {
float *h_a, *h_b, *h_c;
float *d_a, *d_b, *d_c;
int nsize = NSIZE;
int nThreads = 256;
int nBlocks;
hipEvent_t start, end;
float eventEtime;
// calculate block number
nBlocks = (nsize-1) / nThreads + 1;
printf("Number of elements: %d\n", nsize);
printf("GPU execution with %d blocks each one of %d threads\n", nBlocks, nThreads);
// allocation and initialization of host buffers
h_a = (float*) malloc (nsize * sizeof(float));
h_b = (float*) malloc (nsize * sizeof(float));
h_c = (float*) malloc (nsize * sizeof(float));
initArrayData(h_a, 1.0f, nsize);
initArrayData(h_b, 10.0f, nsize);
//-- insert CUDA code ----------------
// allocation of device buffers
hipMalloc((void**)&d_a, nsize * sizeof(float));
hipMalloc((void**)&d_b, nsize * sizeof(float));
hipMalloc((void**)&d_c, nsize * sizeof(float));
//------------------------------------
// creation of cuda events: start, end
hipEventCreate(&start);
hipEventCreate(&end);
printf ("\nGPU computation ... ");
hipEventRecord(start,0);
//-- insert CUDA code ----------------
// host to device buffer copies
hipMemcpy(d_a, h_a, nsize * sizeof(float), hipMemcpyHostToDevice);
hipMemcpy(d_b, h_b, nsize * sizeof(float), hipMemcpyHostToDevice);
//------------------------------------
//-- insert CUDA code ----------------
// arrayFunc kernel launch
hipLaunchKernelGGL(( arrayFunc), dim3(nBlocks),dim3(nThreads), 0, 0, d_a, d_b, d_c, nsize);
//------------------------------------
//-- insert CUDA code ----------------
// copy back of results from device
hipMemcpy(h_c, d_c, nsize*sizeof(float), hipMemcpyDeviceToHost);
//------------------------------------
hipEventRecord(end,0);
hipEventSynchronize(end);
hipEventElapsedTime(&eventEtime, start, end);
printf ("ok\n");
printf("Elapsed time on GPU: %.2f ms\n", eventEtime);
// host computation
printf("\nCPU computation ... ");
float *cpuResult;
float eventTimeCPU;
hipHostMalloc((void**)&cpuResult, nsize * sizeof(float));
hipEventRecord(start,0);
arrayFuncCPU(h_a, h_b, cpuResult, nsize);
hipEventRecord(end,0);
hipEventSynchronize(end);
hipEventElapsedTime(&eventTimeCPU, start, end);
printf ("ok\n");
printf("Elapsed time on CPU: %.2f ms\n", eventTimeCPU);
printf("\nSpeed UP CPU/GPU %.1fx\n", eventTimeCPU/eventEtime);
printf("\nCheck results:\n");
printf ("h_c[0] = %f\n", h_c[0]);
printf ("cpuResult[0] = %f\n", cpuResult[0]);
// free resources on device
hipEventDestroy(start);
hipEventDestroy(end);
hipFree(d_a);
hipFree(d_b);
hipFree(d_c);
// free resources on host
free(h_a);
free(h_b);
free(h_c);
hipHostFree(cpuResult); // release the pinned buffer used for the CPU reference
return 0;
}
void
initArrayData(float * array, float alpha, int size)
{
int i;
for (i=0; i< size; i++)
array[i] = alpha * (float) rand() / (float) RAND_MAX;
}
void arrayFuncCPU(const float* h_idata, const float* h_jdata, float* h_odata, int size)
{
int i, j;
for (i=0; i<size; i++)
for (j=0; j<REPEAT; j++)
h_odata[i] = h_idata[i] * expf(h_jdata[i]);
}
| Exercise_1.cu | #include <stdio.h>
#include <stdlib.h>
#include <cuda.h>
#include <cuda_runtime.h>
#define REPEAT 1
__global__ void arrayFunc(float* d_idata, float* d_jdata, float* d_odata, int size)
{
int tid = blockDim.x * blockIdx.x + threadIdx.x;
if (tid < size) {
for(int i=0; i < REPEAT; i++)
d_odata[tid] = d_idata[tid] * __expf(d_jdata[tid]);
}
}
void initArrayData(float * array, float alpha, int size);
void arrayFuncCPU(const float* h_idata, const float* h_jdata, float* h_odata, int size);
#define NSIZE 2097152
int
main (void) {
float *h_a, *h_b, *h_c;
float *d_a, *d_b, *d_c;
int nsize = NSIZE;
int nThreads = 256;
int nBlocks;
cudaEvent_t start, end;
float eventEtime;
// calculate block number
nBlocks = (nsize-1) / nThreads + 1;
printf("Number of elements: %d\n", nsize);
printf("GPU execution with %d blocks each one of %d threads\n", nBlocks, nThreads);
// allocation and initialization of host buffers
h_a = (float*) malloc (nsize * sizeof(float));
h_b = (float*) malloc (nsize * sizeof(float));
h_c = (float*) malloc (nsize * sizeof(float));
initArrayData(h_a, 1.0f, nsize);
initArrayData(h_b, 10.0f, nsize);
//-- insert CUDA code ----------------
// allocation of device buffers
cudaMalloc((void**)&d_a, nsize * sizeof(float));
cudaMalloc((void**)&d_b, nsize * sizeof(float));
cudaMalloc((void**)&d_c, nsize * sizeof(float));
//------------------------------------
// creation of cuda events: start, end
cudaEventCreate(&start);
cudaEventCreate(&end);
printf ("\nGPU computation ... ");
cudaEventRecord(start,0);
//-- insert CUDA code ----------------
// host to device buffer copies
cudaMemcpy(d_a, h_a, nsize * sizeof(float), cudaMemcpyHostToDevice);
cudaMemcpy(d_b, h_b, nsize * sizeof(float), cudaMemcpyHostToDevice);
//------------------------------------
//-- insert CUDA code ----------------
// arrayFunc kernel launch
arrayFunc<<<nBlocks,nThreads>>>(d_a, d_b, d_c, nsize);
//------------------------------------
//-- insert CUDA code ----------------
// copy back of results from device
cudaMemcpy(h_c, d_c, nsize*sizeof(float), cudaMemcpyDeviceToHost);
//------------------------------------
cudaEventRecord(end,0);
cudaEventSynchronize(end);
cudaEventElapsedTime(&eventEtime, start, end);
printf ("ok\n");
printf("Elapsed time on GPU: %.2f ms\n", eventEtime);
// host computation
printf("\nCPU computation ... ");
float *cpuResult;
float eventTimeCPU;
cudaMallocHost((void**)&cpuResult, nsize * sizeof(float));
cudaEventRecord(start,0);
arrayFuncCPU(h_a, h_b, cpuResult, nsize);
cudaEventRecord(end,0);
cudaEventSynchronize(end);
cudaEventElapsedTime(&eventTimeCPU, start, end);
printf ("ok\n");
printf("Elapsed time on CPU: %.2f ms\n", eventTimeCPU);
printf("\nSpeed UP CPU/GPU %.1fx\n", eventTimeCPU/eventEtime);
printf("\nCheck results:\n");
printf ("h_c[0] = %f\n", h_c[0]);
printf ("cpuResult[0] = %f\n", cpuResult[0]);
// free resources on device
cudaEventDestroy(start);
cudaEventDestroy(end);
cudaFree(d_a);
cudaFree(d_b);
cudaFree(d_c);
// free resources on host
free(h_a);
free(h_b);
free(h_c);
cudaFreeHost(cpuResult); // release the pinned buffer used for the CPU reference
return 0;
}
void
initArrayData(float * array, float alpha, int size)
{
int i;
for (i=0; i< size; i++)
array[i] = alpha * (float) rand() / (float) RAND_MAX;
}
void arrayFuncCPU(const float* h_idata, const float* h_jdata, float* h_odata, int size)
{
int i, j;
for (i=0; i<size; i++)
for (j=0; j<REPEAT; j++)
h_odata[i] = h_idata[i] * expf(h_jdata[i]);
}
|
291669bcd961da5dbee57c57bc363ee23817f8cf.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "setBoundaryInt2_kernel.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
hipSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
int *d_boundary = NULL;
hipMalloc(&d_boundary, XSIZE*YSIZE);
int startPos = 1;
int numKey = 1;
int rLen = 1;
int2 *d_boundaryRange = NULL;
hipMalloc(&d_boundaryRange, XSIZE*YSIZE);
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
hipFree(0);hipLaunchKernelGGL((
setBoundaryInt2_kernel), dim3(gridBlock),dim3(threadBlock), 0, 0, d_boundary,startPos,numKey,rLen,d_boundaryRange);
hipDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {hipLaunchKernelGGL((
setBoundaryInt2_kernel), dim3(gridBlock),dim3(threadBlock), 0, 0, d_boundary,startPos,numKey,rLen,d_boundaryRange);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {hipLaunchKernelGGL((
setBoundaryInt2_kernel), dim3(gridBlock),dim3(threadBlock), 0, 0, d_boundary,startPos,numKey,rLen,d_boundaryRange);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} | 291669bcd961da5dbee57c57bc363ee23817f8cf.cu | #include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "setBoundaryInt2_kernel.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
cudaSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
int *d_boundary = NULL;
cudaMalloc(&d_boundary, XSIZE*YSIZE);
int startPos = 1;
int numKey = 1;
int rLen = 1;
int2 *d_boundaryRange = NULL;
cudaMalloc(&d_boundaryRange, XSIZE*YSIZE);
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
cudaFree(0);
setBoundaryInt2_kernel<<<gridBlock,threadBlock>>>(d_boundary,startPos,numKey,rLen,d_boundaryRange);
cudaDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
setBoundaryInt2_kernel<<<gridBlock,threadBlock>>>(d_boundary,startPos,numKey,rLen,d_boundaryRange);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
setBoundaryInt2_kernel<<<gridBlock,threadBlock>>>(d_boundary,startPos,numKey,rLen,d_boundaryRange);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} |
69c8582fd2253faecb4abe54370b001ae475ea2c.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "kernels.h"
/* This kernel improves on the last one by using a smarter "linear indexing" approach.
* In this approach, all threads access *consecutive* indices on each iteration of the loop.
* This is better because it allows the hardware to take advantage of *global memory coalescing.*
*
* This time, each thread will add an element in the first half of the array with another one in
* the second half. This means that collectively, the threads will access
* consecutive array indices when they read from the right half, and when they (read and) write to the left half.
* Like the previous approach, the number of threads required still halves on each iteration.
* However, unlike the previous approach, the distance between the elements being added
* does *not* double on each iteration, because the results are written back to the first half
* of the array in a consecutive fashion. This can significantly improve the performance of
* the kernel because coalescing cuts down on the number of memory requests that need
* to be made (several large requests rather than many individual ones).
*/
__global__ void reduce(float *input, float *output, unsigned int n)
{
// Determine this thread's various ids
unsigned int block_size = blockDim.x;
unsigned int thread_id = threadIdx.x;
unsigned int block_id = blockIdx.x;
// Calculate the index that this block's chunk of values starts at.
// As last time, each thread adds 2 values, so each block adds a total of
// block_size * 2 values.
// Note: unlike last time, the stride here only needs to go up to block_size.
// This is because we are eliminating the gaps that accumulated between the
// partial results in the last approach.
unsigned int block_start = block_id * block_size * 2 + thread_id;
for (unsigned int stride = block_size; stride > 0; stride /= 2)
{
if (thread_id < stride && // On first iteration, this will be true for all threads.
// On subsequent iterations, it will ensure that we
// always use the threads in the lower half of the
// block (the ones with the lowest ids). This guarantees
// that the remaining values will always be
// contiguous in memory (unlike the last approach,
// which left gaps between them)
block_start + stride < n) // If we're the last block, we may be running more threads
// than we need - this condition makes sure they don't
// interfere.
{
input[block_start] += input[block_start + stride];
}
// As last time, we need to sync.
__syncthreads();
}
// As last time, thread 0 writes this block's partial result to the output buffer.
if (!thread_id)
{
output[block_id] = input[block_start];
}
}
| 69c8582fd2253faecb4abe54370b001ae475ea2c.cu | #include "kernels.h"
/* This kernel improves on the last one by using a smarter "linear indexing" approach.
* In this approach, all threads access *consecutive* indices on each iteration of the loop.
* This is better because it allows the hardware to take advantage of *global memory coalescing.*
*
* This time, each thread will add an element in the first half of the array with another one in
* the second half. This means that collectively, the threads will access
* consecutive array indices when they read from the right half, and when they (read and) write to the left half.
* Like the previous approach, the number of threads required still halves on each iteration.
* However, unlike the previous approach, the distance between the elements being added
* does *not* double on each iteration, because the results are written back to the first half
* of the array in a consecutive fashion. This can significantly improve the performance of
* the kernel because coalescing cuts down on the number of memory requests that need
* to be made (several large requests rather than many individual ones).
*/
__global__ void reduce(float *input, float *output, unsigned int n)
{
// Determine this thread's various ids
unsigned int block_size = blockDim.x;
unsigned int thread_id = threadIdx.x;
unsigned int block_id = blockIdx.x;
// Calculate the index that this block's chunk of values starts at.
// As last time, each thread adds 2 values, so each block adds a total of
// block_size * 2 values.
// Note: unlike last time, the stride here only needs to go up to block_size.
// This is because we are eliminating the gaps that accumulated between the
// partial results in the last approach.
unsigned int block_start = block_id * block_size * 2 + thread_id;
for (unsigned int stride = block_size; stride > 0; stride /= 2)
{
if (thread_id < stride && // On first iteration, this will be true for all threads.
// On subsequent iterations, it will ensure that we
// always use the threads in the lower half of the
// block (the ones with the lowest ids). This guarantees
// that the remaining values will always be
// contiguous in memory (unlike the last approach,
// which left gaps between them)
block_start + stride < n) // If we're the last block, we may be running more threads
// than we need - this condition makes sure they don't
// interfere.
{
input[block_start] += input[block_start + stride];
}
// As last time, we need to sync.
__syncthreads();
}
// As last time, thread 0 writes this block's partial result to the output buffer.
if (!thread_id)
{
output[block_id] = input[block_start];
}
}
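/* Illustrative host-side driver (a hedged sketch, not part of the original kernels file):
 * one way the multi-pass reduction could be run to a single value with this kernel.
 * The function and buffer names are hypothetical. It assumes a power-of-two block size,
 * that d_partial can hold ceil(n / (2 * block_size)) floats, that d_in may be overwritten
 * (the kernel already does so), and that this file is compiled with nvcc so the runtime
 * API is available.
 */
static float reduce_to_scalar(float *d_in, float *d_partial, unsigned int n, unsigned int block_size)
{
    // Each block consumes 2 * block_size elements, so launch passes until one value remains.
    while (n > 1)
    {
        unsigned int blocks = (n + 2 * block_size - 1) / (2 * block_size);
        reduce<<<blocks, block_size>>>(d_in, d_partial, n);
        // The partial results become the input of the next pass.
        float *tmp = d_in;
        d_in = d_partial;
        d_partial = tmp;
        n = blocks;
    }
    float result = 0.0f;
    cudaMemcpy(&result, d_in, sizeof(float), cudaMemcpyDeviceToHost);
    return result;
}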
|
f9b7a0729d6dd5346092d87acfffd4dc186bec87.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/**
* Copyright 2020 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "../utils/util.cuh"
#include "../algorithm/shared_reduce.cuh"
#include "../reduce.cuh"
#include "../store/store.cuh"
#include <cstdlib>
#include <ctime>
#include <cstdio>
using namespace akg_reduce;
using namespace std;
// File to test reduction of multiple aggregated values per thread,
// including single-block/multi-block reduction along the x/y directions.
template <typename T>
void CompareResults(T *arr1, T *arr2, int len) {
double total_err = 0.0;
bool flag = true;
for (auto i = 0; i < len; i++) {
if (std::abs(TypeTransform<double, T>(arr1[i]) - TypeTransform<double, T>(arr2[i])) > 1e-03) {
flag = false;
}
total_err += std::abs(TypeTransform<double, T>(arr1[i]) - TypeTransform<double, T>(arr2[i]));
}
if (flag) {
printf("[CORRECT] Output is equal to Expected.\n");
} else {
printf("[INCORRECT] Output is not equal to Expected\n");
printf("Ouput (show few results):\n");
for (auto i = 0; i < ::min(10, len); i++) {
printf("%f ", TypeTransform<double, T>(arr1[i]));
}
printf("\n");
printf("Expected:\n");
for (auto i = 0; i < ::min(10, len); i++) {
printf("%f ", TypeTransform<double, T>(arr2[i]));
}
printf("\n");
}
printf("AVERAGE_ERROR = %f\n", total_err / (double)len);
}
// Kahan summation for single thread Sum implement.
// More info in 'test_kahan.cc'
template <typename T>
__global__ void ComputeResultAlongXSingleThread(int x_len, int y_len, T *arr, T *output) {
for (auto j = 0; j < y_len; j++) {
T sum = 0.0;
T low_bits = 0.0;
T lower_val, cropped_sum;
for (auto i = 0; i < x_len; i++) {
lower_val = arr[i + j * x_len] - low_bits;
cropped_sum = sum + lower_val;
low_bits = (cropped_sum - sum) - lower_val;
sum = cropped_sum;
}
output[j] = sum;
}
}
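// Illustrative refactor (a sketch only, not used by the kernels below): the Kahan update
// above can be factored into a small helper. The name KahanAccumulate is hypothetical.
template <typename T>
__host__ __device__ inline void KahanAccumulate(T val, T &sum, T &low_bits) {
  T lower_val = val - low_bits;                // fold in the error carried from the last step
  T cropped_sum = sum + lower_val;             // big + small: low-order bits may be lost here
  low_bits = (cropped_sum - sum) - lower_val;  // recover the lost part and carry it forward
  sum = cropped_sum;
}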
template <typename T>
__global__ void ComputeResultAlongYSingleThread(int x_len, int y_len, T *arr, T *output) {
for (auto i = 0; i < x_len; i++) {
T sum = 0.0;
T low_bits = 0.0;
T lower_val, cropped_sum;
for (auto j = 0; j < y_len; j++) {
lower_val = arr[i + j * x_len] - low_bits;
cropped_sum = sum + lower_val;
low_bits = (cropped_sum - sum) - lower_val;
sum = cropped_sum;
}
output[i] = sum;
}
}
template <typename T, typename ReduceOp>
__global__ void ComputeResultAlongXGPUMultiBlock(int x_len, int y_len, T *arr, T *output, int item_per_thread_x,
int item_per_thread_y, ReduceOp op) {
T T_red_rf[4]; // size must be explicit: 16384 = 4096 * 4 * 1
__shared__ T red_buf[4][1024];
__shared__ T temp_output[4]; // temp storage for output
for (int i = 0; i < 4; ++i) {
temp_output[i] = (T)0.0;
}
for (int i = 0; i < item_per_thread_y; ++i) {
T_red_rf[i] = 0.0;
for (int k = 0; k < item_per_thread_x; ++k) {
if (threadIdx.x + k * blockDim.x + blockIdx.x * blockDim.x * item_per_thread_x < x_len &&
threadIdx.y + i * blockDim.y + blockIdx.y * blockDim.y * item_per_thread_y < y_len) {
T_red_rf[i] += arr[threadIdx.x + k * blockDim.x + blockIdx.x * blockDim.x * item_per_thread_x +
(threadIdx.y + i * blockDim.y + blockIdx.y * blockDim.y * item_per_thread_y) * x_len];
}
}
}
__syncthreads();
for (int i = 0; i < item_per_thread_y; ++i) {
AkgReduce<T, ReduceOp, 1024, REDUCE2D_X>(op, &temp_output[i * blockDim.y + 0], &red_buf[i][0], T_red_rf[i]);
}
__syncthreads();
if (threadIdx.x == 0) {
for (int i = 0; i < item_per_thread_y; ++i) {
AkgAtomicReturn<T, ReduceOp>(
temp_output[i], &output[blockIdx.y * blockDim.y * item_per_thread_y + i * blockDim.y + threadIdx.y], op);
}
}
}
template <typename T, typename ReduceOp>
__global__ void ComputeResultAlongYGPUMultiBlock(int x_len, int y_len, T *arr, T *output, int item_per_thread_x,
int item_per_thread_y, ReduceOp op, int sharedmem_x) {
T T_red_rf[4];
__shared__ T red_buf[4 * 1024];
__shared__ T temp_output[32 * 4];
for (int i = 0; i < 32 * 4; ++i) {
temp_output[i] = (T)0.0;
}
for (int i = 0; i < item_per_thread_x; ++i) { // x is non-reduce-axis
T_red_rf[i] = 0.0;
for (int k = 0; k < item_per_thread_y; ++k) { // here y is reduce-axis
if (threadIdx.x + blockDim.x * i + blockIdx.x * blockDim.x * item_per_thread_x < x_len &&
threadIdx.y + blockDim.y * k + blockIdx.y * blockDim.y * item_per_thread_y < y_len) {
T_red_rf[i] += arr[threadIdx.x + blockDim.x * i + blockIdx.x * blockDim.x * item_per_thread_x +
(threadIdx.y + blockDim.y * k + blockIdx.y * blockDim.y * item_per_thread_y) * x_len]; // row stride is x_len, matching the reference kernels
}
}
}
__syncthreads();
for (int i = 0; i < item_per_thread_x; ++i) {
AkgReduce<T, ReduceOp, 32, REDUCE2D_Y>(op, &temp_output[i * blockDim.x + threadIdx.x], &red_buf[i * 1024],
T_red_rf[i], sharedmem_x);
}
__syncthreads();
if (threadIdx.y == 0) {
for (int i = 0; i < item_per_thread_x; ++i) {
AkgAtomicReturn<T, ReduceOp>(temp_output[i * blockDim.x + threadIdx.x],
&output[blockIdx.x * blockDim.x * item_per_thread_x + blockDim.x * i + threadIdx.x],
op);
}
}
}
template <typename T>
void TestReduce2DAlongX(int x_len, int y_len, string type_name, bool single_block = true, bool verbose = false) {
printf("--- TEST CASE Reduce2DAlongX ---\n X = %d, Y = %d, TYPE = %s\n", x_len, y_len, type_name.c_str());
int input_bytes = x_len * y_len * sizeof(T);
int output_bytes = y_len * sizeof(T);
T *h_I, *d_I, *h_O, *d_O, *expected_h_O, *expected_d_O;
h_I = (T *)malloc(input_bytes);
h_O = (T *)malloc(output_bytes);
expected_h_O = (T *)malloc(output_bytes);
// random initialize
srand(time(0));
for (auto i = 0; i < x_len * y_len; i++) {
h_I[i] = TypeTransform<T, double>((rand() % 10000000) / 10000000.0);
}
if (verbose) {
printf("[VERBOSE] random Input data:\n");
for (auto j = 0; j < y_len; j++) {
for (auto i = 0; i < x_len; i++) {
printf("%f ", TypeTransform<double, T>(h_I[i + j * x_len]));
}
printf("\n");
}
}
for (auto i = 0; i < y_len; i++) {
h_O[i] = TypeTransform<T, double>(0.0);
expected_h_O[i] = TypeTransform<T, double>(0.0);
}
// host to device
GetGpuErr(hipMalloc((void **)&d_I, input_bytes));
GetGpuErr(hipMemcpy((void *)d_I, (void *)h_I, input_bytes, hipMemcpyHostToDevice));
GetGpuErr(hipMalloc((void **)&d_O, output_bytes));
GetGpuErr(hipMemcpy((void *)d_O, (void *)h_O, output_bytes, hipMemcpyHostToDevice));
GetGpuErr(hipMalloc((void **)&expected_d_O, output_bytes));
GetGpuErr(hipMemcpy((void *)expected_d_O, (void *)expected_h_O, output_bytes, hipMemcpyHostToDevice));
// compute single thread results
hipLaunchKernelGGL(( ComputeResultAlongXSingleThread<T>), dim3(1), dim3(1), 0, 0, x_len, y_len, d_I, expected_d_O);
GetGpuErr(hipMemcpy((void *)expected_h_O, (void *)expected_d_O, output_bytes, hipMemcpyDeviceToHost));
dim3 gridSize(8, 4096);
dim3 blockSize(1024, 1);
int item_per_block_x = (x_len - 1) / gridSize.x + 1;
int item_per_thread_x = (item_per_block_x - 1) / blockSize.x + 1;
int item_per_block_y = (y_len - 1) / gridSize.y + 1;
int item_per_thread_y = (item_per_block_y - 1) / blockSize.y + 1;
hipLaunchKernelGGL(( ComputeResultAlongXGPUMultiBlock<T, akg_reduce::SumOp>)
, dim3(gridSize), dim3(blockSize), 0, 0, x_len, y_len, d_I, d_O, item_per_thread_x, item_per_thread_y, akg_reduce::SumOp());
GetGpuErr(hipMemcpy((void *)h_O, (void *)d_O, output_bytes, hipMemcpyDeviceToHost));
// compare GPU with CPU
CompareResults<T>(h_O, expected_h_O, y_len);
GetGpuErr(hipFree(expected_d_O));
GetGpuErr(hipFree(d_O));
GetGpuErr(hipFree(d_I));
free(expected_h_O);
free(h_O);
free(h_I);
printf("--- CASE END ---\n\n");
}
template <typename T>
void TestReduce2DAlongY(int x_len, int y_len, string type_name, bool single_block = true, bool verbose = false) {
printf("--- TEST CASE Reduce2DAlongY ---\n X = %d, Y = %d, TYPE = %s\n", x_len, y_len, type_name.c_str());
int input_bytes = x_len * y_len * sizeof(T);
int output_bytes = x_len * sizeof(T);
T *h_I, *d_I, *h_O, *d_O, *expected_h_O, *expected_d_O;
h_I = (T *)malloc(input_bytes);
h_O = (T *)malloc(output_bytes);
expected_h_O = (T *)malloc(output_bytes);
// random initialize
srand(time(0));
for (auto i = 0; i < x_len * y_len; i++) {
h_I[i] = TypeTransform<T, double>((rand() % 10000000) / 10000000.0);
}
if (verbose) {
printf("[VERBOSE] random Input data:\n");
for (auto j = 0; j < y_len; j++) {
for (auto i = 0; i < x_len; i++) {
printf("%f ", TypeTransform<double, T>(h_I[i + j * x_len]));
}
printf("\n");
}
}
for (auto i = 0; i < x_len; i++) {
h_O[i] = TypeTransform<T, double>(0.0);
expected_h_O[i] = TypeTransform<T, double>(0.0);
}
// host to device
GetGpuErr(hipMalloc((void **)&d_I, input_bytes));
GetGpuErr(hipMemcpy((void *)d_I, (void *)h_I, input_bytes, hipMemcpyHostToDevice));
GetGpuErr(hipMalloc((void **)&d_O, output_bytes));
GetGpuErr(hipMemcpy((void *)d_O, (void *)h_O, output_bytes, hipMemcpyHostToDevice));
GetGpuErr(hipMalloc((void **)&expected_d_O, output_bytes));
GetGpuErr(hipMemcpy((void *)expected_d_O, (void *)expected_h_O, output_bytes, hipMemcpyHostToDevice));
// compute single thread results
hipLaunchKernelGGL(( ComputeResultAlongYSingleThread<T>), dim3(1), dim3(1), 0, 0, x_len, y_len, d_I, expected_d_O);
GetGpuErr(hipMemcpy((void *)expected_h_O, (void *)expected_d_O, output_bytes, hipMemcpyDeviceToHost));
dim3 gridSize(128, 128);
dim3 blockSize(32, 32);
int item_per_block_x = (x_len - 1) / gridSize.x + 1;
int item_per_thread_x = (item_per_block_x - 1) / blockSize.x + 1;
int item_per_block_y = (y_len - 1) / gridSize.y + 1;
int item_per_thread_y = (item_per_block_y - 1) / blockSize.y + 1;
int sharedmem_x = 32;
hipLaunchKernelGGL(( ComputeResultAlongYGPUMultiBlock<T, akg_reduce::SumOp>), dim3(gridSize), dim3(blockSize), 0, 0,
x_len, y_len, d_I, d_O, item_per_thread_x, item_per_thread_y, akg_reduce::SumOp(), sharedmem_x);
GetGpuErr(hipMemcpy((void *)h_O, (void *)d_O, output_bytes, hipMemcpyDeviceToHost));
// compare GPU with CPU
CompareResults<T>(h_O, expected_h_O, x_len);
GetGpuErr(hipFree(expected_d_O));
GetGpuErr(hipFree(d_O));
GetGpuErr(hipFree(d_I));
free(expected_h_O);
free(h_O);
free(h_I);
printf("--- CASE END ---\n\n");
}
int main() {
// TestReduce2DAlongX<int>(128, 8, "int", true);
// TestReduce2DAlongX<half>(128, 8, "half", true);
// TestReduce2DAlongX<float>(128, 8, "float", true);
// TestReduce2DAlongX<double>(128, 8, "double", true);
// TestReduce2DAlongX<int>(128, 8, "int", false);
TestReduce2DAlongX<float>(16384, 16384, "float", false);
// TestReduce2DAlongX<double>(128, 8, "double", false);
// TestReduce2DAlongY<int>(8, 128, "int", true);
// TestReduce2DAlongY<half>(8, 128, "half", true);
// TestReduce2DAlongY<float>(8, 128, "float", true);
// TestReduce2DAlongY<double>(8, 128, "double", true);
// TestReduce2DAlongY<int>(8, 128, "int", false);
// TestReduce2DAlongY<half>(8, 128, "half", false);
TestReduce2DAlongY<float>(16384, 16384, "float", false);
// TestReduce2DAlongY<double>(8, 128, "double", false);
return 0;
}
| f9b7a0729d6dd5346092d87acfffd4dc186bec87.cu | /**
* Copyright 2020 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "../utils/util.cuh"
#include "../algorithm/shared_reduce.cuh"
#include "../reduce.cuh"
#include "../store/store.cuh"
#include <cstdlib>
#include <ctime>
#include <cstdio>
using namespace akg_reduce;
using namespace std;
// File to test reduction of multiple aggregated values per thread,
// including single-block/multi-block reduction along the x/y directions.
template <typename T>
void CompareResults(T *arr1, T *arr2, int len) {
double total_err = 0.0;
bool flag = true;
for (auto i = 0; i < len; i++) {
if (std::abs(TypeTransform<double, T>(arr1[i]) - TypeTransform<double, T>(arr2[i])) > 1e-03) {
flag = false;
}
total_err += std::abs(TypeTransform<double, T>(arr1[i]) - TypeTransform<double, T>(arr2[i]));
}
if (flag) {
printf("[CORRECT] Output is equal to Expected.\n");
} else {
printf("[INCORRECT] Output is not equal to Expected\n");
printf("Ouput (show few results):\n");
for (auto i = 0; i < std::min(10, len); i++) {
printf("%f ", TypeTransform<double, T>(arr1[i]));
}
printf("\n");
printf("Expected:\n");
for (auto i = 0; i < std::min(10, len); i++) {
printf("%f ", TypeTransform<double, T>(arr2[i]));
}
printf("\n");
}
printf("AVERAGE_ERROR = %f\n", total_err / (double)len);
}
// Kahan summation for single thread Sum implement.
// More info in 'test_kahan.cc'
template <typename T>
__global__ void ComputeResultAlongXSingleThread(int x_len, int y_len, T *arr, T *output) {
for (auto j = 0; j < y_len; j++) {
T sum = 0.0;
T low_bits = 0.0;
T lower_val, cropped_sum;
for (auto i = 0; i < x_len; i++) {
lower_val = arr[i + j * x_len] - low_bits;
cropped_sum = sum + lower_val;
low_bits = (cropped_sum - sum) - lower_val;
sum = cropped_sum;
}
output[j] = sum;
}
}
template <typename T>
__global__ void ComputeResultAlongYSingleThread(int x_len, int y_len, T *arr, T *output) {
for (auto i = 0; i < x_len; i++) {
T sum = 0.0;
T low_bits = 0.0;
T lower_val, cropped_sum;
for (auto j = 0; j < y_len; j++) {
lower_val = arr[i + j * x_len] - low_bits;
cropped_sum = sum + lower_val;
low_bits = (cropped_sum - sum) - lower_val;
sum = cropped_sum;
}
output[i] = sum;
}
}
template <typename T, typename ReduceOp>
__global__ void ComputeResultAlongXGPUMultiBlock(int x_len, int y_len, T *arr, T *output, int item_per_thread_x,
int item_per_thread_y, ReduceOp op) {
T T_red_rf[4]; // size must be explicit: 16384 = 4096 * 4 * 1
__shared__ T red_buf[4][1024];
__shared__ T temp_output[4]; // temp storage for output
for (int i = 0; i < 4; ++i) {
temp_output[i] = (T)0.0;
}
for (int i = 0; i < item_per_thread_y; ++i) {
T_red_rf[i] = 0.0;
for (int k = 0; k < item_per_thread_x; ++k) {
if (threadIdx.x + k * blockDim.x + blockIdx.x * blockDim.x * item_per_thread_x < x_len &&
threadIdx.y + i * blockDim.y + blockIdx.y * blockDim.y * item_per_thread_y < y_len) {
T_red_rf[i] += arr[threadIdx.x + k * blockDim.x + blockIdx.x * blockDim.x * item_per_thread_x +
(threadIdx.y + i * blockDim.y + blockIdx.y * blockDim.y * item_per_thread_y) * x_len];
}
}
}
__syncthreads();
for (int i = 0; i < item_per_thread_y; ++i) {
AkgReduce<T, ReduceOp, 1024, REDUCE2D_X>(op, &temp_output[i * blockDim.y + 0], &red_buf[i][0], T_red_rf[i]);
}
__syncthreads();
if (threadIdx.x == 0) {
for (int i = 0; i < item_per_thread_y; ++i) {
AkgAtomicReturn<T, ReduceOp>(
temp_output[i], &output[blockIdx.y * blockDim.y * item_per_thread_y + i * blockDim.y + threadIdx.y], op);
}
}
}
template <typename T, typename ReduceOp>
__global__ void ComputeResultAlongYGPUMultiBlock(int x_len, int y_len, T *arr, T *output, int item_per_thread_x,
int item_per_thread_y, ReduceOp op, int sharedmem_x) {
T T_red_rf[4];
__shared__ T red_buf[4 * 1024];
__shared__ T temp_output[32 * 4];
for (int i = 0; i < 32 * 4; ++i) {
temp_output[i] = (T)0.0;
}
for (int i = 0; i < item_per_thread_x; ++i) { // x is non-reduce-axis
T_red_rf[i] = 0.0;
for (int k = 0; k < item_per_thread_y; ++k) { // here y is reduce-axis
if (threadIdx.x + blockDim.x * i + blockIdx.x * blockDim.x * item_per_thread_x < x_len &&
threadIdx.y + blockDim.y * k + blockIdx.y * blockDim.y * item_per_thread_y < y_len) {
T_red_rf[i] += arr[threadIdx.x + blockDim.x * i + blockIdx.x * blockDim.x * item_per_thread_x +
(threadIdx.y + blockDim.y * k + blockIdx.y * blockDim.y * item_per_thread_y) * x_len]; // row stride is x_len, matching the reference kernels
}
}
}
__syncthreads();
for (int i = 0; i < item_per_thread_x; ++i) {
AkgReduce<T, ReduceOp, 32, REDUCE2D_Y>(op, &temp_output[i * blockDim.x + threadIdx.x], &red_buf[i * 1024],
T_red_rf[i], sharedmem_x);
}
__syncthreads();
if (threadIdx.y == 0) {
for (int i = 0; i < item_per_thread_x; ++i) {
AkgAtomicReturn<T, ReduceOp>(temp_output[i * blockDim.x + threadIdx.x],
&output[blockIdx.x * blockDim.x * item_per_thread_x + blockDim.x * i + threadIdx.x],
op);
}
}
}
template <typename T>
void TestReduce2DAlongX(int x_len, int y_len, string type_name, bool single_block = true, bool verbose = false) {
printf("--- TEST CASE Reduce2DAlongX ---\n X = %d, Y = %d, TYPE = %s\n", x_len, y_len, type_name.c_str());
int input_bytes = x_len * y_len * sizeof(T);
int output_bytes = y_len * sizeof(T);
T *h_I, *d_I, *h_O, *d_O, *expected_h_O, *expected_d_O;
h_I = (T *)malloc(input_bytes);
h_O = (T *)malloc(output_bytes);
expected_h_O = (T *)malloc(output_bytes);
// random initialize
srand(time(0));
for (auto i = 0; i < x_len * y_len; i++) {
h_I[i] = TypeTransform<T, double>((rand() % 10000000) / 10000000.0);
}
if (verbose) {
printf("[VERBOSE] random Input data:\n");
for (auto j = 0; j < y_len; j++) {
for (auto i = 0; i < x_len; i++) {
printf("%f ", TypeTransform<double, T>(h_I[i + j * x_len]));
}
printf("\n");
}
}
for (auto i = 0; i < y_len; i++) {
h_O[i] = TypeTransform<T, double>(0.0);
expected_h_O[i] = TypeTransform<T, double>(0.0);
}
// host to device
GetGpuErr(cudaMalloc((void **)&d_I, input_bytes));
GetGpuErr(cudaMemcpy((void *)d_I, (void *)h_I, input_bytes, cudaMemcpyHostToDevice));
GetGpuErr(cudaMalloc((void **)&d_O, output_bytes));
GetGpuErr(cudaMemcpy((void *)d_O, (void *)h_O, output_bytes, cudaMemcpyHostToDevice));
GetGpuErr(cudaMalloc((void **)&expected_d_O, output_bytes));
GetGpuErr(cudaMemcpy((void *)expected_d_O, (void *)expected_h_O, output_bytes, cudaMemcpyHostToDevice));
// compute single thread results
ComputeResultAlongXSingleThread<T><<<1, 1>>>(x_len, y_len, d_I, expected_d_O);
GetGpuErr(cudaMemcpy((void *)expected_h_O, (void *)expected_d_O, output_bytes, cudaMemcpyDeviceToHost));
dim3 gridSize(8, 4096);
dim3 blockSize(1024, 1);
int item_per_block_x = (x_len - 1) / gridSize.x + 1;
int item_per_thread_x = (item_per_block_x - 1) / blockSize.x + 1;
int item_per_block_y = (y_len - 1) / gridSize.y + 1;
int item_per_thread_y = (item_per_block_y - 1) / blockSize.y + 1;
ComputeResultAlongXGPUMultiBlock<T, akg_reduce::SumOp>
<<<gridSize, blockSize>>>(x_len, y_len, d_I, d_O, item_per_thread_x, item_per_thread_y, akg_reduce::SumOp());
GetGpuErr(cudaMemcpy((void *)h_O, (void *)d_O, output_bytes, cudaMemcpyDeviceToHost));
// compare GPU with CPU
CompareResults<T>(h_O, expected_h_O, y_len);
GetGpuErr(cudaFree(expected_d_O));
GetGpuErr(cudaFree(d_O));
GetGpuErr(cudaFree(d_I));
free(expected_h_O);
free(h_O);
free(h_I);
printf("--- CASE END ---\n\n");
}
template <typename T>
void TestReduce2DAlongY(int x_len, int y_len, string type_name, bool single_block = true, bool verbose = false) {
printf("--- TEST CASE Reduce2DAlongY ---\n X = %d, Y = %d, TYPE = %s\n", x_len, y_len, type_name.c_str());
int input_bytes = x_len * y_len * sizeof(T);
int output_bytes = x_len * sizeof(T);
T *h_I, *d_I, *h_O, *d_O, *expected_h_O, *expected_d_O;
h_I = (T *)malloc(input_bytes);
h_O = (T *)malloc(output_bytes);
expected_h_O = (T *)malloc(output_bytes);
// random initialize
srand(time(0));
for (auto i = 0; i < x_len * y_len; i++) {
h_I[i] = TypeTransform<T, double>((rand() % 10000000) / 10000000.0);
}
if (verbose) {
printf("[VERBOSE] random Input data:\n");
for (auto j = 0; j < y_len; j++) {
for (auto i = 0; i < x_len; i++) {
printf("%f ", TypeTransform<double, T>(h_I[i + j * x_len]));
}
printf("\n");
}
}
for (auto i = 0; i < x_len; i++) {
h_O[i] = TypeTransform<T, double>(0.0);
expected_h_O[i] = TypeTransform<T, double>(0.0);
}
// host to device
GetGpuErr(cudaMalloc((void **)&d_I, input_bytes));
GetGpuErr(cudaMemcpy((void *)d_I, (void *)h_I, input_bytes, cudaMemcpyHostToDevice));
GetGpuErr(cudaMalloc((void **)&d_O, output_bytes));
GetGpuErr(cudaMemcpy((void *)d_O, (void *)h_O, output_bytes, cudaMemcpyHostToDevice));
GetGpuErr(cudaMalloc((void **)&expected_d_O, output_bytes));
GetGpuErr(cudaMemcpy((void *)expected_d_O, (void *)expected_h_O, output_bytes, cudaMemcpyHostToDevice));
// compute single thread results
ComputeResultAlongYSingleThread<T><<<1, 1>>>(x_len, y_len, d_I, expected_d_O);
GetGpuErr(cudaMemcpy((void *)expected_h_O, (void *)expected_d_O, output_bytes, cudaMemcpyDeviceToHost));
dim3 gridSize(128, 128);
dim3 blockSize(32, 32);
int item_per_block_x = (x_len - 1) / gridSize.x + 1;
int item_per_thread_x = (item_per_block_x - 1) / blockSize.x + 1;
int item_per_block_y = (y_len - 1) / gridSize.y + 1;
int item_per_thread_y = (item_per_block_y - 1) / blockSize.y + 1;
int sharedmem_x = 32;
ComputeResultAlongYGPUMultiBlock<T, akg_reduce::SumOp><<<gridSize, blockSize>>>(
x_len, y_len, d_I, d_O, item_per_thread_x, item_per_thread_y, akg_reduce::SumOp(), sharedmem_x);
GetGpuErr(cudaMemcpy((void *)h_O, (void *)d_O, output_bytes, cudaMemcpyDeviceToHost));
// compare GPU with CPU
CompareResults<T>(h_O, expected_h_O, x_len);
GetGpuErr(cudaFree(expected_d_O));
GetGpuErr(cudaFree(d_O));
GetGpuErr(cudaFree(d_I));
free(expected_h_O);
free(h_O);
free(h_I);
printf("--- CASE END ---\n\n");
}
int main() {
// TestReduce2DAlongX<int>(128, 8, "int", true);
// TestReduce2DAlongX<half>(128, 8, "half", true);
// TestReduce2DAlongX<float>(128, 8, "float", true);
// TestReduce2DAlongX<double>(128, 8, "double", true);
// TestReduce2DAlongX<int>(128, 8, "int", false);
TestReduce2DAlongX<float>(16384, 16384, "float", false);
// TestReduce2DAlongX<double>(128, 8, "double", false);
// TestReduce2DAlongY<int>(8, 128, "int", true);
// TestReduce2DAlongY<half>(8, 128, "half", true);
// TestReduce2DAlongY<float>(8, 128, "float", true);
// TestReduce2DAlongY<double>(8, 128, "double", true);
// TestReduce2DAlongY<int>(8, 128, "int", false);
// TestReduce2DAlongY<half>(8, 128, "half", false);
TestReduce2DAlongY<float>(16384, 16384, "float", false);
// TestReduce2DAlongY<double>(8, 128, "double", false);
return 0;
}
|