Dataset columns (string length ranges):

  hip_filename   string, 5 to 84
  hip_content    string, 79 to 9.69M
  cuda_filename  string, 4 to 83
  cuda_content   string, 19 to 9.69M
hip_filename: 088c1acf80f1ed8c1f16381bbc80f2c977a38cf0.hip
hip_content:
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // TODO(ataei): reduce the apparent redundancy of all the code below. #include "caffe2/operators/pool_op.h" #include <cfloat> #include "caffe2/core/context_gpu.h" namespace caffe2 { namespace { struct AveragePool { explicit AveragePool(const OperatorBase& /* op */) {} }; struct MaxPool { explicit MaxPool(const OperatorBase& /* op */) {} }; template <typename T> __global__ void AveragePool1DForwardNCHWCUDAKernel( const int K, const int X_size, const int Y_size, const int kernel, const int stride, const int pad, const bool count_include_pad, const T* X, T* Y) { const int nc = blockIdx.x / K; const int block = blockIdx.x % K; const T* X_ptr = X + nc * X_size; T* Y_ptr = Y + nc * Y_size; const int y = threadIdx.x + block * CAFFE_CUDA_NUM_THREADS; if (y < Y_size) { const int x = y * stride; const int l = max(x - pad, 0); const int r = min(x - pad + kernel, X_size); const T scale = T(1) / static_cast<T>(count_include_pad ? kernel : r - l); T sum = 0; for (int i = l; i < r; ++i) { sum += X_ptr[i]; } Y_ptr[y] = sum * scale; } } template <typename T> __global__ void AveragePool1DForwardNHWCCUDAKernel( const int C, const int X_size, const int Y_size, const int kernel, const int stride, const int pad, const bool count_include_pad, const T* X, T* Y) { const int n = blockIdx.x / Y_size; const int y = blockIdx.x % Y_size; const int x = y * stride; const int l = max(x - pad, 0); const int r = min(x - pad + kernel, X_size); const T scale = T(1) / static_cast<T>(count_include_pad ? kernel : r - l); const T* X_ptr = X + n * X_size * C; T* Y_ptr = Y + n * Y_size * C; for (int c = threadIdx.x; c < C; c += blockDim.x) { T sum = 0; for (int i = l; i < r; ++i) { sum += X_ptr[i * C + c]; } Y_ptr[y * C + c] = sum * scale; } } template <typename T> __global__ void AveragePool2DForwardNCHWCUDAKernel( const int K, const int X_H, const int X_W, const int Y_H, const int Y_W, const int kernel_h, const int kernel_w, const int stride_h, const int stride_w, const int pad_t, const int pad_l, const bool count_include_pad, const T* X, T* Y) { const int X_HxW = X_H * X_W; const int Y_HxW = Y_H * Y_W; const int nc = blockIdx.x / K; const int block = blockIdx.x % K; const T* X_ptr = X + nc * X_HxW; T* Y_ptr = Y + nc * Y_HxW; const int y = threadIdx.x + block * CAFFE_CUDA_NUM_THREADS; if (y < Y_HxW) { const int yh = y / Y_W; const int yw = y % Y_W; const int xh = yh * stride_h; const int xw = yw * stride_w; const int t = max(xh - pad_t, 0); const int b = min(xh - pad_t + kernel_h, X_H); const int l = max(xw - pad_l, 0); const int r = min(xw - pad_l + kernel_w, X_W); const T scale = T(1) / static_cast<T>(count_include_pad ? 
kernel_h * kernel_w : (b - t) * (r - l)); T sum = 0; for (int i = t; i < b; ++i) { for (int j = l; j < r; ++j) { sum += X_ptr[i * X_W + j]; } } Y_ptr[y] = sum * scale; } } template <typename T> __global__ void AveragePool2DForwardNHWCCUDAKernel( const int C, const int X_H, const int X_W, const int Y_H, const int Y_W, const int kernel_h, const int kernel_w, const int stride_h, const int stride_w, const int pad_t, const int pad_l, const bool count_include_pad, const T* X, T* Y) { const int X_HxW = X_H * X_W; const int Y_HxW = Y_H * Y_W; const int n = blockIdx.x / Y_HxW; const int y = blockIdx.x % Y_HxW; const int yh = y / Y_W; const int yw = y % Y_W; const int xh = yh * stride_h; const int xw = yw * stride_w; const int t = max(xh - pad_t, 0); const int b = min(xh - pad_t + kernel_h, X_H); const int l = max(xw - pad_l, 0); const int r = min(xw - pad_l + kernel_w, X_W); const T scale = T(1) / static_cast<T>(count_include_pad ? kernel_h * kernel_w : (b - t) * (r - l)); const T* X_ptr = X + n * X_HxW * C; T* Y_ptr = Y + n * Y_HxW * C; for (int c = threadIdx.x; c < C; c += blockDim.x) { T sum = 0; for (int i = t; i < b; ++i) { for (int j = l; j < r; ++j) { sum += X_ptr[(i * X_W + j) * C + c]; } } Y_ptr[y * C + c] = sum * scale; } } template <typename T> __global__ void AveragePool3DForwardNCHWCUDAKernel( const int K, const int X_D, const int X_H, const int X_W, const int Y_D, const int Y_H, const int Y_W, const int kernel_d, const int kernel_h, const int kernel_w, const int stride_d, const int stride_h, const int stride_w, const int pad_p, const int pad_t, const int pad_l, const bool count_include_pad, const T* X, T* Y) { const int X_HxW = X_D * X_H * X_W; const int Y_HxW = Y_D * Y_H * Y_W; const int nc = blockIdx.x / K; const int block = blockIdx.x % K; const T* X_ptr = X + nc * X_HxW; T* Y_ptr = Y + nc * Y_HxW; const int y = threadIdx.x + block * CAFFE_CUDA_NUM_THREADS; if (y < Y_HxW) { const int yy = y / Y_W; const int yw = y % Y_W; const int yh = yy % Y_H; const int yd = yy / Y_H; const int xd = yd * stride_d; const int xh = yh * stride_h; const int xw = yw * stride_w; const int p = max(xd - pad_p, 0); const int a = min(xd - pad_p + kernel_h, X_D); const int t = max(xh - pad_t, 0); const int b = min(xh - pad_t + kernel_h, X_H); const int l = max(xw - pad_l, 0); const int r = min(xw - pad_l + kernel_w, X_W); const T scale = T(1) / static_cast<T>(count_include_pad ? 
kernel_d * kernel_h * kernel_w : (a - p) * (b - t) * (r - l)); T sum = 0; for (int i = p; i < a; ++i) { for (int j = t; j < b; ++j) { for (int k = l; k < r; ++k) { sum += X_ptr[(i * X_H + j) * X_W + k]; } } } Y_ptr[y] = sum * scale; } } template <typename T> __global__ void AveragePool3DForwardNHWCCUDAKernel( const int C, const int X_D, const int X_H, const int X_W, const int Y_D, const int Y_H, const int Y_W, const int kernel_d, const int kernel_h, const int kernel_w, const int stride_d, const int stride_h, const int stride_w, const int pad_p, const int pad_t, const int pad_l, const bool count_include_pad, const T* X, T* Y) { const int X_HxW = X_D * X_H * X_W; const int Y_HxW = Y_D * Y_H * Y_W; const int n = blockIdx.x / Y_HxW; const int y = blockIdx.x % Y_HxW; const int yy = y / Y_W; const int yw = y % Y_W; const int yh = yy % Y_H; const int yd = yy / Y_H; const int xd = yd * stride_d; const int xh = yh * stride_h; const int xw = yw * stride_w; const int p = max(xd - pad_p, 0); const int a = min(xd - pad_p + kernel_h, X_D); const int t = max(xh - pad_t, 0); const int b = min(xh - pad_t + kernel_h, X_H); const int l = max(xw - pad_l, 0); const int r = min(xw - pad_l + kernel_w, X_W); const T scale = T(1) / static_cast<T>(count_include_pad ? kernel_d * kernel_h * kernel_w : (a - p) * (b - t) * (r - l)); const T* X_ptr = X + n * X_HxW * C; T* Y_ptr = Y + n * Y_HxW * C; for (int c = threadIdx.x; c < C; c += blockDim.x) { T sum = 0; for (int i = p; i < a; ++i) { for (int j = t; j < b; ++j) { for (int k = l; k < r; ++k) { sum += X_ptr[((i * X_H + j) * X_W + k) * C + c]; } } } Y_ptr[y * C + c] = sum * scale; } } template <typename T> __global__ void Ave1DPoolBackwardNCHW( const int nthreads, const T* const top_diff, const int num, const int channels, const int height, const int pooled_height, const int kernel_h, const int stride_h, const int pad_t, T* const bottom_diff) { CUDA_1D_KERNEL_LOOP(index, nthreads) { // find out the local index // find out the local offset const int h = index % height + pad_t; const int c = (index / height) % channels; const int n = index / height / channels; const int phstart = (h < kernel_h) ? 0 : (h - kernel_h) / stride_h + 1; const int phend = min(h / stride_h + 1, pooled_height); T gradient = 0; const T* const top_diff_slice = top_diff + (n * channels + c) * pooled_height; for (int ph = phstart; ph < phend; ++ph) { // figure out the pooling size int hstart = ph * stride_h - pad_t; int hend = min(hstart + kernel_h, height); hstart = max(hstart, 0); int pool_size = (hend - hstart); gradient += top_diff_slice[ph] / pool_size; } bottom_diff[index] = gradient; } } template <typename T> __global__ void Ave2DPoolBackwardNCHW( const int nthreads, const T* const top_diff, const int num, const int channels, const int height, const int width, const int pooled_height, const int pooled_width, const int kernel_h, const int kernel_w, const int stride_h, const int stride_w, const int pad_t, const int pad_l, T* const bottom_diff) { CUDA_1D_KERNEL_LOOP(index, nthreads) { // find out the local index // find out the local offset const int w = index % width + pad_l; const int h = (index / width) % height + pad_t; const int c = (index / width / height) % channels; const int n = index / width / height / channels; const int phstart = (h < kernel_h) ? 0 : (h - kernel_h) / stride_h + 1; const int phend = min(h / stride_h + 1, pooled_height); const int pwstart = (w < kernel_w) ? 
0 : (w - kernel_w) / stride_w + 1; const int pwend = min(w / stride_w + 1, pooled_width); T gradient = 0; const T* const top_diff_slice = top_diff + (n * channels + c) * pooled_height * pooled_width; for (int ph = phstart; ph < phend; ++ph) { for (int pw = pwstart; pw < pwend; ++pw) { // figure out the pooling size int hstart = ph * stride_h - pad_t; int wstart = pw * stride_w - pad_l; int hend = min(hstart + kernel_h, height); int wend = min(wstart + kernel_w, width); hstart = max(hstart, 0); wstart = max(wstart, 0); int pool_size = (hend - hstart) * (wend - wstart); gradient += top_diff_slice[ph * pooled_width + pw] / pool_size; } } bottom_diff[index] = gradient; } } template <typename T> __global__ void Ave3DPoolBackwardNCHW( const int nthreads, const T* const top_diff, const int num, const int channels, const int height, const int width, const int depth, const int pooled_height, const int pooled_width, const int pooled_depth, const int kernel_h, const int kernel_w, const int kernel_d, const int stride_h, const int stride_w, const int stride_d, const int pad_t, const int pad_l, const int pad_f, T* const bottom_diff) { CUDA_1D_KERNEL_LOOP(index, nthreads) { // find out the local index // find out the local offset const int d = index % depth + pad_f; const int w = (index / depth) % width + pad_l; const int h = (index / depth / width) % height + pad_t; const int c = (index / depth / width / height) % channels; const int n = index / depth / width / height / channels; const int phstart = (h < kernel_h) ? 0 : (h - kernel_h) / stride_h + 1; const int phend = min(h / stride_h + 1, pooled_height); const int pwstart = (w < kernel_w) ? 0 : (w - kernel_w) / stride_w + 1; const int pwend = min(w / stride_w + 1, pooled_width); const int pdstart = (d < kernel_d) ? 0 : (d - kernel_d) / stride_d + 1; const int pdend = min(d / stride_d + 1, pooled_depth); T gradient = 0; const T* const top_diff_slice = top_diff + (n * channels + c) * pooled_height * pooled_width * pooled_depth; for (int ph = phstart; ph < phend; ++ph) { for (int pw = pwstart; pw < pwend; ++pw) { for (int pd = pdstart; pd < pdend; ++pd) { // figure out the pooling size int hstart = ph * stride_h - pad_t; int wstart = pw * stride_w - pad_l; int dstart = pd * stride_d - pad_f; int hend = min(hstart + kernel_h, height); int wend = min(wstart + kernel_w, width); int dend = min(dstart + kernel_d, depth); hstart = max(hstart, 0); wstart = max(wstart, 0); dstart = max(dstart, 0); int pool_size = (hend - hstart) * (wend - wstart) * (dend - dstart); const int pooled_index = ph * pooled_depth * pooled_width + pooled_depth * pw + pd; gradient += top_diff_slice[pooled_index] / pool_size; } } } bottom_diff[index] = gradient; } } template <typename T> __global__ void Ave1DPoolBackwardNHWC( const int nthreads, const T* const top_diff, const int num, const int height, const int channels, const int pooled_height, const int kernel_h, const int stride_h, const int pad_t, T* const bottom_diff) { CUDA_1D_KERNEL_LOOP(index, nthreads) { // find out the local index // find out the local offset const int c = index % channels; const int h = (index / channels) % height + pad_t; const int n = index / channels / height; const int phstart = (h < kernel_h) ? 
0 : (h - kernel_h) / stride_h + 1; const int phend = min(h / stride_h + 1, pooled_height); T gradient = 0; const T* const top_diff_slice = top_diff + n * pooled_height * channels + c; for (int ph = phstart; ph < phend; ++ph) { // figure out the pooling size int hstart = ph * stride_h - pad_t; int hend = min(hstart + kernel_h, height); hstart = max(hstart, 0); int pool_size = (hend - hstart); gradient += top_diff_slice[ph * channels] / pool_size; } bottom_diff[index] = gradient; } } template <typename T> __global__ void Ave2DPoolBackwardNHWC( const int nthreads, const T* const top_diff, const int num, const int height, const int width, const int channels, const int pooled_height, const int pooled_width, const int kernel_h, const int kernel_w, const int stride_h, const int stride_w, const int pad_t, const int pad_l, T* const bottom_diff) { CUDA_1D_KERNEL_LOOP(index, nthreads) { // find out the local index // find out the local offset const int c = index % channels; const int w = index / channels % width + pad_l; const int h = (index / channels / width) % height + pad_t; const int n = index / channels / width / height; const int phstart = (h < kernel_h) ? 0 : (h - kernel_h) / stride_h + 1; const int phend = min(h / stride_h + 1, pooled_height); const int pwstart = (w < kernel_w) ? 0 : (w - kernel_w) / stride_w + 1; const int pwend = min(w / stride_w + 1, pooled_width); T gradient = 0; const T* const top_diff_slice = top_diff + n * pooled_height * pooled_width * channels + c; for (int ph = phstart; ph < phend; ++ph) { for (int pw = pwstart; pw < pwend; ++pw) { // figure out the pooling size int hstart = ph * stride_h - pad_t; int wstart = pw * stride_w - pad_l; int hend = min(hstart + kernel_h, height); int wend = min(wstart + kernel_w, width); hstart = max(hstart, 0); wstart = max(wstart, 0); int pool_size = (hend - hstart) * (wend - wstart); gradient += top_diff_slice[(ph * pooled_width + pw) * channels] / pool_size; } } bottom_diff[index] = gradient; } } template <typename T> __global__ void Ave3DPoolBackwardNHWC( const int nthreads, const T* const top_diff, const int num, const int height, const int width, const int depth, const int channels, const int pooled_height, const int pooled_width, const int pooled_depth, const int kernel_h, const int kernel_w, const int kernel_d, const int stride_h, const int stride_w, const int stride_d, const int pad_t, const int pad_l, const int pad_f, T* const bottom_diff) { CUDA_1D_KERNEL_LOOP(index, nthreads) { // find out the local index // find out the local offset const int c = index % channels; const int d = index / channels % depth + pad_f; const int w = (index / channels / depth) % width + pad_l; const int h = (index / channels / depth / width) % height + pad_t; const int n = index / channels / depth / width / height; const int phstart = (h < kernel_h) ? 0 : (h - kernel_h) / stride_h + 1; const int phend = min(h / stride_h + 1, pooled_height); const int pwstart = (w < kernel_w) ? 0 : (w - kernel_w) / stride_w + 1; const int pwend = min(w / stride_w + 1, pooled_width); const int pdstart = (d < kernel_d) ? 
0 : (d - kernel_d) / stride_d + 1; const int pdend = min(d / stride_d + 1, pooled_depth); T gradient = 0; const T* const top_diff_slice = top_diff + n * pooled_height * pooled_width * pooled_depth * channels + c; for (int ph = phstart; ph < phend; ++ph) { for (int pw = pwstart; pw < pwend; ++pw) { for (int pd = pdstart; pd < pdend; ++pd) { // figure out the pooling size int hstart = ph * stride_h - pad_t; int wstart = pw * stride_w - pad_l; int dstart = pd * stride_d - pad_f; int hend = min(hstart + kernel_h, height); int wend = min(wstart + kernel_w, width); int dend = min(dstart + kernel_d, depth); hstart = max(hstart, 0); wstart = max(wstart, 0); dstart = max(dstart, 0); int pool_size = (hend - hstart) * (wend - wstart) * (dend - dstart); const int pooled_index = (ph * pooled_depth * pooled_width + pw * pooled_depth + pd) * channels; gradient += top_diff_slice[pooled_index] / pool_size; } } } bottom_diff[index] = gradient; } } } // namespace template <> template <> bool AveragePoolFunctor<CUDAContext>::Forward<float, StorageOrder::NCHW, 1>( const int N, const int C, const std::array<int, 1>& X_dims, const std::array<int, 1>& Y_dims, const std::array<int, 1>& kernel, const std::array<int, 1>& /* dilation */, const std::array<int, 1>& stride, const std::array<int, 2>& pads, const float* X, float* Y, CUDAContext* context) const { const int K = (Y_dims[0] + CAFFE_CUDA_NUM_THREADS - 1) / CAFFE_CUDA_NUM_THREADS; hipLaunchKernelGGL(( AveragePool1DForwardNCHWCUDAKernel<float>) , dim3(N * C * K), dim3(CAFFE_CUDA_NUM_THREADS), 0, context->cuda_stream(), K, X_dims[0], Y_dims[0], kernel[0], stride[0], pads[0], count_include_pad, X, Y); return true; } template <> template <> bool AveragePoolFunctor<CUDAContext>::Forward<float, StorageOrder::NHWC, 1>( const int N, const int C, const std::array<int, 1>& X_dims, const std::array<int, 1>& Y_dims, const std::array<int, 1>& kernel, const std::array<int, 1>& /* dilation */, const std::array<int, 1>& stride, const std::array<int, 2>& pads, const float* X, float* Y, CUDAContext* context) const { hipLaunchKernelGGL(( AveragePool1DForwardNHWCCUDAKernel<float>) , dim3(N * Y_dims[0]), dim3(CAFFE_CUDA_NUM_THREADS), 0, context->cuda_stream(), C, X_dims[0], Y_dims[0], kernel[0], stride[0], pads[0], count_include_pad, X, Y); return true; } template <> template <> bool AveragePoolFunctor<CUDAContext>::Forward<float, StorageOrder::NCHW, 2>( const int N, const int C, const std::array<int, 2>& X_dims, const std::array<int, 2>& Y_dims, const std::array<int, 2>& kernel, const std::array<int, 2>& /* dilation */, const std::array<int, 2>& stride, const std::array<int, 4>& pads, const float* X, float* Y, CUDAContext* context) const { const int Y_HxW = Y_dims[0] * Y_dims[1]; const int K = (Y_HxW + CAFFE_CUDA_NUM_THREADS - 1) / CAFFE_CUDA_NUM_THREADS; hipLaunchKernelGGL(( AveragePool2DForwardNCHWCUDAKernel<float>) , dim3(N * C * K), dim3(CAFFE_CUDA_NUM_THREADS), 0, context->cuda_stream(), K, X_dims[0], X_dims[1], Y_dims[0], Y_dims[1], kernel[0], kernel[1], stride[0], stride[1], pads[0], pads[1], count_include_pad, X, Y); return true; } template <> template <> bool AveragePoolFunctor<CUDAContext>::Forward<float, StorageOrder::NHWC, 2>( const int N, const int C, const std::array<int, 2>& X_dims, const std::array<int, 2>& Y_dims, const std::array<int, 2>& kernel, const std::array<int, 2>& /* dilation */, const std::array<int, 2>& stride, const std::array<int, 4>& pads, const float* X, float* Y, CUDAContext* context) const { const int Y_HxW = Y_dims[0] * Y_dims[1]; 
hipLaunchKernelGGL(( AveragePool2DForwardNHWCCUDAKernel<float>) , dim3(N * Y_HxW), dim3(CAFFE_CUDA_NUM_THREADS), 0, context->cuda_stream(), C, X_dims[0], X_dims[1], Y_dims[0], Y_dims[1], kernel[0], kernel[1], stride[0], stride[1], pads[0], pads[1], count_include_pad, X, Y); return true; } template <> template <> bool AveragePoolFunctor<CUDAContext>::Forward<float, StorageOrder::NCHW, 3>( const int N, const int C, const std::array<int, 3>& X_dims, const std::array<int, 3>& Y_dims, const std::array<int, 3>& kernel, const std::array<int, 3>& /* dilation */, const std::array<int, 3>& stride, const std::array<int, 6>& pads, const float* X, float* Y, CUDAContext* context) const { const int Y_HxW = Y_dims[0] * Y_dims[1] * Y_dims[2]; const int K = (Y_HxW + CAFFE_CUDA_NUM_THREADS - 1) / CAFFE_CUDA_NUM_THREADS; hipLaunchKernelGGL(( AveragePool3DForwardNCHWCUDAKernel<float>) , dim3(N * C * K), dim3(CAFFE_CUDA_NUM_THREADS), 0, context->cuda_stream(), K, X_dims[0], X_dims[1], X_dims[2], Y_dims[0], Y_dims[1], Y_dims[2], kernel[0], kernel[1], kernel[2], stride[0], stride[1], stride[2], pads[0], pads[1], pads[2], count_include_pad, X, Y); return true; } template <> template <> bool AveragePoolFunctor<CUDAContext>::Forward<float, StorageOrder::NHWC, 3>( const int N, const int C, const std::array<int, 3>& X_dims, const std::array<int, 3>& Y_dims, const std::array<int, 3>& kernel, const std::array<int, 3>& /* dilation */, const std::array<int, 3>& stride, const std::array<int, 6>& pads, const float* X, float* Y, CUDAContext* context) const { const int Y_HxW = Y_dims[0] * Y_dims[1] * Y_dims[2]; hipLaunchKernelGGL(( AveragePool3DForwardNHWCCUDAKernel<float>) , dim3(N * Y_HxW), dim3(CAFFE_CUDA_NUM_THREADS), 0, context->cuda_stream(), C, X_dims[0], X_dims[1], X_dims[2], Y_dims[0], Y_dims[1], Y_dims[2], kernel[0], kernel[1], kernel[2], stride[0], stride[1], stride[2], pads[0], pads[1], pads[2], count_include_pad, X, Y); return true; } template <> bool PoolGradientOp<float, CUDAContext, AveragePool>:: RunOnDeviceWithOrderNCHW() { auto& X = Input(0); auto& dY = Input(2); CAFFE_ENFORCE_EQ(dY.dim32(1), X.dim32(1)); auto* dX = Output(0); dX->ResizeLike(X); vector<int> dims(X.sizes().begin() + 2, X.sizes().end()); ConvPoolOpBase<CUDAContext>::ComputePads(dims); switch (kernel_.size()) { case 1: hipLaunchKernelGGL(( Ave1DPoolBackwardNCHW<float>) , dim3(CAFFE_GET_BLOCKS(X.size())), dim3(CAFFE_CUDA_NUM_THREADS), 0, context_.cuda_stream(), X.size(), dY.data<float>(), X.dim32(0), X.dim32(1), X.dim32(2), dY.dim32(2), kernel_h(), stride_h(), pad_t(), dX->template mutable_data<float>()); break; case 2: hipLaunchKernelGGL(( Ave2DPoolBackwardNCHW<float>) , dim3(CAFFE_GET_BLOCKS(X.size())), dim3(CAFFE_CUDA_NUM_THREADS), 0, context_.cuda_stream(), X.size(), dY.data<float>(), X.dim32(0), X.dim32(1), X.dim32(2), X.dim32(3), dY.dim32(2), dY.dim32(3), kernel_h(), kernel_w(), stride_h(), stride_w(), pad_t(), pad_l(), dX->template mutable_data<float>()); break; case 3: hipLaunchKernelGGL(( Ave3DPoolBackwardNCHW<float>) , dim3(CAFFE_GET_BLOCKS(X.size())), dim3(CAFFE_CUDA_NUM_THREADS), 0, context_.cuda_stream(), X.size(), dY.data<float>(), X.dim32(0), X.dim32(1), X.dim32(2), X.dim32(3), X.dim32(4), dY.dim32(2), dY.dim32(3), dY.dim32(4), kernel_h(), kernel_w(), kernel_[2], stride_h(), stride_w(), stride_[2], pad_t(), pad_l(), pads_[2], dX->template mutable_data<float>()); break; default: CAFFE_THROW("Unsupported pooling size : ", kernel_.size()); } return true; } template <> bool PoolGradientOp<float, CUDAContext, AveragePool>:: 
RunOnDeviceWithOrderNHWC() { auto& X = Input(0); auto& dY = Input(2); CAFFE_ENFORCE_EQ(X.ndim(), dY.ndim()); CAFFE_ENFORCE_EQ(X.dim32(X.ndim() - 1), dY.dim32(dY.ndim() - 1)); auto* dX = Output(0); dX->ResizeLike(X); vector<int> dims(X.sizes().begin() + 1, X.sizes().end() - 1); ConvPoolOpBase<CUDAContext>::ComputePads(dims); switch (kernel_.size()) { case 1: hipLaunchKernelGGL(( Ave1DPoolBackwardNHWC<float>) , dim3(CAFFE_GET_BLOCKS(X.size())), dim3(CAFFE_CUDA_NUM_THREADS), 0, context_.cuda_stream(), X.size(), dY.data<float>(), X.dim32(0), X.dim32(1), X.dim32(2), dY.dim32(1), kernel_h(), stride_h(), pad_t(), dX->template mutable_data<float>()); break; case 2: hipLaunchKernelGGL(( Ave2DPoolBackwardNHWC<float>) , dim3(CAFFE_GET_BLOCKS(X.size())), dim3(CAFFE_CUDA_NUM_THREADS), 0, context_.cuda_stream(), X.size(), dY.data<float>(), X.dim32(0), X.dim32(1), X.dim32(2), X.dim32(3), dY.dim32(1), dY.dim32(2), kernel_h(), kernel_w(), stride_h(), stride_w(), pad_t(), pad_l(), dX->template mutable_data<float>()); break; case 3: hipLaunchKernelGGL(( Ave3DPoolBackwardNHWC<float>) , dim3(CAFFE_GET_BLOCKS(X.size())), dim3(CAFFE_CUDA_NUM_THREADS), 0, context_.cuda_stream(), X.size(), dY.data<float>(), X.dim32(0), X.dim32(1), X.dim32(2), X.dim32(3), X.dim32(4), dY.dim32(1), dY.dim32(2), dY.dim32(3), kernel_h(), kernel_w(), kernel_[2], stride_h(), stride_w(), stride_[2], pad_t(), pad_l(), pads_[2], dX->template mutable_data<float>()); break; default: CAFFE_THROW("Unsupported pooling size : ", kernel_.size()); } return true; } namespace { template <typename T> __global__ void MaxPool1DForwardNCHW( const int nthreads, const T* bottom_data, const int channels, const int height, const int pooled_height, const int kernel_h, const int stride_h, const int pad_t, T* top_data) { CUDA_1D_KERNEL_LOOP(index, nthreads) { int ph = index % pooled_height; int c = (index / pooled_height) % channels; int n = index / pooled_height / channels; int hstart = ph * stride_h - pad_t; int hend = min(hstart + kernel_h, height); hstart = max(hstart, 0); T maxval = -FLT_MAX; const T* bdata_offset = bottom_data + n * channels * height; for (int h = hstart; h < hend; ++h) { int idx = c * height + h; if (bdata_offset[idx] > maxval) { maxval = bdata_offset[idx]; } } top_data[index] = maxval; } } template <typename T> __global__ void MaxPool2DForwardNCHW( const int nthreads, const T* bottom_data, const int channels, const int height, const int width, const int pooled_height, const int pooled_width, const int kernel_h, const int kernel_w, const int stride_h, const int stride_w, const int pad_t, const int pad_l, T* top_data) { CUDA_1D_KERNEL_LOOP(index, nthreads) { int pw = index % pooled_width; int ph = (index / pooled_width) % pooled_height; int c = (index / pooled_width / pooled_height) % channels; int n = index / pooled_width / pooled_height / channels; int hstart = ph * stride_h - pad_t; int wstart = pw * stride_w - pad_l; int hend = min(hstart + kernel_h, height); int wend = min(wstart + kernel_w, width); hstart = max(hstart, 0); wstart = max(wstart, 0); T maxval = -FLT_MAX; const T* bdata_offset = bottom_data + n * channels * height * width; for (int h = hstart; h < hend; ++h) { for (int w = wstart; w < wend; ++w) { int idx = c * height * width + h * width + w; if (bdata_offset[idx] > maxval) { maxval = bdata_offset[idx]; } } } top_data[index] = maxval; } } template <typename T> __global__ void MaxPool3DForwardNCHW( const int nthreads, const T* bottom_data, const int channels, const int height, const int width, const int depth, const int 
pooled_height, const int pooled_width, const int pooled_depth, const int kernel_h, const int kernel_w, const int kernel_d, const int stride_h, const int stride_w, const int stride_d, const int pad_t, const int pad_l, const int pad_f, T* top_data) { CUDA_1D_KERNEL_LOOP(index, nthreads) { int pd = index % pooled_depth; int pw = (index / pooled_depth) % pooled_width; int ph = (index / pooled_depth / pooled_width) % pooled_height; int c = (index / pooled_depth / pooled_width / pooled_height) % channels; int n = index / pooled_depth / pooled_width / pooled_height / channels; int hstart = ph * stride_h - pad_t; int wstart = pw * stride_w - pad_l; int hend = min(hstart + kernel_h, height); int wend = min(wstart + kernel_w, width); int dstart = pd * stride_d - pad_f; int dend = min(dstart + kernel_d, depth); hstart = max(hstart, 0); wstart = max(wstart, 0); dstart = max(dstart, 0); T maxval = -FLT_MAX; const T* bdata_offset = bottom_data + n * channels * height * width * depth; for (int h = hstart; h < hend; ++h) { for (int w = wstart; w < wend; ++w) { for (int d = dstart; d < dend; ++d) { int idx = ((c * height + h) * width + w) * depth + d; if (bdata_offset[idx] > maxval) { maxval = bdata_offset[idx]; } } } } top_data[index] = maxval; } } template <typename T> __global__ void MaxPool1DForwardNHWC( const int nthreads, const T* bottom_data, const int height, const int channels, const int pooled_height, const int kernel_h, const int stride_h, const int pad_t, T* top_data) { CUDA_1D_KERNEL_LOOP(index, nthreads) { int n = index; int c = n % channels; n /= channels; int hstart = (n % pooled_height) * stride_h - pad_t; n /= pooled_height; int hend = min(hstart + kernel_h, height); hstart = max(hstart, 0); T maxval = -FLT_MAX; const T* bdata_offset = bottom_data + n * height * channels; for (int h = hstart; h < hend; ++h) { int idx = h * channels + c; if (bdata_offset[idx] > maxval) { maxval = bdata_offset[idx]; } } top_data[index] = maxval; } } template <typename T> __global__ void MaxPool2DForwardNHWC( const int nthreads, const T* bottom_data, const int height, const int width, const int channels, const int pooled_height, const int pooled_width, const int kernel_h, const int kernel_w, const int stride_h, const int stride_w, const int pad_t, const int pad_l, T* top_data) { CUDA_1D_KERNEL_LOOP(index, nthreads) { int n = index; int c = n % channels; n /= channels; int wstart = (n % pooled_width) * stride_w - pad_l; n /= pooled_width; int hstart = (n % pooled_height) * stride_h - pad_t; n /= pooled_height; int hend = min(hstart + kernel_h, height); int wend = min(wstart + kernel_w, width); hstart = max(hstart, 0); wstart = max(wstart, 0); T maxval = -FLT_MAX; const T* bdata_offset = bottom_data + n * height * width * channels; for (int h = hstart; h < hend; ++h) { for (int w = wstart; w < wend; ++w) { int idx = (h * width + w) * channels + c; if (bdata_offset[idx] > maxval) { maxval = bdata_offset[idx]; } } } top_data[index] = maxval; } } template <typename T> __global__ void MaxPool3DForwardNHWC( const int nthreads, const T* bottom_data, const int height, const int width, const int depth, const int channels, const int pooled_height, const int pooled_width, const int pooled_depth, const int kernel_h, const int kernel_w, const int kernel_d, const int stride_h, const int stride_w, const int stride_d, const int pad_t, const int pad_l, const int pad_f, T* top_data) { CUDA_1D_KERNEL_LOOP(index, nthreads) { int n = index; int c = n % channels; n /= channels; int dstart = (n % pooled_depth) * stride_d - pad_f; n 
/= pooled_depth; int wstart = (n % pooled_width) * stride_w - pad_l; n /= pooled_width; int hstart = (n % pooled_height) * stride_h - pad_t; n /= pooled_height; int hend = min(hstart + kernel_h, height); int wend = min(wstart + kernel_w, width); int dend = min(dstart + kernel_d, depth); hstart = max(hstart, 0); wstart = max(wstart, 0); dstart = max(dstart, 0); T maxval = -FLT_MAX; const T* bdata_offset = bottom_data + n * height * width * depth * channels; for (int h = hstart; h < hend; ++h) { for (int w = wstart; w < wend; ++w) { for (int d = dstart; d < dend; ++d) { int idx = ((h * width + w) * depth + d) * channels + c; if (bdata_offset[idx] > maxval) { maxval = bdata_offset[idx]; } } } } top_data[index] = maxval; } } template <typename T> __global__ void MaxPool1DBackwardNCHW( const int nthreads, const T* const bottom_data, const T* const top_data, const T* const top_diff, const int num, const int channels, const int height, const int pooled_height, const int kernel_h, const int stride_h, const int pad_t, T* const bottom_diff) { CUDA_1D_KERNEL_LOOP(index, nthreads) { // find out the local index // find out the local offset const int h = index % height + pad_t; const int c = (index / height) % channels; const int n = index / height / channels; const int phstart = (h < kernel_h) ? 0 : (h - kernel_h) / stride_h + 1; const int phend = min(h / stride_h + 1, pooled_height); const int top_offset = (n * channels + c) * pooled_height; bottom_diff[index] = 0; for (int ph = phstart; ph < phend; ++ph) { int top_local_offset = top_offset + ph; if (bottom_data[index] == top_data[top_local_offset]) { bottom_diff[index] += top_diff[top_local_offset]; } } } } template <typename T> __global__ void MaxPool2DBackwardNCHW( const int nthreads, const T* const bottom_data, const T* const top_data, const T* const top_diff, const int num, const int channels, const int height, const int width, const int pooled_height, const int pooled_width, const int kernel_h, const int kernel_w, const int stride_h, const int stride_w, const int pad_t, const int pad_l, T* const bottom_diff) { CUDA_1D_KERNEL_LOOP(index, nthreads) { // find out the local index // find out the local offset const int w = index % width + pad_l; const int h = (index / width) % height + pad_t; const int c = (index / width / height) % channels; const int n = index / width / height / channels; const int phstart = (h < kernel_h) ? 0 : (h - kernel_h) / stride_h + 1; const int phend = min(h / stride_h + 1, pooled_height); const int pwstart = (w < kernel_w) ? 
0 : (w - kernel_w) / stride_w + 1; const int pwend = min(w / stride_w + 1, pooled_width); const int top_offset = (n * channels + c) * pooled_height * pooled_width; bottom_diff[index] = 0; for (int ph = phstart; ph < phend; ++ph) { for (int pw = pwstart; pw < pwend; ++pw) { int top_local_offset = top_offset + ph * pooled_width + pw; if (bottom_data[index] == top_data[top_local_offset]) { bottom_diff[index] += top_diff[top_local_offset]; } } } } } template <typename T> __global__ void MaxPool3DBackwardNCHW( const int nthreads, const T* const bottom_data, const T* const top_data, const T* const top_diff, const int num, const int channels, const int height, const int width, const int depth, const int pooled_height, const int pooled_width, const int pooled_depth, const int kernel_h, const int kernel_w, const int kernel_d, const int stride_h, const int stride_w, const int stride_d, const int pad_t, const int pad_l, const int pad_f, T* const bottom_diff) { CUDA_1D_KERNEL_LOOP(index, nthreads) { // find out the local index // find out the local offset const int d = index % depth + pad_f; const int w = (index / depth) % width + pad_l; const int h = (index / depth / width) % height + pad_t; const int c = (index / depth / width / height) % channels; const int n = index / depth / width / height / channels; const int phstart = (h < kernel_h) ? 0 : (h - kernel_h) / stride_h + 1; const int phend = min(h / stride_h + 1, pooled_height); const int pwstart = (w < kernel_w) ? 0 : (w - kernel_w) / stride_w + 1; const int pwend = min(w / stride_w + 1, pooled_width); const int pdstart = (d < kernel_d) ? 0 : (d - kernel_d) / stride_d + 1; const int pdend = min(d / stride_d + 1, pooled_depth); const int top_offset = (n * channels + c) * pooled_height * pooled_width * pooled_depth; bottom_diff[index] = 0; for (int ph = phstart; ph < phend; ++ph) { for (int pw = pwstart; pw < pwend; ++pw) { for (int pd = pdstart; pd < pdend; ++pd) { int top_local_offset = top_offset + (ph * pooled_width + pw) * pooled_depth + pd; if (bottom_data[index] == top_data[top_local_offset]) { bottom_diff[index] += top_diff[top_local_offset]; } } } } } } template <typename T> __global__ void MaxPool1DBackwardNHWC( const int nthreads, const T* const bottom_data, const T* const top_data, const T* const top_diff, const int num, const int height, const int channels, const int pooled_height, const int kernel_h, const int stride_h, const int pad_t, T* const bottom_diff) { CUDA_1D_KERNEL_LOOP(index, nthreads) { // find out the local index // find out the local offset const int c = index % channels; const int h = (index / channels) % height + pad_t; const int n = index / channels / height; const int phstart = (h < kernel_h) ? 
0 : (h - kernel_h) / stride_h + 1; const int phend = min(h / stride_h + 1, pooled_height); const int top_offset = n * pooled_height * channels + c; bottom_diff[index] = 0; for (int ph = phstart; ph < phend; ++ph) { int top_local_offset = top_offset + ph * channels; if (bottom_data[index] == top_data[top_local_offset]) { bottom_diff[index] += top_diff[top_local_offset]; } } } } template <typename T> __global__ void MaxPool2DBackwardNHWC( const int nthreads, const T* const bottom_data, const T* const top_data, const T* const top_diff, const int num, const int height, const int width, const int channels, const int pooled_height, const int pooled_width, const int kernel_h, const int kernel_w, const int stride_h, const int stride_w, const int pad_t, const int pad_l, T* const bottom_diff) { CUDA_1D_KERNEL_LOOP(index, nthreads) { // find out the local index // find out the local offset const int c = index % channels; const int w = index / channels % width + pad_l; const int h = (index / channels / width) % height + pad_t; const int n = index / channels / width / height; const int phstart = (h < kernel_h) ? 0 : (h - kernel_h) / stride_h + 1; const int phend = min(h / stride_h + 1, pooled_height); const int pwstart = (w < kernel_w) ? 0 : (w - kernel_w) / stride_w + 1; const int pwend = min(w / stride_w + 1, pooled_width); const int top_offset = n * pooled_height * pooled_width * channels + c; bottom_diff[index] = 0; for (int ph = phstart; ph < phend; ++ph) { for (int pw = pwstart; pw < pwend; ++pw) { int top_local_offset = top_offset + (ph * pooled_width + pw) * channels; if (bottom_data[index] == top_data[top_local_offset]) { bottom_diff[index] += top_diff[top_local_offset]; } } } } } template <typename T> __global__ void MaxPool3DBackwardNHWC( const int nthreads, const T* const bottom_data, const T* const top_data, const T* const top_diff, const int num, const int height, const int width, const int depth, const int channels, const int pooled_height, const int pooled_width, const int pooled_depth, const int kernel_h, const int kernel_w, const int kernel_d, const int stride_h, const int stride_w, const int stride_d, const int pad_t, const int pad_l, const int pad_f, T* const bottom_diff) { CUDA_1D_KERNEL_LOOP(index, nthreads) { // find out the local index // find out the local offset const int c = index % channels; const int d = index / channels % depth + pad_f; const int w = (index / depth / channels) % width + pad_l; const int h = (index / channels / depth / width) % height + pad_t; const int n = index / channels / depth / width / height; const int phstart = (h < kernel_h) ? 0 : (h - kernel_h) / stride_h + 1; const int phend = min(h / stride_h + 1, pooled_height); const int pwstart = (w < kernel_w) ? 0 : (w - kernel_w) / stride_w + 1; const int pwend = min(w / stride_w + 1, pooled_width); const int pdstart = (d < kernel_d) ? 
0 : (d - kernel_d) / stride_d + 1; const int pdend = min(d / stride_d + 1, pooled_depth); const int top_offset = n * pooled_height * pooled_width * pooled_depth * channels + c; bottom_diff[index] = 0; for (int ph = phstart; ph < phend; ++ph) { for (int pw = pwstart; pw < pwend; ++pw) { for (int pd = pdstart; pd < pdend; ++pd) { int top_local_offset = top_offset + ((ph * pooled_width + pw) * pooled_depth + d) * channels; if (bottom_data[index] == top_data[top_local_offset]) { bottom_diff[index] += top_diff[top_local_offset]; } } } } } } } // namespace template <> bool PoolOp<float, CUDAContext, MaxPool>::RunOnDeviceWithOrderNCHW() { auto& X = Input(0); auto* Y = Output(0); ConvPoolOpBase<CUDAContext>::SetOutputSize(X, Y, X.dim32(1)); int output_size = Y->size(); switch (kernel_.size()) { case 1: hipLaunchKernelGGL(( MaxPool1DForwardNCHW<float>) , dim3(CAFFE_GET_BLOCKS(output_size)), dim3(CAFFE_CUDA_NUM_THREADS), 0, context_.cuda_stream(), output_size, X.data<float>(), X.dim32(1), X.dim32(2), Y->dim32(2), kernel_h(), stride_h(), pad_t(), Y->template mutable_data<float>()); break; case 2: hipLaunchKernelGGL(( MaxPool2DForwardNCHW<float>) , dim3(CAFFE_GET_BLOCKS(output_size)), dim3(CAFFE_CUDA_NUM_THREADS), 0, context_.cuda_stream(), output_size, X.data<float>(), X.dim32(1), X.dim32(2), X.dim32(3), Y->dim32(2), Y->dim32(3), kernel_h(), kernel_w(), stride_h(), stride_w(), pad_t(), pad_l(), Y->template mutable_data<float>()); break; case 3: hipLaunchKernelGGL(( MaxPool3DForwardNCHW<float>) , dim3(CAFFE_GET_BLOCKS(output_size)), dim3(CAFFE_CUDA_NUM_THREADS), 0, context_.cuda_stream(), output_size, X.data<float>(), X.dim32(1), X.dim32(2), X.dim32(3), X.dim32(4), Y->dim32(2), Y->dim32(3), Y->dim32(4), kernel_h(), kernel_w(), kernel_[2], stride_h(), stride_w(), stride_[2], pad_t(), pad_l(), pads_[2], Y->template mutable_data<float>()); break; default: CAFFE_THROW("Unsupported pooling size : ", kernel_.size()); } return true; } template <> bool PoolOp<float, CUDAContext, MaxPool>::RunOnDeviceWithOrderNHWC() { auto& X = Input(0); auto* Y = Output(0); ConvPoolOpBase<CUDAContext>::SetOutputSize(X, Y, X.dim32(X.ndim() - 1)); int output_size = Y->size(); switch (kernel_.size()) { case 1: hipLaunchKernelGGL(( MaxPool1DForwardNHWC<float>) , dim3(CAFFE_GET_BLOCKS(output_size)), dim3(CAFFE_CUDA_NUM_THREADS), 0, context_.cuda_stream(), output_size, X.data<float>(), X.dim32(1), X.dim32(2), Y->dim32(1), kernel_h(), stride_h(), pad_t(), Y->template mutable_data<float>()); break; case 2: hipLaunchKernelGGL(( MaxPool2DForwardNHWC<float>) , dim3(CAFFE_GET_BLOCKS(output_size)), dim3(CAFFE_CUDA_NUM_THREADS), 0, context_.cuda_stream(), output_size, X.data<float>(), X.dim32(1), X.dim32(2), X.dim32(3), Y->dim32(1), Y->dim32(2), kernel_h(), kernel_w(), stride_h(), stride_w(), pad_t(), pad_l(), Y->template mutable_data<float>()); break; case 3: hipLaunchKernelGGL(( MaxPool3DForwardNHWC<float>) , dim3(CAFFE_GET_BLOCKS(output_size)), dim3(CAFFE_CUDA_NUM_THREADS), 0, context_.cuda_stream(), output_size, X.data<float>(), X.dim32(1), X.dim32(2), X.dim32(3), X.dim32(4), Y->dim32(1), Y->dim32(2), Y->dim32(3), kernel_h(), kernel_w(), kernel_[2], stride_h(), stride_w(), stride_[2], pad_t(), pad_l(), pads_[2], Y->template mutable_data<float>()); break; default: CAFFE_THROW("Unsupported pooling size : ", kernel_.size()); } return true; } template <> bool PoolGradientOp<float, CUDAContext, MaxPool>::RunOnDeviceWithOrderNCHW() { auto& X = Input(0); auto& Y = Input(1); auto& dY = Input(2); CAFFE_ENFORCE_EQ(dY.ndim(), X.ndim()); auto* dX 
= Output(0); dX->ResizeLike(X); vector<int> dims(X.sizes().begin() + 2, X.sizes().end()); ConvPoolOpBase<CUDAContext>::ComputePads(dims); switch (kernel_.size()) { case 1: hipLaunchKernelGGL(( MaxPool1DBackwardNCHW<float>) , dim3(CAFFE_GET_BLOCKS(X.size())), dim3(CAFFE_CUDA_NUM_THREADS), 0, context_.cuda_stream(), X.size(), X.data<float>(), Y.data<float>(), dY.data<float>(), X.dim32(0), X.dim32(1), X.dim32(2), dY.dim32(2), kernel_h(), stride_h(), pad_t(), dX->template mutable_data<float>()); break; case 2: hipLaunchKernelGGL(( MaxPool2DBackwardNCHW<float>) , dim3(CAFFE_GET_BLOCKS(X.size())), dim3(CAFFE_CUDA_NUM_THREADS), 0, context_.cuda_stream(), X.size(), X.data<float>(), Y.data<float>(), dY.data<float>(), X.dim32(0), X.dim32(1), X.dim32(2), X.dim32(3), dY.dim32(2), dY.dim32(3), kernel_h(), kernel_w(), stride_h(), stride_w(), pad_t(), pad_l(), dX->template mutable_data<float>()); break; case 3: hipLaunchKernelGGL(( MaxPool3DBackwardNCHW<float>) , dim3(CAFFE_GET_BLOCKS(X.size())), dim3(CAFFE_CUDA_NUM_THREADS), 0, context_.cuda_stream(), X.size(), X.data<float>(), Y.data<float>(), dY.data<float>(), X.dim32(0), X.dim32(1), X.dim32(2), X.dim32(3), X.dim32(4), dY.dim32(2), dY.dim32(3), dY.dim32(4), kernel_h(), kernel_w(), kernel_[2], stride_h(), stride_w(), stride_[2], pad_t(), pad_l(), pads_[2], dX->template mutable_data<float>()); break; default: CAFFE_THROW("Unsupported pooling size : ", kernel_.size()); } return true; } template <> bool PoolGradientOp<float, CUDAContext, MaxPool>::RunOnDeviceWithOrderNHWC() { auto& X = Input(0); auto& Y = Input(1); auto& dY = Input(2); CAFFE_ENFORCE_EQ(dY.ndim(), X.ndim()); auto* dX = Output(0); dX->ResizeLike(X); vector<int> dims(X.sizes().begin() + 1, X.sizes().end() - 1); ConvPoolOpBase<CUDAContext>::ComputePads(dims); switch (kernel_.size()) { case 1: hipLaunchKernelGGL(( MaxPool1DBackwardNHWC<float>) , dim3(CAFFE_GET_BLOCKS(X.size())), dim3(CAFFE_CUDA_NUM_THREADS), 0, context_.cuda_stream(), X.size(), X.data<float>(), Y.data<float>(), dY.data<float>(), X.dim32(0), X.dim32(1), X.dim32(2), dY.dim32(1), kernel_h(), stride_h(), pad_t(), dX->template mutable_data<float>()); break; case 2: hipLaunchKernelGGL(( MaxPool2DBackwardNHWC<float>) , dim3(CAFFE_GET_BLOCKS(X.size())), dim3(CAFFE_CUDA_NUM_THREADS), 0, context_.cuda_stream(), X.size(), X.data<float>(), Y.data<float>(), dY.data<float>(), X.dim32(0), X.dim32(1), X.dim32(2), X.dim32(3), dY.dim32(1), dY.dim32(2), kernel_h(), kernel_w(), stride_h(), stride_w(), pad_t(), pad_l(), dX->template mutable_data<float>()); break; case 3: hipLaunchKernelGGL(( MaxPool3DBackwardNHWC<float>) , dim3(CAFFE_GET_BLOCKS(X.size())), dim3(CAFFE_CUDA_NUM_THREADS), 0, context_.cuda_stream(), X.size(), X.data<float>(), Y.data<float>(), dY.data<float>(), X.dim32(0), X.dim32(1), X.dim32(2), X.dim32(3), X.dim32(4), dY.dim32(1), dY.dim32(2), dY.dim32(3), kernel_h(), kernel_w(), kernel_[2], stride_h(), stride_w(), stride_[2], pad_t(), pad_l(), pads_[2], dX->template mutable_data<float>()); break; default: CAFFE_THROW("Unsupported pooling size : ", kernel_.size()); } return true; } REGISTER_CUDA_OPERATOR( AveragePool, PoolOp<float, CUDAContext, AveragePoolFunctor<CUDAContext>>); REGISTER_CUDA_OPERATOR( AveragePoolGradient, PoolGradientOp<float, CUDAContext, AveragePool>); REGISTER_CUDA_OPERATOR( AveragePool1D, PoolOp<float, CUDAContext, AveragePoolFunctor<CUDAContext>>); REGISTER_CUDA_OPERATOR( AveragePool1DGradient, PoolGradientOp<float, CUDAContext, AveragePool>); REGISTER_CUDA_OPERATOR( AveragePool2D, PoolOp<float, CUDAContext, 
AveragePoolFunctor<CUDAContext>>); REGISTER_CUDA_OPERATOR( AveragePool2DGradient, PoolGradientOp<float, CUDAContext, AveragePool>); REGISTER_CUDA_OPERATOR( AveragePool3D, PoolOp<float, CUDAContext, AveragePoolFunctor<CUDAContext>>); REGISTER_CUDA_OPERATOR( AveragePool3DGradient, PoolGradientOp<float, CUDAContext, AveragePool>); REGISTER_CUDA_OPERATOR(MaxPool, PoolOp<float, CUDAContext, MaxPool>); REGISTER_CUDA_OPERATOR( MaxPoolGradient, PoolGradientOp<float, CUDAContext, MaxPool>); REGISTER_CUDA_OPERATOR(MaxPool1D, PoolOp<float, CUDAContext, MaxPool>); REGISTER_CUDA_OPERATOR( MaxPool1DGradient, PoolGradientOp<float, CUDAContext, MaxPool>); REGISTER_CUDA_OPERATOR(MaxPool2D, PoolOp<float, CUDAContext, MaxPool>); REGISTER_CUDA_OPERATOR( MaxPool2DGradient, PoolGradientOp<float, CUDAContext, MaxPool>); REGISTER_CUDA_OPERATOR(MaxPool3D, PoolOp<float, CUDAContext, MaxPool>); REGISTER_CUDA_OPERATOR( MaxPool3DGradient, PoolGradientOp<float, CUDAContext, MaxPool>); } // namespace caffe2
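The .hip file above is the hipify output of the .cu file that follows: the kernel bodies, pooling index math, and operator registrations are unchanged, and the systematic difference is that every CUDA triple-chevron kernel launch is rewritten as a hipLaunchKernelGGL call (plus the generated header comment and the hip_runtime include). A minimal sketch of that rewrite, using a hypothetical ScaleKernel rather than any kernel from the files in this row:

// Minimal sketch of the launch-syntax rewrite hipify applies throughout the file above.
// ScaleKernel and LaunchScale are hypothetical stand-ins, not part of pool_op.
#include "hip/hip_runtime.h"

__global__ void ScaleKernel(const int n, const float alpha, const float* x, float* y) {
  const int i = blockIdx.x * blockDim.x + threadIdx.x;
  if (i < n) {
    y[i] = alpha * x[i];
  }
}

void LaunchScale(const int n, const float alpha, const float* x, float* y, hipStream_t stream) {
  const int threads = 128;
  // Same ceil-divide grid math as the K computation in the forward functors above.
  const int blocks = (n + threads - 1) / threads;
  // Original CUDA form, as in the .cu file below:
  //   ScaleKernel<<<blocks, threads, 0, stream>>>(n, alpha, x, y);
  // hipify output, as in the .hip file above:
  hipLaunchKernelGGL((ScaleKernel), dim3(blocks), dim3(threads), 0, stream, n, alpha, x, y);
}

Apart from this launch rewrite, the two files in this row appear byte-for-byte identical.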
cuda_filename: 088c1acf80f1ed8c1f16381bbc80f2c977a38cf0.cu
cuda_content:
// TODO(ataei): reduce the apparent redundancy of all the code below. #include "caffe2/operators/pool_op.h" #include <cfloat> #include "caffe2/core/context_gpu.h" namespace caffe2 { namespace { struct AveragePool { explicit AveragePool(const OperatorBase& /* op */) {} }; struct MaxPool { explicit MaxPool(const OperatorBase& /* op */) {} }; template <typename T> __global__ void AveragePool1DForwardNCHWCUDAKernel( const int K, const int X_size, const int Y_size, const int kernel, const int stride, const int pad, const bool count_include_pad, const T* X, T* Y) { const int nc = blockIdx.x / K; const int block = blockIdx.x % K; const T* X_ptr = X + nc * X_size; T* Y_ptr = Y + nc * Y_size; const int y = threadIdx.x + block * CAFFE_CUDA_NUM_THREADS; if (y < Y_size) { const int x = y * stride; const int l = max(x - pad, 0); const int r = min(x - pad + kernel, X_size); const T scale = T(1) / static_cast<T>(count_include_pad ? kernel : r - l); T sum = 0; for (int i = l; i < r; ++i) { sum += X_ptr[i]; } Y_ptr[y] = sum * scale; } } template <typename T> __global__ void AveragePool1DForwardNHWCCUDAKernel( const int C, const int X_size, const int Y_size, const int kernel, const int stride, const int pad, const bool count_include_pad, const T* X, T* Y) { const int n = blockIdx.x / Y_size; const int y = blockIdx.x % Y_size; const int x = y * stride; const int l = max(x - pad, 0); const int r = min(x - pad + kernel, X_size); const T scale = T(1) / static_cast<T>(count_include_pad ? kernel : r - l); const T* X_ptr = X + n * X_size * C; T* Y_ptr = Y + n * Y_size * C; for (int c = threadIdx.x; c < C; c += blockDim.x) { T sum = 0; for (int i = l; i < r; ++i) { sum += X_ptr[i * C + c]; } Y_ptr[y * C + c] = sum * scale; } } template <typename T> __global__ void AveragePool2DForwardNCHWCUDAKernel( const int K, const int X_H, const int X_W, const int Y_H, const int Y_W, const int kernel_h, const int kernel_w, const int stride_h, const int stride_w, const int pad_t, const int pad_l, const bool count_include_pad, const T* X, T* Y) { const int X_HxW = X_H * X_W; const int Y_HxW = Y_H * Y_W; const int nc = blockIdx.x / K; const int block = blockIdx.x % K; const T* X_ptr = X + nc * X_HxW; T* Y_ptr = Y + nc * Y_HxW; const int y = threadIdx.x + block * CAFFE_CUDA_NUM_THREADS; if (y < Y_HxW) { const int yh = y / Y_W; const int yw = y % Y_W; const int xh = yh * stride_h; const int xw = yw * stride_w; const int t = max(xh - pad_t, 0); const int b = min(xh - pad_t + kernel_h, X_H); const int l = max(xw - pad_l, 0); const int r = min(xw - pad_l + kernel_w, X_W); const T scale = T(1) / static_cast<T>(count_include_pad ? 
kernel_h * kernel_w : (b - t) * (r - l)); T sum = 0; for (int i = t; i < b; ++i) { for (int j = l; j < r; ++j) { sum += X_ptr[i * X_W + j]; } } Y_ptr[y] = sum * scale; } } template <typename T> __global__ void AveragePool2DForwardNHWCCUDAKernel( const int C, const int X_H, const int X_W, const int Y_H, const int Y_W, const int kernel_h, const int kernel_w, const int stride_h, const int stride_w, const int pad_t, const int pad_l, const bool count_include_pad, const T* X, T* Y) { const int X_HxW = X_H * X_W; const int Y_HxW = Y_H * Y_W; const int n = blockIdx.x / Y_HxW; const int y = blockIdx.x % Y_HxW; const int yh = y / Y_W; const int yw = y % Y_W; const int xh = yh * stride_h; const int xw = yw * stride_w; const int t = max(xh - pad_t, 0); const int b = min(xh - pad_t + kernel_h, X_H); const int l = max(xw - pad_l, 0); const int r = min(xw - pad_l + kernel_w, X_W); const T scale = T(1) / static_cast<T>(count_include_pad ? kernel_h * kernel_w : (b - t) * (r - l)); const T* X_ptr = X + n * X_HxW * C; T* Y_ptr = Y + n * Y_HxW * C; for (int c = threadIdx.x; c < C; c += blockDim.x) { T sum = 0; for (int i = t; i < b; ++i) { for (int j = l; j < r; ++j) { sum += X_ptr[(i * X_W + j) * C + c]; } } Y_ptr[y * C + c] = sum * scale; } } template <typename T> __global__ void AveragePool3DForwardNCHWCUDAKernel( const int K, const int X_D, const int X_H, const int X_W, const int Y_D, const int Y_H, const int Y_W, const int kernel_d, const int kernel_h, const int kernel_w, const int stride_d, const int stride_h, const int stride_w, const int pad_p, const int pad_t, const int pad_l, const bool count_include_pad, const T* X, T* Y) { const int X_HxW = X_D * X_H * X_W; const int Y_HxW = Y_D * Y_H * Y_W; const int nc = blockIdx.x / K; const int block = blockIdx.x % K; const T* X_ptr = X + nc * X_HxW; T* Y_ptr = Y + nc * Y_HxW; const int y = threadIdx.x + block * CAFFE_CUDA_NUM_THREADS; if (y < Y_HxW) { const int yy = y / Y_W; const int yw = y % Y_W; const int yh = yy % Y_H; const int yd = yy / Y_H; const int xd = yd * stride_d; const int xh = yh * stride_h; const int xw = yw * stride_w; const int p = max(xd - pad_p, 0); const int a = min(xd - pad_p + kernel_h, X_D); const int t = max(xh - pad_t, 0); const int b = min(xh - pad_t + kernel_h, X_H); const int l = max(xw - pad_l, 0); const int r = min(xw - pad_l + kernel_w, X_W); const T scale = T(1) / static_cast<T>(count_include_pad ? 
kernel_d * kernel_h * kernel_w : (a - p) * (b - t) * (r - l)); T sum = 0; for (int i = p; i < a; ++i) { for (int j = t; j < b; ++j) { for (int k = l; k < r; ++k) { sum += X_ptr[(i * X_H + j) * X_W + k]; } } } Y_ptr[y] = sum * scale; } } template <typename T> __global__ void AveragePool3DForwardNHWCCUDAKernel( const int C, const int X_D, const int X_H, const int X_W, const int Y_D, const int Y_H, const int Y_W, const int kernel_d, const int kernel_h, const int kernel_w, const int stride_d, const int stride_h, const int stride_w, const int pad_p, const int pad_t, const int pad_l, const bool count_include_pad, const T* X, T* Y) { const int X_HxW = X_D * X_H * X_W; const int Y_HxW = Y_D * Y_H * Y_W; const int n = blockIdx.x / Y_HxW; const int y = blockIdx.x % Y_HxW; const int yy = y / Y_W; const int yw = y % Y_W; const int yh = yy % Y_H; const int yd = yy / Y_H; const int xd = yd * stride_d; const int xh = yh * stride_h; const int xw = yw * stride_w; const int p = max(xd - pad_p, 0); const int a = min(xd - pad_p + kernel_h, X_D); const int t = max(xh - pad_t, 0); const int b = min(xh - pad_t + kernel_h, X_H); const int l = max(xw - pad_l, 0); const int r = min(xw - pad_l + kernel_w, X_W); const T scale = T(1) / static_cast<T>(count_include_pad ? kernel_d * kernel_h * kernel_w : (a - p) * (b - t) * (r - l)); const T* X_ptr = X + n * X_HxW * C; T* Y_ptr = Y + n * Y_HxW * C; for (int c = threadIdx.x; c < C; c += blockDim.x) { T sum = 0; for (int i = p; i < a; ++i) { for (int j = t; j < b; ++j) { for (int k = l; k < r; ++k) { sum += X_ptr[((i * X_H + j) * X_W + k) * C + c]; } } } Y_ptr[y * C + c] = sum * scale; } } template <typename T> __global__ void Ave1DPoolBackwardNCHW( const int nthreads, const T* const top_diff, const int num, const int channels, const int height, const int pooled_height, const int kernel_h, const int stride_h, const int pad_t, T* const bottom_diff) { CUDA_1D_KERNEL_LOOP(index, nthreads) { // find out the local index // find out the local offset const int h = index % height + pad_t; const int c = (index / height) % channels; const int n = index / height / channels; const int phstart = (h < kernel_h) ? 0 : (h - kernel_h) / stride_h + 1; const int phend = min(h / stride_h + 1, pooled_height); T gradient = 0; const T* const top_diff_slice = top_diff + (n * channels + c) * pooled_height; for (int ph = phstart; ph < phend; ++ph) { // figure out the pooling size int hstart = ph * stride_h - pad_t; int hend = min(hstart + kernel_h, height); hstart = max(hstart, 0); int pool_size = (hend - hstart); gradient += top_diff_slice[ph] / pool_size; } bottom_diff[index] = gradient; } } template <typename T> __global__ void Ave2DPoolBackwardNCHW( const int nthreads, const T* const top_diff, const int num, const int channels, const int height, const int width, const int pooled_height, const int pooled_width, const int kernel_h, const int kernel_w, const int stride_h, const int stride_w, const int pad_t, const int pad_l, T* const bottom_diff) { CUDA_1D_KERNEL_LOOP(index, nthreads) { // find out the local index // find out the local offset const int w = index % width + pad_l; const int h = (index / width) % height + pad_t; const int c = (index / width / height) % channels; const int n = index / width / height / channels; const int phstart = (h < kernel_h) ? 0 : (h - kernel_h) / stride_h + 1; const int phend = min(h / stride_h + 1, pooled_height); const int pwstart = (w < kernel_w) ? 
0 : (w - kernel_w) / stride_w + 1; const int pwend = min(w / stride_w + 1, pooled_width); T gradient = 0; const T* const top_diff_slice = top_diff + (n * channels + c) * pooled_height * pooled_width; for (int ph = phstart; ph < phend; ++ph) { for (int pw = pwstart; pw < pwend; ++pw) { // figure out the pooling size int hstart = ph * stride_h - pad_t; int wstart = pw * stride_w - pad_l; int hend = min(hstart + kernel_h, height); int wend = min(wstart + kernel_w, width); hstart = max(hstart, 0); wstart = max(wstart, 0); int pool_size = (hend - hstart) * (wend - wstart); gradient += top_diff_slice[ph * pooled_width + pw] / pool_size; } } bottom_diff[index] = gradient; } } template <typename T> __global__ void Ave3DPoolBackwardNCHW( const int nthreads, const T* const top_diff, const int num, const int channels, const int height, const int width, const int depth, const int pooled_height, const int pooled_width, const int pooled_depth, const int kernel_h, const int kernel_w, const int kernel_d, const int stride_h, const int stride_w, const int stride_d, const int pad_t, const int pad_l, const int pad_f, T* const bottom_diff) { CUDA_1D_KERNEL_LOOP(index, nthreads) { // find out the local index // find out the local offset const int d = index % depth + pad_f; const int w = (index / depth) % width + pad_l; const int h = (index / depth / width) % height + pad_t; const int c = (index / depth / width / height) % channels; const int n = index / depth / width / height / channels; const int phstart = (h < kernel_h) ? 0 : (h - kernel_h) / stride_h + 1; const int phend = min(h / stride_h + 1, pooled_height); const int pwstart = (w < kernel_w) ? 0 : (w - kernel_w) / stride_w + 1; const int pwend = min(w / stride_w + 1, pooled_width); const int pdstart = (d < kernel_d) ? 0 : (d - kernel_d) / stride_d + 1; const int pdend = min(d / stride_d + 1, pooled_depth); T gradient = 0; const T* const top_diff_slice = top_diff + (n * channels + c) * pooled_height * pooled_width * pooled_depth; for (int ph = phstart; ph < phend; ++ph) { for (int pw = pwstart; pw < pwend; ++pw) { for (int pd = pdstart; pd < pdend; ++pd) { // figure out the pooling size int hstart = ph * stride_h - pad_t; int wstart = pw * stride_w - pad_l; int dstart = pd * stride_d - pad_f; int hend = min(hstart + kernel_h, height); int wend = min(wstart + kernel_w, width); int dend = min(dstart + kernel_d, depth); hstart = max(hstart, 0); wstart = max(wstart, 0); dstart = max(dstart, 0); int pool_size = (hend - hstart) * (wend - wstart) * (dend - dstart); const int pooled_index = ph * pooled_depth * pooled_width + pooled_depth * pw + pd; gradient += top_diff_slice[pooled_index] / pool_size; } } } bottom_diff[index] = gradient; } } template <typename T> __global__ void Ave1DPoolBackwardNHWC( const int nthreads, const T* const top_diff, const int num, const int height, const int channels, const int pooled_height, const int kernel_h, const int stride_h, const int pad_t, T* const bottom_diff) { CUDA_1D_KERNEL_LOOP(index, nthreads) { // find out the local index // find out the local offset const int c = index % channels; const int h = (index / channels) % height + pad_t; const int n = index / channels / height; const int phstart = (h < kernel_h) ? 
0 : (h - kernel_h) / stride_h + 1; const int phend = min(h / stride_h + 1, pooled_height); T gradient = 0; const T* const top_diff_slice = top_diff + n * pooled_height * channels + c; for (int ph = phstart; ph < phend; ++ph) { // figure out the pooling size int hstart = ph * stride_h - pad_t; int hend = min(hstart + kernel_h, height); hstart = max(hstart, 0); int pool_size = (hend - hstart); gradient += top_diff_slice[ph * channels] / pool_size; } bottom_diff[index] = gradient; } } template <typename T> __global__ void Ave2DPoolBackwardNHWC( const int nthreads, const T* const top_diff, const int num, const int height, const int width, const int channels, const int pooled_height, const int pooled_width, const int kernel_h, const int kernel_w, const int stride_h, const int stride_w, const int pad_t, const int pad_l, T* const bottom_diff) { CUDA_1D_KERNEL_LOOP(index, nthreads) { // find out the local index // find out the local offset const int c = index % channels; const int w = index / channels % width + pad_l; const int h = (index / channels / width) % height + pad_t; const int n = index / channels / width / height; const int phstart = (h < kernel_h) ? 0 : (h - kernel_h) / stride_h + 1; const int phend = min(h / stride_h + 1, pooled_height); const int pwstart = (w < kernel_w) ? 0 : (w - kernel_w) / stride_w + 1; const int pwend = min(w / stride_w + 1, pooled_width); T gradient = 0; const T* const top_diff_slice = top_diff + n * pooled_height * pooled_width * channels + c; for (int ph = phstart; ph < phend; ++ph) { for (int pw = pwstart; pw < pwend; ++pw) { // figure out the pooling size int hstart = ph * stride_h - pad_t; int wstart = pw * stride_w - pad_l; int hend = min(hstart + kernel_h, height); int wend = min(wstart + kernel_w, width); hstart = max(hstart, 0); wstart = max(wstart, 0); int pool_size = (hend - hstart) * (wend - wstart); gradient += top_diff_slice[(ph * pooled_width + pw) * channels] / pool_size; } } bottom_diff[index] = gradient; } } template <typename T> __global__ void Ave3DPoolBackwardNHWC( const int nthreads, const T* const top_diff, const int num, const int height, const int width, const int depth, const int channels, const int pooled_height, const int pooled_width, const int pooled_depth, const int kernel_h, const int kernel_w, const int kernel_d, const int stride_h, const int stride_w, const int stride_d, const int pad_t, const int pad_l, const int pad_f, T* const bottom_diff) { CUDA_1D_KERNEL_LOOP(index, nthreads) { // find out the local index // find out the local offset const int c = index % channels; const int d = index / channels % depth + pad_f; const int w = (index / channels / depth) % width + pad_l; const int h = (index / channels / depth / width) % height + pad_t; const int n = index / channels / depth / width / height; const int phstart = (h < kernel_h) ? 0 : (h - kernel_h) / stride_h + 1; const int phend = min(h / stride_h + 1, pooled_height); const int pwstart = (w < kernel_w) ? 0 : (w - kernel_w) / stride_w + 1; const int pwend = min(w / stride_w + 1, pooled_width); const int pdstart = (d < kernel_d) ? 
0 : (d - kernel_d) / stride_d + 1; const int pdend = min(d / stride_d + 1, pooled_depth); T gradient = 0; const T* const top_diff_slice = top_diff + n * pooled_height * pooled_width * pooled_depth * channels + c; for (int ph = phstart; ph < phend; ++ph) { for (int pw = pwstart; pw < pwend; ++pw) { for (int pd = pdstart; pd < pdend; ++pd) { // figure out the pooling size int hstart = ph * stride_h - pad_t; int wstart = pw * stride_w - pad_l; int dstart = pd * stride_d - pad_f; int hend = min(hstart + kernel_h, height); int wend = min(wstart + kernel_w, width); int dend = min(dstart + kernel_d, depth); hstart = max(hstart, 0); wstart = max(wstart, 0); dstart = max(dstart, 0); int pool_size = (hend - hstart) * (wend - wstart) * (dend - dstart); const int pooled_index = (ph * pooled_depth * pooled_width + pw * pooled_depth + pd) * channels; gradient += top_diff_slice[pooled_index] / pool_size; } } } bottom_diff[index] = gradient; } } } // namespace template <> template <> bool AveragePoolFunctor<CUDAContext>::Forward<float, StorageOrder::NCHW, 1>( const int N, const int C, const std::array<int, 1>& X_dims, const std::array<int, 1>& Y_dims, const std::array<int, 1>& kernel, const std::array<int, 1>& /* dilation */, const std::array<int, 1>& stride, const std::array<int, 2>& pads, const float* X, float* Y, CUDAContext* context) const { const int K = (Y_dims[0] + CAFFE_CUDA_NUM_THREADS - 1) / CAFFE_CUDA_NUM_THREADS; AveragePool1DForwardNCHWCUDAKernel<float> <<<N * C * K, CAFFE_CUDA_NUM_THREADS, 0, context->cuda_stream()>>>( K, X_dims[0], Y_dims[0], kernel[0], stride[0], pads[0], count_include_pad, X, Y); return true; } template <> template <> bool AveragePoolFunctor<CUDAContext>::Forward<float, StorageOrder::NHWC, 1>( const int N, const int C, const std::array<int, 1>& X_dims, const std::array<int, 1>& Y_dims, const std::array<int, 1>& kernel, const std::array<int, 1>& /* dilation */, const std::array<int, 1>& stride, const std::array<int, 2>& pads, const float* X, float* Y, CUDAContext* context) const { AveragePool1DForwardNHWCCUDAKernel<float> <<<N * Y_dims[0], CAFFE_CUDA_NUM_THREADS, 0, context->cuda_stream()>>>( C, X_dims[0], Y_dims[0], kernel[0], stride[0], pads[0], count_include_pad, X, Y); return true; } template <> template <> bool AveragePoolFunctor<CUDAContext>::Forward<float, StorageOrder::NCHW, 2>( const int N, const int C, const std::array<int, 2>& X_dims, const std::array<int, 2>& Y_dims, const std::array<int, 2>& kernel, const std::array<int, 2>& /* dilation */, const std::array<int, 2>& stride, const std::array<int, 4>& pads, const float* X, float* Y, CUDAContext* context) const { const int Y_HxW = Y_dims[0] * Y_dims[1]; const int K = (Y_HxW + CAFFE_CUDA_NUM_THREADS - 1) / CAFFE_CUDA_NUM_THREADS; AveragePool2DForwardNCHWCUDAKernel<float> <<<N * C * K, CAFFE_CUDA_NUM_THREADS, 0, context->cuda_stream()>>>( K, X_dims[0], X_dims[1], Y_dims[0], Y_dims[1], kernel[0], kernel[1], stride[0], stride[1], pads[0], pads[1], count_include_pad, X, Y); return true; } template <> template <> bool AveragePoolFunctor<CUDAContext>::Forward<float, StorageOrder::NHWC, 2>( const int N, const int C, const std::array<int, 2>& X_dims, const std::array<int, 2>& Y_dims, const std::array<int, 2>& kernel, const std::array<int, 2>& /* dilation */, const std::array<int, 2>& stride, const std::array<int, 4>& pads, const float* X, float* Y, CUDAContext* context) const { const int Y_HxW = Y_dims[0] * Y_dims[1]; AveragePool2DForwardNHWCCUDAKernel<float> <<<N * Y_HxW, CAFFE_CUDA_NUM_THREADS, 0, 
context->cuda_stream()>>>( C, X_dims[0], X_dims[1], Y_dims[0], Y_dims[1], kernel[0], kernel[1], stride[0], stride[1], pads[0], pads[1], count_include_pad, X, Y); return true; } template <> template <> bool AveragePoolFunctor<CUDAContext>::Forward<float, StorageOrder::NCHW, 3>( const int N, const int C, const std::array<int, 3>& X_dims, const std::array<int, 3>& Y_dims, const std::array<int, 3>& kernel, const std::array<int, 3>& /* dilation */, const std::array<int, 3>& stride, const std::array<int, 6>& pads, const float* X, float* Y, CUDAContext* context) const { const int Y_HxW = Y_dims[0] * Y_dims[1] * Y_dims[2]; const int K = (Y_HxW + CAFFE_CUDA_NUM_THREADS - 1) / CAFFE_CUDA_NUM_THREADS; AveragePool3DForwardNCHWCUDAKernel<float> <<<N * C * K, CAFFE_CUDA_NUM_THREADS, 0, context->cuda_stream()>>>( K, X_dims[0], X_dims[1], X_dims[2], Y_dims[0], Y_dims[1], Y_dims[2], kernel[0], kernel[1], kernel[2], stride[0], stride[1], stride[2], pads[0], pads[1], pads[2], count_include_pad, X, Y); return true; } template <> template <> bool AveragePoolFunctor<CUDAContext>::Forward<float, StorageOrder::NHWC, 3>( const int N, const int C, const std::array<int, 3>& X_dims, const std::array<int, 3>& Y_dims, const std::array<int, 3>& kernel, const std::array<int, 3>& /* dilation */, const std::array<int, 3>& stride, const std::array<int, 6>& pads, const float* X, float* Y, CUDAContext* context) const { const int Y_HxW = Y_dims[0] * Y_dims[1] * Y_dims[2]; AveragePool3DForwardNHWCCUDAKernel<float> <<<N * Y_HxW, CAFFE_CUDA_NUM_THREADS, 0, context->cuda_stream()>>>( C, X_dims[0], X_dims[1], X_dims[2], Y_dims[0], Y_dims[1], Y_dims[2], kernel[0], kernel[1], kernel[2], stride[0], stride[1], stride[2], pads[0], pads[1], pads[2], count_include_pad, X, Y); return true; } template <> bool PoolGradientOp<float, CUDAContext, AveragePool>:: RunOnDeviceWithOrderNCHW() { auto& X = Input(0); auto& dY = Input(2); CAFFE_ENFORCE_EQ(dY.dim32(1), X.dim32(1)); auto* dX = Output(0); dX->ResizeLike(X); vector<int> dims(X.sizes().begin() + 2, X.sizes().end()); ConvPoolOpBase<CUDAContext>::ComputePads(dims); switch (kernel_.size()) { case 1: Ave1DPoolBackwardNCHW<float> <<<CAFFE_GET_BLOCKS(X.size()), CAFFE_CUDA_NUM_THREADS, 0, context_.cuda_stream()>>>( X.size(), dY.data<float>(), X.dim32(0), X.dim32(1), X.dim32(2), dY.dim32(2), kernel_h(), stride_h(), pad_t(), dX->template mutable_data<float>()); break; case 2: Ave2DPoolBackwardNCHW<float> <<<CAFFE_GET_BLOCKS(X.size()), CAFFE_CUDA_NUM_THREADS, 0, context_.cuda_stream()>>>( X.size(), dY.data<float>(), X.dim32(0), X.dim32(1), X.dim32(2), X.dim32(3), dY.dim32(2), dY.dim32(3), kernel_h(), kernel_w(), stride_h(), stride_w(), pad_t(), pad_l(), dX->template mutable_data<float>()); break; case 3: Ave3DPoolBackwardNCHW<float> <<<CAFFE_GET_BLOCKS(X.size()), CAFFE_CUDA_NUM_THREADS, 0, context_.cuda_stream()>>>( X.size(), dY.data<float>(), X.dim32(0), X.dim32(1), X.dim32(2), X.dim32(3), X.dim32(4), dY.dim32(2), dY.dim32(3), dY.dim32(4), kernel_h(), kernel_w(), kernel_[2], stride_h(), stride_w(), stride_[2], pad_t(), pad_l(), pads_[2], dX->template mutable_data<float>()); break; default: CAFFE_THROW("Unsupported pooling size : ", kernel_.size()); } return true; } template <> bool PoolGradientOp<float, CUDAContext, AveragePool>:: RunOnDeviceWithOrderNHWC() { auto& X = Input(0); auto& dY = Input(2); CAFFE_ENFORCE_EQ(X.ndim(), dY.ndim()); CAFFE_ENFORCE_EQ(X.dim32(X.ndim() - 1), dY.dim32(dY.ndim() - 1)); auto* dX = Output(0); dX->ResizeLike(X); vector<int> dims(X.sizes().begin() + 1, X.sizes().end() - 
1); ConvPoolOpBase<CUDAContext>::ComputePads(dims); switch (kernel_.size()) { case 1: Ave1DPoolBackwardNHWC<float> <<<CAFFE_GET_BLOCKS(X.size()), CAFFE_CUDA_NUM_THREADS, 0, context_.cuda_stream()>>>( X.size(), dY.data<float>(), X.dim32(0), X.dim32(1), X.dim32(2), dY.dim32(1), kernel_h(), stride_h(), pad_t(), dX->template mutable_data<float>()); break; case 2: Ave2DPoolBackwardNHWC<float> <<<CAFFE_GET_BLOCKS(X.size()), CAFFE_CUDA_NUM_THREADS, 0, context_.cuda_stream()>>>( X.size(), dY.data<float>(), X.dim32(0), X.dim32(1), X.dim32(2), X.dim32(3), dY.dim32(1), dY.dim32(2), kernel_h(), kernel_w(), stride_h(), stride_w(), pad_t(), pad_l(), dX->template mutable_data<float>()); break; case 3: Ave3DPoolBackwardNHWC<float> <<<CAFFE_GET_BLOCKS(X.size()), CAFFE_CUDA_NUM_THREADS, 0, context_.cuda_stream()>>>( X.size(), dY.data<float>(), X.dim32(0), X.dim32(1), X.dim32(2), X.dim32(3), X.dim32(4), dY.dim32(1), dY.dim32(2), dY.dim32(3), kernel_h(), kernel_w(), kernel_[2], stride_h(), stride_w(), stride_[2], pad_t(), pad_l(), pads_[2], dX->template mutable_data<float>()); break; default: CAFFE_THROW("Unsupported pooling size : ", kernel_.size()); } return true; } namespace { template <typename T> __global__ void MaxPool1DForwardNCHW( const int nthreads, const T* bottom_data, const int channels, const int height, const int pooled_height, const int kernel_h, const int stride_h, const int pad_t, T* top_data) { CUDA_1D_KERNEL_LOOP(index, nthreads) { int ph = index % pooled_height; int c = (index / pooled_height) % channels; int n = index / pooled_height / channels; int hstart = ph * stride_h - pad_t; int hend = min(hstart + kernel_h, height); hstart = max(hstart, 0); T maxval = -FLT_MAX; const T* bdata_offset = bottom_data + n * channels * height; for (int h = hstart; h < hend; ++h) { int idx = c * height + h; if (bdata_offset[idx] > maxval) { maxval = bdata_offset[idx]; } } top_data[index] = maxval; } } template <typename T> __global__ void MaxPool2DForwardNCHW( const int nthreads, const T* bottom_data, const int channels, const int height, const int width, const int pooled_height, const int pooled_width, const int kernel_h, const int kernel_w, const int stride_h, const int stride_w, const int pad_t, const int pad_l, T* top_data) { CUDA_1D_KERNEL_LOOP(index, nthreads) { int pw = index % pooled_width; int ph = (index / pooled_width) % pooled_height; int c = (index / pooled_width / pooled_height) % channels; int n = index / pooled_width / pooled_height / channels; int hstart = ph * stride_h - pad_t; int wstart = pw * stride_w - pad_l; int hend = min(hstart + kernel_h, height); int wend = min(wstart + kernel_w, width); hstart = max(hstart, 0); wstart = max(wstart, 0); T maxval = -FLT_MAX; const T* bdata_offset = bottom_data + n * channels * height * width; for (int h = hstart; h < hend; ++h) { for (int w = wstart; w < wend; ++w) { int idx = c * height * width + h * width + w; if (bdata_offset[idx] > maxval) { maxval = bdata_offset[idx]; } } } top_data[index] = maxval; } } template <typename T> __global__ void MaxPool3DForwardNCHW( const int nthreads, const T* bottom_data, const int channels, const int height, const int width, const int depth, const int pooled_height, const int pooled_width, const int pooled_depth, const int kernel_h, const int kernel_w, const int kernel_d, const int stride_h, const int stride_w, const int stride_d, const int pad_t, const int pad_l, const int pad_f, T* top_data) { CUDA_1D_KERNEL_LOOP(index, nthreads) { int pd = index % pooled_depth; int pw = (index / pooled_depth) % 
pooled_width; int ph = (index / pooled_depth / pooled_width) % pooled_height; int c = (index / pooled_depth / pooled_width / pooled_height) % channels; int n = index / pooled_depth / pooled_width / pooled_height / channels; int hstart = ph * stride_h - pad_t; int wstart = pw * stride_w - pad_l; int hend = min(hstart + kernel_h, height); int wend = min(wstart + kernel_w, width); int dstart = pd * stride_d - pad_f; int dend = min(dstart + kernel_d, depth); hstart = max(hstart, 0); wstart = max(wstart, 0); dstart = max(dstart, 0); T maxval = -FLT_MAX; const T* bdata_offset = bottom_data + n * channels * height * width * depth; for (int h = hstart; h < hend; ++h) { for (int w = wstart; w < wend; ++w) { for (int d = dstart; d < dend; ++d) { int idx = ((c * height + h) * width + w) * depth + d; if (bdata_offset[idx] > maxval) { maxval = bdata_offset[idx]; } } } } top_data[index] = maxval; } } template <typename T> __global__ void MaxPool1DForwardNHWC( const int nthreads, const T* bottom_data, const int height, const int channels, const int pooled_height, const int kernel_h, const int stride_h, const int pad_t, T* top_data) { CUDA_1D_KERNEL_LOOP(index, nthreads) { int n = index; int c = n % channels; n /= channels; int hstart = (n % pooled_height) * stride_h - pad_t; n /= pooled_height; int hend = min(hstart + kernel_h, height); hstart = max(hstart, 0); T maxval = -FLT_MAX; const T* bdata_offset = bottom_data + n * height * channels; for (int h = hstart; h < hend; ++h) { int idx = h * channels + c; if (bdata_offset[idx] > maxval) { maxval = bdata_offset[idx]; } } top_data[index] = maxval; } } template <typename T> __global__ void MaxPool2DForwardNHWC( const int nthreads, const T* bottom_data, const int height, const int width, const int channels, const int pooled_height, const int pooled_width, const int kernel_h, const int kernel_w, const int stride_h, const int stride_w, const int pad_t, const int pad_l, T* top_data) { CUDA_1D_KERNEL_LOOP(index, nthreads) { int n = index; int c = n % channels; n /= channels; int wstart = (n % pooled_width) * stride_w - pad_l; n /= pooled_width; int hstart = (n % pooled_height) * stride_h - pad_t; n /= pooled_height; int hend = min(hstart + kernel_h, height); int wend = min(wstart + kernel_w, width); hstart = max(hstart, 0); wstart = max(wstart, 0); T maxval = -FLT_MAX; const T* bdata_offset = bottom_data + n * height * width * channels; for (int h = hstart; h < hend; ++h) { for (int w = wstart; w < wend; ++w) { int idx = (h * width + w) * channels + c; if (bdata_offset[idx] > maxval) { maxval = bdata_offset[idx]; } } } top_data[index] = maxval; } } template <typename T> __global__ void MaxPool3DForwardNHWC( const int nthreads, const T* bottom_data, const int height, const int width, const int depth, const int channels, const int pooled_height, const int pooled_width, const int pooled_depth, const int kernel_h, const int kernel_w, const int kernel_d, const int stride_h, const int stride_w, const int stride_d, const int pad_t, const int pad_l, const int pad_f, T* top_data) { CUDA_1D_KERNEL_LOOP(index, nthreads) { int n = index; int c = n % channels; n /= channels; int dstart = (n % pooled_depth) * stride_d - pad_f; n /= pooled_depth; int wstart = (n % pooled_width) * stride_w - pad_l; n /= pooled_width; int hstart = (n % pooled_height) * stride_h - pad_t; n /= pooled_height; int hend = min(hstart + kernel_h, height); int wend = min(wstart + kernel_w, width); int dend = min(dstart + kernel_d, depth); hstart = max(hstart, 0); wstart = max(wstart, 0); dstart = 
max(dstart, 0); T maxval = -FLT_MAX; const T* bdata_offset = bottom_data + n * height * width * depth * channels; for (int h = hstart; h < hend; ++h) { for (int w = wstart; w < wend; ++w) { for (int d = dstart; d < dend; ++d) { int idx = ((h * width + w) * depth + d) * channels + c; if (bdata_offset[idx] > maxval) { maxval = bdata_offset[idx]; } } } } top_data[index] = maxval; } } template <typename T> __global__ void MaxPool1DBackwardNCHW( const int nthreads, const T* const bottom_data, const T* const top_data, const T* const top_diff, const int num, const int channels, const int height, const int pooled_height, const int kernel_h, const int stride_h, const int pad_t, T* const bottom_diff) { CUDA_1D_KERNEL_LOOP(index, nthreads) { // find out the local index // find out the local offset const int h = index % height + pad_t; const int c = (index / height) % channels; const int n = index / height / channels; const int phstart = (h < kernel_h) ? 0 : (h - kernel_h) / stride_h + 1; const int phend = min(h / stride_h + 1, pooled_height); const int top_offset = (n * channels + c) * pooled_height; bottom_diff[index] = 0; for (int ph = phstart; ph < phend; ++ph) { int top_local_offset = top_offset + ph; if (bottom_data[index] == top_data[top_local_offset]) { bottom_diff[index] += top_diff[top_local_offset]; } } } } template <typename T> __global__ void MaxPool2DBackwardNCHW( const int nthreads, const T* const bottom_data, const T* const top_data, const T* const top_diff, const int num, const int channels, const int height, const int width, const int pooled_height, const int pooled_width, const int kernel_h, const int kernel_w, const int stride_h, const int stride_w, const int pad_t, const int pad_l, T* const bottom_diff) { CUDA_1D_KERNEL_LOOP(index, nthreads) { // find out the local index // find out the local offset const int w = index % width + pad_l; const int h = (index / width) % height + pad_t; const int c = (index / width / height) % channels; const int n = index / width / height / channels; const int phstart = (h < kernel_h) ? 0 : (h - kernel_h) / stride_h + 1; const int phend = min(h / stride_h + 1, pooled_height); const int pwstart = (w < kernel_w) ? 0 : (w - kernel_w) / stride_w + 1; const int pwend = min(w / stride_w + 1, pooled_width); const int top_offset = (n * channels + c) * pooled_height * pooled_width; bottom_diff[index] = 0; for (int ph = phstart; ph < phend; ++ph) { for (int pw = pwstart; pw < pwend; ++pw) { int top_local_offset = top_offset + ph * pooled_width + pw; if (bottom_data[index] == top_data[top_local_offset]) { bottom_diff[index] += top_diff[top_local_offset]; } } } } } template <typename T> __global__ void MaxPool3DBackwardNCHW( const int nthreads, const T* const bottom_data, const T* const top_data, const T* const top_diff, const int num, const int channels, const int height, const int width, const int depth, const int pooled_height, const int pooled_width, const int pooled_depth, const int kernel_h, const int kernel_w, const int kernel_d, const int stride_h, const int stride_w, const int stride_d, const int pad_t, const int pad_l, const int pad_f, T* const bottom_diff) { CUDA_1D_KERNEL_LOOP(index, nthreads) { // find out the local index // find out the local offset const int d = index % depth + pad_f; const int w = (index / depth) % width + pad_l; const int h = (index / depth / width) % height + pad_t; const int c = (index / depth / width / height) % channels; const int n = index / depth / width / height / channels; const int phstart = (h < kernel_h) ? 
0 : (h - kernel_h) / stride_h + 1; const int phend = min(h / stride_h + 1, pooled_height); const int pwstart = (w < kernel_w) ? 0 : (w - kernel_w) / stride_w + 1; const int pwend = min(w / stride_w + 1, pooled_width); const int pdstart = (d < kernel_d) ? 0 : (d - kernel_d) / stride_d + 1; const int pdend = min(d / stride_d + 1, pooled_depth); const int top_offset = (n * channels + c) * pooled_height * pooled_width * pooled_depth; bottom_diff[index] = 0; for (int ph = phstart; ph < phend; ++ph) { for (int pw = pwstart; pw < pwend; ++pw) { for (int pd = pdstart; pd < pdend; ++pd) { int top_local_offset = top_offset + (ph * pooled_width + pw) * pooled_depth + pd; if (bottom_data[index] == top_data[top_local_offset]) { bottom_diff[index] += top_diff[top_local_offset]; } } } } } } template <typename T> __global__ void MaxPool1DBackwardNHWC( const int nthreads, const T* const bottom_data, const T* const top_data, const T* const top_diff, const int num, const int height, const int channels, const int pooled_height, const int kernel_h, const int stride_h, const int pad_t, T* const bottom_diff) { CUDA_1D_KERNEL_LOOP(index, nthreads) { // find out the local index // find out the local offset const int c = index % channels; const int h = (index / channels) % height + pad_t; const int n = index / channels / height; const int phstart = (h < kernel_h) ? 0 : (h - kernel_h) / stride_h + 1; const int phend = min(h / stride_h + 1, pooled_height); const int top_offset = n * pooled_height * channels + c; bottom_diff[index] = 0; for (int ph = phstart; ph < phend; ++ph) { int top_local_offset = top_offset + ph * channels; if (bottom_data[index] == top_data[top_local_offset]) { bottom_diff[index] += top_diff[top_local_offset]; } } } } template <typename T> __global__ void MaxPool2DBackwardNHWC( const int nthreads, const T* const bottom_data, const T* const top_data, const T* const top_diff, const int num, const int height, const int width, const int channels, const int pooled_height, const int pooled_width, const int kernel_h, const int kernel_w, const int stride_h, const int stride_w, const int pad_t, const int pad_l, T* const bottom_diff) { CUDA_1D_KERNEL_LOOP(index, nthreads) { // find out the local index // find out the local offset const int c = index % channels; const int w = index / channels % width + pad_l; const int h = (index / channels / width) % height + pad_t; const int n = index / channels / width / height; const int phstart = (h < kernel_h) ? 0 : (h - kernel_h) / stride_h + 1; const int phend = min(h / stride_h + 1, pooled_height); const int pwstart = (w < kernel_w) ? 
0 : (w - kernel_w) / stride_w + 1; const int pwend = min(w / stride_w + 1, pooled_width); const int top_offset = n * pooled_height * pooled_width * channels + c; bottom_diff[index] = 0; for (int ph = phstart; ph < phend; ++ph) { for (int pw = pwstart; pw < pwend; ++pw) { int top_local_offset = top_offset + (ph * pooled_width + pw) * channels; if (bottom_data[index] == top_data[top_local_offset]) { bottom_diff[index] += top_diff[top_local_offset]; } } } } } template <typename T> __global__ void MaxPool3DBackwardNHWC( const int nthreads, const T* const bottom_data, const T* const top_data, const T* const top_diff, const int num, const int height, const int width, const int depth, const int channels, const int pooled_height, const int pooled_width, const int pooled_depth, const int kernel_h, const int kernel_w, const int kernel_d, const int stride_h, const int stride_w, const int stride_d, const int pad_t, const int pad_l, const int pad_f, T* const bottom_diff) { CUDA_1D_KERNEL_LOOP(index, nthreads) { // find out the local index // find out the local offset const int c = index % channels; const int d = index / channels % depth + pad_f; const int w = (index / depth / channels) % width + pad_l; const int h = (index / channels / depth / width) % height + pad_t; const int n = index / channels / depth / width / height; const int phstart = (h < kernel_h) ? 0 : (h - kernel_h) / stride_h + 1; const int phend = min(h / stride_h + 1, pooled_height); const int pwstart = (w < kernel_w) ? 0 : (w - kernel_w) / stride_w + 1; const int pwend = min(w / stride_w + 1, pooled_width); const int pdstart = (d < kernel_d) ? 0 : (d - kernel_d) / stride_d + 1; const int pdend = min(d / stride_d + 1, pooled_depth); const int top_offset = n * pooled_height * pooled_width * pooled_depth * channels + c; bottom_diff[index] = 0; for (int ph = phstart; ph < phend; ++ph) { for (int pw = pwstart; pw < pwend; ++pw) { for (int pd = pdstart; pd < pdend; ++pd) { int top_local_offset = top_offset + ((ph * pooled_width + pw) * pooled_depth + d) * channels; if (bottom_data[index] == top_data[top_local_offset]) { bottom_diff[index] += top_diff[top_local_offset]; } } } } } } } // namespace template <> bool PoolOp<float, CUDAContext, MaxPool>::RunOnDeviceWithOrderNCHW() { auto& X = Input(0); auto* Y = Output(0); ConvPoolOpBase<CUDAContext>::SetOutputSize(X, Y, X.dim32(1)); int output_size = Y->size(); switch (kernel_.size()) { case 1: MaxPool1DForwardNCHW<float> <<<CAFFE_GET_BLOCKS(output_size), CAFFE_CUDA_NUM_THREADS, 0, context_.cuda_stream()>>>( output_size, X.data<float>(), X.dim32(1), X.dim32(2), Y->dim32(2), kernel_h(), stride_h(), pad_t(), Y->template mutable_data<float>()); break; case 2: MaxPool2DForwardNCHW<float> <<<CAFFE_GET_BLOCKS(output_size), CAFFE_CUDA_NUM_THREADS, 0, context_.cuda_stream()>>>( output_size, X.data<float>(), X.dim32(1), X.dim32(2), X.dim32(3), Y->dim32(2), Y->dim32(3), kernel_h(), kernel_w(), stride_h(), stride_w(), pad_t(), pad_l(), Y->template mutable_data<float>()); break; case 3: MaxPool3DForwardNCHW<float> <<<CAFFE_GET_BLOCKS(output_size), CAFFE_CUDA_NUM_THREADS, 0, context_.cuda_stream()>>>( output_size, X.data<float>(), X.dim32(1), X.dim32(2), X.dim32(3), X.dim32(4), Y->dim32(2), Y->dim32(3), Y->dim32(4), kernel_h(), kernel_w(), kernel_[2], stride_h(), stride_w(), stride_[2], pad_t(), pad_l(), pads_[2], Y->template mutable_data<float>()); break; default: CAFFE_THROW("Unsupported pooling size : ", kernel_.size()); } return true; } template <> bool PoolOp<float, CUDAContext, 
MaxPool>::RunOnDeviceWithOrderNHWC() { auto& X = Input(0); auto* Y = Output(0); ConvPoolOpBase<CUDAContext>::SetOutputSize(X, Y, X.dim32(X.ndim() - 1)); int output_size = Y->size(); switch (kernel_.size()) { case 1: MaxPool1DForwardNHWC<float> <<<CAFFE_GET_BLOCKS(output_size), CAFFE_CUDA_NUM_THREADS, 0, context_.cuda_stream()>>>( output_size, X.data<float>(), X.dim32(1), X.dim32(2), Y->dim32(1), kernel_h(), stride_h(), pad_t(), Y->template mutable_data<float>()); break; case 2: MaxPool2DForwardNHWC<float> <<<CAFFE_GET_BLOCKS(output_size), CAFFE_CUDA_NUM_THREADS, 0, context_.cuda_stream()>>>( output_size, X.data<float>(), X.dim32(1), X.dim32(2), X.dim32(3), Y->dim32(1), Y->dim32(2), kernel_h(), kernel_w(), stride_h(), stride_w(), pad_t(), pad_l(), Y->template mutable_data<float>()); break; case 3: MaxPool3DForwardNHWC<float> <<<CAFFE_GET_BLOCKS(output_size), CAFFE_CUDA_NUM_THREADS, 0, context_.cuda_stream()>>>( output_size, X.data<float>(), X.dim32(1), X.dim32(2), X.dim32(3), X.dim32(4), Y->dim32(1), Y->dim32(2), Y->dim32(3), kernel_h(), kernel_w(), kernel_[2], stride_h(), stride_w(), stride_[2], pad_t(), pad_l(), pads_[2], Y->template mutable_data<float>()); break; default: CAFFE_THROW("Unsupported pooling size : ", kernel_.size()); } return true; } template <> bool PoolGradientOp<float, CUDAContext, MaxPool>::RunOnDeviceWithOrderNCHW() { auto& X = Input(0); auto& Y = Input(1); auto& dY = Input(2); CAFFE_ENFORCE_EQ(dY.ndim(), X.ndim()); auto* dX = Output(0); dX->ResizeLike(X); vector<int> dims(X.sizes().begin() + 2, X.sizes().end()); ConvPoolOpBase<CUDAContext>::ComputePads(dims); switch (kernel_.size()) { case 1: MaxPool1DBackwardNCHW<float> <<<CAFFE_GET_BLOCKS(X.size()), CAFFE_CUDA_NUM_THREADS, 0, context_.cuda_stream()>>>( X.size(), X.data<float>(), Y.data<float>(), dY.data<float>(), X.dim32(0), X.dim32(1), X.dim32(2), dY.dim32(2), kernel_h(), stride_h(), pad_t(), dX->template mutable_data<float>()); break; case 2: MaxPool2DBackwardNCHW<float> <<<CAFFE_GET_BLOCKS(X.size()), CAFFE_CUDA_NUM_THREADS, 0, context_.cuda_stream()>>>( X.size(), X.data<float>(), Y.data<float>(), dY.data<float>(), X.dim32(0), X.dim32(1), X.dim32(2), X.dim32(3), dY.dim32(2), dY.dim32(3), kernel_h(), kernel_w(), stride_h(), stride_w(), pad_t(), pad_l(), dX->template mutable_data<float>()); break; case 3: MaxPool3DBackwardNCHW<float> <<<CAFFE_GET_BLOCKS(X.size()), CAFFE_CUDA_NUM_THREADS, 0, context_.cuda_stream()>>>( X.size(), X.data<float>(), Y.data<float>(), dY.data<float>(), X.dim32(0), X.dim32(1), X.dim32(2), X.dim32(3), X.dim32(4), dY.dim32(2), dY.dim32(3), dY.dim32(4), kernel_h(), kernel_w(), kernel_[2], stride_h(), stride_w(), stride_[2], pad_t(), pad_l(), pads_[2], dX->template mutable_data<float>()); break; default: CAFFE_THROW("Unsupported pooling size : ", kernel_.size()); } return true; } template <> bool PoolGradientOp<float, CUDAContext, MaxPool>::RunOnDeviceWithOrderNHWC() { auto& X = Input(0); auto& Y = Input(1); auto& dY = Input(2); CAFFE_ENFORCE_EQ(dY.ndim(), X.ndim()); auto* dX = Output(0); dX->ResizeLike(X); vector<int> dims(X.sizes().begin() + 1, X.sizes().end() - 1); ConvPoolOpBase<CUDAContext>::ComputePads(dims); switch (kernel_.size()) { case 1: MaxPool1DBackwardNHWC<float> <<<CAFFE_GET_BLOCKS(X.size()), CAFFE_CUDA_NUM_THREADS, 0, context_.cuda_stream()>>>( X.size(), X.data<float>(), Y.data<float>(), dY.data<float>(), X.dim32(0), X.dim32(1), X.dim32(2), dY.dim32(1), kernel_h(), stride_h(), pad_t(), dX->template mutable_data<float>()); break; case 2: MaxPool2DBackwardNHWC<float> 
<<<CAFFE_GET_BLOCKS(X.size()), CAFFE_CUDA_NUM_THREADS, 0, context_.cuda_stream()>>>( X.size(), X.data<float>(), Y.data<float>(), dY.data<float>(), X.dim32(0), X.dim32(1), X.dim32(2), X.dim32(3), dY.dim32(1), dY.dim32(2), kernel_h(), kernel_w(), stride_h(), stride_w(), pad_t(), pad_l(), dX->template mutable_data<float>()); break; case 3: MaxPool3DBackwardNHWC<float> <<<CAFFE_GET_BLOCKS(X.size()), CAFFE_CUDA_NUM_THREADS, 0, context_.cuda_stream()>>>( X.size(), X.data<float>(), Y.data<float>(), dY.data<float>(), X.dim32(0), X.dim32(1), X.dim32(2), X.dim32(3), X.dim32(4), dY.dim32(1), dY.dim32(2), dY.dim32(3), kernel_h(), kernel_w(), kernel_[2], stride_h(), stride_w(), stride_[2], pad_t(), pad_l(), pads_[2], dX->template mutable_data<float>()); break; default: CAFFE_THROW("Unsupported pooling size : ", kernel_.size()); } return true; } REGISTER_CUDA_OPERATOR( AveragePool, PoolOp<float, CUDAContext, AveragePoolFunctor<CUDAContext>>); REGISTER_CUDA_OPERATOR( AveragePoolGradient, PoolGradientOp<float, CUDAContext, AveragePool>); REGISTER_CUDA_OPERATOR( AveragePool1D, PoolOp<float, CUDAContext, AveragePoolFunctor<CUDAContext>>); REGISTER_CUDA_OPERATOR( AveragePool1DGradient, PoolGradientOp<float, CUDAContext, AveragePool>); REGISTER_CUDA_OPERATOR( AveragePool2D, PoolOp<float, CUDAContext, AveragePoolFunctor<CUDAContext>>); REGISTER_CUDA_OPERATOR( AveragePool2DGradient, PoolGradientOp<float, CUDAContext, AveragePool>); REGISTER_CUDA_OPERATOR( AveragePool3D, PoolOp<float, CUDAContext, AveragePoolFunctor<CUDAContext>>); REGISTER_CUDA_OPERATOR( AveragePool3DGradient, PoolGradientOp<float, CUDAContext, AveragePool>); REGISTER_CUDA_OPERATOR(MaxPool, PoolOp<float, CUDAContext, MaxPool>); REGISTER_CUDA_OPERATOR( MaxPoolGradient, PoolGradientOp<float, CUDAContext, MaxPool>); REGISTER_CUDA_OPERATOR(MaxPool1D, PoolOp<float, CUDAContext, MaxPool>); REGISTER_CUDA_OPERATOR( MaxPool1DGradient, PoolGradientOp<float, CUDAContext, MaxPool>); REGISTER_CUDA_OPERATOR(MaxPool2D, PoolOp<float, CUDAContext, MaxPool>); REGISTER_CUDA_OPERATOR( MaxPool2DGradient, PoolGradientOp<float, CUDAContext, MaxPool>); REGISTER_CUDA_OPERATOR(MaxPool3D, PoolOp<float, CUDAContext, MaxPool>); REGISTER_CUDA_OPERATOR( MaxPool3DGradient, PoolGradientOp<float, CUDAContext, MaxPool>); } // namespace caffe2
3fb8d912ae32710942fc67fda322682b53274fae.hip
// !!! This is a file automatically generated by hipify!!!
/* Copyright (c) 1993-2015, NVIDIA CORPORATION. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *  * Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *  * Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *  * Neither the name of NVIDIA CORPORATION nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <cstdlib>
#include <hip/hip_runtime.h>

#include "particle/particle.h"
#include "propagate/propagate.h"

__global__ void advanceParticles(float dt, particle * pArray, int nParticles)
{
    int idx = threadIdx.x + blockIdx.x*blockDim.x;
    if(idx < nParticles)
    {
        pArray[idx].advance(dt);
    }
}

v3 propagate(int n, int seed)
{
    srand(seed);
    particle * pArray = new particle[n];
    particle * devPArray = NULL;
    hipMalloc(&devPArray, n*sizeof(particle));
    hipMemcpy(devPArray, pArray, n*sizeof(particle), hipMemcpyHostToDevice);
    for(int i=0; i<100; i++)
    {
        float dt = (float)rand()/(float) RAND_MAX; // Random distance each step
        hipLaunchKernelGGL(( advanceParticles), dim3(1 + n/256), dim3(256), 0, 0, dt, devPArray, n);
        hipDeviceSynchronize();
    }
    hipMemcpy(pArray, devPArray, n*sizeof(particle), hipMemcpyDeviceToHost);
    v3 totalDistance(0,0,0);
    v3 temp;
    for(int i=0; i<n; i++)
    {
        temp = pArray[i].getTotalDistance();
        totalDistance.x += temp.x;
        totalDistance.y += temp.y;
        totalDistance.z += temp.z;
    }
    return totalDistance;
}
3fb8d912ae32710942fc67fda322682b53274fae.cu
/* Copyright (c) 1993-2015, NVIDIA CORPORATION. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *  * Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *  * Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *  * Neither the name of NVIDIA CORPORATION nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <cstdlib>
#include <cuda_runtime.h>

#include "particle/particle.h"
#include "propagate/propagate.h"

__global__ void advanceParticles(float dt, particle * pArray, int nParticles)
{
    int idx = threadIdx.x + blockIdx.x*blockDim.x;
    if(idx < nParticles)
    {
        pArray[idx].advance(dt);
    }
}

v3 propagate(int n, int seed)
{
    srand(seed);
    particle * pArray = new particle[n];
    particle * devPArray = NULL;
    cudaMalloc(&devPArray, n*sizeof(particle));
    cudaMemcpy(devPArray, pArray, n*sizeof(particle), cudaMemcpyHostToDevice);
    for(int i=0; i<100; i++)
    {
        float dt = (float)rand()/(float) RAND_MAX; // Random distance each step
        advanceParticles<<< 1 + n/256, 256>>>(dt, devPArray, n);
        cudaDeviceSynchronize();
    }
    cudaMemcpy(pArray, devPArray, n*sizeof(particle), cudaMemcpyDeviceToHost);
    v3 totalDistance(0,0,0);
    v3 temp;
    for(int i=0; i<n; i++)
    {
        temp = pArray[i].getTotalDistance();
        totalDistance.x += temp.x;
        totalDistance.y += temp.y;
        totalDistance.z += temp.z;
    }
    return totalDistance;
}
d63bed7ad08c1b68d519e7a5eab6ae017a3084e6.hip
// !!! This is a file automatically generated by hipify!!! /* Carlos Ros Vera <[email protected]> */ #include <hip/hip_runtime.h> #include <hiprand/hiprand.h> #include <stdio.h> #include <stdlib.h> #include <sys/time.h> #include <float.h> #include "adentu-atom.h" #include "adentu-model.h" #include "adentu-grid.h" #include "adentu-event.h" #include "vec3.h" #include "adentu-cuda-utils.h" extern "C" { #include "adentu-neighbourhood.h" #include "adentu-event-gfc-cuda.h" #include "vec3-cuda.h" } __global__ void adentu_event_gfc_cuda_get_cells_kernel (int *cells, vec3f *pos, vec3i nCell, vec3f origin, vec3f h, int nAtoms); __global__ void adentu_event_gfc_cuda_grain_vs_fluid_kernel (AdentuEvent *ev, vec3f gpos, vec3f gvel, double radius, vec3f *fpos, vec3f *fvel, int *neighbours, int nAtoms); extern "C" AdentuEvent *adentu_event_gfc_cuda_get_next (AdentuModel *model) { AdentuAtom *grain = model->grain; AdentuAtom *fluid = model->fluid; int nGrains = grain->n; int nFluids = fluid->n; vec3f *g_pos = grain->pos, *d_g_pos = NULL; vec3f *f_pos = fluid->pos, *d_f_pos = NULL; vec3f *g_vel = grain->vel, *d_g_vel = NULL; vec3f *f_vel = fluid->vel, *d_f_vel = NULL; int *cells = NULL, *d_cells = NULL; CUDA_CALL (hipMalloc ((void **)&d_g_pos, nGrains * sizeof (vec3f))); CUDA_CALL (hipMalloc ((void **)&d_f_pos, nFluids * sizeof (vec3f))); CUDA_CALL (hipMalloc ((void **)&d_g_vel, nGrains * sizeof (vec3f))); CUDA_CALL (hipMalloc ((void **)&d_f_vel, nFluids * sizeof (vec3f))); CUDA_CALL (hipMalloc ((void **)&d_cells, nGrains * sizeof (vec3f))); CUDA_CALL (hipMemcpy (d_g_pos, g_pos, nGrains * sizeof (vec3f), hipMemcpyHostToDevice)); CUDA_CALL (hipMemcpy (d_f_pos, f_pos, nFluids * sizeof (vec3f), hipMemcpyHostToDevice)); CUDA_CALL (hipMemcpy (d_g_vel, g_vel, nGrains * sizeof (vec3f), hipMemcpyHostToDevice)); CUDA_CALL (hipMemcpy (d_f_vel, f_vel, nFluids * sizeof (vec3f), hipMemcpyHostToDevice)); dim3 gDim, bDim; adentu_cuda_set_grid (&gDim, &bDim, nGrains); hipLaunchKernelGGL(( adentu_event_gfc_cuda_get_cells_kernel), dim3(gDim), dim3(bDim), 0, 0, d_cells, d_g_pos, model->gGrid->nCell, model->gGrid->origin, model->gGrid->h, nGrains); cells = (int *) malloc (nGrains * sizeof (int)); CUDA_CALL (hipMemcpy (cells, d_cells, nGrains * sizeof (int), hipMemcpyDeviceToHost)); CUDA_CALL (hipFree (d_cells)); int neighCells[27], nAtoms, *neighbours, *d_neighbours; AdentuEvent *kevent, *d_kevent, *event, tmp; event = (AdentuEvent *) malloc (sizeof (AdentuEvent)); tmp.type = event->type = ADENTU_EVENT_GFC; tmp.time = event->time = DBL_MAX; tmp.eventData = event->eventData = NULL; for (int i = 0; i < nGrains; ++i) { adentu_neighbourhood_get_cell_neighbourhood (cells[i], model->gGrid, neighCells); neighbours = adentu_neighbourhood_get_atoms (&nAtoms, neighCells, model->fGrid); if (!nAtoms) continue ; CUDA_CALL (hipMalloc ((void **)&d_neighbours, nAtoms * sizeof (int))); CUDA_CALL (hipMemcpy (d_neighbours, neighbours, nAtoms * sizeof (int), hipMemcpyHostToDevice)); adentu_cuda_set_grid (&gDim, &bDim, nAtoms); kevent = (AdentuEvent *) malloc (gDim.x * sizeof (AdentuEvent)); CUDA_CALL (hipMalloc ((void **)&d_kevent, gDim.x * sizeof (AdentuEvent))); hipLaunchKernelGGL(( adentu_event_gfc_cuda_grain_vs_fluid_kernel), dim3(gDim), dim3(bDim), 0, 0, d_kevent, g_pos[i], g_vel[i], grain->radius[i], d_f_pos, d_f_vel, d_neighbours, nAtoms); CUDA_CALL (hipMemcpy (kevent, d_kevent, gDim.x * sizeof (AdentuEvent), hipMemcpyDeviceToHost)); tmp.partner = kevent[0].partner; tmp.time = kevent[0].time; for (int j = 0; j < gDim.x; ++j) if 
(kevent[j].time < tmp.time) { tmp.time = kevent[j].time; tmp.partner = kevent[j].partner; } if (tmp.time < event->time) { event->time = tmp.time; event->owner = i; event->partner = tmp.partner; event->nEvents = fluid->nCol[tmp.partner]; } free (kevent); CUDA_CALL (hipFree (d_kevent)); CUDA_CALL (hipFree (d_neighbours)); free (neighbours); } CUDA_CALL (hipFree (d_g_pos)); CUDA_CALL (hipFree (d_f_pos)); CUDA_CALL (hipFree (d_g_vel)); CUDA_CALL (hipFree (d_f_vel)); //CUDA_CALL (hipFree (d_cells)); free (cells); return event; } __global__ void adentu_event_gfc_cuda_get_cells_kernel (int *cells, vec3f *pos, vec3i nCell, vec3f origin, vec3f h, int nAtoms) { int idx = threadIdx.x + blockIdx.x * gridDim.x; if (idx >= nAtoms) return ; vec3f cell, p = pos[idx]; cell.x = (int) (p.x + origin.x)/h.x; cell.y = (int) (p.y + origin.y)/h.y; cell.z = (int) (p.z + origin.z)/h.z; cells[idx] = nCell.x * nCell.y * cell.z + nCell.x * cell.y + cell.x; } __global__ void adentu_event_gfc_cuda_grain_vs_fluid_kernel (AdentuEvent *ev, vec3f gpos, vec3f gvel, double radius, vec3f *fpos, vec3f *fvel, int *neighbours, int nAtoms) { int idx = threadIdx.x + blockIdx.x * gridDim.x; int tid = threadIdx.x; int bid = blockIdx.x; __shared__ AdentuEvent events[128]; events[tid].time = -1; __syncthreads (); if (idx >= nAtoms) return ; vec3f pos, vel; vec3f f_pos = fpos[idx]; vec3f f_vel = fvel[idx]; vecSub (pos, f_pos, gpos); vecSub (vel, f_vel, gvel); double PP, disc, num, den, time; PP = vecDot (pos, vel); if (PP < 0.0) { disc = (PP * PP) - (vecMod(vel) * vecMod(vel)) * (vecMod(pos) * vecMod(pos)) - (radius * radius); if (disc > 0) { num = -PP - sqrt (disc); den = (vecMod (vel) * vecMod (vel)); time = num/den; if (time > 0.0) { events[tid].partner = idx; events[tid].time = time; } } } __syncthreads (); int n = 64; while (n != 1 && tid < n) { if (events[tid].time != -1 && events[tid+n].time != -1 && events[tid].time > events[tid+n].time) { events[tid].partner = events[tid+n].partner; events[tid].time = events[tid+n].time; } n /= 2; } __syncthreads (); if (tid == 0) { ev[bid].type = ADENTU_EVENT_GFC; ev[bid].partner = events[0].partner; ev[bid].time = events[0].time; } }
d63bed7ad08c1b68d519e7a5eab6ae017a3084e6.cu
/* Carlos Ríos Vera <[email protected]> */ #include <cuda.h> #include <curand.h> #include <stdio.h> #include <stdlib.h> #include <sys/time.h> #include <float.h> #include "adentu-atom.h" #include "adentu-model.h" #include "adentu-grid.h" #include "adentu-event.h" #include "vec3.h" #include "adentu-cuda-utils.h" extern "C" { #include "adentu-neighbourhood.h" #include "adentu-event-gfc-cuda.h" #include "vec3-cuda.h" } __global__ void adentu_event_gfc_cuda_get_cells_kernel (int *cells, vec3f *pos, vec3i nCell, vec3f origin, vec3f h, int nAtoms); __global__ void adentu_event_gfc_cuda_grain_vs_fluid_kernel (AdentuEvent *ev, vec3f gpos, vec3f gvel, double radius, vec3f *fpos, vec3f *fvel, int *neighbours, int nAtoms); extern "C" AdentuEvent *adentu_event_gfc_cuda_get_next (AdentuModel *model) { AdentuAtom *grain = model->grain; AdentuAtom *fluid = model->fluid; int nGrains = grain->n; int nFluids = fluid->n; vec3f *g_pos = grain->pos, *d_g_pos = NULL; vec3f *f_pos = fluid->pos, *d_f_pos = NULL; vec3f *g_vel = grain->vel, *d_g_vel = NULL; vec3f *f_vel = fluid->vel, *d_f_vel = NULL; int *cells = NULL, *d_cells = NULL; CUDA_CALL (cudaMalloc ((void **)&d_g_pos, nGrains * sizeof (vec3f))); CUDA_CALL (cudaMalloc ((void **)&d_f_pos, nFluids * sizeof (vec3f))); CUDA_CALL (cudaMalloc ((void **)&d_g_vel, nGrains * sizeof (vec3f))); CUDA_CALL (cudaMalloc ((void **)&d_f_vel, nFluids * sizeof (vec3f))); CUDA_CALL (cudaMalloc ((void **)&d_cells, nGrains * sizeof (vec3f))); CUDA_CALL (cudaMemcpy (d_g_pos, g_pos, nGrains * sizeof (vec3f), cudaMemcpyHostToDevice)); CUDA_CALL (cudaMemcpy (d_f_pos, f_pos, nFluids * sizeof (vec3f), cudaMemcpyHostToDevice)); CUDA_CALL (cudaMemcpy (d_g_vel, g_vel, nGrains * sizeof (vec3f), cudaMemcpyHostToDevice)); CUDA_CALL (cudaMemcpy (d_f_vel, f_vel, nFluids * sizeof (vec3f), cudaMemcpyHostToDevice)); dim3 gDim, bDim; adentu_cuda_set_grid (&gDim, &bDim, nGrains); adentu_event_gfc_cuda_get_cells_kernel<<<gDim, bDim>>> (d_cells, d_g_pos, model->gGrid->nCell, model->gGrid->origin, model->gGrid->h, nGrains); cells = (int *) malloc (nGrains * sizeof (int)); CUDA_CALL (cudaMemcpy (cells, d_cells, nGrains * sizeof (int), cudaMemcpyDeviceToHost)); CUDA_CALL (cudaFree (d_cells)); int neighCells[27], nAtoms, *neighbours, *d_neighbours; AdentuEvent *kevent, *d_kevent, *event, tmp; event = (AdentuEvent *) malloc (sizeof (AdentuEvent)); tmp.type = event->type = ADENTU_EVENT_GFC; tmp.time = event->time = DBL_MAX; tmp.eventData = event->eventData = NULL; for (int i = 0; i < nGrains; ++i) { adentu_neighbourhood_get_cell_neighbourhood (cells[i], model->gGrid, neighCells); neighbours = adentu_neighbourhood_get_atoms (&nAtoms, neighCells, model->fGrid); if (!nAtoms) continue ; CUDA_CALL (cudaMalloc ((void **)&d_neighbours, nAtoms * sizeof (int))); CUDA_CALL (cudaMemcpy (d_neighbours, neighbours, nAtoms * sizeof (int), cudaMemcpyHostToDevice)); adentu_cuda_set_grid (&gDim, &bDim, nAtoms); kevent = (AdentuEvent *) malloc (gDim.x * sizeof (AdentuEvent)); CUDA_CALL (cudaMalloc ((void **)&d_kevent, gDim.x * sizeof (AdentuEvent))); adentu_event_gfc_cuda_grain_vs_fluid_kernel<<<gDim, bDim>>> (d_kevent, g_pos[i], g_vel[i], grain->radius[i], d_f_pos, d_f_vel, d_neighbours, nAtoms); CUDA_CALL (cudaMemcpy (kevent, d_kevent, gDim.x * sizeof (AdentuEvent), cudaMemcpyDeviceToHost)); tmp.partner = kevent[0].partner; tmp.time = kevent[0].time; for (int j = 0; j < gDim.x; ++j) if (kevent[j].time < tmp.time) { tmp.time = kevent[j].time; tmp.partner = kevent[j].partner; } if (tmp.time < event->time) { event->time = 
tmp.time; event->owner = i; event->partner = tmp.partner; event->nEvents = fluid->nCol[tmp.partner]; } free (kevent); CUDA_CALL (cudaFree (d_kevent)); CUDA_CALL (cudaFree (d_neighbours)); free (neighbours); } CUDA_CALL (cudaFree (d_g_pos)); CUDA_CALL (cudaFree (d_f_pos)); CUDA_CALL (cudaFree (d_g_vel)); CUDA_CALL (cudaFree (d_f_vel)); //CUDA_CALL (cudaFree (d_cells)); free (cells); return event; } __global__ void adentu_event_gfc_cuda_get_cells_kernel (int *cells, vec3f *pos, vec3i nCell, vec3f origin, vec3f h, int nAtoms) { int idx = threadIdx.x + blockIdx.x * gridDim.x; if (idx >= nAtoms) return ; vec3f cell, p = pos[idx]; cell.x = (int) (p.x + origin.x)/h.x; cell.y = (int) (p.y + origin.y)/h.y; cell.z = (int) (p.z + origin.z)/h.z; cells[idx] = nCell.x * nCell.y * cell.z + nCell.x * cell.y + cell.x; } __global__ void adentu_event_gfc_cuda_grain_vs_fluid_kernel (AdentuEvent *ev, vec3f gpos, vec3f gvel, double radius, vec3f *fpos, vec3f *fvel, int *neighbours, int nAtoms) { int idx = threadIdx.x + blockIdx.x * gridDim.x; int tid = threadIdx.x; int bid = blockIdx.x; __shared__ AdentuEvent events[128]; events[tid].time = -1; __syncthreads (); if (idx >= nAtoms) return ; vec3f pos, vel; vec3f f_pos = fpos[idx]; vec3f f_vel = fvel[idx]; vecSub (pos, f_pos, gpos); vecSub (vel, f_vel, gvel); double PP, disc, num, den, time; PP = vecDot (pos, vel); if (PP < 0.0) { disc = (PP * PP) - (vecMod(vel) * vecMod(vel)) * (vecMod(pos) * vecMod(pos)) - (radius * radius); if (disc > 0) { num = -PP - sqrt (disc); den = (vecMod (vel) * vecMod (vel)); time = num/den; if (time > 0.0) { events[tid].partner = idx; events[tid].time = time; } } } __syncthreads (); int n = 64; while (n != 1 && tid < n) { if (events[tid].time != -1 && events[tid+n].time != -1 && events[tid].time > events[tid+n].time) { events[tid].partner = events[tid+n].partner; events[tid].time = events[tid+n].time; } n /= 2; } __syncthreads (); if (tid == 0) { ev[bid].type = ADENTU_EVENT_GFC; ev[bid].partner = events[0].partner; ev[bid].time = events[0].time; } }
1077adda77d4d851160bf4d87d7f5c303e006400.hip
// !!! This is a file automatically generated by hipify!!! #include "intellif_minning_dbscan_impl_DBSCANImpl.h" #include "hip/hip_runtime.h" #include "hip/device_functions.h" #include "rocblas.h" #include "device_launch_parameters.h" #include <iostream> #include <fstream> #include <sstream> #include <cstdlib> #include <ctime> #include <math.h> #include <queue> #include <string.h> #include <stdlib.h> #include <vector> #include<stdio.h> #include<algorithm> #include<memory> //APICUDAAPI #define CHECK_ERROR(error) checkCudaError(error, __FILE__, __LINE__) //CUDA Runtime #define CHECK_STATE(msg) checkCudaState(msg, __FILE__, __LINE__) inline void checkCudaError(hipError_t error, const char *file, const int line) { if (error != hipSuccess) { std::cerr << "CUDA CALL FAILED:" << file << "( " << line << ")- " << hipGetErrorString(error) << std::endl; exit(EXIT_FAILURE); } } inline void checkCudaState(const char *msg, const char *file, const int line) { hipError_t error = hipGetLastError(); if (error != hipSuccess) { std::cerr << "---" << msg << " Error---" << std::endl; std::cerr << file << "( " << line << ")- " << hipGetErrorString(error) << std::endl; exit(EXIT_FAILURE); } } using namespace std; struct Point { float dimensions[128]; int cluster; int noise; //-1 noise; //string img; }; float eps;//neighborhood radius int min_nb; int n; Point *host_sample; int block_num = 96; int thread_num = 32; float __device__ dev_euclidean_distance(const Point &src, const Point &dest) { float res = 0.0; for(int i=0; i<128; i++){ res += (src.dimensions[i] - dest.dimensions[i]) * (src.dimensions[i] - dest.dimensions[i]); } return sqrt(res); } /*to get the total list*/ void __global__ dev_region_query(Point* sample, int num, int* neighbors, float eps, int min_nb) { unsigned int tid = blockIdx.x * blockDim.x + threadIdx.x; unsigned int line,col,pointer = tid; unsigned int count; while (pointer < num * num) {//id line = pointer / num; col = pointer % num; float radius; if (line <= col) { radius = dev_euclidean_distance(sample[line], sample[col]); if (radius <= eps) { neighbors[pointer] = 1; } neighbors[col * num + line] = neighbors[pointer];// } pointer += blockDim.x * gridDim.x; } __syncthreads(); pointer = tid; while (pointer < num) { count = 1; line = pointer * num; for (int i = 0; i < num; i++) { if (pointer != i && neighbors[line+i]) {//p count++; } } if (count >= min_nb) { sample[pointer].noise++; } pointer += blockDim.x * gridDim.x; } } void host_algorithm_dbscan() { int num = n; /*sample*/ Point* cuda_sample; CHECK_ERROR(hipMalloc((void**)&cuda_sample, num * sizeof(Point))); CHECK_ERROR(hipMemcpy(cuda_sample, host_sample, num * sizeof(Point), hipMemcpyHostToDevice)); /*neighbor list*/ int *host_neighbor = new int[num*num](); int *dev_neighbor; CHECK_ERROR(hipMalloc((void**)&dev_neighbor, num * num * sizeof(int))); dev_region_query << <block_num, thread_num >> > (cuda_sample, num, dev_neighbor, eps, min_nb); hipDeviceSynchronize(); CHECK_STATE("kernel call"); CHECK_ERROR(hipMemcpy(host_sample, cuda_sample, num * sizeof(Point), hipMemcpyDeviceToHost)); CHECK_ERROR(hipMemcpy(host_neighbor, dev_neighbor, num * num * sizeof(int), hipMemcpyDeviceToHost)); hipFree(cuda_sample); hipFree(dev_neighbor); queue<int> expand; int cur_cluster = 0; for (int i = 0; i < num; i++) { if (host_sample[i].noise >= 0 && host_sample[i].cluster < 1) { host_sample[i].cluster = ++cur_cluster; int src = i * num; for (int j = 0; j < num; j++) { if (host_neighbor[src + j]) { host_sample[j].cluster = cur_cluster; expand.push(j); } } while 
(!expand.empty()) {/*expand the cluster*/ if (host_sample[expand.front()].noise >= 0) { src = expand.front() * num; for (int j = 0; j < num; j++) { if (host_neighbor[src + j] && host_sample[j].cluster < 1) { host_sample[j].cluster = cur_cluster; expand.push(j); } } } expand.pop(); } } } } // int countLines(const char *filename){ ifstream fin(filename, ios::in); int n=0; string lineStr; while(getline(fin, lineStr)) n++; return n; } //tab string Trim(string& str) { //str.find_first_not_of(" \t\r\n"),str0"\t\r\n" str.erase(0, str.find_first_not_of(" \t\r\n")); str.erase(str.find_last_not_of(" \t\r\n") + 1); return str; } JNIEXPORT jboolean JNICALL Java_intellif_minning_dbscan_impl_DBSCANImpl_initDatasFromFile (JNIEnv *env, jobject obj, jstring jfile, jint jcount) { const char *file = env->GetStringUTFChars(jfile, NULL);// java file n = (int)jcount;// java count try { host_sample = new Point[n];// } catch (const std::exception& e) { cerr << "alloca arrays exception: " << e.what() << endl; exit(EXIT_FAILURE); } ifstream fin(file); // if (!fin) { cout << "file not found" << endl; exit(EXIT_FAILURE); } string line; int point_count = 0; while (getline(fin, line)) //\neof { istringstream sin(line); //lineistringstream vector<string> fields; // string field; while (getline(sin, field, ',')) //sinfield { fields.push_back(field); //fields } string alls = Trim(fields[0]); // //fieldsfeatures() size_t pos = alls.find(" "); string features = alls.substr(pos + 1);// temptemp istringstream featurestream(features); string feature; int dims = 0; while (getline(featurestream, feature, ' ')) { host_sample[point_count].dimensions[dims++] = stof(feature); } //host_sample[point_count].img = Trim(fields[1]); //fieldsimg host_sample[point_count].noise = -1; host_sample[point_count].cluster = -1; point_count++; if (point_count >= n) { break; } } env->ReleaseStringUTFChars(jfile, file); cout << "init points from file success" << endl; return (jboolean)true; } // dbscan JNIEXPORT void JNICALL Java_intellif_minning_dbscan_impl_DBSCANImpl_runDBSCAN (JNIEnv *env, jobject obj, jfloat jeps, jint jminPts) { eps = (float)jeps; min_nb = (int)jminPts; clock_t start, finish; start = clock(); // host_algorithm_dbscan(); finish = clock(); cout << "dbscan success" << endl; cout << n << " speed time: " << (finish - start)*1.0 / CLOCKS_PER_SEC << "s\n" << endl; } JNIEXPORT jstring JNICALL Java_intellif_minning_dbscan_impl_DBSCANImpl_saveDBSCAN (JNIEnv *env, jobject obj) { ofstream fout; char resultFile[128]; sprintf(resultFile, "%d_result.csv", n); fout.open(resultFile); for (int i = 0; i < n; i++) { fout << i << "," << host_sample[i].cluster << endl; } fout.close(); // delete []host_sample; cout << "save result success" << endl; return env->NewStringUTF(resultFile); }
1077adda77d4d851160bf4d87d7f5c303e006400.cu
#include "intellif_minning_dbscan_impl_DBSCANImpl.h" #include "cuda_runtime.h" #include "device_functions.h" #include "cublas_v2.h" #include "device_launch_parameters.h" #include <iostream> #include <fstream> #include <sstream> #include <cstdlib> #include <ctime> #include <math.h> #include <queue> #include <string.h> #include <stdlib.h> #include <vector> #include<stdio.h> #include<algorithm> #include<memory> //API调用错误处理,可以接受CUDA的API函数调用作为参数 #define CHECK_ERROR(error) checkCudaError(error, __FILE__, __LINE__) //检查CUDA Runtime状态码,可以接受一个指定的提示信息 #define CHECK_STATE(msg) checkCudaState(msg, __FILE__, __LINE__) inline void checkCudaError(cudaError_t error, const char *file, const int line) { if (error != cudaSuccess) { std::cerr << "CUDA CALL FAILED:" << file << "( " << line << ")- " << cudaGetErrorString(error) << std::endl; exit(EXIT_FAILURE); } } inline void checkCudaState(const char *msg, const char *file, const int line) { cudaError_t error = cudaGetLastError(); if (error != cudaSuccess) { std::cerr << "---" << msg << " Error---" << std::endl; std::cerr << file << "( " << line << ")- " << cudaGetErrorString(error) << std::endl; exit(EXIT_FAILURE); } } using namespace std; struct Point { float dimensions[128]; int cluster; int noise; //-1 noise; //string img; }; float eps;//neighborhood radius int min_nb; int n; Point *host_sample; int block_num = 96; int thread_num = 32; float __device__ dev_euclidean_distance(const Point &src, const Point &dest) { float res = 0.0; for(int i=0; i<128; i++){ res += (src.dimensions[i] - dest.dimensions[i]) * (src.dimensions[i] - dest.dimensions[i]); } return sqrt(res); } /*to get the total list*/ void __global__ dev_region_query(Point* sample, int num, int* neighbors, float eps, int min_nb) { unsigned int tid = blockIdx.x * blockDim.x + threadIdx.x; unsigned int line,col,pointer = tid; unsigned int count; while (pointer < num * num) {//全场唯一id line = pointer / num; col = pointer % num; float radius; if (line <= col) { radius = dev_euclidean_distance(sample[line], sample[col]); if (radius <= eps) { neighbors[pointer] = 1; } neighbors[col * num + line] = neighbors[pointer];//对角线 } pointer += blockDim.x * gridDim.x; } __syncthreads(); pointer = tid; while (pointer < num) { count = 1; line = pointer * num; for (int i = 0; i < num; i++) { if (pointer != i && neighbors[line+i]) {//包含p点邻域元素个数 count++; } } if (count >= min_nb) { sample[pointer].noise++; } pointer += blockDim.x * gridDim.x; } } void host_algorithm_dbscan() { int num = n; /*sample*/ Point* cuda_sample; CHECK_ERROR(cudaMalloc((void**)&cuda_sample, num * sizeof(Point))); CHECK_ERROR(cudaMemcpy(cuda_sample, host_sample, num * sizeof(Point), cudaMemcpyHostToDevice)); /*neighbor list*/ int *host_neighbor = new int[num*num](); int *dev_neighbor; CHECK_ERROR(cudaMalloc((void**)&dev_neighbor, num * num * sizeof(int))); dev_region_query << <block_num, thread_num >> > (cuda_sample, num, dev_neighbor, eps, min_nb); cudaDeviceSynchronize(); CHECK_STATE("kernel call"); CHECK_ERROR(cudaMemcpy(host_sample, cuda_sample, num * sizeof(Point), cudaMemcpyDeviceToHost)); CHECK_ERROR(cudaMemcpy(host_neighbor, dev_neighbor, num * num * sizeof(int), cudaMemcpyDeviceToHost)); cudaFree(cuda_sample); cudaFree(dev_neighbor); queue<int> expand; int cur_cluster = 0; for (int i = 0; i < num; i++) { if (host_sample[i].noise >= 0 && host_sample[i].cluster < 1) { host_sample[i].cluster = ++cur_cluster; int src = i * num; for (int j = 0; j < num; j++) { if (host_neighbor[src + j]) { host_sample[j].cluster = cur_cluster; expand.push(j); } 
} while (!expand.empty()) {/*expand the cluster*/ if (host_sample[expand.front()].noise >= 0) { src = expand.front() * num; for (int j = 0; j < num; j++) { if (host_neighbor[src + j] && host_sample[j].cluster < 1) { host_sample[j].cluster = cur_cluster; expand.push(j); } } } expand.pop(); } } } } // 读取文件行数 int countLines(const char *filename){ ifstream fin(filename, ios::in); int n=0; string lineStr; while(getline(fin, lineStr)) n++; return n; } //删除字符串中空格,制表符tab等无效字符 string Trim(string& str) { //str.find_first_not_of(" \t\r\n"),在字符串str中从索引0开始,返回首次不匹配"\t\r\n"的位置 str.erase(0, str.find_first_not_of(" \t\r\n")); str.erase(str.find_last_not_of(" \t\r\n") + 1); return str; } JNIEXPORT jboolean JNICALL Java_intellif_minning_dbscan_impl_DBSCANImpl_initDatasFromFile (JNIEnv *env, jobject obj, jstring jfile, jint jcount) { const char *file = env->GetStringUTFChars(jfile, NULL);// 从java 得到file n = (int)jcount;// 从java 得到 count try { host_sample = new Point[n];// 分配数组空间 } catch (const std::exception& e) { cerr << "alloca arrays exception: " << e.what() << endl; exit(EXIT_FAILURE); } ifstream fin(file); //打开文件流操作 if (!fin) { cout << "file not found" << endl; exit(EXIT_FAILURE); } string line; int point_count = 0; while (getline(fin, line)) //整行读取,换行符“\n”区分,遇到文件尾标志eof终止读取 { istringstream sin(line); //将整行字符串line读入到字符串流istringstream中 vector<string> fields; //声明一个字符串向量 string field; while (getline(sin, field, ',')) //将字符串流sin中的字符读入到field字符串中,以逗号为分隔符 { fields.push_back(field); //将刚刚读取的字符串添加到向量fields中 } string alls = Trim(fields[0]); // 文件中每行都是一个字符串 //清除掉向量fields中第一个元素的无效字符,并赋值给变量features(特征值字符串) size_t pos = alls.find(" "); string features = alls.substr(pos + 1);// 特征值转换,并初始化temp,此时的temp顺序是按照文件中读取的顺序 istringstream featurestream(features); string feature; int dims = 0; while (getline(featurestream, feature, ' ')) { host_sample[point_count].dimensions[dims++] = stof(feature); } //host_sample[point_count].img = Trim(fields[1]); //清除掉向量fields中第二个元素的无效字符,并赋值给变量img host_sample[point_count].noise = -1; host_sample[point_count].cluster = -1; point_count++; if (point_count >= n) { break; } } env->ReleaseStringUTFChars(jfile, file); cout << "init points from file success" << endl; return (jboolean)true; } // dbscan JNIEXPORT void JNICALL Java_intellif_minning_dbscan_impl_DBSCANImpl_runDBSCAN (JNIEnv *env, jobject obj, jfloat jeps, jint jminPts) { eps = (float)jeps; min_nb = (int)jminPts; clock_t start, finish; start = clock(); // 聚类 host_algorithm_dbscan(); finish = clock(); cout << "dbscan success" << endl; cout << n << " speed time: " << (finish - start)*1.0 / CLOCKS_PER_SEC << "s\n" << endl; } JNIEXPORT jstring JNICALL Java_intellif_minning_dbscan_impl_DBSCANImpl_saveDBSCAN (JNIEnv *env, jobject obj) { ofstream fout; char resultFile[128]; sprintf(resultFile, "%d_result.csv", n); fout.open(resultFile); for (int i = 0; i < n; i++) { fout << i << "," << host_sample[i].cluster << endl; } fout.close(); // 释放内存 delete []host_sample; cout << "save result success" << endl; return env->NewStringUTF(resultFile); }
86a614b657f292fdd3a9b024c7c5cf04bf6a3e2b.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "includes.h" extern "C" { #ifndef REAL #define REAL float #endif #ifndef CAST #define CAST(fun) fun ## f #endif #ifndef REAL2o3 #define REAL2o3 (REAL)0.6666666666666667 #endif #ifndef REAL3o2 #define REAL3o2 (REAL)1.5 #endif } __global__ void vector_tanh (const int n, const REAL* x, const int offset_x, const int stride_x, REAL* y, const int offset_y, const int stride_y) { const int gid = blockIdx.x * blockDim.x + threadIdx.x; if (gid < n) { y[offset_y + gid * stride_y] = CAST(tanh)(x[offset_x + gid * stride_x]); } }
86a614b657f292fdd3a9b024c7c5cf04bf6a3e2b.cu
#include "includes.h" extern "C" { #ifndef REAL #define REAL float #endif #ifndef CAST #define CAST(fun) fun ## f #endif #ifndef REAL2o3 #define REAL2o3 (REAL)0.6666666666666667 #endif #ifndef REAL3o2 #define REAL3o2 (REAL)1.5 #endif } __global__ void vector_tanh (const int n, const REAL* x, const int offset_x, const int stride_x, REAL* y, const int offset_y, const int stride_y) { const int gid = blockIdx.x * blockDim.x + threadIdx.x; if (gid < n) { y[offset_y + gid * stride_y] = CAST(tanh)(x[offset_x + gid * stride_x]); } }
1e676eee1d4e5879fea4a053a4cbc4f8acd0208c.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // ---------------------------------------------------------------------------- // - Open3D: www.open3d.org - // ---------------------------------------------------------------------------- // The MIT License (MIT) // // Copyright (c) 2018-2021 www.open3d.org // // Permission is hereby granted, free of charge, to any person obtaining a copy // of this software and associated documentation files (the "Software"), to deal // in the Software without restriction, including without limitation the rights // to use, copy, modify, merge, publish, distribute, sublicense, and/or sell // copies of the Software, and to permit persons to whom the Software is // furnished to do so, subject to the following conditions: // // The above copyright notice and this permission notice shall be included in // all copies or substantial portions of the Software. // // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING // FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS // IN THE SOFTWARE. // ---------------------------------------------------------------------------- //***************************************************************************************/ // // Based on Pointnet2 Library (MIT License): // https://github.com/sshaoshuai/Pointnet2.PyTorch // // Copyright (c) 2019 Shaoshuai Shi // // Permission is hereby granted, free of charge, to any person obtaining a // copy of this software and associated documentation files (the "Software"), // to deal in the Software without restriction, including without limitation // the rights to use, copy, modify, merge, publish, distribute, sublicense, // and/or sell copies of the Software, and to permit persons to whom the // Software is furnished to do so, subject to the following conditions: // // The above copyright notice and this permission notice shall be included in // all copies or substantial portions of the Software. // // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL // THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING // FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER // DEALINGS IN THE SOFTWARE. 
// //***************************************************************************************/ #include <math.h> #include <stdio.h> #include <stdlib.h> #include "ATen/hip/HIPContext.h" #include "open3d/ml/contrib/BallQuery.cuh" #include "open3d/ml/contrib/cuda_utils.h" #include "open3d/ml/pytorch/pointnet/BallQueryKernel.h" using namespace open3d::ml::contrib; void ball_query_launcher(int b, int n, int m, float radius, int nsample, const float *new_xyz, const float *xyz, int *idx) { // new_xyz: (B, M, 3) // xyz: (B, N, 3) // output: // idx: (B, M, nsample) hipError_t err; auto stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA(); dim3 blocks(DIVUP(m, THREADS_PER_BLOCK), b); // blockIdx.x(col), blockIdx.y(row) dim3 threads(THREADS_PER_BLOCK); hipLaunchKernelGGL(( ball_query_kernel), dim3(blocks), dim3(threads), 0, stream, b, n, m, radius, nsample, new_xyz, xyz, idx); // hipDeviceSynchronize(); // for using printf in kernel function err = hipGetLastError(); if (hipSuccess != err) { fprintf(stderr, "CUDA kernel failed : %s\n", hipGetErrorString(err)); exit(-1); } }
1e676eee1d4e5879fea4a053a4cbc4f8acd0208c.cu
// ---------------------------------------------------------------------------- // - Open3D: www.open3d.org - // ---------------------------------------------------------------------------- // The MIT License (MIT) // // Copyright (c) 2018-2021 www.open3d.org // // Permission is hereby granted, free of charge, to any person obtaining a copy // of this software and associated documentation files (the "Software"), to deal // in the Software without restriction, including without limitation the rights // to use, copy, modify, merge, publish, distribute, sublicense, and/or sell // copies of the Software, and to permit persons to whom the Software is // furnished to do so, subject to the following conditions: // // The above copyright notice and this permission notice shall be included in // all copies or substantial portions of the Software. // // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING // FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS // IN THE SOFTWARE. // ---------------------------------------------------------------------------- //***************************************************************************************/ // // Based on Pointnet2 Library (MIT License): // https://github.com/sshaoshuai/Pointnet2.PyTorch // // Copyright (c) 2019 Shaoshuai Shi // // Permission is hereby granted, free of charge, to any person obtaining a // copy of this software and associated documentation files (the "Software"), // to deal in the Software without restriction, including without limitation // the rights to use, copy, modify, merge, publish, distribute, sublicense, // and/or sell copies of the Software, and to permit persons to whom the // Software is furnished to do so, subject to the following conditions: // // The above copyright notice and this permission notice shall be included in // all copies or substantial portions of the Software. // // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL // THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING // FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER // DEALINGS IN THE SOFTWARE. 
// //***************************************************************************************/ #include <math.h> #include <stdio.h> #include <stdlib.h> #include "ATen/cuda/CUDAContext.h" #include "open3d/ml/contrib/BallQuery.cuh" #include "open3d/ml/contrib/cuda_utils.h" #include "open3d/ml/pytorch/pointnet/BallQueryKernel.h" using namespace open3d::ml::contrib; void ball_query_launcher(int b, int n, int m, float radius, int nsample, const float *new_xyz, const float *xyz, int *idx) { // new_xyz: (B, M, 3) // xyz: (B, N, 3) // output: // idx: (B, M, nsample) cudaError_t err; auto stream = at::cuda::getCurrentCUDAStream(); dim3 blocks(DIVUP(m, THREADS_PER_BLOCK), b); // blockIdx.x(col), blockIdx.y(row) dim3 threads(THREADS_PER_BLOCK); ball_query_kernel<<<blocks, threads, 0, stream>>>(b, n, m, radius, nsample, new_xyz, xyz, idx); // cudaDeviceSynchronize(); // for using printf in kernel function err = cudaGetLastError(); if (cudaSuccess != err) { fprintf(stderr, "CUDA kernel failed : %s\n", cudaGetErrorString(err)); exit(-1); } }
ab2b83991ad771ed9b5e3f38eb56ff6acb3f3c05.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <iostream> #include <string> #include <vector> #include <algorithm> #include <numeric> #include <opencv2/core/core.hpp> #include <opencv2/imgproc/imgproc.hpp> //#include <opencv2/core/opengl_interop.hpp> #include <opencv2/core/cuda_devptrs.hpp> #include <opencv2/gpu/gpu.hpp> #include <opencv2/highgui/highgui.hpp> #include <opencv2/contrib/contrib.hpp> #include <opencv2/gpu/stream_accessor.hpp> #include <opencv2/gpu/gpumat.hpp> #include <time.h> #include "parameters.h" #define WIDTH2 1920 #define HEIGHT2 1920 #define FINALWIDTH 3874 #define FINALHEIGHT 1920 #define LEFT1 1261 #define LEFT2 1909 #define LEFT3 2564 #define RIGHT 660 #define MIDDLEWIDTH 50 using namespace cv; using namespace cv::gpu; #include<iostream> using namespace std; __global__ void stitch_kernel(const PtrStepSz<uchar3> src1, const PtrStepSz<uchar3> src2, PtrStep<uchar3> dst, int limit, int left) { int abs_x = threadIdx.x + blockDim.x * blockIdx.x; int abs_y = threadIdx.y + blockDim.y * blockIdx.y; if (abs_y < 0 || abs_y >= limit || abs_x < 0 || abs_x >= MIDDLEWIDTH) { return; } int pointOnLeft = abs_x + left; int pointOnRight = RIGHT - MIDDLEWIDTH + abs_x; uchar3 value1 = src1(abs_y, pointOnLeft); uchar3 value2 = src2(abs_y, pointOnRight); unsigned char newRed = (((left + MIDDLEWIDTH) - pointOnLeft) / (MIDDLEWIDTH*1.0)) * value1.x + ((pointOnLeft - left) / (MIDDLEWIDTH*1.0)) * value2.x; unsigned char newGreen = (((left + MIDDLEWIDTH) - pointOnLeft) / (MIDDLEWIDTH*1.0)) * value1.y + ((pointOnLeft - left) / (MIDDLEWIDTH*1.0)) * value2.y; unsigned char newBlue = (((left + MIDDLEWIDTH) - pointOnLeft) / (MIDDLEWIDTH*1.0)) * value1.z + ((pointOnLeft - left) / (MIDDLEWIDTH*1.0)) * value2.z; dst(abs_y, pointOnLeft) = make_uchar3(newRed, newGreen, newBlue); } void stitch_caller(const PtrStepSz<uchar3>& src1, const PtrStepSz<uchar3>& src2, PtrStep<uchar3> dst, int limit, int left, hipStream_t stream) { int blockWidth = 32; int blockHeight = 8; dim3 blockSize(blockWidth, blockHeight); int blocksY = src1.rows / blockHeight; int blocksX = src1.cols / blockWidth; const dim3 gridSize(blocksX, blocksY); stitch_kernel << <gridSize, blockSize, 0, stream >> >(src1, src2, dst, limit, left); if (stream == 0) { hipDeviceSynchronize(); } } void stitch(const GpuMat& src1, const GpuMat& src2, GpuMat& dst, int limit, int left, Stream& stream = Stream::Null()) { CV_Assert(src1.type() == CV_8UC3); CV_Assert(src2.type() == CV_8UC3); hipStream_t s = StreamAccessor::getStream(stream); stitch_caller(src1, src2, dst, limit, left, s); } int main(int argc, char* argv[]) { const string fname = "D:\\Documents\\Movies\\out1.avi"; const string fname2 = "D:\\Documents\\Movies\\out2.avi"; const string fname3 = "D:\\Documents\\Movies\\out3.avi"; const string fname4 = "D:\\Documents\\Movies\\out4.avi"; std::string fname5 = "D:\\Documents\\Movies\\Project\\attempt3\\1.avi"; const string fname6 = "D:\\Documents\\Movies\\Project\\attempt3\\2.avi"; const string fname7 = "D:\\Documents\\Movies\\Project\\attempt3\\3.avi"; const string fname8 = "D:\\Documents\\Movies\\Project\\attempt3\\4.avi"; time_t finish, start; //string mm = cv::getBuildInformation(); //cout << mm << endl; //cv::namedWindow("CPU", cv::WINDOW_NORMAL); //cv::namedWindow("GPU", cv::WINDOW_OPENGL); //gpu::setGlDevice(); Mat frame,frame2,frame3,frame4; Mat framecorrected(HEIGHT2, WIDTH2, CV_8UC3, Scalar(0, 0, 0)); Mat framecorrected2(HEIGHT2, WIDTH2, CV_8UC3, Scalar(0, 0, 0)); Mat framecorrected3(HEIGHT2, 
WIDTH2, CV_8UC3, Scalar(0, 0, 0)); Mat framecorrected4(HEIGHT2, WIDTH2, CV_8UC3, Scalar(0, 0, 0)); GpuMat frame_d, frame2_d, frame3_d, frame4_d; GpuMat framecorrected_d, framecorrected2_d, framecorrected3_d, framecorrected4_d; Mat result(1566, 640, CV_8UC3, Scalar(0, 0, 0)); cv::VideoCapture reader(fname5); cv::VideoCapture reader2(fname6); cv::VideoCapture reader3(fname7); cv::VideoCapture reader4(fname8); CvSize frameSize = cvSize(int(reader.get(CV_CAP_PROP_FRAME_WIDTH)), int(reader.get(CV_CAP_PROP_FRAME_HEIGHT))); printf("height:%d\n", frameSize.height); printf("width:%d\n", frameSize.width); double count = reader.get(CV_CAP_PROP_FRAME_COUNT); printf("number of frames = %d\n", count); int n = 0; //reader.set(CV_CAP_PROP_POS_FRAMES, count - 1); // cv::gpu::GpuMat d_frame; // cv::gpu::VideoReader_GPU d_reader(fname); //d_reader.dumpFormat(std::cout); cv::TickMeter tm; std::vector<double> cpu_times; // std::vector<double> gpu_times; cv::Mat x_map2(HEIGHT2, WIDTH2, CV_32FC1, &_x_map_1920); cv::Mat y_map2(HEIGHT2, WIDTH2, CV_32FC1, &_y_map_1920); GpuMat x_map2_d, y_map2_d; x_map2_d.upload(x_map2); y_map2_d.upload(y_map2); for (;;) { tm.reset(); tm.start(); if (!reader.read(frame) || !reader2.read(frame2) || !reader3.read(frame3) || !reader4.read(frame4)) break; /*if (n == 0) { cv::imwrite("D:\\Documents\\Images\\attempt3_5.jpg",frame); } n++;*/ tm.stop(); cpu_times.push_back(tm.getTimeMilli()); // tm.reset(); tm.start(); //if (!d_reader.read(d_frame)) //break; //tm.stop(); //gpu_times.push_back(tm.getTimeMilli()); time(&start); frame_d.upload(frame); frame2_d.upload(frame2); frame3_d.upload(frame3); frame4_d.upload(frame4); remap(frame_d, framecorrected_d, x_map2_d, y_map2_d, INTER_LINEAR); remap(frame2_d, framecorrected2_d, x_map2_d, y_map2_d, INTER_LINEAR); remap(frame3_d, framecorrected3_d, x_map2_d, y_map2_d, INTER_LINEAR); remap(frame4_d, framecorrected4_d, x_map2_d, y_map2_d, INTER_LINEAR); // int width = LEFT1 + MIDDLEWIDTH + framecorrected.cols - RIGHT; Mat result(FINALHEIGHT, FINALWIDTH, CV_8UC3, Scalar(0, 0, 0)); //GpuMat combine(1920, width, CV_8UC3, Scalar(0, 0, 0)); GpuMat result_d(FINALHEIGHT, FINALWIDTH, CV_8UC3, Scalar(0, 0, 0)); GpuMat left_roi_d(result_d, Rect(0, 0, LEFT1, framecorrected_d.size().height)); Size middleSize(MIDDLEWIDTH, framecorrected_d.size().height); GpuMat middle_zone_d(middleSize, CV_8UC3, Scalar(0, 0, 0)); GpuMat croppedImage1; Rect Roi1(0, 0, LEFT1, framecorrected_d.size().height); croppedImage1 = framecorrected_d(Roi1); croppedImage1.copyTo(left_roi_d); GpuMat second_roi_d(result_d,Rect(LEFT1+MIDDLEWIDTH,0,framecorrected2_d.cols-RIGHT,framecorrected2_d.rows)); GpuMat croppedImage2; Rect Roi2(RIGHT, 0, framecorrected2_d.cols - RIGHT, framecorrected2_d.rows); croppedImage2 = framecorrected2_d(Roi2); croppedImage2.copyTo(second_roi_d); stitch(framecorrected_d, framecorrected2_d, result_d, framecorrected_d.rows, LEFT1); GpuMat third_roi_d(result_d, Rect(LEFT2 + MIDDLEWIDTH, 0, framecorrected3_d.cols - RIGHT, framecorrected3_d.rows)); GpuMat croppedImage3; croppedImage3 = framecorrected3_d(Roi2); croppedImage3.copyTo(third_roi_d); stitch(result_d, framecorrected3_d, result_d, result_d.rows, LEFT2); GpuMat fourth_roi_d(result_d, Rect(LEFT3 + MIDDLEWIDTH, 0, framecorrected4_d.cols - RIGHT, framecorrected4_d.rows)); GpuMat croppedImage4 = framecorrected4_d(Roi2); croppedImage4.copyTo(fourth_roi_d); stitch(result_d, framecorrected4_d, result_d, result_d.rows, LEFT3); result_d.download(result); //framecorrected_d.download(framecorrected); 
//framecorrected2_d.download(framecorrected2); //framecorrected3_d.download(framecorrected3); //framecorrected4_d.download(framecorrected4); time(&finish); /*remap(frame, framecorrected, x_map2, y_map2, INTER_LINEAR); remap(frame2, framecorrected2, x_map2, y_map2, INTER_LINEAR); remap(frame3, framecorrected3, x_map2, y_map2, INTER_LINEAR); remap(frame4, framecorrected4, x_map2, y_map2, INTER_LINEAR);*/ //namedWindow("CPU", WINDOW_NORMAL); //namedWindow("CPU2", WINDOW_NORMAL); //namedWindow("CPU3", WINDOW_NORMAL); //namedWindow("CPU4", WINDOW_NORMAL); //cv::imshow("CPU", framecorrected); //imshow("CPU2", framecorrected2); //imshow("CPU3", framecorrected3); //imshow("CPU4", framecorrected4); namedWindow("result", WINDOW_NORMAL); imshow("result", result); /**Mat left_roi(res, Rect(0, 0, LEFT1, src.size().height)); Size middleSize(MIDDLEWIDTH, src.size().height);//added Mat middleZone(middleSize, CV_8UC3, Scalar(0, 0, 0));//added Mat croppedImage1; // Rect Roi1(0,0,LEFT,image2.rows); Rect Roi1(0, 0, LEFT1, src.rows); // croppedImage1 = image2(Roi1); croppedImage1 = src(Roi1); croppedImage1.copyTo(left_roi); //printf("largest1 = %d, %d,%d\n",(int)largest1,(int)(image1.cols-largest2),image1.rows); // Mat right_roi(combine,Rect(LEFT+MIDDLEWIDTH,0,image1.cols - RIGHT,image1.rows)); Mat second_roi(res, Rect(LEFT1 + MIDDLEWIDTH, 0, src2.cols - RIGHT, src2.rows)); //Mat right_roi(combine,Rect(largest1,0,image1.cols-largest2,image1.rows)); Mat croppedImage2; //Rect Roi2(RIGHT,0,image1.cols-RIGHT,image1.rows); Rect Roi2(RIGHT, 0, src2.cols - RIGHT, src2.rows); //croppedImage2 = image1(Roi2); croppedImage2 = src2(Roi2); // croppedImage2.copyTo(right_roi); croppedImage2.copyTo(second_roi); //imshow("combine",combine); //imwrite("combined.jpg",combine); //Mat_<Vec3b> _orgImg1 = image2; // Mat_<Vec3b> _orgImg2 = image1; Mat_<Vec3b> _orgImg1 = src; Mat_<Vec3b> _orgImg2 = src2; //Mat_<Vec3b> _retImg = combine; Mat_<Vec3b> _retImg = res; //vector<Mat> channels; //split(combine,channels); for (int j = 0; j < res.rows; j++) { for (int i = 0; i < MIDDLEWIDTH; i++) { //int pointOnLeft = i + LEFT; int pointOnLeft = i + LEFT1; int pointOnRight = RIGHT - MIDDLEWIDTH + i; int RedOne = _orgImg1.at<Vec3b>(j, pointOnLeft)[0]; int GreenOne = _orgImg1.at<Vec3b>(j, pointOnLeft)[1]; int YellowOne = _orgImg1.at<Vec3b>(j, pointOnLeft)[2]; int RedTwo = _orgImg2.at<Vec3b>(j, pointOnRight)[0]; int GreenTwo = _orgImg2.at<Vec3b>(j, pointOnRight)[1]; int YellowTwo = _orgImg2.at<Vec3b>(j, pointOnRight)[2]; double newRed = (((LEFT1 + MIDDLEWIDTH) - pointOnLeft) / (MIDDLEWIDTH*1.0)) * RedOne + ((pointOnLeft - LEFT1) / (MIDDLEWIDTH*1.0)) * RedTwo; double newGreen = ((LEFT1 + MIDDLEWIDTH - pointOnLeft) / (MIDDLEWIDTH*1.0)) * GreenOne + ((pointOnLeft - LEFT1) / (MIDDLEWIDTH*1.0)) * GreenTwo; double newYellow = ((LEFT1 + MIDDLEWIDTH - pointOnLeft) / (MIDDLEWIDTH*1.0)) * YellowOne + ((pointOnLeft - LEFT1) / (MIDDLEWIDTH*1.0)) * YellowTwo; // double newRed = (((LEFT+MIDDLEWIDTH) - pointOnLeft)/(MIDDLEWIDTH*1.0)) * RedOne + ((pointOnLeft- LEFT)/(MIDDLEWIDTH*1.0)) * RedTwo; // double newGreen = ((LEFT+MIDDLEWIDTH-pointOnLeft)/(MIDDLEWIDTH*1.0)) * GreenOne + ((pointOnLeft- LEFT1)/(MIDDLEWIDTH*1.0)) * GreenTwo; // double newYellow = ((LEFT+MIDDLEWIDTH-pointOnLeft)/(MIDDLEWIDTH*1.0)) * YellowOne + ((pointOnLeft- LEFT)/(MIDDLEWIDTH*1.0)) * YellowTwo; _retImg.at<Vec3b>(j, pointOnLeft)[0] = (int)newRed; _retImg.at<Vec3b>(j, pointOnLeft)[1] = (int)newGreen; _retImg.at<Vec3b>(j, pointOnLeft)[2] = (int)newYellow; } } Mat third_roi(res, Rect(LEFT2 + 
MIDDLEWIDTH, 0, src3.cols - RIGHT, src3.rows)); Mat croppedImage3 = src3(Roi2); croppedImage3.copyTo(third_roi); _orgImg1 = res; _orgImg2 = src3; _retImg = res; for (int j = 0; j < res.rows; j++) { for (int i = 0; i < MIDDLEWIDTH; i++) { //int pointOnLeft = i + LEFT; int pointOnLeft = i + LEFT2; int pointOnRight = RIGHT - MIDDLEWIDTH + i; int RedOne = _orgImg1.at<Vec3b>(j, pointOnLeft)[0]; int GreenOne = _orgImg1.at<Vec3b>(j, pointOnLeft)[1]; int YellowOne = _orgImg1.at<Vec3b>(j, pointOnLeft)[2]; int RedTwo = _orgImg2.at<Vec3b>(j, pointOnRight)[0]; int GreenTwo = _orgImg2.at<Vec3b>(j, pointOnRight)[1]; int YellowTwo = _orgImg2.at<Vec3b>(j, pointOnRight)[2]; double newRed = (((LEFT2 + MIDDLEWIDTH) - pointOnLeft) / (MIDDLEWIDTH*1.0)) * RedOne + ((pointOnLeft - LEFT2) / (MIDDLEWIDTH*1.0)) * RedTwo; double newGreen = ((LEFT2 + MIDDLEWIDTH - pointOnLeft) / (MIDDLEWIDTH*1.0)) * GreenOne + ((pointOnLeft - LEFT2) / (MIDDLEWIDTH*1.0)) * GreenTwo; double newYellow = ((LEFT2 + MIDDLEWIDTH - pointOnLeft) / (MIDDLEWIDTH*1.0)) * YellowOne + ((pointOnLeft - LEFT2) / (MIDDLEWIDTH*1.0)) * YellowTwo; _retImg.at<Vec3b>(j, pointOnLeft)[0] = (int)newRed; _retImg.at<Vec3b>(j, pointOnLeft)[1] = (int)newGreen; _retImg.at<Vec3b>(j, pointOnLeft)[2] = (int)newYellow; } } Mat fourth_roi(res, Rect(LEFT3 + MIDDLEWIDTH, 0, src4.cols - RIGHT, src4.rows)); Mat croppedImage4 = src4(Roi2); croppedImage4.copyTo(fourth_roi); _orgImg1 = res; _orgImg2 = src4; _retImg = res; for (int j = 0; j < res.rows; j++) { for (int i = 0; i < MIDDLEWIDTH; i++) { //int pointOnLeft = i + LEFT; int pointOnLeft = i + LEFT3; int pointOnRight = RIGHT - MIDDLEWIDTH + i; int RedOne = _orgImg1.at<Vec3b>(j, pointOnLeft)[0]; int GreenOne = _orgImg1.at<Vec3b>(j, pointOnLeft)[1]; int YellowOne = _orgImg1.at<Vec3b>(j, pointOnLeft)[2]; int RedTwo = _orgImg2.at<Vec3b>(j, pointOnRight)[0]; int GreenTwo = _orgImg2.at<Vec3b>(j, pointOnRight)[1]; int YellowTwo = _orgImg2.at<Vec3b>(j, pointOnRight)[2]; double newRed = (((LEFT3 + MIDDLEWIDTH) - pointOnLeft) / (MIDDLEWIDTH*1.0)) * RedOne + ((pointOnLeft - LEFT3) / (MIDDLEWIDTH*1.0)) * RedTwo; double newGreen = ((LEFT3 + MIDDLEWIDTH - pointOnLeft) / (MIDDLEWIDTH*1.0)) * GreenOne + ((pointOnLeft - LEFT3) / (MIDDLEWIDTH*1.0)) * GreenTwo; double newYellow = ((LEFT3 + MIDDLEWIDTH - pointOnLeft) / (MIDDLEWIDTH*1.0)) * YellowOne + ((pointOnLeft - LEFT3) / (MIDDLEWIDTH*1.0)) * YellowTwo; _retImg.at<Vec3b>(j, pointOnLeft)[0] = (int)newRed; _retImg.at<Vec3b>(j, pointOnLeft)[1] = (int)newGreen; _retImg.at<Vec3b>(j, pointOnLeft)[2] = (int)newYellow; } }*/ //cv::imshow("GPU", d_frame); if (cv::waitKey(3) > 0) break; } if (!cpu_times.empty() )//&& !gpu_times.empty()) { std::cout << std::endl << "Results:" << std::endl; std::sort(cpu_times.begin(), cpu_times.end()); // std::sort(gpu_times.begin(), gpu_times.end()); double cpu_avg = std::accumulate(cpu_times.begin(), cpu_times.end(), 0.0) / cpu_times.size(); // double gpu_avg = std::accumulate(gpu_times.begin(), gpu_times.end(), 0.0) / gpu_times.size(); std::cout << "CPU : Avg : " << cpu_avg << " ms FPS : " << 1000.0 / cpu_avg << std::endl; //std::cout << "GPU : Avg : " << gpu_avg << " ms FPS : " << 1000.0 / gpu_avg << std::endl; } printf("difference in time = %d\n", difftime(finish,start)); cvDestroyAllWindows(); system("pause"); return 0; }
ab2b83991ad771ed9b5e3f38eb56ff6acb3f3c05.cu
#include <iostream> #include <string> #include <vector> #include <algorithm> #include <numeric> #include <opencv2/core/core.hpp> #include <opencv2/imgproc/imgproc.hpp> //#include <opencv2/core/opengl_interop.hpp> #include <opencv2/core/cuda_devptrs.hpp> #include <opencv2/gpu/gpu.hpp> #include <opencv2/highgui/highgui.hpp> #include <opencv2/contrib/contrib.hpp> #include <opencv2/gpu/stream_accessor.hpp> #include <opencv2/gpu/gpumat.hpp> #include <time.h> #include "parameters.h" #define WIDTH2 1920 #define HEIGHT2 1920 #define FINALWIDTH 3874 #define FINALHEIGHT 1920 #define LEFT1 1261 #define LEFT2 1909 #define LEFT3 2564 #define RIGHT 660 #define MIDDLEWIDTH 50 using namespace cv; using namespace cv::gpu; #include<iostream> using namespace std; __global__ void stitch_kernel(const PtrStepSz<uchar3> src1, const PtrStepSz<uchar3> src2, PtrStep<uchar3> dst, int limit, int left) { int abs_x = threadIdx.x + blockDim.x * blockIdx.x; int abs_y = threadIdx.y + blockDim.y * blockIdx.y; if (abs_y < 0 || abs_y >= limit || abs_x < 0 || abs_x >= MIDDLEWIDTH) { return; } int pointOnLeft = abs_x + left; int pointOnRight = RIGHT - MIDDLEWIDTH + abs_x; uchar3 value1 = src1(abs_y, pointOnLeft); uchar3 value2 = src2(abs_y, pointOnRight); unsigned char newRed = (((left + MIDDLEWIDTH) - pointOnLeft) / (MIDDLEWIDTH*1.0)) * value1.x + ((pointOnLeft - left) / (MIDDLEWIDTH*1.0)) * value2.x; unsigned char newGreen = (((left + MIDDLEWIDTH) - pointOnLeft) / (MIDDLEWIDTH*1.0)) * value1.y + ((pointOnLeft - left) / (MIDDLEWIDTH*1.0)) * value2.y; unsigned char newBlue = (((left + MIDDLEWIDTH) - pointOnLeft) / (MIDDLEWIDTH*1.0)) * value1.z + ((pointOnLeft - left) / (MIDDLEWIDTH*1.0)) * value2.z; dst(abs_y, pointOnLeft) = make_uchar3(newRed, newGreen, newBlue); } void stitch_caller(const PtrStepSz<uchar3>& src1, const PtrStepSz<uchar3>& src2, PtrStep<uchar3> dst, int limit, int left, cudaStream_t stream) { int blockWidth = 32; int blockHeight = 8; dim3 blockSize(blockWidth, blockHeight); int blocksY = src1.rows / blockHeight; int blocksX = src1.cols / blockWidth; const dim3 gridSize(blocksX, blocksY); stitch_kernel << <gridSize, blockSize, 0, stream >> >(src1, src2, dst, limit, left); if (stream == 0) { cudaDeviceSynchronize(); } } void stitch(const GpuMat& src1, const GpuMat& src2, GpuMat& dst, int limit, int left, Stream& stream = Stream::Null()) { CV_Assert(src1.type() == CV_8UC3); CV_Assert(src2.type() == CV_8UC3); cudaStream_t s = StreamAccessor::getStream(stream); stitch_caller(src1, src2, dst, limit, left, s); } int main(int argc, char* argv[]) { const string fname = "D:\\Documents\\Movies\\out1.avi"; const string fname2 = "D:\\Documents\\Movies\\out2.avi"; const string fname3 = "D:\\Documents\\Movies\\out3.avi"; const string fname4 = "D:\\Documents\\Movies\\out4.avi"; std::string fname5 = "D:\\Documents\\Movies\\Project\\attempt3\\1.avi"; const string fname6 = "D:\\Documents\\Movies\\Project\\attempt3\\2.avi"; const string fname7 = "D:\\Documents\\Movies\\Project\\attempt3\\3.avi"; const string fname8 = "D:\\Documents\\Movies\\Project\\attempt3\\4.avi"; time_t finish, start; //string mm = cv::getBuildInformation(); //cout << mm << endl; //cv::namedWindow("CPU", cv::WINDOW_NORMAL); //cv::namedWindow("GPU", cv::WINDOW_OPENGL); //gpu::setGlDevice(); Mat frame,frame2,frame3,frame4; Mat framecorrected(HEIGHT2, WIDTH2, CV_8UC3, Scalar(0, 0, 0)); Mat framecorrected2(HEIGHT2, WIDTH2, CV_8UC3, Scalar(0, 0, 0)); Mat framecorrected3(HEIGHT2, WIDTH2, CV_8UC3, Scalar(0, 0, 0)); Mat framecorrected4(HEIGHT2, WIDTH2, CV_8UC3, 
Scalar(0, 0, 0)); GpuMat frame_d, frame2_d, frame3_d, frame4_d; GpuMat framecorrected_d, framecorrected2_d, framecorrected3_d, framecorrected4_d; Mat result(1566, 640, CV_8UC3, Scalar(0, 0, 0)); cv::VideoCapture reader(fname5); cv::VideoCapture reader2(fname6); cv::VideoCapture reader3(fname7); cv::VideoCapture reader4(fname8); CvSize frameSize = cvSize(int(reader.get(CV_CAP_PROP_FRAME_WIDTH)), int(reader.get(CV_CAP_PROP_FRAME_HEIGHT))); printf("height:%d\n", frameSize.height); printf("width:%d\n", frameSize.width); double count = reader.get(CV_CAP_PROP_FRAME_COUNT); printf("number of frames = %d\n", count); int n = 0; //reader.set(CV_CAP_PROP_POS_FRAMES, count - 1); // cv::gpu::GpuMat d_frame; // cv::gpu::VideoReader_GPU d_reader(fname); //d_reader.dumpFormat(std::cout); cv::TickMeter tm; std::vector<double> cpu_times; // std::vector<double> gpu_times; cv::Mat x_map2(HEIGHT2, WIDTH2, CV_32FC1, &_x_map_1920); cv::Mat y_map2(HEIGHT2, WIDTH2, CV_32FC1, &_y_map_1920); GpuMat x_map2_d, y_map2_d; x_map2_d.upload(x_map2); y_map2_d.upload(y_map2); for (;;) { tm.reset(); tm.start(); if (!reader.read(frame) || !reader2.read(frame2) || !reader3.read(frame3) || !reader4.read(frame4)) break; /*if (n == 0) { cv::imwrite("D:\\Documents\\Images\\attempt3_5.jpg",frame); } n++;*/ tm.stop(); cpu_times.push_back(tm.getTimeMilli()); // tm.reset(); tm.start(); //if (!d_reader.read(d_frame)) //break; //tm.stop(); //gpu_times.push_back(tm.getTimeMilli()); time(&start); frame_d.upload(frame); frame2_d.upload(frame2); frame3_d.upload(frame3); frame4_d.upload(frame4); remap(frame_d, framecorrected_d, x_map2_d, y_map2_d, INTER_LINEAR); remap(frame2_d, framecorrected2_d, x_map2_d, y_map2_d, INTER_LINEAR); remap(frame3_d, framecorrected3_d, x_map2_d, y_map2_d, INTER_LINEAR); remap(frame4_d, framecorrected4_d, x_map2_d, y_map2_d, INTER_LINEAR); // int width = LEFT1 + MIDDLEWIDTH + framecorrected.cols - RIGHT; Mat result(FINALHEIGHT, FINALWIDTH, CV_8UC3, Scalar(0, 0, 0)); //GpuMat combine(1920, width, CV_8UC3, Scalar(0, 0, 0)); GpuMat result_d(FINALHEIGHT, FINALWIDTH, CV_8UC3, Scalar(0, 0, 0)); GpuMat left_roi_d(result_d, Rect(0, 0, LEFT1, framecorrected_d.size().height)); Size middleSize(MIDDLEWIDTH, framecorrected_d.size().height); GpuMat middle_zone_d(middleSize, CV_8UC3, Scalar(0, 0, 0)); GpuMat croppedImage1; Rect Roi1(0, 0, LEFT1, framecorrected_d.size().height); croppedImage1 = framecorrected_d(Roi1); croppedImage1.copyTo(left_roi_d); GpuMat second_roi_d(result_d,Rect(LEFT1+MIDDLEWIDTH,0,framecorrected2_d.cols-RIGHT,framecorrected2_d.rows)); GpuMat croppedImage2; Rect Roi2(RIGHT, 0, framecorrected2_d.cols - RIGHT, framecorrected2_d.rows); croppedImage2 = framecorrected2_d(Roi2); croppedImage2.copyTo(second_roi_d); stitch(framecorrected_d, framecorrected2_d, result_d, framecorrected_d.rows, LEFT1); GpuMat third_roi_d(result_d, Rect(LEFT2 + MIDDLEWIDTH, 0, framecorrected3_d.cols - RIGHT, framecorrected3_d.rows)); GpuMat croppedImage3; croppedImage3 = framecorrected3_d(Roi2); croppedImage3.copyTo(third_roi_d); stitch(result_d, framecorrected3_d, result_d, result_d.rows, LEFT2); GpuMat fourth_roi_d(result_d, Rect(LEFT3 + MIDDLEWIDTH, 0, framecorrected4_d.cols - RIGHT, framecorrected4_d.rows)); GpuMat croppedImage4 = framecorrected4_d(Roi2); croppedImage4.copyTo(fourth_roi_d); stitch(result_d, framecorrected4_d, result_d, result_d.rows, LEFT3); result_d.download(result); //framecorrected_d.download(framecorrected); //framecorrected2_d.download(framecorrected2); //framecorrected3_d.download(framecorrected3); 
//framecorrected4_d.download(framecorrected4); time(&finish); /*remap(frame, framecorrected, x_map2, y_map2, INTER_LINEAR); remap(frame2, framecorrected2, x_map2, y_map2, INTER_LINEAR); remap(frame3, framecorrected3, x_map2, y_map2, INTER_LINEAR); remap(frame4, framecorrected4, x_map2, y_map2, INTER_LINEAR);*/ //namedWindow("CPU", WINDOW_NORMAL); //namedWindow("CPU2", WINDOW_NORMAL); //namedWindow("CPU3", WINDOW_NORMAL); //namedWindow("CPU4", WINDOW_NORMAL); //cv::imshow("CPU", framecorrected); //imshow("CPU2", framecorrected2); //imshow("CPU3", framecorrected3); //imshow("CPU4", framecorrected4); namedWindow("result", WINDOW_NORMAL); imshow("result", result); /**Mat left_roi(res, Rect(0, 0, LEFT1, src.size().height)); Size middleSize(MIDDLEWIDTH, src.size().height);//added Mat middleZone(middleSize, CV_8UC3, Scalar(0, 0, 0));//added Mat croppedImage1; // Rect Roi1(0,0,LEFT,image2.rows); Rect Roi1(0, 0, LEFT1, src.rows); // croppedImage1 = image2(Roi1); croppedImage1 = src(Roi1); croppedImage1.copyTo(left_roi); //printf("largest1 = %d, %d,%d\n",(int)largest1,(int)(image1.cols-largest2),image1.rows); // Mat right_roi(combine,Rect(LEFT+MIDDLEWIDTH,0,image1.cols - RIGHT,image1.rows)); Mat second_roi(res, Rect(LEFT1 + MIDDLEWIDTH, 0, src2.cols - RIGHT, src2.rows)); //Mat right_roi(combine,Rect(largest1,0,image1.cols-largest2,image1.rows)); Mat croppedImage2; //Rect Roi2(RIGHT,0,image1.cols-RIGHT,image1.rows); Rect Roi2(RIGHT, 0, src2.cols - RIGHT, src2.rows); //croppedImage2 = image1(Roi2); croppedImage2 = src2(Roi2); // croppedImage2.copyTo(right_roi); croppedImage2.copyTo(second_roi); //imshow("combine",combine); //imwrite("combined.jpg",combine); //Mat_<Vec3b> _orgImg1 = image2; // Mat_<Vec3b> _orgImg2 = image1; Mat_<Vec3b> _orgImg1 = src; Mat_<Vec3b> _orgImg2 = src2; //Mat_<Vec3b> _retImg = combine; Mat_<Vec3b> _retImg = res; //vector<Mat> channels; //split(combine,channels); for (int j = 0; j < res.rows; j++) { for (int i = 0; i < MIDDLEWIDTH; i++) { //int pointOnLeft = i + LEFT; int pointOnLeft = i + LEFT1; int pointOnRight = RIGHT - MIDDLEWIDTH + i; int RedOne = _orgImg1.at<Vec3b>(j, pointOnLeft)[0]; int GreenOne = _orgImg1.at<Vec3b>(j, pointOnLeft)[1]; int YellowOne = _orgImg1.at<Vec3b>(j, pointOnLeft)[2]; int RedTwo = _orgImg2.at<Vec3b>(j, pointOnRight)[0]; int GreenTwo = _orgImg2.at<Vec3b>(j, pointOnRight)[1]; int YellowTwo = _orgImg2.at<Vec3b>(j, pointOnRight)[2]; double newRed = (((LEFT1 + MIDDLEWIDTH) - pointOnLeft) / (MIDDLEWIDTH*1.0)) * RedOne + ((pointOnLeft - LEFT1) / (MIDDLEWIDTH*1.0)) * RedTwo; double newGreen = ((LEFT1 + MIDDLEWIDTH - pointOnLeft) / (MIDDLEWIDTH*1.0)) * GreenOne + ((pointOnLeft - LEFT1) / (MIDDLEWIDTH*1.0)) * GreenTwo; double newYellow = ((LEFT1 + MIDDLEWIDTH - pointOnLeft) / (MIDDLEWIDTH*1.0)) * YellowOne + ((pointOnLeft - LEFT1) / (MIDDLEWIDTH*1.0)) * YellowTwo; // double newRed = (((LEFT+MIDDLEWIDTH) - pointOnLeft)/(MIDDLEWIDTH*1.0)) * RedOne + ((pointOnLeft- LEFT)/(MIDDLEWIDTH*1.0)) * RedTwo; // double newGreen = ((LEFT+MIDDLEWIDTH-pointOnLeft)/(MIDDLEWIDTH*1.0)) * GreenOne + ((pointOnLeft- LEFT1)/(MIDDLEWIDTH*1.0)) * GreenTwo; // double newYellow = ((LEFT+MIDDLEWIDTH-pointOnLeft)/(MIDDLEWIDTH*1.0)) * YellowOne + ((pointOnLeft- LEFT)/(MIDDLEWIDTH*1.0)) * YellowTwo; _retImg.at<Vec3b>(j, pointOnLeft)[0] = (int)newRed; _retImg.at<Vec3b>(j, pointOnLeft)[1] = (int)newGreen; _retImg.at<Vec3b>(j, pointOnLeft)[2] = (int)newYellow; } } Mat third_roi(res, Rect(LEFT2 + MIDDLEWIDTH, 0, src3.cols - RIGHT, src3.rows)); Mat croppedImage3 = src3(Roi2); 
croppedImage3.copyTo(third_roi); _orgImg1 = res; _orgImg2 = src3; _retImg = res; for (int j = 0; j < res.rows; j++) { for (int i = 0; i < MIDDLEWIDTH; i++) { //int pointOnLeft = i + LEFT; int pointOnLeft = i + LEFT2; int pointOnRight = RIGHT - MIDDLEWIDTH + i; int RedOne = _orgImg1.at<Vec3b>(j, pointOnLeft)[0]; int GreenOne = _orgImg1.at<Vec3b>(j, pointOnLeft)[1]; int YellowOne = _orgImg1.at<Vec3b>(j, pointOnLeft)[2]; int RedTwo = _orgImg2.at<Vec3b>(j, pointOnRight)[0]; int GreenTwo = _orgImg2.at<Vec3b>(j, pointOnRight)[1]; int YellowTwo = _orgImg2.at<Vec3b>(j, pointOnRight)[2]; double newRed = (((LEFT2 + MIDDLEWIDTH) - pointOnLeft) / (MIDDLEWIDTH*1.0)) * RedOne + ((pointOnLeft - LEFT2) / (MIDDLEWIDTH*1.0)) * RedTwo; double newGreen = ((LEFT2 + MIDDLEWIDTH - pointOnLeft) / (MIDDLEWIDTH*1.0)) * GreenOne + ((pointOnLeft - LEFT2) / (MIDDLEWIDTH*1.0)) * GreenTwo; double newYellow = ((LEFT2 + MIDDLEWIDTH - pointOnLeft) / (MIDDLEWIDTH*1.0)) * YellowOne + ((pointOnLeft - LEFT2) / (MIDDLEWIDTH*1.0)) * YellowTwo; _retImg.at<Vec3b>(j, pointOnLeft)[0] = (int)newRed; _retImg.at<Vec3b>(j, pointOnLeft)[1] = (int)newGreen; _retImg.at<Vec3b>(j, pointOnLeft)[2] = (int)newYellow; } } Mat fourth_roi(res, Rect(LEFT3 + MIDDLEWIDTH, 0, src4.cols - RIGHT, src4.rows)); Mat croppedImage4 = src4(Roi2); croppedImage4.copyTo(fourth_roi); _orgImg1 = res; _orgImg2 = src4; _retImg = res; for (int j = 0; j < res.rows; j++) { for (int i = 0; i < MIDDLEWIDTH; i++) { //int pointOnLeft = i + LEFT; int pointOnLeft = i + LEFT3; int pointOnRight = RIGHT - MIDDLEWIDTH + i; int RedOne = _orgImg1.at<Vec3b>(j, pointOnLeft)[0]; int GreenOne = _orgImg1.at<Vec3b>(j, pointOnLeft)[1]; int YellowOne = _orgImg1.at<Vec3b>(j, pointOnLeft)[2]; int RedTwo = _orgImg2.at<Vec3b>(j, pointOnRight)[0]; int GreenTwo = _orgImg2.at<Vec3b>(j, pointOnRight)[1]; int YellowTwo = _orgImg2.at<Vec3b>(j, pointOnRight)[2]; double newRed = (((LEFT3 + MIDDLEWIDTH) - pointOnLeft) / (MIDDLEWIDTH*1.0)) * RedOne + ((pointOnLeft - LEFT3) / (MIDDLEWIDTH*1.0)) * RedTwo; double newGreen = ((LEFT3 + MIDDLEWIDTH - pointOnLeft) / (MIDDLEWIDTH*1.0)) * GreenOne + ((pointOnLeft - LEFT3) / (MIDDLEWIDTH*1.0)) * GreenTwo; double newYellow = ((LEFT3 + MIDDLEWIDTH - pointOnLeft) / (MIDDLEWIDTH*1.0)) * YellowOne + ((pointOnLeft - LEFT3) / (MIDDLEWIDTH*1.0)) * YellowTwo; _retImg.at<Vec3b>(j, pointOnLeft)[0] = (int)newRed; _retImg.at<Vec3b>(j, pointOnLeft)[1] = (int)newGreen; _retImg.at<Vec3b>(j, pointOnLeft)[2] = (int)newYellow; } }*/ //cv::imshow("GPU", d_frame); if (cv::waitKey(3) > 0) break; } if (!cpu_times.empty() )//&& !gpu_times.empty()) { std::cout << std::endl << "Results:" << std::endl; std::sort(cpu_times.begin(), cpu_times.end()); // std::sort(gpu_times.begin(), gpu_times.end()); double cpu_avg = std::accumulate(cpu_times.begin(), cpu_times.end(), 0.0) / cpu_times.size(); // double gpu_avg = std::accumulate(gpu_times.begin(), gpu_times.end(), 0.0) / gpu_times.size(); std::cout << "CPU : Avg : " << cpu_avg << " ms FPS : " << 1000.0 / cpu_avg << std::endl; //std::cout << "GPU : Avg : " << gpu_avg << " ms FPS : " << 1000.0 / gpu_avg << std::endl; } printf("difference in time = %d\n", difftime(finish,start)); cvDestroyAllWindows(); system("pause"); return 0; }
857cf16b0614698da234c8bb49b28b5b43762aa1.hip
// !!! This is a file automatically generated by hipify!!! #include <stdio.h> #include <string.h> #include <stdlib.h> #include <math.h> #include <openssl/md5.h> #include <hip/hip_runtime.h> #include "md5.cu" #include "socket.h" #include <unistd.h> #define NUM_THREADS 128 #define MAX_USERNAME_LENGTH 64 #define DEPTH 100 #define PASSWORD_LENGTH 6 #define SIXTH_POWER (26 * 26 * 26 * 26 * 26* 26) #define FIFTH_POWER (26 * 26 * 26 * 26 * 26) #define FOURTH_POWER (26 * 26 * 26 * 26) #define THIRD_POWER (26 * 26 * 26) #define SECOND_POWER (26 * 26) #define FIRST_POWER 26 #define NUMBER_OF_BINS 256 /*these denote whether or not the hash table is at capacity *in a certain bin in a given addToHashTable call*/ #define SUCCESS 43 #define FAILURE 21 #define HASH_LENGTH 32 /////////////////////////////// GLOBALS AND DATA STRUCTURES ///////////////////////////////// __device__ size_t POWER_ARR[] = {1, FIRST_POWER, SECOND_POWER, THIRD_POWER, FOURTH_POWER, FIFTH_POWER}; __device__ int num_cracked = 0; typedef struct hashInfo{ char password[7]; uint hash[4]; int empty = 1; //denotes whether a hashInfo_t has a hash value or not int length; //gives the number of items in a bin of a hash table }hashInfo_t; /////////////////////////////// GPU FUNCTIONS ///////////////////////////////// /* Determines if the hash dentoed by hash exists in the hash table denoted by table. If it does, the function returns 1, otherwise it returns 0 Password signifes the word to be inserted into the hash table if it matches */ __device__ int isHash(hashInfo_t * table, uint * hash, char * password){ //get the bin of the hash by taking the first byte of the first uint of the hash unsigned char byte; byte = (hash[0]&0xFF); //loops through the bin -- first element of the bin is empty but denotes the size of the bin for(int i = 1; i < table[DEPTH*byte].length+1; i++){ int index = DEPTH*byte+i; if(!table[index].empty && table[index].hash[0] == hash[0] && table[index].hash[1] == hash[1] && table[index].hash[2] == hash[2] && table[index].hash[3] == hash[3]){ num_cracked++; //copies password to the password field of the hashInfo_t with the matching hash memcpy(table[index].password, password, PASSWORD_LENGTH*sizeof(char)); return 1; } } return 0; } /* The kernel, crack, runs on the gpu and brute force cracks 6 character, alphabetic passwords hashData denotes a hash table */ __global__ void crack(hashInfo_t * hashData){ //get string permuation size_t tempNum =((size_t) blockIdx.x) * ((size_t) NUM_THREADS) +((size_t) threadIdx.x); //starts at the lowest value char word[] = "aaaaaa"; //Generate the permutation for the given thread/core for(int i = PASSWORD_LENGTH - 1; i >= 0; i--){ size_t temp = tempNum/(POWER_ARR[i]); word[5 - i] += temp; tempNum = tempNum % POWER_ARR[i]; } //Calculate the hash with the function md5. 
uint candidate_hash[4]; md5((uint*)word, candidate_hash); //checks for matching hash and inserts word into hashData if candidate_hash matches isHash(hashData, candidate_hash, word); return; } ///////////////////////// HASH TABLE FUNCTIONS //////////////////////////////// //add hash to hash table //uses first byte of hash to index hash in hashTable //returns SUCCESS if there is space in the bin denoted by the first byte for the hash //returns FAILURE otherwise int addToTable(hashInfo_t * table, uint * hash){ //get the bin of the hash by taking the first byte of the first uint of the hash unsigned char byte; byte = (hash[0]&0xFF); //handles insertion and length incrementing in the bin given by byte if(table[DEPTH*byte].length == 0){ table[DEPTH*byte+1].hash[0] = hash[0]; table[DEPTH*byte+1].hash[1] = hash[1]; table[DEPTH*byte+1].hash[2] = hash[2]; table[DEPTH*byte+1].hash[3] = hash[3]; table[DEPTH*byte+1].empty = 0; table[DEPTH*byte].length++; return SUCCESS; } int placement = table[DEPTH*byte].length+1; if(placement - 1 < DEPTH - 1){ table[DEPTH*byte+placement].hash[0] = hash[0]; table[DEPTH*byte+placement].hash[1] = hash[1]; table[DEPTH*byte+placement].hash[2] = hash[2]; table[DEPTH*byte+placement].hash[3] = hash[3]; table[DEPTH*byte].length++; table[DEPTH*byte+placement].empty = 0; return SUCCESS; } return FAILURE; } /////////////////////////// MAIN //////////////////////////// int main(int argc, char* argv[]){ // Make sure the arguments include a port if(argc !=3 ) { fprintf(stderr, "Usage: %s <port number>\n", argv[0]); exit(1); } char * name = argv[1]; unsigned short server_port = atoi(argv[2]); //calculates number of blocks needed assuming every thread computes one //possible six-character, alphabetic string permutation. int number_of_blocks = (SIXTH_POWER+NUM_THREADS)/NUM_THREADS; //Connect to server int server_socket_fd = socket_connect(name, server_port); if(server_socket_fd == -1){ perror("Connection to server failed.\n"); exit(2); } usleep(10000000); printf("Connected\n"); //read in input sent over from the host machine until the host machine terminates the //connection or the read function errors; read returns a positive byte count on success int rc; hashInfo_t* hash_table = (hashInfo*) malloc(sizeof(hashInfo_t) * NUMBER_OF_BINS * DEPTH); while((rc = read(server_socket_fd, hash_table, sizeof(hashInfo_t) * NUMBER_OF_BINS * DEPTH)) > 0){ //Create the data structure to pass to the GPU hashInfo_t * gpu_hashTable; if(hipMalloc(&gpu_hashTable, sizeof(hashInfo_t) * NUMBER_OF_BINS * DEPTH) != hipSuccess){ perror("Cuda Malloc Failed\n"); exit(2); } //Copy over our provided hashes in arr to the GPU_arr for analysis. if(hipMemcpy(gpu_hashTable, hash_table, sizeof(hashInfo_t) * NUMBER_OF_BINS * DEPTH, hipMemcpyHostToDevice) != hipSuccess){ close(server_socket_fd); perror("Drone CPU to GPU memcpy Failed\n"); exit(2); } printf("Cracking...\n"); hipLaunchKernelGGL(( crack), dim3(number_of_blocks), dim3(NUM_THREADS), 0, 0, gpu_hashTable); //Ensure all CUDA threads have terminated if(hipDeviceSynchronize() != hipSuccess){ close(server_socket_fd); perror("Drone CUDA Thread Synchronization Error\n"); exit(2); } printf("Done\n"); //Copy back the cracked passwords from the GPU. 
if(hipMemcpy(hash_table, gpu_hashTable, sizeof(hashInfo_t) * NUMBER_OF_BINS * DEPTH, hipMemcpyDeviceToHost) != hipSuccess){ close(server_socket_fd); perror("Cuda GPU to CPU memcpy Failed\n"); exit(2); } //Send the hash table back to the server if(write(server_socket_fd, hash_table, sizeof(hashInfo_t) * NUMBER_OF_BINS * DEPTH) <= 0){ perror("Write Passback Failed\n"); close(server_socket_fd); exit(2); } printf("Waiting for another hash table.\n"); free(hash_table); hash_table = (hashInfo*) malloc(sizeof(hashInfo_t) * NUMBER_OF_BINS * DEPTH); } if(rc == 0){ printf("Connection closed\n"); } else{ perror("Drone could not read hash table.\n"); } close(server_socket_fd); return 0; }
857cf16b0614698da234c8bb49b28b5b43762aa1.cu
#include <stdio.h> #include <string.h> #include <stdlib.h> #include <math.h> #include <openssl/md5.h> #include <cuda.h> #include "md5.cu" #include "socket.h" #include <unistd.h> #define NUM_THREADS 128 #define MAX_USERNAME_LENGTH 64 #define DEPTH 100 #define PASSWORD_LENGTH 6 #define SIXTH_POWER (26 * 26 * 26 * 26 * 26* 26) #define FIFTH_POWER (26 * 26 * 26 * 26 * 26) #define FOURTH_POWER (26 * 26 * 26 * 26) #define THIRD_POWER (26 * 26 * 26) #define SECOND_POWER (26 * 26) #define FIRST_POWER 26 #define NUMBER_OF_BINS 256 /*these denote whether or not the hash table is at capacity *in a certain bin in a given addToHashTable call*/ #define SUCCESS 43 #define FAILURE 21 #define HASH_LENGTH 32 /////////////////////////////// GLOBALS AND DATA STRUCTURES ///////////////////////////////// __device__ size_t POWER_ARR[] = {1, FIRST_POWER, SECOND_POWER, THIRD_POWER, FOURTH_POWER, FIFTH_POWER}; __device__ int num_cracked = 0; typedef struct hashInfo{ char password[7]; uint hash[4]; int empty = 1; //denotes whether a hashInfo_t has a hash value or not int length; //gives the number of items in a bin of a hash table }hashInfo_t; /////////////////////////////// GPU FUNCTIONS ///////////////////////////////// /* Determines if the hash dentoed by hash exists in the hash table denoted by table. If it does, the function returns 1, otherwise it returns 0 Password signifes the word to be inserted into the hash table if it matches */ __device__ int isHash(hashInfo_t * table, uint * hash, char * password){ //get the bin of the hash by taking the first byte of the first uint of the hash unsigned char byte; byte = (hash[0]&0xFF); //loops through the bin -- first element of the bin is empty but denotes the size of the bin for(int i = 1; i < table[DEPTH*byte].length+1; i++){ int index = DEPTH*byte+i; if(!table[index].empty && table[index].hash[0] == hash[0] && table[index].hash[1] == hash[1] && table[index].hash[2] == hash[2] && table[index].hash[3] == hash[3]){ num_cracked++; //copies password to the password field of the hashInfo_t with the matching hash memcpy(table[index].password, password, PASSWORD_LENGTH*sizeof(char)); return 1; } } return 0; } /* The kernel, crack, runs on the gpu and brute force cracks 6 character, alphabetic passwords hashData denotes a hash table */ __global__ void crack(hashInfo_t * hashData){ //get string permuation size_t tempNum =((size_t) blockIdx.x) * ((size_t) NUM_THREADS) +((size_t) threadIdx.x); //starts at the lowest value char word[] = "aaaaaa"; //Generate the permutation for the given thread/core for(int i = PASSWORD_LENGTH - 1; i >= 0; i--){ size_t temp = tempNum/(POWER_ARR[i]); word[5 - i] += temp; tempNum = tempNum % POWER_ARR[i]; } //Calculate the hash with the function md5. 
uint candidate_hash[4]; md5((uint*)word, candidate_hash); //checks for matching hash and inserts word into hashData if candidate_hash matches isHash(hashData, candidate_hash, word); return; } ///////////////////////// HASH TABLE FUNCTIONS //////////////////////////////// //add hash to hash table //uses first byte of hash to index hash in hashTable //returns SUCCESS if there is space in the bin denoted by the first byte for the hash //returns FAILURE otherwise int addToTable(hashInfo_t * table, uint * hash){ //get the bin of the hash by taking the first byte of the first uint of the hash unsigned char byte; byte = (hash[0]&0xFF); //handles insertion and length incrementing in the bin given by byte if(table[DEPTH*byte].length == 0){ table[DEPTH*byte+1].hash[0] = hash[0]; table[DEPTH*byte+1].hash[1] = hash[1]; table[DEPTH*byte+1].hash[2] = hash[2]; table[DEPTH*byte+1].hash[3] = hash[3]; table[DEPTH*byte+1].empty = 0; table[DEPTH*byte].length++; return SUCCESS; } int placement = table[DEPTH*byte].length+1; if(placement - 1 < DEPTH - 1){ table[DEPTH*byte+placement].hash[0] = hash[0]; table[DEPTH*byte+placement].hash[1] = hash[1]; table[DEPTH*byte+placement].hash[2] = hash[2]; table[DEPTH*byte+placement].hash[3] = hash[3]; table[DEPTH*byte].length++; table[DEPTH*byte+placement].empty = 0; return SUCCESS; } return FAILURE; } /////////////////////////// MAIN //////////////////////////// int main(int argc, char* argv[]){ // Make sure the arguments include a port if(argc !=3 ) { fprintf(stderr, "Usage: %s <port number>\n", argv[0]); exit(1); } char * name = argv[1]; unsigned short server_port = atoi(argv[2]); //calculates number of blocks needed assuming every thread computes one //possible six-character, alphabetic string permutation. int number_of_blocks = (SIXTH_POWER+NUM_THREADS)/NUM_THREADS; //Connect to server int server_socket_fd = socket_connect(name, server_port); if(server_socket_fd == -1){ perror("Connection to server failed.\n"); exit(2); } usleep(10000000); printf("Connected\n"); //read in input sent over from the host machine until the host machine terminates the //connection or the read function errors; read returns a positive byte count on success int rc; hashInfo_t* hash_table = (hashInfo*) malloc(sizeof(hashInfo_t) * NUMBER_OF_BINS * DEPTH); while((rc = read(server_socket_fd, hash_table, sizeof(hashInfo_t) * NUMBER_OF_BINS * DEPTH)) > 0){ //Create the data structure to pass to the GPU hashInfo_t * gpu_hashTable; if(cudaMalloc(&gpu_hashTable, sizeof(hashInfo_t) * NUMBER_OF_BINS * DEPTH) != cudaSuccess){ perror("Cuda Malloc Failed\n"); exit(2); } //Copy over our provided hashes in arr to the GPU_arr for analysis. if(cudaMemcpy(gpu_hashTable, hash_table, sizeof(hashInfo_t) * NUMBER_OF_BINS * DEPTH, cudaMemcpyHostToDevice) != cudaSuccess){ close(server_socket_fd); perror("Drone CPU to GPU memcpy Failed\n"); exit(2); } printf("Cracking...\n"); crack<<<number_of_blocks, NUM_THREADS>>>(gpu_hashTable); //Ensure all CUDA threads have terminated if(cudaDeviceSynchronize() != cudaSuccess){ close(server_socket_fd); perror("Drone CUDA Thread Synchronization Error\n"); exit(2); } printf("Done\n"); //Copy back the cracked passwords from the GPU. 
if(cudaMemcpy(hash_table, gpu_hashTable, sizeof(hashInfo_t) * NUMBER_OF_BINS * DEPTH, cudaMemcpyDeviceToHost) != cudaSuccess){ close(server_socket_fd); perror("Cuda GPU to CPU memcpy Failed\n"); exit(2); } //Send the hash table back to the server if(write(server_socket_fd, hash_table, sizeof(hashInfo_t) * NUMBER_OF_BINS * DEPTH) <= 0){ perror("Write Passback Failed\n"); close(server_socket_fd); exit(2); } printf("Waiting for another hash table.\n"); free(hash_table); hash_table = (hashInfo*) malloc(sizeof(hashInfo_t) * NUMBER_OF_BINS * DEPTH); } if(rc == 0){ printf("Connection closed\n"); } else{ perror("Drone could not read hash table.\n"); } close(server_socket_fd); return 0; }
a32c6bb507fce0df024d5841f8e15edfba40c6e3.hip
// !!! This is a file automatically generated by hipify!!! #include "../headers/gpu.h" namespace host { void alloc_mem_uchar(uchar** array, uint count) { (*array) = (uchar*) malloc(count * sizeof (uchar)); memset((*array), 0, count * sizeof (uchar)); } void alloc_mem_int(int** array, uint count) { (*array) = (int*) malloc(count * sizeof (int)); memset((*array), 0, count * sizeof (int)); } void free_mem_uchar(uchar** array) { free((*array)); (*array) = NULL; } void free_mem_int(int** array) { free((*array)); (*array) = NULL; } void cuda_host2dev(const void *host_array, void *dev_array, uint count, size_t size) { hipMemcpy(dev_array, host_array, (count * size), hipMemcpyHostToDevice); } } namespace dev { void alloc_mem_uchar(uchar** array, uint count) { test(hipMalloc(array, count * sizeof (uchar))); } void alloc_mem_int(int** array, uint count) { test(hipMalloc(array, count * sizeof (int))); } void free_mem_uchar(uchar** array) { test(hipFree((*array))); (*array) = NULL; } void free_mem_int(int** array) { test(hipFree((*array))); (*array) = NULL; } void cuda_dev2host(void* dev_array, void* host_array, uint count, size_t size) { test(hipMemcpy(host_array, dev_array, (count * size), hipMemcpyDeviceToHost)); } void test(hipError_t result) { if (result != hipSuccess) { std::cerr << "Error: " << hipGetErrorString(result) << std::endl; exit(1); } } hipDeviceProp_t get_prop() { hipDeviceProp_t properties; test(hipGetDeviceProperties(&properties, 0)); return properties; } uint get_max_block_size() { return sqrt(get_prop().maxThreadsPerBlock); } uint get_block_size(uint threads, uint problem) { uint max_size = dev::get_max_block_size(); uint size = sqrt(problem / threads); /* compare block side length against the maximum side length */ if (size > max_size) { std::cerr << "Not enough threads." << std::endl; return max_size; } else if (!size) { std::cerr << "Too many threads." << std::endl; return max_size; } return size; } }
a32c6bb507fce0df024d5841f8e15edfba40c6e3.cu
#include "../headers/gpu.h" namespace host { void alloc_mem_uchar(uchar** array, uint count) { (*array) = (uchar*) malloc(count * sizeof (uchar)); memset((*array), 0, count * sizeof (uchar)); } void alloc_mem_int(int** array, uint count) { (*array) = (int*) malloc(count * sizeof (int)); memset((*array), 0, count * sizeof (int)); } void free_mem_uchar(uchar** array) { free((*array)); (*array) = NULL; } void free_mem_int(int** array) { free((*array)); (*array) = NULL; } void cuda_host2dev(const void *host_array, void *dev_array, uint count, size_t size) { cudaMemcpy(dev_array, host_array, (count * size), cudaMemcpyHostToDevice); } } namespace dev { void alloc_mem_uchar(uchar** array, uint count) { test(cudaMalloc(array, count * sizeof (uchar))); } void alloc_mem_int(int** array, uint count) { test(cudaMalloc(array, count * sizeof (int))); } void free_mem_uchar(uchar** array) { test(cudaFree((*array))); (*array) = NULL; } void free_mem_int(int** array) { test(cudaFree((*array))); (*array) = NULL; } void cuda_dev2host(void* dev_array, void* host_array, uint count, size_t size) { test(cudaMemcpy(host_array, dev_array, (count * size), cudaMemcpyDeviceToHost)); } void test(cudaError_t result) { if (result != cudaSuccess) { std::cerr << "Error: " << cudaGetErrorString(result) << std::endl; exit(1); } } cudaDeviceProp get_prop() { cudaDeviceProp properties; test(cudaGetDeviceProperties(&properties, 0)); return properties; } uint get_max_block_size() { return sqrt(get_prop().maxThreadsPerBlock); } uint get_block_size(uint threads, uint problem) { uint max_size = dev::get_max_block_size(); uint size = sqrt(problem / threads); if (size * size > max_size) { std::cerr << "Not enough threads." << std::endl; return max_size; } else if (!size) { std::cerr << "Too many threads." << std::endl; return max_size; } return size; } }
b230e8b4ab1ad3affc652a9cfe971751f473b1a8.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "TRCudaV2.cuh" #include "EigenUtility.h" TRCudaV2::TRCudaV2() { } TRCudaV2::~TRCudaV2() { // SaveDelete(VolumeData); SaveDelete(PointType); SaveDelete(PointType_1D); } ////////////////////////////////////////////////////////////////////////// // GPU ////////////////////////////////////////////////////////////////////////// __device__ static float Z1Function(float x1) { // Function XD // https://i.imgur.com/QS3bczf.png return -126.4517 + 0.4005123 * x1 - 0.000011981 * pow(x1 - 2122.41, 2) - 0.000000011664 * pow(x1 - 2122.41, 3) + 0.000000000001432 * pow(x1 - 2122.41, 4) - 0.0000000000000008164 * pow(x1 - 2122.41, 5) + 5.939E-20 * pow(x1 - 2122.41, 6); } __global__ static void RawDataToOriginalData(char* FileRawData, int* OCTRawData, int OCTDataSize) { // 1 Byte 2 Bytes int id = blockIdx.y * gridDim.x * gridDim.z * blockDim.x + // Y => Y * 250 * (2 * 1024) blockIdx.x * gridDim.z * blockDim.x + // X => X * (2 * 1024) blockIdx.z * blockDim.x + // Z => (Z1 * 1024 + Z2) threadIdx.x; // if (id >= OCTDataSize) { printf(" Raw Data Error!\n"); return; } OCTRawData[id] = (int)((uchar)FileRawData[id * 2] + (uchar)FileRawData[id * 2 + 1] * 256); } __global__ static void CombineTwoChannels_Single(int* OCTData_2Channls, int* OCTData, int SizeX, int SizeY, int SizeZ) { // Denoise Channel int id = blockIdx.y * gridDim.x * gridDim.z * blockDim.x + // Y => Y * 250 * (2 * 1024) blockIdx.x * gridDim.z * blockDim.x + // X => X * (2 * 1024) blockIdx.z * blockDim.x + // Z => (Z1 * 1024 + Z2) threadIdx.x; // if (id >= SizeX * SizeY * SizeZ) { printf("Combine Two Channel Error!\n"); return; } int BoxSize = SizeX * SizeZ; // int BoxIndex = id / BoxSize; int BoxLeft = id % BoxSize; OCTData[id] = (OCTData_2Channls[BoxIndex * 2 * BoxSize + BoxLeft] + OCTData_2Channls[(BoxIndex * 2 + 1) * BoxSize + BoxLeft]) / 2; } __global__ static void CombineTwoChannels_Multi(int* OCTData_2Channls, int* OCTData, int SizeX, int SizeY, int SizeZ) { // Denoise Channel int id = blockIdx.y * gridDim.x * gridDim.z * blockDim.x + // Y => Y * 250 * (2 * 1024) blockIdx.x * gridDim.z * blockDim.x + // X => X * (2 * 1024) blockIdx.z * blockDim.x + // Z => (Z1 * 1024 + Z2) threadIdx.x; // if (id >= SizeX * SizeY * SizeZ) { printf("Combine Two Channel Error!\n"); return; } int BoxSize = SizeX * SizeZ * 2; // Channel + int BoxIndex = id / BoxSize; int BoxLeft = id % BoxSize; OCTData[id] = (OCTData_2Channls[BoxIndex * 2 * BoxSize + BoxLeft] + OCTData_2Channls[(BoxIndex * 2 + 1) * BoxSize + BoxLeft]) / 2; } __global__ static void ReverseBackScanData(int* OCTData, int SizeX, int SizeY, int SizeZ) { // int id = (blockIdx.y * 2 + 1) * gridDim.x * 2 * gridDim.z * blockDim.x + // Y => (Y * 2 + 1) * (2 * 1024) => 1, 3, 5, 7, 9 blockIdx.x * gridDim.z * blockDim.x + // X => X * (125 * 2) * (2 * 1024) blockIdx.z * blockDim.x + // Z => (Z1 * 1024 + Z2) threadIdx.x; int changeID = (blockIdx.y * 2 + 1) * gridDim.x * 2 * gridDim.z * blockDim.x + // Y => (Y * 2 + 1) * (2 * 1024) => 1, 3, 5, 7, 9 (gridDim.y * 2 - blockIdx.x - 1) * gridDim.z * blockDim.x + // X => (250 - X - 1) * (125 * 2) * (2 * 1024) blockIdx.z * blockDim.x + // Z => (Z1 * 1024 + Z2) threadIdx.x; int value = OCTData[id]; OCTData[id] = OCTData[changeID]; OCTData[changeID] = value; } __global__ static void GetMatrixA(int* OCTData, float* MatrixA, int NumPolynomial, int OneDataSize) { // Function MatrixA int id = blockIdx.x * blockDim.x + threadIdx.x; // () if (id >= (NumPolynomial + 1) * (NumPolynomial 
+ 1)) { printf(" Fitting !\n"); return; } // Index int rowIndex = id % (NumPolynomial + 1); int colsIndex = id / (NumPolynomial + 1); // float value = 0; for (int i = 0; i < OneDataSize; i++) { // float FirstValue = (float)i / OneDataSize; float SecondValue = (float)i / OneDataSize; value += pow(FirstValue, NumPolynomial - rowIndex) * pow(SecondValue, NumPolynomial - colsIndex); } MatrixA[id] = value; } __global__ static void GetMatrixB(int* OCTData, float* MatrixB, float YAverage, int NumPolynomial, int OneDataSize) { int id = blockIdx.x * blockDim.x + threadIdx.x; // Index int rowIndex = id % (NumPolynomial + 1); int colsIndex = id / (NumPolynomial + 1); // float value = 0; for (int i = 0; i < OneDataSize; i++) { // float FirstValue = (float)i / OneDataSize; float SecondValue = OCTData[i] - YAverage; value += pow(FirstValue, NumPolynomial - rowIndex) * SecondValue; } MatrixB[id] = value; } __global__ static void MinusByFittingFunction(int* OCTData, float* PolyValue, int SizeZ) { // Fitting Data int id = blockIdx.y * gridDim.x * gridDim.z * blockDim.x + // Y => Y * 250 * (2 * 1024) blockIdx.x * gridDim.z * blockDim.x + // X => X * (2 * 1024) blockIdx.z * blockDim.x + // Z => (Z1 * 1024 + Z2) threadIdx.x; // Z int idZ = id % SizeZ; // OCTData[id] -= PolyValue[idZ]; } __global__ static void ComputePXScale(float* PXScale, int OffsetBegin, int ShiftValue, int Steps, int Size) { // PXScale Array(@@) int id = blockIdx.x * blockDim.x + threadIdx.x; if (id >= Size) { printf("ComputePXScale !\n"); return; } // int idOffset = OffsetBegin + ShiftValue; PXScale[id] = (Z1Function(idOffset + id) - Z1Function(idOffset)) * Steps; } __global__ static void FrequencyAdjust(int* OCTData, float* KSpaceData, float* PXScale, int* IndexArray, int CutIndex, int SizeX, int SizeY, int SizeZ) { // Denoise Channel int id = blockIdx.y * gridDim.x * gridDim.z * blockDim.x + // Y => Y * 250 * (2 * 1024) blockIdx.x * gridDim.z * blockDim.x + // X => X * (2 * 1024) blockIdx.z * blockDim.x + // Z => (Z1 * 1024 + Z2) threadIdx.x; if (id >= SizeX * SizeY * SizeZ) { printf("Frequency "); return; } // Index int idZ = id % SizeZ; if (IndexArray[idZ] == -1 || idZ >= CutIndex || idZ == 0) { KSpaceData[id] = 0; return; } // int LastPXScaleIndex = (IndexArray[idZ] - 1 <= 0 ? 0 : IndexArray[idZ] - 1); double m = (double)(OCTData[id] - OCTData[id - 1]) / (PXScale[IndexArray[idZ]] - PXScale[LastPXScaleIndex]); double c = OCTData[id] - m * PXScale[IndexArray[idZ]]; KSpaceData[id] = m * idZ + c; } __global__ static void DataToComplexData(float* KSpaceData, hipfftComplex* FFTData, int OCTDataSize) { // KSpace Data FFT int id = blockIdx.y * gridDim.x * gridDim.z * blockDim.x + // Y => Y * 250 * (2 * 1024) blockIdx.x * gridDim.z * blockDim.x + // X => X * (2 * 1024) blockIdx.z * blockDim.x + // Z => (Z1 * 1024 + Z2) threadIdx.x; if (id >= OCTDataSize) { printf(" Complex Data !!\n"); return; } // Complex Data FFTData[id].x = KSpaceData[id]; FFTData[id].y = 0; } __global__ static void ComplexDataToData(hipfftComplex* FFTData, float* OCTFloatData, int SizeX, int SizeY, int SizeZ, int OCTDataSize) { // FFT int id = blockIdx.y * gridDim.x * gridDim.z * blockDim.x + // Y => Y * 250 * (2 * 1024) blockIdx.x * gridDim.z * blockDim.x + // X => X * (1 * 1024) blockIdx.z * blockDim.x + // Z => (0 * 1024 + Z2) threadIdx.x; if (id >= OCTDataSize / 2) { printf("Complex To Data !!\n"); return; } // 0 ( 1024) Youtube (!!) 
// 2 // () // ( Size / 2 - 1 => 1023) 1022 /*int idZ = id % (SizeZ / 2); idZ = SizeZ / 2 - idZ - 1; if (idZ == SizeZ / 2 - 1) idZ--;*/ int idZ = id % (SizeZ / 2); if (idZ == 0) idZ++; // int tempIndex = id / (SizeZ / 2); int idX = tempIndex % SizeX; int idY = tempIndex / SizeX; int NewIndex = idY * SizeX * SizeZ + idX * SizeZ + idZ; float temp = sqrt(FFTData[NewIndex].x * FFTData[NewIndex].x + FFTData[NewIndex].y * FFTData[NewIndex].y); // if (temp == 0) OCTFloatData[id] = 0; else OCTFloatData[id] = log10f(temp) * 10; } __global__ static void ShiftFinalData(float* AfterFFTData, float* ShiftData, int SizeX, int SizeY, int FinalSizeZ, int FinalDataSize) { // // // => | -> // ("->" "=>" ) int id = blockIdx.y * gridDim.x * gridDim.z * blockDim.x + // Y => Y * 250 * (2 * 1024) blockIdx.x * gridDim.z * blockDim.x + // X => X * (2 * 1024) blockIdx.z * blockDim.x + // Z => (Z1 * 1024 + Z2) threadIdx.x; if (id >= FinalDataSize) { printf("Shift Data !!\n"); return; } // int idZ = id % FinalSizeZ; int tempIndex = id / FinalSizeZ; int idX = tempIndex % SizeX; int idY = tempIndex / SizeX; // SizeY // (0 ~ 124 125 ~ 249) // // (125 ~ 249 0 ~ 124) idY = (idY + SizeY / 2) % SizeY; int NewIndex = idY * SizeX * FinalSizeZ + idX * FinalSizeZ + idZ; ShiftData[id] = AfterFFTData[NewIndex]; //ShiftData[id] = AfterFFTData[id]; } __global__ static void NormalizeData(float* ShiftData, float MaxValue, float MinValue, int FinalDataSize) { // Normalize int id = blockIdx.y * gridDim.x * gridDim.z * blockDim.x + // Y => Y * 250 * (2 * 1024) blockIdx.x * gridDim.z * blockDim.x + // X => X * (2 * 1024) blockIdx.z * blockDim.x + // Z => (Z1 * 1024 + Z2) threadIdx.x; // if (id >= FinalDataSize) { printf("Normaliza Data \n"); return; } if (ShiftData[id] < MinValue) ShiftData[id] = 0; else if (ShiftData[id] > MaxValue) ShiftData[id] = 1; else ShiftData[id] = (ShiftData[id] - MinValue) / (MaxValue - MinValue); } // & (Smooth Data) __device__ static float SmoothDataByIndex(float* VolumeData, int id, int FinalSizeZ, int SmoothSizeRange) { int idZ = id % FinalSizeZ; int SmoothRadius = (SmoothSizeRange - 1) / 2; // Smooth int MinValue = min(SmoothRadius, idZ - 0); int MaxValue = min(SmoothRadius, FinalSizeZ - idZ - 1); float TempTotal = 0; // for (int i = -MinValue; i <= MaxValue; i++) TempTotal += VolumeData[id + i]; TempTotal /= (MaxValue + MinValue + 1); return TempTotal; } __global__ static void TransformToImageAndBorderData(float* VolumeData_Normalized, float* SmoothData, uchar* ImageArray, int SizeX, int SizeY, int FinalSizeZ, int SmoothSizeRange) { // int id = blockIdx.y * gridDim.x * gridDim.z * blockDim.x + // Y => Y * 250 * 1 * 1024 blockIdx.x * gridDim.z * blockDim.x + // X => X * 1 * 1024 blockIdx.z * blockDim.x + // Z => (Z1 * 1024 + Z2) threadIdx.x; if (id >= SizeX * SizeY * FinalSizeZ) // return; // Border Detect SmoothData[id] = SmoothDataByIndex(VolumeData_Normalized, id, FinalSizeZ, SmoothSizeRange); // 1.3 float data = VolumeData_Normalized[id] * 255 * 1.3f; if (data >= 255) ImageArray[id] = 255; else if (data <= 0) ImageArray[id] = 0; else ImageArray[id] = (uchar)data; } // __global__ static void ZCalcBrightness(float* DataArray, float* BrightArray, int size, int rows, int cols, int startIndex) { int id = blockIdx.x * blockDim.x + threadIdx.x; if (id >= size * rows) // return; // Index int sizeIndex = id / rows; int rowIndex = id % rows; BrightArray[id] = 0; for (int i = startIndex; i < cols; i++) { int currentID = sizeIndex * rows * cols + rowIndex * cols + i; BrightArray[id] += DataArray[currentID]; } } 
__global__ static void findMaxAndMinPeak(float* DataArray, float* BrightnessArray, uchar* PointType, int size, int rows, int cols, float MaxPeakThreshold, float SatPeakThreshold) { int id = blockIdx.x * blockDim.x + threadIdx.x; if (id >= rows * cols * size) // return; // width 1 ~ (width - 1) int colID = id % cols; if (1 >= colID || colID == (cols - 1)) return; // int tempIndex = id / cols; if (BrightnessArray[tempIndex] > SatPeakThreshold) return; // // ( ) float DiffLeft = DataArray[id] - DataArray[id - 1]; float DiffRight = DataArray[id] - DataArray[id + 1]; if (DiffLeft > 0 && DiffRight > 0 && DataArray[id] > MaxPeakThreshold) PointType[id] = 1; else if (DiffLeft < 0 && DiffRight < 0) PointType[id] = 2; } __global__ static void ParseMaxMinPeak(uchar* PointType, int size, int rows, int cols, int startIndex) { int id = blockIdx.x * blockDim.x + threadIdx.x; if (id >= size * rows) // return; // Index int sizeIndex = id / rows; int rowIndex = id % rows; // Skip Min int lastMinID = -1; bool FindMax = false; // () int Useful_Start = -1; int Useful_End = -1; int Useful_PeakCount = -1, tempPeakCount = 0; // min Peak for (int i = 0; i < startIndex; i++) { int currentID = sizeIndex * rows * cols + rowIndex * cols + i; PointType[currentID] = 0; } for (int i = startIndex; i < cols; i++) { int currentID = sizeIndex * rows * cols + rowIndex * cols + i; if (lastMinID == -1) // or Max { // Min if (PointType[currentID] == 2) lastMinID = i; else if (PointType[currentID] == 1) PointType[currentID] = 0; // } else { // min min if (PointType[currentID] == 1) { // Max FindMax = true; tempPeakCount++; } else if (FindMax && PointType[currentID] == 2) { // Max Min if (Useful_PeakCount < tempPeakCount) { Useful_PeakCount = tempPeakCount; Useful_Start = lastMinID; Useful_End = i; } FindMax = false; tempPeakCount = 0; lastMinID = -1; } else if (!FindMax && PointType[currentID] == 2) { // Max Min PointType[sizeIndex * rows * cols + rowIndex * cols + lastMinID] = 0; lastMinID = i; } } } // Min if (lastMinID != -1) PointType[sizeIndex * rows * cols + rowIndex * cols + lastMinID] = 0; } __device__ static void InsertBestNChoice(float* CandidateGap, int* PointType_BestN, int offsetIndex, int bestNoffsetIndex, int CurrentIndex, int ChooseBestN) { bool IsInsert = false; for (int i = 0; i < ChooseBestN && !IsInsert; i++) { // 0 if (PointType_BestN[bestNoffsetIndex + i] > 0) { // int preIndex = PointType_BestN[bestNoffsetIndex + i]; if (CandidateGap[offsetIndex + preIndex] >= CandidateGap[offsetIndex + CurrentIndex]) // continue; else if (CandidateGap[offsetIndex + preIndex] < CandidateGap[offsetIndex + CurrentIndex]) // { for (int j = ChooseBestN - 1; j > i; j--) PointType_BestN[bestNoffsetIndex + j] = PointType_BestN[bestNoffsetIndex + j - 1]; PointType_BestN[bestNoffsetIndex + i] = CurrentIndex; IsInsert = true; } } else { PointType_BestN[bestNoffsetIndex + i] = CurrentIndex; break; } } } __global__ static void PickBestChoiceToArray(float* DataArray, uchar* PointType, float* CandidateGap, int* PointType_BestN, int size, int rows, int cols, int ChooseBestN, int startIndex, float Threshold) { int id = blockIdx.x * blockDim.x + threadIdx.x; if (id >= size * rows) // return; // Index int sizeIndex = id / rows; int rowIndex = id % rows; bool IsFindMin = false; // float MinData; int offsetIndex = sizeIndex * rows * cols + rowIndex * cols; int bestNoffsetIndex = sizeIndex * rows * ChooseBestN + rowIndex * ChooseBestN; float lastData = -1; for (int i = startIndex; i < cols; i++) { // if (PointType[i + offsetIndex] == 2) { // if 
(IsFindMin) lastData = -1; IsFindMin = true; MinData = DataArray[i + offsetIndex]; } else if ( IsFindMin && // PointType[i + offsetIndex] == 1 && DataArray[i + offsetIndex] - MinData > Threshold // Threshold ) { lastData = DataArray[i + offsetIndex] - MinData; // PointType_BestN CandidateGap[offsetIndex + i] = lastData; InsertBestNChoice(CandidateGap, PointType_BestN, offsetIndex, bestNoffsetIndex, i, ChooseBestN); } } // 0 for (int i = 0; i < ChooseBestN; i++) if (PointType_BestN[bestNoffsetIndex + i] == 0) PointType_BestN[bestNoffsetIndex + i] = -1; } __global__ static void CalcNeighbor(int* PointType_BestN, float* NeighborCountArray, int size, int rows, int cols, int ChooseBestN, int Radius) { int id = blockIdx.x * blockDim.x + threadIdx.x; if (id >= size * rows) // return; // Index int sizeIndex = id / rows; int rowIndex = id % rows; // index int chooseIndex = sizeIndex * rows * ChooseBestN + rowIndex * ChooseBestN; for (int i = 0; i < ChooseBestN; i++) { // int totalPixelCount = 0; float avgPixel = 0; int BestN = PointType_BestN[chooseIndex + i]; if (BestN == -1) { NeighborCountArray[chooseIndex + i] == 0; continue; } // for (int y = -Radius; y <= Radius; y++) for (int x = -Radius; x <= Radius; x++) for (int n = 0; n < ChooseBestN; n++) { int currentSizeIndex = sizeIndex + y; int currentRowIndex = rowIndex + x; if (0 <= currentSizeIndex && currentSizeIndex < size && 0 <= currentRowIndex && currentRowIndex < rows) { totalPixelCount++; int CurrentBestNIndex = currentSizeIndex * rows * ChooseBestN + currentRowIndex * ChooseBestN + n; int CurrentBestN = PointType_BestN[CurrentBestNIndex]; // Return if (CurrentBestN == -1) continue; if (abs(CurrentBestN - BestN) <= Radius) avgPixel++; } } // NeighborCountArray[chooseIndex + i] = avgPixel / totalPixelCount; } // int maxIndex = (thrust::max_element(thrust::device, NeighborCountArray + chooseIndex, NeighborCountArray + chooseIndex + ChooseBestN) - (NeighborCountArray + chooseIndex)); PointType_BestN[chooseIndex] = PointType_BestN[chooseIndex + maxIndex]; for (int i = 1; i < ChooseBestN; i++) PointType_BestN[i] = -1; } __global__ static void ConnectPointsStatus(int* PointType_BestN, int* ConnectStatus, int size, int rows, int ChooseBestN, int ConnectRadius) { int id = blockIdx.x * blockDim.x + threadIdx.x; if (id >= size * rows * ChooseBestN) // return; // Index int sizeIndex = id / (rows * ChooseBestN); int tempID = id % (rows * ChooseBestN); int rowIndex = tempID / ChooseBestN; int chooseIndex = tempID % ChooseBestN; // if (PointType_BestN[sizeIndex * rows * ChooseBestN + rowIndex * ChooseBestN + chooseIndex] == -1) return; // int finalPos = min(rowIndex + ConnectRadius, rows); // for (int i = rowIndex + 1; i < finalPos; i++) { for (int j = 0; j < ChooseBestN; j++) { // ( i row ) // 1 if (PointType_BestN[sizeIndex * rows * ChooseBestN + i * ChooseBestN + j] != -1) { // // int diffX = PointType_BestN[sizeIndex * rows * ChooseBestN + rowIndex * ChooseBestN + chooseIndex] - PointType_BestN[sizeIndex * rows * ChooseBestN + i * ChooseBestN + j]; int diffY = i - rowIndex; int Radius = diffX * diffX + diffY * diffY; // 0 if (Radius < ConnectRadius * ConnectRadius) { // + Row + Top N + + Top N int index = sizeIndex * rows * ChooseBestN * ConnectRadius * ChooseBestN + // rowIndex * ChooseBestN * ConnectRadius * ChooseBestN + // Row chooseIndex * ConnectRadius * ChooseBestN + // Top N (i - rowIndex) * ChooseBestN + // j; ConnectStatus[index] = Radius; } } } } } // Multi TopView __global__ static void GetOtherSideView(float* Data, float* OtherSideData, 
int SizeX, int SizeY, int FinalSizeZ) { int id = blockIdx.x * blockDim.x + threadIdx.x; if (id >= SizeX * SizeY) { printf("!!\n"); return; } // id int idX = id / SizeY; int idY = id % SizeY; int DataOffsetIndex = idX * SizeY * FinalSizeZ + idY * FinalSizeZ; // SizeZ float totalZ = 0; for (int i = 0; i < FinalSizeZ; i++) totalZ += Data[DataOffsetIndex + i]; // // rows => (SizeY) // cols => SizeX int offsetIndex = idY * SizeX + idX; OtherSideData[offsetIndex] = totalZ; } __global__ static void TransformOtherSideDataToImage(float* OtherSideData, uchar* UintOtherSideData, float Mean, float FixMean, int SizeX, int SizeY) { int id = blockDim.x * blockIdx.x + threadIdx.x; if (id >= SizeX * SizeY) // return; // Mean float ScaleFactor = FixMean / Mean / 255; float data = OtherSideData[id] * 255 * ScaleFactor; if (data >= 255) UintOtherSideData[id] = 255; else if (data <= 0) UintOtherSideData[id] = 0; else UintOtherSideData[id] = (uchar)data; } ////////////////////////////////////////////////////////////////////////// // CPU ////////////////////////////////////////////////////////////////////////// // Function void TRCudaV2::SingleRawDataToPointCloud(char* FileRawData, int DataSize, int SizeX, int SizeZ, long ShiftValue, double K_Step, int CutValue) { // #ifdef SHOW_TRCUDAV2_TOTAL_TIME totalTime = clock(); #endif ////////////////////////////////////////////////////////////////////////// // // 1. GPU Data // 2. ( 2 Bytes QT GPU 2 Bytes) // Channels2 ( 2) // 3. 5 Fit // 4. Space K Space // 5. cuFFT // () // 7. Normalize // 8. // 9. // 10. GPU Data // // // 1. Function => X Y // 2. ShiftValue => TRIGGER DELAY(FIBER) // 3. K_Step => (14.mm 2.5k step2)(k stepz1~2.5) // 4. CutValue => OCTzlaser ( cuteValue XD) // 5. 2 Channel 2048 x 250 x 2 x 2 x 2 // () x () x (()) x Channel x 2 Byte ////////////////////////////////////////////////////////////////////////// #pragma region 1. GPU Data // #ifdef SHOW_TRCUDAV2_DETAIL_TIME clock_t time = clock(); #endif // GPU Data char* GPU_FileRawData; // => Raw Data int *GPU_OCTRawData_2Channel; // => OCT Raw Data (2Channels"" Channel ) int *GPU_OCTRawData; // => Denoise Data ( CH1 + CH2 ) ("" Channel) float *GPU_OCTFloatData; // => K Space FFT // !! () 2 DataSize /= 2; // 2 Channels bool UseTwoChannels = (DataSize / SizeX / SizeZ == 4); // 2 Byte & 2 Channles // hipMalloc(&GPU_FileRawData, sizeof(char) * DataSize); // Copy () hipMemcpy(GPU_FileRawData, FileRawData, sizeof(char) * DataSize / 2, hipMemcpyHostToDevice); hipMemcpy(GPU_FileRawData + DataSize / 2, FileRawData + DataSize, sizeof(char) * DataSize / 2, hipMemcpyHostToDevice); CheckCudaError(); // 2 Chanels int OCTDataSize = SizeX * SizeZ; if (UseTwoChannels) hipMalloc(&GPU_OCTRawData_2Channel, sizeof(int) * OCTDataSize * 2); hipMalloc(&GPU_OCTRawData, sizeof(int) * OCTDataSize); hipMalloc(&GPU_OCTFloatData, sizeof(float) * OCTDataSize); // #ifdef SHOW_TRCUDAV2_DETAIL_TIME time = clock() - time; cout << "1. GPU: " << ((float)time) / CLOCKS_PER_SEC << " sec" << endl; #endif #pragma endregion #pragma region 2. 
////////////////////////////////////////////////////////////////////////// // // ( 0 ~ 250) // // channel ////////////////////////////////////////////////////////////////////////// // #ifdef SHOW_TRCUDAV2_DETAIL_TIME time = clock(); #endif // 2 Byte if (UseTwoChannels) { RawDataToOriginalData << < dim3(SizeX, 1, SizeZ / NumThreads * 2), NumThreads >> > (GPU_FileRawData, GPU_OCTRawData_2Channel, DataSize / 2); CheckCudaError(); // Channel Denoise CombineTwoChannels_Single << < dim3(SizeX, 1, SizeZ / NumThreads), NumThreads >> > (GPU_OCTRawData_2Channel, GPU_OCTRawData, SizeX, 1, SizeZ); // hipFree(GPU_OCTRawData_2Channel); } else RawDataToOriginalData << < dim3(SizeX, 1, SizeZ / NumThreads), NumThreads >> > (GPU_FileRawData, GPU_OCTRawData, DataSize / 2); CheckCudaError(); // FileRaw Data hipFree(GPU_FileRawData); // #ifdef SHOW_TRCUDAV2_DETAIL_TIME time = clock() - time; cout << "2. : " << ((float)time) / CLOCKS_PER_SEC << " sec" << endl; #endif #pragma endregion #pragma region 3. Fitting // #ifdef SHOW_TRCUDAV2_DETAIL_TIME time = clock(); #endif // Matrix float* GPU_MatrixA; float* GPU_MatrixB; hipMalloc(&GPU_MatrixA, sizeof(float) * (NumPolynomial + 1) *(NumPolynomial + 1)); hipMalloc(&GPU_MatrixB, sizeof(float) * (NumPolynomial + 1)); // int* FirstSizeZData = new int[SizeZ]; hipMemcpy(FirstSizeZData, GPU_OCTRawData, sizeof(int) * SizeZ, hipMemcpyDeviceToHost); hipDeviceSynchronize(); float average = accumulate(FirstSizeZData, FirstSizeZData + SizeZ, 0.0) / SizeZ; delete[] FirstSizeZData; // Matrix GetMatrixA << <1, (NumPolynomial + 1) * (NumPolynomial + 1) >> > (GPU_OCTRawData, GPU_MatrixA, NumPolynomial, SizeZ); GetMatrixB << <1, NumPolynomial + 1 >> > (GPU_OCTRawData, GPU_MatrixB, average, NumPolynomial, SizeZ); CheckCudaError(); float* MatrixA = new float[(NumPolynomial + 1) *(NumPolynomial + 1)]; float* MatrixB = new float[(NumPolynomial + 1)]; hipMemcpy(MatrixA, GPU_MatrixA, sizeof(float) * (NumPolynomial + 1) *(NumPolynomial + 1), hipMemcpyDeviceToHost); hipMemcpy(MatrixB, GPU_MatrixB, sizeof(float) * (NumPolynomial + 1), hipMemcpyDeviceToHost); hipDeviceSynchronize(); // Eigen Fitting Function EigenUtility eigen; eigen.SetAverageValue(average); eigen.SolveByEigen(MatrixA, MatrixB, NumPolynomial); // Function float* GPU_PolyValue; float* PolyValue = eigen.GetFunctionArray(SizeZ, average); hipMalloc(&GPU_PolyValue, sizeof(float) * SizeZ); hipMemcpy(GPU_PolyValue, PolyValue, sizeof(float) * SizeZ, hipMemcpyHostToDevice); MinusByFittingFunction << < dim3(SizeX, 1, SizeZ / NumThreads), NumThreads >> > (GPU_OCTRawData, GPU_PolyValue, SizeZ); CheckCudaError(); // hipFree(GPU_MatrixA); hipFree(GPU_MatrixB); hipFree(GPU_PolyValue); delete[] MatrixA; delete[] MatrixB; delete[] PolyValue; // #ifdef SHOW_TRCUDAV2_DETAIL_TIME time = clock() - time; cout << "3. Fitting : " << ((float)time) / CLOCKS_PER_SEC << " sec" << endl; #endif #pragma endregion #pragma region 4. 
Space K Space // #ifdef SHOW_TRCUDAV2_DETAIL_TIME time = clock(); #endif // float* PX_Scale = new float[SizeZ]; int* KSpaceIndexArray = new int[SizeZ]; float* GPU_PXScale; int* GPU_KSpaceIndexArray; hipMalloc(&GPU_PXScale, sizeof(float) * SizeZ); hipMalloc(&GPU_KSpaceIndexArray, sizeof(int) * SizeZ); // int OffsetBegin = 800; // PXScale Array ComputePXScale << <SizeZ / NumThreads, NumThreads >> > (GPU_PXScale, OffsetBegin, ShiftValue, K_Step, SizeZ); CheckCudaError(); // K Space Index ( GPU thread thread CPU ) hipMemcpy(PX_Scale, GPU_PXScale, sizeof(float) * SizeZ, hipMemcpyDeviceToHost); // K Space Array int index = 1; int KSpaceOffset = PX_Scale[SizeZ - 1]; for (int i = 0; i <= KSpaceOffset; i++) { while (i >= PX_Scale[index]) { index++; } KSpaceIndexArray[i] = index; } for (int i = KSpaceOffset + 1; i < SizeZ; i++) KSpaceIndexArray[i] = -1; // K Space KSpaceIndexArray Index Index hipMemcpy(GPU_KSpaceIndexArray, KSpaceIndexArray, sizeof(int) * SizeZ, hipMemcpyHostToDevice); FrequencyAdjust << <dim3(SizeX, 1, SizeZ / NumThreads), NumThreads >> > (GPU_OCTRawData, GPU_OCTFloatData, GPU_PXScale, GPU_KSpaceIndexArray, KSpaceOffset - CutValue, SizeX, 1, SizeZ); CheckCudaError(); // hipFree(GPU_PXScale); hipFree(GPU_KSpaceIndexArray); hipFree(GPU_OCTRawData); delete[] KSpaceIndexArray; // #ifdef SHOW_TRCUDAV2_DETAIL_TIME time = clock() - time; cout << "4. Space K Space : " << ((float)time) / CLOCKS_PER_SEC << " sec" << endl; #endif #pragma endregion #pragma region 5. cuFFT // #ifdef SHOW_TRCUDAV2_DETAIL_TIME time = clock(); #endif hipfftHandle PlanHandle; hipfftComplex* GPU_ComplexData; // FFT Handle & C2C hipfftComplex int NX = SizeZ; int BatchSize = SizeX; hipfftPlan1d(&PlanHandle, NX, HIPFFT_C2C, BatchSize); hipMalloc(&GPU_ComplexData, sizeof(hipfftComplex) * NX * BatchSize); CheckCudaError(); // Complex Data //gpuDataToComplex << <512, 4 >> > (GPU_OCTFloatData, GPU_ComplexData, NX * BatchSize, 0); DataToComplexData << <dim3(SizeX, 1, SizeZ / NumThreads), NumThreads >> > (GPU_OCTFloatData, GPU_ComplexData, OCTDataSize); CheckCudaError(); // cuFFT(CUDA Fast Fourier Transform) hipfftExecC2C(PlanHandle, GPU_ComplexData, GPU_ComplexData, HIPFFT_FORWARD); CheckCudaError(); // (FFT) & // https://www.youtube.com/watch?v=spUNpyF58BY //gpuComplexToData << <512, 4 >> > (GPU_ComplexData, GPU_OCTFloatData, NX * BatchSize / 2, SizeZ, 0); ComplexDataToData << <dim3(SizeX, 1, SizeZ / NumThreads / 2), NumThreads >> > (GPU_ComplexData, GPU_OCTFloatData, SizeX, 1, SizeZ, OCTDataSize); CheckCudaError(); // hipfftDestroy(PlanHandle); hipFree(GPU_ComplexData); // #ifdef SHOW_TRCUDAV2_DETAIL_TIME time = clock() - time; cout << "5. cuFFT: " << ((float)time) / CLOCKS_PER_SEC << " sec" << endl; #endif #pragma endregion #pragma region 7. 
Normalize Data // #ifdef SHOW_TRCUDAV2_DETAIL_TIME time = clock(); #endif // float *GPU_BrightnessArray; hipMalloc(&GPU_BrightnessArray, sizeof(float) * SizeX); ZCalcBrightness << <1, SizeZ >> > (GPU_OCTFloatData, GPU_BrightnessArray, 1, SizeX, SizeZ, StartIndex); CheckCudaError(); // float MaxValue = 0; float *GPU_MaxElement = thrust::max_element(thrust::device, GPU_OCTFloatData, GPU_OCTFloatData + OCTDataSize / 2); hipMemcpy(&MaxValue, GPU_MaxElement, sizeof(float), hipMemcpyDeviceToHost); CheckCudaError(); // ( GPU Normalize) // // TL // // // BR float MinValue = 0; for (int i = MinValuePixel_TL; i <= MinValuePixel_BR; i++) { // [first, last) int beginIndex = i * SizeZ / 2 + i; int endIndex = i * SizeZ / 2 + MinValuePixel_BR + 1; MinValue += thrust::reduce(thrust::device, GPU_OCTFloatData + beginIndex, GPU_OCTFloatData + endIndex); } MinValue /= (MinValuePixel_BR - MinValuePixel_TL + 1) * (MinValuePixel_BR - MinValuePixel_TL + 1); MinValue *= MinValueScalar; // Normaliza Data (Max - Min) 0 // ( array Min & Max ()) assert(MaxValue != MinValue && "FFT!!"); NormalizeData << <dim3(SizeX, 1, SizeZ / NumThreads / 2), NumThreads >> > (GPU_OCTFloatData, MaxValue, MinValue, OCTDataSize / 2); // #ifdef SHOW_TRCUDAV2_DETAIL_TIME time = clock() - time; cout << "7. Normalize Data: " << ((float)time) / CLOCKS_PER_SEC << " sec" << endl; #endif #pragma endregion #pragma region 8. // #ifdef SHOW_TRCUDAV2_DETAIL_TIME time = clock(); #endif // uchar *GPU_UintDataArray; float* GPU_OCTSmoothData; hipMalloc(&GPU_UintDataArray, sizeof(uchar) * SizeX * 1 * SizeZ); hipMalloc(&GPU_OCTSmoothData, sizeof(float) * SizeX * 1 * SizeZ); CheckCudaError(); // TransformToImageAndBorderData << <dim3(SizeX, 1, SizeZ / NumThreads / 2), NumThreads >> > (GPU_OCTFloatData, GPU_OCTSmoothData, GPU_UintDataArray, SizeX, 1, SizeZ / 2, SmoothSizeRange); CheckCudaError(); // size = 1; rows = SizeX; cols = SizeZ / 2; // hipFree(GPU_OCTFloatData); // #ifdef SHOW_TRCUDAV2_DETAIL_TIME time = clock() - time; cout << "8. : " << ((float)time) / CLOCKS_PER_SEC << " sec" << endl; #endif #pragma endregion #pragma region 9. 
// // #ifdef SHOW_TRCUDAV2_DETAIL_TIME time = clock(); #endif #pragma region Init SaveDelete(PointType); PointType = new uchar[size * rows * cols]; memset(PointType, 0, sizeof(uchar) * size * rows * cols); SaveDelete(PointType_1D); PointType_1D = new int[size * rows]; memset(PointType_1D, 0, sizeof(int) * size * rows); // uchar* GPU_PointType; hipMalloc(&GPU_PointType, sizeof(uchar) * size * rows * cols); hipMemset(GPU_PointType, 0, sizeof(uchar) * size * rows * cols); #pragma endregion #pragma region assert(rows <= NumThreads && "rows 1024 "); // & findMaxAndMinPeak << < size * rows * cols / NumThreads, NumThreads >> > (GPU_OCTSmoothData, GPU_BrightnessArray, GPU_PointType, size, rows, cols, MaxPeakThreshold, SatPeakThreshold); CheckCudaError(); // Parse ParseMaxMinPeak << < size, rows >> > (GPU_PointType, size, rows, cols, StartIndex); CheckCudaError(); // int *GPU_PointType_BestN, *PointType_BestN; hipMalloc(&GPU_PointType_BestN, sizeof(int) * size * rows * ChooseBestN); //PickBestChoiceToArray << < size, rows >> > (GPU_OCTSmoothData, GPU_PointType, GPU_PointType_BestN, size, rows, cols, ChooseBestN, StartIndex, GoThroughThreshold); //CheckCudaError(); // // => * 250(rows) * (ChooseBestN) * (Raidus) * N (ChooseBestN) int *GPU_Connect_Status; int ConnectStateSize = size * rows * ChooseBestN * ConnectRadius * ChooseBestN; hipMalloc(&GPU_Connect_Status, sizeof(int) * ConnectStateSize); hipMemset(GPU_Connect_Status, 0, sizeof(int) * ConnectStateSize); ConnectPointsStatus << < size * ChooseBestN , rows >> > (GPU_PointType_BestN, GPU_Connect_Status, size, rows, ChooseBestN, ConnectRadius); CheckCudaError(); // CPU int *Connect_Status = new int[ConnectStateSize]; PointType_BestN = new int[size * rows * ChooseBestN]; hipMemcpy(PointType, GPU_PointType, sizeof(uchar) * size * rows * cols, hipMemcpyDeviceToHost); hipMemcpy(Connect_Status, GPU_Connect_Status, sizeof(int) * ConnectStateSize, hipMemcpyDeviceToHost); hipMemcpy(PointType_BestN, GPU_PointType_BestN, sizeof(int) * size * rows * ChooseBestN, hipMemcpyDeviceToHost); CheckCudaError(); // GetSurface(PointType_BestN, Connect_Status); #pragma endregion // hipFree(GPU_PointType); hipFree(GPU_PointType_BestN); hipFree(GPU_Connect_Status); hipFree(GPU_OCTSmoothData); hipFree(GPU_BrightnessArray); delete[] Connect_Status; delete[] PointType_BestN; #ifdef SHOW_TRCUDAV2_DETAIL_TIME time = clock() - time; cout << "9. : " << ((float)time) / CLOCKS_PER_SEC << " sec" << endl; #endif #pragma endregion #pragma region 10. GPU Data // #ifdef SHOW_TRCUDAV2_DETAIL_TIME time = clock(); #endif // SaveDelete(VolumeData); VolumeData = new uchar[SizeX * 1 * SizeZ]; hipMemcpy(VolumeData, GPU_UintDataArray, sizeof(uchar) * SizeX * 1 * SizeZ / 2, hipMemcpyDeviceToHost); // GPU hipFree(GPU_UintDataArray); // #ifdef SHOW_TRCUDAV2_DETAIL_TIME time = clock() - time; cout << "10. GPU Data : " << ((float)time) / CLOCKS_PER_SEC << " sec" << endl; #endif #pragma endregion // #ifdef SHOW_TRCUDAV2_TOTAL_TIME totalTime = clock() - totalTime; cout << ": " << ((float)totalTime) / CLOCKS_PER_SEC << " sec" << endl; #endif } void TRCudaV2::MultiRawDataToPointCloud(char* FileRawData, int DataSize, int SizeX, int SizeY, int SizeZ, long ShiftValue, double K_Step, int CutValue) { // #ifdef SHOW_TRCUDAV2_TOTAL_TIME totalTime = clock(); #endif ////////////////////////////////////////////////////////////////////////// // // 1. GPU Data // 2. ( 2 Bytes QT GPU 2 Bytes) // Channels2 ( 2) // 3. 5 Fit // 4. Space K Space // 5. cuFFT // 6. Data // 6.5 TopView (TopView ) // 7. Normalize // 8. 
// 9. // 10. GPU Data // // // 1. Function => X Y // 2. ShiftValue => TRIGGER DELAY(FIBER) // 3. K_Step => (14.mm 2.5k step2)(k stepz1~2.5) // 4. CutValue => OCTzlaser ( cuteValue XD) // 5. Function SizeY // 6. TopView ////////////////////////////////////////////////////////////////////////// #pragma region 1. GPU Data // #ifdef SHOW_TRCUDAV2_DETAIL_TIME clock_t time = clock(); #endif // GPU Data char* GPU_FileRawData; // => Raw Data int *GPU_OCTRawData_2Channel; // => OCT Raw Data (2Channels"" Channel ) int *GPU_OCTRawData; // => Denoise Data ( CH1 + CH2 ) ("" Channel) float *GPU_OCTFloatData; // => K Space FFT // 2 Channels bool UseTwoChannels = (DataSize / SizeX / SizeY / SizeZ == 4); // 2 Byte & 2 Channles // hipMalloc(&GPU_FileRawData, sizeof(char) * DataSize); hipMemcpy(GPU_FileRawData, FileRawData, sizeof(char) * DataSize, hipMemcpyHostToDevice); CheckCudaError(); // 2 Chanels int OCTDataSize = SizeX * SizeY * SizeZ; if (UseTwoChannels) hipMalloc(&GPU_OCTRawData_2Channel, sizeof(int) * OCTDataSize * 2); hipMalloc(&GPU_OCTRawData, sizeof(int) * OCTDataSize); hipMalloc(&GPU_OCTFloatData, sizeof(float) * OCTDataSize); // #ifdef SHOW_TRCUDAV2_DETAIL_TIME time = clock() - time; cout << "1. GPU: " << ((float)time) / CLOCKS_PER_SEC << " sec" << endl; #endif #pragma endregion #pragma region 2. ////////////////////////////////////////////////////////////////////////// // // ( 0 ~ 250) // // channel ////////////////////////////////////////////////////////////////////////// // #ifdef SHOW_TRCUDAV2_DETAIL_TIME time = clock(); #endif // 2 Byte if (UseTwoChannels) { RawDataToOriginalData << < dim3(SizeX, SizeY, SizeZ / NumThreads * 2), NumThreads >> > (GPU_FileRawData, GPU_OCTRawData_2Channel, DataSize / 2); CheckCudaError(); // Channel Denoise CombineTwoChannels_Multi << < dim3(SizeX, SizeY, SizeZ / NumThreads), NumThreads >> > (GPU_OCTRawData_2Channel, GPU_OCTRawData, SizeX, SizeY, SizeZ); // hipFree(GPU_OCTRawData_2Channel); } else RawDataToOriginalData << < dim3(SizeX, SizeY, SizeZ / NumThreads), NumThreads >> > (GPU_FileRawData, GPU_OCTRawData, DataSize / 2); CheckCudaError(); // Index ReverseBackScanData << < dim3(SizeX / 2, SizeY / 2, SizeZ / NumThreads), NumThreads >> > (GPU_OCTRawData, SizeX, SizeY, SizeZ); // FileRaw Data hipFree(GPU_FileRawData); // #ifdef SHOW_TRCUDAV2_DETAIL_TIME time = clock() - time; cout << "2. : " << ((float)time) / CLOCKS_PER_SEC << " sec" << endl; #endif #pragma endregion #pragma region 3. 
Fitting // #ifdef SHOW_TRCUDAV2_DETAIL_TIME time = clock(); #endif // Matrix float* GPU_MatrixA; float* GPU_MatrixB; hipMalloc(&GPU_MatrixA, sizeof(float) * (NumPolynomial + 1) *(NumPolynomial + 1)); hipMalloc(&GPU_MatrixB, sizeof(float) * (NumPolynomial + 1)); // int* FirstSizeZData = new int[SizeZ]; hipMemcpy(FirstSizeZData, GPU_OCTRawData, sizeof(int) * SizeZ, hipMemcpyDeviceToHost); hipDeviceSynchronize(); float average = accumulate(FirstSizeZData, FirstSizeZData + SizeZ, 0.0) / SizeZ; delete[] FirstSizeZData; // Matrix GetMatrixA << <1, (NumPolynomial + 1) * (NumPolynomial + 1) >> > (GPU_OCTRawData, GPU_MatrixA, NumPolynomial, SizeZ); GetMatrixB << <1, NumPolynomial + 1 >> > (GPU_OCTRawData, GPU_MatrixB, average, NumPolynomial, SizeZ); CheckCudaError(); float* MatrixA = new float[(NumPolynomial + 1) *(NumPolynomial + 1)]; float* MatrixB = new float[(NumPolynomial + 1)]; hipMemcpy(MatrixA, GPU_MatrixA, sizeof(float) * (NumPolynomial + 1) *(NumPolynomial + 1), hipMemcpyDeviceToHost); hipMemcpy(MatrixB, GPU_MatrixB, sizeof(float) * (NumPolynomial + 1), hipMemcpyDeviceToHost); hipDeviceSynchronize(); // Eigen Fitting Function EigenUtility eigen; eigen.SetAverageValue(average); eigen.SolveByEigen(MatrixA, MatrixB, NumPolynomial); // Function float* GPU_PolyValue; float* PolyValue = eigen.GetFunctionArray(SizeZ, average); hipMalloc(&GPU_PolyValue, sizeof(float) * SizeZ); hipMemcpy(GPU_PolyValue, PolyValue, sizeof(float) * SizeZ, hipMemcpyHostToDevice); MinusByFittingFunction << < dim3(SizeX, SizeY, SizeZ / NumThreads), NumThreads >> > (GPU_OCTRawData, GPU_PolyValue, SizeZ); CheckCudaError(); // hipFree(GPU_MatrixA); hipFree(GPU_MatrixB); hipFree(GPU_PolyValue); delete[] MatrixA; delete[] MatrixB; delete[] PolyValue; // #ifdef SHOW_TRCUDAV2_DETAIL_TIME time = clock() - time; cout << "3. Fitting : " << ((float)time) / CLOCKS_PER_SEC << " sec" << endl; #endif #pragma endregion #pragma region 4. Space K Space // #ifdef SHOW_TRCUDAV2_DETAIL_TIME time = clock(); #endif // float* PX_Scale = new float[SizeZ]; int* KSpaceIndexArray = new int[SizeZ]; float* GPU_PXScale; int* GPU_KSpaceIndexArray; hipMalloc(&GPU_PXScale, sizeof(float) * SizeZ); hipMalloc(&GPU_KSpaceIndexArray, sizeof(int) * SizeZ); // int OffsetBegin = 800; // PXScale Array ComputePXScale << <SizeZ / NumThreads, NumThreads >> > (GPU_PXScale, OffsetBegin, ShiftValue, K_Step, SizeZ); CheckCudaError(); // K Space Index ( GPU thread thread CPU ) hipMemcpy(PX_Scale, GPU_PXScale, sizeof(float) * SizeZ, hipMemcpyDeviceToHost); // K Space Array int index = 1; int KSpaceOffset = PX_Scale[SizeZ - 1]; for (int i = 0; i <= KSpaceOffset; i++) { while (i >= PX_Scale[index]) { index++; } KSpaceIndexArray[i] = index; } for (int i = KSpaceOffset + 1; i < SizeZ; i++) KSpaceIndexArray[i] = -1; // K Space KSpaceIndexArray Index Index hipMemcpy(GPU_KSpaceIndexArray, KSpaceIndexArray, sizeof(int) * SizeZ, hipMemcpyHostToDevice); FrequencyAdjust << <dim3(SizeX, SizeY, SizeZ / NumThreads), NumThreads >> > (GPU_OCTRawData, GPU_OCTFloatData, GPU_PXScale, GPU_KSpaceIndexArray, KSpaceOffset - CutValue, SizeX, SizeY, SizeZ); CheckCudaError(); // hipFree(GPU_PXScale); hipFree(GPU_KSpaceIndexArray); hipFree(GPU_OCTRawData); delete[] KSpaceIndexArray; // #ifdef SHOW_TRCUDAV2_DETAIL_TIME time = clock() - time; cout << "4. Space K Space : " << ((float)time) / CLOCKS_PER_SEC << " sec" << endl; #endif #pragma endregion #pragma region 5. 
cuFFT // #ifdef SHOW_TRCUDAV2_DETAIL_TIME time = clock(); #endif hipfftHandle PlanHandle; hipfftComplex* GPU_ComplexData; // FFT Handle & C2C hipfftComplex int NX = SizeZ; int BatchSize = SizeX * SizeY; hipfftPlan1d(&PlanHandle, NX, HIPFFT_C2C, BatchSize); hipMalloc(&GPU_ComplexData, sizeof(hipfftComplex) * NX * BatchSize); CheckCudaError(); // Complex Data DataToComplexData << <dim3(SizeX, SizeY, SizeZ / NumThreads), NumThreads >> > (GPU_OCTFloatData, GPU_ComplexData, OCTDataSize); CheckCudaError(); // cuFFT(CUDA Fast Fourier Transform) hipfftExecC2C(PlanHandle, GPU_ComplexData, GPU_ComplexData, HIPFFT_FORWARD); CheckCudaError(); // (FFT) & // https://www.youtube.com/watch?v=spUNpyF58BY ComplexDataToData << <dim3(SizeX, SizeY, SizeZ / NumThreads / 2), NumThreads >> > (GPU_ComplexData, GPU_OCTFloatData, SizeX, SizeY, SizeZ, OCTDataSize); CheckCudaError(); // hipfftDestroy(PlanHandle); hipFree(GPU_ComplexData); // #ifdef SHOW_TRCUDAV2_DETAIL_TIME time = clock() - time; cout << "5. cuFFT: " << ((float)time) / CLOCKS_PER_SEC << " sec" << endl; #endif #pragma endregion #pragma region 6. Data // #ifdef SHOW_TRCUDAV2_DETAIL_TIME time = clock(); #endif float* GPU_ShiftData; hipMalloc(&GPU_ShiftData, sizeof(float) * OCTDataSize / 2); // // ShiftFinalData << <dim3(SizeX, SizeY, SizeZ / NumThreads / 2), NumThreads >> > (GPU_OCTFloatData, GPU_ShiftData, SizeX, SizeY, SizeZ / 2, OCTDataSize / 2); CheckCudaError(); // hipFree(GPU_OCTFloatData); // #ifdef SHOW_TRCUDAV2_DETAIL_TIME time = clock() - time; cout << "6. : " << ((float)time) / CLOCKS_PER_SEC << " sec" << endl; #endif #pragma endregion #pragma region 6.5 TopView // #ifdef SHOW_TRCUDAV2_DETAIL_TIME time = clock(); #endif // TopView float* GPU_OtherSideData; hipMalloc(&GPU_OtherSideData, sizeof(float) * OCTDataSize / 2); GetOtherSideView << <SizeX, SizeY >> > (GPU_ShiftData, GPU_OtherSideData, SizeX, SizeY, SizeZ / 2); CheckCudaError(); hipDeviceSynchronize(); // float MaxValue = 0, MinValue = 0; float *GPU_MaxElement = thrust::max_element(thrust::device, GPU_OtherSideData, GPU_OtherSideData + SizeX * SizeY); float *GPU_MinElement = thrust::min_element(thrust::device, GPU_OtherSideData, GPU_OtherSideData + SizeX * SizeY); hipMemcpy(&MaxValue, GPU_MaxElement, sizeof(float), hipMemcpyDeviceToHost); hipMemcpy(&MinValue, GPU_MinElement, sizeof(float), hipMemcpyDeviceToHost); NormalizeData << < SizeX, SizeY >> > (GPU_OtherSideData, MaxValue, MinValue, SizeX * SizeY); CheckCudaError(); // Top View float MeanValue = thrust::reduce(thrust::device, GPU_OtherSideData, GPU_OtherSideData + SizeX * SizeY) / SizeX / SizeY; uchar* GPU_UintOtherSideData; hipMalloc(&GPU_UintOtherSideData, sizeof(uchar) * SizeX * SizeY); TransformOtherSideDataToImage << <SizeX, SizeY >> > (GPU_OtherSideData, GPU_UintOtherSideData, MeanValue, OtherSideMean, SizeX, SizeY); CheckCudaError(); // hipFree(GPU_OtherSideData); // #ifdef SHOW_TRCUDAV2_DETAIL_TIME time = clock() - time; cout << "6.5. TopView : " << ((float)time) / CLOCKS_PER_SEC << " sec" << endl; #endif #pragma endregion #pragma region 7. 
Normalize Data // #ifdef SHOW_TRCUDAV2_DETAIL_TIME time = clock(); #endif // MaxValue = 0; GPU_MaxElement = thrust::max_element(thrust::device, GPU_ShiftData, GPU_ShiftData + OCTDataSize / 2); hipMemcpy(&MaxValue, GPU_MaxElement, sizeof(float), hipMemcpyDeviceToHost); CheckCudaError(); hipDeviceSynchronize(); // ( GPU Normalize) // // TL // // // BR MinValue = 0; for (int i = MinValuePixel_TL; i <= MinValuePixel_BR; i++) { // [first, last) int beginIndex = SizeX * SizeZ / 2 + i * SizeZ / 2 + i; int endIndex = SizeX * SizeZ / 2 + i * SizeZ / 2 + MinValuePixel_BR + 1; MinValue += thrust::reduce(thrust::device, GPU_ShiftData + beginIndex, GPU_ShiftData + endIndex); } MinValue /= (MinValuePixel_BR - MinValuePixel_TL + 1) * (MinValuePixel_BR - MinValuePixel_TL + 1); MinValue *= MinValueScalar; // Normaliza Data (Max - Min) 0 // ( array Min & Max ()) assert(MaxValue != MinValue && "FFT!!"); NormalizeData << <dim3(SizeX, SizeY, SizeZ / NumThreads / 2), NumThreads >> > (GPU_ShiftData, MaxValue, MinValue, OCTDataSize / 2); // #ifdef SHOW_TRCUDAV2_DETAIL_TIME time = clock() - time; cout << "7. Normalize Data: " << ((float)time) / CLOCKS_PER_SEC << " sec" << endl; #endif #pragma endregion #pragma region 8. // #ifdef SHOW_TRCUDAV2_DETAIL_TIME time = clock(); #endif // uchar *GPU_UintDataArray; float* GPU_OCTSmoothData; hipMalloc(&GPU_UintDataArray, sizeof(uchar) * SizeX * SizeY * SizeZ); hipMalloc(&GPU_OCTSmoothData, sizeof(float) * SizeX * SizeY * SizeZ); CheckCudaError(); // TransformToImageAndBorderData << <dim3(SizeX, SizeY, SizeZ / NumThreads / 2), NumThreads >> > (GPU_ShiftData, GPU_OCTSmoothData, GPU_UintDataArray, SizeX, SizeY, SizeZ / 2, SmoothSizeRange); CheckCudaError(); // size = SizeY; rows = SizeX; cols = SizeZ / 2; // hipFree(GPU_ShiftData); // #ifdef SHOW_TRCUDAV2_DETAIL_TIME time = clock() - time; cout << "8. : " << ((float)time) / CLOCKS_PER_SEC << " sec" << endl; #endif #pragma endregion #pragma region 9. 
// #ifdef SHOW_TRCUDAV2_DETAIL_TIME time = clock(); #endif #pragma region Init SaveDelete(PointType); PointType = new uchar[size * rows * cols]; memset(PointType, 0, sizeof(uchar) * size * rows * cols); SaveDelete(PointType_1D); PointType_1D = new int[size * rows]; memset(PointType_1D, 0, sizeof(int) * size * rows); // uchar* GPU_PointType; hipMalloc(&GPU_PointType, sizeof(uchar) * size * rows * cols); hipMemset(GPU_PointType, 0, sizeof(uchar) * size * rows * cols); #pragma endregion #pragma region assert(rows <= NumThreads && "rows 1024 "); // float *GPU_BrightnessArray; hipMalloc(&GPU_BrightnessArray, sizeof(float) * size * rows); ZCalcBrightness << <size, rows >> > (GPU_OCTSmoothData, GPU_BrightnessArray, size, rows, cols, StartIndex); CheckCudaError(); // & findMaxAndMinPeak << < size * rows * cols / NumThreads, NumThreads >> > (GPU_OCTSmoothData, GPU_BrightnessArray, GPU_PointType, size, rows, cols, MaxPeakThreshold, SatPeakThreshold); CheckCudaError(); // Parse ParseMaxMinPeak << < size, rows >> > (GPU_PointType, size, rows, cols, StartIndex); CheckCudaError(); // int *GPU_PointType_BestN, *PointType_BestN; float* GPU_CandidateGap; hipMalloc(&GPU_PointType_BestN, sizeof(int) * size * rows * ChooseBestN); hipMalloc(&GPU_CandidateGap, sizeof(float) * size * rows * cols); // Gap hipMemset(GPU_CandidateGap, 0, sizeof(float) * size * rows * cols); hipMemset(GPU_PointType_BestN, 0, sizeof(int) * size * rows * ChooseBestN); PickBestChoiceToArray << < size, rows >> > (GPU_OCTSmoothData, GPU_PointType, GPU_CandidateGap, GPU_PointType_BestN, size, rows, cols, ChooseBestN, StartIndex, GoThroughThreshold); CheckCudaError(); // Neighbor float* GPU_NeighborCountArray; hipMalloc(&GPU_NeighborCountArray, sizeof(float) * size * rows * ChooseBestN); CalcNeighbor << <size, rows >> > (GPU_PointType_BestN, GPU_NeighborCountArray, size, rows, cols, ChooseBestN, DenoiseWindowsRadius); CheckCudaError(); // // => * 250(rows) * (ChooseBestN) * (Raidus) * N (ChooseBestN) int *GPU_Connect_Status; int ConnectStateSize = size * rows * ChooseBestN * ConnectRadius * ChooseBestN; hipMalloc(&GPU_Connect_Status, sizeof(int) * ConnectStateSize); hipMemset(GPU_Connect_Status, 0, sizeof(int) * ConnectStateSize); ConnectPointsStatus << < size * ChooseBestN, rows >> > (GPU_PointType_BestN, GPU_Connect_Status, size, rows, ChooseBestN, ConnectRadius); CheckCudaError(); // CPU int *Connect_Status = new int[ConnectStateSize]; PointType_BestN = new int[size * rows * ChooseBestN]; TestBestN = new int[size * rows * ChooseBestN]; hipMemcpy(PointType, GPU_PointType, sizeof(uchar) * size * rows * cols, hipMemcpyDeviceToHost); hipMemcpy(Connect_Status, GPU_Connect_Status, sizeof(int) * ConnectStateSize, hipMemcpyDeviceToHost); hipMemcpy(PointType_BestN, GPU_PointType_BestN, sizeof(int) * size * rows * ChooseBestN, hipMemcpyDeviceToHost); hipMemcpy(TestBestN, GPU_PointType_BestN, sizeof(int) * size * rows * ChooseBestN, hipMemcpyDeviceToHost); CheckCudaError(); // GetSurface(PointType_BestN, Connect_Status); #pragma endregion // hipFree(GPU_PointType); hipFree(GPU_PointType_BestN); hipFree(GPU_Connect_Status); hipFree(GPU_OCTSmoothData); hipFree(GPU_BrightnessArray); hipFree(GPU_CandidateGap); hipFree(GPU_NeighborCountArray); delete[] Connect_Status; delete[] PointType_BestN; // #ifdef SHOW_TRCUDAV2_DETAIL_TIME time = clock() - time; cout << "9. : " << ((float)time) / CLOCKS_PER_SEC << " sec" << endl; #endif #pragma endregion #pragma region 10. 
GPU Data // #ifdef SHOW_TRCUDAV2_DETAIL_TIME time = clock(); #endif // SaveDelete(VolumeData); VolumeData = new uchar[SizeX * SizeY * SizeZ / 2]; hipMemcpy(VolumeData, GPU_UintDataArray, sizeof(uchar) * SizeX * SizeY * SizeZ / 2, hipMemcpyDeviceToHost); SaveDelete(VolumeData_OtherSide); VolumeData_OtherSide = new uchar[SizeX * SizeY]; hipMemcpy(VolumeData_OtherSide, GPU_UintOtherSideData, sizeof(uchar) * SizeX * SizeY, hipMemcpyDeviceToHost); // GPU hipFree(GPU_UintDataArray); hipFree(GPU_UintOtherSideData); // #ifdef SHOW_TRCUDAV2_DETAIL_TIME time = clock() - time; cout << "10. GPU Data : " << ((float)time) / CLOCKS_PER_SEC << " sec" << endl; #endif #pragma endregion // #ifdef SHOW_TRCUDAV2_TOTAL_TIME totalTime = clock() - totalTime; cout << ": " << ((float)totalTime) / CLOCKS_PER_SEC << " sec" << endl; #endif } // vector<Mat> TRCudaV2::TransfromMatArray(bool SaveBorder = false) { // Mat vector<Mat> ImgArray; for (int i = 0; i < size; i++) { // Offset Mat img(rows, cols, CV_8U, VolumeData + i * rows * cols); cvtColor(img, img, CV_GRAY2BGR); // ImgArray.push_back(img); } if (SaveBorder) { // Debug peak /*for (int i = 0; i < size; i++) for (int j = 0; j < rows * cols; j++) { int offsetIndex = i * rows * cols; int rowIndex = j / cols; int colIndex = j % cols; Vec3b color(0, 0, 0); if (PointType[offsetIndex + j] == 1) color = Vec3b(0, 255, 255); else if (PointType[offsetIndex + j] == 2) color = Vec3b(255, 255, 255); ImgArray[i].at<Vec3b>(rowIndex, colIndex) = color; }*/ // for (int i = 0; i < size; i++) for (int j = 0; j < rows; j++) { int index = i * rows + j; if (PointType_1D[index] != -1) { Point contourPoint(PointType_1D[index], j); circle(ImgArray[i], contourPoint, 2, Scalar(0, 255, 255), CV_FILLED); } } } return ImgArray; } Mat TRCudaV2::TransformToOtherSideView() { assert(size > 1 && ""); Mat img(rows, size, CV_8U, VolumeData_OtherSide); cvtColor(img, img, CV_GRAY2BGR); return img; } void TRCudaV2::CopySingleBorder(int* LastArray) { assert(LastArray != NULL && PointType_1D != NULL && size == 1 && " Array !!"); // assert call () memcpy(LastArray, PointType_1D, sizeof (int) * size * rows); } void TRCudaV2::CopyBorder(int* BorderArray) { assert(BorderArray != NULL && PointType_1D != NULL && size != 1 && " Array !!"); // assert call () memcpy(BorderArray, PointType_1D, sizeof(int) * size * rows); } bool TRCudaV2::ShakeDetect_Single(int* LastArray, bool ShowDebugMessage) { // int voteNum = 0; // float MoveDis = 0; // // for (int i = 0; i < rows; i++) { if (PointType_1D[i] != -1 && LastArray[i] != -1) { MoveDis += abs(PointType_1D[i] - LastArray[i]); voteNum++; } } // if (voteNum > OCT_Valid_VoteNum) { MoveDis /= voteNum; // Debug Message if(ShowDebugMessage) cout << "(pixel): " << (float)MoveDis << endl; // if (MoveDis < OCT_Move_Threshold) return false; } return true; } bool TRCudaV2::ShakeDetect_Multi(bool UsePreiseThreshold, bool ShowDebugMessage) { // 60 ~ 200 int voteNum = 0; // float MoveDis = 0; // // Reverse 0 ~ 250 for (int i = 60; i < 200; i++) { bool IsMove = false; // int leftIndex = 124 * rows + i; // 124 int rightIndex = 125 * rows + i; // 125 // for (int j = size / 2 - 1; j >= 0; j--) if (PointType_1D[j * rows + i] != -1) { leftIndex = j * rows + i; break; } // for (int j = size / 2; j < size; j++) if (PointType_1D[j] != -1) { rightIndex = j * rows + i; break; } int leftY = PointType_1D[leftIndex]; int rightY = PointType_1D[rightIndex]; // if (PointType_1D[leftIndex] != -1 && PointType_1D[rightIndex] != -1) { int DisMid = abs(PointType_1D[rightIndex] - 
PointType_1D[leftIndex]); MoveDis += DisMid; voteNum++; } } // if (voteNum > OCT_Valid_VoteNum) { MoveDis /= voteNum; // Debug Message if (ShowDebugMessage) cout << "(pixel): " << (float)MoveDis << endl; // if (UsePreiseThreshold) { // if (MoveDis < OCT_Move_Precise_Threshold) return false; } else { // if (MoveDis < OCT_Move_Threshold) return false; } } else if (ShowDebugMessage) cout << "!!" << endl; return true; } ////////////////////////////////////////////////////////////////////////// // Helper Function ////////////////////////////////////////////////////////////////////////// void TRCudaV2::GetSurface(int *PointType_BestN, int *Connect_Status) { // N #pragma omp parallel for //num_thread(4) for (int i = 0; i < size; i++) { // 10 Sample int RowGap = rows / 10; vector<vector<ConnectInfo>> StatusVector; for (int j = 0; j < rows; j += RowGap) for (int chooseNIndex = 0; chooseNIndex < ChooseBestN; chooseNIndex++) { int begin = j; int end = j; // if (PointType_BestN[i * rows * ChooseBestN + j * ChooseBestN + chooseNIndex] == -1) continue; // vector<ConnectInfo> Connect; #pragma region // ConnectInfo info; info.rowIndex = j; info.chooseIndex = chooseNIndex; Connect.push_back(info); int FindIndex = j; int FindChooseIndex = chooseNIndex; bool IsFind = true; while (IsFind && FindIndex > 0) { int minMoveIndex = -1; int minChooseIndex = -1; int tempValue = ConnectRadius * ConnectRadius; for (int k = 1; k < ConnectRadius; k++) for (int nextChooseNIndex = 0; nextChooseNIndex < ChooseBestN; nextChooseNIndex++) { int index = i * rows * ChooseBestN * ConnectRadius * ChooseBestN + // Size (FindIndex - k) * ChooseBestN * ConnectRadius * ChooseBestN + // Rows nextChooseNIndex * ConnectRadius * ChooseBestN + // Top N ( ChooseIndex) k * ChooseBestN + // FindChooseIndex; if (FindIndex - k >= 0 && Connect_Status[index] != 0 && tempValue > Connect_Status[index]) { tempValue = Connect_Status[index]; minMoveIndex = k; minChooseIndex = nextChooseNIndex; } } // if (minMoveIndex != -1) { // FindIndex = FindIndex - minMoveIndex; FindChooseIndex = minChooseIndex; // info.rowIndex = FindIndex; info.chooseIndex = minChooseIndex; Connect.push_back(info); // IsFind = true; } else IsFind = false; } #pragma endregion #pragma region FindIndex = j; FindChooseIndex = chooseNIndex; while (IsFind && FindIndex < rows - 1) { int minMoveIndex = -1; int minChooseIndex = -1; int tempValue = ConnectRadius * ConnectRadius; for (int k = 1; k < ConnectRadius; k++) for (int nextChooseNIndex = 0; nextChooseNIndex < ChooseBestN; nextChooseNIndex++) { int index = i * rows * ChooseBestN * ConnectRadius * ChooseBestN + // Size FindIndex * ChooseBestN * ConnectRadius * ChooseBestN + // Rows FindChooseIndex * ConnectRadius * ChooseBestN + // Top N k * ChooseBestN + // nextChooseNIndex; if (FindIndex + k < rows && Connect_Status[index] != 0 && tempValue > Connect_Status[index]) { tempValue = Connect_Status[index]; minMoveIndex = k; minChooseIndex = nextChooseNIndex; } } // if (minMoveIndex != -1) { // FindIndex = FindIndex + minMoveIndex; FindChooseIndex = minChooseIndex; // info.rowIndex = FindIndex; info.chooseIndex = minChooseIndex; Connect.push_back(info); // IsFind = true; } else IsFind = false; } #pragma endregion // 1 if (Connect.size() > 1) { // sort(Connect.begin(), Connect.end(), SortByRows); StatusVector.push_back(Connect); } } // if (StatusVector.size() == 0) { memset(&PointType_1D[i * rows], -1, sizeof(int) * rows); continue; } // sort(StatusVector.begin(), StatusVector.end(), SortByVectorSize); // () vector<int> BestCandidate; 
int Begin = rows; int End = 0; for (int j = 0; j < StatusVector.size() && j < 3; j++) { int CurrentBegin = StatusVector[j][0].rowIndex; int CurrentEnd = StatusVector[j][StatusVector[j].size() - 1].rowIndex; if (Begin > CurrentBegin) { Begin = min(Begin, CurrentBegin); End = max(End, CurrentEnd); BestCandidate.push_back(j); } if (End < CurrentEnd) { Begin = min(Begin, CurrentBegin); End = max(End, CurrentEnd); BestCandidate.push_back(j); } } // for (int j = 1; j < BestCandidate.size(); j++) if (StatusVector[BestCandidate[j]].size() >= 3) for (int k = 0; k < StatusVector[BestCandidate[j]].size(); k++) StatusVector[0].push_back(StatusVector[j][k]); vector<ConnectInfo> LineVector = StatusVector[0]; int index = 0; // LineVector Index for (int j = 0; j < rows; j++) { int Type1D_Index = i * rows + j; if (LineVector[index].rowIndex != j) PointType_1D[Type1D_Index] = -1; else if (LineVector[index].rowIndex == j) { int BestN_Index = i * rows * ChooseBestN + // LineVector[index].rowIndex * ChooseBestN + // row LineVector[index].chooseIndex; // ChooseIndex // PointType PointType_1D[j + i * rows] = PointType_BestN[BestN_Index]; index++; if (index >= LineVector.size()) { for (int k = j + 1; k < rows; k++) PointType_1D[k + i * rows] = -1; break; } } } } // Smooth int* tempPointType_1D = new int[size * rows]; for (int i = 0; i < size; i++) for (int j = 0; j < rows; j ++) { int totalPoint = 0; int totalZ = 0; int index = i * rows + j; if (PointType_1D[index] == -1) { tempPointType_1D[index] = -1; continue; } for (int k = -DenoiseWindowsRadius; k <= DenoiseWindowsRadius; k++) for (int l = -DenoiseWindowsRadius; l <= DenoiseWindowsRadius; l++) { int currentI = i + k; int currentJ = j + l; if (0 <= currentI && currentI < size && 0 <= currentJ && currentJ < rows) { int currentIndex = currentI *rows + currentJ; if (PointType_1D[currentIndex] != -1) { totalPoint++; totalZ += PointType_1D[currentIndex]; } } } tempPointType_1D[index] = totalZ / totalPoint; } memcpy(PointType_1D, tempPointType_1D, sizeof(int) * size * rows); delete[] tempPointType_1D; } bool TRCudaV2::SortByRows(ConnectInfo left, ConnectInfo right) { return left.rowIndex < right.rowIndex; } bool TRCudaV2::SortByVectorSize(vector<ConnectInfo> left, vector<ConnectInfo> right) { return right.size() < left.size(); } void TRCudaV2::CheckCudaError() { hipError_t GPU_Error = hipGetLastError(); if (GPU_Error != hipSuccess) { cout << hipGetErrorString(GPU_Error) << endl; assert(false); exit(-1); } } void TRCudaV2::SaveDelete(void* pointer) { if (pointer != NULL) delete[] pointer; }
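The SingleRawDataToPointCloud and MultiRawDataToPointCloud routines above run their FFT stage as one batched 1D complex-to-complex forward transform (hipfftPlan1d with NX = SizeZ and batch = SizeX or SizeX * SizeY, then an in-place hipfftExecC2C). Below is a hypothetical, stripped-down CUDA sketch of just that stage; BatchedForwardFFT, d_signal, NX and BATCH are illustrative names and the sizes are only examples, while the .cu counterpart of this file uses the cufft* equivalents of the calls shown here.

#include <cuda_runtime.h>
#include <cufft.h>

// One plan covering every A-scan, executed as an in-place forward FFT.
static void BatchedForwardFFT(cufftComplex* d_data, int nx, int batch) {
    cufftHandle plan;
    cufftPlan1d(&plan, nx, CUFFT_C2C, batch);
    cufftExecC2C(plan, d_data, d_data, CUFFT_FORWARD);
    cufftDestroy(plan);
}

int main() {
    const int NX = 2048, BATCH = 250;   // e.g. SizeZ samples per A-scan, SizeX A-scans
    cufftComplex* d_signal = NULL;
    cudaMalloc(&d_signal, sizeof(cufftComplex) * NX * BATCH);
    cudaMemset(d_signal, 0, sizeof(cufftComplex) * NX * BATCH);
    BatchedForwardFFT(d_signal, NX, BATCH);
    cudaDeviceSynchronize();
    cudaFree(d_signal);
    return 0;
}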
b230e8b4ab1ad3affc652a9cfe971751f473b1a8.cu
#include "TRCudaV2.cuh" #include "EigenUtility.h" TRCudaV2::TRCudaV2() { } TRCudaV2::~TRCudaV2() { // 這邊要刪除其他創出來的變數 SaveDelete(VolumeData); SaveDelete(PointType); SaveDelete(PointType_1D); } ////////////////////////////////////////////////////////////////////////// // GPU ////////////////////////////////////////////////////////////////////////// __device__ static float Z1Function(float x1) { // 這個 Function 不確定在幹嘛XD // https://i.imgur.com/QS3bczf.png return -126.4517 + 0.4005123 * x1 - 0.000011981 * pow(x1 - 2122.41, 2) - 0.000000011664 * pow(x1 - 2122.41, 3) + 0.000000000001432 * pow(x1 - 2122.41, 4) - 0.0000000000000008164 * pow(x1 - 2122.41, 5) + 5.939E-20 * pow(x1 - 2122.41, 6); } __global__ static void RawDataToOriginalData(char* FileRawData, int* OCTRawData, int OCTDataSize) { // 這邊是原本讀取是 1個 Byte 要轉乘 2個 Bytes 為一筆資料 int id = blockIdx.y * gridDim.x * gridDim.z * blockDim.x + // Y => Y * 250 * (2 * 1024) blockIdx.x * gridDim.z * blockDim.x + // X => X * (2 * 1024) blockIdx.z * blockDim.x + // Z => (Z1 * 1024 + Z2) threadIdx.x; // 這邊應該是不會發生,就當作例外判斷 if (id >= OCTDataSize) { printf("轉 Raw Data 有 Error!\n"); return; } OCTRawData[id] = (int)((uchar)FileRawData[id * 2] + (uchar)FileRawData[id * 2 + 1] * 256); } __global__ static void CombineTwoChannels_Single(int* OCTData_2Channls, int* OCTData, int SizeX, int SizeY, int SizeZ) { // 這邊是 Denoise,把兩個 Channel 的資料相加 int id = blockIdx.y * gridDim.x * gridDim.z * blockDim.x + // Y => Y * 250 * (2 * 1024) blockIdx.x * gridDim.z * blockDim.x + // X => X * (2 * 1024) blockIdx.z * blockDim.x + // Z => (Z1 * 1024 + Z2) threadIdx.x; // 這邊應該是不會發生,就當作例外判斷 if (id >= SizeX * SizeY * SizeZ) { printf("Combine Two Channel 有 Error!\n"); return; } int BoxSize = SizeX * SizeZ; // 這邊沒有反掃,所以直接接上大小 int BoxIndex = id / BoxSize; int BoxLeft = id % BoxSize; OCTData[id] = (OCTData_2Channls[BoxIndex * 2 * BoxSize + BoxLeft] + OCTData_2Channls[(BoxIndex * 2 + 1) * BoxSize + BoxLeft]) / 2; } __global__ static void CombineTwoChannels_Multi(int* OCTData_2Channls, int* OCTData, int SizeX, int SizeY, int SizeZ) { // 這邊是 Denoise,把兩個 Channel 的資料相加 int id = blockIdx.y * gridDim.x * gridDim.z * blockDim.x + // Y => Y * 250 * (2 * 1024) blockIdx.x * gridDim.z * blockDim.x + // X => X * (2 * 1024) blockIdx.z * blockDim.x + // Z => (Z1 * 1024 + Z2) threadIdx.x; // 這邊應該是不會發生,就當作例外判斷 if (id >= SizeX * SizeY * SizeZ) { printf("Combine Two Channel 有 Error!\n"); return; } int BoxSize = SizeX * SizeZ * 2; // 一個 Channel 的資料是 正掃 + 反掃 int BoxIndex = id / BoxSize; int BoxLeft = id % BoxSize; OCTData[id] = (OCTData_2Channls[BoxIndex * 2 * BoxSize + BoxLeft] + OCTData_2Channls[(BoxIndex * 2 + 1) * BoxSize + BoxLeft]) / 2; } __global__ static void ReverseBackScanData(int* OCTData, int SizeX, int SizeY, int SizeZ) { // 這邊是要反轉 反掃的資料 int id = (blockIdx.y * 2 + 1) * gridDim.x * 2 * gridDim.z * blockDim.x + // Y => (Y * 2 + 1) * (2 * 1024) => 1, 3, 5, 7, 9 blockIdx.x * gridDim.z * blockDim.x + // X => X * (125 * 2) * (2 * 1024) blockIdx.z * blockDim.x + // Z => (Z1 * 1024 + Z2) threadIdx.x; int changeID = (blockIdx.y * 2 + 1) * gridDim.x * 2 * gridDim.z * blockDim.x + // Y => (Y * 2 + 1) * (2 * 1024) => 1, 3, 5, 7, 9 (gridDim.y * 2 - blockIdx.x - 1) * gridDim.z * blockDim.x + // X => (250 - X - 1) * (125 * 2) * (2 * 1024) blockIdx.z * blockDim.x + // Z => (Z1 * 1024 + Z2) threadIdx.x; int value = OCTData[id]; OCTData[id] = OCTData[changeID]; OCTData[changeID] = value; } __global__ static void GetMatrixA(int* OCTData, float* MatrixA, int NumPolynomial, int OneDataSize) { // 這個 Function 是去取得 MatrixA 的值 
int id = blockIdx.x * blockDim.x + threadIdx.x; // 例外判斷 (理論上應該也是不會超過) if (id >= (NumPolynomial + 1) * (NumPolynomial + 1)) { printf("多項式 Fitting 有問題!\n"); return; } // 算 Index int rowIndex = id % (NumPolynomial + 1); int colsIndex = id / (NumPolynomial + 1); // 做相加 float value = 0; for (int i = 0; i < OneDataSize; i++) { // 抓出兩項的值 float FirstValue = (float)i / OneDataSize; float SecondValue = (float)i / OneDataSize; value += pow(FirstValue, NumPolynomial - rowIndex) * pow(SecondValue, NumPolynomial - colsIndex); } MatrixA[id] = value; } __global__ static void GetMatrixB(int* OCTData, float* MatrixB, float YAverage, int NumPolynomial, int OneDataSize) { int id = blockIdx.x * blockDim.x + threadIdx.x; // 算 Index int rowIndex = id % (NumPolynomial + 1); int colsIndex = id / (NumPolynomial + 1); // 做相加 float value = 0; for (int i = 0; i < OneDataSize; i++) { // 抓出兩項的值 float FirstValue = (float)i / OneDataSize; float SecondValue = OCTData[i] - YAverage; value += pow(FirstValue, NumPolynomial - rowIndex) * SecondValue; } MatrixB[id] = value; } __global__ static void MinusByFittingFunction(int* OCTData, float* PolyValue, int SizeZ) { // 這邊要減掉 Fitting Data int id = blockIdx.y * gridDim.x * gridDim.z * blockDim.x + // Y => Y * 250 * (2 * 1024) blockIdx.x * gridDim.z * blockDim.x + // X => X * (2 * 1024) blockIdx.z * blockDim.x + // Z => (Z1 * 1024 + Z2) threadIdx.x; // 先拿出他是第幾個 Z int idZ = id % SizeZ; // 減掉預測的值 OCTData[id] -= PolyValue[idZ]; } __global__ static void ComputePXScale(float* PXScale, int OffsetBegin, int ShiftValue, int Steps, int Size) { // 這邊是算出 PXScale Array(詳細在幹嘛我不是很懂@@) int id = blockIdx.x * blockDim.x + threadIdx.x; if (id >= Size) { printf("ComputePXScale 有問題!\n"); return; } // 聽說是去直流 int idOffset = OffsetBegin + ShiftValue; PXScale[id] = (Z1Function(idOffset + id) - Z1Function(idOffset)) * Steps; } __global__ static void FrequencyAdjust(int* OCTData, float* KSpaceData, float* PXScale, int* IndexArray, int CutIndex, int SizeX, int SizeY, int SizeZ) { // 這邊是 Denoise,把兩個 Channel 的資料相加 int id = blockIdx.y * gridDim.x * gridDim.z * blockDim.x + // Y => Y * 250 * (2 * 1024) blockIdx.x * gridDim.z * blockDim.x + // X => X * (2 * 1024) blockIdx.z * blockDim.x + // Z => (Z1 * 1024 + Z2) threadIdx.x; if (id >= SizeX * SizeY * SizeZ) { printf("Frequency 轉換的地方有問題"); return; } // 算回原本的 Index int idZ = id % SizeZ; if (IndexArray[idZ] == -1 || idZ >= CutIndex || idZ == 0) { KSpaceData[id] = 0; return; } // 要算斜率前,先拿出上一筆資料 int LastPXScaleIndex = (IndexArray[idZ] - 1 <= 0 ? 
0 : IndexArray[idZ] - 1); double m = (double)(OCTData[id] - OCTData[id - 1]) / (PXScale[IndexArray[idZ]] - PXScale[LastPXScaleIndex]); double c = OCTData[id] - m * PXScale[IndexArray[idZ]]; KSpaceData[id] = m * idZ + c; } __global__ static void DataToComplexData(float* KSpaceData, cufftComplex* FFTData, int OCTDataSize) { // 把 KSpace 的 Data 塞進 FFT int id = blockIdx.y * gridDim.x * gridDim.z * blockDim.x + // Y => Y * 250 * (2 * 1024) blockIdx.x * gridDim.z * blockDim.x + // X => X * (2 * 1024) blockIdx.z * blockDim.x + // Z => (Z1 * 1024 + Z2) threadIdx.x; if (id >= OCTDataSize) { printf("放進 Complex Data 有錯誤!!\n"); return; } // 放進 Complex Data 裡 FFTData[id].x = KSpaceData[id]; FFTData[id].y = 0; } __global__ static void ComplexDataToData(cufftComplex* FFTData, float* OCTFloatData, int SizeX, int SizeY, int SizeZ, int OCTDataSize) { // FFT 資料塞回原本的資料集 int id = blockIdx.y * gridDim.x * gridDim.z * blockDim.x + // Y => Y * 250 * (2 * 1024) blockIdx.x * gridDim.z * blockDim.x + // X => X * (1 * 1024) blockIdx.z * blockDim.x + // Z => (0 * 1024 + Z2) threadIdx.x; if (id >= OCTDataSize / 2) { printf("Complex To Data 有錯誤!!\n"); return; } // 這邊要避免 0 頻率與 最大頻率(由於只取一半的右邊,所以只拿 1024),詳情請看 Youtube 連結 (你看學長有多好,都找連結給你了,還不看!!) // 這邊要除以 2 是因為它會對稱 // 然後拿的順序要反過來 (由於東元那邊的程式是這樣) // 如果是最大頻率 (也就是 Size / 2 - 1 => 1023),那就要去下一個 也就是 1022 /*int idZ = id % (SizeZ / 2); idZ = SizeZ / 2 - idZ - 1; if (idZ == SizeZ / 2 - 1) idZ--;*/ int idZ = id % (SizeZ / 2); if (idZ == 0) idZ++; // 這邊的算法要對應回去原本的資料 int tempIndex = id / (SizeZ / 2); int idX = tempIndex % SizeX; int idY = tempIndex / SizeX; int NewIndex = idY * SizeX * SizeZ + idX * SizeZ + idZ; float temp = sqrt(FFTData[NewIndex].x * FFTData[NewIndex].x + FFTData[NewIndex].y * FFTData[NewIndex].y); // 做一下例外判斷 if (temp == 0) OCTFloatData[id] = 0; else OCTFloatData[id] = log10f(temp) * 10; } __global__ static void ShiftFinalData(float* AfterFFTData, float* ShiftData, int SizeX, int SizeY, int FinalSizeZ, int FinalDataSize) { // 這邊要做位移 // 由於硬體是這樣子 ↓ // => | -> // ("->" 是指第一段,"=>" 是指第二段) int id = blockIdx.y * gridDim.x * gridDim.z * blockDim.x + // Y => Y * 250 * (2 * 1024) blockIdx.x * gridDim.z * blockDim.x + // X => X * (2 * 1024) blockIdx.z * blockDim.x + // Z => (Z1 * 1024 + Z2) threadIdx.x; if (id >= FinalDataSize) { printf("Shift Data 有錯誤!!\n"); return; } // 這邊的算法要對應回去原本的資料 int idZ = id % FinalSizeZ; int tempIndex = id / FinalSizeZ; int idX = tempIndex % SizeX; int idY = tempIndex / SizeX; // SizeY 折回來 // (0 ~ 124 125 ~ 249) // ↓ // (125 ~ 249 0 ~ 124) idY = (idY + SizeY / 2) % SizeY; int NewIndex = idY * SizeX * FinalSizeZ + idX * FinalSizeZ + idZ; ShiftData[id] = AfterFFTData[NewIndex]; //ShiftData[id] = AfterFFTData[id]; } __global__ static void NormalizeData(float* ShiftData, float MaxValue, float MinValue, int FinalDataSize) { // 這邊是根據資料的最大最小值,去做 Normalize 資料 int id = blockIdx.y * gridDim.x * gridDim.z * blockDim.x + // Y => Y * 250 * (2 * 1024) blockIdx.x * gridDim.z * blockDim.x + // X => X * (2 * 1024) blockIdx.z * blockDim.x + // Z => (Z1 * 1024 + Z2) threadIdx.x; // 例外判斷 if (id >= FinalDataSize) { printf("Normaliza Data 超出範圍\n"); return; } if (ShiftData[id] < MinValue) ShiftData[id] = 0; else if (ShiftData[id] > MaxValue) ShiftData[id] = 1; else ShiftData[id] = (ShiftData[id] - MinValue) / (MaxValue - MinValue); } // 轉成圖片 & 產生邊界判斷 (Smooth 後的 Data)的資料 __device__ static float SmoothDataByIndex(float* VolumeData, int id, int FinalSizeZ, int SmoothSizeRange) { int idZ = id % FinalSizeZ; int SmoothRadius = (SmoothSizeRange - 1) / 2; // Smooth 這個區段的資料 int MinValue 
= min(SmoothRadius, idZ - 0); int MaxValue = min(SmoothRadius, FinalSizeZ - idZ - 1); float TempTotal = 0; // 把範圍內的部分相加 for (int i = -MinValue; i <= MaxValue; i++) TempTotal += VolumeData[id + i]; TempTotal /= (MaxValue + MinValue + 1); return TempTotal; } __global__ static void TransformToImageAndBorderData(float* VolumeData_Normalized, float* SmoothData, uchar* ImageArray, int SizeX, int SizeY, int FinalSizeZ, int SmoothSizeRange) { // 這邊是將原本的資料,轉換完圖片 int id = blockIdx.y * gridDim.x * gridDim.z * blockDim.x + // Y => Y * 250 * 1 * 1024 blockIdx.x * gridDim.z * blockDim.x + // X => X * 1 * 1024 blockIdx.z * blockDim.x + // Z => (Z1 * 1024 + Z2) threadIdx.x; if (id >= SizeX * SizeY * FinalSizeZ) // 判斷是否超出大小 return; // 產生 Border Detect 的資料 SmoothData[id] = SmoothDataByIndex(VolumeData_Normalized, id, FinalSizeZ, SmoothSizeRange); // 這個 1.3 倍,是東元測出來的 float data = VolumeData_Normalized[id] * 255 * 1.3f; if (data >= 255) ImageArray[id] = 255; else if (data <= 0) ImageArray[id] = 0; else ImageArray[id] = (uchar)data; } // 邊界部分 __global__ static void ZCalcBrightness(float* DataArray, float* BrightArray, int size, int rows, int cols, int startIndex) { int id = blockIdx.x * blockDim.x + threadIdx.x; if (id >= size * rows) // 超出範圍 return; // 算 Index int sizeIndex = id / rows; int rowIndex = id % rows; BrightArray[id] = 0; for (int i = startIndex; i < cols; i++) { int currentID = sizeIndex * rows * cols + rowIndex * cols + i; BrightArray[id] += DataArray[currentID]; } } __global__ static void findMaxAndMinPeak(float* DataArray, float* BrightnessArray, uchar* PointType, int size, int rows, int cols, float MaxPeakThreshold, float SatPeakThreshold) { int id = blockIdx.x * blockDim.x + threadIdx.x; if (id >= rows * cols * size) // 超出範圍 return; // width 判斷 1 ~ (width - 1) int colID = id % cols; if (1 >= colID || colID == (cols - 1)) return; // 是否飽和 int tempIndex = id / cols; if (BrightnessArray[tempIndex] > SatPeakThreshold) return; // 接著要去比周圍 // 峰值判斷 (要比兩邊高,且峰值要高於某一個值,且左 或右差值,只有一端能高於這個值) float DiffLeft = DataArray[id] - DataArray[id - 1]; float DiffRight = DataArray[id] - DataArray[id + 1]; if (DiffLeft > 0 && DiffRight > 0 && DataArray[id] > MaxPeakThreshold) PointType[id] = 1; else if (DiffLeft < 0 && DiffRight < 0) PointType[id] = 2; } __global__ static void ParseMaxMinPeak(uchar* PointType, int size, int rows, int cols, int startIndex) { int id = blockIdx.x * blockDim.x + threadIdx.x; if (id >= size * rows) // 超出範圍 return; // 算 Index int sizeIndex = id / rows; int rowIndex = id % rows; // 然後這邊要去 Skip 所有的 Min int lastMinID = -1; bool FindMax = false; // 為了要抓出 最大(有效)的 區間 int Useful_Start = -1; int Useful_End = -1; int Useful_PeakCount = -1, tempPeakCount = 0; // 刪除多餘 min Peak for (int i = 0; i < startIndex; i++) { int currentID = sizeIndex * rows * cols + rowIndex * cols + i; PointType[currentID] = 0; } for (int i = startIndex; i < cols; i++) { int currentID = sizeIndex * rows * cols + rowIndex * cols + i; if (lastMinID == -1) // 判斷是不適剛開始 or 找到 Max { // 要先去抓出第一個 Min if (PointType[currentID] == 2) lastMinID = i; else if (PointType[currentID] == 1) PointType[currentID] = 0; // 這邊代表沒有遇到峰值,應該是雜訊了 } else { // 已經抓到 min 了之後,要去濾掉其他的 min if (PointType[currentID] == 1) { // 抓到 Max FindMax = true; tempPeakCount++; } else if (FindMax && PointType[currentID] == 2) { // 抓到 Max 之後,又找到一個 Min if (Useful_PeakCount < tempPeakCount) { Useful_PeakCount = tempPeakCount; Useful_Start = lastMinID; Useful_End = i; } FindMax = false; tempPeakCount = 0; lastMinID = -1; } else if (!FindMax && PointType[currentID] == 2) { // 沒抓到 
Max 只抓到 Min PointType[sizeIndex * rows * cols + rowIndex * cols + lastMinID] = 0; lastMinID = i; } } } // 跑到最後結束,要再去判斷最後一個是否是多餘的 Min if (lastMinID != -1) PointType[sizeIndex * rows * cols + rowIndex * cols + lastMinID] = 0; } __device__ static void InsertBestNChoice(float* CandidateGap, int* PointType_BestN, int offsetIndex, int bestNoffsetIndex, int CurrentIndex, int ChooseBestN) { bool IsInsert = false; for (int i = 0; i < ChooseBestN && !IsInsert; i++) { // 大於 0 代表已經有值了 if (PointType_BestN[bestNoffsetIndex + i] > 0) { // 比較 int preIndex = PointType_BestN[bestNoffsetIndex + i]; if (CandidateGap[offsetIndex + preIndex] >= CandidateGap[offsetIndex + CurrentIndex]) // 原先的比他大,代表不加入,找下一個 continue; else if (CandidateGap[offsetIndex + preIndex] < CandidateGap[offsetIndex + CurrentIndex]) // 把剩下來的往後推,並加入此答案 { for (int j = ChooseBestN - 1; j > i; j--) PointType_BestN[bestNoffsetIndex + j] = PointType_BestN[bestNoffsetIndex + j - 1]; PointType_BestN[bestNoffsetIndex + i] = CurrentIndex; IsInsert = true; } } else { PointType_BestN[bestNoffsetIndex + i] = CurrentIndex; break; } } } __global__ static void PickBestChoiceToArray(float* DataArray, uchar* PointType, float* CandidateGap, int* PointType_BestN, int size, int rows, int cols, int ChooseBestN, int startIndex, float Threshold) { int id = blockIdx.x * blockDim.x + threadIdx.x; if (id >= size * rows) // 判斷是否超出大小 return; // 算 Index int sizeIndex = id / rows; int rowIndex = id % rows; bool IsFindMin = false; // 是否找到底端 float MinData; int offsetIndex = sizeIndex * rows * cols + rowIndex * cols; int bestNoffsetIndex = sizeIndex * rows * ChooseBestN + rowIndex * ChooseBestN; float lastData = -1; for (int i = startIndex; i < cols; i++) { // 先找最小的 if (PointType[i + offsetIndex] == 2) { // 如果前面已經有找到其他點的話 if (IsFindMin) lastData = -1; IsFindMin = true; MinData = DataArray[i + offsetIndex]; } else if ( IsFindMin && // 要先找到最低點 PointType[i + offsetIndex] == 1 && DataArray[i + offsetIndex] - MinData > Threshold // 接著找大於這個 Threshold ) { lastData = DataArray[i + offsetIndex] - MinData; // 把差距加進去,跟前面的比較,找出最好的加入 PointType_BestN CandidateGap[offsetIndex + i] = lastData; InsertBestNChoice(CandidateGap, PointType_BestN, offsetIndex, bestNoffsetIndex, i, ChooseBestN); } } // 把其他的設定為 0 for (int i = 0; i < ChooseBestN; i++) if (PointType_BestN[bestNoffsetIndex + i] == 0) PointType_BestN[bestNoffsetIndex + i] = -1; } __global__ static void CalcNeighbor(int* PointType_BestN, float* NeighborCountArray, int size, int rows, int cols, int ChooseBestN, int Radius) { int id = blockIdx.x * blockDim.x + threadIdx.x; if (id >= size * rows) // 判斷是否超出大小 return; // 算 Index int sizeIndex = id / rows; int rowIndex = id % rows; // 先塞 index int chooseIndex = sizeIndex * rows * ChooseBestN + rowIndex * ChooseBestN; for (int i = 0; i < ChooseBestN; i++) { // 清空陣列 int totalPixelCount = 0; float avgPixel = 0; int BestN = PointType_BestN[chooseIndex + i]; if (BestN == -1) { NeighborCountArray[chooseIndex + i] == 0; continue; } // 算有幾個在鄰居 for (int y = -Radius; y <= Radius; y++) for (int x = -Radius; x <= Radius; x++) for (int n = 0; n < ChooseBestN; n++) { int currentSizeIndex = sizeIndex + y; int currentRowIndex = rowIndex + x; if (0 <= currentSizeIndex && currentSizeIndex < size && 0 <= currentRowIndex && currentRowIndex < rows) { totalPixelCount++; int CurrentBestNIndex = currentSizeIndex * rows * ChooseBestN + currentRowIndex * ChooseBestN + n; int CurrentBestN = PointType_BestN[CurrentBestNIndex]; // 如果沒有東西就 Return if (CurrentBestN == -1) continue; if (abs(CurrentBestN - BestN) <= Radius) 
avgPixel++; } } // 算完之後,先塞到裡面 NeighborCountArray[chooseIndex + i] = avgPixel / totalPixelCount; } // 只保留最大的 int maxIndex = (thrust::max_element(thrust::device, NeighborCountArray + chooseIndex, NeighborCountArray + chooseIndex + ChooseBestN) - (NeighborCountArray + chooseIndex)); PointType_BestN[chooseIndex] = PointType_BestN[chooseIndex + maxIndex]; for (int i = 1; i < ChooseBestN; i++) PointType_BestN[i] = -1; } __global__ static void ConnectPointsStatus(int* PointType_BestN, int* ConnectStatus, int size, int rows, int ChooseBestN, int ConnectRadius) { int id = blockIdx.x * blockDim.x + threadIdx.x; if (id >= size * rows * ChooseBestN) // 判斷是否超出大小 return; // 算 Index int sizeIndex = id / (rows * ChooseBestN); int tempID = id % (rows * ChooseBestN); int rowIndex = tempID / ChooseBestN; int chooseIndex = tempID % ChooseBestN; // 代表這個點沒有有效的點 if (PointType_BestN[sizeIndex * rows * ChooseBestN + rowIndex * ChooseBestN + chooseIndex] == -1) return; // 如果是有效的點,就繼續往下追 int finalPos = min(rowIndex + ConnectRadius, rows); // 截止條件 for (int i = rowIndex + 1; i < finalPos; i++) { for (int j = 0; j < ChooseBestN; j++) { // 下一個點的位置 (第 i 個 row 的點) // 然後的第 1 個點 if (PointType_BestN[sizeIndex * rows * ChooseBestN + i * ChooseBestN + j] != -1) { // 前面項為現在這個點 // 後面項為往下的點 int diffX = PointType_BestN[sizeIndex * rows * ChooseBestN + rowIndex * ChooseBestN + chooseIndex] - PointType_BestN[sizeIndex * rows * ChooseBestN + i * ChooseBestN + j]; int diffY = i - rowIndex; int Radius = diffX * diffX + diffY * diffY; // 0 沒有用到喔 if (Radius < ConnectRadius * ConnectRadius) { // 張數的位移 + Row 的位移 + 現在在 Top N 的點 + 半徑的位移 + 往下 Top N 的結果 int index = sizeIndex * rows * ChooseBestN * ConnectRadius * ChooseBestN + // 張數 rowIndex * ChooseBestN * ConnectRadius * ChooseBestN + // Row chooseIndex * ConnectRadius * ChooseBestN + // 現在在 Top N (i - rowIndex) * ChooseBestN + // 半徑 j; ConnectStatus[index] = Radius; } } } } } // 這邊是例外,只有 Multi 才有TopView __global__ static void GetOtherSideView(float* Data, float* OtherSideData, int SizeX, int SizeY, int FinalSizeZ) { int id = blockIdx.x * blockDim.x + threadIdx.x; if (id >= SizeX * SizeY) { printf("範圍有錯!!\n"); return; } // id 換算 int idX = id / SizeY; int idY = id % SizeY; int DataOffsetIndex = idX * SizeY * FinalSizeZ + idY * FinalSizeZ; // 總和一個 SizeZ float totalZ = 0; for (int i = 0; i < FinalSizeZ; i++) totalZ += Data[DataOffsetIndex + i]; // 這邊的單位要調整一下 // rows => 是張樹 (SizeY) // cols => 是 SizeX int offsetIndex = idY * SizeX + idX; OtherSideData[offsetIndex] = totalZ; } __global__ static void TransformOtherSideDataToImage(float* OtherSideData, uchar* UintOtherSideData, float Mean, float FixMean, int SizeX, int SizeY) { int id = blockDim.x * blockIdx.x + threadIdx.x; if (id >= SizeX * SizeY) // 判斷是否超出大小 return; // 位移到設定的 Mean 直間 float ScaleFactor = FixMean / Mean / 255; float data = OtherSideData[id] * 255 * ScaleFactor; if (data >= 255) UintOtherSideData[id] = 255; else if (data <= 0) UintOtherSideData[id] = 0; else UintOtherSideData[id] = (uchar)data; } ////////////////////////////////////////////////////////////////////////// // CPU ////////////////////////////////////////////////////////////////////////// // 轉換 Function void TRCudaV2::SingleRawDataToPointCloud(char* FileRawData, int DataSize, int SizeX, int SizeZ, long ShiftValue, double K_Step, int CutValue) { // 算時間 #ifdef SHOW_TRCUDAV2_TOTAL_TIME totalTime = clock(); #endif ////////////////////////////////////////////////////////////////////////// // 步驟說明 // 1. 上傳 GPU Data // 2. 
一開始要把資料讀進來 (由於原本的資料都是 2個 Bytes 為一組,但 QT 目前是先用 GPU 轉換到 2個 Bytes),和 // 由於資料有 兩個 Channels,要相加除以2,可以去除雜訊 (由於原本的能量強度資料是使用三角波,所以會有去跟回兩個資料,就是把這兩筆資料相加除以 2) // 3. 用 5 次項去 Fit 一條曲線 // 4. λ Space 轉成 K Space // 5. cuFFT // (這個部分不用位移) // 7. 根據最大最小值來 Normalize 資料 // 8. 轉成圖 // 9. 邊界判斷 // 10. 抓下 GPU Data // // 細節說明: // 1. 轉換 Function => X 快軸、Y 慢軸 // 2. ShiftValue => TRIGGER DELAY位移(換FIBER,電線校正回來用的) // 3. K_Step => 深度(14.多mm對應 2.5的k step;可以考慮之後用2)(k step越大,z軸越深,但資料精細度越差;1~2.5) // 4. CutValue => OCT每個z軸,前面數據減去多少。原因是開頭的laser弱,干涉訊號不明顯,拿掉的資料會比較美。 (東元那邊的變數是 cuteValue XD) // 5. 這邊如果是 2 Channel 的話,大小為 2048 x 250 x 2 x 2 x 2 // (深度) x (快軸) x (慢軸(反掃)) x Channel x 2個 Byte 為一組 ////////////////////////////////////////////////////////////////////////// #pragma region 1. 上傳 GPU Data // 初始 #ifdef SHOW_TRCUDAV2_DETAIL_TIME clock_t time = clock(); #endif // GPU Data char* GPU_FileRawData; // => 從檔案讀進來的 Raw Data int *GPU_OCTRawData_2Channel; // => 這個是 OCT 掃完全部的 Raw Data (2Channels,如果"只用到一個" Channel 那就不會用到這個陣列) int *GPU_OCTRawData; // => 這個是實際 Denoise 的 Data (也就是 CH1 + CH2 的資料) (如果"只有一個" Channel,就只會用到這個陣列) float *GPU_OCTFloatData; // => 這個會用在兩個地方,一個是 K Space 的資料,一個是 FFT 後的資料 // 注意!! 因為只拿一組,不需要 兩個慢軸的資訊 (也就是反掃的資訊),所以除以 2 DataSize /= 2; // 是否是 2 Channels bool UseTwoChannels = (DataSize / SizeX / SizeZ == 4); // 2 Byte & 2 Channles // 原始資料 cudaMalloc(&GPU_FileRawData, sizeof(char) * DataSize); // 這邊要分兩個 Copy (略過反掃資料) cudaMemcpy(GPU_FileRawData, FileRawData, sizeof(char) * DataSize / 2, cudaMemcpyHostToDevice); cudaMemcpy(GPU_FileRawData + DataSize / 2, FileRawData + DataSize, sizeof(char) * DataSize / 2, cudaMemcpyHostToDevice); CheckCudaError(); // 判對是否使用 2 Chanels int OCTDataSize = SizeX * SizeZ; if (UseTwoChannels) cudaMalloc(&GPU_OCTRawData_2Channel, sizeof(int) * OCTDataSize * 2); cudaMalloc(&GPU_OCTRawData, sizeof(int) * OCTDataSize); cudaMalloc(&GPU_OCTFloatData, sizeof(float) * OCTDataSize); // 結算 #ifdef SHOW_TRCUDAV2_DETAIL_TIME time = clock() - time; cout << "1. 上傳至 GPU: " << ((float)time) / CLOCKS_PER_SEC << " sec" << endl; #endif #pragma endregion #pragma region 2. 讀檔轉換 ////////////////////////////////////////////////////////////////////////// // 這邊的資料格式是這樣 // ↗↘↗↘ 是一組 (↗代表掃描 0 ~ 250的一次資料) // 其中一個 ↗↘ 是一個三角波的資料 // 但因為有兩個 channel 所以一組資料是 ↗↘↗↘ ////////////////////////////////////////////////////////////////////////// // 開始 #ifdef SHOW_TRCUDAV2_DETAIL_TIME time = clock(); #endif // 解出 2 Byte 的資料 if (UseTwoChannels) { RawDataToOriginalData << < dim3(SizeX, 1, SizeZ / NumThreads * 2), NumThreads >> > (GPU_FileRawData, GPU_OCTRawData_2Channel, DataSize / 2); CheckCudaError(); // 兩個 Channel 作 Denoise CombineTwoChannels_Single << < dim3(SizeX, 1, SizeZ / NumThreads), NumThreads >> > (GPU_OCTRawData_2Channel, GPU_OCTRawData, SizeX, 1, SizeZ); // 刪除 cudaFree(GPU_OCTRawData_2Channel); } else RawDataToOriginalData << < dim3(SizeX, 1, SizeZ / NumThreads), NumThreads >> > (GPU_FileRawData, GPU_OCTRawData, DataSize / 2); CheckCudaError(); // 刪除 FileRaw Data cudaFree(GPU_FileRawData); // 結算 #ifdef SHOW_TRCUDAV2_DETAIL_TIME time = clock() - time; cout << "2. 讀檔轉換: " << ((float)time) / CLOCKS_PER_SEC << " sec" << endl; #endif #pragma endregion #pragma region 3. 
用五次項去 Fitting // 開始 #ifdef SHOW_TRCUDAV2_DETAIL_TIME time = clock(); #endif // 初始化 Matrix float* GPU_MatrixA; float* GPU_MatrixB; cudaMalloc(&GPU_MatrixA, sizeof(float) * (NumPolynomial + 1) *(NumPolynomial + 1)); cudaMalloc(&GPU_MatrixB, sizeof(float) * (NumPolynomial + 1)); // 先算平均 int* FirstSizeZData = new int[SizeZ]; cudaMemcpy(FirstSizeZData, GPU_OCTRawData, sizeof(int) * SizeZ, cudaMemcpyDeviceToHost); cudaDeviceSynchronize(); float average = accumulate(FirstSizeZData, FirstSizeZData + SizeZ, 0.0) / SizeZ; delete[] FirstSizeZData; // 取得 Matrix GetMatrixA << <1, (NumPolynomial + 1) * (NumPolynomial + 1) >> > (GPU_OCTRawData, GPU_MatrixA, NumPolynomial, SizeZ); GetMatrixB << <1, NumPolynomial + 1 >> > (GPU_OCTRawData, GPU_MatrixB, average, NumPolynomial, SizeZ); CheckCudaError(); float* MatrixA = new float[(NumPolynomial + 1) *(NumPolynomial + 1)]; float* MatrixB = new float[(NumPolynomial + 1)]; cudaMemcpy(MatrixA, GPU_MatrixA, sizeof(float) * (NumPolynomial + 1) *(NumPolynomial + 1), cudaMemcpyDeviceToHost); cudaMemcpy(MatrixB, GPU_MatrixB, sizeof(float) * (NumPolynomial + 1), cudaMemcpyDeviceToHost); cudaDeviceSynchronize(); // 解 Eigen 找 Fitting Function EigenUtility eigen; eigen.SetAverageValue(average); eigen.SolveByEigen(MatrixA, MatrixB, NumPolynomial); // 扣除那個 Function float* GPU_PolyValue; float* PolyValue = eigen.GetFunctionArray(SizeZ, average); cudaMalloc(&GPU_PolyValue, sizeof(float) * SizeZ); cudaMemcpy(GPU_PolyValue, PolyValue, sizeof(float) * SizeZ, cudaMemcpyHostToDevice); MinusByFittingFunction << < dim3(SizeX, 1, SizeZ / NumThreads), NumThreads >> > (GPU_OCTRawData, GPU_PolyValue, SizeZ); CheckCudaError(); // 刪除多出來的 cudaFree(GPU_MatrixA); cudaFree(GPU_MatrixB); cudaFree(GPU_PolyValue); delete[] MatrixA; delete[] MatrixB; delete[] PolyValue; // 結算 #ifdef SHOW_TRCUDAV2_DETAIL_TIME time = clock() - time; cout << "3. 多項式去 Fitting : " << ((float)time) / CLOCKS_PER_SEC << " sec" << endl; #endif #pragma endregion #pragma region 4. λ Space 轉成 K Space // 開始 #ifdef SHOW_TRCUDAV2_DETAIL_TIME time = clock(); #endif // 初始化 float* PX_Scale = new float[SizeZ]; int* KSpaceIndexArray = new int[SizeZ]; float* GPU_PXScale; int* GPU_KSpaceIndexArray; cudaMalloc(&GPU_PXScale, sizeof(float) * SizeZ); cudaMalloc(&GPU_KSpaceIndexArray, sizeof(int) * SizeZ); // 設定一些系數 int OffsetBegin = 800; // 算出 PXScale 的 Array ComputePXScale << <SizeZ / NumThreads, NumThreads >> > (GPU_PXScale, OffsetBegin, ShiftValue, K_Step, SizeZ); CheckCudaError(); // 抓下來準備算 K Space Index (由於這邊如果使用 GPU 去做,會導致大部分的 thread 在等最大工作量的 thread,所以這裡 CPU 做會比較快) cudaMemcpy(PX_Scale, GPU_PXScale, sizeof(float) * SizeZ, cudaMemcpyDeviceToHost); // 算 K Space 的對應 Array int index = 1; int KSpaceOffset = PX_Scale[SizeZ - 1]; for (int i = 0; i <= KSpaceOffset; i++) { while (i >= PX_Scale[index]) { index++; } KSpaceIndexArray[i] = index; } for (int i = KSpaceOffset + 1; i < SizeZ; i++) KSpaceIndexArray[i] = -1; // 由於 K Space 不是線性關係,所以要從 KSpaceIndexArray,找 Index,再從左右兩個點中,內插出實際在這個 Index 的值 cudaMemcpy(GPU_KSpaceIndexArray, KSpaceIndexArray, sizeof(int) * SizeZ, cudaMemcpyHostToDevice); FrequencyAdjust << <dim3(SizeX, 1, SizeZ / NumThreads), NumThreads >> > (GPU_OCTRawData, GPU_OCTFloatData, GPU_PXScale, GPU_KSpaceIndexArray, KSpaceOffset - CutValue, SizeX, 1, SizeZ); CheckCudaError(); // 釋放記憶體 cudaFree(GPU_PXScale); cudaFree(GPU_KSpaceIndexArray); cudaFree(GPU_OCTRawData); delete[] KSpaceIndexArray; // 結算 #ifdef SHOW_TRCUDAV2_DETAIL_TIME time = clock() - time; cout << "4. 
λ Space 轉成 K Space : " << ((float)time) / CLOCKS_PER_SEC << " sec" << endl; #endif #pragma endregion #pragma region 5. cuFFT // 開始 #ifdef SHOW_TRCUDAV2_DETAIL_TIME time = clock(); #endif cufftHandle PlanHandle; cufftComplex* GPU_ComplexData; // 這邊是創建 FFT 的 Handle & C2C 的 cufftComplex int NX = SizeZ; int BatchSize = SizeX; cufftPlan1d(&PlanHandle, NX, CUFFT_C2C, BatchSize); cudaMalloc(&GPU_ComplexData, sizeof(cufftComplex) * NX * BatchSize); CheckCudaError(); // 把資料塞進 Complex Data 裡 //gpuDataToComplex << <512, 4 >> > (GPU_OCTFloatData, GPU_ComplexData, NX * BatchSize, 0); DataToComplexData << <dim3(SizeX, 1, SizeZ / NumThreads), NumThreads >> > (GPU_OCTFloatData, GPU_ComplexData, OCTDataSize); CheckCudaError(); // 執行 cuFFT(CUDA™ Fast Fourier Transform) cufftExecC2C(PlanHandle, GPU_ComplexData, GPU_ComplexData, CUFFT_FORWARD); CheckCudaError(); // 刪除鏡向(FFT轉完之後會兩邊對稱) & 搬移資料 // 想知道更多:https://www.youtube.com/watch?v=spUNpyF58BY //gpuComplexToData << <512, 4 >> > (GPU_ComplexData, GPU_OCTFloatData, NX * BatchSize / 2, SizeZ, 0); ComplexDataToData << <dim3(SizeX, 1, SizeZ / NumThreads / 2), NumThreads >> > (GPU_ComplexData, GPU_OCTFloatData, SizeX, 1, SizeZ, OCTDataSize); CheckCudaError(); // 刪除 cufftDestroy(PlanHandle); cudaFree(GPU_ComplexData); // 結算 #ifdef SHOW_TRCUDAV2_DETAIL_TIME time = clock() - time; cout << "5. cuFFT: " << ((float)time) / CLOCKS_PER_SEC << " sec" << endl; #endif #pragma endregion #pragma region 7. Normalize Data // 開始 #ifdef SHOW_TRCUDAV2_DETAIL_TIME time = clock(); #endif // 要算出原始整條的強度值 float *GPU_BrightnessArray; cudaMalloc(&GPU_BrightnessArray, sizeof(float) * SizeX); ZCalcBrightness << <1, SizeZ >> > (GPU_OCTFloatData, GPU_BrightnessArray, 1, SizeX, SizeZ, StartIndex); CheckCudaError(); // 算最大值 float MaxValue = 0; float *GPU_MaxElement = thrust::max_element(thrust::device, GPU_OCTFloatData, GPU_OCTFloatData + OCTDataSize / 2); cudaMemcpy(&MaxValue, GPU_MaxElement, sizeof(float), cudaMemcpyDeviceToHost); CheckCudaError(); // 最小值 (拿一塊不會使用的 GPU 部分,來做 Normalize) // 拿一個正方形的區塊 // TL---x // |   | // |   | // x---BR float MinValue = 0; for (int i = MinValuePixel_TL; i <= MinValuePixel_BR; i++) { // [first, last) int beginIndex = i * SizeZ / 2 + i; int endIndex = i * SizeZ / 2 + MinValuePixel_BR + 1; MinValue += thrust::reduce(thrust::device, GPU_OCTFloatData + beginIndex, GPU_OCTFloatData + endIndex); } MinValue /= (MinValuePixel_BR - MinValuePixel_TL + 1) * (MinValuePixel_BR - MinValuePixel_TL + 1); MinValue *= MinValueScalar; // 因為 Normaliza Data 要做一件事情是 除 (Max - Min) ,要預防他除以 0 // 所以這邊先判斷兩個是不是位置一樣 (因為如果整個 array 值都一樣,Min & Max 給的位置都會一樣(以驗證過)) assert(MaxValue != MinValue && "FFT後最大最小值一樣,資料有錯誤!!"); NormalizeData << <dim3(SizeX, 1, SizeZ / NumThreads / 2), NumThreads >> > (GPU_OCTFloatData, MaxValue, MinValue, OCTDataSize / 2); // 結算 #ifdef SHOW_TRCUDAV2_DETAIL_TIME time = clock() - time; cout << "7. Normalize Data: " << ((float)time) / CLOCKS_PER_SEC << " sec" << endl; #endif #pragma endregion #pragma region 8. 
轉成圖 // 開始 #ifdef SHOW_TRCUDAV2_DETAIL_TIME time = clock(); #endif // 圖片的資料 uchar *GPU_UintDataArray; float* GPU_OCTSmoothData; cudaMalloc(&GPU_UintDataArray, sizeof(uchar) * SizeX * 1 * SizeZ); cudaMalloc(&GPU_OCTSmoothData, sizeof(float) * SizeX * 1 * SizeZ); CheckCudaError(); // 轉圖片 TransformToImageAndBorderData << <dim3(SizeX, 1, SizeZ / NumThreads / 2), NumThreads >> > (GPU_OCTFloatData, GPU_OCTSmoothData, GPU_UintDataArray, SizeX, 1, SizeZ / 2, SmoothSizeRange); CheckCudaError(); // 設定一下其他參數 size = 1; rows = SizeX; cols = SizeZ / 2; // 刪除記憶體 cudaFree(GPU_OCTFloatData); // 結算 #ifdef SHOW_TRCUDAV2_DETAIL_TIME time = clock() - time; cout << "8. 轉成圖: " << ((float)time) / CLOCKS_PER_SEC << " sec" << endl; #endif #pragma endregion #pragma region 9. 邊界判斷 // 目前邊界判斷沒有寫 // 開始 #ifdef SHOW_TRCUDAV2_DETAIL_TIME time = clock(); #endif #pragma region Init SaveDelete(PointType); PointType = new uchar[size * rows * cols]; memset(PointType, 0, sizeof(uchar) * size * rows * cols); SaveDelete(PointType_1D); PointType_1D = new int[size * rows]; memset(PointType_1D, 0, sizeof(int) * size * rows); // 點的型別 uchar* GPU_PointType; cudaMalloc(&GPU_PointType, sizeof(uchar) * size * rows * cols); cudaMemset(GPU_PointType, 0, sizeof(uchar) * size * rows * cols); #pragma endregion #pragma region 抓取邊界 assert(rows <= NumThreads && "rows 要小於 1024 的限制"); // 找最大最小值 & 刪除過飽合的部分 findMaxAndMinPeak << < size * rows * cols / NumThreads, NumThreads >> > (GPU_OCTSmoothData, GPU_BrightnessArray, GPU_PointType, size, rows, cols, MaxPeakThreshold, SatPeakThreshold); CheckCudaError(); // Parse 一些連續最小值 ParseMaxMinPeak << < size, rows >> > (GPU_PointType, size, rows, cols, StartIndex); CheckCudaError(); // 抓出一維陣列 int *GPU_PointType_BestN, *PointType_BestN; cudaMalloc(&GPU_PointType_BestN, sizeof(int) * size * rows * ChooseBestN); //PickBestChoiceToArray << < size, rows >> > (GPU_OCTSmoothData, GPU_PointType, GPU_PointType_BestN, size, rows, cols, ChooseBestN, StartIndex, GoThroughThreshold); //CheckCudaError(); // 連結點 // 這個的大小 為 => 張數 * 250(rows) * 取幾個最大值(ChooseBestN個) * 每個最大值底下有 半徑個 (Raidus) * 的下 N 排的幾個最大值(ChooseBestN) int *GPU_Connect_Status; int ConnectStateSize = size * rows * ChooseBestN * ConnectRadius * ChooseBestN; cudaMalloc(&GPU_Connect_Status, sizeof(int) * ConnectStateSize); cudaMemset(GPU_Connect_Status, 0, sizeof(int) * ConnectStateSize); ConnectPointsStatus << < size * ChooseBestN , rows >> > (GPU_PointType_BestN, GPU_Connect_Status, size, rows, ChooseBestN, ConnectRadius); CheckCudaError(); // 把資料傳回 CPU int *Connect_Status = new int[ConnectStateSize]; PointType_BestN = new int[size * rows * ChooseBestN]; cudaMemcpy(PointType, GPU_PointType, sizeof(uchar) * size * rows * cols, cudaMemcpyDeviceToHost); cudaMemcpy(Connect_Status, GPU_Connect_Status, sizeof(int) * ConnectStateSize, cudaMemcpyDeviceToHost); cudaMemcpy(PointType_BestN, GPU_PointType_BestN, sizeof(int) * size * rows * ChooseBestN, cudaMemcpyDeviceToHost); CheckCudaError(); // 抓取最大的線 GetSurface(PointType_BestN, Connect_Status); #pragma endregion // 刪除記憶體 cudaFree(GPU_PointType); cudaFree(GPU_PointType_BestN); cudaFree(GPU_Connect_Status); cudaFree(GPU_OCTSmoothData); cudaFree(GPU_BrightnessArray); delete[] Connect_Status; delete[] PointType_BestN; #ifdef SHOW_TRCUDAV2_DETAIL_TIME time = clock() - time; cout << "9. 抓取邊界: " << ((float)time) / CLOCKS_PER_SEC << " sec" << endl; #endif #pragma endregion #pragma region 10. 
抓下 GPU Data // 開始 #ifdef SHOW_TRCUDAV2_DETAIL_TIME time = clock(); #endif // 刪除之前的資料 SaveDelete(VolumeData); VolumeData = new uchar[SizeX * 1 * SizeZ]; cudaMemcpy(VolumeData, GPU_UintDataArray, sizeof(uchar) * SizeX * 1 * SizeZ / 2, cudaMemcpyDeviceToHost); // 刪除 GPU cudaFree(GPU_UintDataArray); // 結算 #ifdef SHOW_TRCUDAV2_DETAIL_TIME time = clock() - time; cout << "10. 抓下 GPU Data : " << ((float)time) / CLOCKS_PER_SEC << " sec" << endl; #endif #pragma endregion // 結算 #ifdef SHOW_TRCUDAV2_TOTAL_TIME totalTime = clock() - totalTime; cout << "轉換單張點雲: " << ((float)totalTime) / CLOCKS_PER_SEC << " sec" << endl; #endif } void TRCudaV2::MultiRawDataToPointCloud(char* FileRawData, int DataSize, int SizeX, int SizeY, int SizeZ, long ShiftValue, double K_Step, int CutValue) { // 計算時間 #ifdef SHOW_TRCUDAV2_TOTAL_TIME totalTime = clock(); #endif ////////////////////////////////////////////////////////////////////////// // 步驟說明 // 1. 上傳 GPU Data // 2. 一開始要把資料讀進來 (由於原本的資料都是 2個 Bytes 為一組,但 QT 目前是先用 GPU 轉換到 2個 Bytes),和 // 由於資料有 兩個 Channels,要相加除以2,可以去除雜訊 (由於原本的能量強度資料是使用三角波,所以會有去跟回兩個資料,就是把這兩筆資料相加除以 2) // 3. 用 5 次項去 Fit 一條曲線 // 4. λ Space 轉成 K Space // 5. cuFFT // 6. 位移 Data // 6.5 要找出TopView (這邊有多一個要找出TopView ) // 7. 根據最大最小值來 Normalize 資料 // 8. 轉成圖 // 9. 邊界判斷 // 10. 抓下 GPU Data // // 細節說明: // 1. 轉換 Function => X 快軸、Y 慢軸 // 2. ShiftValue => TRIGGER DELAY位移(換FIBER,電線校正回來用的) // 3. K_Step => 深度(14.多mm對應 2.5的k step;可以考慮之後用2)(k step越大,z軸越深,但資料精細度越差;1~2.5) // 4. CutValue => OCT每個z軸,前面數據減去多少。原因是開頭的laser弱,干涉訊號不明顯,拿掉的資料會比較美。 (東元那邊的變數是 cuteValue XD) // 5. 只是這邊比上方的 Function 多了 SizeY 個 // 6. 有多一個 找出TopView ////////////////////////////////////////////////////////////////////////// #pragma region 1. 上傳 GPU Data // 初始 #ifdef SHOW_TRCUDAV2_DETAIL_TIME clock_t time = clock(); #endif // GPU Data char* GPU_FileRawData; // => 從檔案讀進來的 Raw Data int *GPU_OCTRawData_2Channel; // => 這個是 OCT 掃完全部的 Raw Data (2Channels,如果"只用到一個" Channel 那就不會用到這個陣列) int *GPU_OCTRawData; // => 這個是實際 Denoise 的 Data (也就是 CH1 + CH2 的資料) (如果"只有一個" Channel,就只會用到這個陣列) float *GPU_OCTFloatData; // => 這個會用在兩個地方,一個是 K Space 的資料,一個是 FFT 後的資料 // 是否是 2 Channels bool UseTwoChannels = (DataSize / SizeX / SizeY / SizeZ == 4); // 2 Byte & 2 Channles // 原始資料 cudaMalloc(&GPU_FileRawData, sizeof(char) * DataSize); cudaMemcpy(GPU_FileRawData, FileRawData, sizeof(char) * DataSize, cudaMemcpyHostToDevice); CheckCudaError(); // 判對是否使用 2 Chanels int OCTDataSize = SizeX * SizeY * SizeZ; if (UseTwoChannels) cudaMalloc(&GPU_OCTRawData_2Channel, sizeof(int) * OCTDataSize * 2); cudaMalloc(&GPU_OCTRawData, sizeof(int) * OCTDataSize); cudaMalloc(&GPU_OCTFloatData, sizeof(float) * OCTDataSize); // 結算 #ifdef SHOW_TRCUDAV2_DETAIL_TIME time = clock() - time; cout << "1. 上傳至 GPU: " << ((float)time) / CLOCKS_PER_SEC << " sec" << endl; #endif #pragma endregion #pragma region 2. 
讀檔轉換 ////////////////////////////////////////////////////////////////////////// // 這邊的資料格式是這樣 // ↗↘↗↘ 是一組 (↗代表掃描 0 ~ 250的一次資料) // 其中一個 ↗↘ 是一個三角波的資料 // 但因為有兩個 channel 所以一組資料是 ↗↘↗↘ ////////////////////////////////////////////////////////////////////////// // 初始 #ifdef SHOW_TRCUDAV2_DETAIL_TIME time = clock(); #endif // 解出 2 Byte 的資料 if (UseTwoChannels) { RawDataToOriginalData << < dim3(SizeX, SizeY, SizeZ / NumThreads * 2), NumThreads >> > (GPU_FileRawData, GPU_OCTRawData_2Channel, DataSize / 2); CheckCudaError(); // 兩個 Channel 作 Denoise CombineTwoChannels_Multi << < dim3(SizeX, SizeY, SizeZ / NumThreads), NumThreads >> > (GPU_OCTRawData_2Channel, GPU_OCTRawData, SizeX, SizeY, SizeZ); // 刪除 cudaFree(GPU_OCTRawData_2Channel); } else RawDataToOriginalData << < dim3(SizeX, SizeY, SizeZ / NumThreads), NumThreads >> > (GPU_FileRawData, GPU_OCTRawData, DataSize / 2); CheckCudaError(); // 反掃的資料,Index 要反轉 ReverseBackScanData << < dim3(SizeX / 2, SizeY / 2, SizeZ / NumThreads), NumThreads >> > (GPU_OCTRawData, SizeX, SizeY, SizeZ); // 刪除 FileRaw Data cudaFree(GPU_FileRawData); // 結算 #ifdef SHOW_TRCUDAV2_DETAIL_TIME time = clock() - time; cout << "2. 讀檔轉換: " << ((float)time) / CLOCKS_PER_SEC << " sec" << endl; #endif #pragma endregion #pragma region 3. 用五次項去 Fitting // 開始 #ifdef SHOW_TRCUDAV2_DETAIL_TIME time = clock(); #endif // 初始化 Matrix float* GPU_MatrixA; float* GPU_MatrixB; cudaMalloc(&GPU_MatrixA, sizeof(float) * (NumPolynomial + 1) *(NumPolynomial + 1)); cudaMalloc(&GPU_MatrixB, sizeof(float) * (NumPolynomial + 1)); // 先算平均 int* FirstSizeZData = new int[SizeZ]; cudaMemcpy(FirstSizeZData, GPU_OCTRawData, sizeof(int) * SizeZ, cudaMemcpyDeviceToHost); cudaDeviceSynchronize(); float average = accumulate(FirstSizeZData, FirstSizeZData + SizeZ, 0.0) / SizeZ; delete[] FirstSizeZData; // 取得 Matrix GetMatrixA << <1, (NumPolynomial + 1) * (NumPolynomial + 1) >> > (GPU_OCTRawData, GPU_MatrixA, NumPolynomial, SizeZ); GetMatrixB << <1, NumPolynomial + 1 >> > (GPU_OCTRawData, GPU_MatrixB, average, NumPolynomial, SizeZ); CheckCudaError(); float* MatrixA = new float[(NumPolynomial + 1) *(NumPolynomial + 1)]; float* MatrixB = new float[(NumPolynomial + 1)]; cudaMemcpy(MatrixA, GPU_MatrixA, sizeof(float) * (NumPolynomial + 1) *(NumPolynomial + 1), cudaMemcpyDeviceToHost); cudaMemcpy(MatrixB, GPU_MatrixB, sizeof(float) * (NumPolynomial + 1), cudaMemcpyDeviceToHost); cudaDeviceSynchronize(); // 解 Eigen 找 Fitting Function EigenUtility eigen; eigen.SetAverageValue(average); eigen.SolveByEigen(MatrixA, MatrixB, NumPolynomial); // 扣除那個 Function float* GPU_PolyValue; float* PolyValue = eigen.GetFunctionArray(SizeZ, average); cudaMalloc(&GPU_PolyValue, sizeof(float) * SizeZ); cudaMemcpy(GPU_PolyValue, PolyValue, sizeof(float) * SizeZ, cudaMemcpyHostToDevice); MinusByFittingFunction << < dim3(SizeX, SizeY, SizeZ / NumThreads), NumThreads >> > (GPU_OCTRawData, GPU_PolyValue, SizeZ); CheckCudaError(); // 刪除多出來的 cudaFree(GPU_MatrixA); cudaFree(GPU_MatrixB); cudaFree(GPU_PolyValue); delete[] MatrixA; delete[] MatrixB; delete[] PolyValue; // 結算 #ifdef SHOW_TRCUDAV2_DETAIL_TIME time = clock() - time; cout << "3. 多項式去 Fitting : " << ((float)time) / CLOCKS_PER_SEC << " sec" << endl; #endif #pragma endregion #pragma region 4. 
λ Space 轉成 K Space // 開始 #ifdef SHOW_TRCUDAV2_DETAIL_TIME time = clock(); #endif // 初始化 float* PX_Scale = new float[SizeZ]; int* KSpaceIndexArray = new int[SizeZ]; float* GPU_PXScale; int* GPU_KSpaceIndexArray; cudaMalloc(&GPU_PXScale, sizeof(float) * SizeZ); cudaMalloc(&GPU_KSpaceIndexArray, sizeof(int) * SizeZ); // 設定一些系數 int OffsetBegin = 800; // 算出 PXScale 的 Array ComputePXScale << <SizeZ / NumThreads, NumThreads >> > (GPU_PXScale, OffsetBegin, ShiftValue, K_Step, SizeZ); CheckCudaError(); // 抓下來準備算 K Space Index (由於這邊如果使用 GPU 去做,會導致大部分的 thread 在等最大工作量的 thread,所以這裡 CPU 做會比較快) cudaMemcpy(PX_Scale, GPU_PXScale, sizeof(float) * SizeZ, cudaMemcpyDeviceToHost); // 算 K Space 的對應 Array int index = 1; int KSpaceOffset = PX_Scale[SizeZ - 1]; for (int i = 0; i <= KSpaceOffset; i++) { while (i >= PX_Scale[index]) { index++; } KSpaceIndexArray[i] = index; } for (int i = KSpaceOffset + 1; i < SizeZ; i++) KSpaceIndexArray[i] = -1; // 由於 K Space 不是線性關係,所以要從 KSpaceIndexArray,找 Index,再從左右兩個點中,內插出實際在這個 Index 的值 cudaMemcpy(GPU_KSpaceIndexArray, KSpaceIndexArray, sizeof(int) * SizeZ, cudaMemcpyHostToDevice); FrequencyAdjust << <dim3(SizeX, SizeY, SizeZ / NumThreads), NumThreads >> > (GPU_OCTRawData, GPU_OCTFloatData, GPU_PXScale, GPU_KSpaceIndexArray, KSpaceOffset - CutValue, SizeX, SizeY, SizeZ); CheckCudaError(); // 釋放記憶體 cudaFree(GPU_PXScale); cudaFree(GPU_KSpaceIndexArray); cudaFree(GPU_OCTRawData); delete[] KSpaceIndexArray; // 結算 #ifdef SHOW_TRCUDAV2_DETAIL_TIME time = clock() - time; cout << "4. λ Space 轉成 K Space : " << ((float)time) / CLOCKS_PER_SEC << " sec" << endl; #endif #pragma endregion #pragma region 5. cuFFT // 開始 #ifdef SHOW_TRCUDAV2_DETAIL_TIME time = clock(); #endif cufftHandle PlanHandle; cufftComplex* GPU_ComplexData; // 這邊是創建 FFT 的 Handle & C2C 的 cufftComplex int NX = SizeZ; int BatchSize = SizeX * SizeY; cufftPlan1d(&PlanHandle, NX, CUFFT_C2C, BatchSize); cudaMalloc(&GPU_ComplexData, sizeof(cufftComplex) * NX * BatchSize); CheckCudaError(); // 把資料塞進 Complex Data 裡 DataToComplexData << <dim3(SizeX, SizeY, SizeZ / NumThreads), NumThreads >> > (GPU_OCTFloatData, GPU_ComplexData, OCTDataSize); CheckCudaError(); // 執行 cuFFT(CUDA™ Fast Fourier Transform) cufftExecC2C(PlanHandle, GPU_ComplexData, GPU_ComplexData, CUFFT_FORWARD); CheckCudaError(); // 刪除鏡向(FFT轉完之後會兩邊對稱) & 搬移資料 // 想知道更多:https://www.youtube.com/watch?v=spUNpyF58BY ComplexDataToData << <dim3(SizeX, SizeY, SizeZ / NumThreads / 2), NumThreads >> > (GPU_ComplexData, GPU_OCTFloatData, SizeX, SizeY, SizeZ, OCTDataSize); CheckCudaError(); // 刪除 cufftDestroy(PlanHandle); cudaFree(GPU_ComplexData); // 結算 #ifdef SHOW_TRCUDAV2_DETAIL_TIME time = clock() - time; cout << "5. cuFFT: " << ((float)time) / CLOCKS_PER_SEC << " sec" << endl; #endif #pragma endregion #pragma region 6. 位移 Data // 開始 #ifdef SHOW_TRCUDAV2_DETAIL_TIME time = clock(); #endif float* GPU_ShiftData; cudaMalloc(&GPU_ShiftData, sizeof(float) * OCTDataSize / 2); // 因為一半相同,所以去掉了 // 這邊也是 ShiftFinalData << <dim3(SizeX, SizeY, SizeZ / NumThreads / 2), NumThreads >> > (GPU_OCTFloatData, GPU_ShiftData, SizeX, SizeY, SizeZ / 2, OCTDataSize / 2); CheckCudaError(); // 刪除記憶體 cudaFree(GPU_OCTFloatData); // 結算 #ifdef SHOW_TRCUDAV2_DETAIL_TIME time = clock() - time; cout << "6. 
搬移資料: " << ((float)time) / CLOCKS_PER_SEC << " sec" << endl; #endif #pragma endregion #pragma region 6.5 TopView // 開始 #ifdef SHOW_TRCUDAV2_DETAIL_TIME time = clock(); #endif // 這邊會抓出TopView float* GPU_OtherSideData; cudaMalloc(&GPU_OtherSideData, sizeof(float) * OCTDataSize / 2); GetOtherSideView << <SizeX, SizeY >> > (GPU_ShiftData, GPU_OtherSideData, SizeX, SizeY, SizeZ / 2); CheckCudaError(); cudaDeviceSynchronize(); // 找最大值 float MaxValue = 0, MinValue = 0; float *GPU_MaxElement = thrust::max_element(thrust::device, GPU_OtherSideData, GPU_OtherSideData + SizeX * SizeY); float *GPU_MinElement = thrust::min_element(thrust::device, GPU_OtherSideData, GPU_OtherSideData + SizeX * SizeY); cudaMemcpy(&MaxValue, GPU_MaxElement, sizeof(float), cudaMemcpyDeviceToHost); cudaMemcpy(&MinValue, GPU_MinElement, sizeof(float), cudaMemcpyDeviceToHost); NormalizeData << < SizeX, SizeY >> > (GPU_OtherSideData, MaxValue, MinValue, SizeX * SizeY); CheckCudaError(); // 將 Top View 的圖,部會因為亮度而受影響 float MeanValue = thrust::reduce(thrust::device, GPU_OtherSideData, GPU_OtherSideData + SizeX * SizeY) / SizeX / SizeY; uchar* GPU_UintOtherSideData; cudaMalloc(&GPU_UintOtherSideData, sizeof(uchar) * SizeX * SizeY); TransformOtherSideDataToImage << <SizeX, SizeY >> > (GPU_OtherSideData, GPU_UintOtherSideData, MeanValue, OtherSideMean, SizeX, SizeY); CheckCudaError(); // 刪除記憶體 cudaFree(GPU_OtherSideData); // 結算 #ifdef SHOW_TRCUDAV2_DETAIL_TIME time = clock() - time; cout << "6.5. TopView 產生: " << ((float)time) / CLOCKS_PER_SEC << " sec" << endl; #endif #pragma endregion #pragma region 7. Normalize Data // 開始 #ifdef SHOW_TRCUDAV2_DETAIL_TIME time = clock(); #endif // 算最大值 MaxValue = 0; GPU_MaxElement = thrust::max_element(thrust::device, GPU_ShiftData, GPU_ShiftData + OCTDataSize / 2); cudaMemcpy(&MaxValue, GPU_MaxElement, sizeof(float), cudaMemcpyDeviceToHost); CheckCudaError(); cudaDeviceSynchronize(); // 最小值 (拿一塊不會使用的 GPU 部分,來做 Normalize) // 拿一個正方形的區塊 // TL---x // |   | // |   | // x---BR MinValue = 0; for (int i = MinValuePixel_TL; i <= MinValuePixel_BR; i++) { // [first, last) int beginIndex = SizeX * SizeZ / 2 + i * SizeZ / 2 + i; int endIndex = SizeX * SizeZ / 2 + i * SizeZ / 2 + MinValuePixel_BR + 1; MinValue += thrust::reduce(thrust::device, GPU_ShiftData + beginIndex, GPU_ShiftData + endIndex); } MinValue /= (MinValuePixel_BR - MinValuePixel_TL + 1) * (MinValuePixel_BR - MinValuePixel_TL + 1); MinValue *= MinValueScalar; // 因為 Normaliza Data 要做一件事情是 除 (Max - Min) ,要預防他除以 0 // 所以這邊先判斷兩個是不是位置一樣 (因為如果整個 array 值都一樣,Min & Max 給的位置都會一樣(以驗證過)) assert(MaxValue != MinValue && "FFT後最大最小值一樣,資料有錯誤!!"); NormalizeData << <dim3(SizeX, SizeY, SizeZ / NumThreads / 2), NumThreads >> > (GPU_ShiftData, MaxValue, MinValue, OCTDataSize / 2); // 結算 #ifdef SHOW_TRCUDAV2_DETAIL_TIME time = clock() - time; cout << "7. Normalize Data: " << ((float)time) / CLOCKS_PER_SEC << " sec" << endl; #endif #pragma endregion #pragma region 8. 
轉成圖 // 開始 #ifdef SHOW_TRCUDAV2_DETAIL_TIME time = clock(); #endif // 圖片的資料 uchar *GPU_UintDataArray; float* GPU_OCTSmoothData; cudaMalloc(&GPU_UintDataArray, sizeof(uchar) * SizeX * SizeY * SizeZ); cudaMalloc(&GPU_OCTSmoothData, sizeof(float) * SizeX * SizeY * SizeZ); CheckCudaError(); // 轉圖片 TransformToImageAndBorderData << <dim3(SizeX, SizeY, SizeZ / NumThreads / 2), NumThreads >> > (GPU_ShiftData, GPU_OCTSmoothData, GPU_UintDataArray, SizeX, SizeY, SizeZ / 2, SmoothSizeRange); CheckCudaError(); // 設定一下其他參數 size = SizeY; rows = SizeX; cols = SizeZ / 2; // 刪除記憶體 cudaFree(GPU_ShiftData); // 結算 #ifdef SHOW_TRCUDAV2_DETAIL_TIME time = clock() - time; cout << "8. 轉成圖: " << ((float)time) / CLOCKS_PER_SEC << " sec" << endl; #endif #pragma endregion #pragma region 9. 抓取邊界 // 開始 #ifdef SHOW_TRCUDAV2_DETAIL_TIME time = clock(); #endif #pragma region Init SaveDelete(PointType); PointType = new uchar[size * rows * cols]; memset(PointType, 0, sizeof(uchar) * size * rows * cols); SaveDelete(PointType_1D); PointType_1D = new int[size * rows]; memset(PointType_1D, 0, sizeof(int) * size * rows); // 點的型別 uchar* GPU_PointType; cudaMalloc(&GPU_PointType, sizeof(uchar) * size * rows * cols); cudaMemset(GPU_PointType, 0, sizeof(uchar) * size * rows * cols); #pragma endregion #pragma region 抓取邊界 assert(rows <= NumThreads && "rows 要小於 1024 的限制"); // 要算出原始整條的強度值 float *GPU_BrightnessArray; cudaMalloc(&GPU_BrightnessArray, sizeof(float) * size * rows); ZCalcBrightness << <size, rows >> > (GPU_OCTSmoothData, GPU_BrightnessArray, size, rows, cols, StartIndex); CheckCudaError(); // 找最大最小值 & 刪除過飽合的部分 findMaxAndMinPeak << < size * rows * cols / NumThreads, NumThreads >> > (GPU_OCTSmoothData, GPU_BrightnessArray, GPU_PointType, size, rows, cols, MaxPeakThreshold, SatPeakThreshold); CheckCudaError(); // Parse 一些連續最小值 ParseMaxMinPeak << < size, rows >> > (GPU_PointType, size, rows, cols, StartIndex); CheckCudaError(); // 抓出一維陣列 int *GPU_PointType_BestN, *PointType_BestN; float* GPU_CandidateGap; cudaMalloc(&GPU_PointType_BestN, sizeof(int) * size * rows * ChooseBestN); cudaMalloc(&GPU_CandidateGap, sizeof(float) * size * rows * cols); // 暫時用來存 Gap 的記憶體 cudaMemset(GPU_CandidateGap, 0, sizeof(float) * size * rows * cols); cudaMemset(GPU_PointType_BestN, 0, sizeof(int) * size * rows * ChooseBestN); PickBestChoiceToArray << < size, rows >> > (GPU_OCTSmoothData, GPU_PointType, GPU_CandidateGap, GPU_PointType_BestN, size, rows, cols, ChooseBestN, StartIndex, GoThroughThreshold); CheckCudaError(); // 算出 Neighbor 數目的陣列 float* GPU_NeighborCountArray; cudaMalloc(&GPU_NeighborCountArray, sizeof(float) * size * rows * ChooseBestN); CalcNeighbor << <size, rows >> > (GPU_PointType_BestN, GPU_NeighborCountArray, size, rows, cols, ChooseBestN, DenoiseWindowsRadius); CheckCudaError(); // 連結點 // 這個的大小 為 => 張數 * 250(rows) * 取幾個最大值(ChooseBestN個) * 每個最大值底下有 半徑個 (Raidus) * 的下 N 排的幾個最大值(ChooseBestN) int *GPU_Connect_Status; int ConnectStateSize = size * rows * ChooseBestN * ConnectRadius * ChooseBestN; cudaMalloc(&GPU_Connect_Status, sizeof(int) * ConnectStateSize); cudaMemset(GPU_Connect_Status, 0, sizeof(int) * ConnectStateSize); ConnectPointsStatus << < size * ChooseBestN, rows >> > (GPU_PointType_BestN, GPU_Connect_Status, size, rows, ChooseBestN, ConnectRadius); CheckCudaError(); // 把資料傳回 CPU int *Connect_Status = new int[ConnectStateSize]; PointType_BestN = new int[size * rows * ChooseBestN]; TestBestN = new int[size * rows * ChooseBestN]; cudaMemcpy(PointType, GPU_PointType, sizeof(uchar) * size * rows * cols, cudaMemcpyDeviceToHost); 
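// The device-to-host copies here and immediately below retrieve the per-sample peak labels (PointType),
// the candidate connection table (Connect_Status) and the per-row best-N peak indices (PointType_BestN).
// GetSurface() then runs on the CPU, chaining candidates through Connect_Status into the longest
// continuous boundary per B-scan and smoothing the result into PointType_1D.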
cudaMemcpy(Connect_Status, GPU_Connect_Status, sizeof(int) * ConnectStateSize, cudaMemcpyDeviceToHost); cudaMemcpy(PointType_BestN, GPU_PointType_BestN, sizeof(int) * size * rows * ChooseBestN, cudaMemcpyDeviceToHost); cudaMemcpy(TestBestN, GPU_PointType_BestN, sizeof(int) * size * rows * ChooseBestN, cudaMemcpyDeviceToHost); CheckCudaError(); // 抓取最大的線 GetSurface(PointType_BestN, Connect_Status); #pragma endregion // 刪除記憶體 cudaFree(GPU_PointType); cudaFree(GPU_PointType_BestN); cudaFree(GPU_Connect_Status); cudaFree(GPU_OCTSmoothData); cudaFree(GPU_BrightnessArray); cudaFree(GPU_CandidateGap); cudaFree(GPU_NeighborCountArray); delete[] Connect_Status; delete[] PointType_BestN; // 結算 #ifdef SHOW_TRCUDAV2_DETAIL_TIME time = clock() - time; cout << "9. 抓取邊界: " << ((float)time) / CLOCKS_PER_SEC << " sec" << endl; #endif #pragma endregion #pragma region 10. 抓下 GPU Data // 開始 #ifdef SHOW_TRCUDAV2_DETAIL_TIME time = clock(); #endif // 刪除之前的資料 SaveDelete(VolumeData); VolumeData = new uchar[SizeX * SizeY * SizeZ / 2]; cudaMemcpy(VolumeData, GPU_UintDataArray, sizeof(uchar) * SizeX * SizeY * SizeZ / 2, cudaMemcpyDeviceToHost); SaveDelete(VolumeData_OtherSide); VolumeData_OtherSide = new uchar[SizeX * SizeY]; cudaMemcpy(VolumeData_OtherSide, GPU_UintOtherSideData, sizeof(uchar) * SizeX * SizeY, cudaMemcpyDeviceToHost); // 刪除 GPU cudaFree(GPU_UintDataArray); cudaFree(GPU_UintOtherSideData); // 結算 #ifdef SHOW_TRCUDAV2_DETAIL_TIME time = clock() - time; cout << "10. 抓下 GPU Data : " << ((float)time) / CLOCKS_PER_SEC << " sec" << endl; #endif #pragma endregion // 結算 #ifdef SHOW_TRCUDAV2_TOTAL_TIME totalTime = clock() - totalTime; cout << "轉換多張點雲: " << ((float)totalTime) / CLOCKS_PER_SEC << " sec" << endl; #endif } // 拿出圖片 vector<Mat> TRCudaV2::TransfromMatArray(bool SaveBorder = false) { // 轉換到 Mat vector<Mat> ImgArray; for (int i = 0; i < size; i++) { // 根據 Offset 拿圖片 Mat img(rows, cols, CV_8U, VolumeData + i * rows * cols); cvtColor(img, img, CV_GRAY2BGR); // 丟進堆疊 ImgArray.push_back(img); } if (SaveBorder) { // Debug 所有的 peak /*for (int i = 0; i < size; i++) for (int j = 0; j < rows * cols; j++) { int offsetIndex = i * rows * cols; int rowIndex = j / cols; int colIndex = j % cols; Vec3b color(0, 0, 0); if (PointType[offsetIndex + j] == 1) color = Vec3b(0, 255, 255); else if (PointType[offsetIndex + j] == 2) color = Vec3b(255, 255, 255); ImgArray[i].at<Vec3b>(rowIndex, colIndex) = color; }*/ // 只抓出最後的邊界 for (int i = 0; i < size; i++) for (int j = 0; j < rows; j++) { int index = i * rows + j; if (PointType_1D[index] != -1) { Point contourPoint(PointType_1D[index], j); circle(ImgArray[i], contourPoint, 2, Scalar(0, 255, 255), CV_FILLED); } } } return ImgArray; } Mat TRCudaV2::TransformToOtherSideView() { assert(size > 1 && "這段一定要大於一張圖"); Mat img(rows, size, CV_8U, VolumeData_OtherSide); cvtColor(img, img, CV_GRAY2BGR); return img; } void TRCudaV2::CopySingleBorder(int* LastArray) { assert(LastArray != NULL && PointType_1D != NULL && size == 1 && "要先初始化 Array 和要做轉點雲的部分!!"); // assert 抓出 call 錯的可能性 (這邊只能單張) memcpy(LastArray, PointType_1D, sizeof (int) * size * rows); } void TRCudaV2::CopyBorder(int* BorderArray) { assert(BorderArray != NULL && PointType_1D != NULL && size != 1 && "要先初始化 Array 和要做轉點雲的部分!!"); // assert 抓出 call 錯的可能性 (這邊要多張) memcpy(BorderArray, PointType_1D, sizeof(int) * size * rows); } bool TRCudaV2::ShakeDetect_Single(int* LastArray, bool ShowDebugMessage) { // 設定變數 int voteNum = 0; // 有效票數 float MoveDis = 0; // 移動的總共距離 // 跑每一個點 for (int i = 0; i < rows; i++) { if (PointType_1D[i] != -1 && 
LastArray[i] != -1) { MoveDis += abs(PointType_1D[i] - LastArray[i]); voteNum++; } } // 判斷是否有有效資料 if (voteNum > OCT_Valid_VoteNum) { MoveDis /= voteNum; // Debug Message if(ShowDebugMessage) cout << "晃動距離(pixel): " << (float)MoveDis << endl; // 這邊是代表沒有晃動 if (MoveDis < OCT_Move_Threshold) return false; } return true; } bool TRCudaV2::ShakeDetect_Multi(bool UsePreiseThreshold, bool ShowDebugMessage) { // 找 60 ~ 200 裡面有效的有沒有斷層 int voteNum = 0; // 有效票數 float MoveDis = 0; // 移動的總共距離 // Reverse 後的 0 ~ 250 for (int i = 60; i < 200; i++) { bool IsMove = false; // 這邊先預設給這個值,後面會換掉 int leftIndex = 124 * rows + i; // 第 124 張 int rightIndex = 125 * rows + i; // 第 125 張 // 從中間往前找 for (int j = size / 2 - 1; j >= 0; j--) if (PointType_1D[j * rows + i] != -1) { leftIndex = j * rows + i; break; } // 從中間像後找 for (int j = size / 2; j < size; j++) if (PointType_1D[j] != -1) { rightIndex = j * rows + i; break; } int leftY = PointType_1D[leftIndex]; int rightY = PointType_1D[rightIndex]; // 確認有效票數 if (PointType_1D[leftIndex] != -1 && PointType_1D[rightIndex] != -1) { int DisMid = abs(PointType_1D[rightIndex] - PointType_1D[leftIndex]); MoveDis += DisMid; voteNum++; } } // 判斷是否有有效資料 if (voteNum > OCT_Valid_VoteNum) { MoveDis /= voteNum; // Debug Message if (ShowDebugMessage) cout << "晃動距離(pixel): " << (float)MoveDis << endl; // 這邊是代表沒有晃動 if (UsePreiseThreshold) { // 用較輕確的結果 if (MoveDis < OCT_Move_Precise_Threshold) return false; } else { // 用較不精確的結果 if (MoveDis < OCT_Move_Threshold) return false; } } else if (ShowDebugMessage) cout << "資料量太少!!" << endl; return true; } ////////////////////////////////////////////////////////////////////////// // Helper Function ////////////////////////////////////////////////////////////////////////// void TRCudaV2::GetSurface(int *PointType_BestN, int *Connect_Status) { // 選 N 個 #pragma omp parallel for //num_thread(4) for (int i = 0; i < size; i++) { // 每個 10 段下去 Sample int RowGap = rows / 10; vector<vector<ConnectInfo>> StatusVector; for (int j = 0; j < rows; j += RowGap) for (int chooseNIndex = 0; chooseNIndex < ChooseBestN; chooseNIndex++) { int begin = j; int end = j; // 代表這個點沒有東西,所以略過 if (PointType_BestN[i * rows * ChooseBestN + j * ChooseBestN + chooseNIndex] == -1) continue; // 連接狀況 vector<ConnectInfo> Connect; #pragma region 往上找 // 先加上自己 ConnectInfo info; info.rowIndex = j; info.chooseIndex = chooseNIndex; Connect.push_back(info); int FindIndex = j; int FindChooseIndex = chooseNIndex; bool IsFind = true; while (IsFind && FindIndex > 0) { int minMoveIndex = -1; int minChooseIndex = -1; int tempValue = ConnectRadius * ConnectRadius; for (int k = 1; k < ConnectRadius; k++) for (int nextChooseNIndex = 0; nextChooseNIndex < ChooseBestN; nextChooseNIndex++) { int index = i * rows * ChooseBestN * ConnectRadius * ChooseBestN + // Size (FindIndex - k) * ChooseBestN * ConnectRadius * ChooseBestN + // Rows nextChooseNIndex * ConnectRadius * ChooseBestN + // 現在在的 Top N 的點 (這邊要注意,這邊應該要放的是 要找的那個點的 ChooseIndex) k * ChooseBestN + // 半徑 FindChooseIndex; if (FindIndex - k >= 0 && Connect_Status[index] != 0 && tempValue > Connect_Status[index]) { tempValue = Connect_Status[index]; minMoveIndex = k; minChooseIndex = nextChooseNIndex; } } // 判斷是否有找到,找到就繼續找 if (minMoveIndex != -1) { // 更便位置 FindIndex = FindIndex - minMoveIndex; FindChooseIndex = minChooseIndex; // 丟進陣列 info.rowIndex = FindIndex; info.chooseIndex = minChooseIndex; Connect.push_back(info); // 有找到 IsFind = true; } else IsFind = false; } #pragma endregion #pragma region 往下找 FindIndex = j; FindChooseIndex = chooseNIndex; while 
(IsFind && FindIndex < rows - 1) { int minMoveIndex = -1; int minChooseIndex = -1; int tempValue = ConnectRadius * ConnectRadius; for (int k = 1; k < ConnectRadius; k++) for (int nextChooseNIndex = 0; nextChooseNIndex < ChooseBestN; nextChooseNIndex++) { int index = i * rows * ChooseBestN * ConnectRadius * ChooseBestN + // Size FindIndex * ChooseBestN * ConnectRadius * ChooseBestN + // Rows FindChooseIndex * ConnectRadius * ChooseBestN + // 現在在的 Top N 的點 k * ChooseBestN + // 半徑 nextChooseNIndex; if (FindIndex + k < rows && Connect_Status[index] != 0 && tempValue > Connect_Status[index]) { tempValue = Connect_Status[index]; minMoveIndex = k; minChooseIndex = nextChooseNIndex; } } // 判斷是否有找到,找到就繼續找 if (minMoveIndex != -1) { // 更便位置 FindIndex = FindIndex + minMoveIndex; FindChooseIndex = minChooseIndex; // 丟進陣列 info.rowIndex = FindIndex; info.chooseIndex = minChooseIndex; Connect.push_back(info); // 有找到 IsFind = true; } else IsFind = false; } #pragma endregion // 判斷是否有連出東西,如果連出來的東西大於 1 if (Connect.size() > 1) { // 由小排到大 sort(Connect.begin(), Connect.end(), SortByRows); StatusVector.push_back(Connect); } } // 前面的幾個張數,可能會找不到點,所以跳過處理 if (StatusVector.size() == 0) { memset(&PointType_1D[i * rows], -1, sizeof(int) * rows); continue; } // 排序之後取最大 sort(StatusVector.begin(), StatusVector.end(), SortByVectorSize); // 超出不重疊的最好連接方法 (最多取前三個) vector<int> BestCandidate; int Begin = rows; int End = 0; for (int j = 0; j < StatusVector.size() && j < 3; j++) { int CurrentBegin = StatusVector[j][0].rowIndex; int CurrentEnd = StatusVector[j][StatusVector[j].size() - 1].rowIndex; if (Begin > CurrentBegin) { Begin = min(Begin, CurrentBegin); End = max(End, CurrentEnd); BestCandidate.push_back(j); } if (End < CurrentEnd) { Begin = min(Begin, CurrentBegin); End = max(End, CurrentEnd); BestCandidate.push_back(j); } } // 加到裡面 for (int j = 1; j < BestCandidate.size(); j++) if (StatusVector[BestCandidate[j]].size() >= 3) for (int k = 0; k < StatusVector[BestCandidate[j]].size(); k++) StatusVector[0].push_back(StatusVector[j][k]); vector<ConnectInfo> LineVector = StatusVector[0]; int index = 0; // LineVector Index for (int j = 0; j < rows; j++) { int Type1D_Index = i * rows + j; if (LineVector[index].rowIndex != j) PointType_1D[Type1D_Index] = -1; else if (LineVector[index].rowIndex == j) { int BestN_Index = i * rows * ChooseBestN + // 張 LineVector[index].rowIndex * ChooseBestN + // row LineVector[index].chooseIndex; // ChooseIndex // 放進 PointType PointType_1D[j + i * rows] = PointType_BestN[BestN_Index]; index++; if (index >= LineVector.size()) { for (int k = j + 1; k < rows; k++) PointType_1D[k + i * rows] = -1; break; } } } } // Smooth int* tempPointType_1D = new int[size * rows]; for (int i = 0; i < size; i++) for (int j = 0; j < rows; j ++) { int totalPoint = 0; int totalZ = 0; int index = i * rows + j; if (PointType_1D[index] == -1) { tempPointType_1D[index] = -1; continue; } for (int k = -DenoiseWindowsRadius; k <= DenoiseWindowsRadius; k++) for (int l = -DenoiseWindowsRadius; l <= DenoiseWindowsRadius; l++) { int currentI = i + k; int currentJ = j + l; if (0 <= currentI && currentI < size && 0 <= currentJ && currentJ < rows) { int currentIndex = currentI *rows + currentJ; if (PointType_1D[currentIndex] != -1) { totalPoint++; totalZ += PointType_1D[currentIndex]; } } } tempPointType_1D[index] = totalZ / totalPoint; } memcpy(PointType_1D, tempPointType_1D, sizeof(int) * size * rows); delete[] tempPointType_1D; } bool TRCudaV2::SortByRows(ConnectInfo left, ConnectInfo right) { return left.rowIndex < right.rowIndex; 
} bool TRCudaV2::SortByVectorSize(vector<ConnectInfo> left, vector<ConnectInfo> right) { return right.size() < left.size(); } void TRCudaV2::CheckCudaError() { cudaError GPU_Error = cudaGetLastError(); if (GPU_Error != cudaSuccess) { cout << cudaGetErrorString(GPU_Error) << endl; assert(false); exit(-1); } } void TRCudaV2::SaveDelete(void* pointer) { if (pointer != NULL) delete[] pointer; }
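// ---------------------------------------------------------------------------------------------
// Note on the λ-to-k-space step above (ComputePXScale / FrequencyAdjust): the spectrometer samples
// are not uniform in wavenumber, so a lookup table is built on the CPU and each uniform k sample is
// reconstructed on the GPU by two-point linear interpolation. The standalone kernel below is only a
// simplified sketch of that idea; the names (ResampleToUniformK, pos, idx) and the assumption that
// idx[k] holds the first source sample whose position is >= k are illustrative, not the exact
// PXScale / Z1Function arithmetic used in TRCudaV2.
// ---------------------------------------------------------------------------------------------
__global__ void ResampleToUniformK(const float* src, const float* pos, const int* idx,
                                   float* dst, int n)
{
    int k = blockIdx.x * blockDim.x + threadIdx.x;
    if (k >= n) return;
    int i = idx[k];
    if (i <= 0 || i >= n) { dst[k] = 0.0f; return; }   // outside the usable band
    // Line through (pos[i-1], src[i-1]) and (pos[i], src[i]), evaluated at the uniform target k.
    float m = (src[i] - src[i - 1]) / (pos[i] - pos[i - 1]);
    float c = src[i] - m * pos[i];
    dst[k] = m * (float)k + c;
}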
5d6108ebef8fb7f9616a08188f8c96e2c5a44503.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
//
// auto-generated by op2.m on 19-Oct-2012 16:21:04
//

// user function
__device__
#include "updateP.h"

// CUDA kernel function
__global__ void op_cuda_updateP(double *arg0, double *arg1, const double *arg2,
                                int offset_s, int set_size) {

  // process set elements
  for (int n = threadIdx.x + blockIdx.x * blockDim.x; n < set_size; n += blockDim.x * gridDim.x) {
    // user-supplied kernel call
    updateP(arg0 + n, arg1 + n, arg2);
  }
}

// host stub function
void op_par_loop_updateP(char const *name, op_set set,
                         op_arg arg0, op_arg arg1, op_arg arg2) {

  double *arg2h = (double *)arg2.data;

  int nargs = 3;
  op_arg args[3];

  args[0] = arg0;
  args[1] = arg1;
  args[2] = arg2;

  if (OP_diags > 2) {
    printf(" kernel routine w/o indirection: updateP\n");
  }

  op_mpi_halo_exchanges(set, nargs, args);

  // initialise timers
  double cpu_t1, cpu_t2, wall_t1 = 0, wall_t2 = 0;
  op_timing_realloc(7);
  OP_kernels[7].name   = name;
  OP_kernels[7].count += 1;

  if (set->size > 0) {

    op_timers_core(&cpu_t1, &wall_t1);

    // transfer constants to GPU
    int consts_bytes = 0;
    consts_bytes += ROUND_UP(1 * sizeof(double));
    reallocConstArrays(consts_bytes);

    consts_bytes = 0;
    arg2.data   = OP_consts_h + consts_bytes;
    arg2.data_d = OP_consts_d + consts_bytes;
    for (int d = 0; d < 1; d++) ((double *)arg2.data)[d] = arg2h[d];
    consts_bytes += ROUND_UP(1 * sizeof(double));
    mvConstArraysToDevice(consts_bytes);

    // set CUDA execution parameters
#ifdef OP_BLOCK_SIZE_7
    int nthread = OP_BLOCK_SIZE_7;
#else
    // int nthread = OP_block_size;
    int nthread = 128;
#endif

    int nblocks = 200;

    // work out shared memory requirements per element
    int nshared = 0;

    // execute plan
    int offset_s = nshared * OP_WARPSIZE;
    nshared = nshared * nthread;

    hipLaunchKernelGGL(( op_cuda_updateP), dim3(nblocks), dim3(nthread), nshared, 0,
                       (double *) arg0.data_d,
                       (double *) arg1.data_d,
                       (double *) arg2.data_d,
                       offset_s,
                       set->size);

    cutilSafeCall(hipDeviceSynchronize());
    cutilCheckMsg("op_cuda_updateP execution failed\n");
  }

  op_mpi_set_dirtybit(nargs, args);

  // update kernel record
  op_timers_core(&cpu_t2, &wall_t2);
  OP_kernels[7].time     += wall_t2 - wall_t1;
  OP_kernels[7].transfer += (float)set->size * arg0.size;
  OP_kernels[7].transfer += (float)set->size * arg1.size * 2.0f;
}
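// ---------------------------------------------------------------------------------------------
// The generated kernel above uses a grid-stride loop: the host launches a fixed grid
// (nblocks = 200, nthread = 128 by default) and each thread advances by blockDim.x * gridDim.x,
// so one launch covers any set->size. A minimal self-contained sketch of the same pattern
// follows; the kernel name scale_set and its arguments are made up purely for illustration.
// ---------------------------------------------------------------------------------------------
__global__ void scale_set(double* x, double alpha, int set_size)
{
    // each thread handles elements id, id + stride, id + 2*stride, ...
    for (int n = threadIdx.x + blockIdx.x * blockDim.x; n < set_size;
         n += blockDim.x * gridDim.x)
    {
        x[n] *= alpha;   // per-element work, analogous to updateP(arg0 + n, arg1 + n, arg2)
    }
}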
5d6108ebef8fb7f9616a08188f8c96e2c5a44503.cu
// // auto-generated by op2.m on 19-Oct-2012 16:21:04 // // user function __device__ #include "updateP.h" // CUDA kernel function __global__ void op_cuda_updateP( double *arg0, double *arg1, const double *arg2, int offset_s, int set_size ) { // process set elements for (int n=threadIdx.x+blockIdx.x*blockDim.x; n<set_size; n+=blockDim.x*gridDim.x) { // user-supplied kernel call updateP( arg0+n, arg1+n, arg2 ); } } // host stub function void op_par_loop_updateP(char const *name, op_set set, op_arg arg0, op_arg arg1, op_arg arg2 ){ double *arg2h = (double *)arg2.data; int nargs = 3; op_arg args[3]; args[0] = arg0; args[1] = arg1; args[2] = arg2; if (OP_diags>2) { printf(" kernel routine w/o indirection: updateP\n"); } op_mpi_halo_exchanges(set, nargs, args); // initialise timers double cpu_t1, cpu_t2, wall_t1=0, wall_t2=0; op_timing_realloc(7); OP_kernels[7].name = name; OP_kernels[7].count += 1; if (set->size >0) { op_timers_core(&cpu_t1, &wall_t1); // transfer constants to GPU int consts_bytes = 0; consts_bytes += ROUND_UP(1*sizeof(double)); reallocConstArrays(consts_bytes); consts_bytes = 0; arg2.data = OP_consts_h + consts_bytes; arg2.data_d = OP_consts_d + consts_bytes; for (int d=0; d<1; d++) ((double *)arg2.data)[d] = arg2h[d]; consts_bytes += ROUND_UP(1*sizeof(double)); mvConstArraysToDevice(consts_bytes); // set CUDA execution parameters #ifdef OP_BLOCK_SIZE_7 int nthread = OP_BLOCK_SIZE_7; #else // int nthread = OP_block_size; int nthread = 128; #endif int nblocks = 200; // work out shared memory requirements per element int nshared = 0; // execute plan int offset_s = nshared*OP_WARPSIZE; nshared = nshared*nthread; op_cuda_updateP<<<nblocks,nthread,nshared>>>( (double *) arg0.data_d, (double *) arg1.data_d, (double *) arg2.data_d, offset_s, set->size ); cutilSafeCall(cudaDeviceSynchronize()); cutilCheckMsg("op_cuda_updateP execution failed\n"); } op_mpi_set_dirtybit(nargs, args); // update kernel record op_timers_core(&cpu_t2, &wall_t2); OP_kernels[7].time += wall_t2 - wall_t1; OP_kernels[7].transfer += (float)set->size * arg0.size; OP_kernels[7].transfer += (float)set->size * arg1.size * 2.0f; }
95a53c00016ce0d6c52e519b5e0bb8d116d42236.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "includes.h" __global__ void cleanCopy(int *S, int *D){ D[threadIdx.x] = S[threadIdx.x]; }
95a53c00016ce0d6c52e519b5e0bb8d116d42236.cu
#include "includes.h" __global__ void cleanCopy(int *S, int *D){ D[threadIdx.x] = S[threadIdx.x]; }
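cleanCopy above copies one element per thread with no bounds check, so it is only safe when launched with exactly one thread per element inside a single block. A minimal host-side usage sketch under that assumption; the array size of 256 and the printed index are illustrative choices.

// Host-side usage sketch for cleanCopy (single block, one thread per element).
#include <cstdio>
#include <cuda_runtime.h>

__global__ void cleanCopy(int* S, int* D) { D[threadIdx.x] = S[threadIdx.x]; }

int main() {
    const int n = 256;                  // must not exceed the block size
    int h_src[n], h_dst[n];
    for (int i = 0; i < n; ++i) h_src[i] = i;

    int *d_src, *d_dst;
    cudaMalloc(&d_src, n * sizeof(int));
    cudaMalloc(&d_dst, n * sizeof(int));
    cudaMemcpy(d_src, h_src, n * sizeof(int), cudaMemcpyHostToDevice);

    cleanCopy<<<1, n>>>(d_src, d_dst);  // one thread per element, one block

    cudaMemcpy(h_dst, d_dst, n * sizeof(int), cudaMemcpyDeviceToHost);
    printf("h_dst[42] = %d\n", h_dst[42]);

    cudaFree(d_src);
    cudaFree(d_dst);
    return 0;
}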
c2be680e1259810e00a9e6fe7afbc06db7e4e0d1.hip
// !!! This is a file automatically generated by hipify!!! #include <stdio.h> #include <hip/hip_runtime.h> #include <cmath> #include "glm/glm.hpp" #include "utilities.h" #include "kernel.h" #if SHARED == 1 #define ACC(x,y,z) sharedMemAcc(x,y,z) #else #define ACC(x,y,z) naiveAcc(x,y,z) #endif //GLOBALS dim3 threadsPerBlock(blockSize); int numObjects; const float planetMass = MASS; const __device__ float starMass = 5e10; //size of the height map in simulation space glm::vec4 * dev_pos; glm::vec3 * dev_vel; glm::vec3 * dev_acc; float * dev_density; float * dev_pressure; glm::vec3 * dev_force; // Extra arrays for more advanced integration techniques glm::vec3 * dev_acc_2; void checkCUDAError(const char *msg, int line = -1) { hipError_t err = hipGetLastError(); if( hipSuccess != err) { if( line >= 0 ) { fprintf(stderr, "Line %d: ", line); } fprintf(stderr, "Cuda error: %s: %s.\n", msg, hipGetErrorString( err) ); exit(EXIT_FAILURE); } } __host__ __device__ unsigned int hash(unsigned int a){ a = (a+0x7ed55d16) + (a<<12); a = (a^0xc761c23c) ^ (a>>19); a = (a+0x165667b1) + (a<<5); a = (a+0xd3a2646c) ^ (a<<9); a = (a+0xfd7046c5) + (a<<3); a = (a^0xb55a4f09) ^ (a>>16); return a; } //Function that generates static. __host__ __device__ glm::vec3 generateRandomNumberFromThread(float time, int index) { thrust::default_random_engine rng(hash(index*time)); thrust::uniform_real_distribution<float> u01(0,1); return glm::vec3((float) u01(rng), (float) u01(rng), (float) u01(rng)); } /********************** SPH Functions **********************/ // kernels from // http://www.matthiasmueller.info/publications/sca03.pdf // More reference for understanding from // http://andrew.gibiansky.com/blog/physics/computational-fluid-dynamics/ __device__ float kernel_general(float xij) { if ( xij > kernelSize) return 0.0f; else // poly6 kernel return (315.0 * (kernelSizeSqr - xij*xij) * (kernelSizeSqr - xij*xij) * (kernelSizeSqr - xij*xij))/ (64.0 * pi * kernelSizeSqr * kernelSizeSqr * kernelSizeSqr * kernelSizeSqr * kernelSize); /* // spiky kernel return (15.0 * (kernelSize - xij) * (kernelSize - xij) * (kernelSize - xij)) / ( pi * kernelSizeSqr * kernelSizeSqr * kernelSizeSqr); // viscous kernel return (15.0 * (-(0.5*(xij*xij*xij)/(kernelSizeSqr * kernelSize)) + ((xij*xij)/kernelSizeSqr) + (0.5 * kernelSize/xij) - 1.0) ) / (2.0 * pi * kernelSizeSqr * kernelSize); */ } // Check general grad and lap __device__ float kernel_general_gradient(float xij) { if ( xij > kernelSize) return 0.0f; else return (-945 * xij * (kernelSizeSqr - xij*xij) * (kernelSizeSqr - xij*xij))/ (32 * pi * kernelSizeSqr * kernelSizeSqr * kernelSizeSqr * kernelSizeSqr * kernelSize); } __device__ float kernel_general_laplacian(float xij) { if ( xij > kernelSize) return 0.0f; else return (945 * (kernelSizeSqr - xij*xij) * (4*xij*xij - (kernelSizeSqr - xij*xij) ) )/ (32 * pi * kernelSizeSqr * kernelSizeSqr * kernelSizeSqr * kernelSizeSqr * kernelSize); } __device__ float kernel_visc_laplacian(float xij) { if ( xij > kernelSize) return 0.0f; else // viscous kernel /* (15.0 * (-(0.5*(xij*xij*xij)/(kernelSizeSqr * kernelSize)) + ((xij*xij)/kernelSizeSqr) + (0.5 * kernelSize/xij) - 1.0) ) / (2.0 * pi * kernelSizeSqr * kernelSize); */ return (45.0 * (kernelSize - xij))/(pi * kernelSizeSqr * kernelSizeSqr * kernelSizeSqr); } __device__ float kernel_press_gradient(float xij) { if ( xij > kernelSize) return 0.0f; else // viscous kernel /* (15.0 * (-(0.5*(xij*xij*xij)/(kernelSizeSqr * kernelSize)) + ((xij*xij)/kernelSizeSqr) + (0.5 * kernelSize/xij) - 1.0) ) / (2.0 
* pi * kernelSizeSqr * kernelSize); */ return (- 45.0 * (kernelSize - xij) * (kernelSize - xij))/(pi * kernelSizeSqr * kernelSizeSqr * kernelSizeSqr); } /***********************************************************/ //Generate randomized starting positions for the planets in the XY plane //Also initialized the masses __global__ void generateRandomPosArray(int time, int N, glm::vec4 * arr, float scale, float mass) { int index = (blockIdx.x * blockDim.x) + threadIdx.x; if(index < N) { // go from -xmax to xmax, -ymax to ymax and z = 0 upwards glm::vec3 llcorner = glm::vec3(-B_XMAX,-B_YMAX,B_ZMIN); int numberOfSpheresInX = (2*B_XMAX) / (2*RADIUS); int numberOfSpheresInY = (2*B_YMAX) / (2*RADIUS); int z = index * 1.0f / (numberOfSpheresInX * numberOfSpheresInY); int y = (index - (z * numberOfSpheresInX * numberOfSpheresInY)) / numberOfSpheresInX; int x = index - (z * numberOfSpheresInX * numberOfSpheresInY) - (y * numberOfSpheresInX); arr[index].x = llcorner.x + 2*RADIUS*x; arr[index].y = llcorner.y + 2*RADIUS*y; arr[index].z = llcorner.z + 2*RADIUS*z; /* glm::vec3 rand = scale*(generateRandomNumberFromThread(time, index)-0.5f); arr[index].x = rand.x; arr[index].y = rand.y; arr[index].z = 0.45f * (rand.z + 0.5f * scale); */ arr[index].w = mass; } } //Determine velocity from the distance from the center star. Not super physically accurate because //the mass ratio is too close, but it makes for an interesting looking scene __global__ void generateCircularVelArray(int time, int N, glm::vec3 * arr, glm::vec4 * pos) { int index = (blockIdx.x * blockDim.x) + threadIdx.x; if(index < N) { //glm::vec3 R = glm::vec3(pos[index].x, pos[index].y, pos[index].z); //float r = glm::length(R) + EPSILON; //float s = sqrt(G*starMass/r); //glm::vec3 D = glm::normalize(glm::cross(R/r,glm::vec3(0,0,1))); arr[index].x = 0;//s*D.x; arr[index].y = 0;//s*D.y; arr[index].z = 0;//s*D.z; } } //Generate randomized starting velocities in the XY plane __global__ void generateRandomVelArray(int time, int N, glm::vec3 * arr, float scale) { int index = (blockIdx.x * blockDim.x) + threadIdx.x; if(index < N) { glm::vec3 rand = scale*(generateRandomNumberFromThread(time, index) - 0.5f); arr[index].x = 0;//rand.x; arr[index].y = 0;//rand.y; arr[index].z = 0;//rand.z; } } //TODO: Determine force between two bodies __device__ glm::vec3 calculateAcceleration(glm::vec4 us, glm::vec4 them) { // G*m_us*m_them //F = ------------- // r^2 // // G*m_us*m_them G*m_them //a = ------------- = -------- // m_us*r^2 r^2 glm::vec3 myPos = glm::vec3( us.x, us.y, us.z); glm::vec3 urPos = glm::vec3(them.x,them.y,them.z); glm::vec3 direction = urPos - myPos; float distance = glm::length(direction); if(distance > 0.000001f) { float GConst = G; return GConst * them.w * (1.0f/glm::dot(direction,direction)) * glm::normalize(direction); } else return glm::vec3(0.0f); } //TODO: Core force calc kernel global memory __device__ glm::vec3 naiveAcc(int N, glm::vec4 my_pos, glm::vec4 * their_pos) { return GRAVITY; glm::vec3 acc = calculateAcceleration(my_pos, glm::vec4(0,0,0,starMass)); for(int i=0; i< N ; i++) { acc += calculateAcceleration(my_pos, their_pos[i]); } return acc; } //TODO: Core force calc kernel shared memory __device__ glm::vec3 sharedMemAcc(int N, glm::vec4 my_pos, glm::vec4 * their_pos) { return GRAVITY; __shared__ glm::vec4 sharedBodyData[blockSize]; int tid = threadIdx.x; int bid = blockIdx.x; glm::vec3 acc = glm::vec3(0.0f); //calculateAcceleration(my_pos, glm::vec4(0,0,0,starMass)); int numberOfLoops = ceil((1.0f*N) / blockSize); for(int i=0; 
i<numberOfLoops; i++) { int deltaIndex = ((i + bid)%numberOfLoops ); int index = deltaIndex * blockSize + tid; if( index < N) { sharedBodyData[tid] = their_pos[index]; } __syncthreads(); for(int j=0; j< blockSize; j++) { if((deltaIndex * blockSize + j < N) ) acc += calculateAcceleration(my_pos,sharedBodyData[j]); } } return acc; } __global__ void calculateSPHDensityPressure(int N, glm::vec4 * pos, float * density, float * pressure) { __shared__ glm::vec4 sharedPos[blockSize]; int index = threadIdx.x + (blockIdx.x * blockDim.x); int tid = threadIdx.x; int bid = blockIdx.x; int numberOfLoops = ceil((1.0f*N) / blockSize); glm::vec4 myPos; if(index < N) myPos = pos[index]; float myDensity = 0; for(int i=0; i<numberOfLoops; i++) { // For this block'd data, bring it into shared memory int deltaIndex = ((i + bid)%numberOfLoops ); int delIndex = deltaIndex * blockSize + tid; if(delIndex < N) sharedPos[tid] = pos[delIndex]; __syncthreads(); // Traverse shared memory for(int j=0; j< blockSize; j++) { if( index < N && (deltaIndex * blockSize + j < N)) { glm::vec4 theirPos = sharedPos[j]; glm::vec3 r = glm::vec3(myPos.x-theirPos.x, myPos.y-theirPos.y, myPos.z-theirPos.z); float xij = glm::length(r); float W = kernel_general(xij); myDensity += theirPos.w * W; } } __syncthreads(); } if(index < N) { density[index] = myDensity; pressure[index] = STIFFNESS * (myDensity - REF_DENSITY); } } __global__ void calculateSPHForces(int N, glm::vec4 * pos, glm::vec3 * vel, float *density, float * pressure, glm::vec3 * acc) { __shared__ glm::vec4 sharedPositions[blockSize]; __shared__ float sharedDensity[blockSize]; __shared__ float sharedPressure[blockSize]; __shared__ glm::vec3 sharedVelocity[blockSize]; int index = threadIdx.x + (blockIdx.x * blockDim.x); int tid = threadIdx.x; int bid = blockIdx.x; int numberOfLoops = ceil((1.0f*N) / blockSize); glm::vec4 myPos; float myDens; float myPress; glm::vec3 myVel; if(index < N) { myPos = pos[index]; myDens = density[index]; myPress = pressure[index]; myVel = vel[index]; } glm::vec3 pressureAcc = glm::vec3(0); glm::vec3 viscosityAcc = glm::vec3(0); glm::vec3 surfaceAcc = glm::vec3(0); for(int i=0; i<numberOfLoops; i++) { // For this block'd data, bring it into shared memory int deltaIndex = ((i + bid)%numberOfLoops ); int delIndex = deltaIndex * blockSize + tid; if(delIndex < N) { sharedPositions[tid] = pos[delIndex]; sharedDensity[tid] = density[delIndex]; sharedPressure[tid] = pressure[delIndex]; sharedVelocity[tid] = vel[delIndex]; } __syncthreads(); // Traverse shared memory for(int j=0; j< blockSize; j++) { if( index < N && (deltaIndex * blockSize + j < N)) { glm::vec4 theirPos = sharedPositions[j]; float theirDens = sharedDensity[j]; float theirPress = sharedPressure[j]; glm::vec3 theirVel = sharedVelocity[j]; glm::vec3 r = glm::vec3(myPos.x-theirPos.x, myPos.y-theirPos.y, myPos.z-theirPos.z); float xij = glm::length(r); // Symmetrization based on Monaghan float pressureTerm = - theirPos.w * (myPress/(myDens*myDens) + theirPress/(theirDens*theirDens) ) * kernel_press_gradient(xij); if(pressureTerm == pressureTerm && fabs(myDens) > EPSIL && fabs(theirDens) > EPSIL) // NaN check pressureAcc += pressureTerm * r/(xij + EPSIL); if(myDens==myDens && theirDens == theirDens && fabs(myDens) > EPSIL && fabs(theirDens) > EPSIL) // NaN check viscosityAcc += VISCOSITY * theirPos.w * (theirVel - myVel)/(myDens * theirDens) * kernel_visc_laplacian(xij); } } __syncthreads(); } if(index < N) { acc[index] = pressureAcc + viscosityAcc + surfaceAcc + GRAVITY; //printf("%3d: %3.3f 
%3.3f %3.3f\n",index, acc[index].x, acc[index].y, acc[index].z); //printf("\tp: %3.3f %3.3f %3.3f \n", pressureAcc.x, pressureAcc.y, pressureAcc.z); //printf("\tv: %3.3f %3.3f %3.3f \n", viscosityAcc.x, viscosityAcc.y, viscosityAcc.z); } } __global__ void updateSPHExplicit(int N, float dt, glm::vec4 * pos, glm::vec3 * vel, glm::vec3 * acc) { int index = threadIdx.x + (blockIdx.x * blockDim.x); if(index < N) { glm::vec3 newVel = vel[index] + dt * acc[index]; vel[index] = newVel; pos[index].x += dt * newVel.x; pos[index].y += dt * newVel.y; pos[index].z += dt * newVel.z; } } //Simple Euler integration scheme __global__ void updateF(int N, float dt, glm::vec4 * pos, glm::vec3 * vel, glm::vec3 * acc) { int index = threadIdx.x + (blockIdx.x * blockDim.x); glm::vec4 my_pos = glm::vec4(0,0,0,1); if( index < N ) my_pos = pos[index]; glm::vec3 accel = ACC(N, my_pos, pos); if( index < N ) acc[index] = accel; } __global__ void updateP(int N, float dt, glm::vec4 * pos, glm::vec3 * vel, glm::vec3 * acc) { int index = threadIdx.x + (blockIdx.x * blockDim.x); if( index < N ) { vel[index] += acc[index] * dt; pos[index].x += vel[index].x * dt; pos[index].y += vel[index].y * dt; pos[index].z += vel[index].z * dt; } } __global__ void updateVelVerletPart1F(int N, float dt, glm::vec4 * pos, glm::vec3 * vel, glm::vec3 * acc) { int index = threadIdx.x + (blockIdx.x * blockDim.x); glm::vec4 my_pos; glm::vec3 accel; if( index < N ) my_pos = pos[index]; accel = ACC(N,my_pos,pos); if( index < N ) acc[index] = accel; } __global__ void updateVelVerletPart1P(int N, float dt, glm::vec4 * pos, glm::vec3 * vel, glm::vec3 * acc) { int index = threadIdx.x + (blockIdx.x * blockDim.x); if( index < N ) { glm::vec3 deltaPos = dt * (vel[index] + dt * 0.5f * acc[index]); pos[index].x += deltaPos.x; pos[index].y += deltaPos.y; pos[index].z += deltaPos.z; } } __global__ void updateVelVerletPart2F(int N, float dt, glm::vec4 * pos, glm::vec3 * vel, glm::vec3 * acc_2) { int index = threadIdx.x + (blockIdx.x * blockDim.x); glm::vec4 my_pos; glm::vec3 accel; if( index < N ) my_pos = pos[index]; accel = ACC(N,my_pos,pos); if( index < N ) acc_2[index] = accel; } __global__ void updateVelVerletPart2P(int N, float dt, glm::vec4 * pos, glm::vec3 * vel, glm::vec3 * acc, glm::vec3 * acc_2) { int index = threadIdx.x + (blockIdx.x * blockDim.x); if( index < N ) { vel[index] += dt * 0.5f * (acc[index] + acc_2[index]); } } __global__ void handleCollisions(int N, glm::vec4 * pos, glm::vec3 * vel) { int index = threadIdx.x + (blockIdx.x * blockDim.x); if( index < N ) { glm::vec4 myPos = pos[index]; glm::vec3 myVel = vel[index]; // Simple impulse collision handling // Down side, adds energy into the system // Consider stiff springs with collision buffers if( myPos.z < 0.0f) { myPos.z = EPSIL; myVel.z = -DRAG*myVel.z; } if( myPos.z > ZMAX) { myPos.z = ZMAX - EPSIL; myVel.z = -DRAG*myVel.z; } if(myPos.y < -YMAX) { myPos.y = -(YMAX - EPSIL); myVel.y = -DRAG*myVel.y; } if(myPos.y > YMAX) { myPos.y = YMAX - EPSIL; myVel.y = -DRAG*myVel.y; } if(myPos.x < -XMAX) { myPos.x = -(XMAX - EPSIL); myVel.x = -DRAG*myVel.x; } if(myPos.x > XMAX) { myPos.x = XMAX - EPSIL; myVel.x = -DRAG*myVel.x; } pos[index] = myPos; vel[index] = myVel; } } //Update the vertex buffer object //(The VBO is where OpenGL looks for the positions for the planets) __global__ void sendToVBO(int N, glm::vec4 * pos, float * vbo, int width, int height, float s_scale) { int index = threadIdx.x + (blockIdx.x * blockDim.x); float c_scale_w = -2.0f / s_scale; float c_scale_h = -2.0f / s_scale; // z 
is negative in downward direction float c_scale_z = 2.0f / s_scale; if(index<N) { vbo[4*index+0] = pos[index].x*c_scale_w; vbo[4*index+1] = pos[index].y*c_scale_h; vbo[4*index+2] = pos[index].z*c_scale_z; vbo[4*index+3] = 1; } } //Update the texture pixel buffer object //(This texture is where openGL pulls the data for the height map) __global__ void sendToPBO(int N, glm::vec4 * pos, float4 * pbo, int width, int height, float s_scale) { int index = threadIdx.x + (blockIdx.x * blockDim.x); int x = index % width; int y = index / width; float w2 = width / 2.0; float h2 = height / 2.0; float c_scale_w = width / s_scale; float c_scale_h = height / s_scale; glm::vec3 color(0.05, 0.15, 0.3); glm::vec3 acc = ACC(N, glm::vec4((x-w2)/c_scale_w,(y-h2)/c_scale_h,0,1), pos); if(x<width && y<height) { float mag = sqrt(sqrt(acc.x*acc.x + acc.y*acc.y + acc.z*acc.z)); // Each thread writes one pixel location in the texture (textel) pbo[index].w = (mag < 1.0f) ? mag : 1.0f; } } /************************************* * Wrappers for the __global__ calls * *************************************/ //Initialize memory, update some globals void initCuda(int N) { numObjects = N; dim3 fullBlocksPerGrid((int)ceil(float(N)/float(blockSize))); hipMalloc((void**)&dev_pos, N*sizeof(glm::vec4)); checkCUDAErrorWithLine("Kernel failed!"); hipMalloc((void**)&dev_vel, N*sizeof(glm::vec3)); checkCUDAErrorWithLine("Kernel failed!"); hipMalloc((void**)&dev_acc, N*sizeof(glm::vec3)); checkCUDAErrorWithLine("Kernel failed!"); // SPH hipMalloc((void**)&dev_density, N*sizeof(float)); checkCUDAErrorWithLine("Kernel failed!"); hipMalloc((void**)&dev_pressure, N*sizeof(float)); checkCUDAErrorWithLine("Kernel failed!"); hipMalloc((void**)&dev_force, N*sizeof(glm::vec3)); checkCUDAErrorWithLine("Kernel failed!"); // For velocityVerlet hipMalloc((void**)&dev_acc_2, N*sizeof(glm::vec3)); checkCUDAErrorWithLine("Kernel failed!"); hipLaunchKernelGGL(( generateRandomPosArray), dim3(fullBlocksPerGrid), dim3(blockSize), 0, 0, 1, numObjects, dev_pos, scene_scale, planetMass); checkCUDAErrorWithLine("Kernel failed!"); hipLaunchKernelGGL(( generateCircularVelArray), dim3(fullBlocksPerGrid), dim3(blockSize), 0, 0, 2, numObjects, dev_vel, dev_pos); checkCUDAErrorWithLine("Kernel failed!"); } void freeCuda(int N) { hipFree(dev_pos); hipFree(dev_vel); hipFree(dev_acc); hipFree(dev_density); hipFree(dev_pressure); hipFree(dev_force); hipFree(dev_acc_2); } void resetSim(int N) { numObjects = N; dim3 fullBlocksPerGrid((int)ceil(float(N)/float(blockSize))); hipLaunchKernelGGL(( generateRandomPosArray), dim3(fullBlocksPerGrid), dim3(blockSize), 0, 0, 1, numObjects, dev_pos, scene_scale, planetMass); checkCUDAErrorWithLine("Kernel failed!"); hipLaunchKernelGGL(( generateCircularVelArray), dim3(fullBlocksPerGrid), dim3(blockSize), 0, 0, 2, numObjects, dev_vel, dev_pos); checkCUDAErrorWithLine("Kernel failed!"); } void cudaCollisionsWrapper() { dim3 fullBlocksPerGrid((int)ceil(float(numObjects)/float(blockSize))); hipLaunchKernelGGL(( handleCollisions), dim3(fullBlocksPerGrid), dim3(blockSize), 0, 0, numObjects, dev_pos, dev_vel); checkCUDAErrorWithLine("Kernel failed!"); } void cudaNBodyUpdateWrapper(float dt) { dim3 fullBlocksPerGrid((int)ceil(float(numObjects)/float(blockSize))); hipLaunchKernelGGL(( updateF), dim3(fullBlocksPerGrid), dim3(blockSize), 0, 0, numObjects, dt, dev_pos, dev_vel, dev_acc); hipLaunchKernelGGL(( updateP), dim3(fullBlocksPerGrid), dim3(blockSize), 0, 0, numObjects, dt, dev_pos, dev_vel, dev_acc); checkCUDAErrorWithLine("Kernel 
failed!"); cudaCollisionsWrapper(); } void cudaNBodyUpdateVelocityVerletWrapper(float dt) { dim3 fullBlocksPerGrid((int)ceil(float(numObjects)/float(blockSize))); hipLaunchKernelGGL(( updateVelVerletPart1F), dim3(fullBlocksPerGrid), dim3(blockSize), 0, 0, numObjects, dt, dev_pos, dev_vel, dev_acc); checkCUDAErrorWithLine("Kernel failed!"); hipLaunchKernelGGL(( updateVelVerletPart1P), dim3(fullBlocksPerGrid), dim3(blockSize), 0, 0, numObjects, dt, dev_pos, dev_vel, dev_acc); checkCUDAErrorWithLine("Kernel failed!"); hipLaunchKernelGGL(( updateVelVerletPart2F), dim3(fullBlocksPerGrid), dim3(blockSize), 0, 0, numObjects, dt, dev_pos, dev_vel, dev_acc_2); checkCUDAErrorWithLine("Kernel failed!"); hipLaunchKernelGGL(( updateVelVerletPart2P), dim3(fullBlocksPerGrid), dim3(blockSize), 0, 0, numObjects, dt, dev_pos, dev_vel, dev_acc, dev_acc_2); checkCUDAErrorWithLine("Kernel failed!"); cudaCollisionsWrapper(); } void cudaSPHUpdateWrapper(float dt) { dim3 fullBlocksPerGrid((int)ceil(float(numObjects)/float(blockSize))); // Calculate Density and Pressures hipLaunchKernelGGL(( calculateSPHDensityPressure), dim3(fullBlocksPerGrid), dim3(blockSize), 0, 0, numObjects,dev_pos,dev_density,dev_pressure); checkCUDAErrorWithLine("Kernel failed!"); // Calculate Accelerations hipLaunchKernelGGL(( calculateSPHForces), dim3(fullBlocksPerGrid), dim3(blockSize), 0, 0, numObjects,dev_pos,dev_vel,dev_density,dev_pressure,dev_acc); checkCUDAErrorWithLine("Kernel failed!"); // Update Positions and Velocities hipLaunchKernelGGL(( updateSPHExplicit), dim3(fullBlocksPerGrid), dim3(blockSize), 0, 0, numObjects,dt,dev_pos,dev_vel,dev_acc); checkCUDAErrorWithLine("Kernel failed!"); // Check for collisions with boundaries cudaCollisionsWrapper(); checkCUDAErrorWithLine("Kernel failed!"); } void cudaUpdateVBO(float * vbodptr, int width, int height) { dim3 fullBlocksPerGrid((int)ceil(float(numObjects)/float(blockSize))); hipLaunchKernelGGL(( sendToVBO), dim3(fullBlocksPerGrid), dim3(blockSize), 0, 0, numObjects, dev_pos, vbodptr, width, height, scene_scale); checkCUDAErrorWithLine("Kernel failed!"); } void cudaUpdatePBO(float4 * pbodptr, int width, int height) { dim3 fullBlocksPerGrid((int)ceil(float(width*height)/float(blockSize))); hipLaunchKernelGGL(( sendToPBO), dim3(fullBlocksPerGrid), dim3(blockSize), 0, 0, numObjects, dev_pos, pbodptr, width, height, scene_scale); checkCUDAErrorWithLine("Kernel failed!"); }
c2be680e1259810e00a9e6fe7afbc06db7e4e0d1.cu
#include <stdio.h> #include <cuda.h> #include <cmath> #include "glm/glm.hpp" #include "utilities.h" #include "kernel.h" #if SHARED == 1 #define ACC(x,y,z) sharedMemAcc(x,y,z) #else #define ACC(x,y,z) naiveAcc(x,y,z) #endif //GLOBALS dim3 threadsPerBlock(blockSize); int numObjects; const float planetMass = MASS; const __device__ float starMass = 5e10; //size of the height map in simulation space glm::vec4 * dev_pos; glm::vec3 * dev_vel; glm::vec3 * dev_acc; float * dev_density; float * dev_pressure; glm::vec3 * dev_force; // Extra arrays for more advanced integration techniques glm::vec3 * dev_acc_2; void checkCUDAError(const char *msg, int line = -1) { cudaError_t err = cudaGetLastError(); if( cudaSuccess != err) { if( line >= 0 ) { fprintf(stderr, "Line %d: ", line); } fprintf(stderr, "Cuda error: %s: %s.\n", msg, cudaGetErrorString( err) ); exit(EXIT_FAILURE); } } __host__ __device__ unsigned int hash(unsigned int a){ a = (a+0x7ed55d16) + (a<<12); a = (a^0xc761c23c) ^ (a>>19); a = (a+0x165667b1) + (a<<5); a = (a+0xd3a2646c) ^ (a<<9); a = (a+0xfd7046c5) + (a<<3); a = (a^0xb55a4f09) ^ (a>>16); return a; } //Function that generates static. __host__ __device__ glm::vec3 generateRandomNumberFromThread(float time, int index) { thrust::default_random_engine rng(hash(index*time)); thrust::uniform_real_distribution<float> u01(0,1); return glm::vec3((float) u01(rng), (float) u01(rng), (float) u01(rng)); } /********************** SPH Functions **********************/ // kernels from // http://www.matthiasmueller.info/publications/sca03.pdf // More reference for understanding from // http://andrew.gibiansky.com/blog/physics/computational-fluid-dynamics/ __device__ float kernel_general(float xij) { if ( xij > kernelSize) return 0.0f; else // poly6 kernel return (315.0 * (kernelSizeSqr - xij*xij) * (kernelSizeSqr - xij*xij) * (kernelSizeSqr - xij*xij))/ (64.0 * pi * kernelSizeSqr * kernelSizeSqr * kernelSizeSqr * kernelSizeSqr * kernelSize); /* // spiky kernel return (15.0 * (kernelSize - xij) * (kernelSize - xij) * (kernelSize - xij)) / ( pi * kernelSizeSqr * kernelSizeSqr * kernelSizeSqr); // viscous kernel return (15.0 * (-(0.5*(xij*xij*xij)/(kernelSizeSqr * kernelSize)) + ((xij*xij)/kernelSizeSqr) + (0.5 * kernelSize/xij) - 1.0) ) / (2.0 * pi * kernelSizeSqr * kernelSize); */ } // Check general grad and lap __device__ float kernel_general_gradient(float xij) { if ( xij > kernelSize) return 0.0f; else return (-945 * xij * (kernelSizeSqr - xij*xij) * (kernelSizeSqr - xij*xij))/ (32 * pi * kernelSizeSqr * kernelSizeSqr * kernelSizeSqr * kernelSizeSqr * kernelSize); } __device__ float kernel_general_laplacian(float xij) { if ( xij > kernelSize) return 0.0f; else return (945 * (kernelSizeSqr - xij*xij) * (4*xij*xij - (kernelSizeSqr - xij*xij) ) )/ (32 * pi * kernelSizeSqr * kernelSizeSqr * kernelSizeSqr * kernelSizeSqr * kernelSize); } __device__ float kernel_visc_laplacian(float xij) { if ( xij > kernelSize) return 0.0f; else // viscous kernel /* (15.0 * (-(0.5*(xij*xij*xij)/(kernelSizeSqr * kernelSize)) + ((xij*xij)/kernelSizeSqr) + (0.5 * kernelSize/xij) - 1.0) ) / (2.0 * pi * kernelSizeSqr * kernelSize); */ return (45.0 * (kernelSize - xij))/(pi * kernelSizeSqr * kernelSizeSqr * kernelSizeSqr); } __device__ float kernel_press_gradient(float xij) { if ( xij > kernelSize) return 0.0f; else // viscous kernel /* (15.0 * (-(0.5*(xij*xij*xij)/(kernelSizeSqr * kernelSize)) + ((xij*xij)/kernelSizeSqr) + (0.5 * kernelSize/xij) - 1.0) ) / (2.0 * pi * kernelSizeSqr * kernelSize); */ return (- 45.0 * 
(kernelSize - xij) * (kernelSize - xij))/(pi * kernelSizeSqr * kernelSizeSqr * kernelSizeSqr); } /***********************************************************/ //Generate randomized starting positions for the planets in the XY plane //Also initialized the masses __global__ void generateRandomPosArray(int time, int N, glm::vec4 * arr, float scale, float mass) { int index = (blockIdx.x * blockDim.x) + threadIdx.x; if(index < N) { // go from -xmax to xmax, -ymax to ymax and z = 0 upwards glm::vec3 llcorner = glm::vec3(-B_XMAX,-B_YMAX,B_ZMIN); int numberOfSpheresInX = (2*B_XMAX) / (2*RADIUS); int numberOfSpheresInY = (2*B_YMAX) / (2*RADIUS); int z = index * 1.0f / (numberOfSpheresInX * numberOfSpheresInY); int y = (index - (z * numberOfSpheresInX * numberOfSpheresInY)) / numberOfSpheresInX; int x = index - (z * numberOfSpheresInX * numberOfSpheresInY) - (y * numberOfSpheresInX); arr[index].x = llcorner.x + 2*RADIUS*x; arr[index].y = llcorner.y + 2*RADIUS*y; arr[index].z = llcorner.z + 2*RADIUS*z; /* glm::vec3 rand = scale*(generateRandomNumberFromThread(time, index)-0.5f); arr[index].x = rand.x; arr[index].y = rand.y; arr[index].z = 0.45f * (rand.z + 0.5f * scale); */ arr[index].w = mass; } } //Determine velocity from the distance from the center star. Not super physically accurate because //the mass ratio is too close, but it makes for an interesting looking scene __global__ void generateCircularVelArray(int time, int N, glm::vec3 * arr, glm::vec4 * pos) { int index = (blockIdx.x * blockDim.x) + threadIdx.x; if(index < N) { //glm::vec3 R = glm::vec3(pos[index].x, pos[index].y, pos[index].z); //float r = glm::length(R) + EPSILON; //float s = sqrt(G*starMass/r); //glm::vec3 D = glm::normalize(glm::cross(R/r,glm::vec3(0,0,1))); arr[index].x = 0;//s*D.x; arr[index].y = 0;//s*D.y; arr[index].z = 0;//s*D.z; } } //Generate randomized starting velocities in the XY plane __global__ void generateRandomVelArray(int time, int N, glm::vec3 * arr, float scale) { int index = (blockIdx.x * blockDim.x) + threadIdx.x; if(index < N) { glm::vec3 rand = scale*(generateRandomNumberFromThread(time, index) - 0.5f); arr[index].x = 0;//rand.x; arr[index].y = 0;//rand.y; arr[index].z = 0;//rand.z; } } //TODO: Determine force between two bodies __device__ glm::vec3 calculateAcceleration(glm::vec4 us, glm::vec4 them) { // G*m_us*m_them //F = ------------- // r^2 // // G*m_us*m_them G*m_them //a = ------------- = -------- // m_us*r^2 r^2 glm::vec3 myPos = glm::vec3( us.x, us.y, us.z); glm::vec3 urPos = glm::vec3(them.x,them.y,them.z); glm::vec3 direction = urPos - myPos; float distance = glm::length(direction); if(distance > 0.000001f) { float GConst = G; return GConst * them.w * (1.0f/glm::dot(direction,direction)) * glm::normalize(direction); } else return glm::vec3(0.0f); } //TODO: Core force calc kernel global memory __device__ glm::vec3 naiveAcc(int N, glm::vec4 my_pos, glm::vec4 * their_pos) { return GRAVITY; glm::vec3 acc = calculateAcceleration(my_pos, glm::vec4(0,0,0,starMass)); for(int i=0; i< N ; i++) { acc += calculateAcceleration(my_pos, their_pos[i]); } return acc; } //TODO: Core force calc kernel shared memory __device__ glm::vec3 sharedMemAcc(int N, glm::vec4 my_pos, glm::vec4 * their_pos) { return GRAVITY; __shared__ glm::vec4 sharedBodyData[blockSize]; int tid = threadIdx.x; int bid = blockIdx.x; glm::vec3 acc = glm::vec3(0.0f); //calculateAcceleration(my_pos, glm::vec4(0,0,0,starMass)); int numberOfLoops = ceil((1.0f*N) / blockSize); for(int i=0; i<numberOfLoops; i++) { int deltaIndex = ((i + 
bid)%numberOfLoops ); int index = deltaIndex * blockSize + tid; if( index < N) { sharedBodyData[tid] = their_pos[index]; } __syncthreads(); for(int j=0; j< blockSize; j++) { if((deltaIndex * blockSize + j < N) ) acc += calculateAcceleration(my_pos,sharedBodyData[j]); } } return acc; } __global__ void calculateSPHDensityPressure(int N, glm::vec4 * pos, float * density, float * pressure) { __shared__ glm::vec4 sharedPos[blockSize]; int index = threadIdx.x + (blockIdx.x * blockDim.x); int tid = threadIdx.x; int bid = blockIdx.x; int numberOfLoops = ceil((1.0f*N) / blockSize); glm::vec4 myPos; if(index < N) myPos = pos[index]; float myDensity = 0; for(int i=0; i<numberOfLoops; i++) { // For this block'd data, bring it into shared memory int deltaIndex = ((i + bid)%numberOfLoops ); int delIndex = deltaIndex * blockSize + tid; if(delIndex < N) sharedPos[tid] = pos[delIndex]; __syncthreads(); // Traverse shared memory for(int j=0; j< blockSize; j++) { if( index < N && (deltaIndex * blockSize + j < N)) { glm::vec4 theirPos = sharedPos[j]; glm::vec3 r = glm::vec3(myPos.x-theirPos.x, myPos.y-theirPos.y, myPos.z-theirPos.z); float xij = glm::length(r); float W = kernel_general(xij); myDensity += theirPos.w * W; } } __syncthreads(); } if(index < N) { density[index] = myDensity; pressure[index] = STIFFNESS * (myDensity - REF_DENSITY); } } __global__ void calculateSPHForces(int N, glm::vec4 * pos, glm::vec3 * vel, float *density, float * pressure, glm::vec3 * acc) { __shared__ glm::vec4 sharedPositions[blockSize]; __shared__ float sharedDensity[blockSize]; __shared__ float sharedPressure[blockSize]; __shared__ glm::vec3 sharedVelocity[blockSize]; int index = threadIdx.x + (blockIdx.x * blockDim.x); int tid = threadIdx.x; int bid = blockIdx.x; int numberOfLoops = ceil((1.0f*N) / blockSize); glm::vec4 myPos; float myDens; float myPress; glm::vec3 myVel; if(index < N) { myPos = pos[index]; myDens = density[index]; myPress = pressure[index]; myVel = vel[index]; } glm::vec3 pressureAcc = glm::vec3(0); glm::vec3 viscosityAcc = glm::vec3(0); glm::vec3 surfaceAcc = glm::vec3(0); for(int i=0; i<numberOfLoops; i++) { // For this block'd data, bring it into shared memory int deltaIndex = ((i + bid)%numberOfLoops ); int delIndex = deltaIndex * blockSize + tid; if(delIndex < N) { sharedPositions[tid] = pos[delIndex]; sharedDensity[tid] = density[delIndex]; sharedPressure[tid] = pressure[delIndex]; sharedVelocity[tid] = vel[delIndex]; } __syncthreads(); // Traverse shared memory for(int j=0; j< blockSize; j++) { if( index < N && (deltaIndex * blockSize + j < N)) { glm::vec4 theirPos = sharedPositions[j]; float theirDens = sharedDensity[j]; float theirPress = sharedPressure[j]; glm::vec3 theirVel = sharedVelocity[j]; glm::vec3 r = glm::vec3(myPos.x-theirPos.x, myPos.y-theirPos.y, myPos.z-theirPos.z); float xij = glm::length(r); // Symmetrization based on Monaghan float pressureTerm = - theirPos.w * (myPress/(myDens*myDens) + theirPress/(theirDens*theirDens) ) * kernel_press_gradient(xij); if(pressureTerm == pressureTerm && fabs(myDens) > EPSIL && fabs(theirDens) > EPSIL) // NaN check pressureAcc += pressureTerm * r/(xij + EPSIL); if(myDens==myDens && theirDens == theirDens && fabs(myDens) > EPSIL && fabs(theirDens) > EPSIL) // NaN check viscosityAcc += VISCOSITY * theirPos.w * (theirVel - myVel)/(myDens * theirDens) * kernel_visc_laplacian(xij); } } __syncthreads(); } if(index < N) { acc[index] = pressureAcc + viscosityAcc + surfaceAcc + GRAVITY; //printf("%3d: %3.3f %3.3f %3.3f\n",index, acc[index].x, acc[index].y, 
acc[index].z); //printf("\tp: %3.3f %3.3f %3.3f \n", pressureAcc.x, pressureAcc.y, pressureAcc.z); //printf("\tv: %3.3f %3.3f %3.3f \n", viscosityAcc.x, viscosityAcc.y, viscosityAcc.z); } } __global__ void updateSPHExplicit(int N, float dt, glm::vec4 * pos, glm::vec3 * vel, glm::vec3 * acc) { int index = threadIdx.x + (blockIdx.x * blockDim.x); if(index < N) { glm::vec3 newVel = vel[index] + dt * acc[index]; vel[index] = newVel; pos[index].x += dt * newVel.x; pos[index].y += dt * newVel.y; pos[index].z += dt * newVel.z; } } //Simple Euler integration scheme __global__ void updateF(int N, float dt, glm::vec4 * pos, glm::vec3 * vel, glm::vec3 * acc) { int index = threadIdx.x + (blockIdx.x * blockDim.x); glm::vec4 my_pos = glm::vec4(0,0,0,1); if( index < N ) my_pos = pos[index]; glm::vec3 accel = ACC(N, my_pos, pos); if( index < N ) acc[index] = accel; } __global__ void updateP(int N, float dt, glm::vec4 * pos, glm::vec3 * vel, glm::vec3 * acc) { int index = threadIdx.x + (blockIdx.x * blockDim.x); if( index < N ) { vel[index] += acc[index] * dt; pos[index].x += vel[index].x * dt; pos[index].y += vel[index].y * dt; pos[index].z += vel[index].z * dt; } } __global__ void updateVelVerletPart1F(int N, float dt, glm::vec4 * pos, glm::vec3 * vel, glm::vec3 * acc) { int index = threadIdx.x + (blockIdx.x * blockDim.x); glm::vec4 my_pos; glm::vec3 accel; if( index < N ) my_pos = pos[index]; accel = ACC(N,my_pos,pos); if( index < N ) acc[index] = accel; } __global__ void updateVelVerletPart1P(int N, float dt, glm::vec4 * pos, glm::vec3 * vel, glm::vec3 * acc) { int index = threadIdx.x + (blockIdx.x * blockDim.x); if( index < N ) { glm::vec3 deltaPos = dt * (vel[index] + dt * 0.5f * acc[index]); pos[index].x += deltaPos.x; pos[index].y += deltaPos.y; pos[index].z += deltaPos.z; } } __global__ void updateVelVerletPart2F(int N, float dt, glm::vec4 * pos, glm::vec3 * vel, glm::vec3 * acc_2) { int index = threadIdx.x + (blockIdx.x * blockDim.x); glm::vec4 my_pos; glm::vec3 accel; if( index < N ) my_pos = pos[index]; accel = ACC(N,my_pos,pos); if( index < N ) acc_2[index] = accel; } __global__ void updateVelVerletPart2P(int N, float dt, glm::vec4 * pos, glm::vec3 * vel, glm::vec3 * acc, glm::vec3 * acc_2) { int index = threadIdx.x + (blockIdx.x * blockDim.x); if( index < N ) { vel[index] += dt * 0.5f * (acc[index] + acc_2[index]); } } __global__ void handleCollisions(int N, glm::vec4 * pos, glm::vec3 * vel) { int index = threadIdx.x + (blockIdx.x * blockDim.x); if( index < N ) { glm::vec4 myPos = pos[index]; glm::vec3 myVel = vel[index]; // Simple impulse collision handling // Down side, adds energy into the system // Consider stiff springs with collision buffers if( myPos.z < 0.0f) { myPos.z = EPSIL; myVel.z = -DRAG*myVel.z; } if( myPos.z > ZMAX) { myPos.z = ZMAX - EPSIL; myVel.z = -DRAG*myVel.z; } if(myPos.y < -YMAX) { myPos.y = -(YMAX - EPSIL); myVel.y = -DRAG*myVel.y; } if(myPos.y > YMAX) { myPos.y = YMAX - EPSIL; myVel.y = -DRAG*myVel.y; } if(myPos.x < -XMAX) { myPos.x = -(XMAX - EPSIL); myVel.x = -DRAG*myVel.x; } if(myPos.x > XMAX) { myPos.x = XMAX - EPSIL; myVel.x = -DRAG*myVel.x; } pos[index] = myPos; vel[index] = myVel; } } //Update the vertex buffer object //(The VBO is where OpenGL looks for the positions for the planets) __global__ void sendToVBO(int N, glm::vec4 * pos, float * vbo, int width, int height, float s_scale) { int index = threadIdx.x + (blockIdx.x * blockDim.x); float c_scale_w = -2.0f / s_scale; float c_scale_h = -2.0f / s_scale; // z is negative in downward direction float c_scale_z 
= 2.0f / s_scale; if(index<N) { vbo[4*index+0] = pos[index].x*c_scale_w; vbo[4*index+1] = pos[index].y*c_scale_h; vbo[4*index+2] = pos[index].z*c_scale_z; vbo[4*index+3] = 1; } } //Update the texture pixel buffer object //(This texture is where openGL pulls the data for the height map) __global__ void sendToPBO(int N, glm::vec4 * pos, float4 * pbo, int width, int height, float s_scale) { int index = threadIdx.x + (blockIdx.x * blockDim.x); int x = index % width; int y = index / width; float w2 = width / 2.0; float h2 = height / 2.0; float c_scale_w = width / s_scale; float c_scale_h = height / s_scale; glm::vec3 color(0.05, 0.15, 0.3); glm::vec3 acc = ACC(N, glm::vec4((x-w2)/c_scale_w,(y-h2)/c_scale_h,0,1), pos); if(x<width && y<height) { float mag = sqrt(sqrt(acc.x*acc.x + acc.y*acc.y + acc.z*acc.z)); // Each thread writes one pixel location in the texture (textel) pbo[index].w = (mag < 1.0f) ? mag : 1.0f; } } /************************************* * Wrappers for the __global__ calls * *************************************/ //Initialize memory, update some globals void initCuda(int N) { numObjects = N; dim3 fullBlocksPerGrid((int)ceil(float(N)/float(blockSize))); cudaMalloc((void**)&dev_pos, N*sizeof(glm::vec4)); checkCUDAErrorWithLine("Kernel failed!"); cudaMalloc((void**)&dev_vel, N*sizeof(glm::vec3)); checkCUDAErrorWithLine("Kernel failed!"); cudaMalloc((void**)&dev_acc, N*sizeof(glm::vec3)); checkCUDAErrorWithLine("Kernel failed!"); // SPH cudaMalloc((void**)&dev_density, N*sizeof(float)); checkCUDAErrorWithLine("Kernel failed!"); cudaMalloc((void**)&dev_pressure, N*sizeof(float)); checkCUDAErrorWithLine("Kernel failed!"); cudaMalloc((void**)&dev_force, N*sizeof(glm::vec3)); checkCUDAErrorWithLine("Kernel failed!"); // For velocityVerlet cudaMalloc((void**)&dev_acc_2, N*sizeof(glm::vec3)); checkCUDAErrorWithLine("Kernel failed!"); generateRandomPosArray<<<fullBlocksPerGrid, blockSize>>>(1, numObjects, dev_pos, scene_scale, planetMass); checkCUDAErrorWithLine("Kernel failed!"); generateCircularVelArray<<<fullBlocksPerGrid, blockSize>>>(2, numObjects, dev_vel, dev_pos); checkCUDAErrorWithLine("Kernel failed!"); } void freeCuda(int N) { cudaFree(dev_pos); cudaFree(dev_vel); cudaFree(dev_acc); cudaFree(dev_density); cudaFree(dev_pressure); cudaFree(dev_force); cudaFree(dev_acc_2); } void resetSim(int N) { numObjects = N; dim3 fullBlocksPerGrid((int)ceil(float(N)/float(blockSize))); generateRandomPosArray<<<fullBlocksPerGrid, blockSize>>>(1, numObjects, dev_pos, scene_scale, planetMass); checkCUDAErrorWithLine("Kernel failed!"); generateCircularVelArray<<<fullBlocksPerGrid, blockSize>>>(2, numObjects, dev_vel, dev_pos); checkCUDAErrorWithLine("Kernel failed!"); } void cudaCollisionsWrapper() { dim3 fullBlocksPerGrid((int)ceil(float(numObjects)/float(blockSize))); handleCollisions<<<fullBlocksPerGrid, blockSize>>>(numObjects, dev_pos, dev_vel); checkCUDAErrorWithLine("Kernel failed!"); } void cudaNBodyUpdateWrapper(float dt) { dim3 fullBlocksPerGrid((int)ceil(float(numObjects)/float(blockSize))); updateF<<<fullBlocksPerGrid, blockSize>>>(numObjects, dt, dev_pos, dev_vel, dev_acc); updateP<<<fullBlocksPerGrid, blockSize>>>(numObjects, dt, dev_pos, dev_vel, dev_acc); checkCUDAErrorWithLine("Kernel failed!"); cudaCollisionsWrapper(); } void cudaNBodyUpdateVelocityVerletWrapper(float dt) { dim3 fullBlocksPerGrid((int)ceil(float(numObjects)/float(blockSize))); updateVelVerletPart1F<<<fullBlocksPerGrid, blockSize>>>(numObjects, dt, dev_pos, dev_vel, dev_acc); checkCUDAErrorWithLine("Kernel 
failed!"); updateVelVerletPart1P<<<fullBlocksPerGrid, blockSize>>>(numObjects, dt, dev_pos, dev_vel, dev_acc); checkCUDAErrorWithLine("Kernel failed!"); updateVelVerletPart2F<<<fullBlocksPerGrid, blockSize>>>(numObjects, dt, dev_pos, dev_vel, dev_acc_2); checkCUDAErrorWithLine("Kernel failed!"); updateVelVerletPart2P<<<fullBlocksPerGrid, blockSize>>>(numObjects, dt, dev_pos, dev_vel, dev_acc, dev_acc_2); checkCUDAErrorWithLine("Kernel failed!"); cudaCollisionsWrapper(); } void cudaSPHUpdateWrapper(float dt) { dim3 fullBlocksPerGrid((int)ceil(float(numObjects)/float(blockSize))); // Calculate Density and Pressures calculateSPHDensityPressure<<<fullBlocksPerGrid, blockSize>>>(numObjects,dev_pos,dev_density,dev_pressure); checkCUDAErrorWithLine("Kernel failed!"); // Calculate Accelerations calculateSPHForces<<<fullBlocksPerGrid, blockSize>>>(numObjects,dev_pos,dev_vel,dev_density,dev_pressure,dev_acc); checkCUDAErrorWithLine("Kernel failed!"); // Update Positions and Velocities updateSPHExplicit<<<fullBlocksPerGrid, blockSize>>>(numObjects,dt,dev_pos,dev_vel,dev_acc); checkCUDAErrorWithLine("Kernel failed!"); // Check for collisions with boundaries cudaCollisionsWrapper(); checkCUDAErrorWithLine("Kernel failed!"); } void cudaUpdateVBO(float * vbodptr, int width, int height) { dim3 fullBlocksPerGrid((int)ceil(float(numObjects)/float(blockSize))); sendToVBO<<<fullBlocksPerGrid, blockSize>>>(numObjects, dev_pos, vbodptr, width, height, scene_scale); checkCUDAErrorWithLine("Kernel failed!"); } void cudaUpdatePBO(float4 * pbodptr, int width, int height) { dim3 fullBlocksPerGrid((int)ceil(float(width*height)/float(blockSize))); sendToPBO<<<fullBlocksPerGrid, blockSize>>>(numObjects, dev_pos, pbodptr, width, height, scene_scale); checkCUDAErrorWithLine("Kernel failed!"); }
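kernel_general in the file above implements the poly6 smoothing kernel from Mueller et al., W(r, h) = 315 / (64 * pi * h^9) * (h^2 - r^2)^3 for r <= h and 0 otherwise. A small host-side reference of the same formula can help sanity-check the device code against known values; poly6_host, the smoothing length, and the sample radii below are illustrative assumptions.

// Host-side reference for the poly6 kernel used in kernel_general above:
// W(r, h) = 315 / (64 * pi * h^9) * (h^2 - r^2)^3 for 0 <= r <= h, else 0.
#include <cmath>
#include <cstdio>

double poly6_host(double r, double h) {
    if (r > h) return 0.0;
    const double pi = 3.14159265358979323846;
    const double diff = h * h - r * r;
    return 315.0 / (64.0 * pi * std::pow(h, 9)) * diff * diff * diff;
}

int main() {
    const double h = 0.1;               // smoothing length (assumed)
    for (double r : {0.0, 0.05, 0.1, 0.2})
        printf("W(%.2f) = %g\n", r, poly6_host(r, h));
    return 0;
}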
f5dd02a12cfdec07d38d9a92bd9a42ed4652708c.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* * Copyright 1993-2010 NVIDIA Corporation. All rights reserved. * * Please refer to the NVIDIA end user license agreement (EULA) associated * with this source code for terms and conditions that govern your use of * this software. Any use, reproduction, disclosure, or distribution of * this software and related documentation outside the terms of the EULA * is strictly prohibited. * */ /* This example demonstrates how to use the Cuda OpenGL bindings with the * runtime API. * Device code. */ // #ifndef _SIMPLEGL_KERNEL_H_ #define _SIMPLEGL_KERNEL_H_ // true if in front __device__ bool IsPointInFrontOfPlane(float4 point, float4 plane, float eps) { bool ret = false; float d = point.x*plane.x + point.y*plane.y + point.z*plane.z + plane.w; if ( (d + eps) > 0 ) { ret = true; } return ret; } __device__ bool IsPointInSphere(float4 point, float4 center, float radius, float eps) { bool ret = false; float4 c_to_p = make_float4( point.x - center.x, point.y - center.y, point.z - center.z, 0.0f); float d = sqrt( c_to_p.x * c_to_p.x + c_to_p.y * c_to_p.y + c_to_p.z * c_to_p.z ); if ( (d + eps) < radius ) { ret = true; } return ret; } // evalue y=x*x-z*z __device__ bool IsPointAboveMonkeySaddleY(float4 point, float4 origin, float height, float eps) { bool ret = false; // assume it fills the space for now float h = point.x * point.x - point.y*point.y; if ( h < point.z ) { ret = true; } return ret; } /////////////////////////////////////////////////////////////////////////////// //! Simple kernel to modify vertex positions in sine wave pattern //! @param data data in global memory /////////////////////////////////////////////////////////////////////////////// __global__ void kernel(float4* pos, float4* vel,unsigned int width, unsigned int height, float time, bool init, float4 plane,float elevation, bool bWavy) { unsigned int x = blockIdx.x*blockDim.x + threadIdx.x; unsigned int y = blockIdx.y*blockDim.y + threadIdx.y; // calculate uv coordinates float u = x / (float) width; float v = y / (float) height; u = u*2.0f - 1.0f; v = v*2.0f - 1.0f; // calculate simple sine wave pattern float freq = 4.0f; float w = sinf(u*freq + time) * cosf(v*freq + time) * 0.5f; if( init ) { if( bWavy ) { pos[y*width+x] = make_float4( u, v, w + elevation, 1 ); } else { pos[y*width+x] = make_float4( u, v, elevation, 1 ); } vel[y*width+x] = make_float4( 0, 255.0f, 0, 1); return; } else { float4 old_pos = pos[y*width+x]; float4 old_vel = vel[y*width+x]; if( IsPointInFrontOfPlane( old_pos, plane, 0.000001 )) { //pos[y*width+x] = new_pos; if( old_vel.x != 255.0f ) // point entered, add some blue { vel[y*width+x] = make_float4(255.0f,0.0f,255.0f,1.0f); } else { vel[y*width+x] = make_float4(255.0f,0.0f,0.0f,1.0f); } } else { vel[y*width+x] = make_float4(0.0f,255.0f,0.0f,1.0f); } if( bWavy ) { pos[y*width+x] = make_float4( u, v, w + elevation, 1 ); } else { pos[y*width+x] = make_float4( u, v, elevation, 1 ); } return; } return; } /////////////////////////////////////////////////////////////////////////////// //! Simple kernel to modify vertex positions in sine wave pattern //! 
@param data data in global memory /////////////////////////////////////////////////////////////////////////////// __global__ void kernel_clip_sphere(float4* pos, float4* vel,unsigned int width, unsigned int height, float time, bool init, float4 center, float radius, float elevation, bool bWavy) { unsigned int x = blockIdx.x*blockDim.x + threadIdx.x; unsigned int y = blockIdx.y*blockDim.y + threadIdx.y; // calculate uv coordinates float u = x / (float) width; float v = y / (float) height; u = u*2.0f - 1.0f; v = v*2.0f - 1.0f; // calculate simple sine wave pattern float freq = 4.0f; float w = sinf(u*freq + time) * cosf(v*freq + time) * 0.5f; if( init ) { if( bWavy ) { pos[y*width+x] = make_float4( u, v, w + elevation, 1 ); } else { pos[y*width+x] = make_float4( u, v, elevation, 1 ); } vel[y*width+x] = make_float4( 0, 255.0f, 0, 1); return; } else { float4 old_pos = pos[y*width+x]; float4 old_vel = vel[y*width+x]; if( IsPointInSphere( old_pos, center, radius, 0.000001 )) { //pos[y*width+x] = new_pos; if( old_vel.x != 255.0f ) // point entered, add some blue { vel[y*width+x] = make_float4(255.0f,0.0f,255.0f,1.0f); } else { vel[y*width+x] = make_float4(255.0f,0.0f,0.0f,1.0f); } } else { vel[y*width+x] = make_float4(0.0f,255.0f,0.0f,0.1f); } if( bWavy ) { pos[y*width+x] = make_float4( u, v, w + elevation, 1 ); } else { pos[y*width+x] = make_float4( u, v, elevation, 1 ); } return; } return; } /////////////////////////////////////////////////////////////////////////////// //! Simple kernel to modify vertex positions in sine wave pattern //! @param data data in global memory /////////////////////////////////////////////////////////////////////////////// __global__ void kernel_clip_surface_Y_saddle(float4* pos, float4* vel,unsigned int width, unsigned int height, float time, bool init, float4 origin, float elevation, bool bWavy) { unsigned int x = blockIdx.x*blockDim.x + threadIdx.x; unsigned int y = blockIdx.y*blockDim.y + threadIdx.y; // calculate uv coordinates float u = x / (float) width; float v = y / (float) height; u = u*2.0f - 1.0f; v = v*2.0f - 1.0f; // calculate simple sine wave pattern float freq = 4.0f; float w = sinf(u*freq + time) * cosf(v*freq + time) * 0.5f; if( init ) { if( bWavy ) { pos[y*width+x] = make_float4( u, v, w + elevation, 1 ); } else { pos[y*width+x] = make_float4( u, v, elevation, 1 ); } vel[y*width+x] = make_float4( 0, 255.0f, 0, 1); return; } else { float4 old_pos = pos[y*width+x]; float4 old_vel = vel[y*width+x]; if( IsPointAboveMonkeySaddleY( old_pos, origin, elevation, 0.000001 )) { //pos[y*width+x] = new_pos; if( old_vel.x != 255.0f ) // point entered, add some blue { vel[y*width+x] = make_float4(255.0f,0.0f,255.0f,1.0f); } else { vel[y*width+x] = make_float4(255.0f,0.0f,0.0f,1.0f); } } else { vel[y*width+x] = make_float4(0.0f,255.0f,0.0f,0.1f); } if( bWavy ) { pos[y*width+x] = make_float4( u, v, w + elevation, 1 ); } else { pos[y*width+x] = make_float4( u, v, elevation, 1 ); } return; } return; } // Wrapper for the __global__ call that sets up the kernel call extern "C" void launch_kernel(float4* pos, float4* vel, unsigned int mesh_width, unsigned int mesh_height, float time, bool init, float4 plane, float elevation, bool bWavy) { // execute the kernel dim3 block(8, 8, 1); dim3 grid(mesh_width / block.x, mesh_height / block.y, 1); hipLaunchKernelGGL(( kernel), dim3(grid), dim3(block), 0, 0, pos, vel, mesh_width, mesh_height, time, init, plane, elevation, bWavy); } // Wrapper for the __global__ call that sets up the kernel call extern "C" void 
launch_clip_sphere(float4* pos, float4* vel, unsigned int mesh_width, unsigned int mesh_height, float time, bool init, float4 center, float radius, float elevation, bool bWavy) { // execute the kernel dim3 block(8, 8, 1); dim3 grid(mesh_width / block.x, mesh_height / block.y, 1); hipLaunchKernelGGL(( kernel_clip_sphere), dim3(grid), dim3(block), 0, 0, pos, vel,mesh_width, mesh_height, time, init, center, radius, elevation, bWavy); } // Wrapper for the __global__ call that sets up the kernel call extern "C" void launch_clip_surface_Y_saddle(float4* pos, float4* vel, unsigned int mesh_width, unsigned int mesh_height, float time, bool init, float4 origin, float elevation, bool bWavy) { // execute the kernel dim3 block(8, 8, 1); dim3 grid(mesh_width / block.x, mesh_height / block.y, 1); hipLaunchKernelGGL(( kernel_clip_surface_Y_saddle), dim3(grid), dim3(block), 0, 0, pos, vel, mesh_width, mesh_height, time, init, origin, elevation, bWavy); } #endif // #ifndef _SIMPLEGL_KERNEL_H_
f5dd02a12cfdec07d38d9a92bd9a42ed4652708c.cu
/* * Copyright 1993-2010 NVIDIA Corporation. All rights reserved. * * Please refer to the NVIDIA end user license agreement (EULA) associated * with this source code for terms and conditions that govern your use of * this software. Any use, reproduction, disclosure, or distribution of * this software and related documentation outside the terms of the EULA * is strictly prohibited. * */ /* This example demonstrates how to use the Cuda OpenGL bindings with the * runtime API. * Device code. */ // #ifndef _SIMPLEGL_KERNEL_H_ #define _SIMPLEGL_KERNEL_H_ // true if in front __device__ bool IsPointInFrontOfPlane(float4 point, float4 plane, float eps) { bool ret = false; float d = point.x*plane.x + point.y*plane.y + point.z*plane.z + plane.w; if ( (d + eps) > 0 ) { ret = true; } return ret; } __device__ bool IsPointInSphere(float4 point, float4 center, float radius, float eps) { bool ret = false; float4 c_to_p = make_float4( point.x - center.x, point.y - center.y, point.z - center.z, 0.0f); float d = sqrt( c_to_p.x * c_to_p.x + c_to_p.y * c_to_p.y + c_to_p.z * c_to_p.z ); if ( (d + eps) < radius ) { ret = true; } return ret; } // evalue y=x*x-z*z __device__ bool IsPointAboveMonkeySaddleY(float4 point, float4 origin, float height, float eps) { bool ret = false; // assume it fills the space for now float h = point.x * point.x - point.y*point.y; if ( h < point.z ) { ret = true; } return ret; } /////////////////////////////////////////////////////////////////////////////// //! Simple kernel to modify vertex positions in sine wave pattern //! @param data data in global memory /////////////////////////////////////////////////////////////////////////////// __global__ void kernel(float4* pos, float4* vel,unsigned int width, unsigned int height, float time, bool init, float4 plane,float elevation, bool bWavy) { unsigned int x = blockIdx.x*blockDim.x + threadIdx.x; unsigned int y = blockIdx.y*blockDim.y + threadIdx.y; // calculate uv coordinates float u = x / (float) width; float v = y / (float) height; u = u*2.0f - 1.0f; v = v*2.0f - 1.0f; // calculate simple sine wave pattern float freq = 4.0f; float w = sinf(u*freq + time) * cosf(v*freq + time) * 0.5f; if( init ) { if( bWavy ) { pos[y*width+x] = make_float4( u, v, w + elevation, 1 ); } else { pos[y*width+x] = make_float4( u, v, elevation, 1 ); } vel[y*width+x] = make_float4( 0, 255.0f, 0, 1); return; } else { float4 old_pos = pos[y*width+x]; float4 old_vel = vel[y*width+x]; if( IsPointInFrontOfPlane( old_pos, plane, 0.000001 )) { //pos[y*width+x] = new_pos; if( old_vel.x != 255.0f ) // point entered, add some blue { vel[y*width+x] = make_float4(255.0f,0.0f,255.0f,1.0f); } else { vel[y*width+x] = make_float4(255.0f,0.0f,0.0f,1.0f); } } else { vel[y*width+x] = make_float4(0.0f,255.0f,0.0f,1.0f); } if( bWavy ) { pos[y*width+x] = make_float4( u, v, w + elevation, 1 ); } else { pos[y*width+x] = make_float4( u, v, elevation, 1 ); } return; } return; } /////////////////////////////////////////////////////////////////////////////// //! Simple kernel to modify vertex positions in sine wave pattern //! 
@param data data in global memory /////////////////////////////////////////////////////////////////////////////// __global__ void kernel_clip_sphere(float4* pos, float4* vel,unsigned int width, unsigned int height, float time, bool init, float4 center, float radius, float elevation, bool bWavy) { unsigned int x = blockIdx.x*blockDim.x + threadIdx.x; unsigned int y = blockIdx.y*blockDim.y + threadIdx.y; // calculate uv coordinates float u = x / (float) width; float v = y / (float) height; u = u*2.0f - 1.0f; v = v*2.0f - 1.0f; // calculate simple sine wave pattern float freq = 4.0f; float w = sinf(u*freq + time) * cosf(v*freq + time) * 0.5f; if( init ) { if( bWavy ) { pos[y*width+x] = make_float4( u, v, w + elevation, 1 ); } else { pos[y*width+x] = make_float4( u, v, elevation, 1 ); } vel[y*width+x] = make_float4( 0, 255.0f, 0, 1); return; } else { float4 old_pos = pos[y*width+x]; float4 old_vel = vel[y*width+x]; if( IsPointInSphere( old_pos, center, radius, 0.000001 )) { //pos[y*width+x] = new_pos; if( old_vel.x != 255.0f ) // point entered, add some blue { vel[y*width+x] = make_float4(255.0f,0.0f,255.0f,1.0f); } else { vel[y*width+x] = make_float4(255.0f,0.0f,0.0f,1.0f); } } else { vel[y*width+x] = make_float4(0.0f,255.0f,0.0f,0.1f); } if( bWavy ) { pos[y*width+x] = make_float4( u, v, w + elevation, 1 ); } else { pos[y*width+x] = make_float4( u, v, elevation, 1 ); } return; } return; } /////////////////////////////////////////////////////////////////////////////// //! Simple kernel to modify vertex positions in sine wave pattern //! @param data data in global memory /////////////////////////////////////////////////////////////////////////////// __global__ void kernel_clip_surface_Y_saddle(float4* pos, float4* vel,unsigned int width, unsigned int height, float time, bool init, float4 origin, float elevation, bool bWavy) { unsigned int x = blockIdx.x*blockDim.x + threadIdx.x; unsigned int y = blockIdx.y*blockDim.y + threadIdx.y; // calculate uv coordinates float u = x / (float) width; float v = y / (float) height; u = u*2.0f - 1.0f; v = v*2.0f - 1.0f; // calculate simple sine wave pattern float freq = 4.0f; float w = sinf(u*freq + time) * cosf(v*freq + time) * 0.5f; if( init ) { if( bWavy ) { pos[y*width+x] = make_float4( u, v, w + elevation, 1 ); } else { pos[y*width+x] = make_float4( u, v, elevation, 1 ); } vel[y*width+x] = make_float4( 0, 255.0f, 0, 1); return; } else { float4 old_pos = pos[y*width+x]; float4 old_vel = vel[y*width+x]; if( IsPointAboveMonkeySaddleY( old_pos, origin, elevation, 0.000001 )) { //pos[y*width+x] = new_pos; if( old_vel.x != 255.0f ) // point entered, add some blue { vel[y*width+x] = make_float4(255.0f,0.0f,255.0f,1.0f); } else { vel[y*width+x] = make_float4(255.0f,0.0f,0.0f,1.0f); } } else { vel[y*width+x] = make_float4(0.0f,255.0f,0.0f,0.1f); } if( bWavy ) { pos[y*width+x] = make_float4( u, v, w + elevation, 1 ); } else { pos[y*width+x] = make_float4( u, v, elevation, 1 ); } return; } return; } // Wrapper for the __global__ call that sets up the kernel call extern "C" void launch_kernel(float4* pos, float4* vel, unsigned int mesh_width, unsigned int mesh_height, float time, bool init, float4 plane, float elevation, bool bWavy) { // execute the kernel dim3 block(8, 8, 1); dim3 grid(mesh_width / block.x, mesh_height / block.y, 1); kernel<<< grid, block>>>(pos, vel, mesh_width, mesh_height, time, init, plane, elevation, bWavy); } // Wrapper for the __global__ call that sets up the kernel call extern "C" void launch_clip_sphere(float4* pos, float4* vel, unsigned 
int mesh_width, unsigned int mesh_height, float time, bool init, float4 center, float radius, float elevation, bool bWavy) { // execute the kernel dim3 block(8, 8, 1); dim3 grid(mesh_width / block.x, mesh_height / block.y, 1); kernel_clip_sphere<<< grid, block>>>( pos, vel,mesh_width, mesh_height, time, init, center, radius, elevation, bWavy); } // Wrapper for the __global__ call that sets up the kernel call extern "C" void launch_clip_surface_Y_saddle(float4* pos, float4* vel, unsigned int mesh_width, unsigned int mesh_height, float time, bool init, float4 origin, float elevation, bool bWavy) { // execute the kernel dim3 block(8, 8, 1); dim3 grid(mesh_width / block.x, mesh_height / block.y, 1); kernel_clip_surface_Y_saddle<<< grid, block>>>(pos, vel, mesh_width, mesh_height, time, init, origin, elevation, bWavy); } #endif // #ifndef _SIMPLEGL_KERNEL_H_
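IsPointInFrontOfPlane in the file above classifies a point by the signed distance nx*px + ny*py + nz*pz + d, with the plane packed as float4 (nx, ny, nz, d) and a small epsilon tolerance. A tiny host-side sketch of the same half-space test follows; the Vec4 struct, the plane, and the sample points are illustrative assumptions.

// Host-side sketch of the signed-distance test used by IsPointInFrontOfPlane:
// a point p is "in front" of plane (nx, ny, nz, d) when dot(n, p) + d > -eps.
#include <cstdio>

struct Vec4 { float x, y, z, w; };

bool inFrontOfPlane(Vec4 p, Vec4 plane, float eps) {
    float d = p.x * plane.x + p.y * plane.y + p.z * plane.z + plane.w;
    return (d + eps) > 0.0f;
}

int main() {
    Vec4 plane = {0.0f, 0.0f, 1.0f, -0.5f};   // plane z = 0.5, normal +z
    Vec4 above = {0.0f, 0.0f, 1.0f, 1.0f};    // expected: in front
    Vec4 below = {0.0f, 0.0f, 0.0f, 1.0f};    // expected: behind
    printf("above: %d, below: %d\n",
           inFrontOfPlane(above, plane, 1e-6f),
           inFrontOfPlane(below, plane, 1e-6f));
    return 0;
}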
c07f57a6e3f6e98cc3300521b5c63c3d1a4ba4cf.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"

#define CUDA_KERNEL_LOOP(i, n) \
  for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < (n); i += blockDim.x * gridDim.x)

const int CUDA_NUM_THREADS = 1024;

__global__ void calculate_dbias_kernel(
    int n,
    const float* grad_output,
    float* grad_bias,
    const int out_channels,
    const int height_out,
    const int width_out)
{
  CUDA_KERNEL_LOOP(index, n) {
    const int c_col = (index / width_out / height_out) % out_channels;
    float value = grad_output[index];
    atomicAdd(grad_bias + c_col, value);
  }
}
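// ---------------------------------------------------------------------------
// Illustrative note (not part of the original file). The kernel above recovers
// the channel of a flat NCHW index with (index / width_out / height_out) %
// out_channels. Written out as a full host-side decomposition (hypothetical
// helper, for illustration only):
//
//   index = ((n * C + c) * H + h) * W + w   for a tensor of shape N x C x H x W
// ---------------------------------------------------------------------------
static void decompose_nchw(int index, int C, int H, int W,
                           int* n, int* c, int* h, int* w)
{
    *w = index % W;
    *h = (index / W) % H;
    *c = (index / W / H) % C;   // same expression as c_col in the kernel above
    *n = index / W / H / C;
}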
c07f57a6e3f6e98cc3300521b5c63c3d1a4ba4cf.cu
#include "includes.h"

#define CUDA_KERNEL_LOOP(i, n) \
  for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < (n); i += blockDim.x * gridDim.x)

const int CUDA_NUM_THREADS = 1024;

__global__ void calculate_dbias_kernel(
    int n,
    const float* grad_output,
    float* grad_bias,
    const int out_channels,
    const int height_out,
    const int width_out)
{
  CUDA_KERNEL_LOOP(index, n) {
    const int c_col = (index / width_out / height_out) % out_channels;
    float value = grad_output[index];
    atomicAdd(grad_bias + c_col, value);
  }
}
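// ---------------------------------------------------------------------------
// Illustrative sketch (not part of the original file). grad_output holds
// n = N * out_channels * height_out * width_out values, and the grid-stride
// CUDA_KERNEL_LOOP lets a fixed grid cover any n. A typical launch, using the
// CUDA_NUM_THREADS constant defined above together with a hypothetical
// GET_BLOCKS helper and wrapper name:
// ---------------------------------------------------------------------------
static inline int GET_BLOCKS(const int n)
{
    return (n + CUDA_NUM_THREADS - 1) / CUDA_NUM_THREADS;   // ceiling division
}

void calculate_dbias(const float* grad_output, float* grad_bias,
                     int batch, int out_channels, int height_out, int width_out)
{
    const int n = batch * out_channels * height_out * width_out;
    // grad_bias is assumed to be zero-initialized before the atomic accumulation
    calculate_dbias_kernel<<<GET_BLOCKS(n), CUDA_NUM_THREADS>>>(
        n, grad_output, grad_bias, out_channels, height_out, width_out);
}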
7882cadaceb7135e2a7de04839b58b027ba0de2d.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <cstdio> #include <vector> #include <string> #include <algorithm> #include <cmath> #include <string> #include <iostream> #include <fstream> #include "containers.h" #include "gravity.h" #include "helper_math.h" /* I know that I messed up somewhere up to ex10 b) but I still continued with the rest, of course it is hard to debug... also: Daint was hardly executing my code, was too jammed. I ran out of time to do more.... */ //======================================================================================================================= // Naive: one thread per particle //======================================================================================================================= template<typename Interaction> __global__ void nbodyNaiveKernel(const float3* __restrict__ coordinates, float3* forces, int n, float L, Interaction interaction) { // Get unique id of the thread const int pid = blockIdx.x * blockDim.x + threadIdx.x; // Thread id is mapped onto particle id // If the id >= than the total number of particles, just exit that thread if (pid >= n) return; // Load particle coordinates float3 dst = coordinates[pid]; // Loop over all the other particles, compute the force and accumulate it float3 f = make_float3(0); for (int i=0; i<n; i++) if (i != pid) f += interaction(dst, coordinates[i], L); // Write back the force forces[pid] = f; } template<typename Interaction> void nbodyNaive(hipStream_t stream, int L, PinnedBuffer<float3>& coordinates, PinnedBuffer<float3>& forces, Interaction interaction) { int nparticles = coordinates.size(); // Use 4 warps in a block, calculate number of blocks, // such that total number of threads is >= than number of particles const int nthreads = 128; const int nblocks = (nparticles + nthreads - 1) / nthreads; hipLaunchKernelGGL(( nbodyNaiveKernel), dim3(nblocks), dim3(nthreads), 0, stream , coordinates.devPtr(), forces.devPtr(), nparticles, L, interaction); } //======================================================================================================================= // One thread per particle + shared memory //======================================================================================================================= template<typename Interaction> __global__ void nbodySharedKernel(const float3* coordinates, float3* forces, int n, float L, Interaction interaction) { const int pid = blockIdx.x * blockDim.x + threadIdx.x; // Use shared memory to cache the source particles extern __shared__ float3 cache[]; // Since threads will fill shared memory, all of them in the block must be active // But only valid threads should load destination particles float3 dst = pid < n ? 
coordinates[pid] : make_float3(0); // Loop over all the other particles, compute the force and accumulate it float3 f = make_float3(0); for (int i=0; i<n; i += blockDim.x) { // All the threads in a block read in a coalesced manner into the shared memory if (i+threadIdx.x < n) cache[threadIdx.x] = coordinates[i+threadIdx.x]; // Wait untill all the warps in the block are done __syncthreads(); // Use the cached values in the shared memory to compute the interactions #pragma unroll 9 for (int j=0; j<min(blockDim.x, n-i); j++) if (pid != i+j) // interact: f += interaction(dst, cache[j], L); // Again wait until all the warps are done before moving on __syncthreads(); } // If the id is valid, write back the force if (pid < n) forces[pid] = f; } template<typename Interaction> void nbodyShared(hipStream_t stream, float L, PinnedBuffer<float3>& coordinates, PinnedBuffer<float3>& forces, Interaction interaction) { int nparticles = coordinates.size(); const int nthreads = 128; const int nblocks = (nparticles + nthreads - 1) / nthreads; // Allocate shared memory: nthreads*sizeof(float3) PER BLOCK hipLaunchKernelGGL(( nbodySharedKernel), dim3(nblocks), dim3(nthreads), nthreads*sizeof(float3), stream , coordinates.devPtr(), forces.devPtr(), nparticles, L, interaction); } //======================================================================================================================= // One thread per N particles + shared memory + split //======================================================================================================================= template<int nDestParticles, typename Interaction> __global__ void nbodySharedPlusILPKernel(const float3* coordinates, float3* forces, int n, float L, Interaction interaction) { const int chunkId = blockIdx.y; const int startId = blockIdx.x * blockDim.x + threadIdx.x; const int dstStart = startId * nDestParticles; float3 dsts[nDestParticles], f[nDestParticles]; extern __shared__ float3 cache[]; for (int i=0; i<nDestParticles; i++) f[i] = make_float3(0.0f, 0.0f, 0.0f); for (int i=0; i<nDestParticles; i++) if (dstStart+i < n) dsts[i] = coordinates[dstStart + i]; const int chSize = (n+gridDim.y-1) / gridDim.y; const int start = chunkId*chSize; const int end = min( (chunkId+1)*chSize, n ); for (int i = start; i < end; i+=blockDim.x) { if (i+threadIdx.x < n) cache[threadIdx.x] = coordinates[i+threadIdx.x]; __syncthreads(); #pragma unroll 4 for (int j=0; j<min(blockDim.x, end-i); j++) { const float3 src = cache[j]; for (int d=0; d<nDestParticles; d++) if ( dstStart + d != i+j ) f[d] += interaction(dsts[d], src, L); } __syncthreads(); } for (int i=0; i<nDestParticles; i++) { atomicAdd(&forces[dstStart + i].x, f[i].x); atomicAdd(&forces[dstStart + i].y, f[i].y); atomicAdd(&forces[dstStart + i].z, f[i].z); } } template<typename Interaction> void nbodySharedPlusILP(hipStream_t stream, float L, PinnedBuffer<float3>& coordinates, PinnedBuffer<float3>& forces, Interaction interaction) { int nparticles = coordinates.size(); const int ndsts = 3; const int nthreads = 128; const int nblocks = ( (nparticles + ndsts-1) / ndsts + nthreads-1 ) / nthreads; const dim3 nthreads3(nthreads, 1, 1); const dim3 nblocks3(nblocks, 10, 1); forces.clearDevice(0); hipLaunchKernelGGL(( nbodySharedPlusILPKernel<ndsts>) , dim3(nblocks3), dim3(nthreads3), nthreads*sizeof(float3), stream , coordinates.devPtr(), forces.devPtr(), nparticles, L, interaction); } template<typename Interaction> std::pair<double, double> diffNorms(const float3* coordinates, float3* forces, int nparticles, 
float L, int nchecks, Interaction interaction) { const int stride = max(1, nparticles / nchecks); double linf = 0, l2 = 0; #pragma omp parallel for reduction(+:l2) reduction(max:linf) for (int i=0; i<nparticles; i+=stride) { double3 totalF = make_double3(0, 0, 0); for (int j=0; j<nparticles; j++) if (i != j) { float3 curF = interaction(coordinates[i], coordinates[j], L); totalF.x += curF.x; totalF.y += curF.y; totalF.z += curF.z; } double3 relDiff; relDiff.x = (totalF.x - forces[i].x) / totalF.x; relDiff.y = (totalF.y - forces[i].y) / totalF.y; relDiff.z = (totalF.z - forces[i].z) / totalF.z; linf = ::max({ linf, fabs(relDiff.x), fabs(relDiff.y), fabs(relDiff.z) }); l2 += relDiff.x*relDiff.x + relDiff.y*relDiff.y + relDiff.z*relDiff.z; //printf("Particle %d: reference [%f %f %f], got [%f %f %f]\n", i, f.x, f.y, f.z, forces[i].x, forces[i].y, forces[i].z); } return { linf, sqrt(l2) / nchecks }; } template<typename Kernel, typename Interaction> void runCheckReport( float L, // domain size PinnedBuffer<float3>& coordinates, PinnedBuffer<float3>& forces, int nchecks, int nrepetitions, std::string kernelName, Kernel kernel, Interaction interaction) { // Check for input consistency assert(coordinates.size() == forces.size()); const int nparticles = coordinates.size(); // Total execution time of the kernel float totalTime = 0; // Clear the forces forces.clear(0); // Allocate CUDA events to measure kernel runtime hipEvent_t start, stop; hipEventCreate(&start); hipEventCreate(&stop); // Compute the forces on the GPU // Do it several times for more precise timings for (int i=0; i<nrepetitions; i++) { hipEventRecord(start); kernel(L, coordinates, forces, interaction); hipEventRecord(stop); hipEventSynchronize(stop); float ms = 0; hipEventElapsedTime(&ms, start, stop); totalTime += ms; } coordinates.downloadFromDevice(0); forces. 
downloadFromDevice(0); // Perform check against CPU auto errs = diffNorms(coordinates.hostPtr(), forces.hostPtr(), nparticles, L, nchecks, interaction); printf("Kernel '%s' statistics:\n avg runtime: %.3fms\n errors: Linf: %e, L2 %e\n\n", kernelName.c_str(), totalTime / nrepetitions, errs.first, errs.second); } //======================================================================================================================= // Naive: one thread per particle //======================================================================================================================= //======================================================================================================================= // Naive: one thread per particle //======================================================================================================================= __global__ void nbodyNaiveKernel_pos(float3* coordinates, const float3* forces, int n, const float3* velocity, const float dt) { // Get unique id of the thread const int pid = blockIdx.x * blockDim.x + threadIdx.x; // Thread id is mapped onto particle id // If the id >= than the total number of particles, just exit that thread if (pid >= n) return; // Load float3 r_old = coordinates[pid]; float3 v_old = velocity[pid]; float3 a_old = forces[pid]; // save: coordinates[pid] = r_old + v_old*dt + 0.5*a_old*dt*dt; } void nbody_posKernel(float dt, PinnedBuffer<float3>& coordinates, PinnedBuffer<float3>& forces, PinnedBuffer<float3>& velocity) { int nparticles = coordinates.size(); // Use 4 warps in a block, calculate number of blocks, // such that total number of threads is >= than number of particles const int nthreads = 128; const int nblocks = (nparticles + nthreads - 1) / nthreads; hipLaunchKernelGGL(( nbodyNaiveKernel_pos), dim3(nblocks), dim3(nthreads) , 0, 0, coordinates.devPtr(), forces.devPtr(), nparticles, velocity.devPtr(), dt); } __global__ void nbodyNaiveKernel_velo(const float3* old_forces, const float3* new_forces, int n, float3* velocity, const float dt) { // Get unique id of the thread const int pid = blockIdx.x * blockDim.x + threadIdx.x; // Thread id is mapped onto particle id // If the id >= than the total number of particles, just exit that thread if (pid >= n) return; // Load float3 a_old = old_forces[pid]; float3 a_new = new_forces[pid]; float3 v_old = velocity[pid]; // save: velocity[pid] = v_old + 0.5*(a_old + a_new)*dt; } void nbody_veloKernel(hipStream_t stream, float dt, PinnedBuffer<float3>& old_forces, PinnedBuffer<float3>& new_forces, PinnedBuffer<float3>& velocity) { int nparticles = velocity.size(); // Use 4 warps in a block, calculate number of blocks, // such that total number of threads is >= than number of particles const int nthreads = 128; const int nblocks = (nparticles + nthreads - 1) / nthreads; hipLaunchKernelGGL(( nbodyNaiveKernel_velo), dim3(nblocks), dim3(nthreads), 0, stream, old_forces.devPtr(), new_forces.devPtr(), nparticles, velocity.devPtr(), dt); } // ------------------------------- EX10: __global__ void nbodyNaiveKernel_Ekin(int n, const float3* velocity, float* Ekin_tot) { // Get unique id of the thread const int pid = blockIdx.x * blockDim.x + threadIdx.x; const int laneid = threadIdx.x % 32; // Thread id is mapped onto particle id // If the id >= than the total number of particles, just exit that thread float ek_loc = 0; if (pid < n) { float3 v = velocity[pid]; ek_loc = dot(v,v)/2.0; } // sum within warp: #pragma unroll for(int mask = 32 / 2 ; mask > 0 ; mask >>= 1) ek_loc += 
__shfl_xor(ek_loc, mask); // The ek_loc variable of laneid 0 contains the reduction. if (laneid == 0) { // write back: atomicAdd(Ekin_tot, ek_loc); } } void nbodyKernel_Ekin(hipStream_t stream, PinnedBuffer<float3>& velocity, PinnedBuffer<float>& Ekin_tot) { int nparticles = velocity.size(); // Use 4 warps in a block, calculate number of blocks, // such that total number of threads is >= than number of particles const int nthreads = 128; const int nblocks = (nparticles + nthreads - 1) / nthreads; Ekin_tot.clearDevice(0); hipLaunchKernelGGL(( nbodyNaiveKernel_Ekin), dim3(nblocks), dim3(nthreads), 0, stream , nparticles, velocity.devPtr(), Ekin_tot.devPtr()); } //======================================================================================================================= // Naive: one thread per particle //======================================================================================================================= template<typename Interaction> __global__ void nbodyNaiveKernel_Epot(const float3* __restrict__ coordinates, float* Epot_total, int n, float L, Interaction interaction) { // Get unique id of the thread const int pid = blockIdx.x * blockDim.x + threadIdx.x; const int laneid = threadIdx.x % 32; // Thread id is mapped onto particle id // If the id >= than the total number of particles, just exit that thread float Epot_local = 0; if (pid >= n) return; // Load particle coordinates float3 dst = coordinates[pid]; // Loop over all the other particles, compute the force and accumulate it for (int i=0; i<n; i++) if (i > pid) Epot_local += interaction.energy(dst, coordinates[i], L); // sum within warp: #pragma unroll for(int mask = 32 / 2 ; mask > 0 ; mask >>= 1) Epot_local += __shfl_xor(Epot_local, mask); // The ek_loc variable of laneid 0 contains the reduction. 
if (laneid == 0) { // write back: atomicAdd(Epot_total, Epot_local); } } template<typename Interaction> void nbodyNaive_Epot(hipStream_t stream, int L, const PinnedBuffer<float3>& coordinates, PinnedBuffer<float>& Epot_total, Interaction interaction) { int nparticles = coordinates.size(); // Use 4 warps in a block, calculate number of blocks, // such that total number of threads is >= than number of particles const int nthreads = 128; const int nblocks = (nparticles + nthreads - 1) / nthreads; Epot_total.clearDevice(0); hipLaunchKernelGGL(( nbodyNaiveKernel_Epot), dim3(nblocks), dim3(nthreads), 0, stream , coordinates.devPtr(), Epot_total.devPtr(), nparticles, L, interaction); } __global__ void nbodyNaiveKernel_rescaleVelocities(int n, float3* velocity, const float* scaleFactor) { // Get unique id of the thread const int pid = blockIdx.x * blockDim.x + threadIdx.x; // Thread id is mapped onto particle id // If the id >= than the total number of particles, just exit that thread if (pid >= n) return; // Load float3 v_old = velocity[pid]; // save: velocity[pid] = v_old*scaleFactor[0]; } void nbodyKernel_rescaleVelocities(PinnedBuffer<float3>& velocity, PinnedBuffer<float>& scaleFactor) { int nparticles = velocity.size(); // Use 4 warps in a block, calculate number of blocks, // such that total number of threads is >= than number of particles const int nthreads = 128; const int nblocks = (nparticles + nthreads - 1) / nthreads; hipLaunchKernelGGL(( nbodyNaiveKernel_rescaleVelocities), dim3(nblocks), dim3(nthreads), 0, 0, nparticles, velocity.devPtr(), scaleFactor.devPtr()); } //======================================================================================================================= // Naive: one thread per particle //======================================================================================================================= template<typename Interaction> __global__ void nbodyNaiveKernel_FrSum(const float3* __restrict__ coordinates, int n, float L, float* sum, Interaction interaction) { // Get unique id of the thread const int pid = blockIdx.x * blockDim.x + threadIdx.x; const int laneid = threadIdx.x % 32; // Thread id is mapped onto particle id // If the id >= than the total number of particles, just exit that thread float sum_local = 0; if (pid >= n) return; // Load particle coordinates float3 dst = coordinates[pid]; // Loop over all the other particles, compute the force and accumulate it for (int i=0; i<n; i++) if (i > pid) sum_local += interaction.Fr(dst, coordinates[i], L); // sum within warp: #pragma unroll for(int mask = 32 / 2 ; mask > 0 ; mask >>= 1) sum_local += __shfl_xor(sum_local, mask); // The ek_loc variable of laneid 0 contains the reduction. 
if (laneid == 0) { // write back: atomicAdd(sum, sum_local); } } template<typename Interaction> void nbodyNaive_FrSum(hipStream_t stream, int L, const PinnedBuffer<float3>& coordinates, PinnedBuffer<float>& sum, Interaction interaction) { int nparticles = coordinates.size(); // Use 4 warps in a block, calculate number of blocks, // such that total number of threads is >= than number of particles const int nthreads = 128; const int nblocks = (nparticles + nthreads - 1) / nthreads; sum.clearDevice(0); hipLaunchKernelGGL(( nbodyNaiveKernel_FrSum), dim3(nblocks), dim3(nthreads), 0, stream , coordinates.devPtr(), nparticles, L,sum.devPtr(), interaction); } void init_data( PinnedBuffer<float3> &coords, PinnedBuffer<float3> &velocity, PinnedBuffer<float3> &forces, const int n, const float L ) { const int n_side = ::pow(n, 1.0/3.0)+1; const float dl = L / (float)n_side; for (size_t x = 0; x < n_side; x++) { for (size_t y = 0; y < n_side; y++) { for (size_t z = 0; z < n_side; z++) { int id = (x*n_side + y)*n_side + z; if (id<n) { coords[id] = make_float3(x*dl, y*dl, z*dl); velocity[id] = make_float3(0,0,0); forces[id] = make_float3(0,0,0); } } } } } void saveData(std::string fileName, const float3* data, const int n) { std::ofstream vfile; vfile.open (fileName); vfile << n << "\n"; vfile << "# comment line\n"; for (size_t i = 0; i < n; i++) { vfile << "0 " << data[i].x << " " << data[i].y << " " << data[i].z << "\n"; } vfile.close(); } template<typename Interaction> void runSimulation( PinnedBuffer<float3> &coordinates, PinnedBuffer<float3> &velocity, PinnedBuffer<float3> &forces, const int n, const float L, const float dt, const float T, Interaction f_interaction, const float Temp0 ) { // Check for input consistency assert(coordinates.size() == forces.size()); assert(coordinates.size() == velocity.size()); const int nparticles = coordinates.size(); PinnedBuffer<float3> temp_forces(n); PinnedBuffer<float> Epot_total(1); PinnedBuffer<float> Ekin_total(1); PinnedBuffer<float> Temp_scale_factor(1); PinnedBuffer<float> FrSum(1); // Total execution time of the kernel float totalTime = 0;//gpu // Allocate CUDA events to measure kernel runtime hipEvent_t start, stop; hipEventCreate(&start); hipEventCreate(&stop); hipStream_t streamCompute, streamDataload; hipStreamCreate(&streamCompute); hipStreamCreate(&streamDataload); float t = 0;// algo int step_counter = 0; while (t < T) { // update r (coordinates): hipDeviceSynchronize(); nbody_posKernel(dt, coordinates, forces, velocity); hipDeviceSynchronize(); if (false) { coordinates.downloadFromDevice(0); printf("coordinates[0]: %.4f, %.4f, %.4f\n\n", coordinates[0].x, coordinates[0].y, coordinates[0].z); } // issue coordinate loading: if (step_counter % 100 == 0) { coordinates.downloadFromDevice(streamDataload, false); // no synch } // get new forces: nbodyShared<decltype(f_interaction)>(streamCompute, L, coordinates, temp_forces, f_interaction); if (false) { forces.downloadFromDevice(0); printf("force[0]: %.4f, %.4f, %.4f\n\n", forces[0].x, forces[0].y, forces[0].z); temp_forces.downloadFromDevice(0); printf("temp_forces[0]: %.4f, %.4f, %.4f\n\n", temp_forces[0].x, temp_forces[0].y, temp_forces[0].z); } // update velocity: nbody_veloKernel(streamCompute, dt, forces, temp_forces, velocity); if (false) { velocity.downloadFromDevice(0); printf("velocity[0]: %.4f, %.4f, %.4f\n\n", velocity[0].x, velocity[0].y, velocity[0].z); } // swap forces: std::swap(forces, temp_forces); t += dt; if (step_counter % 100 == 0) { // calculate energy: 
nbodyNaive_Epot(streamCompute, L, coordinates, Epot_total, f_interaction); nbodyKernel_Ekin(streamCompute, velocity, Ekin_total); // get values from GPU: Epot_total.downloadFromDevice(0); Ekin_total.downloadFromDevice(0); printf("t: %.4f\n\n", t); printf("Epot: %.4f, Ekin: %.4f, E: %.4f\n\n", Epot_total[0], Ekin_total[0], Epot_total[0]+Ekin_total[0]); // data already loaded ! hipDeviceSynchronize(); saveData("dump_" + std::to_string(step_counter) + ".txt", coordinates.hostPtr(), n); } if (step_counter % 10 == 9) { // do temperature control: nbodyKernel_Ekin(streamCompute, velocity, Ekin_total); Ekin_total.downloadFromDevice(0); const float TempCurr = 2.0/(3.0 * n)*Ekin_total[0]; const float rescale_factor = std::sqrt(Temp0 / TempCurr); printf("rescale_factor: %.4f\n\n", rescale_factor); // sqrt because v is squared for energy. // go scale velocities: Temp_scale_factor[0] = rescale_factor; Temp_scale_factor.uploadToDevice(0); nbodyKernel_rescaleVelocities(velocity, Temp_scale_factor); /// print pressure before scaling: nbodyNaive_FrSum(streamCompute, L, coordinates, FrSum, f_interaction); FrSum.downloadFromDevice(0); const float V = L*L*L; const float pressure = 1.0 * TempCurr * n / V + 1.0/(3.0*V)*FrSum[0]; printf("pressure: %.4f\n\n", pressure); } step_counter++; } coordinates.downloadFromDevice(0); forces. downloadFromDevice(0); velocity. downloadFromDevice(0); printf("runtime: %.3fms\n\n", totalTime); } int main(int argc, char** argv) { int n = 50000; float L = 10; float dt = 0.000001;//0.0001; float T = 1.0; float epsilon = 0.1; float sigma = 0.5; if (argc > 1) { n = atoi(argv[1]); assert(n > 0); } if (argc > 2) { epsilon = std::stof(argv[2]); assert(epsilon > 0); } if (argc > 3) { sigma = std::stof(argv[3]); assert(sigma > 0); } PinnedBuffer<float3> coordinates(n), forces(n), velocity(n); init_data(coordinates, velocity, forces, n, L); // Transfer data to the GPU coordinates.uploadToDevice(0); velocity.uploadToDevice(0); forces.uploadToDevice(0); //Pairwise_Gravity gravity(10.0); Pairwise_LJ ljforce( epsilon, sigma ); runSimulation( coordinates, velocity, forces, n, L, dt, T, ljforce, 10.0// Temp0 -> a complete guess. ); return 0; }
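// ---------------------------------------------------------------------------
// Illustrative summary (not part of the original file). The integrator and the
// thermostat/pressure code above use the standard expressions, with particle
// mass m = 1 and k_B = 1:
//
//   r(t+dt) = r(t) + v(t)*dt + 0.5*a(t)*dt^2             (nbodyNaiveKernel_pos)
//   a(t+dt) = F(r(t+dt))                                 (nbodyShared)
//   v(t+dt) = v(t) + 0.5*(a(t) + a(t+dt))*dt             (nbodyNaiveKernel_velo)
//
//   T      = 2*Ekin / (3*N)                              (kinetic temperature)
//   lambda = sqrt(T0 / T)                                 (velocity rescale factor)
//   P      = N*T/V + (1/(3*V)) * sum_{i<j} f_ij . r_ij    (virial pressure; the
//                                                          pair term is what
//                                                          interaction.Fr
//                                                          accumulates in FrSum)
// ---------------------------------------------------------------------------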
7882cadaceb7135e2a7de04839b58b027ba0de2d.cu
#include <cstdio> #include <vector> #include <string> #include <algorithm> #include <cmath> #include <string> #include <iostream> #include <fstream> #include "containers.h" #include "gravity.h" #include "helper_math.h" /* I know that I messed up somewhere up to ex10 b) but I still continued with the rest, of course it is hard to debug... also: Daint was hardly executing my code, was too jammed. I ran out of time to do more.... */ //======================================================================================================================= // Naive: one thread per particle //======================================================================================================================= template<typename Interaction> __global__ void nbodyNaiveKernel(const float3* __restrict__ coordinates, float3* forces, int n, float L, Interaction interaction) { // Get unique id of the thread const int pid = blockIdx.x * blockDim.x + threadIdx.x; // Thread id is mapped onto particle id // If the id >= than the total number of particles, just exit that thread if (pid >= n) return; // Load particle coordinates float3 dst = coordinates[pid]; // Loop over all the other particles, compute the force and accumulate it float3 f = make_float3(0); for (int i=0; i<n; i++) if (i != pid) f += interaction(dst, coordinates[i], L); // Write back the force forces[pid] = f; } template<typename Interaction> void nbodyNaive(cudaStream_t stream, int L, PinnedBuffer<float3>& coordinates, PinnedBuffer<float3>& forces, Interaction interaction) { int nparticles = coordinates.size(); // Use 4 warps in a block, calculate number of blocks, // such that total number of threads is >= than number of particles const int nthreads = 128; const int nblocks = (nparticles + nthreads - 1) / nthreads; nbodyNaiveKernel<<< nblocks, nthreads, 0, stream >>> (coordinates.devPtr(), forces.devPtr(), nparticles, L, interaction); } //======================================================================================================================= // One thread per particle + shared memory //======================================================================================================================= template<typename Interaction> __global__ void nbodySharedKernel(const float3* coordinates, float3* forces, int n, float L, Interaction interaction) { const int pid = blockIdx.x * blockDim.x + threadIdx.x; // Use shared memory to cache the source particles extern __shared__ float3 cache[]; // Since threads will fill shared memory, all of them in the block must be active // But only valid threads should load destination particles float3 dst = pid < n ? 
coordinates[pid] : make_float3(0); // Loop over all the other particles, compute the force and accumulate it float3 f = make_float3(0); for (int i=0; i<n; i += blockDim.x) { // All the threads in a block read in a coalesced manner into the shared memory if (i+threadIdx.x < n) cache[threadIdx.x] = coordinates[i+threadIdx.x]; // Wait untill all the warps in the block are done __syncthreads(); // Use the cached values in the shared memory to compute the interactions #pragma unroll 9 for (int j=0; j<min(blockDim.x, n-i); j++) if (pid != i+j) // interact: f += interaction(dst, cache[j], L); // Again wait until all the warps are done before moving on __syncthreads(); } // If the id is valid, write back the force if (pid < n) forces[pid] = f; } template<typename Interaction> void nbodyShared(cudaStream_t stream, float L, PinnedBuffer<float3>& coordinates, PinnedBuffer<float3>& forces, Interaction interaction) { int nparticles = coordinates.size(); const int nthreads = 128; const int nblocks = (nparticles + nthreads - 1) / nthreads; // Allocate shared memory: nthreads*sizeof(float3) PER BLOCK nbodySharedKernel<<< nblocks, nthreads, nthreads*sizeof(float3), stream >>> (coordinates.devPtr(), forces.devPtr(), nparticles, L, interaction); } //======================================================================================================================= // One thread per N particles + shared memory + split //======================================================================================================================= template<int nDestParticles, typename Interaction> __global__ void nbodySharedPlusILPKernel(const float3* coordinates, float3* forces, int n, float L, Interaction interaction) { const int chunkId = blockIdx.y; const int startId = blockIdx.x * blockDim.x + threadIdx.x; const int dstStart = startId * nDestParticles; float3 dsts[nDestParticles], f[nDestParticles]; extern __shared__ float3 cache[]; for (int i=0; i<nDestParticles; i++) f[i] = make_float3(0.0f, 0.0f, 0.0f); for (int i=0; i<nDestParticles; i++) if (dstStart+i < n) dsts[i] = coordinates[dstStart + i]; const int chSize = (n+gridDim.y-1) / gridDim.y; const int start = chunkId*chSize; const int end = min( (chunkId+1)*chSize, n ); for (int i = start; i < end; i+=blockDim.x) { if (i+threadIdx.x < n) cache[threadIdx.x] = coordinates[i+threadIdx.x]; __syncthreads(); #pragma unroll 4 for (int j=0; j<min(blockDim.x, end-i); j++) { const float3 src = cache[j]; for (int d=0; d<nDestParticles; d++) if ( dstStart + d != i+j ) f[d] += interaction(dsts[d], src, L); } __syncthreads(); } for (int i=0; i<nDestParticles; i++) { atomicAdd(&forces[dstStart + i].x, f[i].x); atomicAdd(&forces[dstStart + i].y, f[i].y); atomicAdd(&forces[dstStart + i].z, f[i].z); } } template<typename Interaction> void nbodySharedPlusILP(cudaStream_t stream, float L, PinnedBuffer<float3>& coordinates, PinnedBuffer<float3>& forces, Interaction interaction) { int nparticles = coordinates.size(); const int ndsts = 3; const int nthreads = 128; const int nblocks = ( (nparticles + ndsts-1) / ndsts + nthreads-1 ) / nthreads; const dim3 nthreads3(nthreads, 1, 1); const dim3 nblocks3(nblocks, 10, 1); forces.clearDevice(0); nbodySharedPlusILPKernel<ndsts> <<< nblocks3, nthreads3, nthreads*sizeof(float3), stream >>> ( coordinates.devPtr(), forces.devPtr(), nparticles, L, interaction); } template<typename Interaction> std::pair<double, double> diffNorms(const float3* coordinates, float3* forces, int nparticles, float L, int nchecks, Interaction interaction) { const 
int stride = max(1, nparticles / nchecks); double linf = 0, l2 = 0; #pragma omp parallel for reduction(+:l2) reduction(max:linf) for (int i=0; i<nparticles; i+=stride) { double3 totalF = make_double3(0, 0, 0); for (int j=0; j<nparticles; j++) if (i != j) { float3 curF = interaction(coordinates[i], coordinates[j], L); totalF.x += curF.x; totalF.y += curF.y; totalF.z += curF.z; } double3 relDiff; relDiff.x = (totalF.x - forces[i].x) / totalF.x; relDiff.y = (totalF.y - forces[i].y) / totalF.y; relDiff.z = (totalF.z - forces[i].z) / totalF.z; linf = std::max({ linf, fabs(relDiff.x), fabs(relDiff.y), fabs(relDiff.z) }); l2 += relDiff.x*relDiff.x + relDiff.y*relDiff.y + relDiff.z*relDiff.z; //printf("Particle %d: reference [%f %f %f], got [%f %f %f]\n", i, f.x, f.y, f.z, forces[i].x, forces[i].y, forces[i].z); } return { linf, sqrt(l2) / nchecks }; } template<typename Kernel, typename Interaction> void runCheckReport( float L, // domain size PinnedBuffer<float3>& coordinates, PinnedBuffer<float3>& forces, int nchecks, int nrepetitions, std::string kernelName, Kernel kernel, Interaction interaction) { // Check for input consistency assert(coordinates.size() == forces.size()); const int nparticles = coordinates.size(); // Total execution time of the kernel float totalTime = 0; // Clear the forces forces.clear(0); // Allocate CUDA events to measure kernel runtime cudaEvent_t start, stop; cudaEventCreate(&start); cudaEventCreate(&stop); // Compute the forces on the GPU // Do it several times for more precise timings for (int i=0; i<nrepetitions; i++) { cudaEventRecord(start); kernel(L, coordinates, forces, interaction); cudaEventRecord(stop); cudaEventSynchronize(stop); float ms = 0; cudaEventElapsedTime(&ms, start, stop); totalTime += ms; } coordinates.downloadFromDevice(0); forces. 
downloadFromDevice(0); // Perform check against CPU auto errs = diffNorms(coordinates.hostPtr(), forces.hostPtr(), nparticles, L, nchecks, interaction); printf("Kernel '%s' statistics:\n avg runtime: %.3fms\n errors: Linf: %e, L2 %e\n\n", kernelName.c_str(), totalTime / nrepetitions, errs.first, errs.second); } //======================================================================================================================= // Naive: one thread per particle //======================================================================================================================= //======================================================================================================================= // Naive: one thread per particle //======================================================================================================================= __global__ void nbodyNaiveKernel_pos(float3* coordinates, const float3* forces, int n, const float3* velocity, const float dt) { // Get unique id of the thread const int pid = blockIdx.x * blockDim.x + threadIdx.x; // Thread id is mapped onto particle id // If the id >= than the total number of particles, just exit that thread if (pid >= n) return; // Load float3 r_old = coordinates[pid]; float3 v_old = velocity[pid]; float3 a_old = forces[pid]; // save: coordinates[pid] = r_old + v_old*dt + 0.5*a_old*dt*dt; } void nbody_posKernel(float dt, PinnedBuffer<float3>& coordinates, PinnedBuffer<float3>& forces, PinnedBuffer<float3>& velocity) { int nparticles = coordinates.size(); // Use 4 warps in a block, calculate number of blocks, // such that total number of threads is >= than number of particles const int nthreads = 128; const int nblocks = (nparticles + nthreads - 1) / nthreads; nbodyNaiveKernel_pos<<< nblocks, nthreads >>> (coordinates.devPtr(), forces.devPtr(), nparticles, velocity.devPtr(), dt); } __global__ void nbodyNaiveKernel_velo(const float3* old_forces, const float3* new_forces, int n, float3* velocity, const float dt) { // Get unique id of the thread const int pid = blockIdx.x * blockDim.x + threadIdx.x; // Thread id is mapped onto particle id // If the id >= than the total number of particles, just exit that thread if (pid >= n) return; // Load float3 a_old = old_forces[pid]; float3 a_new = new_forces[pid]; float3 v_old = velocity[pid]; // save: velocity[pid] = v_old + 0.5*(a_old + a_new)*dt; } void nbody_veloKernel(cudaStream_t stream, float dt, PinnedBuffer<float3>& old_forces, PinnedBuffer<float3>& new_forces, PinnedBuffer<float3>& velocity) { int nparticles = velocity.size(); // Use 4 warps in a block, calculate number of blocks, // such that total number of threads is >= than number of particles const int nthreads = 128; const int nblocks = (nparticles + nthreads - 1) / nthreads; nbodyNaiveKernel_velo<<< nblocks, nthreads, 0, stream>>> (old_forces.devPtr(), new_forces.devPtr(), nparticles, velocity.devPtr(), dt); } // ------------------------------- EX10: __global__ void nbodyNaiveKernel_Ekin(int n, const float3* velocity, float* Ekin_tot) { // Get unique id of the thread const int pid = blockIdx.x * blockDim.x + threadIdx.x; const int laneid = threadIdx.x % 32; // Thread id is mapped onto particle id // If the id >= than the total number of particles, just exit that thread float ek_loc = 0; if (pid < n) { float3 v = velocity[pid]; ek_loc = dot(v,v)/2.0; } // sum within warp: #pragma unroll for(int mask = 32 / 2 ; mask > 0 ; mask >>= 1) ek_loc += __shfl_xor(ek_loc, mask); // The ek_loc variable of laneid 0 contains the 
reduction. if (laneid == 0) { // write back: atomicAdd(Ekin_tot, ek_loc); } } void nbodyKernel_Ekin(cudaStream_t stream, PinnedBuffer<float3>& velocity, PinnedBuffer<float>& Ekin_tot) { int nparticles = velocity.size(); // Use 4 warps in a block, calculate number of blocks, // such that total number of threads is >= than number of particles const int nthreads = 128; const int nblocks = (nparticles + nthreads - 1) / nthreads; Ekin_tot.clearDevice(0); nbodyNaiveKernel_Ekin<<< nblocks, nthreads, 0, stream >>> (nparticles, velocity.devPtr(), Ekin_tot.devPtr()); } //======================================================================================================================= // Naive: one thread per particle //======================================================================================================================= template<typename Interaction> __global__ void nbodyNaiveKernel_Epot(const float3* __restrict__ coordinates, float* Epot_total, int n, float L, Interaction interaction) { // Get unique id of the thread const int pid = blockIdx.x * blockDim.x + threadIdx.x; const int laneid = threadIdx.x % 32; // Thread id is mapped onto particle id // If the id >= than the total number of particles, just exit that thread float Epot_local = 0; if (pid >= n) return; // Load particle coordinates float3 dst = coordinates[pid]; // Loop over all the other particles, compute the force and accumulate it for (int i=0; i<n; i++) if (i > pid) Epot_local += interaction.energy(dst, coordinates[i], L); // sum within warp: #pragma unroll for(int mask = 32 / 2 ; mask > 0 ; mask >>= 1) Epot_local += __shfl_xor(Epot_local, mask); // The ek_loc variable of laneid 0 contains the reduction. if (laneid == 0) { // write back: atomicAdd(Epot_total, Epot_local); } } template<typename Interaction> void nbodyNaive_Epot(cudaStream_t stream, int L, const PinnedBuffer<float3>& coordinates, PinnedBuffer<float>& Epot_total, Interaction interaction) { int nparticles = coordinates.size(); // Use 4 warps in a block, calculate number of blocks, // such that total number of threads is >= than number of particles const int nthreads = 128; const int nblocks = (nparticles + nthreads - 1) / nthreads; Epot_total.clearDevice(0); nbodyNaiveKernel_Epot<<< nblocks, nthreads, 0, stream >>> (coordinates.devPtr(), Epot_total.devPtr(), nparticles, L, interaction); } __global__ void nbodyNaiveKernel_rescaleVelocities(int n, float3* velocity, const float* scaleFactor) { // Get unique id of the thread const int pid = blockIdx.x * blockDim.x + threadIdx.x; // Thread id is mapped onto particle id // If the id >= than the total number of particles, just exit that thread if (pid >= n) return; // Load float3 v_old = velocity[pid]; // save: velocity[pid] = v_old*scaleFactor[0]; } void nbodyKernel_rescaleVelocities(PinnedBuffer<float3>& velocity, PinnedBuffer<float>& scaleFactor) { int nparticles = velocity.size(); // Use 4 warps in a block, calculate number of blocks, // such that total number of threads is >= than number of particles const int nthreads = 128; const int nblocks = (nparticles + nthreads - 1) / nthreads; nbodyNaiveKernel_rescaleVelocities<<< nblocks, nthreads>>> (nparticles, velocity.devPtr(), scaleFactor.devPtr()); } //======================================================================================================================= // Naive: one thread per particle //======================================================================================================================= template<typename Interaction> 
__global__ void nbodyNaiveKernel_FrSum(const float3* __restrict__ coordinates, int n, float L, float* sum, Interaction interaction) { // Get unique id of the thread const int pid = blockIdx.x * blockDim.x + threadIdx.x; const int laneid = threadIdx.x % 32; // Thread id is mapped onto particle id // If the id >= than the total number of particles, just exit that thread float sum_local = 0; if (pid >= n) return; // Load particle coordinates float3 dst = coordinates[pid]; // Loop over all the other particles, compute the force and accumulate it for (int i=0; i<n; i++) if (i > pid) sum_local += interaction.Fr(dst, coordinates[i], L); // sum within warp: #pragma unroll for(int mask = 32 / 2 ; mask > 0 ; mask >>= 1) sum_local += __shfl_xor(sum_local, mask); // The ek_loc variable of laneid 0 contains the reduction. if (laneid == 0) { // write back: atomicAdd(sum, sum_local); } } template<typename Interaction> void nbodyNaive_FrSum(cudaStream_t stream, int L, const PinnedBuffer<float3>& coordinates, PinnedBuffer<float>& sum, Interaction interaction) { int nparticles = coordinates.size(); // Use 4 warps in a block, calculate number of blocks, // such that total number of threads is >= than number of particles const int nthreads = 128; const int nblocks = (nparticles + nthreads - 1) / nthreads; sum.clearDevice(0); nbodyNaiveKernel_FrSum<<< nblocks, nthreads, 0, stream >>> (coordinates.devPtr(), nparticles, L,sum.devPtr(), interaction); } void init_data( PinnedBuffer<float3> &coords, PinnedBuffer<float3> &velocity, PinnedBuffer<float3> &forces, const int n, const float L ) { const int n_side = std::pow(n, 1.0/3.0)+1; const float dl = L / (float)n_side; for (size_t x = 0; x < n_side; x++) { for (size_t y = 0; y < n_side; y++) { for (size_t z = 0; z < n_side; z++) { int id = (x*n_side + y)*n_side + z; if (id<n) { coords[id] = make_float3(x*dl, y*dl, z*dl); velocity[id] = make_float3(0,0,0); forces[id] = make_float3(0,0,0); } } } } } void saveData(std::string fileName, const float3* data, const int n) { std::ofstream vfile; vfile.open (fileName); vfile << n << "\n"; vfile << "# comment line\n"; for (size_t i = 0; i < n; i++) { vfile << "0 " << data[i].x << " " << data[i].y << " " << data[i].z << "\n"; } vfile.close(); } template<typename Interaction> void runSimulation( PinnedBuffer<float3> &coordinates, PinnedBuffer<float3> &velocity, PinnedBuffer<float3> &forces, const int n, const float L, const float dt, const float T, Interaction f_interaction, const float Temp0 ) { // Check for input consistency assert(coordinates.size() == forces.size()); assert(coordinates.size() == velocity.size()); const int nparticles = coordinates.size(); PinnedBuffer<float3> temp_forces(n); PinnedBuffer<float> Epot_total(1); PinnedBuffer<float> Ekin_total(1); PinnedBuffer<float> Temp_scale_factor(1); PinnedBuffer<float> FrSum(1); // Total execution time of the kernel float totalTime = 0;//gpu // Allocate CUDA events to measure kernel runtime cudaEvent_t start, stop; cudaEventCreate(&start); cudaEventCreate(&stop); cudaStream_t streamCompute, streamDataload; cudaStreamCreate(&streamCompute); cudaStreamCreate(&streamDataload); float t = 0;// algo int step_counter = 0; while (t < T) { // update r (coordinates): cudaDeviceSynchronize(); nbody_posKernel(dt, coordinates, forces, velocity); cudaDeviceSynchronize(); if (false) { coordinates.downloadFromDevice(0); printf("coordinates[0]: %.4f, %.4f, %.4f\n\n", coordinates[0].x, coordinates[0].y, coordinates[0].z); } // issue coordinate loading: if (step_counter % 100 == 0) { 
coordinates.downloadFromDevice(streamDataload, false); // no synch } // get new forces: nbodyShared<decltype(f_interaction)>(streamCompute, L, coordinates, temp_forces, f_interaction); if (false) { forces.downloadFromDevice(0); printf("force[0]: %.4f, %.4f, %.4f\n\n", forces[0].x, forces[0].y, forces[0].z); temp_forces.downloadFromDevice(0); printf("temp_forces[0]: %.4f, %.4f, %.4f\n\n", temp_forces[0].x, temp_forces[0].y, temp_forces[0].z); } // update velocity: nbody_veloKernel(streamCompute, dt, forces, temp_forces, velocity); if (false) { velocity.downloadFromDevice(0); printf("velocity[0]: %.4f, %.4f, %.4f\n\n", velocity[0].x, velocity[0].y, velocity[0].z); } // swap forces: std::swap(forces, temp_forces); t += dt; if (step_counter % 100 == 0) { // calculate energy: nbodyNaive_Epot(streamCompute, L, coordinates, Epot_total, f_interaction); nbodyKernel_Ekin(streamCompute, velocity, Ekin_total); // get values from GPU: Epot_total.downloadFromDevice(0); Ekin_total.downloadFromDevice(0); printf("t: %.4f\n\n", t); printf("Epot: %.4f, Ekin: %.4f, E: %.4f\n\n", Epot_total[0], Ekin_total[0], Epot_total[0]+Ekin_total[0]); // data already loaded ! cudaDeviceSynchronize(); saveData("dump_" + std::to_string(step_counter) + ".txt", coordinates.hostPtr(), n); } if (step_counter % 10 == 9) { // do temperature control: nbodyKernel_Ekin(streamCompute, velocity, Ekin_total); Ekin_total.downloadFromDevice(0); const float TempCurr = 2.0/(3.0 * n)*Ekin_total[0]; const float rescale_factor = std::sqrt(Temp0 / TempCurr); printf("rescale_factor: %.4f\n\n", rescale_factor); // sqrt because v is squared for energy. // go scale velocities: Temp_scale_factor[0] = rescale_factor; Temp_scale_factor.uploadToDevice(0); nbodyKernel_rescaleVelocities(velocity, Temp_scale_factor); /// print pressure before scaling: nbodyNaive_FrSum(streamCompute, L, coordinates, FrSum, f_interaction); FrSum.downloadFromDevice(0); const float V = L*L*L; const float pressure = 1.0 * TempCurr * n / V + 1.0/(3.0*V)*FrSum[0]; printf("pressure: %.4f\n\n", pressure); } step_counter++; } coordinates.downloadFromDevice(0); forces. downloadFromDevice(0); velocity. downloadFromDevice(0); printf("runtime: %.3fms\n\n", totalTime); } int main(int argc, char** argv) { int n = 50000; float L = 10; float dt = 0.000001;//0.0001; float T = 1.0; float epsilon = 0.1; float sigma = 0.5; if (argc > 1) { n = atoi(argv[1]); assert(n > 0); } if (argc > 2) { epsilon = std::stof(argv[2]); assert(epsilon > 0); } if (argc > 3) { sigma = std::stof(argv[3]); assert(sigma > 0); } PinnedBuffer<float3> coordinates(n), forces(n), velocity(n); init_data(coordinates, velocity, forces, n, L); // Transfer data to the GPU coordinates.uploadToDevice(0); velocity.uploadToDevice(0); forces.uploadToDevice(0); //Pairwise_Gravity gravity(10.0); Pairwise_LJ ljforce( epsilon, sigma ); runSimulation( coordinates, velocity, forces, n, L, dt, T, ljforce, 10.0// Temp0 -> a complete guess. ); return 0; }
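// ---------------------------------------------------------------------------
// Illustrative sketch (not part of the original file). The energy and virial
// reductions above use the legacy __shfl_xor(), and the Epot/FrSum kernels
// return early for pid >= n before shuffling, which leaves some lanes of the
// last warp inactive during the exchange. On CUDA 9+ the usual pattern keeps
// every lane active, uses the *_sync variant with a full mask, and lets
// out-of-range lanes contribute zero; "warpReduceSum" and "sumKernel" are
// hypothetical names for this sketch.
// ---------------------------------------------------------------------------
__device__ inline float warpReduceSum(float val)
{
    // butterfly reduction across the 32 lanes of a warp
    #pragma unroll
    for (int mask = 32 / 2; mask > 0; mask >>= 1)
        val += __shfl_xor_sync(0xffffffff, val, mask);
    return val;
}

__global__ void sumKernel(const float* data, int n, float* result)
{
    const int pid    = blockIdx.x * blockDim.x + threadIdx.x;
    const int laneid = threadIdx.x % 32;
    float val = (pid < n) ? data[pid] : 0.0f;   // out-of-range lanes contribute 0
    val = warpReduceSum(val);                   // every lane participates
    if (laneid == 0)
        atomicAdd(result, val);                 // one atomic per warp
}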
475bf0451e392bc7e6c9e4580ef05e83bb1de89b.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* -- MAGMA (version 1.6.0) -- Univ. of Tennessee, Knoxville Univ. of California, Berkeley Univ. of Colorado, Denver November 2013 @author Azzam Haidar @author Tingxing Dong @generated from zpotf2_kernels.cu normal z -> s, Sat Nov 15 19:53:59 2014 */ #include "common_magma.h" #include "batched_kernel_param.h" #include "magma_templates.h" #define PRECISION_s #define A(i, j) (A + (i) + (j)*lda) // A(i, j) means at i row, j column // dynamically allocated shared memory, set to size number of threads when the kernel is launched. // See CUDA Guide B.2.3 extern __shared__ float shared_data[]; // dynamically allocated shared memory, set to size number of threads when the kernel is launched. // See CUDA Guide B.2.3 extern __shared__ float dble_shared_data[]; ///////////////////////////////////////////////////////////////////////////////////////////////// __global__ void sdot_kernel_batched(int n, float **x_array, int incx, int offset, magma_int_t *info_array, int gbstep) { int tx = threadIdx.x; float *x = x_array[blockIdx.z]+offset; float *sdata = dble_shared_data; float res = MAGMA_S_ZERO; if (tx < n) { res = x[tx*incx]; } sdata[tx] = MAGMA_S_REAL(res * MAGMA_S_CNJG(res)); __syncthreads(); for(int s = blockDim.x/2; s > 32; s >>= 1 ) { if (tx < s) { sdata[tx] += sdata[tx+s]; } __syncthreads(); } if (tx < 32) { volatile float* smem = sdata; smem[tx] += smem[tx+32]; smem[tx] += smem[tx+16]; smem[tx] += smem[tx+8]; smem[tx] += smem[tx+4]; smem[tx] += smem[tx+2]; smem[tx] += smem[tx+1]; } if (tx == 0) { float xreal = MAGMA_S_REAL(x[n*incx]); //MAGMA_S_SET2REAL(x[n*incx], sqrt(xreal - sdata[0])); x[n*incx] = MAGMA_S_MAKE(sqrt(xreal - sdata[0]), 0); if(x[n*incx] == MAGMA_S_ZERO){ info_array[blockIdx.z] = offset + gbstep + 1; } } } void magma_spotf2_sdot_batched(magma_int_t n, float **x_array, magma_int_t incx, magma_int_t offset, magma_int_t *info_array, magma_int_t gbstep, magma_int_t batchCount) { /* Specialized Sdot 1) performs sdot sum = x[0:n-1]*conj(x[0:n-1]) 2) updates x[n] = sqrt(x[n]-sum); */ if (n > MAX_NTHREADS) { printf("n = %d > %d is not supported in spotf2_sdot\n", (int) n, (int) MAX_NTHREADS); } int threadSize; if (n <= 1024 && n > 512) { threadSize = 1024; } else if (n <= 512 && n > 256 ) { threadSize = 512; } else if (n <= 256 && n > 128) { threadSize = 256; } else if (n <= 128 && n > 64) { threadSize = 128; } else { threadSize = 64; } dim3 grid(1, 1, batchCount); hipLaunchKernelGGL(( sdot_kernel_batched), dim3(grid), dim3(threadSize), threadSize * sizeof(float), magma_stream, n, x_array, incx, offset, info_array, gbstep); } ///////////////////////////////////////////////////////////////////////////////////////////////// __global__ void sscal_kernel_batched(int n, float **x_array, int incx, int offset, magma_int_t *info_array) { // checkinfo to avoid computation of the singular matrix if(info_array[blockIdx.z] != 0 ) return; int id = threadIdx.x; float *x = x_array[blockIdx.z]+offset; __shared__ float factor; if (threadIdx.x == 0) { factor = MAGMA_S_MAKE(1.0/MAGMA_S_REAL(x[0]), 0.0); } __syncthreads(); if ( id < n && id >0) { x[id*incx] = x[id*incx] * factor; //printf("x=%f", x[id*incx]); } } void magma_spotf2_sscal_batched(magma_int_t n, float **x_array, magma_int_t incx, magma_int_t offset, magma_int_t *info_array, magma_int_t batchCount) { /* Specialized Sscal perform x[1:n-1]/x[0] */ dim3 grid(1, 1, batchCount); dim3 threads(n, 1, 1); hipLaunchKernelGGL(( sscal_kernel_batched), dim3(grid), 
dim3(threads), 0, magma_stream , n, x_array, incx, offset, info_array); } ///////////////////////////////////////////////////////////////////////////////////////////////// #if defined(PRECISION_z) || defined(PRECISION_c) __global__ void slacgv_kernel_batched(int n, float **x_array, int incx, int offset) { int id = threadIdx.x; float *x = x_array[blockIdx.z]+offset; if ( id < n ) { x[id*incx] = MAGMA_S_CNJG(x[id*incx]); } } void magma_slacgv_batched(magma_int_t n, float **x_array, magma_int_t incx, int offset, int batchCount) { /* Purpose ======= SLACGV conjugates a real vector of length N. Arguments ========= N (input) INTEGER The length of the vector X. N >= 0. X (input/output) REAL array, dimension (1+(N-1)*abs(INCX)) On entry, the vector of length N to be conjugated. On exit, X is overwritten with conjg(X). INCX (input) INTEGER The spacing between successive elements of X. ===================================================================== */ dim3 grid(1, 1, batchCount); dim3 threads(n, 1, 1); hipLaunchKernelGGL(( slacgv_kernel_batched), dim3(grid), dim3(threads), 0, magma_stream , n, x_array, incx, offset); } #endif // defined(PRECISION_z) || defined(PRECISION_c) ///////////////////////////////////////////////////////////////////////////////////////////////// static __device__ void spotf2_device(int m, int n, float *A, int lda, float alpha, float beta, magma_int_t *info, int gbstep) { /* Each thread block load entire A into shared memory factorize it and copy back. n must be small enough to fit shared memory. n is checked by a macro POTF2_TILE_SIZE before the kernel. */ // checkinfo to avoid computation of the singular matrix if(*info != 0 ) return; int tx = threadIdx.x; float *sdata_A = shared_data; __shared__ float factor; __shared__ float sum[POTF2_TILE_SIZE]; // load A into sdata_A if(tx < m) { for(int i=0; i<n; i++) { sdata_A[tx + i * m] = A[tx + i * lda]; } } __syncthreads(); for(int iter=0; iter<n; iter++) { float res = MAGMA_D_ZERO; float res1 = MAGMA_S_ZERO; //1) performs sdot sum = A[iter, 0:iter-1]*conj(A[iter, 0:iter-1]) //2) updates A[iter,iter] = sqrt(A[iter,iter]-sum); if(tx<iter) { res = MAGMA_S_REAL (sdata_A[iter + tx * m] * MAGMA_S_CNJG(sdata_A[iter + tx * m])); sum[tx] = res; } else { sum[tx] = 0.0; } __syncthreads(); magma_sum_reduce<POTF2_TILE_SIZE>(tx, sum);//tried on K40: if m=32 n=32 the overall spotf2_device routine time is 60ms n=16 time=25 n=8 time=20ms //magma_sum_reduce_n(iter, tx, sum); //tried on K40: if m=32 n=32 the time went from 61ms to 70ms when switching to reduce_n. n=16 time=28. //magma_sum_reduce_inlined(iter, tx, sum); //tried on K40: similar to magma_sum_reduce<POTF2_TILE_SIZE>(tx, sum); if (tx == 0) { float xreal = MAGMA_S_REAL(sdata_A[iter + iter * m]); sdata_A[iter + iter * m] = MAGMA_S_MAKE(sqrt(xreal - sum[0]), 0); if(sdata_A[iter + iter * m] == MAGMA_S_ZERO){ *info = iter + gbstep + 1; } } __syncthreads(); if(sdata_A[iter + iter * m] == MAGMA_S_ZERO) return; __syncthreads(); //slacgv conjugates a real vector of length iter. //TODO #if defined(PRECISION_z) || defined(PRECISION_c) if(tx < iter) { sdata_A[iter + tx * m] = MAGMA_S_CNJG(sdata_A[iter + tx * m]); } __syncthreads(); #endif // sgemv // Compute elements iter:n-1 of column iter = A(iter:n,0:iter-1) * A(iter-1,0:iter-1) (row). if(tx < m && tx > iter) { for(int j=0; j < iter; j++) { res1 += sdata_A[tx + j * m] * sdata_A[iter + j * m]; // TODO move the slacgv conj to be done automatically here implicitly. 
} sdata_A [tx + iter * m] = alpha * res1 + sdata_A [tx + iter * m] * beta; } __syncthreads(); //slacgv conjugates a real vector of length iter. #if defined(PRECISION_z) || defined(PRECISION_c) if(tx < iter) { sdata_A[iter + tx * m] = MAGMA_S_CNJG(sdata_A[iter + tx * m]); } __syncthreads(); #endif // sscal perform A[iter:n-1, iter]/A[iter,iter]; if (tx == 0) { factor = MAGMA_S_MAKE(1.0/MAGMA_S_REAL(sdata_A[iter + iter * m]), 0.0); } __syncthreads(); if ( tx < m && tx > iter) { sdata_A[ tx + iter * m ] *= factor; } __syncthreads(); }// end of iter //copy sdata_A to A if(tx < m) { for(int i=0; i<n; i++) { A[tx + i * lda] = sdata_A[tx + i * m]; } } } ///////////////////////////////////////////////////////////////////////////////////////////////// __global__ void spotf2_kernel_batched(int m, int n, float **dA_array, int lda, float alpha, float beta, magma_int_t *info_array, int gbstep) { /* Each thread block load entire dA_array[blockIdx.z] into shared memory factorize it and copy back. n must be small enough to fit shared memory. n is checked by a macro POTF2_TILE_SIZE before the kernel. */ int batchid = blockIdx.z; spotf2_device(m, n, dA_array[batchid], lda, alpha, beta, &(info_array[batchid]), gbstep); } ///////////////////////////////////////////////////////////////////////////////////////////////// __global__ void spotf2_kernel(int m, int n, float *dA, int lda, float alpha, float beta, magma_int_t *info) { spotf2_device(m, n, dA, lda, alpha, beta, info, 0); } ///////////////////////////////////////////////////////////////////////////////////////////////// /** Purpose ------- spotf2 computes the Cholesky factorization of a real symmetric positive definite matrix A. The factorization has the form A = U**H * U, if UPLO = MagmaUpper, or A = L * L**H, if UPLO = MagmaLower, where U is an upper triangular matrix and L is lower triangular. This is the unblocked version of the algorithm, calling Level 2 BLAS. Arguments --------- @param[in] uplo magma_uplo_t Specifies whether the upper or lower triangular part of the symmetric matrix A is stored. - = MagmaUpper: Upper triangular - = MagmaLower: Lower triangular @param[in] n INTEGER The order of the matrix A. N >= 0 and N <= 512. @param[in,out] dA REAL array, dimension (LDDA,N) On entry, the symmetric matrix A. If UPLO = MagmaUpper, the leading n by n upper triangular part of A contains the upper triangular part of the matrix A, and the strictly lower triangular part of A is not referenced. If UPLO = MagmaLower, the leading n by n lower triangular part of A contains the lower triangular part of the matrix A, and the strictly upper triangular part of A is not referenced. \n On exit, if INFO = 0, the factor U or L from the Cholesky factorization A = U**H * U or A = L * L**H. @param[in] ldda INTEGER The leading dimension of the array A. LDDA >= max(1,N). @param[out] info INTEGER - = 0: successful exit - < 0: if INFO = -k, the k-th argument had an illegal value - > 0: if INFO = k, the leading minor of order k is not positive definite, and the factorization could not be completed. 
@ingroup magma_sposv_aux ********************************************************************/ extern "C" magma_int_t magma_spotf2_tile_batched( magma_uplo_t uplo, magma_int_t m, magma_int_t n, float **dA_array, magma_int_t lda, magma_int_t *info_array, magma_int_t gbstep, magma_int_t batchCount) { magma_int_t arginfo = 0; if ( uplo != MagmaUpper && uplo != MagmaLower) { arginfo = -1; } else if (m < 0 || n < 0 || m > POTF2_TILE_SIZE || n > POTF2_TILE_SIZE) { arginfo = -2; } else if (lda < max(1,m)) { arginfo = -4; } else if (m < n) { arginfo = -10; } if (uplo == MagmaUpper) { printf("Upper side is unavailable \n"); arginfo = -1; } if (arginfo != 0) { magma_xerbla( __func__, -(arginfo) ); return arginfo; } // Quick return if possible if (m == 0 || n == 0) { return arginfo; } float alpha = MAGMA_S_NEG_ONE; float beta = MAGMA_S_ONE; dim3 dimGrid(1, 1, batchCount); dim3 threads(POTF2_TILE_SIZE, 1); int shared_mem_size = sizeof(float)*m*n; // + sizeof(float)*(POTF2_TILE_SIZE+1); hipLaunchKernelGGL(( spotf2_kernel_batched), dim3(dimGrid), dim3(threads), shared_mem_size , 0, m, n, dA_array, lda, alpha, beta, info_array, gbstep); return arginfo; } ///////////////////////////////////////////////////////////////////////////////////////////////////////// extern "C" magma_int_t magma_spotf2_tile( magma_uplo_t uplo, magma_int_t m, magma_int_t n, float *dA, magma_int_t lda, magma_int_t *info) { *info = 0; if ( uplo != MagmaUpper && uplo != MagmaLower) { *info = -1; } else if (m < 0 || n < 0 || m > POTF2_TILE_SIZE) { *info = -2; } else if (lda < max(1,m)) { *info = -4; } else if (m < n) { *info = -10; } if (uplo == MagmaUpper) { printf("Upper side is unavailable \n"); *info = -1; } if (*info != 0) { magma_xerbla( __func__, -(*info) ); return *info; } // Quick return if possible if (m == 0 || n == 0) { return *info; } float alpha = MAGMA_S_NEG_ONE; float beta = MAGMA_S_ONE; dim3 dimGrid(1); dim3 threads(POTF2_TILE_SIZE, 1); int shared_mem_size = sizeof(float)*m*n; // + sizeof(float)*(POTF2_TILE_SIZE+1); hipLaunchKernelGGL(( spotf2_kernel), dim3(dimGrid), dim3(threads), shared_mem_size , 0, m, n, dA, lda, alpha, beta, info); return *info; }
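// ---------------------------------------------------------------------------
// Illustrative reference (not part of MAGMA). The per-column loop in
// spotf2_device above follows the unblocked lower-triangular Cholesky
// recurrence: dot of row k with itself, sqrt of the updated diagonal, then a
// gemv-like update and scaling of column k. A plain host version of the same
// update for a column-major n x n real matrix (conjugation for complex
// precisions omitted); "cholesky_lower_unblocked" is a hypothetical name:
// ---------------------------------------------------------------------------
static int cholesky_lower_unblocked(float* A, int n, int lda)
{
    for (int k = 0; k < n; ++k) {
        float sum = 0.0f;
        for (int j = 0; j < k; ++j)                  // sdot: row k with itself
            sum += A[k + j*lda] * A[k + j*lda];
        const float d = A[k + k*lda] - sum;
        if (d <= 0.0f) return k + 1;                 // not positive definite (info > 0)
        A[k + k*lda] = sqrtf(d);
        for (int i = k + 1; i < n; ++i) {            // sgemv + sscal on column k
            float r = 0.0f;
            for (int j = 0; j < k; ++j)
                r += A[i + j*lda] * A[k + j*lda];
            A[i + k*lda] = (A[i + k*lda] - r) / A[k + k*lda];
        }
    }
    return 0;                                        // success
}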
475bf0451e392bc7e6c9e4580ef05e83bb1de89b.cu
/* -- MAGMA (version 1.6.0) -- Univ. of Tennessee, Knoxville Univ. of California, Berkeley Univ. of Colorado, Denver November 2013 @author Azzam Haidar @author Tingxing Dong @generated from zpotf2_kernels.cu normal z -> s, Sat Nov 15 19:53:59 2014 */ #include "common_magma.h" #include "batched_kernel_param.h" #include "magma_templates.h" #define PRECISION_s #define A(i, j) (A + (i) + (j)*lda) // A(i, j) means at i row, j column // dynamically allocated shared memory, set to size number of threads when the kernel is launched. // See CUDA Guide B.2.3 extern __shared__ float shared_data[]; // dynamically allocated shared memory, set to size number of threads when the kernel is launched. // See CUDA Guide B.2.3 extern __shared__ float dble_shared_data[]; ///////////////////////////////////////////////////////////////////////////////////////////////// __global__ void sdot_kernel_batched(int n, float **x_array, int incx, int offset, magma_int_t *info_array, int gbstep) { int tx = threadIdx.x; float *x = x_array[blockIdx.z]+offset; float *sdata = dble_shared_data; float res = MAGMA_S_ZERO; if (tx < n) { res = x[tx*incx]; } sdata[tx] = MAGMA_S_REAL(res * MAGMA_S_CNJG(res)); __syncthreads(); for(int s = blockDim.x/2; s > 32; s >>= 1 ) { if (tx < s) { sdata[tx] += sdata[tx+s]; } __syncthreads(); } if (tx < 32) { volatile float* smem = sdata; smem[tx] += smem[tx+32]; smem[tx] += smem[tx+16]; smem[tx] += smem[tx+8]; smem[tx] += smem[tx+4]; smem[tx] += smem[tx+2]; smem[tx] += smem[tx+1]; } if (tx == 0) { float xreal = MAGMA_S_REAL(x[n*incx]); //MAGMA_S_SET2REAL(x[n*incx], sqrt(xreal - sdata[0])); x[n*incx] = MAGMA_S_MAKE(sqrt(xreal - sdata[0]), 0); if(x[n*incx] == MAGMA_S_ZERO){ info_array[blockIdx.z] = offset + gbstep + 1; } } } void magma_spotf2_sdot_batched(magma_int_t n, float **x_array, magma_int_t incx, magma_int_t offset, magma_int_t *info_array, magma_int_t gbstep, magma_int_t batchCount) { /* Specialized Sdot 1) performs sdot sum = x[0:n-1]*conj(x[0:n-1]) 2) updates x[n] = sqrt(x[n]-sum); */ if (n > MAX_NTHREADS) { printf("n = %d > %d is not supported in spotf2_sdot\n", (int) n, (int) MAX_NTHREADS); } int threadSize; if (n <= 1024 && n > 512) { threadSize = 1024; } else if (n <= 512 && n > 256 ) { threadSize = 512; } else if (n <= 256 && n > 128) { threadSize = 256; } else if (n <= 128 && n > 64) { threadSize = 128; } else { threadSize = 64; } dim3 grid(1, 1, batchCount); sdot_kernel_batched<<< grid, threadSize, threadSize * sizeof(float), magma_stream>>> (n, x_array, incx, offset, info_array, gbstep); } ///////////////////////////////////////////////////////////////////////////////////////////////// __global__ void sscal_kernel_batched(int n, float **x_array, int incx, int offset, magma_int_t *info_array) { // checkinfo to avoid computation of the singular matrix if(info_array[blockIdx.z] != 0 ) return; int id = threadIdx.x; float *x = x_array[blockIdx.z]+offset; __shared__ float factor; if (threadIdx.x == 0) { factor = MAGMA_S_MAKE(1.0/MAGMA_S_REAL(x[0]), 0.0); } __syncthreads(); if ( id < n && id >0) { x[id*incx] = x[id*incx] * factor; //printf("x=%f", x[id*incx]); } } void magma_spotf2_sscal_batched(magma_int_t n, float **x_array, magma_int_t incx, magma_int_t offset, magma_int_t *info_array, magma_int_t batchCount) { /* Specialized Sscal perform x[1:n-1]/x[0] */ dim3 grid(1, 1, batchCount); dim3 threads(n, 1, 1); sscal_kernel_batched<<< grid, threads, 0, magma_stream >>> (n, x_array, incx, offset, info_array); } 
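The sdot kernel above uses a shared-memory tree reduction followed by an unrolled volatile warp tail. A minimal, self-contained sketch of that pattern is below; the kernel name sum_squares_kernel is illustrative, and the assumptions are the same as in the original (blockDim.x a power of two, at least 64, and n no larger than the block size). The implicit warp synchrony the volatile tail relies on is not guaranteed on Volta and newer GPUs, so explicit __syncwarp() calls are added between the steps.

// Hedged sketch of the reduction pattern: tree reduction in shared memory,
// then an unrolled warp tail. Launch example: <<<1, 256, 256*sizeof(float)>>>.
#include <cuda_runtime.h>

__global__ void sum_squares_kernel(const float* x, int n, float* out)
{
    extern __shared__ float sdata[];          // one float per thread
    int tx = threadIdx.x;

    float v = (tx < n) ? x[tx] : 0.0f;
    sdata[tx] = v * v;                        // per-thread partial: x[tx]^2
    __syncthreads();

    // Tree reduction until a single warp remains.
    for (int s = blockDim.x / 2; s > 32; s >>= 1) {
        if (tx < s) sdata[tx] += sdata[tx + s];
        __syncthreads();
    }

    // Warp tail, same shape as the original, with explicit warp syncs.
    if (tx < 32) {
        volatile float* smem = sdata;
        smem[tx] += smem[tx + 32]; __syncwarp();
        smem[tx] += smem[tx + 16]; __syncwarp();
        smem[tx] += smem[tx +  8]; __syncwarp();
        smem[tx] += smem[tx +  4]; __syncwarp();
        smem[tx] += smem[tx +  2]; __syncwarp();
        smem[tx] += smem[tx +  1]; __syncwarp();
    }
    if (tx == 0) *out = sdata[0];
}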
///////////////////////////////////////////////////////////////////////////////////////////////// #if defined(PRECISION_z) || defined(PRECISION_c) __global__ void slacgv_kernel_batched(int n, float **x_array, int incx, int offset) { int id = threadIdx.x; float *x = x_array[blockIdx.z]+offset; if ( id < n ) { x[id*incx] = MAGMA_S_CNJG(x[id*incx]); } } void magma_slacgv_batched(magma_int_t n, float **x_array, magma_int_t incx, int offset, int batchCount) { /* Purpose ======= SLACGV conjugates a real vector of length N. Arguments ========= N (input) INTEGER The length of the vector X. N >= 0. X (input/output) REAL array, dimension (1+(N-1)*abs(INCX)) On entry, the vector of length N to be conjugated. On exit, X is overwritten with conjg(X). INCX (input) INTEGER The spacing between successive elements of X. ===================================================================== */ dim3 grid(1, 1, batchCount); dim3 threads(n, 1, 1); slacgv_kernel_batched<<< grid, threads, 0, magma_stream >>> (n, x_array, incx, offset); } #endif // defined(PRECISION_z) || defined(PRECISION_c) ///////////////////////////////////////////////////////////////////////////////////////////////// static __device__ void spotf2_device(int m, int n, float *A, int lda, float alpha, float beta, magma_int_t *info, int gbstep) { /* Each thread block load entire A into shared memory factorize it and copy back. n must be small enough to fit shared memory. n is checked by a macro POTF2_TILE_SIZE before the kernel. */ // checkinfo to avoid computation of the singular matrix if(*info != 0 ) return; int tx = threadIdx.x; float *sdata_A = shared_data; __shared__ float factor; __shared__ float sum[POTF2_TILE_SIZE]; // load A into sdata_A if(tx < m) { for(int i=0; i<n; i++) { sdata_A[tx + i * m] = A[tx + i * lda]; } } __syncthreads(); for(int iter=0; iter<n; iter++) { float res = MAGMA_D_ZERO; float res1 = MAGMA_S_ZERO; //1) performs sdot sum = A[iter, 0:iter-1]*conj(A[iter, 0:iter-1]) //2) updates A[iter,iter] = sqrt(A[iter,iter]-sum); if(tx<iter) { res = MAGMA_S_REAL (sdata_A[iter + tx * m] * MAGMA_S_CNJG(sdata_A[iter + tx * m])); sum[tx] = res; } else { sum[tx] = 0.0; } __syncthreads(); magma_sum_reduce<POTF2_TILE_SIZE>(tx, sum);//tried on K40: if m=32 n=32 the overall spotf2_device routine time is 60ms n=16 time=25 n=8 time=20ms //magma_sum_reduce_n(iter, tx, sum); //tried on K40: if m=32 n=32 the time went from 61ms to 70ms when switching to reduce_n. n=16 time=28. //magma_sum_reduce_inlined(iter, tx, sum); //tried on K40: similar to magma_sum_reduce<POTF2_TILE_SIZE>(tx, sum); if (tx == 0) { float xreal = MAGMA_S_REAL(sdata_A[iter + iter * m]); sdata_A[iter + iter * m] = MAGMA_S_MAKE(sqrt(xreal - sum[0]), 0); if(sdata_A[iter + iter * m] == MAGMA_S_ZERO){ *info = iter + gbstep + 1; } } __syncthreads(); if(sdata_A[iter + iter * m] == MAGMA_S_ZERO) return; __syncthreads(); //slacgv conjugates a real vector of length iter. //TODO #if defined(PRECISION_z) || defined(PRECISION_c) if(tx < iter) { sdata_A[iter + tx * m] = MAGMA_S_CNJG(sdata_A[iter + tx * m]); } __syncthreads(); #endif // sgemv // Compute elements iter:n-1 of column iter = A(iter:n,0:iter-1) * A(iter-1,0:iter-1) (row). if(tx < m && tx > iter) { for(int j=0; j < iter; j++) { res1 += sdata_A[tx + j * m] * sdata_A[iter + j * m]; // TODO move the slacgv conj to be done automatically here implicitly. } sdata_A [tx + iter * m] = alpha * res1 + sdata_A [tx + iter * m] * beta; } __syncthreads(); //slacgv conjugates a real vector of length iter. 
#if defined(PRECISION_z) || defined(PRECISION_c) if(tx < iter) { sdata_A[iter + tx * m] = MAGMA_S_CNJG(sdata_A[iter + tx * m]); } __syncthreads(); #endif // sscal perform A[iter:n-1, iter]/A[iter,iter]; if (tx == 0) { factor = MAGMA_S_MAKE(1.0/MAGMA_S_REAL(sdata_A[iter + iter * m]), 0.0); } __syncthreads(); if ( tx < m && tx > iter) { sdata_A[ tx + iter * m ] *= factor; } __syncthreads(); }// end of iter //copy sdata_A to A if(tx < m) { for(int i=0; i<n; i++) { A[tx + i * lda] = sdata_A[tx + i * m]; } } } ///////////////////////////////////////////////////////////////////////////////////////////////// __global__ void spotf2_kernel_batched(int m, int n, float **dA_array, int lda, float alpha, float beta, magma_int_t *info_array, int gbstep) { /* Each thread block load entire dA_array[blockIdx.z] into shared memory factorize it and copy back. n must be small enough to fit shared memory. n is checked by a macro POTF2_TILE_SIZE before the kernel. */ int batchid = blockIdx.z; spotf2_device(m, n, dA_array[batchid], lda, alpha, beta, &(info_array[batchid]), gbstep); } ///////////////////////////////////////////////////////////////////////////////////////////////// __global__ void spotf2_kernel(int m, int n, float *dA, int lda, float alpha, float beta, magma_int_t *info) { spotf2_device(m, n, dA, lda, alpha, beta, info, 0); } ///////////////////////////////////////////////////////////////////////////////////////////////// /** Purpose ------- spotf2 computes the Cholesky factorization of a real symmetric positive definite matrix A. The factorization has the form A = U**H * U, if UPLO = MagmaUpper, or A = L * L**H, if UPLO = MagmaLower, where U is an upper triangular matrix and L is lower triangular. This is the unblocked version of the algorithm, calling Level 2 BLAS. Arguments --------- @param[in] uplo magma_uplo_t Specifies whether the upper or lower triangular part of the symmetric matrix A is stored. - = MagmaUpper: Upper triangular - = MagmaLower: Lower triangular @param[in] n INTEGER The order of the matrix A. N >= 0 and N <= 512. @param[in,out] dA REAL array, dimension (LDDA,N) On entry, the symmetric matrix A. If UPLO = MagmaUpper, the leading n by n upper triangular part of A contains the upper triangular part of the matrix A, and the strictly lower triangular part of A is not referenced. If UPLO = MagmaLower, the leading n by n lower triangular part of A contains the lower triangular part of the matrix A, and the strictly upper triangular part of A is not referenced. \n On exit, if INFO = 0, the factor U or L from the Cholesky factorization A = U**H * U or A = L * L**H. @param[in] ldda INTEGER The leading dimension of the array A. LDDA >= max(1,N). @param[out] info INTEGER - = 0: successful exit - < 0: if INFO = -k, the k-th argument had an illegal value - > 0: if INFO = k, the leading minor of order k is not positive definite, and the factorization could not be completed. 
@ingroup magma_sposv_aux ********************************************************************/ extern "C" magma_int_t magma_spotf2_tile_batched( magma_uplo_t uplo, magma_int_t m, magma_int_t n, float **dA_array, magma_int_t lda, magma_int_t *info_array, magma_int_t gbstep, magma_int_t batchCount) { magma_int_t arginfo = 0; if ( uplo != MagmaUpper && uplo != MagmaLower) { arginfo = -1; } else if (m < 0 || n < 0 || m > POTF2_TILE_SIZE || n > POTF2_TILE_SIZE) { arginfo = -2; } else if (lda < max(1,m)) { arginfo = -4; } else if (m < n) { arginfo = -10; } if (uplo == MagmaUpper) { printf("Upper side is unavailable \n"); arginfo = -1; } if (arginfo != 0) { magma_xerbla( __func__, -(arginfo) ); return arginfo; } // Quick return if possible if (m == 0 || n == 0) { return arginfo; } float alpha = MAGMA_S_NEG_ONE; float beta = MAGMA_S_ONE; dim3 dimGrid(1, 1, batchCount); dim3 threads(POTF2_TILE_SIZE, 1); int shared_mem_size = sizeof(float)*m*n; // + sizeof(float)*(POTF2_TILE_SIZE+1); spotf2_kernel_batched<<<dimGrid, threads, shared_mem_size >>>(m, n, dA_array, lda, alpha, beta, info_array, gbstep); return arginfo; } ///////////////////////////////////////////////////////////////////////////////////////////////////////// extern "C" magma_int_t magma_spotf2_tile( magma_uplo_t uplo, magma_int_t m, magma_int_t n, float *dA, magma_int_t lda, magma_int_t *info) { *info = 0; if ( uplo != MagmaUpper && uplo != MagmaLower) { *info = -1; } else if (m < 0 || n < 0 || m > POTF2_TILE_SIZE) { *info = -2; } else if (lda < max(1,m)) { *info = -4; } else if (m < n) { *info = -10; } if (uplo == MagmaUpper) { printf("Upper side is unavailable \n"); *info = -1; } if (*info != 0) { magma_xerbla( __func__, -(*info) ); return *info; } // Quick return if possible if (m == 0 || n == 0) { return *info; } float alpha = MAGMA_S_NEG_ONE; float beta = MAGMA_S_ONE; dim3 dimGrid(1); dim3 threads(POTF2_TILE_SIZE, 1); int shared_mem_size = sizeof(float)*m*n; // + sizeof(float)*(POTF2_TILE_SIZE+1); spotf2_kernel<<<dimGrid, threads, shared_mem_size >>>(m, n, dA, lda, alpha, beta, info); return *info; }
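The paired .hip and .cu files above differ essentially only in launch syntax: hipify rewrites each CUDA triple-chevron launch into a hipLaunchKernelGGL call with the same grid, block, shared-memory, and stream arguments. The sketch below illustrates that correspondence on a toy kernel; scale_kernel and launch_scale are placeholder names, not part of either codebase.

// Hedged sketch of the launch-syntax correspondence between the paired files.
#include <cuda_runtime.h>

__global__ void scale_kernel(float* x, int n, float a)
{
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n) x[i] *= a;
}

void launch_scale(float* d_x, int n, float a)
{
    dim3 block(256);
    dim3 grid((n + block.x - 1) / block.x);
    size_t shmem = 0;

    // CUDA triple-chevron form, as in the .cu files of each pair:
    scale_kernel<<<grid, block, shmem, 0>>>(d_x, n, a);

    // Equivalent HIP form produced by hipify, as in the .hip files:
    //   hipLaunchKernelGGL((scale_kernel), grid, block, shmem, 0, d_x, n, a);
    // Argument order: kernel, grid, block, dynamic shared bytes, stream, kernel args.
}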
e909ae7b7cde1f9b5ae1b4fd201fd4deb2c460fc.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "gpu_hash_map.h" #include <iostream> #define DELTA 0.01 namespace NAMESPACE { __global__ void InitializeMapEntries(const float* __restrict__ points, const int* __restrict__ numValid, int* validOutputVoxels, float RangeMinX, float RangeMinY, float RangeMinZ, float VoxelSizeX, float VoxelSizeY, float VoxelSizeZ, int GridX, int GridY, int GridZ, int* map, HashEntry* list, int batchSize, int cols, int maxInputPoints, int maxOutputVoxels, int* map_addr, const int value_map_z) { int idx = threadIdx.x + blockDim.x * blockIdx.x; int stride = gridDim.x * blockDim.x;; const float Intervel_Oz = (float)GridZ / (float)value_map_z + 0.001; for(int i = idx ; i < batchSize * maxInputPoints; i += stride) { const int curBatch = i / maxInputPoints; const int curPoints = i % maxInputPoints; if(curPoints >= numValid[curBatch]) continue; const float* cur_point = points + i * cols; int bs = __float2int_rd(cur_point[0] + DELTA); int x = __float2int_rd((cur_point[1] - RangeMinX) / VoxelSizeX); int y = __float2int_rd((cur_point[2] - RangeMinY) / VoxelSizeY); int z = __float2int_rd((cur_point[3] - RangeMinZ) / VoxelSizeZ); HashEntry* entry = list + i; if(bs!=curBatch || x<0 || y<0 || z<0 || x>=GridX || y>=GridY || z>=GridZ) continue; int4 coor; coor.x = bs; coor.y = z; coor.z = y; coor.w = x; int hash_idx = bs * GridX * GridY * value_map_z + (int)(z / Intervel_Oz) * GridX * GridY + x * GridY + y; entry -> intCoor = coor; int* address = map + hash_idx; int newVal = i; int curVal = *address; int assumed; do { assumed = curVal; curVal = atomicCAS(address, assumed, newVal); } while (assumed != curVal); entry -> nextId = curVal; if(curVal == -1) { int old_num = atomicAdd(validOutputVoxels + bs, 1); if(old_num < maxOutputVoxels) map_addr[bs * maxOutputVoxels + old_num] = hash_idx; } } } __global__ void InitializeMapEntriesFp16(const __half* __restrict__ points, const int* __restrict__ numValid, int* validOutputVoxels, float RangeMinX, float RangeMinY, float RangeMinZ, float VoxelSizeX, float VoxelSizeY, float VoxelSizeZ, int GridX, int GridY, int GridZ, int* map, HashEntry* list, int batchSize, int cols, int maxInputPoints, int maxOutputVoxels, int* map_addr, const int value_map_z) { int idx = threadIdx.x + blockDim.x * blockIdx.x; int stride = gridDim.x * blockDim.x;; const float Intervel_Oz = (float)GridZ / (float)value_map_z + 0.001; for(int i = idx ; i < batchSize * maxInputPoints; i += stride) { const int curBatch = i / maxInputPoints; const int curPoints = i % maxInputPoints; if(curPoints >= numValid[curBatch]) continue; const __half* cur_point = points + i * cols; int bs = __float2int_rd(__half2float(cur_point[0]) + DELTA); int x = __float2int_rd((__half2float(cur_point[1]) - RangeMinX) / VoxelSizeX); int y = __float2int_rd((__half2float(cur_point[2]) - RangeMinY) / VoxelSizeY); int z = __float2int_rd((__half2float(cur_point[3]) - RangeMinZ) / VoxelSizeZ); HashEntry* entry = list + i; if(bs!=curBatch || x<0 || y<0 || z<0 || x>=GridX || y>=GridY || z>=GridZ) continue; int4 coor; coor.x = bs; coor.y = z; coor.z = y; coor.w = x; int hash_idx = bs * GridX * GridY * value_map_z + (int)(z / Intervel_Oz) * GridX * GridY + x * GridY + y; entry -> intCoor = coor; int* address = map + hash_idx; int newVal = i; int curVal = *address; int assumed; do { assumed = curVal; curVal = atomicCAS(address, assumed, newVal); } while (assumed != curVal); entry -> nextId = curVal; if(curVal == -1) { int old_num = atomicAdd(validOutputVoxels + 
bs, 1); if(old_num < maxOutputVoxels) map_addr[bs * maxOutputVoxels + old_num] = hash_idx; } } } void InitializeHashMap(const float* points, const int* numValid, int* validOutputVoxels, int* map, HashEntry* list, int* map_addr, int batchSize, int maxInputPoints, int maxOutputVoxels, int inCols, std::vector<float> point_cloud_range, std::vector<float> voxel_size, std::vector<int> grid_size, const int value_map_z) { int num_thread; int blockSize; // The launch configurator returned block size int minGridSize; // The minimum grid size needed to achieve the // maximum occupancy for a full device launch num_thread = batchSize * maxInputPoints; hipOccupancyMaxPotentialBlockSize(&minGridSize, &blockSize, InitializeMapEntries, 0, num_thread); minGridSize = ::min(minGridSize, DivUp(num_thread, blockSize)); hipLaunchKernelGGL(( InitializeMapEntries), dim3(minGridSize), dim3(blockSize), 0, 0, points, numValid, validOutputVoxels, point_cloud_range[0], point_cloud_range[1], point_cloud_range[2], voxel_size [0], voxel_size [1], voxel_size [2], grid_size [0], grid_size [1], grid_size [2], map, list, batchSize, inCols, maxInputPoints, maxOutputVoxels, map_addr, value_map_z); } void InitializeHashMapFp16(const __half* points, const int* numValid, int* validOutputVoxels, int* map, HashEntry* list, int* map_addr, int batchSize, int maxInputPoints, int maxOutputVoxels, int inCols, std::vector<float> point_cloud_range, std::vector<float> voxel_size, std::vector<int> grid_size, const int value_map_z) { int num_thread; int blockSize; // The launch configurator returned block size int minGridSize; // The minimum grid size needed to achieve the // maximum occupancy for a full device launch num_thread = batchSize * maxInputPoints; hipOccupancyMaxPotentialBlockSize(&minGridSize, &blockSize, InitializeMapEntriesFp16, 0, num_thread); minGridSize = ::min(minGridSize, DivUp(num_thread, blockSize)); hipLaunchKernelGGL(( InitializeMapEntriesFp16), dim3(minGridSize), dim3(blockSize), 0, 0, points, numValid, validOutputVoxels, point_cloud_range[0], point_cloud_range[1], point_cloud_range[2], voxel_size [0], voxel_size [1], voxel_size [2], grid_size [0], grid_size [1], grid_size [2], map, list, batchSize, inCols, maxInputPoints, maxOutputVoxels, map_addr, value_map_z); } }
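The map-initialization kernels above build, for each occupied voxel, a lock-free singly linked list of point indices: the hash cell stores the head index and each HashEntry stores the previous head in nextId. A minimal sketch of that push operation follows; push_point_kernel and Node are illustrative names, and atomicExch is used here as a simpler equivalent of the original atomicCAS retry loop, assuming (as in the original) that the lists are only traversed after the kernel has finished.

// Hedged sketch: push each point's index onto the head of its cell's list.
#include <cuda_runtime.h>

struct Node { int nextId; };    // stand-in for HashEntry's link field

__global__ void push_point_kernel(int* head,            // one head index per cell, initialized to -1
                                  Node* nodes,          // one node per point
                                  const int* cell_of_point,
                                  int num_points)
{
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i >= num_points) return;

    int cell = cell_of_point[i];
    // Atomically swap this point's index into the head slot; the previous
    // head (or -1) becomes this node's successor.
    int prev = atomicExch(&head[cell], i);
    nodes[i].nextId = prev;
    // prev == -1 means this thread is the first point in the voxel, which is
    // where the original kernel claims an output-voxel slot via atomicAdd.
}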
e909ae7b7cde1f9b5ae1b4fd201fd4deb2c460fc.cu
#include "gpu_hash_map.h" #include <iostream> #define DELTA 0.01 namespace NAMESPACE { __global__ void InitializeMapEntries(const float* __restrict__ points, const int* __restrict__ numValid, int* validOutputVoxels, float RangeMinX, float RangeMinY, float RangeMinZ, float VoxelSizeX, float VoxelSizeY, float VoxelSizeZ, int GridX, int GridY, int GridZ, int* map, HashEntry* list, int batchSize, int cols, int maxInputPoints, int maxOutputVoxels, int* map_addr, const int value_map_z) { int idx = threadIdx.x + blockDim.x * blockIdx.x; int stride = gridDim.x * blockDim.x;; const float Intervel_Oz = (float)GridZ / (float)value_map_z + 0.001; for(int i = idx ; i < batchSize * maxInputPoints; i += stride) { const int curBatch = i / maxInputPoints; const int curPoints = i % maxInputPoints; if(curPoints >= numValid[curBatch]) continue; const float* cur_point = points + i * cols; int bs = __float2int_rd(cur_point[0] + DELTA); int x = __float2int_rd((cur_point[1] - RangeMinX) / VoxelSizeX); int y = __float2int_rd((cur_point[2] - RangeMinY) / VoxelSizeY); int z = __float2int_rd((cur_point[3] - RangeMinZ) / VoxelSizeZ); HashEntry* entry = list + i; if(bs!=curBatch || x<0 || y<0 || z<0 || x>=GridX || y>=GridY || z>=GridZ) continue; int4 coor; coor.x = bs; coor.y = z; coor.z = y; coor.w = x; int hash_idx = bs * GridX * GridY * value_map_z + (int)(z / Intervel_Oz) * GridX * GridY + x * GridY + y; entry -> intCoor = coor; int* address = map + hash_idx; int newVal = i; int curVal = *address; int assumed; do { assumed = curVal; curVal = atomicCAS(address, assumed, newVal); } while (assumed != curVal); entry -> nextId = curVal; if(curVal == -1) { int old_num = atomicAdd(validOutputVoxels + bs, 1); if(old_num < maxOutputVoxels) map_addr[bs * maxOutputVoxels + old_num] = hash_idx; } } } __global__ void InitializeMapEntriesFp16(const __half* __restrict__ points, const int* __restrict__ numValid, int* validOutputVoxels, float RangeMinX, float RangeMinY, float RangeMinZ, float VoxelSizeX, float VoxelSizeY, float VoxelSizeZ, int GridX, int GridY, int GridZ, int* map, HashEntry* list, int batchSize, int cols, int maxInputPoints, int maxOutputVoxels, int* map_addr, const int value_map_z) { int idx = threadIdx.x + blockDim.x * blockIdx.x; int stride = gridDim.x * blockDim.x;; const float Intervel_Oz = (float)GridZ / (float)value_map_z + 0.001; for(int i = idx ; i < batchSize * maxInputPoints; i += stride) { const int curBatch = i / maxInputPoints; const int curPoints = i % maxInputPoints; if(curPoints >= numValid[curBatch]) continue; const __half* cur_point = points + i * cols; int bs = __float2int_rd(__half2float(cur_point[0]) + DELTA); int x = __float2int_rd((__half2float(cur_point[1]) - RangeMinX) / VoxelSizeX); int y = __float2int_rd((__half2float(cur_point[2]) - RangeMinY) / VoxelSizeY); int z = __float2int_rd((__half2float(cur_point[3]) - RangeMinZ) / VoxelSizeZ); HashEntry* entry = list + i; if(bs!=curBatch || x<0 || y<0 || z<0 || x>=GridX || y>=GridY || z>=GridZ) continue; int4 coor; coor.x = bs; coor.y = z; coor.z = y; coor.w = x; int hash_idx = bs * GridX * GridY * value_map_z + (int)(z / Intervel_Oz) * GridX * GridY + x * GridY + y; entry -> intCoor = coor; int* address = map + hash_idx; int newVal = i; int curVal = *address; int assumed; do { assumed = curVal; curVal = atomicCAS(address, assumed, newVal); } while (assumed != curVal); entry -> nextId = curVal; if(curVal == -1) { int old_num = atomicAdd(validOutputVoxels + bs, 1); if(old_num < maxOutputVoxels) map_addr[bs * maxOutputVoxels + old_num] = 
hash_idx; } } } void InitializeHashMap(const float* points, const int* numValid, int* validOutputVoxels, int* map, HashEntry* list, int* map_addr, int batchSize, int maxInputPoints, int maxOutputVoxels, int inCols, std::vector<float> point_cloud_range, std::vector<float> voxel_size, std::vector<int> grid_size, const int value_map_z) { int num_thread; int blockSize; // The launch configurator returned block size int minGridSize; // The minimum grid size needed to achieve the // maximum occupancy for a full device launch num_thread = batchSize * maxInputPoints; cudaOccupancyMaxPotentialBlockSize(&minGridSize, &blockSize, InitializeMapEntries, 0, num_thread); minGridSize = std::min(minGridSize, DivUp(num_thread, blockSize)); InitializeMapEntries<<<minGridSize, blockSize>>>(points, numValid, validOutputVoxels, point_cloud_range[0], point_cloud_range[1], point_cloud_range[2], voxel_size [0], voxel_size [1], voxel_size [2], grid_size [0], grid_size [1], grid_size [2], map, list, batchSize, inCols, maxInputPoints, maxOutputVoxels, map_addr, value_map_z); } void InitializeHashMapFp16(const __half* points, const int* numValid, int* validOutputVoxels, int* map, HashEntry* list, int* map_addr, int batchSize, int maxInputPoints, int maxOutputVoxels, int inCols, std::vector<float> point_cloud_range, std::vector<float> voxel_size, std::vector<int> grid_size, const int value_map_z) { int num_thread; int blockSize; // The launch configurator returned block size int minGridSize; // The minimum grid size needed to achieve the // maximum occupancy for a full device launch num_thread = batchSize * maxInputPoints; cudaOccupancyMaxPotentialBlockSize(&minGridSize, &blockSize, InitializeMapEntriesFp16, 0, num_thread); minGridSize = std::min(minGridSize, DivUp(num_thread, blockSize)); InitializeMapEntriesFp16<<<minGridSize, blockSize>>>(points, numValid, validOutputVoxels, point_cloud_range[0], point_cloud_range[1], point_cloud_range[2], voxel_size [0], voxel_size [1], voxel_size [2], grid_size [0], grid_size [1], grid_size [2], map, list, batchSize, inCols, maxInputPoints, maxOutputVoxels, map_addr, value_map_z); } }
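The host wrappers above pick their launch configuration with cudaOccupancyMaxPotentialBlockSize and then clamp the grid to what the grid-stride loop actually needs. Below is a small, self-contained sketch of that pattern; fill_kernel, launch_fill, and this DivUp are illustrative stand-ins, not the library's own code.

// Hedged sketch: occupancy-driven block size plus a clamped grid for a
// grid-stride kernel.
#include <cuda_runtime.h>
#include <algorithm>

__global__ void fill_kernel(float* x, int n, float v)
{
    int stride = gridDim.x * blockDim.x;
    for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < n; i += stride)
        x[i] = v;
}

static inline int DivUp(int a, int b) { return (a + b - 1) / b; }

void launch_fill(float* d_x, int n, float v)
{
    int blockSize   = 0;   // block size suggested by the occupancy calculator
    int minGridSize = 0;   // minimum grid size for full device occupancy

    cudaOccupancyMaxPotentialBlockSize(&minGridSize, &blockSize, fill_kernel, 0, 0);

    // A grid-stride kernel needs at most DivUp(n, blockSize) blocks.
    int grid = std::min(minGridSize, DivUp(n, blockSize));
    fill_kernel<<<grid, blockSize>>>(d_x, n, v);
}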
2dda17e0518d8c612948c98b41bc913025259ef9.hip
// !!! This is a file automatically generated by hipify!!! //#include <hip/device_functions.h> #include <hip/hip_runtime_api.h> #include "skelft.h" #include <stdio.h> // Parameters for CUDA kernel executions; more or less optimized for a 1024x1024 image. #define BLOCKX 16 #define BLOCKY 16 #define BLOCKSIZE 64 #define TILE_DIM 32 #define BLOCK_ROWS 16 //#define max(x,y) (x<y)? y : x //#define min(x,y) (x<y)? x : y /****** Global Variables *******/ const int NB = 8; // Nr buffers we use and store in the entire framework short2 **pbaTextures; // Work buffers used to compute and store resident images // 0: work buffer // 1: FT // 2: thresholded DT // 3: thresholded skeleton // 4: topology analysis // 5: work buffer for topology // 6: skeleton FT // float* pbaTexSiteParam; // Stores boundary parameterization float* curr_site_, *prev_site_, *curr_dt_, *prev_dt_; int pbaTexSize; // Texture size (squared) actually used in all computations int floodBand = 4, // Various FT computation parameters; defaults are good for an 1024x1024 image. maurerBand = 4, colorBand = 4; texture<short2> pbaTexColor; // 2D textures (bound to various buffers defined above as needed) texture<short2> pbaTexColor2; // texture<short2> pbaTexLinks; texture<float> pbaTexParam; // 1D site parameterization texture (bound to pbaTexSiteParam) texture<float> curr_site_tex, curr_dt_tex, prev_site_tex, prev_dt_tex; texture<unsigned char> pbaTexGray; // 2D texture of unsigned char values, e.g. the binary skeleton #if __CUDA_ARCH__ < 110 // We cannot use atomic intrinsics on SM10 or below. Thus, we define these as nop. #define atomicInc(a,b) 0 // The default will be that some code e.g. endpoint detection will thus not do anything. #endif /********* Kernels ********/ #include "skelftKernel.h" // Initialize necessary memory (CPU/GPU sides) // - textureSize: The max size of any image we will process until re-initialization void skelft2DInitialization(int maxTexSize) { hipDeviceProp_t devProp; hipGetDeviceProperties(&devProp,0); // Query device properties, list something about them int pbaMemSize = maxTexSize * maxTexSize * sizeof(short2); // A buffer has 2 shorts / pixel pbaTextures = (short2 **) malloc(NB * sizeof(short2*)); // We will use NB buffers for(int i=0;i<NB;++i) hipMalloc((void **) &pbaTextures[i], pbaMemSize); // Allocate work buffer 'i' hipMalloc((void **) &pbaTexSiteParam, maxTexSize * maxTexSize * sizeof(float)); // Sites texture hipMalloc((void **) &curr_site_, maxTexSize * maxTexSize * sizeof(float)); // Sites texture hipMalloc((void **) &prev_site_, maxTexSize * maxTexSize * sizeof(float)); // Sites texture hipMalloc((void **) &curr_dt_, maxTexSize * maxTexSize * sizeof(float)); // Sites texture hipMalloc((void **) &prev_dt_, maxTexSize * maxTexSize * sizeof(float)); // Sites texture } // Deallocate all allocated memory void skelft2DDeinitialization() { for(int i=0;i<NB;++i) hipFree(pbaTextures[i]); hipFree(pbaTexSiteParam); hipFree(curr_site_); hipFree(prev_site_); hipFree(curr_dt_); hipFree(prev_dt_); free(pbaTextures); } __global__ void kernelSiteParamInit(short2* inputVoro, int size) //Initialize the Voronoi textures from the sites' encoding texture (parameterization) { //REMARK: we interpret 'inputVoro' as a 2D texture, as it's much easier/faster like this int tx = blockIdx.x * blockDim.x + threadIdx.x; int ty = blockIdx.y * blockDim.y + threadIdx.y; if (tx<size && ty<size) //Careful not to go outside the image.. 
{ int i = TOID(tx,ty,size); float param = tex1Dfetch(pbaTexParam,i); //The sites-param has non-zero (parameter) values precisely on non-boundary points short2& v = inputVoro[i]; v.x = v.y = MARKER; //Non-boundary points are marked as 0 in the parameterization. Here we will compute the FT. if (param) //These are points which define the 'sites' to compute the FT/skeleton (thus, have FT==identity) { //We could use an if-then-else here, but it's faster with an if-then v.x = tx; v.y = ty; } } } void skelft2DInitializeInput(float* sites, int size) // Copy input sites from CPU to GPU; Also set up site param initialization in pbaTextures[0] { pbaTexSize = size; // Size of the actual texture being used in this run; can be smaller than the max-tex-size // which was used in skelft2DInitialization() hipMemcpy(pbaTexSiteParam, sites, pbaTexSize * pbaTexSize * sizeof(float), hipMemcpyHostToDevice); // Pass sites parameterization to CUDA. Must be done before calling the initialization // kernel, since we use the sites-param as a texture in that kernel hipBindTexture(0, pbaTexParam, pbaTexSiteParam); // Bind the sites-param as a 1D texture so we can quickly index it next dim3 block = dim3(BLOCKX,BLOCKY); dim3 grid = dim3(pbaTexSize/block.x,pbaTexSize/block.y); hipLaunchKernelGGL(( kernelSiteParamInit), dim3(grid),dim3(block), 0, 0, pbaTextures[0],pbaTexSize); // Do the site param initialization. This sets up pbaTextures[0] hipUnbindTexture(pbaTexParam); } // In-place transpose a squared texture. // Block orders are modified to optimize memory access. // Point coordinates are also swapped. void pba2DTranspose(short2 *texture) { dim3 block(TILE_DIM, BLOCK_ROWS); dim3 grid(pbaTexSize / TILE_DIM, pbaTexSize / TILE_DIM); hipBindTexture(0, pbaTexColor, texture); hipLaunchKernelGGL(( kernelTranspose), dim3(grid), dim3(block) , 0, 0, texture, pbaTexSize); hipUnbindTexture(pbaTexColor); } // Phase 1 of PBA. m1 must divides texture size void pba2DPhase1(int m1, short xm, short ym, short xM, short yM) { dim3 block = dim3(BLOCKSIZE); dim3 grid = dim3(pbaTexSize / block.x, m1); // Flood vertically in their own bands hipBindTexture(0, pbaTexColor, pbaTextures[0]); hipLaunchKernelGGL(( kernelFloodDown), dim3(grid), dim3(block) , 0, 0, pbaTextures[1], pbaTexSize, pbaTexSize / m1); hipUnbindTexture(pbaTexColor); hipBindTexture(0, pbaTexColor, pbaTextures[1]); hipLaunchKernelGGL(( kernelFloodUp), dim3(grid), dim3(block) , 0, 0, pbaTextures[1], pbaTexSize, pbaTexSize / m1); // Passing information between bands grid = dim3(pbaTexSize / block.x, m1); hipLaunchKernelGGL(( kernelPropagateInterband), dim3(grid), dim3(block) , 0, 0, pbaTextures[0], pbaTexSize, pbaTexSize / m1); hipBindTexture(0, pbaTexLinks, pbaTextures[0]); hipLaunchKernelGGL(( kernelUpdateVertical), dim3(grid), dim3(block) , 0, 0, pbaTextures[1], pbaTexSize, m1, pbaTexSize / m1); hipUnbindTexture(pbaTexLinks); hipUnbindTexture(pbaTexColor); } // Phase 2 of PBA. 
m2 must divides texture size void pba2DPhase2(int m2) { // Compute proximate points locally in each band dim3 block = dim3(BLOCKSIZE); dim3 grid = dim3(pbaTexSize / block.x, m2); hipBindTexture(0, pbaTexColor, pbaTextures[1]); hipLaunchKernelGGL(( kernelProximatePoints), dim3(grid), dim3(block) , 0, 0, pbaTextures[0], pbaTexSize, pbaTexSize / m2); hipBindTexture(0, pbaTexLinks, pbaTextures[0]); hipLaunchKernelGGL(( kernelCreateForwardPointers), dim3(grid), dim3(block) , 0, 0, pbaTextures[0], pbaTexSize, pbaTexSize / m2); // Repeatly merging two bands into one for (int noBand = m2; noBand > 1; noBand /= 2) { grid = dim3(pbaTexSize / block.x, noBand / 2); hipLaunchKernelGGL(( kernelMergeBands), dim3(grid), dim3(block) , 0, 0, pbaTextures[0], pbaTexSize, pbaTexSize / noBand); } // Replace the forward link with the X coordinate of the seed to remove // the need of looking at the other texture. We need it for coloring. grid = dim3(pbaTexSize / block.x, pbaTexSize); hipLaunchKernelGGL(( kernelDoubleToSingleList), dim3(grid), dim3(block) , 0, 0, pbaTextures[0], pbaTexSize); hipUnbindTexture(pbaTexLinks); hipUnbindTexture(pbaTexColor); } // Phase 3 of PBA. m3 must divides texture size void pba2DPhase3(int m3) { dim3 block = dim3(BLOCKSIZE / m3, m3); dim3 grid = dim3(pbaTexSize / block.x); hipBindTexture(0, pbaTexColor, pbaTextures[0]); hipLaunchKernelGGL(( kernelColor), dim3(grid), dim3(block) , 0, 0, pbaTextures[1], pbaTexSize); hipUnbindTexture(pbaTexColor); } void skel2DFTCompute(short xm, short ym, short xM, short yM, int floodBand, int maurerBand, int colorBand) { pba2DPhase1(floodBand,xm,ym,xM,yM); //Vertical sweep pba2DTranspose(pbaTextures[1]); // pba2DPhase2(maurerBand); //Horizontal coloring pba2DPhase3(colorBand); //Row coloring pba2DTranspose(pbaTextures[1]); } __global__ void kernelThresholdDT(unsigned char* output, int size, float threshold2, short xm, short ym, short xM, short yM) //Input: pbaTexColor: closest-site-ids per pixel, i.e. FT //Output: output: thresholded DT { int tx = blockIdx.x * blockDim.x + threadIdx.x; int ty = blockIdx.y * blockDim.y + threadIdx.y; if (tx>xm && ty>ym && tx<xM && ty<yM) //careful not to index outside the image.. { int id = TOID(tx, ty, size); short2 voroid = tex1Dfetch(pbaTexColor,id); //get the closest-site to tx,ty into voroid.x,.y float d2 = (tx-voroid.x)*(tx-voroid.x)+(ty-voroid.y)*(ty-voroid.y); output[id] = (d2<=threshold2); //threshold DT into binary image } } __global__ void kernelDT(short* output, int size, float threshold2, short xm, short ym, short xM, short yM) //Input: pbaTexColor: closest-site-ids per pixel, i.e. FT //Output: output: DT { int tx = blockIdx.x * blockDim.x + threadIdx.x; int ty = blockIdx.y * blockDim.y + threadIdx.y; if (tx>xm && ty>ym && tx<xM && ty<yM) //careful not to index outside the image.. 
{ int id = TOID(tx, ty, size); short2 voroid = tex1Dfetch(pbaTexColor,id); //get the closest-site to tx,ty into voroid.x,.y float d2 = (tx-voroid.x)*(tx-voroid.x)+(ty-voroid.y)*(ty-voroid.y); output[id] = sqrtf(d2); //save the Euclidean DT } } __global__ void kernelSkel(float* output, bool* fg, short xm, short ym, short xM, short yM, short size, float threshold, float length) //Input: pbaTexColor: closest-site-ids per pixel // pbaTexParam: labels for sites (only valid at site locations) { //Output: output: binary thresholded skeleton int tx = blockIdx.x * blockDim.x + threadIdx.x; int ty = blockIdx.y * blockDim.y + threadIdx.y; if (tx>xm && ty>ym && tx<xM && ty<yM) { int id = TOID(tx, ty, size); int Id = id; short2 voroid = tex1Dfetch(pbaTexColor,id); //get the closest-site to tx,ty into voroid.x,.y int id2 = TOID(voroid.x,voroid.y,size); //convert the site's coord to an index into pbaTexParam[], the site-label-texture float imp = tex1Dfetch(pbaTexParam,id2); //get the site's label float d2 = (tx-voroid.x)*(tx-voroid.x)+(ty-voroid.y)*(ty-voroid.y); float dt = sqrtf(d2); //save the Euclidean DT ++id; //TOID(tx+1,ty,size) voroid = tex1Dfetch(pbaTexColor,id); // id2 = TOID(voroid.x,voroid.y,size); // float imp_r = tex1Dfetch(pbaTexParam,id2); // id += size-1; //TOID(tx,ty+1,size) voroid = tex1Dfetch(pbaTexColor,id); // id2 = TOID(voroid.x,voroid.y,size); // float imp_u = tex1Dfetch(pbaTexParam,id2); // float imp_dx = fabsf(imp_r-imp); float imp_dy = fabsf(imp_u-imp); //float Imp = max(imp_dx,imp_dy); float Imp = (imp_dx<imp_dy) ? imp_dy : imp_dx; // importance naar salience? Imp = (Imp<fabsf(length-Imp))? Imp : fabsf(length-Imp); //importance float saliency = Imp / dt; //if (fg[id] && Imp>=threshold) output[Id] = 1; //wang.The original one is fg[id]. //By filling only 1-values, we reduce memory access somehow (writing to output[] is expensive) //if (fg[id] && saliency>=threshold) output[Id] = saliency; if (fg[id] && Imp > 2.0 && saliency>=threshold) output[Id] = saliency; //wang.The original one is fg[id]. //By filling only 1-values, we reduce memory access somehow (writing to output[] is expensive) } //WARNING: this kernel may sometimes creates 2-pixel-thick branches.. Study the AFMM original code to see if this is correct. 
} #define X 1 __constant__ const //REMARK: put following constants (for kernelTopology) in CUDA constant-memory, as this gives a huge speed difference unsigned char topo_patterns[][9] = { {0,0,0, //These are the 3x3 templates that we use to detect skeleton endpoints 0,X,0, //(with four 90-degree rotations for each) 0,X,0}, {0,0,0, 0,X,0, 0,0,X}, {0,0,0, 0,X,0, 0,X,X}, {0,0,0, 0,X,0, X,X,0} }; #define topo_NPATTERNS 4 //Number of patterns we try to match (for kernelTopology) //REMARK: #define faster than __constant__ __constant__ const unsigned char topo_rot[][9] = { {0,1,2,3,4,5,6,7,8}, {2,5,8,1,4,7,0,3,6}, {8,7,6,5,4,3,2,1,0}, {6,3,0,7,4,1,8,5,2} }; //These encode the four 90-degree rotations of the patterns (for kernelTopology); __device__ unsigned int topo_gc = 0; __device__ unsigned int topo_gc_last = 0; __global__ void kernelTopology(unsigned char* output, short2* output_set, short xm, short ym, short xM, short yM, short size, int maxpts) { const int tx = blockIdx.x * blockDim.x + threadIdx.x; const int ty = blockIdx.y * blockDim.y + threadIdx.y; unsigned char t[9]; if (tx>xm && ty>ym && tx<xM-1 && ty<yM-1) //careful not to index outside the image; take into account the template size too { int id = TOID(tx, ty, size); unsigned char p = tex1Dfetch(pbaTexGray,id); //get the skeleton pixel at tx,ty if (p) //if the pixel isn't skeleton, nothing to do { unsigned char idx=0; for(int j=ty-1;j<=ty+1;++j) //read the template into t[] for easier use { int id = TOID(tx-1, j, size); for(int i=0;i<=2;++i,++id,++idx) t[idx] = tex1Dfetch(pbaTexGray,id); //get the 3x3 template centered at the skel point tx,ty } for(unsigned char r=0;r<4;++r) //try to match all rotations of a pattern: { const unsigned char* rr = topo_rot[r]; for(unsigned char p=0;p<topo_NPATTERNS;++p) //try to match all patterns: { const unsigned char* pat = topo_patterns[p]; unsigned char j = (p==0)? 0 : 7; //Speedup: for all patterns except 1st, check only last 3 entries, the first 6 are identical for all patterns for(;j<9;++j) //try to match rotated pattern vs actual pattern if (pat[j]!=t[rr[j]]) break; //this rotation failed if (j<6) break; //Speedup: if we have a mismatch on the 1st 6 pattern entries, then none of the patterns can match // since all templates have the same first 6 entries. if (j==9) //this rotation succeeded: mark the pixel as a topology event and we're done { int crt_gc = atomicInc(&topo_gc,maxpts); //REMARK: this serializes (compacts) all detected endpoints in one array. output_set[crt_gc] = make_short2(tx,ty); //To do this, we use an atomic read-increment-return on a global counter, //which is guaranteed to give all threads unique consecutive indexes in the array. output[id] = 1; //Also create the topology image return; } } } } } else //Last thread: add zero-marker to the output point-set, so the reader knows how many points are really in there if (tx==xM-1 && ty==yM-1) //Also reset the global vector counter topo_gc, for the next parallel-run of this function { topo_gc_last = topo_gc; topo_gc = 0; } //We do this in the last thread so that no one modifies topo_gc from now on. //REMARK: this seems to be the only way I can read a __device__ variable back to the CPU } void skelft2DParams(int floodBand_, int maurerBand_, int colorBand_) //Set up some params of the FT algorithm { floodBand = floodBand_; maurerBand = maurerBand_; colorBand = colorBand_; } // Compute 2D FT / Voronoi diagram of a set of sites // siteParam: Site parameterization. 0 = non-site points; >0 = site parameter value. // output: FT. 
The (x,y) at (i,j) are the coords of the closest site to (i,j) // size: Texture size (pow 2) void skelft2DFT(short* output, float* siteParam, short xm, short ym, short xM, short yM, int size) { skelft2DInitializeInput(siteParam,size); // Initialization of already-allocated data structures skel2DFTCompute(xm, ym, xM, yM, floodBand, maurerBand, colorBand); // Compute FT // Copy FT to CPU, if required if (output) hipMemcpy(output, pbaTextures[1], size*size*sizeof(short2), hipMemcpyDeviceToHost); } __global__ void Interpolation(float* output, int size, int curr_bound_value, int prev_bound_value, bool firstL, int last_layer) //Initialize the Voronoi textures from the sites' encoding texture (parameterization) { //REMARK: we interpret 'inputVoro' as a 2D texture, as it's much easier/faster like this int tx = blockIdx.x * blockDim.x + threadIdx.x; int ty = blockIdx.y * blockDim.y + threadIdx.y; if (tx<size && ty<size) //Careful not to go outside the image.. { int i = TOID(tx,ty,size); float curr_val = tex1Dfetch(curr_site_tex,i); float prev_val = tex1Dfetch(prev_site_tex,i); float curr_dt = tex1Dfetch(curr_dt_tex,i); float prev_dt = tex1Dfetch(prev_dt_tex,i); if(firstL){ output[i] = prev_bound_value;//clear_color } else if(last_layer){ if (curr_val) //These are points which define the 'sites' to compute the FT/skeleton (thus, have FT==identity) { float interp_last_layer_value = prev_bound_value + (curr_dt/10.0); int MaxIntensity = (last_layer+10) > 255 ? 255 : (last_layer+10); output[i] = (interp_last_layer_value > MaxIntensity) ? MaxIntensity : interp_last_layer_value; } } else{ if (!curr_val && prev_val) // If there are pixels active between boundaries, we smoothly interpolate them //These are points which define the 'sites' to compute the FT/skeleton (thus, have FT==identity) { float interp_alpha = prev_dt / ( prev_dt + curr_dt); float interp_color = curr_bound_value * interp_alpha + prev_bound_value * (1 - interp_alpha); output[i] = interp_color; } } } } void Interp(float* output, float* curr_site, float* prev_site, float* curr_dt, float* prev_dt, int curr_bound_value, int prev_bound_value, int fboSize, bool firstL, int last_layer) { pbaTexSize = fboSize; hipMemcpy(curr_site_, curr_site, pbaTexSize * pbaTexSize * sizeof(float), hipMemcpyHostToDevice); hipMemcpy(prev_site_, prev_site, pbaTexSize * pbaTexSize * sizeof(float), hipMemcpyHostToDevice); hipMemcpy(curr_dt_, curr_dt, pbaTexSize * pbaTexSize * sizeof(float), hipMemcpyHostToDevice); hipMemcpy(prev_dt_, prev_dt, pbaTexSize * pbaTexSize * sizeof(float), hipMemcpyHostToDevice); hipBindTexture(0, curr_site_tex, curr_site_); hipBindTexture(0, prev_site_tex, prev_site_); hipBindTexture(0, curr_dt_tex, curr_dt_); hipBindTexture(0, prev_dt_tex, prev_dt_); dim3 block = dim3(BLOCKX,BLOCKY); dim3 grid = dim3(pbaTexSize/block.x,pbaTexSize/block.y); hipLaunchKernelGGL(( Interpolation), dim3(grid),dim3(block), 0, 0, (float*)pbaTextures[7], pbaTexSize, curr_bound_value, prev_bound_value, firstL, last_layer); hipUnbindTexture(curr_site_tex); hipUnbindTexture(prev_site_tex); hipUnbindTexture(curr_dt_tex); hipUnbindTexture(prev_dt_tex); //Copy to CPU hipMemcpy(output, pbaTextures[7], pbaTexSize * pbaTexSize * sizeof(float), hipMemcpyDeviceToHost); } void skelft2DDT(short* outputDT, float threshold, //Compute (thresholded) DT (into pbaTextures[2]) from resident FT (in pbaTextures[1]) short xm, short ym, short xM, short yM) { dim3 block = dim3(BLOCKX,BLOCKY); dim3 grid = dim3(pbaTexSize/block.x,pbaTexSize/block.y); hipBindTexture(0, pbaTexColor, 
pbaTextures[1]); //Used to read the FT from if (threshold>=0) { xm -= threshold; if (xm<0) xm=0; ym -= threshold; if (ym<0) ym=0; xM += threshold; if (xM>pbaTexSize-1) xM=pbaTexSize-1; yM += threshold; if (yM>pbaTexSize-1) yM=pbaTexSize-1; hipLaunchKernelGGL(( kernelThresholdDT), dim3(grid), dim3(block) , 0, 0, (unsigned char*)pbaTextures[2], pbaTexSize, threshold*threshold, xm-1, ym-1, xM+1, yM+1); hipUnbindTexture(pbaTexColor); //Copy thresholded image to CPU if (outputDT) hipMemcpy(outputDT, (unsigned char*)pbaTextures[2], pbaTexSize * pbaTexSize * sizeof(unsigned char), hipMemcpyDeviceToHost); } else { xm = ym = 0; xM = yM = pbaTexSize-1; hipLaunchKernelGGL(( kernelDT) , dim3(grid), dim3(block) , 0, 0, (short*)pbaTextures[2], pbaTexSize, threshold*threshold, xm-1, ym-1, xM+1, yM+1); hipUnbindTexture(pbaTexColor); //Copy thresholded image to CPU if (outputDT) hipMemcpy(outputDT, pbaTextures[2], pbaTexSize * pbaTexSize * sizeof(short), hipMemcpyDeviceToHost); } } void skelft2DSkeleton(float* outputSkel, bool* fg, float length, float threshold, //Compute thresholded skeleton (into pbaTextures[3]) from resident FT (in pbaTextures[1]) short xm,short ym,short xM,short yM) { //length: boundary length dim3 block = dim3(BLOCKX,BLOCKY); //threshold: skeleton importance min-value (below this, we ignore branches) dim3 grid = dim3(pbaTexSize/block.x,pbaTexSize/block.y); hipBindTexture(0, pbaTexColor, pbaTextures[1]); //Used to read the resident FT hipBindTexture(0, pbaTexParam, pbaTexSiteParam); //Used to read the resident boundary parameterization hipMemset(pbaTextures[3],0,sizeof(float)*pbaTexSize*pbaTexSize); //Faster to zero result and then fill only 1-values (see kernel) hipLaunchKernelGGL(( kernelSkel), dim3(grid), dim3(block) , 0, 0, (float*)pbaTextures[3], fg, xm, ym, xM-1, yM-1, pbaTexSize, threshold, length); hipUnbindTexture(pbaTexColor); hipUnbindTexture(pbaTexParam); //Copy skeleton to CPU if (outputSkel) hipMemcpy(outputSkel, pbaTextures[3], pbaTexSize * pbaTexSize * sizeof(float), hipMemcpyDeviceToHost); } void skelft2DTopology(unsigned char* outputTopo, int* npts, short* outputPoints, //Compute topology-points of the resident skeleton (in pbaTextures[3]) short xm,short ym,short xM,short yM) { int maxpts = (npts)? *npts : pbaTexSize*pbaTexSize; //This is the max # topo-points we are going to return in outputPoints[] dim3 block = dim3(BLOCKX,BLOCKY); dim3 grid = dim3(pbaTexSize/block.x,pbaTexSize/block.y); hipBindTexture(0, pbaTexGray, pbaTextures[3]); //Used to read the resident skeleton hipMemset(pbaTextures[4],0,sizeof(unsigned char)*pbaTexSize*pbaTexSize); //Faster to zero result and then fill only 1-values (see kernel) unsigned int zero = 0; hipMemcpyToSymbol(topo_gc,&zero,sizeof(unsigned int),0,hipMemcpyHostToDevice); //Set topo_gc to 0 hipLaunchKernelGGL(( kernelTopology), dim3(grid), dim3(block) , 0, 0, (unsigned char*)pbaTextures[4], pbaTextures[5], xm, ym, xM, yM, pbaTexSize, maxpts+1); hipUnbindTexture(pbaTexGray); if (outputPoints && maxpts) //If output-point vector desired, copy the end-points, put in pbaTexture[5] as a vector of short2's, { //into caller space. We copy only 'maxpts' elements, as the user instructed us. 
unsigned int num_pts; hipMemcpyFromSymbol(&num_pts,topo_gc_last,sizeof(unsigned int),0,hipMemcpyDeviceToHost); //Get #topo-points we have detected from the device-var from CUDA if (npts && num_pts) //Copy the topo-points to caller hipMemcpy(outputPoints,pbaTextures[5],num_pts*sizeof(short2),hipMemcpyDeviceToHost); if (npts) *npts = num_pts; //Return #detected topo-points to caller } if (outputTopo) //If topology image desired, copy it into user space hipMemcpy(outputTopo,pbaTextures[4],pbaTexSize*pbaTexSize*sizeof(unsigned char), hipMemcpyDeviceToHost); } __global__ void kernelSiteFromSkeleton(short2* outputSites, int size) //Initialize the Voronoi textures from the sites' encoding texture (parameterization) { //REMARK: we interpret 'inputVoro' as a 2D texture, as it's much easier/faster like this int tx = blockIdx.x * blockDim.x + threadIdx.x; int ty = blockIdx.y * blockDim.y + threadIdx.y; if (tx<size && ty<size) //Careful not to go outside the image.. { int i = TOID(tx,ty,size); unsigned char param = tex1Dfetch(pbaTexGray,i); //The sites-param has non-zero (parameter) values precisely on non-boundary points short2& v = outputSites[i]; v.x = v.y = MARKER; //Non-boundary points are marked as 0 in the parameterization. Here we will compute the FT. if (param) //These are points which define the 'sites' to compute the FT/skeleton (thus, have FT==identity) { //We could use an if-then-else here, but it's faster with an if-then v.x = tx; v.y = ty; } } } __global__ void kernelSkelInterpolate(float* output, int size) { int tx = blockIdx.x * blockDim.x + threadIdx.x; int ty = blockIdx.y * blockDim.y + threadIdx.y; if (tx<size && ty<size) //Careful not to go outside the image.. { int id = TOID(tx, ty, size); short2 vid = tex1Dfetch(pbaTexColor,id); float T = sqrtf((tx-vid.x)*(tx-vid.x)+(ty-vid.y)*(ty-vid.y)); short2 vid2 = tex1Dfetch(pbaTexColor2,id); float D = sqrtf((tx-vid2.x)*(tx-vid2.x)+(ty-vid2.y)*(ty-vid2.y)); //float B = ((D)? min(T/2/D,0.5f):0.5) + 0.5*((T)? max(1-D/T,0.0f):0); float B = ((D)? (T/2/D<0.5f)? T/2/D : 0.5f : 0.5) + 0.5*((T)? (1-D/T<0.0f)? 0.0f : (1-D/T):0); output[id] = B; } } void skel2DSkeletonDT(float* outputSkelDT,short xm,short ym,short xM,short yM) { dim3 block = dim3(BLOCKX,BLOCKY); dim3 grid = dim3(pbaTexSize/block.x,pbaTexSize/block.y); hipBindTexture(0,pbaTexGray,pbaTextures[3]); //Used to read the resident binary skeleton hipLaunchKernelGGL(( kernelSiteFromSkeleton), dim3(grid),dim3(block), 0, 0, pbaTextures[0],pbaTexSize); //1. Init pbaTextures[0] with sites on skeleton i.e. from pbaTexGray hipUnbindTexture(pbaTexGray); //!!Must first save pbaTextures[1] since we may need it later.. hipMemcpy(pbaTextures[5],pbaTextures[1],pbaTexSize*pbaTexSize*sizeof(short2),hipMemcpyDeviceToDevice); skel2DFTCompute(xm, ym, xM, yM, floodBand, maurerBand, colorBand); //2. 
Compute FT of the skeleton into pbaTextures[6] hipMemcpy(pbaTextures[6],pbaTextures[1],pbaTexSize*pbaTexSize*sizeof(short2),hipMemcpyDeviceToDevice); hipMemcpy(pbaTextures[1],pbaTextures[5],pbaTexSize*pbaTexSize*sizeof(short2),hipMemcpyDeviceToDevice); //Compute interpolation hipBindTexture(0,pbaTexColor,pbaTextures[1]); // FT of boundary hipBindTexture(0,pbaTexColor2,pbaTextures[6]); // FT of skeleton hipLaunchKernelGGL(( kernelSkelInterpolate), dim3(grid),dim3(block), 0, 0, (float*)pbaTextures[0],pbaTexSize); hipUnbindTexture(pbaTexColor); hipUnbindTexture(pbaTexColor2); if (outputSkelDT) hipMemcpy(outputSkelDT, pbaTextures[0], pbaTexSize * pbaTexSize * sizeof(float), hipMemcpyDeviceToHost); } void skel2DSkeletonFT(short* outputSkelFT,short xm,short ym,short xM,short yM) { dim3 block = dim3(BLOCKX,BLOCKY); dim3 grid = dim3(pbaTexSize/block.x,pbaTexSize/block.y); hipBindTexture(0,pbaTexGray,pbaTextures[3]); //Used to read the resident binary skeleton hipLaunchKernelGGL(( kernelSiteFromSkeleton), dim3(grid),dim3(block), 0, 0, pbaTextures[0],pbaTexSize); //1. Init pbaTextures[0] with sites on skeleton i.e. from pbaTexGray hipUnbindTexture(pbaTexGray); //!!Must first save pbaTextures[1] since we may need it later.. hipMemcpy(pbaTextures[5],pbaTextures[1],pbaTexSize*pbaTexSize*sizeof(short2),hipMemcpyDeviceToDevice); skel2DFTCompute(xm, ym, xM, yM, floodBand, maurerBand, colorBand); //2. Compute FT of the skeleton into pbaTextures[6] hipMemcpy(pbaTextures[6],pbaTextures[1],pbaTexSize*pbaTexSize*sizeof(short2),hipMemcpyDeviceToDevice); if (outputSkelFT) hipMemcpy(outputSkelFT, pbaTextures[6], pbaTexSize * pbaTexSize * sizeof(short2), hipMemcpyDeviceToHost); } __device__ bool fill_gc; //Indicates if a fill-sweep did fill anything or not __global__ void kernelFill(unsigned char* output, int size, unsigned char bg, unsigned char fg, short xm, short ym, short xM, short yM, bool ne) { int tx = blockIdx.x * blockDim.x + threadIdx.x; int ty = blockIdx.y * blockDim.y + threadIdx.y; if (tx>xm && ty>ym && tx<xM && ty<yM) //careful not to index outside the image.. { int id0 = TOID(tx, ty, size); unsigned char val = tex1Dfetch(pbaTexGray,id0); // if (val==fg) //do we have a filled pixel? Then fill all to left/top/up/bottom of it which is background { bool fill = false; int id = id0; if (ne) //fill in north-east direction: { for(short x=tx+1;x<xM;++x) //REMARK: here and below, the interesting thing is that it's faster, by about 10-15%, to fill a whole { // scanline rather than oly until the current block's borders (+1). 
The reason is that filling a whole // scanline decreases the total #sweeps, which seems to be the limiting speed factor if (tex1Dfetch(pbaTexGray,++id)!=bg) break; output[id] = fg; fill = true; } id = id0; for(short y=ty-1;y>ym;--y) { if (tex1Dfetch(pbaTexGray,id-=size)!=bg) break; output[id] = fg; fill = true; } } else //fill in south-west direction: { for(short x=tx-1;x>xm;--x) { if (tex1Dfetch(pbaTexGray,--id)!=bg) break; output[id] = fg; fill = true; } id = id0; for(short y=ty+1;y<yM;++y) { if (tex1Dfetch(pbaTexGray,id+=size)!=bg) break; output[id] = fg; fill = true; } } if (fill) fill_gc = true; //if we filled anything, inform caller; we 'gather' this info from a local var into the //global var here, since it's faster than writing the global var in the for loops } } } __global__ void kernelFillHoles(unsigned char* output, int size, unsigned char bg, unsigned char fg, unsigned char fill_fg) { int tx = blockIdx.x * blockDim.x + threadIdx.x; int ty = blockIdx.y * blockDim.y + threadIdx.y; if (tx>=0 && ty>=0 && tx<size && ty<size) //careful not to index outside the image.. { int id = TOID(tx, ty, size); unsigned char val = tex1Dfetch(pbaTexGray,id); // if (val==fill_fg) output[id] = bg; else if (val==bg) output[id] = fg; } } int skelft2DFill(unsigned char* outputFill, short sx, short sy, short xm, short ym, short xM, short yM, unsigned char fill_value) { dim3 block = dim3(BLOCKX,BLOCKY); dim3 grid = dim3(pbaTexSize/block.x,pbaTexSize/block.y); unsigned char background; int id = sy * pbaTexSize + sx; hipMemcpy(&background,(unsigned char*)pbaTextures[2]+id,sizeof(unsigned char),hipMemcpyDeviceToHost); //See which is the value we have to fill from (sx,sy) hipMemset(((unsigned char*)pbaTextures[2])+id,fill_value,sizeof(unsigned char)); //Fill the seed (x,y) on the GPU hipBindTexture(0, pbaTexGray, pbaTextures[2]); //Used to read the thresholded DT int iter=0; bool xy = true; //Direction of filling for current sweep: either north-east or south-west //This kind of balances the memory-accesses nicely over kernel calls for(;;++iter,xy=!xy) //Keep filling a sweep at a time until we have no background pixels anymore { bool filled = false; //Initialize flag: we didn't fill anything in this sweep hipMemcpyToSymbol(fill_gc,&filled,sizeof(bool),0,hipMemcpyHostToDevice); //Pass flag to CUDA hipLaunchKernelGGL(( kernelFill), dim3(grid), dim3(block), 0, 0, (unsigned char*)pbaTextures[2],pbaTexSize,background,fill_value,xm,ym,xM,yM,xy); //One fill sweep hipMemcpyFromSymbol(&filled,fill_gc,sizeof(bool),0,hipMemcpyDeviceToHost); //See if we filled anything in this sweep if (!filled) break; //Nothing filled? 
Then we're done, the image didn't change } hipUnbindTexture(pbaTexGray); if (outputFill) hipMemcpy(outputFill, (unsigned char*)pbaTextures[2], pbaTexSize * pbaTexSize * sizeof(unsigned char), hipMemcpyDeviceToHost); return iter; //Return #iterations done for the fill - useful as a performance measure for caller } int skelft2DFillHoles(unsigned char* outputFill, short sx, short sy, unsigned char foreground) { unsigned char background; unsigned char fill_value = 128; int id = sy * pbaTexSize + sx; hipMemcpy(&background,(unsigned char*)pbaTextures[2]+id,sizeof(unsigned char),hipMemcpyDeviceToHost); //See which is the value at (sx,sy) int iter = skelft2DFill(0,sx,sy,0,0,pbaTexSize,pbaTexSize,fill_value); //First, fill the background surrounding the image with some special value dim3 block = dim3(BLOCKX,BLOCKY); dim3 grid = dim3(pbaTexSize/block.x,pbaTexSize/block.y); hipBindTexture(0, pbaTexGray, pbaTextures[2]); //Used to read the thresholded DT hipLaunchKernelGGL(( kernelFillHoles), dim3(grid), dim3(block), 0, 0, (unsigned char*)pbaTextures[2],pbaTexSize,background,foreground,fill_value); hipUnbindTexture(pbaTexGray); if (outputFill) hipMemcpy(outputFill, (unsigned char*)pbaTextures[2], pbaTexSize * pbaTexSize * sizeof(unsigned char), hipMemcpyDeviceToHost); return iter; }
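The fill routine above repeats sweep kernels until a sweep changes nothing, using a __device__ flag copied to and from the host around each launch. A minimal sketch of that repeat-until-fixpoint pattern follows; grow_kernel and its toy growth rule are illustrative stand-ins for the real fill sweep, and the image size is assumed to be a multiple of the block dimensions, as in the original code.

// Hedged sketch: device-side "changed" flag driving a host fixpoint loop.
#include <cuda_runtime.h>

__device__ bool g_changed;

__global__ void grow_kernel(unsigned char* img, int size)
{
    int x = blockIdx.x * blockDim.x + threadIdx.x;
    int y = blockIdx.y * blockDim.y + threadIdx.y;
    if (x <= 0 || y <= 0 || x >= size - 1 || y >= size - 1) return;

    int id = y * size + x;
    // Toy rule: a background pixel next to a filled left/top neighbour gets
    // filled. Reads of neighbours written in the same sweep are racy and only
    // affect how many sweeps are needed, not the final result.
    if (img[id] == 0 && (img[id - 1] == 255 || img[id - size] == 255)) {
        img[id] = 255;
        g_changed = true;   // benign race: every writer stores 'true'
    }
}

void grow_to_fixpoint(unsigned char* d_img, int size)
{
    dim3 block(16, 16);
    dim3 grid(size / block.x, size / block.y);
    for (;;) {
        bool changed = false;
        cudaMemcpyToSymbol(g_changed, &changed, sizeof(bool));
        grow_kernel<<<grid, block>>>(d_img, size);
        cudaMemcpyFromSymbol(&changed, g_changed, sizeof(bool));
        if (!changed) break;   // no pixel changed in this sweep: done
    }
}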
2dda17e0518d8c612948c98b41bc913025259ef9.cu
//#include <device_functions.h> #include <cuda_runtime_api.h> #include "skelft.h" #include <stdio.h> // Parameters for CUDA kernel executions; more or less optimized for a 1024x1024 image. #define BLOCKX 16 #define BLOCKY 16 #define BLOCKSIZE 64 #define TILE_DIM 32 #define BLOCK_ROWS 16 //#define max(x,y) (x<y)? y : x //#define min(x,y) (x<y)? x : y /****** Global Variables *******/ const int NB = 8; // Nr buffers we use and store in the entire framework short2 **pbaTextures; // Work buffers used to compute and store resident images // 0: work buffer // 1: FT // 2: thresholded DT // 3: thresholded skeleton // 4: topology analysis // 5: work buffer for topology // 6: skeleton FT // float* pbaTexSiteParam; // Stores boundary parameterization float* curr_site_, *prev_site_, *curr_dt_, *prev_dt_; int pbaTexSize; // Texture size (squared) actually used in all computations int floodBand = 4, // Various FT computation parameters; defaults are good for an 1024x1024 image. maurerBand = 4, colorBand = 4; texture<short2> pbaTexColor; // 2D textures (bound to various buffers defined above as needed) texture<short2> pbaTexColor2; // texture<short2> pbaTexLinks; texture<float> pbaTexParam; // 1D site parameterization texture (bound to pbaTexSiteParam) texture<float> curr_site_tex, curr_dt_tex, prev_site_tex, prev_dt_tex; texture<unsigned char> pbaTexGray; // 2D texture of unsigned char values, e.g. the binary skeleton #if __CUDA_ARCH__ < 110 // We cannot use atomic intrinsics on SM10 or below. Thus, we define these as nop. #define atomicInc(a,b) 0 // The default will be that some code e.g. endpoint detection will thus not do anything. #endif /********* Kernels ********/ #include "skelftKernel.h" // Initialize necessary memory (CPU/GPU sides) // - textureSize: The max size of any image we will process until re-initialization void skelft2DInitialization(int maxTexSize) { cudaDeviceProp devProp; cudaGetDeviceProperties(&devProp,0); // Query device properties, list something about them int pbaMemSize = maxTexSize * maxTexSize * sizeof(short2); // A buffer has 2 shorts / pixel pbaTextures = (short2 **) malloc(NB * sizeof(short2*)); // We will use NB buffers for(int i=0;i<NB;++i) cudaMalloc((void **) &pbaTextures[i], pbaMemSize); // Allocate work buffer 'i' cudaMalloc((void **) &pbaTexSiteParam, maxTexSize * maxTexSize * sizeof(float)); // Sites texture cudaMalloc((void **) &curr_site_, maxTexSize * maxTexSize * sizeof(float)); // Sites texture cudaMalloc((void **) &prev_site_, maxTexSize * maxTexSize * sizeof(float)); // Sites texture cudaMalloc((void **) &curr_dt_, maxTexSize * maxTexSize * sizeof(float)); // Sites texture cudaMalloc((void **) &prev_dt_, maxTexSize * maxTexSize * sizeof(float)); // Sites texture } // Deallocate all allocated memory void skelft2DDeinitialization() { for(int i=0;i<NB;++i) cudaFree(pbaTextures[i]); cudaFree(pbaTexSiteParam); cudaFree(curr_site_); cudaFree(prev_site_); cudaFree(curr_dt_); cudaFree(prev_dt_); free(pbaTextures); } __global__ void kernelSiteParamInit(short2* inputVoro, int size) //Initialize the Voronoi textures from the sites' encoding texture (parameterization) { //REMARK: we interpret 'inputVoro' as a 2D texture, as it's much easier/faster like this int tx = blockIdx.x * blockDim.x + threadIdx.x; int ty = blockIdx.y * blockDim.y + threadIdx.y; if (tx<size && ty<size) //Careful not to go outside the image.. 
{ int i = TOID(tx,ty,size); float param = tex1Dfetch(pbaTexParam,i); //The sites-param has non-zero (parameter) values precisely on non-boundary points short2& v = inputVoro[i]; v.x = v.y = MARKER; //Non-boundary points are marked as 0 in the parameterization. Here we will compute the FT. if (param) //These are points which define the 'sites' to compute the FT/skeleton (thus, have FT==identity) { //We could use an if-then-else here, but it's faster with an if-then v.x = tx; v.y = ty; } } } void skelft2DInitializeInput(float* sites, int size) // Copy input sites from CPU to GPU; Also set up site param initialization in pbaTextures[0] { pbaTexSize = size; // Size of the actual texture being used in this run; can be smaller than the max-tex-size // which was used in skelft2DInitialization() cudaMemcpy(pbaTexSiteParam, sites, pbaTexSize * pbaTexSize * sizeof(float), cudaMemcpyHostToDevice); // Pass sites parameterization to CUDA. Must be done before calling the initialization // kernel, since we use the sites-param as a texture in that kernel cudaBindTexture(0, pbaTexParam, pbaTexSiteParam); // Bind the sites-param as a 1D texture so we can quickly index it next dim3 block = dim3(BLOCKX,BLOCKY); dim3 grid = dim3(pbaTexSize/block.x,pbaTexSize/block.y); kernelSiteParamInit<<<grid,block>>>(pbaTextures[0],pbaTexSize); // Do the site param initialization. This sets up pbaTextures[0] cudaUnbindTexture(pbaTexParam); } // In-place transpose a squared texture. // Block orders are modified to optimize memory access. // Point coordinates are also swapped. void pba2DTranspose(short2 *texture) { dim3 block(TILE_DIM, BLOCK_ROWS); dim3 grid(pbaTexSize / TILE_DIM, pbaTexSize / TILE_DIM); cudaBindTexture(0, pbaTexColor, texture); kernelTranspose<<< grid, block >>>(texture, pbaTexSize); cudaUnbindTexture(pbaTexColor); } // Phase 1 of PBA. m1 must divides texture size void pba2DPhase1(int m1, short xm, short ym, short xM, short yM) { dim3 block = dim3(BLOCKSIZE); dim3 grid = dim3(pbaTexSize / block.x, m1); // Flood vertically in their own bands cudaBindTexture(0, pbaTexColor, pbaTextures[0]); kernelFloodDown<<< grid, block >>>(pbaTextures[1], pbaTexSize, pbaTexSize / m1); cudaUnbindTexture(pbaTexColor); cudaBindTexture(0, pbaTexColor, pbaTextures[1]); kernelFloodUp<<< grid, block >>>(pbaTextures[1], pbaTexSize, pbaTexSize / m1); // Passing information between bands grid = dim3(pbaTexSize / block.x, m1); kernelPropagateInterband<<< grid, block >>>(pbaTextures[0], pbaTexSize, pbaTexSize / m1); cudaBindTexture(0, pbaTexLinks, pbaTextures[0]); kernelUpdateVertical<<< grid, block >>>(pbaTextures[1], pbaTexSize, m1, pbaTexSize / m1); cudaUnbindTexture(pbaTexLinks); cudaUnbindTexture(pbaTexColor); } // Phase 2 of PBA. m2 must divides texture size void pba2DPhase2(int m2) { // Compute proximate points locally in each band dim3 block = dim3(BLOCKSIZE); dim3 grid = dim3(pbaTexSize / block.x, m2); cudaBindTexture(0, pbaTexColor, pbaTextures[1]); kernelProximatePoints<<< grid, block >>>(pbaTextures[0], pbaTexSize, pbaTexSize / m2); cudaBindTexture(0, pbaTexLinks, pbaTextures[0]); kernelCreateForwardPointers<<< grid, block >>>(pbaTextures[0], pbaTexSize, pbaTexSize / m2); // Repeatly merging two bands into one for (int noBand = m2; noBand > 1; noBand /= 2) { grid = dim3(pbaTexSize / block.x, noBand / 2); kernelMergeBands<<< grid, block >>>(pbaTextures[0], pbaTexSize, pbaTexSize / noBand); } // Replace the forward link with the X coordinate of the seed to remove // the need of looking at the other texture. 
We need it for coloring. grid = dim3(pbaTexSize / block.x, pbaTexSize); kernelDoubleToSingleList<<< grid, block >>>(pbaTextures[0], pbaTexSize); cudaUnbindTexture(pbaTexLinks); cudaUnbindTexture(pbaTexColor); } // Phase 3 of PBA. m3 must divides texture size void pba2DPhase3(int m3) { dim3 block = dim3(BLOCKSIZE / m3, m3); dim3 grid = dim3(pbaTexSize / block.x); cudaBindTexture(0, pbaTexColor, pbaTextures[0]); kernelColor<<< grid, block >>>(pbaTextures[1], pbaTexSize); cudaUnbindTexture(pbaTexColor); } void skel2DFTCompute(short xm, short ym, short xM, short yM, int floodBand, int maurerBand, int colorBand) { pba2DPhase1(floodBand,xm,ym,xM,yM); //Vertical sweep pba2DTranspose(pbaTextures[1]); // pba2DPhase2(maurerBand); //Horizontal coloring pba2DPhase3(colorBand); //Row coloring pba2DTranspose(pbaTextures[1]); } __global__ void kernelThresholdDT(unsigned char* output, int size, float threshold2, short xm, short ym, short xM, short yM) //Input: pbaTexColor: closest-site-ids per pixel, i.e. FT //Output: output: thresholded DT { int tx = blockIdx.x * blockDim.x + threadIdx.x; int ty = blockIdx.y * blockDim.y + threadIdx.y; if (tx>xm && ty>ym && tx<xM && ty<yM) //careful not to index outside the image.. { int id = TOID(tx, ty, size); short2 voroid = tex1Dfetch(pbaTexColor,id); //get the closest-site to tx,ty into voroid.x,.y float d2 = (tx-voroid.x)*(tx-voroid.x)+(ty-voroid.y)*(ty-voroid.y); output[id] = (d2<=threshold2); //threshold DT into binary image } } __global__ void kernelDT(short* output, int size, float threshold2, short xm, short ym, short xM, short yM) //Input: pbaTexColor: closest-site-ids per pixel, i.e. FT //Output: output: DT { int tx = blockIdx.x * blockDim.x + threadIdx.x; int ty = blockIdx.y * blockDim.y + threadIdx.y; if (tx>xm && ty>ym && tx<xM && ty<yM) //careful not to index outside the image.. { int id = TOID(tx, ty, size); short2 voroid = tex1Dfetch(pbaTexColor,id); //get the closest-site to tx,ty into voroid.x,.y float d2 = (tx-voroid.x)*(tx-voroid.x)+(ty-voroid.y)*(ty-voroid.y); output[id] = sqrtf(d2); //save the Euclidean DT } } __global__ void kernelSkel(float* output, bool* fg, short xm, short ym, short xM, short yM, short size, float threshold, float length) //Input: pbaTexColor: closest-site-ids per pixel // pbaTexParam: labels for sites (only valid at site locations) { //Output: output: binary thresholded skeleton int tx = blockIdx.x * blockDim.x + threadIdx.x; int ty = blockIdx.y * blockDim.y + threadIdx.y; if (tx>xm && ty>ym && tx<xM && ty<yM) { int id = TOID(tx, ty, size); int Id = id; short2 voroid = tex1Dfetch(pbaTexColor,id); //get the closest-site to tx,ty into voroid.x,.y int id2 = TOID(voroid.x,voroid.y,size); //convert the site's coord to an index into pbaTexParam[], the site-label-texture float imp = tex1Dfetch(pbaTexParam,id2); //get the site's label float d2 = (tx-voroid.x)*(tx-voroid.x)+(ty-voroid.y)*(ty-voroid.y); float dt = sqrtf(d2); //save the Euclidean DT ++id; //TOID(tx+1,ty,size) voroid = tex1Dfetch(pbaTexColor,id); // id2 = TOID(voroid.x,voroid.y,size); // float imp_r = tex1Dfetch(pbaTexParam,id2); // id += size-1; //TOID(tx,ty+1,size) voroid = tex1Dfetch(pbaTexColor,id); // id2 = TOID(voroid.x,voroid.y,size); // float imp_u = tex1Dfetch(pbaTexParam,id2); // float imp_dx = fabsf(imp_r-imp); float imp_dy = fabsf(imp_u-imp); //float Imp = max(imp_dx,imp_dy); float Imp = (imp_dx<imp_dy) ? imp_dy : imp_dx; // importance naar salience? Imp = (Imp<fabsf(length-Imp))? 
Imp : fabsf(length-Imp); //importance float saliency = Imp / dt; //if (fg[id] && Imp>=threshold) output[Id] = 1; //wang.The original one is fg[id]. //By filling only 1-values, we reduce memory access somehow (writing to output[] is expensive) //if (fg[id] && saliency>=threshold) output[Id] = saliency; if (fg[id] && Imp > 2.0 && saliency>=threshold) output[Id] = saliency; //wang.The original one is fg[id]. //By filling only 1-values, we reduce memory access somehow (writing to output[] is expensive) } //WARNING: this kernel may sometimes creates 2-pixel-thick branches.. Study the AFMM original code to see if this is correct. } #define X 1 __constant__ const //REMARK: put following constants (for kernelTopology) in CUDA constant-memory, as this gives a huge speed difference unsigned char topo_patterns[][9] = { {0,0,0, //These are the 3x3 templates that we use to detect skeleton endpoints 0,X,0, //(with four 90-degree rotations for each) 0,X,0}, {0,0,0, 0,X,0, 0,0,X}, {0,0,0, 0,X,0, 0,X,X}, {0,0,0, 0,X,0, X,X,0} }; #define topo_NPATTERNS 4 //Number of patterns we try to match (for kernelTopology) //REMARK: #define faster than __constant__ __constant__ const unsigned char topo_rot[][9] = { {0,1,2,3,4,5,6,7,8}, {2,5,8,1,4,7,0,3,6}, {8,7,6,5,4,3,2,1,0}, {6,3,0,7,4,1,8,5,2} }; //These encode the four 90-degree rotations of the patterns (for kernelTopology); __device__ unsigned int topo_gc = 0; __device__ unsigned int topo_gc_last = 0; __global__ void kernelTopology(unsigned char* output, short2* output_set, short xm, short ym, short xM, short yM, short size, int maxpts) { const int tx = blockIdx.x * blockDim.x + threadIdx.x; const int ty = blockIdx.y * blockDim.y + threadIdx.y; unsigned char t[9]; if (tx>xm && ty>ym && tx<xM-1 && ty<yM-1) //careful not to index outside the image; take into account the template size too { int id = TOID(tx, ty, size); unsigned char p = tex1Dfetch(pbaTexGray,id); //get the skeleton pixel at tx,ty if (p) //if the pixel isn't skeleton, nothing to do { unsigned char idx=0; for(int j=ty-1;j<=ty+1;++j) //read the template into t[] for easier use { int id = TOID(tx-1, j, size); for(int i=0;i<=2;++i,++id,++idx) t[idx] = tex1Dfetch(pbaTexGray,id); //get the 3x3 template centered at the skel point tx,ty } for(unsigned char r=0;r<4;++r) //try to match all rotations of a pattern: { const unsigned char* rr = topo_rot[r]; for(unsigned char p=0;p<topo_NPATTERNS;++p) //try to match all patterns: { const unsigned char* pat = topo_patterns[p]; unsigned char j = (p==0)? 0 : 7; //Speedup: for all patterns except 1st, check only last 3 entries, the first 6 are identical for all patterns for(;j<9;++j) //try to match rotated pattern vs actual pattern if (pat[j]!=t[rr[j]]) break; //this rotation failed if (j<6) break; //Speedup: if we have a mismatch on the 1st 6 pattern entries, then none of the patterns can match // since all templates have the same first 6 entries. if (j==9) //this rotation succeeded: mark the pixel as a topology event and we're done { int crt_gc = atomicInc(&topo_gc,maxpts); //REMARK: this serializes (compacts) all detected endpoints in one array. output_set[crt_gc] = make_short2(tx,ty); //To do this, we use an atomic read-increment-return on a global counter, //which is guaranteed to give all threads unique consecutive indexes in the array. 
output[id] = 1; //Also create the topology image return; } } } } } else //Last thread: add zero-marker to the output point-set, so the reader knows how many points are really in there if (tx==xM-1 && ty==yM-1) //Also reset the global vector counter topo_gc, for the next parallel-run of this function { topo_gc_last = topo_gc; topo_gc = 0; } //We do this in the last thread so that no one modifies topo_gc from now on. //REMARK: this seems to be the only way I can read a __device__ variable back to the CPU } void skelft2DParams(int floodBand_, int maurerBand_, int colorBand_) //Set up some params of the FT algorithm { floodBand = floodBand_; maurerBand = maurerBand_; colorBand = colorBand_; } // Compute 2D FT / Voronoi diagram of a set of sites // siteParam: Site parameterization. 0 = non-site points; >0 = site parameter value. // output: FT. The (x,y) at (i,j) are the coords of the closest site to (i,j) // size: Texture size (pow 2) void skelft2DFT(short* output, float* siteParam, short xm, short ym, short xM, short yM, int size) { skelft2DInitializeInput(siteParam,size); // Initialization of already-allocated data structures skel2DFTCompute(xm, ym, xM, yM, floodBand, maurerBand, colorBand); // Compute FT // Copy FT to CPU, if required if (output) cudaMemcpy(output, pbaTextures[1], size*size*sizeof(short2), cudaMemcpyDeviceToHost); } __global__ void Interpolation(float* output, int size, int curr_bound_value, int prev_bound_value, bool firstL, int last_layer) //Initialize the Voronoi textures from the sites' encoding texture (parameterization) { //REMARK: we interpret 'inputVoro' as a 2D texture, as it's much easier/faster like this int tx = blockIdx.x * blockDim.x + threadIdx.x; int ty = blockIdx.y * blockDim.y + threadIdx.y; if (tx<size && ty<size) //Careful not to go outside the image.. { int i = TOID(tx,ty,size); float curr_val = tex1Dfetch(curr_site_tex,i); float prev_val = tex1Dfetch(prev_site_tex,i); float curr_dt = tex1Dfetch(curr_dt_tex,i); float prev_dt = tex1Dfetch(prev_dt_tex,i); if(firstL){ output[i] = prev_bound_value;//clear_color } else if(last_layer){ if (curr_val) //These are points which define the 'sites' to compute the FT/skeleton (thus, have FT==identity) { float interp_last_layer_value = prev_bound_value + (curr_dt/10.0); int MaxIntensity = (last_layer+10) > 255 ? 255 : (last_layer+10); output[i] = (interp_last_layer_value > MaxIntensity) ? 
MaxIntensity : interp_last_layer_value; } } else{ if (!curr_val && prev_val) // If there are pixels active between boundaries, we smoothly interpolate them //These are points which define the 'sites' to compute the FT/skeleton (thus, have FT==identity) { float interp_alpha = prev_dt / ( prev_dt + curr_dt); float interp_color = curr_bound_value * interp_alpha + prev_bound_value * (1 - interp_alpha); output[i] = interp_color; } } } } void Interp(float* output, float* curr_site, float* prev_site, float* curr_dt, float* prev_dt, int curr_bound_value, int prev_bound_value, int fboSize, bool firstL, int last_layer) { pbaTexSize = fboSize; cudaMemcpy(curr_site_, curr_site, pbaTexSize * pbaTexSize * sizeof(float), cudaMemcpyHostToDevice); cudaMemcpy(prev_site_, prev_site, pbaTexSize * pbaTexSize * sizeof(float), cudaMemcpyHostToDevice); cudaMemcpy(curr_dt_, curr_dt, pbaTexSize * pbaTexSize * sizeof(float), cudaMemcpyHostToDevice); cudaMemcpy(prev_dt_, prev_dt, pbaTexSize * pbaTexSize * sizeof(float), cudaMemcpyHostToDevice); cudaBindTexture(0, curr_site_tex, curr_site_); cudaBindTexture(0, prev_site_tex, prev_site_); cudaBindTexture(0, curr_dt_tex, curr_dt_); cudaBindTexture(0, prev_dt_tex, prev_dt_); dim3 block = dim3(BLOCKX,BLOCKY); dim3 grid = dim3(pbaTexSize/block.x,pbaTexSize/block.y); Interpolation<<<grid,block>>>((float*)pbaTextures[7], pbaTexSize, curr_bound_value, prev_bound_value, firstL, last_layer); cudaUnbindTexture(curr_site_tex); cudaUnbindTexture(prev_site_tex); cudaUnbindTexture(curr_dt_tex); cudaUnbindTexture(prev_dt_tex); //Copy to CPU cudaMemcpy(output, pbaTextures[7], pbaTexSize * pbaTexSize * sizeof(float), cudaMemcpyDeviceToHost); } void skelft2DDT(short* outputDT, float threshold, //Compute (thresholded) DT (into pbaTextures[2]) from resident FT (in pbaTextures[1]) short xm, short ym, short xM, short yM) { dim3 block = dim3(BLOCKX,BLOCKY); dim3 grid = dim3(pbaTexSize/block.x,pbaTexSize/block.y); cudaBindTexture(0, pbaTexColor, pbaTextures[1]); //Used to read the FT from if (threshold>=0) { xm -= threshold; if (xm<0) xm=0; ym -= threshold; if (ym<0) ym=0; xM += threshold; if (xM>pbaTexSize-1) xM=pbaTexSize-1; yM += threshold; if (yM>pbaTexSize-1) yM=pbaTexSize-1; kernelThresholdDT<<< grid, block >>>((unsigned char*)pbaTextures[2], pbaTexSize, threshold*threshold, xm-1, ym-1, xM+1, yM+1); cudaUnbindTexture(pbaTexColor); //Copy thresholded image to CPU if (outputDT) cudaMemcpy(outputDT, (unsigned char*)pbaTextures[2], pbaTexSize * pbaTexSize * sizeof(unsigned char), cudaMemcpyDeviceToHost); } else { xm = ym = 0; xM = yM = pbaTexSize-1; kernelDT <<< grid, block >>>((short*)pbaTextures[2], pbaTexSize, threshold*threshold, xm-1, ym-1, xM+1, yM+1); cudaUnbindTexture(pbaTexColor); //Copy thresholded image to CPU if (outputDT) cudaMemcpy(outputDT, pbaTextures[2], pbaTexSize * pbaTexSize * sizeof(short), cudaMemcpyDeviceToHost); } } void skelft2DSkeleton(float* outputSkel, bool* fg, float length, float threshold, //Compute thresholded skeleton (into pbaTextures[3]) from resident FT (in pbaTextures[1]) short xm,short ym,short xM,short yM) { //length: boundary length dim3 block = dim3(BLOCKX,BLOCKY); //threshold: skeleton importance min-value (below this, we ignore branches) dim3 grid = dim3(pbaTexSize/block.x,pbaTexSize/block.y); cudaBindTexture(0, pbaTexColor, pbaTextures[1]); //Used to read the resident FT cudaBindTexture(0, pbaTexParam, pbaTexSiteParam); //Used to read the resident boundary parameterization cudaMemset(pbaTextures[3],0,sizeof(float)*pbaTexSize*pbaTexSize); //Faster 
to zero result and then fill only 1-values (see kernel) kernelSkel<<< grid, block >>>((float*)pbaTextures[3], fg, xm, ym, xM-1, yM-1, pbaTexSize, threshold, length); cudaUnbindTexture(pbaTexColor); cudaUnbindTexture(pbaTexParam); //Copy skeleton to CPU if (outputSkel) cudaMemcpy(outputSkel, pbaTextures[3], pbaTexSize * pbaTexSize * sizeof(float), cudaMemcpyDeviceToHost); } void skelft2DTopology(unsigned char* outputTopo, int* npts, short* outputPoints, //Compute topology-points of the resident skeleton (in pbaTextures[3]) short xm,short ym,short xM,short yM) { int maxpts = (npts)? *npts : pbaTexSize*pbaTexSize; //This is the max # topo-points we are going to return in outputPoints[] dim3 block = dim3(BLOCKX,BLOCKY); dim3 grid = dim3(pbaTexSize/block.x,pbaTexSize/block.y); cudaBindTexture(0, pbaTexGray, pbaTextures[3]); //Used to read the resident skeleton cudaMemset(pbaTextures[4],0,sizeof(unsigned char)*pbaTexSize*pbaTexSize); //Faster to zero result and then fill only 1-values (see kernel) unsigned int zero = 0; cudaMemcpyToSymbol(topo_gc,&zero,sizeof(unsigned int),0,cudaMemcpyHostToDevice); //Set topo_gc to 0 kernelTopology<<< grid, block >>>((unsigned char*)pbaTextures[4], pbaTextures[5], xm, ym, xM, yM, pbaTexSize, maxpts+1); cudaUnbindTexture(pbaTexGray); if (outputPoints && maxpts) //If output-point vector desired, copy the end-points, put in pbaTexture[5] as a vector of short2's, { //into caller space. We copy only 'maxpts' elements, as the user instructed us. unsigned int num_pts; cudaMemcpyFromSymbol(&num_pts,topo_gc_last,sizeof(unsigned int),0,cudaMemcpyDeviceToHost); //Get #topo-points we have detected from the device-var from CUDA if (npts && num_pts) //Copy the topo-points to caller cudaMemcpy(outputPoints,pbaTextures[5],num_pts*sizeof(short2),cudaMemcpyDeviceToHost); if (npts) *npts = num_pts; //Return #detected topo-points to caller } if (outputTopo) //If topology image desired, copy it into user space cudaMemcpy(outputTopo,pbaTextures[4],pbaTexSize*pbaTexSize*sizeof(unsigned char), cudaMemcpyDeviceToHost); } __global__ void kernelSiteFromSkeleton(short2* outputSites, int size) //Initialize the Voronoi textures from the sites' encoding texture (parameterization) { //REMARK: we interpret 'inputVoro' as a 2D texture, as it's much easier/faster like this int tx = blockIdx.x * blockDim.x + threadIdx.x; int ty = blockIdx.y * blockDim.y + threadIdx.y; if (tx<size && ty<size) //Careful not to go outside the image.. { int i = TOID(tx,ty,size); unsigned char param = tex1Dfetch(pbaTexGray,i); //The sites-param has non-zero (parameter) values precisely on non-boundary points short2& v = outputSites[i]; v.x = v.y = MARKER; //Non-boundary points are marked as 0 in the parameterization. Here we will compute the FT. if (param) //These are points which define the 'sites' to compute the FT/skeleton (thus, have FT==identity) { //We could use an if-then-else here, but it's faster with an if-then v.x = tx; v.y = ty; } } } __global__ void kernelSkelInterpolate(float* output, int size) { int tx = blockIdx.x * blockDim.x + threadIdx.x; int ty = blockIdx.y * blockDim.y + threadIdx.y; if (tx<size && ty<size) //Careful not to go outside the image.. { int id = TOID(tx, ty, size); short2 vid = tex1Dfetch(pbaTexColor,id); float T = sqrtf((tx-vid.x)*(tx-vid.x)+(ty-vid.y)*(ty-vid.y)); short2 vid2 = tex1Dfetch(pbaTexColor2,id); float D = sqrtf((tx-vid2.x)*(tx-vid2.x)+(ty-vid2.y)*(ty-vid2.y)); //float B = ((D)? min(T/2/D,0.5f):0.5) + 0.5*((T)? max(1-D/T,0.0f):0); float B = ((D)? (T/2/D<0.5f)? 
T/2/D : 0.5f : 0.5) + 0.5*((T)? (1-D/T<0.0f)? 0.0f : (1-D/T):0); output[id] = B; } } void skel2DSkeletonDT(float* outputSkelDT,short xm,short ym,short xM,short yM) { dim3 block = dim3(BLOCKX,BLOCKY); dim3 grid = dim3(pbaTexSize/block.x,pbaTexSize/block.y); cudaBindTexture(0,pbaTexGray,pbaTextures[3]); //Used to read the resident binary skeleton kernelSiteFromSkeleton<<<grid,block>>>(pbaTextures[0],pbaTexSize); //1. Init pbaTextures[0] with sites on skeleton i.e. from pbaTexGray cudaUnbindTexture(pbaTexGray); //!!Must first save pbaTextures[1] since we may need it later.. cudaMemcpy(pbaTextures[5],pbaTextures[1],pbaTexSize*pbaTexSize*sizeof(short2),cudaMemcpyDeviceToDevice); skel2DFTCompute(xm, ym, xM, yM, floodBand, maurerBand, colorBand); //2. Compute FT of the skeleton into pbaTextures[6] cudaMemcpy(pbaTextures[6],pbaTextures[1],pbaTexSize*pbaTexSize*sizeof(short2),cudaMemcpyDeviceToDevice); cudaMemcpy(pbaTextures[1],pbaTextures[5],pbaTexSize*pbaTexSize*sizeof(short2),cudaMemcpyDeviceToDevice); //Compute interpolation cudaBindTexture(0,pbaTexColor,pbaTextures[1]); // FT of boundary cudaBindTexture(0,pbaTexColor2,pbaTextures[6]); // FT of skeleton kernelSkelInterpolate<<<grid,block>>>((float*)pbaTextures[0],pbaTexSize); cudaUnbindTexture(pbaTexColor); cudaUnbindTexture(pbaTexColor2); if (outputSkelDT) cudaMemcpy(outputSkelDT, pbaTextures[0], pbaTexSize * pbaTexSize * sizeof(float), cudaMemcpyDeviceToHost); } void skel2DSkeletonFT(short* outputSkelFT,short xm,short ym,short xM,short yM) { dim3 block = dim3(BLOCKX,BLOCKY); dim3 grid = dim3(pbaTexSize/block.x,pbaTexSize/block.y); cudaBindTexture(0,pbaTexGray,pbaTextures[3]); //Used to read the resident binary skeleton kernelSiteFromSkeleton<<<grid,block>>>(pbaTextures[0],pbaTexSize); //1. Init pbaTextures[0] with sites on skeleton i.e. from pbaTexGray cudaUnbindTexture(pbaTexGray); //!!Must first save pbaTextures[1] since we may need it later.. cudaMemcpy(pbaTextures[5],pbaTextures[1],pbaTexSize*pbaTexSize*sizeof(short2),cudaMemcpyDeviceToDevice); skel2DFTCompute(xm, ym, xM, yM, floodBand, maurerBand, colorBand); //2. Compute FT of the skeleton into pbaTextures[6] cudaMemcpy(pbaTextures[6],pbaTextures[1],pbaTexSize*pbaTexSize*sizeof(short2),cudaMemcpyDeviceToDevice); if (outputSkelFT) cudaMemcpy(outputSkelFT, pbaTextures[6], pbaTexSize * pbaTexSize * sizeof(short2), cudaMemcpyDeviceToHost); } __device__ bool fill_gc; //Indicates if a fill-sweep did fill anything or not __global__ void kernelFill(unsigned char* output, int size, unsigned char bg, unsigned char fg, short xm, short ym, short xM, short yM, bool ne) { int tx = blockIdx.x * blockDim.x + threadIdx.x; int ty = blockIdx.y * blockDim.y + threadIdx.y; if (tx>xm && ty>ym && tx<xM && ty<yM) //careful not to index outside the image.. { int id0 = TOID(tx, ty, size); unsigned char val = tex1Dfetch(pbaTexGray,id0); // if (val==fg) //do we have a filled pixel? Then fill all to left/top/up/bottom of it which is background { bool fill = false; int id = id0; if (ne) //fill in north-east direction: { for(short x=tx+1;x<xM;++x) //REMARK: here and below, the interesting thing is that it's faster, by about 10-15%, to fill a whole { // scanline rather than oly until the current block's borders (+1). 
The reason is that filling a whole // scanline decreases the total #sweeps, which seems to be the limiting speed factor if (tex1Dfetch(pbaTexGray,++id)!=bg) break; output[id] = fg; fill = true; } id = id0; for(short y=ty-1;y>ym;--y) { if (tex1Dfetch(pbaTexGray,id-=size)!=bg) break; output[id] = fg; fill = true; } } else //fill in south-west direction: { for(short x=tx-1;x>xm;--x) { if (tex1Dfetch(pbaTexGray,--id)!=bg) break; output[id] = fg; fill = true; } id = id0; for(short y=ty+1;y<yM;++y) { if (tex1Dfetch(pbaTexGray,id+=size)!=bg) break; output[id] = fg; fill = true; } } if (fill) fill_gc = true; //if we filled anything, inform caller; we 'gather' this info from a local var into the //global var here, since it's faster than writing the global var in the for loops } } } __global__ void kernelFillHoles(unsigned char* output, int size, unsigned char bg, unsigned char fg, unsigned char fill_fg) { int tx = blockIdx.x * blockDim.x + threadIdx.x; int ty = blockIdx.y * blockDim.y + threadIdx.y; if (tx>=0 && ty>=0 && tx<size && ty<size) //careful not to index outside the image.. { int id = TOID(tx, ty, size); unsigned char val = tex1Dfetch(pbaTexGray,id); // if (val==fill_fg) output[id] = bg; else if (val==bg) output[id] = fg; } } int skelft2DFill(unsigned char* outputFill, short sx, short sy, short xm, short ym, short xM, short yM, unsigned char fill_value) { dim3 block = dim3(BLOCKX,BLOCKY); dim3 grid = dim3(pbaTexSize/block.x,pbaTexSize/block.y); unsigned char background; int id = sy * pbaTexSize + sx; cudaMemcpy(&background,(unsigned char*)pbaTextures[2]+id,sizeof(unsigned char),cudaMemcpyDeviceToHost); //See which is the value we have to fill from (sx,sy) cudaMemset(((unsigned char*)pbaTextures[2])+id,fill_value,sizeof(unsigned char)); //Fill the seed (x,y) on the GPU cudaBindTexture(0, pbaTexGray, pbaTextures[2]); //Used to read the thresholded DT int iter=0; bool xy = true; //Direction of filling for current sweep: either north-east or south-west //This kind of balances the memory-accesses nicely over kernel calls for(;;++iter,xy=!xy) //Keep filling a sweep at a time until we have no background pixels anymore { bool filled = false; //Initialize flag: we didn't fill anything in this sweep cudaMemcpyToSymbol(fill_gc,&filled,sizeof(bool),0,cudaMemcpyHostToDevice); //Pass flag to CUDA kernelFill<<<grid, block>>>((unsigned char*)pbaTextures[2],pbaTexSize,background,fill_value,xm,ym,xM,yM,xy); //One fill sweep cudaMemcpyFromSymbol(&filled,fill_gc,sizeof(bool),0,cudaMemcpyDeviceToHost); //See if we filled anything in this sweep if (!filled) break; //Nothing filled? 
Then we're done, the image didn't change } cudaUnbindTexture(pbaTexGray); if (outputFill) cudaMemcpy(outputFill, (unsigned char*)pbaTextures[2], pbaTexSize * pbaTexSize * sizeof(unsigned char), cudaMemcpyDeviceToHost); return iter; //Return #iterations done for the fill - useful as a performance measure for caller } int skelft2DFillHoles(unsigned char* outputFill, short sx, short sy, unsigned char foreground) { unsigned char background; unsigned char fill_value = 128; int id = sy * pbaTexSize + sx; cudaMemcpy(&background,(unsigned char*)pbaTextures[2]+id,sizeof(unsigned char),cudaMemcpyDeviceToHost); //See which is the value at (sx,sy) int iter = skelft2DFill(0,sx,sy,0,0,pbaTexSize,pbaTexSize,fill_value); //First, fill the background surrounding the image with some special value dim3 block = dim3(BLOCKX,BLOCKY); dim3 grid = dim3(pbaTexSize/block.x,pbaTexSize/block.y); cudaBindTexture(0, pbaTexGray, pbaTextures[2]); //Used to read the thresholded DT kernelFillHoles<<<grid, block>>>((unsigned char*)pbaTextures[2],pbaTexSize,background,foreground,fill_value); cudaUnbindTexture(pbaTexGray); if (outputFill) cudaMemcpy(outputFill, (unsigned char*)pbaTextures[2], pbaTexSize * pbaTexSize * sizeof(unsigned char), cudaMemcpyDeviceToHost); return iter; }
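// --- Usage sketch (not part of the original sources): a minimal, hypothetical host-side
// sequence for extracting a salience-thresholded skeleton with the API above. 'sites' is the
// boundary parameterization expected by skelft2DFT(); 'd_foreground' is assumed to be a
// device-side bool mask of size*size pixels, since skelft2DSkeleton() passes it straight to
// kernelSkel(); the band sizes, thresholds and buffer names are illustrative only.
#include "skelft.h"

void skeletonExample(float* sites, bool* d_foreground, float boundaryLength,
                     float* skel, unsigned char* topo, int size)
{
    skelft2DInitialization(size);
    skelft2DParams(4, 4, 4);                                             // flood/maurer/color band sizes; 4 is the default tuned for 1024x1024
    skelft2DFT(0, sites, 0, 0, (short)(size-1), (short)(size-1), size);  // FT of the boundary, kept resident for the skeleton pass
    skelft2DSkeleton(skel, d_foreground, boundaryLength, 2.0f,           // salience threshold 2.0 is an arbitrary example value
                     0, 0, (short)(size-1), (short)(size-1));
    int npts = 1000;                                                     // upper bound on returned endpoints
    short endpoints[2000];                                               // packed as (x,y) short pairs by skelft2DTopology()
    skelft2DTopology(topo, &npts, endpoints, 0, 0, (short)(size-1), (short)(size-1));
    skelft2DDeinitialization();
}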
2833cd65a9460ecf9d6f652cbafe88dfa80e63c7.hip
// !!! This is a file automatically generated by hipify!!! // RUN: %run_test hipify "%s" "%t" %hipify_args -D__CUDA_API_VERSION_INTERNAL %clang_args // CHECK: #include <hip/hip_runtime.h> #include <hip/hip_runtime.h> int main() { printf("04. CUDA Driver API Defines synthetic test\n"); // CHECK: #define __HIPCC__; #define __HIPCC__; // CHECK: int DEVICE_CPU = hipCpuDeviceId; // CHECK-NEXT: int DEVICE_INVALID = hipInvalidDeviceId; // CHECK-NEXT: int IPC_HANDLE_SIZE = HIP_IPC_HANDLE_SIZE; int DEVICE_CPU = CU_DEVICE_CPU; int DEVICE_INVALID = CU_DEVICE_INVALID; int IPC_HANDLE_SIZE = HIP_IPC_HANDLE_SIZE; // CHECK: void* LAUNCH_PARAM_BUFFER_POINTER = HIP_LAUNCH_PARAM_BUFFER_POINTER; // CHECK-NEXT: void* LAUNCH_PARAM_BUFFER_SIZE = HIP_LAUNCH_PARAM_BUFFER_SIZE; // CHECK-NEXT: void* LAUNCH_PARAM_END = HIP_LAUNCH_PARAM_END; void* LAUNCH_PARAM_BUFFER_POINTER = HIP_LAUNCH_PARAM_BUFFER_POINTER; void* LAUNCH_PARAM_BUFFER_SIZE = HIP_LAUNCH_PARAM_BUFFER_SIZE; void* LAUNCH_PARAM_END = HIP_LAUNCH_PARAM_END; // CHECK: int MEMHOSTALLOC_PORTABLE = hipHostMallocPortable; // CHECK-NEXT: int MEMHOSTALLOC_DEVICEMAP = hipHostMallocMapped; // CHECK-NEXT: int MEMHOSTALLOC_WRITECOMBINED = hipHostMallocWriteCombined; // CHECK-NEXT: int MEMHOSTREGISTER_PORTABLE = hipHostRegisterPortable; // CHECK-NEXT: int MEMHOSTREGISTER_DEVICEMAP = hipHostRegisterMapped; // CHECK-NEXT: int MEMHOSTREGISTER_IOMEMORY = hipHostRegisterIoMemory; int MEMHOSTALLOC_PORTABLE = HIP_MEMHOSTALLOC_PORTABLE; int MEMHOSTALLOC_DEVICEMAP = HIP_MEMHOSTALLOC_DEVICEMAP; int MEMHOSTALLOC_WRITECOMBINED = HIP_MEMHOSTALLOC_WRITECOMBINED; int MEMHOSTREGISTER_PORTABLE = HIP_MEMHOSTREGISTER_PORTABLE; int MEMHOSTREGISTER_DEVICEMAP = HIP_MEMHOSTREGISTER_DEVICEMAP; int MEMHOSTREGISTER_IOMEMORY = HIP_MEMHOSTREGISTER_IOMEMORY; // CHECK: int TRSA_OVERRIDE_FORMAT = HIP_TRSA_OVERRIDE_FORMAT; // CHECK-NEXT: int TRSF_NORMALIZED_COORDINATES = HIP_TRSF_NORMALIZED_COORDINATES; // CHECK-NEXT: int TRSF_READ_AS_INTEGER = HIP_TRSF_READ_AS_INTEGER; // CHECK-NEXT: int TRSF_SRGB = HIP_TRSF_SRGB; // CHECK-NEXT: hipStream_t STREAM_PER_THREAD = hipStreamPerThread; int TRSA_OVERRIDE_FORMAT = HIP_TRSA_OVERRIDE_FORMAT; int TRSF_NORMALIZED_COORDINATES = HIP_TRSF_NORMALIZED_COORDINATES; int TRSF_READ_AS_INTEGER = HIP_TRSF_READ_AS_INTEGER; int TRSF_SRGB = HIP_TRSF_SRGB; hipStream_t STREAM_PER_THREAD = HIP_STREAM_PER_THREAD; // CHECK: int ARRAY3D_LAYERED = hipArrayLayered; // CHECK-NEXT: int ARRAY3D_SURFACE_LDST = hipArraySurfaceLoadStore; // CHECK-NEXT: int ARRAY3D_CUBEMAP = hipArrayCubemap; // CHECK-NEXT: int ARRAY3D_TEXTURE_GATHER = hipArrayTextureGather; int ARRAY3D_LAYERED = HIP_ARRAY3D_LAYERED; int ARRAY3D_SURFACE_LDST = HIP_ARRAY3D_SURFACE_LDST; int ARRAY3D_CUBEMAP = HIP_ARRAY3D_CUBEMAP; int ARRAY3D_TEXTURE_GATHER = HIP_ARRAY3D_TEXTURE_GATHER; // CHECK: int COOPERATIVE_LAUNCH_MULTI_DEVICE_NO_PRE_LAUNCH_SYNC = hipCooperativeLaunchMultiDeviceNoPreSync; // CHECK-NEXT: int COOPERATIVE_LAUNCH_MULTI_DEVICE_NO_POST_LAUNCH_SYNC = hipCooperativeLaunchMultiDeviceNoPostSync; int COOPERATIVE_LAUNCH_MULTI_DEVICE_NO_PRE_LAUNCH_SYNC = CUDA_COOPERATIVE_LAUNCH_MULTI_DEVICE_NO_PRE_LAUNCH_SYNC; int COOPERATIVE_LAUNCH_MULTI_DEVICE_NO_POST_LAUNCH_SYNC = CUDA_COOPERATIVE_LAUNCH_MULTI_DEVICE_NO_POST_LAUNCH_SYNC; return 0; }
2833cd65a9460ecf9d6f652cbafe88dfa80e63c7.cu
// RUN: %run_test hipify "%s" "%t" %hipify_args -D__CUDA_API_VERSION_INTERNAL %clang_args // CHECK: #include <hip/hip_runtime.h> #include <cuda.h> int main() { printf("04. CUDA Driver API Defines synthetic test\n"); // CHECK: #define __HIPCC__; #define __CUDACC__; // CHECK: int DEVICE_CPU = hipCpuDeviceId; // CHECK-NEXT: int DEVICE_INVALID = hipInvalidDeviceId; // CHECK-NEXT: int IPC_HANDLE_SIZE = HIP_IPC_HANDLE_SIZE; int DEVICE_CPU = CU_DEVICE_CPU; int DEVICE_INVALID = CU_DEVICE_INVALID; int IPC_HANDLE_SIZE = CU_IPC_HANDLE_SIZE; // CHECK: void* LAUNCH_PARAM_BUFFER_POINTER = HIP_LAUNCH_PARAM_BUFFER_POINTER; // CHECK-NEXT: void* LAUNCH_PARAM_BUFFER_SIZE = HIP_LAUNCH_PARAM_BUFFER_SIZE; // CHECK-NEXT: void* LAUNCH_PARAM_END = HIP_LAUNCH_PARAM_END; void* LAUNCH_PARAM_BUFFER_POINTER = CU_LAUNCH_PARAM_BUFFER_POINTER; void* LAUNCH_PARAM_BUFFER_SIZE = CU_LAUNCH_PARAM_BUFFER_SIZE; void* LAUNCH_PARAM_END = CU_LAUNCH_PARAM_END; // CHECK: int MEMHOSTALLOC_PORTABLE = hipHostMallocPortable; // CHECK-NEXT: int MEMHOSTALLOC_DEVICEMAP = hipHostMallocMapped; // CHECK-NEXT: int MEMHOSTALLOC_WRITECOMBINED = hipHostMallocWriteCombined; // CHECK-NEXT: int MEMHOSTREGISTER_PORTABLE = hipHostRegisterPortable; // CHECK-NEXT: int MEMHOSTREGISTER_DEVICEMAP = hipHostRegisterMapped; // CHECK-NEXT: int MEMHOSTREGISTER_IOMEMORY = hipHostRegisterIoMemory; int MEMHOSTALLOC_PORTABLE = CU_MEMHOSTALLOC_PORTABLE; int MEMHOSTALLOC_DEVICEMAP = CU_MEMHOSTALLOC_DEVICEMAP; int MEMHOSTALLOC_WRITECOMBINED = CU_MEMHOSTALLOC_WRITECOMBINED; int MEMHOSTREGISTER_PORTABLE = CU_MEMHOSTREGISTER_PORTABLE; int MEMHOSTREGISTER_DEVICEMAP = CU_MEMHOSTREGISTER_DEVICEMAP; int MEMHOSTREGISTER_IOMEMORY = CU_MEMHOSTREGISTER_IOMEMORY; // CHECK: int TRSA_OVERRIDE_FORMAT = HIP_TRSA_OVERRIDE_FORMAT; // CHECK-NEXT: int TRSF_NORMALIZED_COORDINATES = HIP_TRSF_NORMALIZED_COORDINATES; // CHECK-NEXT: int TRSF_READ_AS_INTEGER = HIP_TRSF_READ_AS_INTEGER; // CHECK-NEXT: int TRSF_SRGB = HIP_TRSF_SRGB; // CHECK-NEXT: hipStream_t STREAM_PER_THREAD = hipStreamPerThread; int TRSA_OVERRIDE_FORMAT = CU_TRSA_OVERRIDE_FORMAT; int TRSF_NORMALIZED_COORDINATES = CU_TRSF_NORMALIZED_COORDINATES; int TRSF_READ_AS_INTEGER = CU_TRSF_READ_AS_INTEGER; int TRSF_SRGB = CU_TRSF_SRGB; CUstream STREAM_PER_THREAD = CU_STREAM_PER_THREAD; // CHECK: int ARRAY3D_LAYERED = hipArrayLayered; // CHECK-NEXT: int ARRAY3D_SURFACE_LDST = hipArraySurfaceLoadStore; // CHECK-NEXT: int ARRAY3D_CUBEMAP = hipArrayCubemap; // CHECK-NEXT: int ARRAY3D_TEXTURE_GATHER = hipArrayTextureGather; int ARRAY3D_LAYERED = CUDA_ARRAY3D_LAYERED; int ARRAY3D_SURFACE_LDST = CUDA_ARRAY3D_SURFACE_LDST; int ARRAY3D_CUBEMAP = CUDA_ARRAY3D_CUBEMAP; int ARRAY3D_TEXTURE_GATHER = CUDA_ARRAY3D_TEXTURE_GATHER; // CHECK: int COOPERATIVE_LAUNCH_MULTI_DEVICE_NO_PRE_LAUNCH_SYNC = hipCooperativeLaunchMultiDeviceNoPreSync; // CHECK-NEXT: int COOPERATIVE_LAUNCH_MULTI_DEVICE_NO_POST_LAUNCH_SYNC = hipCooperativeLaunchMultiDeviceNoPostSync; int COOPERATIVE_LAUNCH_MULTI_DEVICE_NO_PRE_LAUNCH_SYNC = CUDA_COOPERATIVE_LAUNCH_MULTI_DEVICE_NO_PRE_LAUNCH_SYNC; int COOPERATIVE_LAUNCH_MULTI_DEVICE_NO_POST_LAUNCH_SYNC = CUDA_COOPERATIVE_LAUNCH_MULTI_DEVICE_NO_POST_LAUNCH_SYNC; return 0; }
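// --- Illustrative snippet (not part of the test): one of the define pairs checked above in
// actual use. CU_MEMHOSTALLOC_PORTABLE and CU_MEMHOSTALLOC_DEVICEMAP are flags of the CUDA
// driver API call cuMemHostAlloc(); per the CHECK lines, hipify maps them to
// hipHostMallocPortable and hipHostMallocMapped. The sketch assumes an already-initialized
// driver context and omits error handling.
#include <cuda.h>

void pinnedAllocSketch(size_t bytes)
{
    void* host = nullptr;
    // Pinned allocation, visible to all contexts and mapped into the device address space.
    cuMemHostAlloc(&host, bytes, CU_MEMHOSTALLOC_PORTABLE | CU_MEMHOSTALLOC_DEVICEMAP);
    // ... use the buffer ...
    cuMemFreeHost(host);
}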
fd894302bd5fd684ee9cbf86cb1258dd2cd25bc7.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* -- MAGMA (version 2.2.0) -- Univ. of Tennessee, Knoxville Univ. of California, Berkeley Univ. of Colorado, Denver @date November 2016 @generated from sparse/blas/zmgeellmv.cu, normal z -> d, Sun Nov 20 20:20:40 2016 */ #include "magmasparse_internal.h" #define BLOCK_SIZE 512 __global__ void dmgeellmv_kernel( int num_rows, int num_cols, int num_vecs, int num_cols_per_row, double alpha, double * dval, magma_index_t * dcolind, double * dx, double beta, double * dy) { int row = blockDim.x * blockIdx.x + threadIdx.x; extern __shared__ double dot[]; if(row < num_rows ){ for( int i=0; i<num_vecs; i++) dot[ threadIdx.x + i*blockDim.x ] = MAGMA_D_MAKE(0.0, 0.0); for ( int n = 0; n < num_cols_per_row; n++ ) { int col = dcolind [ num_cols_per_row * row + n ]; double val = dval [ num_cols_per_row * row + n ]; if( val != 0){ for( int i=0; i<num_vecs; i++) dot[ threadIdx.x + i*blockDim.x ] += val * dx[col + i * num_cols ]; } } for( int i=0; i<num_vecs; i++) dy[ row + i*num_cols ] = dot[ threadIdx.x + i*blockDim.x ] * alpha + beta * dy [ row + i * num_cols ]; } } /** Purpose ------- This routine computes Y = alpha * A * X + beta * Y for X and Y sets of num_vec vectors on the GPU. Input format is ELLPACK. Arguments --------- @param[in] transA magma_trans_t transposition parameter for A @param[in] m magma_int_t number of rows in A @param[in] n magma_int_t number of columns in A @param[in] num_vecs mama_int_t number of vectors @param[in] nnz_per_row magma_int_t number of elements in the longest row @param[in] alpha double scalar multiplier @param[in] dval magmaDouble_ptr array containing values of A in ELLPACK @param[in] dcolind magmaIndex_ptr columnindices of A in ELLPACK @param[in] dx magmaDouble_ptr input vector x @param[in] beta double scalar multiplier @param[out] dy magmaDouble_ptr input/output vector y @param[in] queue magma_queue_t Queue to execute in. @ingroup magmasparse_dblas ********************************************************************/ extern "C" magma_int_t magma_dmgeellmv( magma_trans_t transA, magma_int_t m, magma_int_t n, magma_int_t num_vecs, magma_int_t nnz_per_row, double alpha, magmaDouble_ptr dval, magmaIndex_ptr dcolind, magmaDouble_ptr dx, double beta, magmaDouble_ptr dy, magma_queue_t queue ) { dim3 grid( magma_ceildiv( m, BLOCK_SIZE ) ); magma_int_t threads = BLOCK_SIZE; unsigned int MEM_SIZE = num_vecs* BLOCK_SIZE * sizeof( double ); // num_vecs vectors hipLaunchKernelGGL(( dmgeellmv_kernel), dim3(grid), dim3(threads), MEM_SIZE, queue->cuda_stream() , m, n, num_vecs, nnz_per_row, alpha, dval, dcolind, dx, beta, dy ); return MAGMA_SUCCESS; }
fd894302bd5fd684ee9cbf86cb1258dd2cd25bc7.cu
/* -- MAGMA (version 2.2.0) -- Univ. of Tennessee, Knoxville Univ. of California, Berkeley Univ. of Colorado, Denver @date November 2016 @generated from sparse/blas/zmgeellmv.cu, normal z -> d, Sun Nov 20 20:20:40 2016 */ #include "magmasparse_internal.h" #define BLOCK_SIZE 512 __global__ void dmgeellmv_kernel( int num_rows, int num_cols, int num_vecs, int num_cols_per_row, double alpha, double * dval, magma_index_t * dcolind, double * dx, double beta, double * dy) { int row = blockDim.x * blockIdx.x + threadIdx.x; extern __shared__ double dot[]; if(row < num_rows ){ for( int i=0; i<num_vecs; i++) dot[ threadIdx.x + i*blockDim.x ] = MAGMA_D_MAKE(0.0, 0.0); for ( int n = 0; n < num_cols_per_row; n++ ) { int col = dcolind [ num_cols_per_row * row + n ]; double val = dval [ num_cols_per_row * row + n ]; if( val != 0){ for( int i=0; i<num_vecs; i++) dot[ threadIdx.x + i*blockDim.x ] += val * dx[col + i * num_cols ]; } } for( int i=0; i<num_vecs; i++) dy[ row + i*num_cols ] = dot[ threadIdx.x + i*blockDim.x ] * alpha + beta * dy [ row + i * num_cols ]; } } /** Purpose ------- This routine computes Y = alpha * A * X + beta * Y for X and Y sets of num_vec vectors on the GPU. Input format is ELLPACK. Arguments --------- @param[in] transA magma_trans_t transposition parameter for A @param[in] m magma_int_t number of rows in A @param[in] n magma_int_t number of columns in A @param[in] num_vecs mama_int_t number of vectors @param[in] nnz_per_row magma_int_t number of elements in the longest row @param[in] alpha double scalar multiplier @param[in] dval magmaDouble_ptr array containing values of A in ELLPACK @param[in] dcolind magmaIndex_ptr columnindices of A in ELLPACK @param[in] dx magmaDouble_ptr input vector x @param[in] beta double scalar multiplier @param[out] dy magmaDouble_ptr input/output vector y @param[in] queue magma_queue_t Queue to execute in. @ingroup magmasparse_dblas ********************************************************************/ extern "C" magma_int_t magma_dmgeellmv( magma_trans_t transA, magma_int_t m, magma_int_t n, magma_int_t num_vecs, magma_int_t nnz_per_row, double alpha, magmaDouble_ptr dval, magmaIndex_ptr dcolind, magmaDouble_ptr dx, double beta, magmaDouble_ptr dy, magma_queue_t queue ) { dim3 grid( magma_ceildiv( m, BLOCK_SIZE ) ); magma_int_t threads = BLOCK_SIZE; unsigned int MEM_SIZE = num_vecs* BLOCK_SIZE * sizeof( double ); // num_vecs vectors dmgeellmv_kernel<<< grid, threads, MEM_SIZE, queue->cuda_stream() >>> ( m, n, num_vecs, nnz_per_row, alpha, dval, dcolind, dx, beta, dy ); return MAGMA_SUCCESS; }
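// --- Worked sizing note (not part of the original sources): dmgeellmv_kernel is launched with
// MEM_SIZE = num_vecs * BLOCK_SIZE * sizeof(double) bytes of dynamic shared memory, so with
// BLOCK_SIZE = 512 each vector costs 512 * 8 B = 4 KB; 4 vectors need 16 KB, and the common
// 48 KB per-block limit is reached at 12 vectors. The helper below (hypothetical, not MAGMA API)
// makes that check explicit before launching.
#include <cuda_runtime.h>

bool dmgeellmvSharedMemFits(int num_vecs, int device)
{
    int maxShared = 0;
    cudaDeviceGetAttribute(&maxShared, cudaDevAttrMaxSharedMemoryPerBlock, device);
    size_t needed = (size_t)num_vecs * 512 /* BLOCK_SIZE */ * sizeof(double);
    return needed <= (size_t)maxShared;   // e.g. 4 vectors -> 16 KB fits; 16 vectors -> 64 KB does not
}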
41aaf86a30f78b1aca2a215cac00d52bcce8b585.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* * This code is adopted from Bell and Garland, NVIDIA Corporation * under the Apache License, Version 2.0: * * http://www.apache.org/licenses/LICENSE-2.0 * * @author Marc Suchard */ #pragma once #include "GPU/GPUImplDefs.h" #ifdef __cplusplus extern "C" { #endif //#include "sparse_formats.h" //#include "texture.h" //#include "kernels/spmv_common_device.cu.h" ////////////////////////////////////////////////////////////////////////////// // CSR SpMV kernels based on a vector model (one warp per row) ////////////////////////////////////////////////////////////////////////////// // // spmv_csr_vector_device // Each row of the CSR matrix is assigned to a warp. The warp computes // y[i] = A[i,:] * x, i.e. the dot product of the i-th row of A with // the x vector, in parallel. This division of work implies that // the CSR index and data arrays (Aj and Ax) are accessed in a contiguous // manner (but generally not aligned). On GT200 these accesses are // coalesced, unlike kernels based on the one-row-per-thread division of // work. Since an entire 32-thread warp is assigned to each row, many // threads will remain idle when their row contains a small number // of elements. This code relies on implicit synchronization among // threads in a warp. // // spmv_csr_vector_tex_device // Same as spmv_csr_vector_tex_device, except that the texture cache is // used for accessing the x vector. //template <typename unsigned int, typename REAL, unsigned int CSR_BLOCK_SIZE, bool UseCache> __global__ void spmv_csr_vector_kernel(const unsigned int * Ap, #ifndef NO_COLUMNS const unsigned int * Aj, #endif #ifndef IS_INDICATOR_MATRIX const REAL * Ax, #endif const REAL * x, REAL * y, const unsigned int num_rows) { __shared__ REAL sdata[CSR_BLOCK_SIZE + 16]; // padded to avoid reduction ifs __shared__ unsigned int ptrs[CSR_BLOCK_SIZE/WARP_SIZE][2]; const unsigned int thread_id = CSR_BLOCK_SIZE * blockIdx.x + threadIdx.x; // global thread index const unsigned int thread_lane = threadIdx.x & (WARP_SIZE-1); // thread index within the warp const unsigned int warp_id = thread_id / WARP_SIZE; // global warp index const unsigned int warp_lane = threadIdx.x / WARP_SIZE; // warp index within the CTA const unsigned int num_warps = (CSR_BLOCK_SIZE / WARP_SIZE) * gridDim.x; // total number of active warps for(unsigned int row = warp_id; row < num_rows; row += num_warps){ // use two threads to fetch Ap[row] and Ap[row+1] // this is considerably faster than the straightforward version if(thread_lane < 2) ptrs[warp_lane][thread_lane] = Ap[row + thread_lane]; const unsigned int row_start = ptrs[warp_lane][0]; //same as: row_start = Ap[row]; const unsigned int row_end = ptrs[warp_lane][1]; //same as: row_end = Ap[row+1]; // compute local sum REAL sum = 0; for(unsigned int jj = row_start + thread_lane; jj < row_end; jj += WARP_SIZE) #ifdef IS_INDICATOR_MATRIX #ifdef NO_COLUMNS sum += x[jj]; #else sum += x[Aj[jj]]; #endif #else #ifdef NO_COLUMNS sum += Ax[jj] * x[jj]; #else sum += Ax[jj] * x[Aj[jj]]; #endif #endif // reduce local sums to row sum (ASSUME: warpsize 32) sdata[threadIdx.x] = sum; sdata[threadIdx.x] = sum = sum + sdata[threadIdx.x + 16]; //EMUSYNC; sdata[threadIdx.x] = sum = sum + sdata[threadIdx.x + 8]; //EMUSYNC; sdata[threadIdx.x] = sum = sum + sdata[threadIdx.x + 4]; //EMUSYNC; sdata[threadIdx.x] = sum = sum + sdata[threadIdx.x + 2]; //EMUSYNC; sdata[threadIdx.x] = sum = sum + sdata[threadIdx.x + 1]; //EMUSYNC; // first thread writes 
warp result if (thread_lane == 0) y[row] = sdata[threadIdx.x]; } } #ifdef __cplusplus } // extern "C" #endif
41aaf86a30f78b1aca2a215cac00d52bcce8b585.cu
/* * This code is adopted from Bell and Garland, NVIDIA Corporation * under the Apache License, Version 2.0: * * http://www.apache.org/licenses/LICENSE-2.0 * * @author Marc Suchard */ #pragma once #include "GPU/GPUImplDefs.h" #ifdef __cplusplus extern "C" { #endif //#include "sparse_formats.h" //#include "texture.h" //#include "kernels/spmv_common_device.cu.h" ////////////////////////////////////////////////////////////////////////////// // CSR SpMV kernels based on a vector model (one warp per row) ////////////////////////////////////////////////////////////////////////////// // // spmv_csr_vector_device // Each row of the CSR matrix is assigned to a warp. The warp computes // y[i] = A[i,:] * x, i.e. the dot product of the i-th row of A with // the x vector, in parallel. This division of work implies that // the CSR index and data arrays (Aj and Ax) are accessed in a contiguous // manner (but generally not aligned). On GT200 these accesses are // coalesced, unlike kernels based on the one-row-per-thread division of // work. Since an entire 32-thread warp is assigned to each row, many // threads will remain idle when their row contains a small number // of elements. This code relies on implicit synchronization among // threads in a warp. // // spmv_csr_vector_tex_device // Same as spmv_csr_vector_tex_device, except that the texture cache is // used for accessing the x vector. //template <typename unsigned int, typename REAL, unsigned int CSR_BLOCK_SIZE, bool UseCache> __global__ void spmv_csr_vector_kernel(const unsigned int * Ap, #ifndef NO_COLUMNS const unsigned int * Aj, #endif #ifndef IS_INDICATOR_MATRIX const REAL * Ax, #endif const REAL * x, REAL * y, const unsigned int num_rows) { __shared__ REAL sdata[CSR_BLOCK_SIZE + 16]; // padded to avoid reduction ifs __shared__ unsigned int ptrs[CSR_BLOCK_SIZE/WARP_SIZE][2]; const unsigned int thread_id = CSR_BLOCK_SIZE * blockIdx.x + threadIdx.x; // global thread index const unsigned int thread_lane = threadIdx.x & (WARP_SIZE-1); // thread index within the warp const unsigned int warp_id = thread_id / WARP_SIZE; // global warp index const unsigned int warp_lane = threadIdx.x / WARP_SIZE; // warp index within the CTA const unsigned int num_warps = (CSR_BLOCK_SIZE / WARP_SIZE) * gridDim.x; // total number of active warps for(unsigned int row = warp_id; row < num_rows; row += num_warps){ // use two threads to fetch Ap[row] and Ap[row+1] // this is considerably faster than the straightforward version if(thread_lane < 2) ptrs[warp_lane][thread_lane] = Ap[row + thread_lane]; const unsigned int row_start = ptrs[warp_lane][0]; //same as: row_start = Ap[row]; const unsigned int row_end = ptrs[warp_lane][1]; //same as: row_end = Ap[row+1]; // compute local sum REAL sum = 0; for(unsigned int jj = row_start + thread_lane; jj < row_end; jj += WARP_SIZE) #ifdef IS_INDICATOR_MATRIX #ifdef NO_COLUMNS sum += x[jj]; #else sum += x[Aj[jj]]; #endif #else #ifdef NO_COLUMNS sum += Ax[jj] * x[jj]; #else sum += Ax[jj] * x[Aj[jj]]; #endif #endif // reduce local sums to row sum (ASSUME: warpsize 32) sdata[threadIdx.x] = sum; sdata[threadIdx.x] = sum = sum + sdata[threadIdx.x + 16]; //EMUSYNC; sdata[threadIdx.x] = sum = sum + sdata[threadIdx.x + 8]; //EMUSYNC; sdata[threadIdx.x] = sum = sum + sdata[threadIdx.x + 4]; //EMUSYNC; sdata[threadIdx.x] = sum = sum + sdata[threadIdx.x + 2]; //EMUSYNC; sdata[threadIdx.x] = sum = sum + sdata[threadIdx.x + 1]; //EMUSYNC; // first thread writes warp result if (thread_lane == 0) y[row] = sdata[threadIdx.x]; } } #ifdef __cplusplus } 
// extern "C" #endif
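// --- Launch sketch (not part of the original sources): one way the warp-per-row kernel above
// might be launched. REAL, CSR_BLOCK_SIZE and WARP_SIZE live in GPU/GPUImplDefs.h, which is not
// shown here, so float, 256 and 32 below are assumptions for illustration; NO_COLUMNS and
// IS_INDICATOR_MATRIX are taken as undefined, i.e. the full (Ap, Aj, Ax, x, y) signature. Since
// rows are strided by the total warp count, any grid size is correct; sizing it to one warp per
// row just avoids idle passes. Note that the final reduction relies on implicit warp-synchronous
// execution, which on Volta and newer GPUs additionally needs __syncwarp() to stay safe.
#include <cuda_runtime.h>

void launchSpmvCsrVector(const unsigned int* d_Ap, const unsigned int* d_Aj, const float* d_Ax,
                         const float* d_x, float* d_y, unsigned int num_rows)
{
    const unsigned int blockSize     = 256;                 // assumed CSR_BLOCK_SIZE
    const unsigned int warpsPerBlock = blockSize / 32;      // assumed WARP_SIZE = 32
    const unsigned int blocks        = (num_rows + warpsPerBlock - 1) / warpsPerBlock;
    spmv_csr_vector_kernel<<<blocks, blockSize>>>(d_Ap, d_Aj, d_Ax, d_x, d_y, num_rows);
}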
5c75d80ae7fc50d3d1e1dc547cb3f5f327d1b273.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py // RUN: %clang_cc1 -no-opaque-pointers -triple amdgcn-amd-amdhsa -target-cpu gfx906 -x hip \ // RUN: -aux-triple x86_64-unknown-linux-gnu -fcuda-is-device -emit-llvm %s \ // RUN: -o - | FileCheck %s // RUN: %clang_cc1 -no-opaque-pointers -triple amdgcn-amd-amdhsa -target-cpu gfx906 -x hip \ // RUN: -aux-triple x86_64-pc-windows-msvc -fcuda-is-device -emit-llvm %s \ // RUN: -o - | FileCheck %s #include "Inputs/cuda.h" // CHECK-LABEL: @_Z16use_dispatch_ptrPi( // CHECK-NEXT: entry: // CHECK-NEXT: [[OUT:%.*]] = alloca i32*, align 8, addrspace(5) // CHECK-NEXT: [[OUT_ADDR:%.*]] = alloca i32*, align 8, addrspace(5) // CHECK-NEXT: [[DISPATCH_PTR:%.*]] = alloca i32*, align 8, addrspace(5) // CHECK-NEXT: [[OUT_ASCAST:%.*]] = addrspacecast i32* addrspace(5)* [[OUT]] to i32** // CHECK-NEXT: [[OUT_ADDR_ASCAST:%.*]] = addrspacecast i32* addrspace(5)* [[OUT_ADDR]] to i32** // CHECK-NEXT: [[DISPATCH_PTR_ASCAST:%.*]] = addrspacecast i32* addrspace(5)* [[DISPATCH_PTR]] to i32** // CHECK-NEXT: [[TMP0:%.*]] = addrspacecast i32 addrspace(1)* [[OUT_COERCE:%.*]] to i32* // CHECK-NEXT: store i32* [[TMP0]], i32** [[OUT_ASCAST]], align 8 // CHECK-NEXT: [[OUT1:%.*]] = load i32*, i32** [[OUT_ASCAST]], align 8 // CHECK-NEXT: store i32* [[OUT1]], i32** [[OUT_ADDR_ASCAST]], align 8 // CHECK-NEXT: [[TMP1:%.*]] = call align 4 dereferenceable(64) i8 addrspace(4)* @llvm.amdgcn.dispatch.ptr() // CHECK-NEXT: [[TMP2:%.*]] = addrspacecast i8 addrspace(4)* [[TMP1]] to i32* // CHECK-NEXT: store i32* [[TMP2]], i32** [[DISPATCH_PTR_ASCAST]], align 8 // CHECK-NEXT: [[TMP3:%.*]] = load i32*, i32** [[DISPATCH_PTR_ASCAST]], align 8 // CHECK-NEXT: [[TMP4:%.*]] = load i32, i32* [[TMP3]], align 4 // CHECK-NEXT: [[TMP5:%.*]] = load i32*, i32** [[OUT_ADDR_ASCAST]], align 8 // CHECK-NEXT: store i32 [[TMP4]], i32* [[TMP5]], align 4 // CHECK-NEXT: ret void // __global__ void use_dispatch_ptr(int* out) { const int* dispatch_ptr = (const int*)__builtin_amdgcn_dispatch_ptr(); *out = *dispatch_ptr; } __global__ // CHECK-LABEL: @_Z12test_ds_fmaxf( // CHECK-NEXT: entry: // CHECK-NEXT: [[SRC_ADDR:%.*]] = alloca float, align 4, addrspace(5) // CHECK-NEXT: [[X:%.*]] = alloca float, align 4, addrspace(5) // CHECK-NEXT: [[SRC_ADDR_ASCAST:%.*]] = addrspacecast float addrspace(5)* [[SRC_ADDR]] to float* // CHECK-NEXT: [[X_ASCAST:%.*]] = addrspacecast float addrspace(5)* [[X]] to float* // CHECK-NEXT: store float [[SRC:%.*]], float* [[SRC_ADDR_ASCAST]], align 4 // CHECK-NEXT: [[TMP0:%.*]] = load float, float* [[SRC_ADDR_ASCAST]], align 4 // CHECK-NEXT: [[TMP1:%.*]] = call contract float @llvm.amdgcn.ds.fmax.f32(float addrspace(3)* @_ZZ12test_ds_fmaxfE6shared, float [[TMP0]], i32 0, i32 0, i1 false) // CHECK-NEXT: store volatile float [[TMP1]], float* [[X_ASCAST]], align 4 // CHECK-NEXT: ret void // void test_ds_fmax(float src) { __shared__ float shared; volatile float x = __builtin_amdgcn_ds_fmaxf(&shared, src, 0, 0, false); } // CHECK-LABEL: @_Z12test_ds_faddf( // CHECK-NEXT: entry: // CHECK-NEXT: [[SRC_ADDR:%.*]] = alloca float, align 4, addrspace(5) // CHECK-NEXT: [[X:%.*]] = alloca float, align 4, addrspace(5) // CHECK-NEXT: [[SRC_ADDR_ASCAST:%.*]] = addrspacecast float addrspace(5)* [[SRC_ADDR]] to float* // CHECK-NEXT: [[X_ASCAST:%.*]] = addrspacecast float addrspace(5)* [[X]] to float* // CHECK-NEXT: store float [[SRC:%.*]], float* [[SRC_ADDR_ASCAST]], align 4 // CHECK-NEXT: 
[[TMP0:%.*]] = load float, float* [[SRC_ADDR_ASCAST]], align 4 // CHECK-NEXT: [[TMP1:%.*]] = call contract float @llvm.amdgcn.ds.fadd.f32(float addrspace(3)* @_ZZ12test_ds_faddfE6shared, float [[TMP0]], i32 0, i32 0, i1 false) // CHECK-NEXT: store volatile float [[TMP1]], float* [[X_ASCAST]], align 4 // CHECK-NEXT: ret void // __global__ void test_ds_fadd(float src) { __shared__ float shared; volatile float x = __builtin_amdgcn_ds_faddf(&shared, src, 0, 0, false); } // CHECK-LABEL: @_Z12test_ds_fminfPf( // CHECK-NEXT: entry: // CHECK-NEXT: [[SHARED:%.*]] = alloca float*, align 8, addrspace(5) // CHECK-NEXT: [[SRC_ADDR:%.*]] = alloca float, align 4, addrspace(5) // CHECK-NEXT: [[SHARED_ADDR:%.*]] = alloca float*, align 8, addrspace(5) // CHECK-NEXT: [[X:%.*]] = alloca float, align 4, addrspace(5) // CHECK-NEXT: [[SHARED_ASCAST:%.*]] = addrspacecast float* addrspace(5)* [[SHARED]] to float** // CHECK-NEXT: [[SRC_ADDR_ASCAST:%.*]] = addrspacecast float addrspace(5)* [[SRC_ADDR]] to float* // CHECK-NEXT: [[SHARED_ADDR_ASCAST:%.*]] = addrspacecast float* addrspace(5)* [[SHARED_ADDR]] to float** // CHECK-NEXT: [[X_ASCAST:%.*]] = addrspacecast float addrspace(5)* [[X]] to float* // CHECK-NEXT: [[TMP0:%.*]] = addrspacecast float addrspace(1)* [[SHARED_COERCE:%.*]] to float* // CHECK-NEXT: store float* [[TMP0]], float** [[SHARED_ASCAST]], align 8 // CHECK-NEXT: [[SHARED1:%.*]] = load float*, float** [[SHARED_ASCAST]], align 8 // CHECK-NEXT: store float [[SRC:%.*]], float* [[SRC_ADDR_ASCAST]], align 4 // CHECK-NEXT: store float* [[SHARED1]], float** [[SHARED_ADDR_ASCAST]], align 8 // CHECK-NEXT: [[TMP1:%.*]] = load float*, float** [[SHARED_ADDR_ASCAST]], align 8 // CHECK-NEXT: [[TMP2:%.*]] = addrspacecast float* [[TMP1]] to float addrspace(3)* // CHECK-NEXT: [[TMP3:%.*]] = load float, float* [[SRC_ADDR_ASCAST]], align 4 // CHECK-NEXT: [[TMP4:%.*]] = call contract float @llvm.amdgcn.ds.fmin.f32(float addrspace(3)* [[TMP2]], float [[TMP3]], i32 0, i32 0, i1 false) // CHECK-NEXT: store volatile float [[TMP4]], float* [[X_ASCAST]], align 4 // CHECK-NEXT: ret void // __global__ void test_ds_fmin(float src, float *shared) { volatile float x = __builtin_amdgcn_ds_fminf(shared, src, 0, 0, false); } // CHECK-LABEL: @_Z33test_ret_builtin_nondef_addrspacev( // CHECK-NEXT: entry: // CHECK-NEXT: [[X:%.*]] = alloca i8*, align 8, addrspace(5) // CHECK-NEXT: [[X_ASCAST:%.*]] = addrspacecast i8* addrspace(5)* [[X]] to i8** // CHECK-NEXT: [[TMP0:%.*]] = call align 4 dereferenceable(64) i8 addrspace(4)* @llvm.amdgcn.dispatch.ptr() // CHECK-NEXT: [[TMP1:%.*]] = addrspacecast i8 addrspace(4)* [[TMP0]] to i8* // CHECK-NEXT: store i8* [[TMP1]], i8** [[X_ASCAST]], align 8 // CHECK-NEXT: ret void // __device__ void test_ret_builtin_nondef_addrspace() { void *x = __builtin_amdgcn_dispatch_ptr(); } // CHECK-LABEL: @_Z6endpgmv( // CHECK-NEXT: entry: // CHECK-NEXT: call void @llvm.amdgcn.endpgm() // CHECK-NEXT: ret void // __global__ void endpgm() { __builtin_amdgcn_endpgm(); } // Check the 64 bit argument is correctly passed to the intrinsic without truncation or assertion. 
// CHECK-LABEL: @_Z14test_uicmp_i64Pyyy( // CHECK-NEXT: entry: // CHECK-NEXT: [[OUT:%.*]] = alloca i64*, align 8, addrspace(5) // CHECK-NEXT: [[OUT_ADDR:%.*]] = alloca i64*, align 8, addrspace(5) // CHECK-NEXT: [[A_ADDR:%.*]] = alloca i64, align 8, addrspace(5) // CHECK-NEXT: [[B_ADDR:%.*]] = alloca i64, align 8, addrspace(5) // CHECK-NEXT: [[OUT_ASCAST:%.*]] = addrspacecast i64* addrspace(5)* [[OUT]] to i64** // CHECK-NEXT: [[OUT_ADDR_ASCAST:%.*]] = addrspacecast i64* addrspace(5)* [[OUT_ADDR]] to i64** // CHECK-NEXT: [[A_ADDR_ASCAST:%.*]] = addrspacecast i64 addrspace(5)* [[A_ADDR]] to i64* // CHECK-NEXT: [[B_ADDR_ASCAST:%.*]] = addrspacecast i64 addrspace(5)* [[B_ADDR]] to i64* // CHECK-NEXT: [[TMP0:%.*]] = addrspacecast i64 addrspace(1)* [[OUT_COERCE:%.*]] to i64* // CHECK-NEXT: store i64* [[TMP0]], i64** [[OUT_ASCAST]], align 8 // CHECK-NEXT: [[OUT1:%.*]] = load i64*, i64** [[OUT_ASCAST]], align 8 // CHECK-NEXT: store i64* [[OUT1]], i64** [[OUT_ADDR_ASCAST]], align 8 // CHECK-NEXT: store i64 [[A:%.*]], i64* [[A_ADDR_ASCAST]], align 8 // CHECK-NEXT: store i64 [[B:%.*]], i64* [[B_ADDR_ASCAST]], align 8 // CHECK-NEXT: [[TMP1:%.*]] = load i64, i64* [[A_ADDR_ASCAST]], align 8 // CHECK-NEXT: [[TMP2:%.*]] = load i64, i64* [[B_ADDR_ASCAST]], align 8 // CHECK-NEXT: [[TMP3:%.*]] = call i64 @llvm.amdgcn.icmp.i64.i64(i64 [[TMP1]], i64 [[TMP2]], i32 35) // CHECK-NEXT: [[TMP4:%.*]] = load i64*, i64** [[OUT_ADDR_ASCAST]], align 8 // CHECK-NEXT: store i64 [[TMP3]], i64* [[TMP4]], align 8 // CHECK-NEXT: ret void // __global__ void test_uicmp_i64(unsigned long long *out, unsigned long long a, unsigned long long b) { *out = __builtin_amdgcn_uicmpl(a, b, 30+5); } // Check the 64 bit return value is correctly returned without truncation or assertion. // CHECK-LABEL: @_Z14test_s_memtimePy( // CHECK-NEXT: entry: // CHECK-NEXT: [[OUT:%.*]] = alloca i64*, align 8, addrspace(5) // CHECK-NEXT: [[OUT_ADDR:%.*]] = alloca i64*, align 8, addrspace(5) // CHECK-NEXT: [[OUT_ASCAST:%.*]] = addrspacecast i64* addrspace(5)* [[OUT]] to i64** // CHECK-NEXT: [[OUT_ADDR_ASCAST:%.*]] = addrspacecast i64* addrspace(5)* [[OUT_ADDR]] to i64** // CHECK-NEXT: [[TMP0:%.*]] = addrspacecast i64 addrspace(1)* [[OUT_COERCE:%.*]] to i64* // CHECK-NEXT: store i64* [[TMP0]], i64** [[OUT_ASCAST]], align 8 // CHECK-NEXT: [[OUT1:%.*]] = load i64*, i64** [[OUT_ASCAST]], align 8 // CHECK-NEXT: store i64* [[OUT1]], i64** [[OUT_ADDR_ASCAST]], align 8 // CHECK-NEXT: [[TMP1:%.*]] = call i64 @llvm.amdgcn.s.memtime() // CHECK-NEXT: [[TMP2:%.*]] = load i64*, i64** [[OUT_ADDR_ASCAST]], align 8 // CHECK-NEXT: store i64 [[TMP1]], i64* [[TMP2]], align 8 // CHECK-NEXT: ret void // __global__ void test_s_memtime(unsigned long long* out) { *out = __builtin_amdgcn_s_memtime(); } // Check a generic pointer can be passed as a shared pointer and a generic pointer. 
__device__ void func(float *x); // CHECK-LABEL: @_Z17test_ds_fmin_funcfPf( // CHECK-NEXT: entry: // CHECK-NEXT: [[SHARED:%.*]] = alloca float*, align 8, addrspace(5) // CHECK-NEXT: [[SRC_ADDR:%.*]] = alloca float, align 4, addrspace(5) // CHECK-NEXT: [[SHARED_ADDR:%.*]] = alloca float*, align 8, addrspace(5) // CHECK-NEXT: [[X:%.*]] = alloca float, align 4, addrspace(5) // CHECK-NEXT: [[SHARED_ASCAST:%.*]] = addrspacecast float* addrspace(5)* [[SHARED]] to float** // CHECK-NEXT: [[SRC_ADDR_ASCAST:%.*]] = addrspacecast float addrspace(5)* [[SRC_ADDR]] to float* // CHECK-NEXT: [[SHARED_ADDR_ASCAST:%.*]] = addrspacecast float* addrspace(5)* [[SHARED_ADDR]] to float** // CHECK-NEXT: [[X_ASCAST:%.*]] = addrspacecast float addrspace(5)* [[X]] to float* // CHECK-NEXT: [[TMP0:%.*]] = addrspacecast float addrspace(1)* [[SHARED_COERCE:%.*]] to float* // CHECK-NEXT: store float* [[TMP0]], float** [[SHARED_ASCAST]], align 8 // CHECK-NEXT: [[SHARED1:%.*]] = load float*, float** [[SHARED_ASCAST]], align 8 // CHECK-NEXT: store float [[SRC:%.*]], float* [[SRC_ADDR_ASCAST]], align 4 // CHECK-NEXT: store float* [[SHARED1]], float** [[SHARED_ADDR_ASCAST]], align 8 // CHECK-NEXT: [[TMP1:%.*]] = load float*, float** [[SHARED_ADDR_ASCAST]], align 8 // CHECK-NEXT: [[TMP2:%.*]] = addrspacecast float* [[TMP1]] to float addrspace(3)* // CHECK-NEXT: [[TMP3:%.*]] = load float, float* [[SRC_ADDR_ASCAST]], align 4 // CHECK-NEXT: [[TMP4:%.*]] = call contract float @llvm.amdgcn.ds.fmin.f32(float addrspace(3)* [[TMP2]], float [[TMP3]], i32 0, i32 0, i1 false) // CHECK-NEXT: store volatile float [[TMP4]], float* [[X_ASCAST]], align 4 // CHECK-NEXT: [[TMP5:%.*]] = load float*, float** [[SHARED_ADDR_ASCAST]], align 8 // CHECK-NEXT: call void @_Z4funcPf(float* noundef [[TMP5]]) #[[ATTR8:[0-9]+]] // CHECK-NEXT: ret void // __global__ void test_ds_fmin_func(float src, float *__restrict shared) { volatile float x = __builtin_amdgcn_ds_fminf(shared, src, 0, 0, false); func(shared); } // CHECK-LABEL: @_Z14test_is_sharedPf( // CHECK-NEXT: entry: // CHECK-NEXT: [[X:%.*]] = alloca float*, align 8, addrspace(5) // CHECK-NEXT: [[X_ADDR:%.*]] = alloca float*, align 8, addrspace(5) // CHECK-NEXT: [[RET:%.*]] = alloca i8, align 1, addrspace(5) // CHECK-NEXT: [[X_ASCAST:%.*]] = addrspacecast float* addrspace(5)* [[X]] to float** // CHECK-NEXT: [[X_ADDR_ASCAST:%.*]] = addrspacecast float* addrspace(5)* [[X_ADDR]] to float** // CHECK-NEXT: [[RET_ASCAST:%.*]] = addrspacecast i8 addrspace(5)* [[RET]] to i8* // CHECK-NEXT: [[TMP0:%.*]] = addrspacecast float addrspace(1)* [[X_COERCE:%.*]] to float* // CHECK-NEXT: store float* [[TMP0]], float** [[X_ASCAST]], align 8 // CHECK-NEXT: [[X1:%.*]] = load float*, float** [[X_ASCAST]], align 8 // CHECK-NEXT: store float* [[X1]], float** [[X_ADDR_ASCAST]], align 8 // CHECK-NEXT: [[TMP1:%.*]] = load float*, float** [[X_ADDR_ASCAST]], align 8 // CHECK-NEXT: [[TMP2:%.*]] = bitcast float* [[TMP1]] to i8* // CHECK-NEXT: [[TMP3:%.*]] = call i1 @llvm.amdgcn.is.shared(i8* [[TMP2]]) // CHECK-NEXT: [[FROMBOOL:%.*]] = zext i1 [[TMP3]] to i8 // CHECK-NEXT: store i8 [[FROMBOOL]], i8* [[RET_ASCAST]], align 1 // CHECK-NEXT: ret void // __global__ void test_is_shared(float *x){ bool ret = __builtin_amdgcn_is_shared(x); }
5c75d80ae7fc50d3d1e1dc547cb3f5f327d1b273.cu
// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py // RUN: %clang_cc1 -no-opaque-pointers -triple amdgcn-amd-amdhsa -target-cpu gfx906 -x hip \ // RUN: -aux-triple x86_64-unknown-linux-gnu -fcuda-is-device -emit-llvm %s \ // RUN: -o - | FileCheck %s // RUN: %clang_cc1 -no-opaque-pointers -triple amdgcn-amd-amdhsa -target-cpu gfx906 -x hip \ // RUN: -aux-triple x86_64-pc-windows-msvc -fcuda-is-device -emit-llvm %s \ // RUN: -o - | FileCheck %s #include "Inputs/cuda.h" // CHECK-LABEL: @_Z16use_dispatch_ptrPi( // CHECK-NEXT: entry: // CHECK-NEXT: [[OUT:%.*]] = alloca i32*, align 8, addrspace(5) // CHECK-NEXT: [[OUT_ADDR:%.*]] = alloca i32*, align 8, addrspace(5) // CHECK-NEXT: [[DISPATCH_PTR:%.*]] = alloca i32*, align 8, addrspace(5) // CHECK-NEXT: [[OUT_ASCAST:%.*]] = addrspacecast i32* addrspace(5)* [[OUT]] to i32** // CHECK-NEXT: [[OUT_ADDR_ASCAST:%.*]] = addrspacecast i32* addrspace(5)* [[OUT_ADDR]] to i32** // CHECK-NEXT: [[DISPATCH_PTR_ASCAST:%.*]] = addrspacecast i32* addrspace(5)* [[DISPATCH_PTR]] to i32** // CHECK-NEXT: [[TMP0:%.*]] = addrspacecast i32 addrspace(1)* [[OUT_COERCE:%.*]] to i32* // CHECK-NEXT: store i32* [[TMP0]], i32** [[OUT_ASCAST]], align 8 // CHECK-NEXT: [[OUT1:%.*]] = load i32*, i32** [[OUT_ASCAST]], align 8 // CHECK-NEXT: store i32* [[OUT1]], i32** [[OUT_ADDR_ASCAST]], align 8 // CHECK-NEXT: [[TMP1:%.*]] = call align 4 dereferenceable(64) i8 addrspace(4)* @llvm.amdgcn.dispatch.ptr() // CHECK-NEXT: [[TMP2:%.*]] = addrspacecast i8 addrspace(4)* [[TMP1]] to i32* // CHECK-NEXT: store i32* [[TMP2]], i32** [[DISPATCH_PTR_ASCAST]], align 8 // CHECK-NEXT: [[TMP3:%.*]] = load i32*, i32** [[DISPATCH_PTR_ASCAST]], align 8 // CHECK-NEXT: [[TMP4:%.*]] = load i32, i32* [[TMP3]], align 4 // CHECK-NEXT: [[TMP5:%.*]] = load i32*, i32** [[OUT_ADDR_ASCAST]], align 8 // CHECK-NEXT: store i32 [[TMP4]], i32* [[TMP5]], align 4 // CHECK-NEXT: ret void // __global__ void use_dispatch_ptr(int* out) { const int* dispatch_ptr = (const int*)__builtin_amdgcn_dispatch_ptr(); *out = *dispatch_ptr; } __global__ // CHECK-LABEL: @_Z12test_ds_fmaxf( // CHECK-NEXT: entry: // CHECK-NEXT: [[SRC_ADDR:%.*]] = alloca float, align 4, addrspace(5) // CHECK-NEXT: [[X:%.*]] = alloca float, align 4, addrspace(5) // CHECK-NEXT: [[SRC_ADDR_ASCAST:%.*]] = addrspacecast float addrspace(5)* [[SRC_ADDR]] to float* // CHECK-NEXT: [[X_ASCAST:%.*]] = addrspacecast float addrspace(5)* [[X]] to float* // CHECK-NEXT: store float [[SRC:%.*]], float* [[SRC_ADDR_ASCAST]], align 4 // CHECK-NEXT: [[TMP0:%.*]] = load float, float* [[SRC_ADDR_ASCAST]], align 4 // CHECK-NEXT: [[TMP1:%.*]] = call contract float @llvm.amdgcn.ds.fmax.f32(float addrspace(3)* @_ZZ12test_ds_fmaxfE6shared, float [[TMP0]], i32 0, i32 0, i1 false) // CHECK-NEXT: store volatile float [[TMP1]], float* [[X_ASCAST]], align 4 // CHECK-NEXT: ret void // void test_ds_fmax(float src) { __shared__ float shared; volatile float x = __builtin_amdgcn_ds_fmaxf(&shared, src, 0, 0, false); } // CHECK-LABEL: @_Z12test_ds_faddf( // CHECK-NEXT: entry: // CHECK-NEXT: [[SRC_ADDR:%.*]] = alloca float, align 4, addrspace(5) // CHECK-NEXT: [[X:%.*]] = alloca float, align 4, addrspace(5) // CHECK-NEXT: [[SRC_ADDR_ASCAST:%.*]] = addrspacecast float addrspace(5)* [[SRC_ADDR]] to float* // CHECK-NEXT: [[X_ASCAST:%.*]] = addrspacecast float addrspace(5)* [[X]] to float* // CHECK-NEXT: store float [[SRC:%.*]], float* [[SRC_ADDR_ASCAST]], align 4 // CHECK-NEXT: [[TMP0:%.*]] = load float, float* [[SRC_ADDR_ASCAST]], align 4 // CHECK-NEXT: [[TMP1:%.*]] = call 
contract float @llvm.amdgcn.ds.fadd.f32(float addrspace(3)* @_ZZ12test_ds_faddfE6shared, float [[TMP0]], i32 0, i32 0, i1 false) // CHECK-NEXT: store volatile float [[TMP1]], float* [[X_ASCAST]], align 4 // CHECK-NEXT: ret void // __global__ void test_ds_fadd(float src) { __shared__ float shared; volatile float x = __builtin_amdgcn_ds_faddf(&shared, src, 0, 0, false); } // CHECK-LABEL: @_Z12test_ds_fminfPf( // CHECK-NEXT: entry: // CHECK-NEXT: [[SHARED:%.*]] = alloca float*, align 8, addrspace(5) // CHECK-NEXT: [[SRC_ADDR:%.*]] = alloca float, align 4, addrspace(5) // CHECK-NEXT: [[SHARED_ADDR:%.*]] = alloca float*, align 8, addrspace(5) // CHECK-NEXT: [[X:%.*]] = alloca float, align 4, addrspace(5) // CHECK-NEXT: [[SHARED_ASCAST:%.*]] = addrspacecast float* addrspace(5)* [[SHARED]] to float** // CHECK-NEXT: [[SRC_ADDR_ASCAST:%.*]] = addrspacecast float addrspace(5)* [[SRC_ADDR]] to float* // CHECK-NEXT: [[SHARED_ADDR_ASCAST:%.*]] = addrspacecast float* addrspace(5)* [[SHARED_ADDR]] to float** // CHECK-NEXT: [[X_ASCAST:%.*]] = addrspacecast float addrspace(5)* [[X]] to float* // CHECK-NEXT: [[TMP0:%.*]] = addrspacecast float addrspace(1)* [[SHARED_COERCE:%.*]] to float* // CHECK-NEXT: store float* [[TMP0]], float** [[SHARED_ASCAST]], align 8 // CHECK-NEXT: [[SHARED1:%.*]] = load float*, float** [[SHARED_ASCAST]], align 8 // CHECK-NEXT: store float [[SRC:%.*]], float* [[SRC_ADDR_ASCAST]], align 4 // CHECK-NEXT: store float* [[SHARED1]], float** [[SHARED_ADDR_ASCAST]], align 8 // CHECK-NEXT: [[TMP1:%.*]] = load float*, float** [[SHARED_ADDR_ASCAST]], align 8 // CHECK-NEXT: [[TMP2:%.*]] = addrspacecast float* [[TMP1]] to float addrspace(3)* // CHECK-NEXT: [[TMP3:%.*]] = load float, float* [[SRC_ADDR_ASCAST]], align 4 // CHECK-NEXT: [[TMP4:%.*]] = call contract float @llvm.amdgcn.ds.fmin.f32(float addrspace(3)* [[TMP2]], float [[TMP3]], i32 0, i32 0, i1 false) // CHECK-NEXT: store volatile float [[TMP4]], float* [[X_ASCAST]], align 4 // CHECK-NEXT: ret void // __global__ void test_ds_fmin(float src, float *shared) { volatile float x = __builtin_amdgcn_ds_fminf(shared, src, 0, 0, false); } // CHECK-LABEL: @_Z33test_ret_builtin_nondef_addrspacev( // CHECK-NEXT: entry: // CHECK-NEXT: [[X:%.*]] = alloca i8*, align 8, addrspace(5) // CHECK-NEXT: [[X_ASCAST:%.*]] = addrspacecast i8* addrspace(5)* [[X]] to i8** // CHECK-NEXT: [[TMP0:%.*]] = call align 4 dereferenceable(64) i8 addrspace(4)* @llvm.amdgcn.dispatch.ptr() // CHECK-NEXT: [[TMP1:%.*]] = addrspacecast i8 addrspace(4)* [[TMP0]] to i8* // CHECK-NEXT: store i8* [[TMP1]], i8** [[X_ASCAST]], align 8 // CHECK-NEXT: ret void // __device__ void test_ret_builtin_nondef_addrspace() { void *x = __builtin_amdgcn_dispatch_ptr(); } // CHECK-LABEL: @_Z6endpgmv( // CHECK-NEXT: entry: // CHECK-NEXT: call void @llvm.amdgcn.endpgm() // CHECK-NEXT: ret void // __global__ void endpgm() { __builtin_amdgcn_endpgm(); } // Check the 64 bit argument is correctly passed to the intrinsic without truncation or assertion. 
// CHECK-LABEL: @_Z14test_uicmp_i64Pyyy( // CHECK-NEXT: entry: // CHECK-NEXT: [[OUT:%.*]] = alloca i64*, align 8, addrspace(5) // CHECK-NEXT: [[OUT_ADDR:%.*]] = alloca i64*, align 8, addrspace(5) // CHECK-NEXT: [[A_ADDR:%.*]] = alloca i64, align 8, addrspace(5) // CHECK-NEXT: [[B_ADDR:%.*]] = alloca i64, align 8, addrspace(5) // CHECK-NEXT: [[OUT_ASCAST:%.*]] = addrspacecast i64* addrspace(5)* [[OUT]] to i64** // CHECK-NEXT: [[OUT_ADDR_ASCAST:%.*]] = addrspacecast i64* addrspace(5)* [[OUT_ADDR]] to i64** // CHECK-NEXT: [[A_ADDR_ASCAST:%.*]] = addrspacecast i64 addrspace(5)* [[A_ADDR]] to i64* // CHECK-NEXT: [[B_ADDR_ASCAST:%.*]] = addrspacecast i64 addrspace(5)* [[B_ADDR]] to i64* // CHECK-NEXT: [[TMP0:%.*]] = addrspacecast i64 addrspace(1)* [[OUT_COERCE:%.*]] to i64* // CHECK-NEXT: store i64* [[TMP0]], i64** [[OUT_ASCAST]], align 8 // CHECK-NEXT: [[OUT1:%.*]] = load i64*, i64** [[OUT_ASCAST]], align 8 // CHECK-NEXT: store i64* [[OUT1]], i64** [[OUT_ADDR_ASCAST]], align 8 // CHECK-NEXT: store i64 [[A:%.*]], i64* [[A_ADDR_ASCAST]], align 8 // CHECK-NEXT: store i64 [[B:%.*]], i64* [[B_ADDR_ASCAST]], align 8 // CHECK-NEXT: [[TMP1:%.*]] = load i64, i64* [[A_ADDR_ASCAST]], align 8 // CHECK-NEXT: [[TMP2:%.*]] = load i64, i64* [[B_ADDR_ASCAST]], align 8 // CHECK-NEXT: [[TMP3:%.*]] = call i64 @llvm.amdgcn.icmp.i64.i64(i64 [[TMP1]], i64 [[TMP2]], i32 35) // CHECK-NEXT: [[TMP4:%.*]] = load i64*, i64** [[OUT_ADDR_ASCAST]], align 8 // CHECK-NEXT: store i64 [[TMP3]], i64* [[TMP4]], align 8 // CHECK-NEXT: ret void // __global__ void test_uicmp_i64(unsigned long long *out, unsigned long long a, unsigned long long b) { *out = __builtin_amdgcn_uicmpl(a, b, 30+5); } // Check the 64 bit return value is correctly returned without truncation or assertion. // CHECK-LABEL: @_Z14test_s_memtimePy( // CHECK-NEXT: entry: // CHECK-NEXT: [[OUT:%.*]] = alloca i64*, align 8, addrspace(5) // CHECK-NEXT: [[OUT_ADDR:%.*]] = alloca i64*, align 8, addrspace(5) // CHECK-NEXT: [[OUT_ASCAST:%.*]] = addrspacecast i64* addrspace(5)* [[OUT]] to i64** // CHECK-NEXT: [[OUT_ADDR_ASCAST:%.*]] = addrspacecast i64* addrspace(5)* [[OUT_ADDR]] to i64** // CHECK-NEXT: [[TMP0:%.*]] = addrspacecast i64 addrspace(1)* [[OUT_COERCE:%.*]] to i64* // CHECK-NEXT: store i64* [[TMP0]], i64** [[OUT_ASCAST]], align 8 // CHECK-NEXT: [[OUT1:%.*]] = load i64*, i64** [[OUT_ASCAST]], align 8 // CHECK-NEXT: store i64* [[OUT1]], i64** [[OUT_ADDR_ASCAST]], align 8 // CHECK-NEXT: [[TMP1:%.*]] = call i64 @llvm.amdgcn.s.memtime() // CHECK-NEXT: [[TMP2:%.*]] = load i64*, i64** [[OUT_ADDR_ASCAST]], align 8 // CHECK-NEXT: store i64 [[TMP1]], i64* [[TMP2]], align 8 // CHECK-NEXT: ret void // __global__ void test_s_memtime(unsigned long long* out) { *out = __builtin_amdgcn_s_memtime(); } // Check a generic pointer can be passed as a shared pointer and a generic pointer. 
__device__ void func(float *x); // CHECK-LABEL: @_Z17test_ds_fmin_funcfPf( // CHECK-NEXT: entry: // CHECK-NEXT: [[SHARED:%.*]] = alloca float*, align 8, addrspace(5) // CHECK-NEXT: [[SRC_ADDR:%.*]] = alloca float, align 4, addrspace(5) // CHECK-NEXT: [[SHARED_ADDR:%.*]] = alloca float*, align 8, addrspace(5) // CHECK-NEXT: [[X:%.*]] = alloca float, align 4, addrspace(5) // CHECK-NEXT: [[SHARED_ASCAST:%.*]] = addrspacecast float* addrspace(5)* [[SHARED]] to float** // CHECK-NEXT: [[SRC_ADDR_ASCAST:%.*]] = addrspacecast float addrspace(5)* [[SRC_ADDR]] to float* // CHECK-NEXT: [[SHARED_ADDR_ASCAST:%.*]] = addrspacecast float* addrspace(5)* [[SHARED_ADDR]] to float** // CHECK-NEXT: [[X_ASCAST:%.*]] = addrspacecast float addrspace(5)* [[X]] to float* // CHECK-NEXT: [[TMP0:%.*]] = addrspacecast float addrspace(1)* [[SHARED_COERCE:%.*]] to float* // CHECK-NEXT: store float* [[TMP0]], float** [[SHARED_ASCAST]], align 8 // CHECK-NEXT: [[SHARED1:%.*]] = load float*, float** [[SHARED_ASCAST]], align 8 // CHECK-NEXT: store float [[SRC:%.*]], float* [[SRC_ADDR_ASCAST]], align 4 // CHECK-NEXT: store float* [[SHARED1]], float** [[SHARED_ADDR_ASCAST]], align 8 // CHECK-NEXT: [[TMP1:%.*]] = load float*, float** [[SHARED_ADDR_ASCAST]], align 8 // CHECK-NEXT: [[TMP2:%.*]] = addrspacecast float* [[TMP1]] to float addrspace(3)* // CHECK-NEXT: [[TMP3:%.*]] = load float, float* [[SRC_ADDR_ASCAST]], align 4 // CHECK-NEXT: [[TMP4:%.*]] = call contract float @llvm.amdgcn.ds.fmin.f32(float addrspace(3)* [[TMP2]], float [[TMP3]], i32 0, i32 0, i1 false) // CHECK-NEXT: store volatile float [[TMP4]], float* [[X_ASCAST]], align 4 // CHECK-NEXT: [[TMP5:%.*]] = load float*, float** [[SHARED_ADDR_ASCAST]], align 8 // CHECK-NEXT: call void @_Z4funcPf(float* noundef [[TMP5]]) #[[ATTR8:[0-9]+]] // CHECK-NEXT: ret void // __global__ void test_ds_fmin_func(float src, float *__restrict shared) { volatile float x = __builtin_amdgcn_ds_fminf(shared, src, 0, 0, false); func(shared); } // CHECK-LABEL: @_Z14test_is_sharedPf( // CHECK-NEXT: entry: // CHECK-NEXT: [[X:%.*]] = alloca float*, align 8, addrspace(5) // CHECK-NEXT: [[X_ADDR:%.*]] = alloca float*, align 8, addrspace(5) // CHECK-NEXT: [[RET:%.*]] = alloca i8, align 1, addrspace(5) // CHECK-NEXT: [[X_ASCAST:%.*]] = addrspacecast float* addrspace(5)* [[X]] to float** // CHECK-NEXT: [[X_ADDR_ASCAST:%.*]] = addrspacecast float* addrspace(5)* [[X_ADDR]] to float** // CHECK-NEXT: [[RET_ASCAST:%.*]] = addrspacecast i8 addrspace(5)* [[RET]] to i8* // CHECK-NEXT: [[TMP0:%.*]] = addrspacecast float addrspace(1)* [[X_COERCE:%.*]] to float* // CHECK-NEXT: store float* [[TMP0]], float** [[X_ASCAST]], align 8 // CHECK-NEXT: [[X1:%.*]] = load float*, float** [[X_ASCAST]], align 8 // CHECK-NEXT: store float* [[X1]], float** [[X_ADDR_ASCAST]], align 8 // CHECK-NEXT: [[TMP1:%.*]] = load float*, float** [[X_ADDR_ASCAST]], align 8 // CHECK-NEXT: [[TMP2:%.*]] = bitcast float* [[TMP1]] to i8* // CHECK-NEXT: [[TMP3:%.*]] = call i1 @llvm.amdgcn.is.shared(i8* [[TMP2]]) // CHECK-NEXT: [[FROMBOOL:%.*]] = zext i1 [[TMP3]] to i8 // CHECK-NEXT: store i8 [[FROMBOOL]], i8* [[RET_ASCAST]], align 1 // CHECK-NEXT: ret void // __global__ void test_is_shared(float *x){ bool ret = __builtin_amdgcn_is_shared(x); }
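As a further hedged sketch (this kernel is not in the test), __builtin_amdgcn_is_shared can be contrasted on an LDS object and a global buffer; the expected true/false results mirror the address-space handling the CHECK lines above verify.

// Hypothetical demo kernel, using the builtin with the same signature as in the test above.
__global__ void is_shared_demo(float* global_out) {
  __shared__ float lds_val;                                          // placed in LDS (addrspace(3))
  bool lds_flag    = __builtin_amdgcn_is_shared((void*)&lds_val);    // expected: true
  bool global_flag = __builtin_amdgcn_is_shared((void*)global_out);  // expected: false
  *global_out = (lds_flag && !global_flag) ? 1.0f : 0.0f;
}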
716ff0287167554167321713541a7cf4331295d2.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* * Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include "fastertransformer/common.h" #include "cuda_kernels.h" #include "hipcub/hipcub.hpp" #include <assert.h> #include <cstdio> #include <cstdlib> #include <climits> #include <cfloat> namespace fastertransformer { /* ********************************** common kernel *********************************** */ template <typename T> __global__ void init_kernel(bool* finished, int* sequence_length, int* word_ids, T* cum_log_probs, const int sentence_id, const int n, const int beam_width) { int tid = threadIdx.x; finished[tid] = false; sequence_length[tid] = 0; word_ids[tid] = sentence_id; cum_log_probs[tid] = (T)(tid % beam_width == 0 ? 0.0f: -1e20f); } void init_kernelLauncher(bool* finished, int* sequence_length, int* word_ids, float* cum_log_probs, const int sentence_id, const int batch_size, const int beam_width, hipStream_t stream) { dim3 grid(1); dim3 block(min(1024, batch_size * beam_width)); assert(batch_size * beam_width <= 1024); hipLaunchKernelGGL(( init_kernel<float>), dim3(grid), dim3(block), 0, stream, finished, sequence_length, word_ids, cum_log_probs, sentence_id, batch_size * beam_width, beam_width); } template <typename T> __global__ void embedding_lookup_sine_position_encoding_kernel(T* from_tensor, const T* embedding_table, const T* position_encoding, const int* word_ids, const int hidden_units) { const int tid = threadIdx.x; const int bid = blockIdx.x; const int write_pos = tid + bid * blockDim.x; // 1. lookup the table // 2. multiply hidden_dim**0.5 // 3. 
add the position encoding from_tensor[write_pos] = embedding_table[word_ids[bid] * hidden_units + tid] * (T)sqrtf(float(hidden_units)) + position_encoding[tid]; } template <typename T> void embedding_lookup_sine_position_encoding_kernel_launcher(T* from_tensor, const T* embedding_table, const T* position_encoding, const int* word_ids, const int batch_size, const int hidden_units, hipStream_t stream) { assert(hidden_units <= 1024); dim3 grid(batch_size); dim3 block(hidden_units); hipLaunchKernelGGL(( embedding_lookup_sine_position_encoding_kernel<T>), dim3(grid), dim3(block), 0, stream, from_tensor, embedding_table, position_encoding, word_ids, hidden_units); } /* *************************** end of common kernel *********************************** */ /* ********************************** BeamSearch kernel *********************************** */ template<typename T> __global__ void broadcast_kernel(T* log_probs, T* cum_log_probs, const int vocab_size, const int N) { int tid = blockIdx.x * blockDim.x + threadIdx.x; int bid = tid / vocab_size; if(tid < N) log_probs[tid] += cum_log_probs[bid]; } void broadcast_kernelLauncher(float* log_probs, float* cum_log_probs, const int batch_size, const int beam_width, const int vocab_size, hipStream_t stream) { int N = batch_size * beam_width * vocab_size; dim3 block(1024); dim3 grid((N - 1) / block.x + 1); hipLaunchKernelGGL(( broadcast_kernel<float>), dim3(grid), dim3(block), 0, stream, log_probs, cum_log_probs, vocab_size, N); } template <typename T> __global__ void update_kernel(T* log_probs, T* cum_log_probs, int* ids, bool* finished, int* parent_ids, int* sequence_length, int* word_ids, int* output_ids, const int batch_size, const int beam_width, const int vocab_size, const int end_id, int* finished_count) { int tid = threadIdx.x; sequence_length[tid] = finished[tid] ? sequence_length[tid] : sequence_length[tid] + 1; int beam_id = word_ids[tid] / vocab_size; int word_id = word_ids[tid] % vocab_size; cum_log_probs[tid] = log_probs[word_ids[tid]]; sequence_length[tid] = sequence_length[beam_id]; finished[tid] = word_id == end_id ? 1 : 0; parent_ids[tid] = beam_id; word_ids[tid] = word_id; output_ids[tid] = word_id; } void update_kernelLauncher(float* log_probs, float* cum_log_probs, int* ids, bool* finished, int* parent_ids, int* sequence_length, int* word_ids, int* output_ids, const int batch_size, const int beam_width, const int vocab_size, hipStream_t stream, const int end_id, int* finished_count) { dim3 grid(1); dim3 block(batch_size * beam_width); assert(block.x <= 1024); hipLaunchKernelGGL(( update_kernel<float>), dim3(grid), dim3(block), 0, stream, log_probs, cum_log_probs, ids, finished, parent_ids, sequence_length, word_ids, output_ids, batch_size, beam_width, vocab_size, end_id, finished_count); } template <typename T> __global__ void update_kernel_v2(bool* finished, int* parent_ids, int* sequence_length, int* word_ids, int* output_ids, const int vocab_size, const int end_id, int* finished_count) { int tid = threadIdx.x; sequence_length[tid] = finished[tid] ? sequence_length[tid] : sequence_length[tid] + 1; int beam_id = word_ids[tid] / vocab_size; int word_id = word_ids[tid] % vocab_size; sequence_length[tid] = sequence_length[beam_id]; finished[tid] = word_id == end_id ? 
1 : 0; parent_ids[tid] = beam_id; word_ids[tid] = word_id; output_ids[tid] = word_id; } void update_kernelLauncher_v2(bool* finished, int* parent_ids, int* sequence_length, int* word_ids, int* output_ids, int* finished_count, DecodingBeamsearchArguments args, hipStream_t stream) { dim3 grid(1); dim3 block(args.batch_size_ * args.beam_width_); assert(block.x <= 1024); hipLaunchKernelGGL(( update_kernel_v2<float>), dim3(grid), dim3(block), 0, stream, finished, parent_ids, sequence_length, word_ids, output_ids, args.vocab_size_, args.end_id_, finished_count); } template <typename T> __global__ void update_KV_cache_kernel(const T* __restrict key_src_cache, T* key_tgt_cache, const T* __restrict value_src_cache, T* value_tgt_cache, const int* beam_ids, const int batch_size, const int beam_width, const int hidden_dim, const int cache_size, const int step, const int decoder_layers) { int layer_id = blockIdx.x / batch_size / beam_width / step; int batch_id = (blockIdx.x % (batch_size * beam_width * step)) / (beam_width * step); int beam_id = (blockIdx.x % (beam_width * step)) / step; int step_id = blockIdx.x % step; int hidden_id = step_id * batch_size * beam_width * hidden_dim + beam_ids[batch_id * beam_width + beam_id] * hidden_dim; int tgt_hidden_id = step_id * batch_size * beam_width * hidden_dim + batch_id * beam_width * hidden_dim + beam_id * hidden_dim; const T* key_src_ptr = key_src_cache + layer_id * cache_size; T* key_tgt_ptr = key_tgt_cache + layer_id * cache_size; const T* value_src_ptr = value_src_cache + layer_id * cache_size; T* value_tgt_ptr = value_tgt_cache + layer_id * cache_size; for(int tid = threadIdx.x; tid < hidden_dim; tid += blockDim.x) { key_tgt_ptr[tgt_hidden_id + tid] = key_src_ptr[hidden_id + tid]; value_tgt_ptr[tgt_hidden_id + tid] = value_src_ptr[hidden_id + tid]; } } template <> __global__ void update_KV_cache_kernel(const half* __restrict key_src_cache, half* key_tgt_cache, const half* __restrict value_src_cache, half* value_tgt_cache, const int* beam_ids, const int batch_size, const int beam_width, const int hidden_dim, const int cache_size, const int step, const int decoder_layers) { int layer_id = blockIdx.x / batch_size / beam_width / step; int batch_id = (blockIdx.x % (batch_size * beam_width * step)) / (beam_width * step); int beam_id = (blockIdx.x % (beam_width * step)) / step; int step_id = blockIdx.x % step; int hidden_id = (step_id * batch_size * beam_width * hidden_dim + beam_ids[batch_id * beam_width + beam_id] * hidden_dim) / 2; int tgt_hidden_id = (step_id * batch_size * beam_width * hidden_dim + batch_id * beam_width * hidden_dim + beam_id * hidden_dim) / 2; const half2* key_src_ptr = (const half2*)key_src_cache + layer_id * cache_size / 2; half2* key_tgt_ptr = (half2*)key_tgt_cache + layer_id * cache_size / 2; const half2* value_src_ptr = (const half2*)value_src_cache + layer_id * cache_size / 2; half2* value_tgt_ptr = (half2*)value_tgt_cache + layer_id * cache_size / 2; for(int tid = threadIdx.x; tid < hidden_dim / 2; tid += blockDim.x) { key_tgt_ptr[tgt_hidden_id + tid] = key_src_ptr[hidden_id + tid]; value_tgt_ptr[tgt_hidden_id + tid] = value_src_ptr[hidden_id + tid]; } } template <typename T> void update_KV_cache_kernelLauncher(T** key_cache, T** value_cache, const int* beam_ids, const int batch_size, const int beam_width, const int hidden_dim, const int step, const int cache_size, const int decoder_layers, hipStream_t stream) { dim3 grid(decoder_layers * batch_size * beam_width * step); dim3 block(min(1024, hidden_dim)); block.x = block.x / (4 
/ sizeof(T)); int src_id = step & 0x1; int tgt_id = 1 - src_id; hipLaunchKernelGGL(( update_KV_cache_kernel), dim3(grid), dim3(block), 0, stream, key_cache[src_id], key_cache[tgt_id], value_cache[src_id], value_cache[tgt_id], beam_ids, batch_size, beam_width, hidden_dim, cache_size, step, decoder_layers); } /* *************************** end of BeamSearch kernel *********************************** */ /* ********************************** Sampling kernel *********************************** */ __global__ void topp_initialization_kernel(bool* finished, int* sequence_length, int* word_ids, int* topp_id_val_buf, int* topp_offset_buf, const int batch_size, const int vocab_size, const int start_id) { int tid = threadIdx.x; int bid = blockIdx.x; if(bid == 0) { for(int i = tid; i < batch_size + 1; i+= blockDim.x) { topp_offset_buf[i] = i * vocab_size; } for(int i = tid; i < batch_size; i+= blockDim.x) { finished[i] = false; sequence_length[i] = 0; word_ids[i] = start_id; } } int index = tid + bid * blockDim.x; while(index < batch_size * vocab_size) { topp_id_val_buf[index] = index % vocab_size; index += blockDim.x * gridDim.x; } } void topp_initialization_kernelLauncher(bool* finished, int* sequence_length, int* word_ids, int* topp_id_val_buf, int* topp_offset_buf, DecodingSamplingArguments args, hipStream_t stream) { hipLaunchKernelGGL(( topp_initialization_kernel), dim3(32), dim3(512), 0, stream, finished, sequence_length, word_ids, topp_id_val_buf, topp_offset_buf, args.batch_size_, args.vocab_size_, args.start_id_); } size_t get_topp_sort_temp_storage_size(const float* log_probs, const int* id_vals, float* sorted_log_probs, int* sorted_id_vals, int* topp_offset_buf, const int batch_size, const int vocab_size) { void *d_temp_storage = NULL; size_t temp_storage_bytes = 0; hipcub::DeviceSegmentedRadixSort::SortPairsDescending(d_temp_storage, temp_storage_bytes, log_probs, sorted_log_probs, id_vals, sorted_id_vals, vocab_size * batch_size, batch_size, topp_offset_buf, topp_offset_buf + 1); return temp_storage_bytes; } /* *************************** end of Sampling kernel *********************************** */ /* ********************************** Instantiation *********************************** */ template void embedding_lookup_sine_position_encoding_kernel_launcher(float* from_tensor, const float* embedding_table, const float* position_encoding, const int* word_ids, const int batch_size, const int hidden_units, hipStream_t stream); template void embedding_lookup_sine_position_encoding_kernel_launcher(half* from_tensor, const half* embedding_table, const half* position_encoding, const int* word_ids, const int batch_size, const int hidden_units, hipStream_t stream); template void update_KV_cache_kernelLauncher(float** key_cache, float** value_cache, const int* beam_ids, const int batch_size, const int beam_width, const int hidden_dim, const int step, const int cache_size, const int decoder_layers, hipStream_t stream); template void update_KV_cache_kernelLauncher(half** key_cache, half** value_cache, const int* beam_ids, const int batch_size, const int beam_width, const int hidden_dim, const int step, const int cache_size, const int decoder_layers, hipStream_t stream); /* *************************** end of Instantiation *********************************** */ } // end of name space fastertransformer
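Note that update_KV_cache_kernelLauncher above selects its source buffer with step & 0x1 and writes the other one, so callers are expected to keep two cache buffers and ping-pong between them every decoding step. A minimal, hedged usage sketch follows (buffer and variable names such as d_key_cache_0, d_beam_ids, max_seq_len and stream are illustrative, not taken from the original code):

// Illustrative decoding-loop usage (assumptions: caches and beam ids already allocated on device).
float* key_cache[2]   = {d_key_cache_0,   d_key_cache_1};    // hypothetical buffers,
float* value_cache[2] = {d_value_cache_0, d_value_cache_1};  // each decoder_layers * cache_size elements
for (int step = 1; step <= max_seq_len; ++step)
{
    // ... run the decoder and beam-search update for this step, producing d_beam_ids ...
    fastertransformer::update_KV_cache_kernelLauncher<float>(
        key_cache, value_cache, d_beam_ids,
        batch_size, beam_width, hidden_dim,
        step, cache_size, decoder_layers, stream);
    // The next iteration reads from key_cache[(step + 1) & 0x1], i.e. the buffer just written.
}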
716ff0287167554167321713541a7cf4331295d2.cu
/* * Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include "fastertransformer/common.h" #include "cuda_kernels.h" #include "cub/cub.cuh" #include <assert.h> #include <cstdio> #include <cstdlib> #include <climits> #include <cfloat> namespace fastertransformer { /* ********************************** common kernel *********************************** */ template <typename T> __global__ void init_kernel(bool* finished, int* sequence_length, int* word_ids, T* cum_log_probs, const int sentence_id, const int n, const int beam_width) { int tid = threadIdx.x; finished[tid] = false; sequence_length[tid] = 0; word_ids[tid] = sentence_id; cum_log_probs[tid] = (T)(tid % beam_width == 0 ? 0.0f: -1e20f); } void init_kernelLauncher(bool* finished, int* sequence_length, int* word_ids, float* cum_log_probs, const int sentence_id, const int batch_size, const int beam_width, cudaStream_t stream) { dim3 grid(1); dim3 block(min(1024, batch_size * beam_width)); assert(batch_size * beam_width <= 1024); init_kernel<float><<<grid, block, 0, stream>>>(finished, sequence_length, word_ids, cum_log_probs, sentence_id, batch_size * beam_width, beam_width); } template <typename T> __global__ void embedding_lookup_sine_position_encoding_kernel(T* from_tensor, const T* embedding_table, const T* position_encoding, const int* word_ids, const int hidden_units) { const int tid = threadIdx.x; const int bid = blockIdx.x; const int write_pos = tid + bid * blockDim.x; // 1. lookup the table // 2. multiply hidden_dim**0.5 // 3. 
add the position encoding from_tensor[write_pos] = embedding_table[word_ids[bid] * hidden_units + tid] * (T)sqrtf(float(hidden_units)) + position_encoding[tid]; } template <typename T> void embedding_lookup_sine_position_encoding_kernel_launcher(T* from_tensor, const T* embedding_table, const T* position_encoding, const int* word_ids, const int batch_size, const int hidden_units, cudaStream_t stream) { assert(hidden_units <= 1024); dim3 grid(batch_size); dim3 block(hidden_units); embedding_lookup_sine_position_encoding_kernel<T><<<grid, block, 0, stream>>>(from_tensor, embedding_table, position_encoding, word_ids, hidden_units); } /* *************************** end of common kernel *********************************** */ /* ********************************** BeamSearch kernel *********************************** */ template<typename T> __global__ void broadcast_kernel(T* log_probs, T* cum_log_probs, const int vocab_size, const int N) { int tid = blockIdx.x * blockDim.x + threadIdx.x; int bid = tid / vocab_size; if(tid < N) log_probs[tid] += cum_log_probs[bid]; } void broadcast_kernelLauncher(float* log_probs, float* cum_log_probs, const int batch_size, const int beam_width, const int vocab_size, cudaStream_t stream) { int N = batch_size * beam_width * vocab_size; dim3 block(1024); dim3 grid((N - 1) / block.x + 1); broadcast_kernel<float><<<grid, block, 0, stream>>>(log_probs, cum_log_probs, vocab_size, N); } template <typename T> __global__ void update_kernel(T* log_probs, T* cum_log_probs, int* ids, bool* finished, int* parent_ids, int* sequence_length, int* word_ids, int* output_ids, const int batch_size, const int beam_width, const int vocab_size, const int end_id, int* finished_count) { int tid = threadIdx.x; sequence_length[tid] = finished[tid] ? sequence_length[tid] : sequence_length[tid] + 1; int beam_id = word_ids[tid] / vocab_size; int word_id = word_ids[tid] % vocab_size; cum_log_probs[tid] = log_probs[word_ids[tid]]; sequence_length[tid] = sequence_length[beam_id]; finished[tid] = word_id == end_id ? 1 : 0; parent_ids[tid] = beam_id; word_ids[tid] = word_id; output_ids[tid] = word_id; } void update_kernelLauncher(float* log_probs, float* cum_log_probs, int* ids, bool* finished, int* parent_ids, int* sequence_length, int* word_ids, int* output_ids, const int batch_size, const int beam_width, const int vocab_size, cudaStream_t stream, const int end_id, int* finished_count) { dim3 grid(1); dim3 block(batch_size * beam_width); assert(block.x <= 1024); update_kernel<float><<<grid, block, 0, stream>>>(log_probs, cum_log_probs, ids, finished, parent_ids, sequence_length, word_ids, output_ids, batch_size, beam_width, vocab_size, end_id, finished_count); } template <typename T> __global__ void update_kernel_v2(bool* finished, int* parent_ids, int* sequence_length, int* word_ids, int* output_ids, const int vocab_size, const int end_id, int* finished_count) { int tid = threadIdx.x; sequence_length[tid] = finished[tid] ? sequence_length[tid] : sequence_length[tid] + 1; int beam_id = word_ids[tid] / vocab_size; int word_id = word_ids[tid] % vocab_size; sequence_length[tid] = sequence_length[beam_id]; finished[tid] = word_id == end_id ? 
1 : 0; parent_ids[tid] = beam_id; word_ids[tid] = word_id; output_ids[tid] = word_id; } void update_kernelLauncher_v2(bool* finished, int* parent_ids, int* sequence_length, int* word_ids, int* output_ids, int* finished_count, DecodingBeamsearchArguments args, cudaStream_t stream) { dim3 grid(1); dim3 block(args.batch_size_ * args.beam_width_); assert(block.x <= 1024); update_kernel_v2<float><<<grid, block, 0, stream>>>(finished, parent_ids, sequence_length, word_ids, output_ids, args.vocab_size_, args.end_id_, finished_count); } template <typename T> __global__ void update_KV_cache_kernel(const T* __restrict key_src_cache, T* key_tgt_cache, const T* __restrict value_src_cache, T* value_tgt_cache, const int* beam_ids, const int batch_size, const int beam_width, const int hidden_dim, const int cache_size, const int step, const int decoder_layers) { int layer_id = blockIdx.x / batch_size / beam_width / step; int batch_id = (blockIdx.x % (batch_size * beam_width * step)) / (beam_width * step); int beam_id = (blockIdx.x % (beam_width * step)) / step; int step_id = blockIdx.x % step; int hidden_id = step_id * batch_size * beam_width * hidden_dim + beam_ids[batch_id * beam_width + beam_id] * hidden_dim; int tgt_hidden_id = step_id * batch_size * beam_width * hidden_dim + batch_id * beam_width * hidden_dim + beam_id * hidden_dim; const T* key_src_ptr = key_src_cache + layer_id * cache_size; T* key_tgt_ptr = key_tgt_cache + layer_id * cache_size; const T* value_src_ptr = value_src_cache + layer_id * cache_size; T* value_tgt_ptr = value_tgt_cache + layer_id * cache_size; for(int tid = threadIdx.x; tid < hidden_dim; tid += blockDim.x) { key_tgt_ptr[tgt_hidden_id + tid] = key_src_ptr[hidden_id + tid]; value_tgt_ptr[tgt_hidden_id + tid] = value_src_ptr[hidden_id + tid]; } } template <> __global__ void update_KV_cache_kernel(const half* __restrict key_src_cache, half* key_tgt_cache, const half* __restrict value_src_cache, half* value_tgt_cache, const int* beam_ids, const int batch_size, const int beam_width, const int hidden_dim, const int cache_size, const int step, const int decoder_layers) { int layer_id = blockIdx.x / batch_size / beam_width / step; int batch_id = (blockIdx.x % (batch_size * beam_width * step)) / (beam_width * step); int beam_id = (blockIdx.x % (beam_width * step)) / step; int step_id = blockIdx.x % step; int hidden_id = (step_id * batch_size * beam_width * hidden_dim + beam_ids[batch_id * beam_width + beam_id] * hidden_dim) / 2; int tgt_hidden_id = (step_id * batch_size * beam_width * hidden_dim + batch_id * beam_width * hidden_dim + beam_id * hidden_dim) / 2; const half2* key_src_ptr = (const half2*)key_src_cache + layer_id * cache_size / 2; half2* key_tgt_ptr = (half2*)key_tgt_cache + layer_id * cache_size / 2; const half2* value_src_ptr = (const half2*)value_src_cache + layer_id * cache_size / 2; half2* value_tgt_ptr = (half2*)value_tgt_cache + layer_id * cache_size / 2; for(int tid = threadIdx.x; tid < hidden_dim / 2; tid += blockDim.x) { key_tgt_ptr[tgt_hidden_id + tid] = key_src_ptr[hidden_id + tid]; value_tgt_ptr[tgt_hidden_id + tid] = value_src_ptr[hidden_id + tid]; } } template <typename T> void update_KV_cache_kernelLauncher(T** key_cache, T** value_cache, const int* beam_ids, const int batch_size, const int beam_width, const int hidden_dim, const int step, const int cache_size, const int decoder_layers, cudaStream_t stream) { dim3 grid(decoder_layers * batch_size * beam_width * step); dim3 block(min(1024, hidden_dim)); block.x = block.x / (4 / sizeof(T)); int src_id = 
step & 0x1; int tgt_id = 1 - src_id; update_KV_cache_kernel<<<grid, block, 0, stream>>>( key_cache[src_id], key_cache[tgt_id], value_cache[src_id], value_cache[tgt_id], beam_ids, batch_size, beam_width, hidden_dim, cache_size, step, decoder_layers); } /* *************************** end of BeamSearch kernel *********************************** */ /* ********************************** Sampling kernel *********************************** */ __global__ void topp_initialization_kernel(bool* finished, int* sequence_length, int* word_ids, int* topp_id_val_buf, int* topp_offset_buf, const int batch_size, const int vocab_size, const int start_id) { int tid = threadIdx.x; int bid = blockIdx.x; if(bid == 0) { for(int i = tid; i < batch_size + 1; i+= blockDim.x) { topp_offset_buf[i] = i * vocab_size; } for(int i = tid; i < batch_size; i+= blockDim.x) { finished[i] = false; sequence_length[i] = 0; word_ids[i] = start_id; } } int index = tid + bid * blockDim.x; while(index < batch_size * vocab_size) { topp_id_val_buf[index] = index % vocab_size; index += blockDim.x * gridDim.x; } } void topp_initialization_kernelLauncher(bool* finished, int* sequence_length, int* word_ids, int* topp_id_val_buf, int* topp_offset_buf, DecodingSamplingArguments args, cudaStream_t stream) { topp_initialization_kernel<<<32, 512, 0, stream>>>(finished, sequence_length, word_ids, topp_id_val_buf, topp_offset_buf, args.batch_size_, args.vocab_size_, args.start_id_); } size_t get_topp_sort_temp_storage_size(const float* log_probs, const int* id_vals, float* sorted_log_probs, int* sorted_id_vals, int* topp_offset_buf, const int batch_size, const int vocab_size) { void *d_temp_storage = NULL; size_t temp_storage_bytes = 0; cub::DeviceSegmentedRadixSort::SortPairsDescending(d_temp_storage, temp_storage_bytes, log_probs, sorted_log_probs, id_vals, sorted_id_vals, vocab_size * batch_size, batch_size, topp_offset_buf, topp_offset_buf + 1); return temp_storage_bytes; } /* *************************** end of Sampling kernel *********************************** */ /* ********************************** Instantiation *********************************** */ template void embedding_lookup_sine_position_encoding_kernel_launcher(float* from_tensor, const float* embedding_table, const float* position_encoding, const int* word_ids, const int batch_size, const int hidden_units, cudaStream_t stream); template void embedding_lookup_sine_position_encoding_kernel_launcher(half* from_tensor, const half* embedding_table, const half* position_encoding, const int* word_ids, const int batch_size, const int hidden_units, cudaStream_t stream); template void update_KV_cache_kernelLauncher(float** key_cache, float** value_cache, const int* beam_ids, const int batch_size, const int beam_width, const int hidden_dim, const int step, const int cache_size, const int decoder_layers, cudaStream_t stream); template void update_KV_cache_kernelLauncher(half** key_cache, half** value_cache, const int* beam_ids, const int batch_size, const int beam_width, const int hidden_dim, const int step, const int cache_size, const int decoder_layers, cudaStream_t stream); /* *************************** end of Instantiation *********************************** */ } // end of name space fastertransformer
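get_topp_sort_temp_storage_size above follows CUB's two-phase convention: calling SortPairsDescending with a NULL temp-storage pointer only reports the required byte count. A hedged sketch of the full pattern a caller would follow (the allocation and second call below are illustrative and not part of this file; buffer names are hypothetical):

// Phase 1: query the workspace size, then allocate it.
size_t temp_bytes = fastertransformer::get_topp_sort_temp_storage_size(
    d_log_probs, d_id_vals, d_sorted_log_probs, d_sorted_id_vals,
    d_topp_offset_buf, batch_size, vocab_size);
void* d_temp_storage = NULL;
cudaMalloc(&d_temp_storage, temp_bytes);

// Phase 2: run the actual segmented descending sort with the workspace.
cub::DeviceSegmentedRadixSort::SortPairsDescending(
    d_temp_storage, temp_bytes,
    d_log_probs, d_sorted_log_probs,
    d_id_vals, d_sorted_id_vals,
    vocab_size * batch_size, batch_size,
    d_topp_offset_buf, d_topp_offset_buf + 1);
cudaFree(d_temp_storage);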
f9dbb9889156fc26060ef91e7f146232bb7ece78.hip
// !!! This is a file automatically generated by hipify!!! /* -- MAGMA (version 2.5.4) -- Univ. of Tennessee, Knoxville Univ. of California, Berkeley Univ. of Colorado, Denver @date October 2020 @generated from sparse/blas/zmdot_shfl.cu, normal z -> d, Thu Oct 8 23:05:48 2020 @author Moritz Kreutzer */ #include "magmasparse_internal.h" #include "magmasparse_d.h" #define BLOCK_SIZE 512 #define PRECISION_d #include <hip/hip_runtime.h> // for TORCH_HIP_VERSION #if (TORCH_HIP_VERSION <= 6000) // CUDA 6.5 adds Double precision version; here's an implementation for CUDA 6.0 and earlier. // from https://devblogs.nvidia.com/parallelforall/faster-parallel-reductions-kepler/ __device__ inline real_Double_t __shfl_down(real_Double_t var, unsigned int srcLane, int width=32) { int2 a = *reinterpret_cast<int2*>(&var); a.x = __shfl_down(a.x, srcLane, width); a.y = __shfl_down(a.y, srcLane, width); return *reinterpret_cast<double*>(&a); } #endif template<typename T> __inline__ __device__ T warpReduceSum(T val) { #if __CUDA_ARCH__ >= 300 #if __CUDACC_VER_MAJOR__ < 9 val += __shfl_down(val, 16); val += __shfl_down(val, 8); val += __shfl_down(val, 4); val += __shfl_down(val, 2); val += __shfl_down(val, 1); #else val += __shfl_down_sync(0xffffffff,val, 16); val += __shfl_down_sync(0xffffffff,val, 8); val += __shfl_down_sync(0xffffffff,val, 4); val += __shfl_down_sync(0xffffffff,val, 2); val += __shfl_down_sync(0xffffffff,val, 1); #endif #endif return val; } #ifdef PRECISION_z template<> __inline__ __device__ double warpReduceSum<double>(double val) { #if __CUDA_ARCH__ >= 300 int4 a = *reinterpret_cast<int4*>(&val); #if __CUDACC_VER_MAJOR__ < 9 a.x += __shfl_down(a.x, 16); a.y += __shfl_down(a.y, 16); a.z += __shfl_down(a.z, 16); a.w += __shfl_down(a.w, 16); a.x += __shfl_down(a.x, 8); a.y += __shfl_down(a.y, 8); a.z += __shfl_down(a.z, 8); a.w += __shfl_down(a.w, 8); a.x += __shfl_down(a.x, 4); a.y += __shfl_down(a.y, 4); a.z += __shfl_down(a.z, 4); a.w += __shfl_down(a.w, 4); a.x += __shfl_down(a.x, 2); a.y += __shfl_down(a.y, 2); a.z += __shfl_down(a.z, 2); a.w += __shfl_down(a.w, 2); a.x += __shfl_down(a.x, 1); a.y += __shfl_down(a.y, 1); a.z += __shfl_down(a.z, 1); a.w += __shfl_down(a.w, 1); #else a.x += __shfl_down_sync(0xffffffff,a.x, 16); a.y += __shfl_down_sync(0xffffffff,a.y, 16); a.z += __shfl_down_sync(0xffffffff,a.z, 16); a.w += __shfl_down_sync(0xffffffff,a.w, 16); a.x += __shfl_down_sync(0xffffffff,a.x, 8); a.y += __shfl_down_sync(0xffffffff,a.y, 8); a.z += __shfl_down_sync(0xffffffff,a.z, 8); a.w += __shfl_down_sync(0xffffffff,a.w, 8); a.x += __shfl_down_sync(0xffffffff,a.x, 4); a.y += __shfl_down_sync(0xffffffff,a.y, 4); a.z += __shfl_down_sync(0xffffffff,a.z, 4); a.w += __shfl_down_sync(0xffffffff,a.w, 4); a.x += __shfl_down_sync(0xffffffff,a.x, 2); a.y += __shfl_down_sync(0xffffffff,a.y, 2); a.z += __shfl_down_sync(0xffffffff,a.z, 2); a.w += __shfl_down_sync(0xffffffff,a.w, 2); a.x += __shfl_down_sync(0xffffffff,a.x, 1); a.y += __shfl_down_sync(0xffffffff,a.y, 1); a.z += __shfl_down_sync(0xffffffff,a.z, 1); a.w += __shfl_down_sync(0xffffffff,a.w, 1); #endif #endif return val; } #endif // PRECISION_z #ifdef PRECISION_c template<> __inline__ __device__ magmaFloatComplex warpReduceSum<magmaFloatComplex>(magmaFloatComplex val) { #if __CUDA_ARCH__ >= 300 float2 a = *reinterpret_cast<float2*>(&val); #if __CUDACC_VER_MAJOR__ < 9 a.x += __shfl_down(a.x, 16); a.y += __shfl_down(a.y, 16); a.x += __shfl_down(a.x, 8); a.y += __shfl_down(a.y, 8); a.x += __shfl_down(a.x, 4); a.y += __shfl_down(a.y, 4); 
a.x += __shfl_down(a.x, 2); a.y += __shfl_down(a.y, 2); a.x += __shfl_down(a.x, 1); a.y += __shfl_down(a.y, 1); #else a.x += __shfl_down_sync(0xffffffff,a.x, 16); a.y += __shfl_down_sync(0xffffffff,a.y, 16); a.x += __shfl_down_sync(0xffffffff,a.x, 8); a.y += __shfl_down_sync(0xffffffff,a.y, 8); a.x += __shfl_down_sync(0xffffffff,a.x, 4); a.y += __shfl_down_sync(0xffffffff,a.y, 4); a.x += __shfl_down_sync(0xffffffff,a.x, 2); a.y += __shfl_down_sync(0xffffffff,a.y, 2); a.x += __shfl_down_sync(0xffffffff,a.x, 1); a.y += __shfl_down_sync(0xffffffff,a.y, 1); #endif #endif return val; } #endif // PRECISION_c template<typename T> __inline__ __device__ T blockReduceSum_1D(T val) { extern __shared__ T shared[]; // Shared mem for 32 partial sums int lane = threadIdx.x % warpSize; int wid = threadIdx.x / warpSize; val = warpReduceSum<T>(val); // Each warp performs partial reduction if (lane == 0) shared[wid]=val; // Write reduced value to shared memory __syncthreads(); // Wait for all partial reductions //read from shared memory only if that warp existed val = (threadIdx.x < blockDim.x / warpSize) ? shared[lane] : MAGMA_D_ZERO; if (wid == 0) val = warpReduceSum<T>(val); //Final reduce within first warp return val; } template<typename T> __inline__ __device__ T blockReduceSum(T val) { extern __shared__ T shared[]; // Shared mem for 32 partial sums int lane = threadIdx.x % warpSize; int wid = threadIdx.x / warpSize; val = warpReduceSum<T>(val); // Each warp performs partial reduction if (lane == 0) shared[threadIdx.y*32+wid]=val; // Write reduced value to shared memory __syncthreads(); // Wait for all partial reductions //read from shared memory only if that warp existed val = (threadIdx.x < blockDim.x / warpSize) ? shared[threadIdx.y*32+lane] : MAGMA_D_ZERO; if (wid == 0) val = warpReduceSum<T>(val); //Final reduce within first warp return val; } template<typename T> __global__ void deviceReduceKernel(const T * __restrict__ in, T * __restrict__ out, int N) { T sum = MAGMA_D_MAKE(0.0, 0.0); //reduce multiple elements per thread for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < N; i += blockDim.x * gridDim.x) { sum += in[i]; } sum = blockReduceSum<T>(sum); if (threadIdx.x == 0) out[blockIdx.x]=sum; } // dot product for multiple vectors using shuffle intrinsics and less shared memory __global__ void magma_dblockdot_kernel_shuffle( int n, int k, const double * __restrict__ v, const double * __restrict__ r, double * __restrict__ vtmp) { int i = blockIdx.x * blockDim.x + threadIdx.x; int j = threadIdx.y; double tmp; if (i < n) { tmp = v[i+j*n] * r[i]; } else { tmp = MAGMA_D_ZERO; } tmp = blockReduceSum(tmp); if (threadIdx.x == 0 ){ vtmp[ blockIdx.x+j*gridDim.x ] = tmp; } } // dot product for multiple vectors using shuffle intrinsics and less shared memory __global__ void magma_dblockdot_kernel_shuffle_1dblock( int n, int k, const double * __restrict__ v, const double * __restrict__ r, double * __restrict__ vtmp) { int i = blockIdx.x * blockDim.x + threadIdx.x; int j; for (j=0; j < k; j++) { double tmp; if (i < n) { tmp = v[i+j*n] * r[i]; } else { tmp = MAGMA_D_ZERO; } tmp = blockReduceSum_1D(tmp); if (threadIdx.x == 0 ){ vtmp[ blockIdx.x+j*gridDim.x ] = tmp; } } } /** Purpose ------- Computes the scalar product of a set of vectors v_i such that skp = ( <v_0,r>, <v_1,r>, .. ) Returns the vector skp. Arguments --------- @param[in] n int length of v_i and r @param[in] k int # vectors v_i @param[in] v magmaDouble_ptr v = (v_0 .. v_i.. 
v_k) @param[in] r magmaDouble_ptr r @param[in] d1 magmaDouble_ptr workspace @param[in] d2 magmaDouble_ptr workspace @param[out] skp magmaDouble_ptr vector[k] of scalar products (<v_i,r>...) @param[in] queue magma_queue_t Queue to execute in. @ingroup magmasparse_dblas ********************************************************************/ extern "C" magma_int_t magma_dmdotc_shfl( magma_int_t n, magma_int_t k, magmaDouble_ptr v, magmaDouble_ptr r, magmaDouble_ptr d1, magmaDouble_ptr d2, magmaDouble_ptr skp, magma_queue_t queue ) { if ( magma_getdevice_arch() < 300 ) { return magma_dmdotc( n, k, v, r, d1, d2, skp, queue ); } else if (1) { // 1D block kernel seems to be always faster dim3 block( BLOCK_SIZE ); dim3 grid( magma_ceildiv( n, block.x ) ); hipLaunchKernelGGL(( magma_dblockdot_kernel_shuffle_1dblock), dim3(grid), dim3(block), 32*sizeof(double), queue->cuda_stream() , n, k, v, r, d1 ); int j; for (j=0; j < k; j++) { hipLaunchKernelGGL(( deviceReduceKernel<double>) , dim3(1), dim3(1024), 32*sizeof(double), queue->cuda_stream(), d1+grid.x*j, skp+j, grid.x); } } else { dim3 block( magma_roundup( magma_ceildiv(BLOCK_SIZE, k), 32 ), k ); while (block.x*block.y > 1024) { block.x -= 32; } dim3 grid( magma_ceildiv( n, block.x ) ); hipLaunchKernelGGL(( magma_dblockdot_kernel_shuffle), dim3(grid), dim3(block), 32*k*sizeof(double), queue->cuda_stream() , n, k, v, r, d1 ); int j; for (j=0; j < k; j++) { hipLaunchKernelGGL(( deviceReduceKernel<double>) , dim3(1), dim3(1024), 32*sizeof(double), queue->cuda_stream(), d1+grid.x*j, skp+j, grid.x); } } return MAGMA_SUCCESS; } /** Purpose ------- This is an extension of the merged dot product above by chunking the set of vectors v_i such that the data always fits into cache. It is equivalent to a matrix vecor product Vr where V contains few rows and many columns. The computation is the same: skp = ( <v_0,r>, <v_1,r>, .. ) Returns the vector skp. Arguments --------- @param[in] n int length of v_i and r @param[in] k int # vectors v_i @param[in] v magmaDouble_ptr v = (v_0 .. v_i.. v_k) @param[in] r magmaDouble_ptr r @param[in] d1 magmaDouble_ptr workspace @param[in] d2 magmaDouble_ptr workspace @param[out] skp magmaDouble_ptr vector[k] of scalar products (<v_i,r>...) @param[in] queue magma_queue_t Queue to execute in. @ingroup magmasparse_d ********************************************************************/ extern "C" magma_int_t magma_dgemvmdot_shfl( magma_int_t n, magma_int_t k, magmaDouble_ptr v, magmaDouble_ptr r, magmaDouble_ptr d1, magmaDouble_ptr d2, magmaDouble_ptr skp, magma_queue_t queue ) { if (k == 1) { // call CUBLAS dotc, we will never be faster double res = magma_ddot( n, v, 1, r, 1, queue ); magma_dsetvector( 1, &res, 1, skp, 1, queue ); } else if ( magma_getdevice_arch() < 300 ) { return magma_dgemvmdot( n, k, v, r, d1, d2, skp, queue ); } else { magma_dmdotc_shfl( n, k, v, r, d1, d2, skp, queue ); } return MAGMA_SUCCESS; }
f9dbb9889156fc26060ef91e7f146232bb7ece78.cu
/* -- MAGMA (version 2.5.4) -- Univ. of Tennessee, Knoxville Univ. of California, Berkeley Univ. of Colorado, Denver @date October 2020 @generated from sparse/blas/zmdot_shfl.cu, normal z -> d, Thu Oct 8 23:05:48 2020 @author Moritz Kreutzer */ #include "magmasparse_internal.h" #include "magmasparse_d.h" #define BLOCK_SIZE 512 #define PRECISION_d #include <cuda.h> // for CUDA_VERSION #if (CUDA_VERSION <= 6000) // CUDA 6.5 adds Double precision version; here's an implementation for CUDA 6.0 and earlier. // from https://devblogs.nvidia.com/parallelforall/faster-parallel-reductions-kepler/ __device__ inline real_Double_t __shfl_down(real_Double_t var, unsigned int srcLane, int width=32) { int2 a = *reinterpret_cast<int2*>(&var); a.x = __shfl_down(a.x, srcLane, width); a.y = __shfl_down(a.y, srcLane, width); return *reinterpret_cast<double*>(&a); } #endif template<typename T> __inline__ __device__ T warpReduceSum(T val) { #if __CUDA_ARCH__ >= 300 #if __CUDACC_VER_MAJOR__ < 9 val += __shfl_down(val, 16); val += __shfl_down(val, 8); val += __shfl_down(val, 4); val += __shfl_down(val, 2); val += __shfl_down(val, 1); #else val += __shfl_down_sync(0xffffffff,val, 16); val += __shfl_down_sync(0xffffffff,val, 8); val += __shfl_down_sync(0xffffffff,val, 4); val += __shfl_down_sync(0xffffffff,val, 2); val += __shfl_down_sync(0xffffffff,val, 1); #endif #endif return val; } #ifdef PRECISION_z template<> __inline__ __device__ double warpReduceSum<double>(double val) { #if __CUDA_ARCH__ >= 300 int4 a = *reinterpret_cast<int4*>(&val); #if __CUDACC_VER_MAJOR__ < 9 a.x += __shfl_down(a.x, 16); a.y += __shfl_down(a.y, 16); a.z += __shfl_down(a.z, 16); a.w += __shfl_down(a.w, 16); a.x += __shfl_down(a.x, 8); a.y += __shfl_down(a.y, 8); a.z += __shfl_down(a.z, 8); a.w += __shfl_down(a.w, 8); a.x += __shfl_down(a.x, 4); a.y += __shfl_down(a.y, 4); a.z += __shfl_down(a.z, 4); a.w += __shfl_down(a.w, 4); a.x += __shfl_down(a.x, 2); a.y += __shfl_down(a.y, 2); a.z += __shfl_down(a.z, 2); a.w += __shfl_down(a.w, 2); a.x += __shfl_down(a.x, 1); a.y += __shfl_down(a.y, 1); a.z += __shfl_down(a.z, 1); a.w += __shfl_down(a.w, 1); #else a.x += __shfl_down_sync(0xffffffff,a.x, 16); a.y += __shfl_down_sync(0xffffffff,a.y, 16); a.z += __shfl_down_sync(0xffffffff,a.z, 16); a.w += __shfl_down_sync(0xffffffff,a.w, 16); a.x += __shfl_down_sync(0xffffffff,a.x, 8); a.y += __shfl_down_sync(0xffffffff,a.y, 8); a.z += __shfl_down_sync(0xffffffff,a.z, 8); a.w += __shfl_down_sync(0xffffffff,a.w, 8); a.x += __shfl_down_sync(0xffffffff,a.x, 4); a.y += __shfl_down_sync(0xffffffff,a.y, 4); a.z += __shfl_down_sync(0xffffffff,a.z, 4); a.w += __shfl_down_sync(0xffffffff,a.w, 4); a.x += __shfl_down_sync(0xffffffff,a.x, 2); a.y += __shfl_down_sync(0xffffffff,a.y, 2); a.z += __shfl_down_sync(0xffffffff,a.z, 2); a.w += __shfl_down_sync(0xffffffff,a.w, 2); a.x += __shfl_down_sync(0xffffffff,a.x, 1); a.y += __shfl_down_sync(0xffffffff,a.y, 1); a.z += __shfl_down_sync(0xffffffff,a.z, 1); a.w += __shfl_down_sync(0xffffffff,a.w, 1); #endif #endif return val; } #endif // PRECISION_z #ifdef PRECISION_c template<> __inline__ __device__ magmaFloatComplex warpReduceSum<magmaFloatComplex>(magmaFloatComplex val) { #if __CUDA_ARCH__ >= 300 float2 a = *reinterpret_cast<float2*>(&val); #if __CUDACC_VER_MAJOR__ < 9 a.x += __shfl_down(a.x, 16); a.y += __shfl_down(a.y, 16); a.x += __shfl_down(a.x, 8); a.y += __shfl_down(a.y, 8); a.x += __shfl_down(a.x, 4); a.y += __shfl_down(a.y, 4); a.x += __shfl_down(a.x, 2); a.y += __shfl_down(a.y, 2); a.x += __shfl_down(a.x, 
1); a.y += __shfl_down(a.y, 1); #else a.x += __shfl_down_sync(0xffffffff,a.x, 16); a.y += __shfl_down_sync(0xffffffff,a.y, 16); a.x += __shfl_down_sync(0xffffffff,a.x, 8); a.y += __shfl_down_sync(0xffffffff,a.y, 8); a.x += __shfl_down_sync(0xffffffff,a.x, 4); a.y += __shfl_down_sync(0xffffffff,a.y, 4); a.x += __shfl_down_sync(0xffffffff,a.x, 2); a.y += __shfl_down_sync(0xffffffff,a.y, 2); a.x += __shfl_down_sync(0xffffffff,a.x, 1); a.y += __shfl_down_sync(0xffffffff,a.y, 1); #endif #endif return val; } #endif // PRECISION_c template<typename T> __inline__ __device__ T blockReduceSum_1D(T val) { extern __shared__ T shared[]; // Shared mem for 32 partial sums int lane = threadIdx.x % warpSize; int wid = threadIdx.x / warpSize; val = warpReduceSum<T>(val); // Each warp performs partial reduction if (lane == 0) shared[wid]=val; // Write reduced value to shared memory __syncthreads(); // Wait for all partial reductions //read from shared memory only if that warp existed val = (threadIdx.x < blockDim.x / warpSize) ? shared[lane] : MAGMA_D_ZERO; if (wid == 0) val = warpReduceSum<T>(val); //Final reduce within first warp return val; } template<typename T> __inline__ __device__ T blockReduceSum(T val) { extern __shared__ T shared[]; // Shared mem for 32 partial sums int lane = threadIdx.x % warpSize; int wid = threadIdx.x / warpSize; val = warpReduceSum<T>(val); // Each warp performs partial reduction if (lane == 0) shared[threadIdx.y*32+wid]=val; // Write reduced value to shared memory __syncthreads(); // Wait for all partial reductions //read from shared memory only if that warp existed val = (threadIdx.x < blockDim.x / warpSize) ? shared[threadIdx.y*32+lane] : MAGMA_D_ZERO; if (wid == 0) val = warpReduceSum<T>(val); //Final reduce within first warp return val; } template<typename T> __global__ void deviceReduceKernel(const T * __restrict__ in, T * __restrict__ out, int N) { T sum = MAGMA_D_MAKE(0.0, 0.0); //reduce multiple elements per thread for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < N; i += blockDim.x * gridDim.x) { sum += in[i]; } sum = blockReduceSum<T>(sum); if (threadIdx.x == 0) out[blockIdx.x]=sum; } // dot product for multiple vectors using shuffle intrinsics and less shared memory __global__ void magma_dblockdot_kernel_shuffle( int n, int k, const double * __restrict__ v, const double * __restrict__ r, double * __restrict__ vtmp) { int i = blockIdx.x * blockDim.x + threadIdx.x; int j = threadIdx.y; double tmp; if (i < n) { tmp = v[i+j*n] * r[i]; } else { tmp = MAGMA_D_ZERO; } tmp = blockReduceSum(tmp); if (threadIdx.x == 0 ){ vtmp[ blockIdx.x+j*gridDim.x ] = tmp; } } // dot product for multiple vectors using shuffle intrinsics and less shared memory __global__ void magma_dblockdot_kernel_shuffle_1dblock( int n, int k, const double * __restrict__ v, const double * __restrict__ r, double * __restrict__ vtmp) { int i = blockIdx.x * blockDim.x + threadIdx.x; int j; for (j=0; j < k; j++) { double tmp; if (i < n) { tmp = v[i+j*n] * r[i]; } else { tmp = MAGMA_D_ZERO; } tmp = blockReduceSum_1D(tmp); if (threadIdx.x == 0 ){ vtmp[ blockIdx.x+j*gridDim.x ] = tmp; } } } /** Purpose ------- Computes the scalar product of a set of vectors v_i such that skp = ( <v_0,r>, <v_1,r>, .. ) Returns the vector skp. Arguments --------- @param[in] n int length of v_i and r @param[in] k int # vectors v_i @param[in] v magmaDouble_ptr v = (v_0 .. v_i.. 
v_k) @param[in] r magmaDouble_ptr r @param[in] d1 magmaDouble_ptr workspace @param[in] d2 magmaDouble_ptr workspace @param[out] skp magmaDouble_ptr vector[k] of scalar products (<v_i,r>...) @param[in] queue magma_queue_t Queue to execute in. @ingroup magmasparse_dblas ********************************************************************/ extern "C" magma_int_t magma_dmdotc_shfl( magma_int_t n, magma_int_t k, magmaDouble_ptr v, magmaDouble_ptr r, magmaDouble_ptr d1, magmaDouble_ptr d2, magmaDouble_ptr skp, magma_queue_t queue ) { if ( magma_getdevice_arch() < 300 ) { return magma_dmdotc( n, k, v, r, d1, d2, skp, queue ); } else if (1) { // 1D block kernel seems to be always faster dim3 block( BLOCK_SIZE ); dim3 grid( magma_ceildiv( n, block.x ) ); magma_dblockdot_kernel_shuffle_1dblock<<< grid, block, 32*sizeof(double), queue->cuda_stream() >>>( n, k, v, r, d1 ); int j; for (j=0; j < k; j++) { deviceReduceKernel<double> <<<1, 1024, 32*sizeof(double), queue->cuda_stream()>>>(d1+grid.x*j, skp+j, grid.x); } } else { dim3 block( magma_roundup( magma_ceildiv(BLOCK_SIZE, k), 32 ), k ); while (block.x*block.y > 1024) { block.x -= 32; } dim3 grid( magma_ceildiv( n, block.x ) ); magma_dblockdot_kernel_shuffle<<< grid, block, 32*k*sizeof(double), queue->cuda_stream() >>>( n, k, v, r, d1 ); int j; for (j=0; j < k; j++) { deviceReduceKernel<double> <<<1, 1024, 32*sizeof(double), queue->cuda_stream()>>>(d1+grid.x*j, skp+j, grid.x); } } return MAGMA_SUCCESS; } /** Purpose ------- This is an extension of the merged dot product above by chunking the set of vectors v_i such that the data always fits into cache. It is equivalent to a matrix vecor product Vr where V contains few rows and many columns. The computation is the same: skp = ( <v_0,r>, <v_1,r>, .. ) Returns the vector skp. Arguments --------- @param[in] n int length of v_i and r @param[in] k int # vectors v_i @param[in] v magmaDouble_ptr v = (v_0 .. v_i.. v_k) @param[in] r magmaDouble_ptr r @param[in] d1 magmaDouble_ptr workspace @param[in] d2 magmaDouble_ptr workspace @param[out] skp magmaDouble_ptr vector[k] of scalar products (<v_i,r>...) @param[in] queue magma_queue_t Queue to execute in. @ingroup magmasparse_d ********************************************************************/ extern "C" magma_int_t magma_dgemvmdot_shfl( magma_int_t n, magma_int_t k, magmaDouble_ptr v, magmaDouble_ptr r, magmaDouble_ptr d1, magmaDouble_ptr d2, magmaDouble_ptr skp, magma_queue_t queue ) { if (k == 1) { // call CUBLAS dotc, we will never be faster double res = magma_ddot( n, v, 1, r, 1, queue ); magma_dsetvector( 1, &res, 1, skp, 1, queue ); } else if ( magma_getdevice_arch() < 300 ) { return magma_dgemvmdot( n, k, v, r, d1, d2, skp, queue ); } else { magma_dmdotc_shfl( n, k, v, r, d1, d2, skp, queue ); } return MAGMA_SUCCESS; }
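The kernels above are built on a standard warp-shuffle tree reduction followed by a block/grid combine. As a small, self-contained sketch of that pattern in plain CUDA (illustrative only, independent of the MAGMA types and macros used above):

// Warp-level sum using the same shuffle folding as warpReduceSum above (CUDA 9+ intrinsics).
__inline__ __device__ float warp_sum(float val) {
    for (int offset = 16; offset > 0; offset >>= 1)
        val += __shfl_down_sync(0xffffffff, val, offset);  // fold the upper half of the warp down
    return val;                                            // lane 0 ends up with the warp total
}

// Grid-stride dot product that finishes with one atomic per warp (out must be zeroed beforehand).
__global__ void dot_partial(const float* __restrict__ x, const float* __restrict__ y,
                            float* __restrict__ out, int n) {
    float sum = 0.0f;
    for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < n; i += blockDim.x * gridDim.x)
        sum += x[i] * y[i];
    sum = warp_sum(sum);
    if ((threadIdx.x & 31) == 0)
        atomicAdd(out, sum);
}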
c6d6fda1172f858ada3ab6392c70c03dd957fc11.hip
// !!! This is a file automatically generated by hipify!!! #include <hip/hip_runtime.h> #include <thrust/host_vector.h> #include <thrust/device_vector.h> #include "gpuKmeans.h" #include "helper_math.h" #if defined(__CUDA_ARCH__) && (__CUDA_ARCH__ < 200) #define printf(f, ...) ((void)(f, __VA_ARGS__),0) #endif __device__ float sq_device(float ref) { return ref*ref; } __device__ float sqL2Dist_device_CL(float4 first, float4 second) { return sq_device(first.x*first.w - second.x*second.w) + sq_device(first.y*first.w - second.y*second.w) + sq_device(first.z*first.w - second.z*second.w); } __device__ float sqL2Dist_device_LN(float FR, float FG, float FB, float FA, float SR, float SG, float SB, float SA) { return sq_device(FR*FA - SR*SA) + sq_device(FG*FA - SG*SA) + sq_device(FB*FA - SB*SA); } // In the assignment step, each point (thread) computes its distance to each // cluster centroid and adds its value to the sum of its closest // centroid, as well as incrementing that centroid's count of assigned points. __global__ void assignClusters_parallel_CL(thrust::device_ptr<float4> data, size_t data_size, const thrust::device_ptr<float4> means, thrust::device_ptr<float4> new_sums, size_t k, thrust::device_ptr<int> counts, thrust::device_ptr<int> d_assign) { //extern __shared__ float4 shared_means[]; const int index = blockIdx.x * blockDim.x + threadIdx.x; if (index >= data_size) return; // Let the first k threads copy over the cluster means. /*if (threadIdx.x < k) { // Using a flat array shared_means[threadIdx.x] = means[threadIdx.x]; }*/ //thrust::copy(thrust::raw_pointer_cast(means), thrust::raw_pointer_cast(means+k), &shared_means[0]); //__syncthreads(); // Make global loads once. const float4 current = data[index]; float best_distance = FLT_MAX; int best_cluster = 0; for (int cluster = 0; cluster < k; ++cluster) { const float distance = sqL2Dist_device_CL(current, means[cluster]); if (distance < best_distance) { best_distance = distance; best_cluster = cluster; } } atomicAdd(&thrust::raw_pointer_cast(new_sums + best_cluster)->x, current.x); atomicAdd(&thrust::raw_pointer_cast(new_sums + best_cluster)->y, current.y); atomicAdd(&thrust::raw_pointer_cast(new_sums + best_cluster)->z, current.z); atomicAdd(&thrust::raw_pointer_cast(new_sums + best_cluster)->w, current.w); atomicAdd(thrust::raw_pointer_cast(counts + best_cluster), 1); d_assign[index]=best_cluster; } __global__ void assignClusters_parallel_4F(thrust::device_ptr<float> inRed, thrust::device_ptr<float> inGrn, thrust::device_ptr<float> inBlu, thrust::device_ptr<float> inAlp, size_t data_size, const thrust::device_ptr<float> meansR, const thrust::device_ptr<float> meansG, const thrust::device_ptr<float> meansB, const thrust::device_ptr<float> meansA, thrust::device_ptr<float> new_sumsR, thrust::device_ptr<float> new_sumsG, thrust::device_ptr<float> new_sumsB, thrust::device_ptr<float> new_sumsA, size_t k, thrust::device_ptr<int> counts, thrust::device_ptr<int> d_assign) { const int index = blockIdx.x * blockDim.x + threadIdx.x; if (index >= data_size) return; // Make global loads once. 
const float currentR = inRed[index]; const float currentG = inGrn[index]; const float currentB = inBlu[index]; const float currentA = inAlp[index]; float best_distance = FLT_MAX; int best_cluster = 0; for (int cluster = 0; cluster < k; ++cluster) { const float distance = sqL2Dist_device_LN(currentR,currentG,currentB,currentA, meansR[cluster], meansG[cluster], meansB[cluster], meansA[cluster]); if (distance < best_distance) { best_distance = distance; best_cluster = cluster; } } atomicAdd(thrust::raw_pointer_cast(new_sumsR + best_cluster), currentR); atomicAdd(thrust::raw_pointer_cast(new_sumsG + best_cluster), currentG); atomicAdd(thrust::raw_pointer_cast(new_sumsB + best_cluster), currentB); atomicAdd(thrust::raw_pointer_cast(new_sumsA + best_cluster), currentA); atomicAdd(thrust::raw_pointer_cast(counts + best_cluster), 1); d_assign[index]=best_cluster; } __global__ void assignClusters_parallel_LN(thrust::device_ptr<float> data, size_t data_size, const thrust::device_ptr<float> means, thrust::device_ptr<float> new_sums, size_t k, thrust::device_ptr<int> counts, thrust::device_ptr<int> d_assign) { const int index = blockIdx.x * blockDim.x + threadIdx.x; if (index >= data_size) return; float best_distance = FLT_MAX; int best_cluster = 0; for (int cluster = 0; cluster < k; ++cluster) { const float distance = sqL2Dist_device_LN(data[index*4], data[index*4+1], data[index*4+2], data[index*4+3], means[cluster*4], means[cluster*4+1], means[cluster*4+2], means[cluster*4+3]); if (distance < best_distance) { best_distance = distance; best_cluster = cluster; } } atomicAdd(thrust::raw_pointer_cast(new_sums + best_cluster*4), data[index*4]); atomicAdd(thrust::raw_pointer_cast(new_sums + best_cluster*4+1), data[index*4+1]); atomicAdd(thrust::raw_pointer_cast(new_sums + best_cluster*4+2), data[index*4+2]); atomicAdd(thrust::raw_pointer_cast(new_sums + best_cluster*4+3), data[index*4+3]); atomicAdd(thrust::raw_pointer_cast(counts + best_cluster), 1); d_assign[index]=best_cluster; } // Each thread is one cluster, which just recomputes its coordinates as the mean // of all points assigned to it. 
__global__ void computeNewMeans_parallel_CL(thrust::device_ptr<float4> means, const thrust::device_ptr<float4> new_sum, const thrust::device_ptr<int> counts) { const int cluster = threadIdx.x; const int count = max(1, counts[cluster]); float4 temp = new_sum[cluster]; temp.x/=count; temp.y/=count; temp.z/=count; temp.w/=count; means[cluster] = temp; } __global__ void computeNewMeans_parallel_4F(thrust::device_ptr<float> meansR, thrust::device_ptr<float> meansG, thrust::device_ptr<float> meansB, thrust::device_ptr<float> meansA, const thrust::device_ptr<float> new_sumR, const thrust::device_ptr<float> new_sumG, const thrust::device_ptr<float> new_sumB, const thrust::device_ptr<float> new_sumA, const thrust::device_ptr<int> counts) { const int cluster = threadIdx.x; const int count = max(1, counts[cluster]); meansR[cluster] = new_sumR[cluster]/count; meansG[cluster] = new_sumG[cluster]/count; meansB[cluster] = new_sumB[cluster]/count; meansA[cluster] = new_sumA[cluster]/count; } __global__ void computeNewMeans_parallel_LN(thrust::device_ptr<float> means, const thrust::device_ptr<float> new_sum, const thrust::device_ptr<int> counts) { const int cluster = threadIdx.x; const int count = max(1, counts[cluster]); means[cluster*4] = new_sum[cluster*4] /count; means[cluster*4+1] = new_sum[cluster*4+1]/count; means[cluster*4+2] = new_sum[cluster*4+2]/count; means[cluster*4+3] = new_sum[cluster*4+3]/count; } //=================================================== //tried using shared memory for mean calculation and color assignment /* __global__ void fineReduce_parallel_CL(const thrust::device_ptr<float4> data, const size_t data_size, const thrust::device_ptr<float4> means, const thrust::device_ptr<int> d_assign, const thrust::device_ptr<float4> new_sums, const size_t k, const thrust::device_ptr<int> counts) { extern __shared__ uint8_t shared_memory[]; float4* shared_means = (float4*)(shared_memory); int* shared_counts = (int*)(shared_means+k); const int local_index = threadIdx.x; const int global_index = blockIdx.x * blockDim.x + threadIdx.x; if (global_index >= data_size) return; // Load the mean values into shared memory. if (local_index < k) { shared_means[local_index] = means[local_index]; } __syncthreads(); // Assignment step. // Load once here. const float4 value = data[global_index]; float best_distance = FLT_MAX; int best_cluster = -1; for (int cluster = 0; cluster < k; ++cluster) { const float distance = sqL2Dist_device_CL(value, shared_means[cluster]); if (distance < best_distance) { best_distance = distance; best_cluster = cluster; } } d_assign[global_index]=best_cluster; __syncthreads(); // Reduction step. const int count = local_index; //const float4 zeroF4(0.f,0.f,0.f,0.f); for (int cluster = 0; cluster < k; ++cluster) { // Zeros if this point (thread) is not assigned to the cluster, else the // values of the point. shared_means[local_index] = (best_cluster == cluster) ? value : float4{0.f,0.f,0.f,0.f}; shared_counts[count] = (best_cluster == cluster) ? 1 : 0; __syncthreads(); // Tree-reduction for this cluster. for (int stride = blockDim.x / 2; stride > 0; stride /= 2) { if (local_index < stride) { shared_means[local_index] += shared_means[local_index + stride]; shared_counts[count] += shared_counts[count + stride]; } __syncthreads(); } // Now shared_data[0] holds the sum for x. 
if (local_index == 0) { const int cluster_index = blockIdx.x * k + cluster; new_sums[cluster_index] = shared_means[local_index]; counts[cluster_index] = shared_counts[count]; } __syncthreads(); } } __global__ void coarseReduce_parallel_CL(const thrust::device_ptr<float4> means, const thrust::device_ptr<float4> new_sums, const size_t k, const thrust::device_ptr<int> counts) { extern __shared__ float4 shared_means[]; const int index = threadIdx.x; //const int y_offset = blockDim.x; if(index < k){ // Load into shared memory for more efficient reduction. shared_means[index] = new_sums[index]; } __syncthreads(); for (int stride = blockDim.x / 2; stride >= k; stride /= 2) { if (index < stride) { shared_means[index] += shared_means[index + stride]; } __syncthreads(); } // The first k threads can recompute their clusters' means now. if (index < k) { const int count = max(1, counts[index]); means[index] = new_sums[index] / count; new_sums[index] = float4{0.f}; counts[index] = 0; } } */ //tried using shared memory for mean calculation and color assignment //=================================================== __global__ void writeNewColors_parallel_CL(thrust::device_ptr<float4> means, size_t data_size, thrust::device_ptr<int> assignment, thrust::device_ptr<float4> newOut) { const int index = blockIdx.x * blockDim.x + threadIdx.x; if (index >= data_size) return; newOut[index] = means[assignment[index]]; } __global__ void writeNewColors_parallel_4F(thrust::device_ptr<float> meansR, thrust::device_ptr<float> meansG, thrust::device_ptr<float> meansB, thrust::device_ptr<float> meansA, size_t data_size, thrust::device_ptr<int> assignment, thrust::device_ptr<float> newOutR, thrust::device_ptr<float> newOutG, thrust::device_ptr<float> newOutB, thrust::device_ptr<float> newOutA) { const int index = blockIdx.x * blockDim.x + threadIdx.x; if (index >= data_size) return; newOutR[index] = meansR[assignment[index]]; newOutG[index] = meansG[assignment[index]]; newOutB[index] = meansB[assignment[index]]; newOutA[index] = meansA[assignment[index]]; } __global__ void writeNewColors_parallel_LN(thrust::device_ptr<float> means, size_t data_size, thrust::device_ptr<int> assignment, thrust::device_ptr<float> newOut) { const int index = blockIdx.x * blockDim.x + threadIdx.x; if (index >= data_size) return; newOut[index*4] = means[assignment[index]*4]; newOut[index*4+1] = means[assignment[index]*4+1]; newOut[index*4+2] = means[assignment[index]*4+2]; newOut[index*4+3] = means[assignment[index]*4+3]; } __global__ void calculateDistancesToCentroids_4F(thrust::device_ptr<float4> d_source, const size_t data_size, thrust::device_ptr<float4> d_means, const size_t current, thrust::device_ptr<float> d_dist, thrust::device_ptr<size_t> d_weights) { const int index = blockIdx.x * blockDim.x + threadIdx.x; if (index >= data_size) return; for(auto c=0; c<current; ++c) { d_dist[index]=sqL2Dist_device_CL(d_source[index], d_means[c]); d_weights[index]=d_weights[index]+(size_t)(d_dist[index]*1000.f); } } __global__ void calculateDistancesToCentroids_LN(thrust::device_ptr<float> d_source, const size_t data_size, thrust::device_ptr<float> d_means, const size_t current, thrust::device_ptr<float> d_dist, thrust::device_ptr<size_t> d_weights) { const int index = blockIdx.x * blockDim.x + threadIdx.x; if (index >= data_size) return; for(auto c=0; c<current; ++c) { d_dist[index]=sqL2Dist_device_LN(d_source[index*4],d_source[index*4+1], d_source[index*4+2],d_source[index*4+3], d_means[c*4],d_means[c*4+1], d_means[c*4+2],d_means[c*4+3]); 
d_weights[index]=d_weights[index]+(size_t)(d_dist[index]*1000.f); } } __global__ void calculateDistancesToCentroids_4V(thrust::device_ptr<float> d_sourceR, thrust::device_ptr<float> d_sourceG, thrust::device_ptr<float> d_sourceB, thrust::device_ptr<float> d_sourceA, const size_t data_size, thrust::device_ptr<float> d_meansR, thrust::device_ptr<float> d_meansG, thrust::device_ptr<float> d_meansB, thrust::device_ptr<float> d_meansA, const size_t current, thrust::device_ptr<float> d_dist, thrust::device_ptr<size_t> d_weights) { const int index = blockIdx.x * blockDim.x + threadIdx.x; if (index >= data_size) return; for(auto c=0; c<current; ++c) { d_dist[index]=sqL2Dist_device_LN(d_sourceR[index],d_sourceG[index], d_sourceB[index],d_sourceA[index], d_meansR[c],d_meansG[c], d_meansB[c],d_meansA[c]); d_weights[index]=d_weights[index]+(size_t)(d_dist[index]*1000.f); } } ///================================================================================= ///----------------------------| END UTILITY |------------------------------------ ///================================================================================= ColorVector gpuKmeans::kmeans_parallel_CV(const ColorVector &source, size_t k, size_t number_of_iterations, const size_t numThreads) { RandomFn<float> rfunc; const size_t number_of_elements = source.size(); const int blocks = (number_of_elements + numThreads - 1) / numThreads; thrust::host_vector<float4> h_source(number_of_elements); for(auto i=0; i<number_of_elements; ++i) { h_source[i].x = source.at(i).m_r; h_source[i].y = source.at(i).m_g; h_source[i].z = source.at(i).m_b; h_source[i].w = source.at(i).m_a; } thrust::device_vector<float4> d_source(source.size()); thrust::copy(h_source.begin(), h_source.end(), d_source.begin()); thrust::device_vector<float4> d_means(k); rfunc.setNumericLimitsL(0, number_of_elements - 1); // Pick centroids as random points from the dataset. 
/*for(uint cluster=0; cluster<k; ++cluster) { float4 assignment; Color c = source[rfunc->MT19937RandL()]; assignment.x = c.m_r; assignment.y = c.m_g; assignment.z = c.m_b; assignment.w = c.m_a; d_means[cluster] = assignment; }*/ //Pick Centroids according to kmeans++ method by getting distances to all points size_t number = rfunc.MT19937RandL(); d_means[0] = d_source[number]; //first mean is random thrust::device_vector<float> d_distances(number_of_elements, 0.f); thrust::device_vector<size_t> d_weights(number_of_elements, 0); //thrust::device_vector<float> d_totalDistance(k, 0.f); std::vector<size_t> tWeights(number_of_elements); for(auto centroid=1; centroid<k; ++centroid) { hipLaunchKernelGGL(( calculateDistancesToCentroids_4F), dim3(blocks), dim3(numThreads), 0, 0, d_source.data(), number_of_elements, d_means.data(), centroid, d_distances.data(), d_weights.data()); hipDeviceSynchronize(); thrust::copy(d_weights.begin(), d_weights.end(), tWeights.begin()); size_t randomIDx = rfunc.weightedRand(tWeights); d_means[centroid]=d_source[randomIDx]; thrust::fill(d_distances.begin(), d_distances.end(), 0.f); } //end of centoid picking thrust::device_vector<int> d_assignments(source.size()); //for cluster assignments thrust::device_vector<float4> d_filtered(source.size()); //to copy back and return thrust::host_vector<float4> h_filtered(source.size()); thrust::device_vector<float4> d_sums(k); thrust::device_vector<int> d_counts(k, 0); //const int shared_data1 = (sizeof(int)+sizeof(float4))*numThreads; //const int shared_data2 = sizeof(float4)*k*blocks; for (size_t iteration = 0; iteration < number_of_iterations; ++iteration) { thrust::fill(d_sums.begin(), d_sums.end(), float4{0.0,0.0,0.0,0.0}); thrust::fill(d_counts.begin(), d_counts.end(), 0); hipLaunchKernelGGL(( assignClusters_parallel_CL), dim3(blocks), dim3(numThreads), 0, 0, d_source.data(), number_of_elements, d_means.data(), d_sums.data(), k, d_counts.data(), d_assignments.data()); /*fineReduce_parallel_CL<<<blocks, numThreads, shared_data1>>>( d_source.data(), number_of_elements, d_means.data(), d_assignments.data(), d_sums.data(), k, d_counts.data());*/ hipDeviceSynchronize(); hipLaunchKernelGGL(( computeNewMeans_parallel_CL), dim3(1), dim3(k), 0, 0, d_means.data(), d_sums.data(), d_counts.data()); /*const int num = k*blocks; coarseReduce_parallel_CL<<<1, num, shared_data2>>>( d_means.data(), d_sums.data(), k, d_counts.data());*/ hipDeviceSynchronize(); } hipLaunchKernelGGL(( writeNewColors_parallel_CL), dim3(blocks), dim3(numThreads), 0, 0, d_means.data(), number_of_elements, d_assignments.data(), d_filtered.data()); hipDeviceSynchronize(); thrust::copy(d_filtered.begin(), d_filtered.end(), h_filtered.begin()); ColorVector ret(source.size()); for(uint i=0; i<source.size(); ++i) { ret.at(i).m_r = h_filtered[i].x; ret.at(i).m_g = h_filtered[i].y; ret.at(i).m_b = h_filtered[i].z; ret.at(i).m_a = h_filtered[i].w; } return ret; } ImageColors gpuKmeans::kmeans_parallel_IC(const ImageColors &source, size_t k, size_t number_of_iterations, const size_t numThreads) { RandomFn<float> rfunc; const size_t number_of_elements = source.m_r.size(); const int blocks = (number_of_elements + numThreads - 1) / numThreads; thrust::host_vector<float4> h_source(number_of_elements); for(uint x=0; x<number_of_elements; ++x) { h_source[x].x=source.m_r.at(x); h_source[x].y=source.m_g.at(x); h_source[x].z=source.m_b.at(x); h_source[x].w=source.m_a.at(x); } thrust::device_vector<float4> d_source(number_of_elements); thrust::copy(h_source.begin(), h_source.end(), 
d_source.begin()); thrust::device_vector<float4> d_means(k); rfunc.setNumericLimitsL(0, number_of_elements - 1); /*for(uint cluster=0; cluster<k; ++cluster) { size_t num = rfunc.MT19937RandL(); float4 assignment; assignment.x = source.m_r.at(num); assignment.y = source.m_g.at(num); assignment.z = source.m_b.at(num); assignment.w = source.m_a.at(num); d_means[cluster] = assignment; }*/ //Pick Centroids according to kmeans++ method by getting distances to all points size_t number = rfunc.MT19937RandL(); d_means[0] = d_source[number]; //first mean is random thrust::device_vector<float> d_distances(number_of_elements, 0.f); thrust::device_vector<size_t> d_weights(number_of_elements, 0); std::vector<size_t> tWeights(number_of_elements); for(auto centroid=1; centroid<k; ++centroid) { hipLaunchKernelGGL(( calculateDistancesToCentroids_4F), dim3(blocks), dim3(numThreads), 0, 0, d_source.data(), number_of_elements, d_means.data(), centroid, d_distances.data(), d_weights.data()); hipDeviceSynchronize(); thrust::copy(d_weights.begin(), d_weights.end(), tWeights.begin()); size_t randomIDx = rfunc.weightedRand(tWeights); d_means[centroid]=d_source[randomIDx]; thrust::fill(d_distances.begin(), d_distances.end(), 0.f); } //end of centoid picking thrust::device_vector<int> d_assignments(number_of_elements); //for cluster assignments thrust::device_vector<float4> d_filtered(number_of_elements); //to copy back and return thrust::host_vector<float4> h_filtered(number_of_elements); thrust::device_vector<float4> d_sums(k); thrust::device_vector<int> d_counts(k, 0); for (size_t iteration = 0; iteration < number_of_iterations; ++iteration) { thrust::fill(d_sums.begin(), d_sums.end(), float4{0.0,0.0,0.0,0.0}); thrust::fill(d_counts.begin(), d_counts.end(), 0); hipLaunchKernelGGL(( assignClusters_parallel_CL), dim3(blocks), dim3(numThreads), 0, 0, d_source.data(), number_of_elements, d_means.data(), d_sums.data(), k, d_counts.data(), d_assignments.data()); hipDeviceSynchronize(); hipLaunchKernelGGL(( computeNewMeans_parallel_CL), dim3(1), dim3(k), 0, 0, d_means.data(), d_sums.data(), d_counts.data()); hipDeviceSynchronize(); } hipLaunchKernelGGL(( writeNewColors_parallel_CL), dim3(blocks), dim3(numThreads), 0, 0, d_means.data(), number_of_elements, d_assignments.data(), d_filtered.data()); hipDeviceSynchronize(); thrust::copy(d_filtered.begin(), d_filtered.end(), h_filtered.begin()); //h_source = d_source; ImageColors ret; ret.resize(number_of_elements); for(uint i=0; i<number_of_elements; ++i) { ret.m_r.at(i) = h_filtered[i].x; ret.m_g.at(i) = h_filtered[i].y; ret.m_b.at(i) = h_filtered[i].z; ret.m_a.at(i) = h_filtered[i].w; } return ret; } std::vector<float> gpuKmeans::kmeans_parallel_LN(const std::vector<float> &source, size_t k, size_t number_of_iterations, const size_t numThreads) { RandomFn<float> rfunc; const size_t number_of_elements = source.size()/4; const int blocks = (number_of_elements + numThreads - 1) / numThreads; thrust::device_vector<float> d_source(source.size()); thrust::copy(source.begin(), source.end(), d_source.begin()); thrust::device_vector<float> d_means(k*4); rfunc.setNumericLimitsL(0, number_of_elements - 1); /*for(uint cluster=0; cluster<k; ++cluster) { size_t cID = rfunc.MT19937RandL(); d_means[cluster*4] = source[cID*4]; d_means[cluster*4+1] = source[cID*4+1]; d_means[cluster*4+2] = source[cID*4+2]; d_means[cluster*4+3] = source[cID*4+3]; }*/ //Pick Centroids according to kmeans++ method by getting distances to all points size_t number = rfunc.MT19937RandL(); d_means[0] = 
d_source[number*4]; //first mean is random d_means[1] = d_source[number*4+1]; d_means[2] = d_source[number*4+2]; d_means[3] = d_source[number*4+3]; thrust::device_vector<float> d_distances(number_of_elements, 0.f); thrust::device_vector<size_t> d_weights(number_of_elements, 0); std::vector<size_t> tWeights(number_of_elements); for(auto centroid=1; centroid<k; ++centroid) { hipLaunchKernelGGL(( calculateDistancesToCentroids_LN), dim3(blocks), dim3(numThreads), 0, 0, d_source.data(), number_of_elements, d_means.data(), centroid, d_distances.data(), d_weights.data()); hipDeviceSynchronize(); thrust::copy(d_weights.begin(), d_weights.end(), tWeights.begin()); size_t randomIDx = rfunc.weightedRand(tWeights); d_means[centroid*4]=d_source[randomIDx*4]; d_means[centroid*4+1]=d_source[randomIDx*4+1]; d_means[centroid*4+2]=d_source[randomIDx*4+2]; d_means[centroid*4+3]=d_source[randomIDx*4+3]; thrust::fill(d_distances.begin(), d_distances.end(), 0.f); } //end of centoid picking thrust::device_vector<int> d_assignments(source.size()/4); //for cluster assignments thrust::device_vector<float> d_filtered(source.size()); //to copy back and return thrust::host_vector<float> h_filtered(source.size()); thrust::device_vector<float> d_sums(k*4); thrust::device_vector<int> d_counts(k, 0); for (size_t iteration = 0; iteration < number_of_iterations; ++iteration) { thrust::fill(d_sums.begin(), d_sums.end(), 0.f); thrust::fill(d_counts.begin(), d_counts.end(), 0); hipLaunchKernelGGL(( assignClusters_parallel_LN), dim3(blocks), dim3(numThreads), 0, 0, d_source.data(), number_of_elements, d_means.data(), d_sums.data(), k, d_counts.data(), d_assignments.data()); hipDeviceSynchronize(); hipLaunchKernelGGL(( computeNewMeans_parallel_LN), dim3(1), dim3(k), 0, 0, d_means.data(), d_sums.data(), d_counts.data()); hipDeviceSynchronize(); } hipLaunchKernelGGL(( writeNewColors_parallel_LN), dim3(blocks), dim3(numThreads), 0, 0, d_means.data(), number_of_elements, d_assignments.data(), d_filtered.data()); hipDeviceSynchronize(); thrust::copy(d_filtered.begin(), d_filtered.end(), h_filtered.begin()); std::vector<float> ret(source.size()); for(uint i=0; i<source.size(); ++i) { ret.at(i) = h_filtered[i]; } return ret; } void gpuKmeans::kmeans_parallel_4SV(const std::vector<float>* _inreds, const std::vector<float>* _ingrns, const std::vector<float>* _inblus, const std::vector<float>* _inalps, std::vector<float>* _outreds, std::vector<float>* _outgrns, std::vector<float>* _outblus, std::vector<float>* _outalps, const size_t number_of_elements, size_t k, size_t number_of_iterations, const size_t numThreads) { RandomFn<float> rfunc; const int blocks = (number_of_elements + numThreads - 1) / numThreads; thrust::device_vector<float> d_sourceR(number_of_elements); thrust::device_vector<float> d_sourceG(number_of_elements); thrust::device_vector<float> d_sourceB(number_of_elements); thrust::device_vector<float> d_sourceA(number_of_elements); thrust::copy(_inreds->begin(), _inreds->end(), d_sourceR.begin()); thrust::copy(_ingrns->begin(), _ingrns->end(), d_sourceG.begin()); thrust::copy(_inblus->begin(), _inblus->end(), d_sourceB.begin()); thrust::copy(_inalps->begin(), _inalps->end(), d_sourceA.begin()); thrust::device_vector<float> d_meansR(k); thrust::device_vector<float> d_meansG(k); thrust::device_vector<float> d_meansB(k); thrust::device_vector<float> d_meansA(k); rfunc.setNumericLimitsL(0, number_of_elements - 1); /*for(auto cluster=0; cluster<k; ++cluster) { size_t num = rfunc.MT19937RandL(); d_meansR[cluster] = 
_inreds->at(num); d_meansG[cluster] = _ingrns->at(num); d_meansB[cluster] = _inblus->at(num); d_meansA[cluster] = _inalps->at(num); }*/ //Pick Centroids according to kmeans++ method by getting distances to all points size_t number = rfunc.MT19937RandL(); d_meansR[0] = d_sourceR[number]; //first mean is random d_meansG[0] = d_sourceG[number]; d_meansB[0] = d_sourceB[number]; d_meansA[0] = d_sourceA[number]; thrust::device_vector<float> d_distances(number_of_elements, 0.f); thrust::device_vector<size_t> d_weights(number_of_elements, 0); //thrust::device_vector<float> d_totalDistance(k, 0.f); std::vector<size_t> tWeights(number_of_elements); for(auto centroid=1; centroid<k; ++centroid) { hipLaunchKernelGGL(( calculateDistancesToCentroids_4V), dim3(blocks), dim3(numThreads), 0, 0, d_sourceR.data(), d_sourceG.data(), d_sourceB.data(), d_sourceA.data(), number_of_elements, d_meansR.data(), d_meansG.data(), d_meansB.data(), d_meansA.data(), centroid, d_distances.data(), d_weights.data()); hipDeviceSynchronize(); thrust::copy(d_weights.begin(), d_weights.end(), tWeights.begin()); size_t randomIDx = rfunc.weightedRand(tWeights); d_meansR[centroid]=d_sourceR[randomIDx]; d_meansG[centroid]=d_sourceG[randomIDx]; d_meansB[centroid]=d_sourceB[randomIDx]; d_meansA[centroid]=d_sourceA[randomIDx]; thrust::fill(d_distances.begin(), d_distances.end(), 0.f); } //end of centoid picking thrust::device_vector<int> d_assignments(number_of_elements); thrust::device_vector<float> d_filteredR(number_of_elements); thrust::device_vector<float> d_filteredG(number_of_elements); thrust::device_vector<float> d_filteredB(number_of_elements); thrust::device_vector<float> d_filteredA(number_of_elements); thrust::device_vector<float> d_sumsR(k); thrust::device_vector<float> d_sumsG(k); thrust::device_vector<float> d_sumsB(k); thrust::device_vector<float> d_sumsA(k); thrust::device_vector<int> d_counts(k, 0); for (size_t iteration = 0; iteration < number_of_iterations; ++iteration) { thrust::fill(d_sumsR.begin(), d_sumsR.end(), float{0.0f}); thrust::fill(d_sumsG.begin(), d_sumsG.end(), float{0.0f}); thrust::fill(d_sumsB.begin(), d_sumsB.end(), float{0.0f}); thrust::fill(d_sumsA.begin(), d_sumsA.end(), float{0.0f}); thrust::fill(d_counts.begin(), d_counts.end(), 0); hipLaunchKernelGGL(( assignClusters_parallel_4F), dim3(blocks), dim3(numThreads), 0, 0, d_sourceR.data(), d_sourceG.data(), d_sourceB.data(), d_sourceA.data(), number_of_elements, d_meansR.data(), d_meansG.data(), d_meansB.data(), d_meansA.data(), d_sumsR.data(), d_sumsG.data(), d_sumsB.data(), d_sumsA.data(), k, d_counts.data(), d_assignments.data()); hipDeviceSynchronize(); hipLaunchKernelGGL(( computeNewMeans_parallel_4F), dim3(1), dim3(k), 0, 0, d_meansR.data(), d_meansG.data(), d_meansB.data(), d_meansA.data(), d_sumsR.data(), d_sumsG.data(), d_sumsB.data(), d_sumsA.data(), d_counts.data()); hipDeviceSynchronize(); } hipLaunchKernelGGL(( writeNewColors_parallel_4F), dim3(blocks), dim3(numThreads), 0, 0, d_meansR.data(), d_meansG.data(), d_meansB.data(), d_meansA.data(), number_of_elements, d_assignments.data(), d_filteredR.data(), d_filteredG.data(), d_filteredB.data(), d_filteredA.data()); hipDeviceSynchronize(); thrust::copy(d_filteredR.begin(), d_filteredR.end(), _outreds->begin()); thrust::copy(d_filteredG.begin(), d_filteredG.end(), _outgrns->begin()); thrust::copy(d_filteredB.begin(), d_filteredB.end(), _outblus->begin()); thrust::copy(d_filteredA.begin(), d_filteredA.end(), _outalps->begin()); return; } void gpuKmeans::kmeans_parallel_4LV(const float* 
_inreds, const float* _ingrns, const float* _inblus, const float* _inalps, float* _outreds, float* _outgrns, float* _outblus, float* _outalps, const size_t number_of_elements, size_t k, size_t number_of_iterations, const size_t numThreads) { RandomFn<float> rfunc; const int blocks = (number_of_elements + numThreads - 1) / numThreads; thrust::device_vector<float> d_sourceR(number_of_elements); thrust::device_vector<float> d_sourceG(number_of_elements); thrust::device_vector<float> d_sourceB(number_of_elements); thrust::device_vector<float> d_sourceA(number_of_elements); thrust::copy(_inreds, _inreds+number_of_elements, d_sourceR.begin()); thrust::copy(_ingrns, _ingrns+number_of_elements, d_sourceG.begin()); thrust::copy(_inblus, _inblus+number_of_elements, d_sourceB.begin()); thrust::copy(_inalps, _inalps+number_of_elements, d_sourceA.begin()); thrust::device_vector<float> d_meansR(k); thrust::device_vector<float> d_meansG(k); thrust::device_vector<float> d_meansB(k); thrust::device_vector<float> d_meansA(k); rfunc.setNumericLimitsL(0, number_of_elements - 1); /*for(auto cluster=0; cluster<k; ++cluster) { size_t num = rfunc.MT19937RandL(); d_meansR[cluster] = _inreds[num]; d_meansG[cluster] = _ingrns[num]; d_meansB[cluster] = _inblus[num]; d_meansA[cluster] = _inalps[num]; }*/ //Pick Centroids according to kmeans++ method by getting distances to all points size_t number = rfunc.MT19937RandL(); d_meansR[0] = d_sourceR[number]; //first mean is random d_meansG[0] = d_sourceG[number]; d_meansB[0] = d_sourceB[number]; d_meansA[0] = d_sourceA[number]; thrust::device_vector<float> d_distances(number_of_elements, 0.f); thrust::device_vector<size_t> d_weights(number_of_elements, 0); //thrust::device_vector<float> d_totalDistance(k, 0.f); std::vector<size_t> tWeights(number_of_elements); for(auto centroid=1; centroid<k; ++centroid) { hipLaunchKernelGGL(( calculateDistancesToCentroids_4V), dim3(blocks), dim3(numThreads), 0, 0, d_sourceR.data(), d_sourceG.data(), d_sourceB.data(), d_sourceA.data(), number_of_elements, d_meansR.data(), d_meansG.data(), d_meansB.data(), d_meansA.data(), centroid, d_distances.data(), d_weights.data()); hipDeviceSynchronize(); thrust::copy(d_weights.begin(), d_weights.end(), tWeights.begin()); size_t randomIDx = rfunc.weightedRand(tWeights); d_meansR[centroid]=d_sourceR[randomIDx]; d_meansG[centroid]=d_sourceG[randomIDx]; d_meansB[centroid]=d_sourceB[randomIDx]; d_meansA[centroid]=d_sourceA[randomIDx]; thrust::fill(d_distances.begin(), d_distances.end(), 0.f); } //end of centoid picking thrust::device_vector<int> d_assignments(number_of_elements); thrust::device_vector<float> d_filteredR(number_of_elements); thrust::device_vector<float> d_filteredG(number_of_elements); thrust::device_vector<float> d_filteredB(number_of_elements); thrust::device_vector<float> d_filteredA(number_of_elements); thrust::device_vector<float> d_sumsR(k); thrust::device_vector<float> d_sumsG(k); thrust::device_vector<float> d_sumsB(k); thrust::device_vector<float> d_sumsA(k); thrust::device_vector<int> d_counts(k, 0); for (size_t iteration = 0; iteration < number_of_iterations; ++iteration) { thrust::fill(d_sumsR.begin(), d_sumsR.end(), float{0.0f}); thrust::fill(d_sumsG.begin(), d_sumsG.end(), float{0.0f}); thrust::fill(d_sumsB.begin(), d_sumsB.end(), float{0.0f}); thrust::fill(d_sumsA.begin(), d_sumsA.end(), float{0.0f}); thrust::fill(d_counts.begin(), d_counts.end(), 0); hipLaunchKernelGGL(( assignClusters_parallel_4F), dim3(blocks), dim3(numThreads), 0, 0, d_sourceR.data(), d_sourceG.data(), 
d_sourceB.data(), d_sourceA.data(), number_of_elements, d_meansR.data(), d_meansG.data(), d_meansB.data(), d_meansA.data(), d_sumsR.data(), d_sumsG.data(), d_sumsB.data(), d_sumsA.data(), k, d_counts.data(), d_assignments.data()); hipDeviceSynchronize(); hipLaunchKernelGGL(( computeNewMeans_parallel_4F), dim3(1), dim3(k), 0, 0, d_meansR.data(), d_meansG.data(), d_meansB.data(), d_meansA.data(), d_sumsR.data(), d_sumsG.data(), d_sumsB.data(), d_sumsA.data(), d_counts.data()); hipDeviceSynchronize(); } hipLaunchKernelGGL(( writeNewColors_parallel_4F), dim3(blocks), dim3(numThreads), 0, 0, d_meansR.data(), d_meansG.data(), d_meansB.data(), d_meansA.data(), number_of_elements, d_assignments.data(), d_filteredR.data(), d_filteredG.data(), d_filteredB.data(), d_filteredA.data()); hipDeviceSynchronize(); thrust::copy(d_filteredR.begin(), d_filteredR.end(), _outreds); thrust::copy(d_filteredG.begin(), d_filteredG.end(), _outgrns); thrust::copy(d_filteredB.begin(), d_filteredB.end(), _outblus); thrust::copy(d_filteredA.begin(), d_filteredA.end(), _outalps); return; }
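// ---------------------------------------------------------------------------
// Editor's note: a hedged usage sketch, not part of the original file. It
// shows how the linear (interleaved RGBA) entry point kmeans_parallel_LN
// defined above is meant to be driven: `rgba` holds 4 floats per pixel, and
// the returned vector has the same layout with every pixel replaced by its
// cluster mean. The cluster count, iteration count and block size below are
// illustrative, and the call assumes the entry points are declared static in
// gpuKmeans.h; otherwise call through a gpuKmeans instance.
// ---------------------------------------------------------------------------
#include <vector>
#include <cstddef>
#include "gpuKmeans.h"

static std::vector<float> example_quantize_rgba(const std::vector<float> &rgba)
{
    const std::size_t k               = 8;    // number of colour clusters (assumed)
    const std::size_t iterations      = 16;   // k-means refinement passes (assumed)
    const std::size_t threadsPerBlock = 128;  // CUDA block size used for the kernels
    // rgba.size() must be a multiple of 4 (one RGBA quadruple per pixel).
    return gpuKmeans::kmeans_parallel_LN(rgba, k, iterations, threadsPerBlock);
}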
c6d6fda1172f858ada3ab6392c70c03dd957fc11.cu
#include <cuda_runtime.h> #include <thrust/host_vector.h> #include <thrust/device_vector.h> #include "gpuKmeans.h" #include "helper_math.h" #if defined(__CUDA_ARCH__) && (__CUDA_ARCH__ < 200) #define printf(f, ...) ((void)(f, __VA_ARGS__),0) #endif __device__ float sq_device(float ref) { return ref*ref; } __device__ float sqL2Dist_device_CL(float4 first, float4 second) { return sq_device(first.x*first.w - second.x*second.w) + sq_device(first.y*first.w - second.y*second.w) + sq_device(first.z*first.w - second.z*second.w); } __device__ float sqL2Dist_device_LN(float FR, float FG, float FB, float FA, float SR, float SG, float SB, float SA) { return sq_device(FR*FA - SR*SA) + sq_device(FG*FA - SG*SA) + sq_device(FB*FA - SB*SA); } // In the assignment step, each point (thread) computes its distance to each // cluster centroid and adds its value to the sum of its closest // centroid, as well as incrementing that centroid's count of assigned points. __global__ void assignClusters_parallel_CL(thrust::device_ptr<float4> data, size_t data_size, const thrust::device_ptr<float4> means, thrust::device_ptr<float4> new_sums, size_t k, thrust::device_ptr<int> counts, thrust::device_ptr<int> d_assign) { //extern __shared__ float4 shared_means[]; const int index = blockIdx.x * blockDim.x + threadIdx.x; if (index >= data_size) return; // Let the first k threads copy over the cluster means. /*if (threadIdx.x < k) { // Using a flat array shared_means[threadIdx.x] = means[threadIdx.x]; }*/ //thrust::copy(thrust::raw_pointer_cast(means), thrust::raw_pointer_cast(means+k), &shared_means[0]); //__syncthreads(); // Make global loads once. const float4 current = data[index]; float best_distance = FLT_MAX; int best_cluster = 0; for (int cluster = 0; cluster < k; ++cluster) { const float distance = sqL2Dist_device_CL(current, means[cluster]); if (distance < best_distance) { best_distance = distance; best_cluster = cluster; } } atomicAdd(&thrust::raw_pointer_cast(new_sums + best_cluster)->x, current.x); atomicAdd(&thrust::raw_pointer_cast(new_sums + best_cluster)->y, current.y); atomicAdd(&thrust::raw_pointer_cast(new_sums + best_cluster)->z, current.z); atomicAdd(&thrust::raw_pointer_cast(new_sums + best_cluster)->w, current.w); atomicAdd(thrust::raw_pointer_cast(counts + best_cluster), 1); d_assign[index]=best_cluster; } __global__ void assignClusters_parallel_4F(thrust::device_ptr<float> inRed, thrust::device_ptr<float> inGrn, thrust::device_ptr<float> inBlu, thrust::device_ptr<float> inAlp, size_t data_size, const thrust::device_ptr<float> meansR, const thrust::device_ptr<float> meansG, const thrust::device_ptr<float> meansB, const thrust::device_ptr<float> meansA, thrust::device_ptr<float> new_sumsR, thrust::device_ptr<float> new_sumsG, thrust::device_ptr<float> new_sumsB, thrust::device_ptr<float> new_sumsA, size_t k, thrust::device_ptr<int> counts, thrust::device_ptr<int> d_assign) { const int index = blockIdx.x * blockDim.x + threadIdx.x; if (index >= data_size) return; // Make global loads once. 
const float currentR = inRed[index]; const float currentG = inGrn[index]; const float currentB = inBlu[index]; const float currentA = inAlp[index]; float best_distance = FLT_MAX; int best_cluster = 0; for (int cluster = 0; cluster < k; ++cluster) { const float distance = sqL2Dist_device_LN(currentR,currentG,currentB,currentA, meansR[cluster], meansG[cluster], meansB[cluster], meansA[cluster]); if (distance < best_distance) { best_distance = distance; best_cluster = cluster; } } atomicAdd(thrust::raw_pointer_cast(new_sumsR + best_cluster), currentR); atomicAdd(thrust::raw_pointer_cast(new_sumsG + best_cluster), currentG); atomicAdd(thrust::raw_pointer_cast(new_sumsB + best_cluster), currentB); atomicAdd(thrust::raw_pointer_cast(new_sumsA + best_cluster), currentA); atomicAdd(thrust::raw_pointer_cast(counts + best_cluster), 1); d_assign[index]=best_cluster; } __global__ void assignClusters_parallel_LN(thrust::device_ptr<float> data, size_t data_size, const thrust::device_ptr<float> means, thrust::device_ptr<float> new_sums, size_t k, thrust::device_ptr<int> counts, thrust::device_ptr<int> d_assign) { const int index = blockIdx.x * blockDim.x + threadIdx.x; if (index >= data_size) return; float best_distance = FLT_MAX; int best_cluster = 0; for (int cluster = 0; cluster < k; ++cluster) { const float distance = sqL2Dist_device_LN(data[index*4], data[index*4+1], data[index*4+2], data[index*4+3], means[cluster*4], means[cluster*4+1], means[cluster*4+2], means[cluster*4+3]); if (distance < best_distance) { best_distance = distance; best_cluster = cluster; } } atomicAdd(thrust::raw_pointer_cast(new_sums + best_cluster*4), data[index*4]); atomicAdd(thrust::raw_pointer_cast(new_sums + best_cluster*4+1), data[index*4+1]); atomicAdd(thrust::raw_pointer_cast(new_sums + best_cluster*4+2), data[index*4+2]); atomicAdd(thrust::raw_pointer_cast(new_sums + best_cluster*4+3), data[index*4+3]); atomicAdd(thrust::raw_pointer_cast(counts + best_cluster), 1); d_assign[index]=best_cluster; } // Each thread is one cluster, which just recomputes its coordinates as the mean // of all points assigned to it. 
__global__ void computeNewMeans_parallel_CL(thrust::device_ptr<float4> means, const thrust::device_ptr<float4> new_sum, const thrust::device_ptr<int> counts) { const int cluster = threadIdx.x; const int count = max(1, counts[cluster]); float4 temp = new_sum[cluster]; temp.x/=count; temp.y/=count; temp.z/=count; temp.w/=count; means[cluster] = temp; } __global__ void computeNewMeans_parallel_4F(thrust::device_ptr<float> meansR, thrust::device_ptr<float> meansG, thrust::device_ptr<float> meansB, thrust::device_ptr<float> meansA, const thrust::device_ptr<float> new_sumR, const thrust::device_ptr<float> new_sumG, const thrust::device_ptr<float> new_sumB, const thrust::device_ptr<float> new_sumA, const thrust::device_ptr<int> counts) { const int cluster = threadIdx.x; const int count = max(1, counts[cluster]); meansR[cluster] = new_sumR[cluster]/count; meansG[cluster] = new_sumG[cluster]/count; meansB[cluster] = new_sumB[cluster]/count; meansA[cluster] = new_sumA[cluster]/count; } __global__ void computeNewMeans_parallel_LN(thrust::device_ptr<float> means, const thrust::device_ptr<float> new_sum, const thrust::device_ptr<int> counts) { const int cluster = threadIdx.x; const int count = max(1, counts[cluster]); means[cluster*4] = new_sum[cluster*4] /count; means[cluster*4+1] = new_sum[cluster*4+1]/count; means[cluster*4+2] = new_sum[cluster*4+2]/count; means[cluster*4+3] = new_sum[cluster*4+3]/count; } //=================================================== //tried using shared memory for mean calculation and color assignment /* __global__ void fineReduce_parallel_CL(const thrust::device_ptr<float4> data, const size_t data_size, const thrust::device_ptr<float4> means, const thrust::device_ptr<int> d_assign, const thrust::device_ptr<float4> new_sums, const size_t k, const thrust::device_ptr<int> counts) { extern __shared__ uint8_t shared_memory[]; float4* shared_means = (float4*)(shared_memory); int* shared_counts = (int*)(shared_means+k); const int local_index = threadIdx.x; const int global_index = blockIdx.x * blockDim.x + threadIdx.x; if (global_index >= data_size) return; // Load the mean values into shared memory. if (local_index < k) { shared_means[local_index] = means[local_index]; } __syncthreads(); // Assignment step. // Load once here. const float4 value = data[global_index]; float best_distance = FLT_MAX; int best_cluster = -1; for (int cluster = 0; cluster < k; ++cluster) { const float distance = sqL2Dist_device_CL(value, shared_means[cluster]); if (distance < best_distance) { best_distance = distance; best_cluster = cluster; } } d_assign[global_index]=best_cluster; __syncthreads(); // Reduction step. const int count = local_index; //const float4 zeroF4(0.f,0.f,0.f,0.f); for (int cluster = 0; cluster < k; ++cluster) { // Zeros if this point (thread) is not assigned to the cluster, else the // values of the point. shared_means[local_index] = (best_cluster == cluster) ? value : float4{0.f,0.f,0.f,0.f}; shared_counts[count] = (best_cluster == cluster) ? 1 : 0; __syncthreads(); // Tree-reduction for this cluster. for (int stride = blockDim.x / 2; stride > 0; stride /= 2) { if (local_index < stride) { shared_means[local_index] += shared_means[local_index + stride]; shared_counts[count] += shared_counts[count + stride]; } __syncthreads(); } // Now shared_data[0] holds the sum for x. 
if (local_index == 0) { const int cluster_index = blockIdx.x * k + cluster; new_sums[cluster_index] = shared_means[local_index]; counts[cluster_index] = shared_counts[count]; } __syncthreads(); } } __global__ void coarseReduce_parallel_CL(const thrust::device_ptr<float4> means, const thrust::device_ptr<float4> new_sums, const size_t k, const thrust::device_ptr<int> counts) { extern __shared__ float4 shared_means[]; const int index = threadIdx.x; //const int y_offset = blockDim.x; if(index < k){ // Load into shared memory for more efficient reduction. shared_means[index] = new_sums[index]; } __syncthreads(); for (int stride = blockDim.x / 2; stride >= k; stride /= 2) { if (index < stride) { shared_means[index] += shared_means[index + stride]; } __syncthreads(); } // The first k threads can recompute their clusters' means now. if (index < k) { const int count = max(1, counts[index]); means[index] = new_sums[index] / count; new_sums[index] = float4{0.f}; counts[index] = 0; } } */ //tried using shared memory for mean calculation and color assignment //=================================================== __global__ void writeNewColors_parallel_CL(thrust::device_ptr<float4> means, size_t data_size, thrust::device_ptr<int> assignment, thrust::device_ptr<float4> newOut) { const int index = blockIdx.x * blockDim.x + threadIdx.x; if (index >= data_size) return; newOut[index] = means[assignment[index]]; } __global__ void writeNewColors_parallel_4F(thrust::device_ptr<float> meansR, thrust::device_ptr<float> meansG, thrust::device_ptr<float> meansB, thrust::device_ptr<float> meansA, size_t data_size, thrust::device_ptr<int> assignment, thrust::device_ptr<float> newOutR, thrust::device_ptr<float> newOutG, thrust::device_ptr<float> newOutB, thrust::device_ptr<float> newOutA) { const int index = blockIdx.x * blockDim.x + threadIdx.x; if (index >= data_size) return; newOutR[index] = meansR[assignment[index]]; newOutG[index] = meansG[assignment[index]]; newOutB[index] = meansB[assignment[index]]; newOutA[index] = meansA[assignment[index]]; } __global__ void writeNewColors_parallel_LN(thrust::device_ptr<float> means, size_t data_size, thrust::device_ptr<int> assignment, thrust::device_ptr<float> newOut) { const int index = blockIdx.x * blockDim.x + threadIdx.x; if (index >= data_size) return; newOut[index*4] = means[assignment[index]*4]; newOut[index*4+1] = means[assignment[index]*4+1]; newOut[index*4+2] = means[assignment[index]*4+2]; newOut[index*4+3] = means[assignment[index]*4+3]; } __global__ void calculateDistancesToCentroids_4F(thrust::device_ptr<float4> d_source, const size_t data_size, thrust::device_ptr<float4> d_means, const size_t current, thrust::device_ptr<float> d_dist, thrust::device_ptr<size_t> d_weights) { const int index = blockIdx.x * blockDim.x + threadIdx.x; if (index >= data_size) return; for(auto c=0; c<current; ++c) { d_dist[index]=sqL2Dist_device_CL(d_source[index], d_means[c]); d_weights[index]=d_weights[index]+(size_t)(d_dist[index]*1000.f); } } __global__ void calculateDistancesToCentroids_LN(thrust::device_ptr<float> d_source, const size_t data_size, thrust::device_ptr<float> d_means, const size_t current, thrust::device_ptr<float> d_dist, thrust::device_ptr<size_t> d_weights) { const int index = blockIdx.x * blockDim.x + threadIdx.x; if (index >= data_size) return; for(auto c=0; c<current; ++c) { d_dist[index]=sqL2Dist_device_LN(d_source[index*4],d_source[index*4+1], d_source[index*4+2],d_source[index*4+3], d_means[c*4],d_means[c*4+1], d_means[c*4+2],d_means[c*4+3]); 
d_weights[index]=d_weights[index]+(size_t)(d_dist[index]*1000.f); } } __global__ void calculateDistancesToCentroids_4V(thrust::device_ptr<float> d_sourceR, thrust::device_ptr<float> d_sourceG, thrust::device_ptr<float> d_sourceB, thrust::device_ptr<float> d_sourceA, const size_t data_size, thrust::device_ptr<float> d_meansR, thrust::device_ptr<float> d_meansG, thrust::device_ptr<float> d_meansB, thrust::device_ptr<float> d_meansA, const size_t current, thrust::device_ptr<float> d_dist, thrust::device_ptr<size_t> d_weights) { const int index = blockIdx.x * blockDim.x + threadIdx.x; if (index >= data_size) return; for(auto c=0; c<current; ++c) { d_dist[index]=sqL2Dist_device_LN(d_sourceR[index],d_sourceG[index], d_sourceB[index],d_sourceA[index], d_meansR[c],d_meansG[c], d_meansB[c],d_meansA[c]); d_weights[index]=d_weights[index]+(size_t)(d_dist[index]*1000.f); } } ///================================================================================= ///----------------------------| END UTILITY |------------------------------------ ///================================================================================= ColorVector gpuKmeans::kmeans_parallel_CV(const ColorVector &source, size_t k, size_t number_of_iterations, const size_t numThreads) { RandomFn<float> rfunc; const size_t number_of_elements = source.size(); const int blocks = (number_of_elements + numThreads - 1) / numThreads; thrust::host_vector<float4> h_source(number_of_elements); for(auto i=0; i<number_of_elements; ++i) { h_source[i].x = source.at(i).m_r; h_source[i].y = source.at(i).m_g; h_source[i].z = source.at(i).m_b; h_source[i].w = source.at(i).m_a; } thrust::device_vector<float4> d_source(source.size()); thrust::copy(h_source.begin(), h_source.end(), d_source.begin()); thrust::device_vector<float4> d_means(k); rfunc.setNumericLimitsL(0, number_of_elements - 1); // Pick centroids as random points from the dataset. 
/*for(uint cluster=0; cluster<k; ++cluster) { float4 assignment; Color c = source[rfunc->MT19937RandL()]; assignment.x = c.m_r; assignment.y = c.m_g; assignment.z = c.m_b; assignment.w = c.m_a; d_means[cluster] = assignment; }*/ //Pick Centroids according to kmeans++ method by getting distances to all points size_t number = rfunc.MT19937RandL(); d_means[0] = d_source[number]; //first mean is random thrust::device_vector<float> d_distances(number_of_elements, 0.f); thrust::device_vector<size_t> d_weights(number_of_elements, 0); //thrust::device_vector<float> d_totalDistance(k, 0.f); std::vector<size_t> tWeights(number_of_elements); for(auto centroid=1; centroid<k; ++centroid) { calculateDistancesToCentroids_4F<<<blocks, numThreads>>>(d_source.data(), number_of_elements, d_means.data(), centroid, d_distances.data(), d_weights.data()); cudaDeviceSynchronize(); thrust::copy(d_weights.begin(), d_weights.end(), tWeights.begin()); size_t randomIDx = rfunc.weightedRand(tWeights); d_means[centroid]=d_source[randomIDx]; thrust::fill(d_distances.begin(), d_distances.end(), 0.f); } //end of centoid picking thrust::device_vector<int> d_assignments(source.size()); //for cluster assignments thrust::device_vector<float4> d_filtered(source.size()); //to copy back and return thrust::host_vector<float4> h_filtered(source.size()); thrust::device_vector<float4> d_sums(k); thrust::device_vector<int> d_counts(k, 0); //const int shared_data1 = (sizeof(int)+sizeof(float4))*numThreads; //const int shared_data2 = sizeof(float4)*k*blocks; for (size_t iteration = 0; iteration < number_of_iterations; ++iteration) { thrust::fill(d_sums.begin(), d_sums.end(), float4{0.0,0.0,0.0,0.0}); thrust::fill(d_counts.begin(), d_counts.end(), 0); assignClusters_parallel_CL<<<blocks, numThreads>>>(d_source.data(), number_of_elements, d_means.data(), d_sums.data(), k, d_counts.data(), d_assignments.data()); /*fineReduce_parallel_CL<<<blocks, numThreads, shared_data1>>>( d_source.data(), number_of_elements, d_means.data(), d_assignments.data(), d_sums.data(), k, d_counts.data());*/ cudaDeviceSynchronize(); computeNewMeans_parallel_CL<<<1, k>>>(d_means.data(), d_sums.data(), d_counts.data()); /*const int num = k*blocks; coarseReduce_parallel_CL<<<1, num, shared_data2>>>( d_means.data(), d_sums.data(), k, d_counts.data());*/ cudaDeviceSynchronize(); } writeNewColors_parallel_CL<<<blocks, numThreads>>>(d_means.data(), number_of_elements, d_assignments.data(), d_filtered.data()); cudaDeviceSynchronize(); thrust::copy(d_filtered.begin(), d_filtered.end(), h_filtered.begin()); ColorVector ret(source.size()); for(uint i=0; i<source.size(); ++i) { ret.at(i).m_r = h_filtered[i].x; ret.at(i).m_g = h_filtered[i].y; ret.at(i).m_b = h_filtered[i].z; ret.at(i).m_a = h_filtered[i].w; } return ret; } ImageColors gpuKmeans::kmeans_parallel_IC(const ImageColors &source, size_t k, size_t number_of_iterations, const size_t numThreads) { RandomFn<float> rfunc; const size_t number_of_elements = source.m_r.size(); const int blocks = (number_of_elements + numThreads - 1) / numThreads; thrust::host_vector<float4> h_source(number_of_elements); for(uint x=0; x<number_of_elements; ++x) { h_source[x].x=source.m_r.at(x); h_source[x].y=source.m_g.at(x); h_source[x].z=source.m_b.at(x); h_source[x].w=source.m_a.at(x); } thrust::device_vector<float4> d_source(number_of_elements); thrust::copy(h_source.begin(), h_source.end(), d_source.begin()); thrust::device_vector<float4> d_means(k); rfunc.setNumericLimitsL(0, number_of_elements - 1); /*for(uint cluster=0; cluster<k; 
++cluster) { size_t num = rfunc.MT19937RandL(); float4 assignment; assignment.x = source.m_r.at(num); assignment.y = source.m_g.at(num); assignment.z = source.m_b.at(num); assignment.w = source.m_a.at(num); d_means[cluster] = assignment; }*/ //Pick Centroids according to kmeans++ method by getting distances to all points size_t number = rfunc.MT19937RandL(); d_means[0] = d_source[number]; //first mean is random thrust::device_vector<float> d_distances(number_of_elements, 0.f); thrust::device_vector<size_t> d_weights(number_of_elements, 0); std::vector<size_t> tWeights(number_of_elements); for(auto centroid=1; centroid<k; ++centroid) { calculateDistancesToCentroids_4F<<<blocks, numThreads>>>(d_source.data(), number_of_elements, d_means.data(), centroid, d_distances.data(), d_weights.data()); cudaDeviceSynchronize(); thrust::copy(d_weights.begin(), d_weights.end(), tWeights.begin()); size_t randomIDx = rfunc.weightedRand(tWeights); d_means[centroid]=d_source[randomIDx]; thrust::fill(d_distances.begin(), d_distances.end(), 0.f); } //end of centoid picking thrust::device_vector<int> d_assignments(number_of_elements); //for cluster assignments thrust::device_vector<float4> d_filtered(number_of_elements); //to copy back and return thrust::host_vector<float4> h_filtered(number_of_elements); thrust::device_vector<float4> d_sums(k); thrust::device_vector<int> d_counts(k, 0); for (size_t iteration = 0; iteration < number_of_iterations; ++iteration) { thrust::fill(d_sums.begin(), d_sums.end(), float4{0.0,0.0,0.0,0.0}); thrust::fill(d_counts.begin(), d_counts.end(), 0); assignClusters_parallel_CL<<<blocks, numThreads>>>(d_source.data(), number_of_elements, d_means.data(), d_sums.data(), k, d_counts.data(), d_assignments.data()); cudaDeviceSynchronize(); computeNewMeans_parallel_CL<<<1, k>>>(d_means.data(), d_sums.data(), d_counts.data()); cudaDeviceSynchronize(); } writeNewColors_parallel_CL<<<blocks, numThreads>>>(d_means.data(), number_of_elements, d_assignments.data(), d_filtered.data()); cudaDeviceSynchronize(); thrust::copy(d_filtered.begin(), d_filtered.end(), h_filtered.begin()); //h_source = d_source; ImageColors ret; ret.resize(number_of_elements); for(uint i=0; i<number_of_elements; ++i) { ret.m_r.at(i) = h_filtered[i].x; ret.m_g.at(i) = h_filtered[i].y; ret.m_b.at(i) = h_filtered[i].z; ret.m_a.at(i) = h_filtered[i].w; } return ret; } std::vector<float> gpuKmeans::kmeans_parallel_LN(const std::vector<float> &source, size_t k, size_t number_of_iterations, const size_t numThreads) { RandomFn<float> rfunc; const size_t number_of_elements = source.size()/4; const int blocks = (number_of_elements + numThreads - 1) / numThreads; thrust::device_vector<float> d_source(source.size()); thrust::copy(source.begin(), source.end(), d_source.begin()); thrust::device_vector<float> d_means(k*4); rfunc.setNumericLimitsL(0, number_of_elements - 1); /*for(uint cluster=0; cluster<k; ++cluster) { size_t cID = rfunc.MT19937RandL(); d_means[cluster*4] = source[cID*4]; d_means[cluster*4+1] = source[cID*4+1]; d_means[cluster*4+2] = source[cID*4+2]; d_means[cluster*4+3] = source[cID*4+3]; }*/ //Pick Centroids according to kmeans++ method by getting distances to all points size_t number = rfunc.MT19937RandL(); d_means[0] = d_source[number*4]; //first mean is random d_means[1] = d_source[number*4+1]; d_means[2] = d_source[number*4+2]; d_means[3] = d_source[number*4+3]; thrust::device_vector<float> d_distances(number_of_elements, 0.f); thrust::device_vector<size_t> d_weights(number_of_elements, 0); std::vector<size_t> 
tWeights(number_of_elements); for(auto centroid=1; centroid<k; ++centroid) { calculateDistancesToCentroids_LN<<<blocks, numThreads>>>(d_source.data(), number_of_elements, d_means.data(), centroid, d_distances.data(), d_weights.data()); cudaDeviceSynchronize(); thrust::copy(d_weights.begin(), d_weights.end(), tWeights.begin()); size_t randomIDx = rfunc.weightedRand(tWeights); d_means[centroid*4]=d_source[randomIDx*4]; d_means[centroid*4+1]=d_source[randomIDx*4+1]; d_means[centroid*4+2]=d_source[randomIDx*4+2]; d_means[centroid*4+3]=d_source[randomIDx*4+3]; thrust::fill(d_distances.begin(), d_distances.end(), 0.f); } //end of centoid picking thrust::device_vector<int> d_assignments(source.size()/4); //for cluster assignments thrust::device_vector<float> d_filtered(source.size()); //to copy back and return thrust::host_vector<float> h_filtered(source.size()); thrust::device_vector<float> d_sums(k*4); thrust::device_vector<int> d_counts(k, 0); for (size_t iteration = 0; iteration < number_of_iterations; ++iteration) { thrust::fill(d_sums.begin(), d_sums.end(), 0.f); thrust::fill(d_counts.begin(), d_counts.end(), 0); assignClusters_parallel_LN<<<blocks, numThreads>>>(d_source.data(), number_of_elements, d_means.data(), d_sums.data(), k, d_counts.data(), d_assignments.data()); cudaDeviceSynchronize(); computeNewMeans_parallel_LN<<<1, k>>>(d_means.data(), d_sums.data(), d_counts.data()); cudaDeviceSynchronize(); } writeNewColors_parallel_LN<<<blocks, numThreads>>>(d_means.data(), number_of_elements, d_assignments.data(), d_filtered.data()); cudaDeviceSynchronize(); thrust::copy(d_filtered.begin(), d_filtered.end(), h_filtered.begin()); std::vector<float> ret(source.size()); for(uint i=0; i<source.size(); ++i) { ret.at(i) = h_filtered[i]; } return ret; } void gpuKmeans::kmeans_parallel_4SV(const std::vector<float>* _inreds, const std::vector<float>* _ingrns, const std::vector<float>* _inblus, const std::vector<float>* _inalps, std::vector<float>* _outreds, std::vector<float>* _outgrns, std::vector<float>* _outblus, std::vector<float>* _outalps, const size_t number_of_elements, size_t k, size_t number_of_iterations, const size_t numThreads) { RandomFn<float> rfunc; const int blocks = (number_of_elements + numThreads - 1) / numThreads; thrust::device_vector<float> d_sourceR(number_of_elements); thrust::device_vector<float> d_sourceG(number_of_elements); thrust::device_vector<float> d_sourceB(number_of_elements); thrust::device_vector<float> d_sourceA(number_of_elements); thrust::copy(_inreds->begin(), _inreds->end(), d_sourceR.begin()); thrust::copy(_ingrns->begin(), _ingrns->end(), d_sourceG.begin()); thrust::copy(_inblus->begin(), _inblus->end(), d_sourceB.begin()); thrust::copy(_inalps->begin(), _inalps->end(), d_sourceA.begin()); thrust::device_vector<float> d_meansR(k); thrust::device_vector<float> d_meansG(k); thrust::device_vector<float> d_meansB(k); thrust::device_vector<float> d_meansA(k); rfunc.setNumericLimitsL(0, number_of_elements - 1); /*for(auto cluster=0; cluster<k; ++cluster) { size_t num = rfunc.MT19937RandL(); d_meansR[cluster] = _inreds->at(num); d_meansG[cluster] = _ingrns->at(num); d_meansB[cluster] = _inblus->at(num); d_meansA[cluster] = _inalps->at(num); }*/ //Pick Centroids according to kmeans++ method by getting distances to all points size_t number = rfunc.MT19937RandL(); d_meansR[0] = d_sourceR[number]; //first mean is random d_meansG[0] = d_sourceG[number]; d_meansB[0] = d_sourceB[number]; d_meansA[0] = d_sourceA[number]; thrust::device_vector<float> 
d_distances(number_of_elements, 0.f); thrust::device_vector<size_t> d_weights(number_of_elements, 0); //thrust::device_vector<float> d_totalDistance(k, 0.f); std::vector<size_t> tWeights(number_of_elements); for(auto centroid=1; centroid<k; ++centroid) { calculateDistancesToCentroids_4V<<<blocks, numThreads>>>(d_sourceR.data(), d_sourceG.data(), d_sourceB.data(), d_sourceA.data(), number_of_elements, d_meansR.data(), d_meansG.data(), d_meansB.data(), d_meansA.data(), centroid, d_distances.data(), d_weights.data()); cudaDeviceSynchronize(); thrust::copy(d_weights.begin(), d_weights.end(), tWeights.begin()); size_t randomIDx = rfunc.weightedRand(tWeights); d_meansR[centroid]=d_sourceR[randomIDx]; d_meansG[centroid]=d_sourceG[randomIDx]; d_meansB[centroid]=d_sourceB[randomIDx]; d_meansA[centroid]=d_sourceA[randomIDx]; thrust::fill(d_distances.begin(), d_distances.end(), 0.f); } //end of centoid picking thrust::device_vector<int> d_assignments(number_of_elements); thrust::device_vector<float> d_filteredR(number_of_elements); thrust::device_vector<float> d_filteredG(number_of_elements); thrust::device_vector<float> d_filteredB(number_of_elements); thrust::device_vector<float> d_filteredA(number_of_elements); thrust::device_vector<float> d_sumsR(k); thrust::device_vector<float> d_sumsG(k); thrust::device_vector<float> d_sumsB(k); thrust::device_vector<float> d_sumsA(k); thrust::device_vector<int> d_counts(k, 0); for (size_t iteration = 0; iteration < number_of_iterations; ++iteration) { thrust::fill(d_sumsR.begin(), d_sumsR.end(), float{0.0f}); thrust::fill(d_sumsG.begin(), d_sumsG.end(), float{0.0f}); thrust::fill(d_sumsB.begin(), d_sumsB.end(), float{0.0f}); thrust::fill(d_sumsA.begin(), d_sumsA.end(), float{0.0f}); thrust::fill(d_counts.begin(), d_counts.end(), 0); assignClusters_parallel_4F<<<blocks, numThreads>>>(d_sourceR.data(), d_sourceG.data(), d_sourceB.data(), d_sourceA.data(), number_of_elements, d_meansR.data(), d_meansG.data(), d_meansB.data(), d_meansA.data(), d_sumsR.data(), d_sumsG.data(), d_sumsB.data(), d_sumsA.data(), k, d_counts.data(), d_assignments.data()); cudaDeviceSynchronize(); computeNewMeans_parallel_4F<<<1, k>>>(d_meansR.data(), d_meansG.data(), d_meansB.data(), d_meansA.data(), d_sumsR.data(), d_sumsG.data(), d_sumsB.data(), d_sumsA.data(), d_counts.data()); cudaDeviceSynchronize(); } writeNewColors_parallel_4F<<<blocks, numThreads>>>(d_meansR.data(), d_meansG.data(), d_meansB.data(), d_meansA.data(), number_of_elements, d_assignments.data(), d_filteredR.data(), d_filteredG.data(), d_filteredB.data(), d_filteredA.data()); cudaDeviceSynchronize(); thrust::copy(d_filteredR.begin(), d_filteredR.end(), _outreds->begin()); thrust::copy(d_filteredG.begin(), d_filteredG.end(), _outgrns->begin()); thrust::copy(d_filteredB.begin(), d_filteredB.end(), _outblus->begin()); thrust::copy(d_filteredA.begin(), d_filteredA.end(), _outalps->begin()); return; } void gpuKmeans::kmeans_parallel_4LV(const float* _inreds, const float* _ingrns, const float* _inblus, const float* _inalps, float* _outreds, float* _outgrns, float* _outblus, float* _outalps, const size_t number_of_elements, size_t k, size_t number_of_iterations, const size_t numThreads) { RandomFn<float> rfunc; const int blocks = (number_of_elements + numThreads - 1) / numThreads; thrust::device_vector<float> d_sourceR(number_of_elements); thrust::device_vector<float> d_sourceG(number_of_elements); thrust::device_vector<float> d_sourceB(number_of_elements); thrust::device_vector<float> d_sourceA(number_of_elements); 
thrust::copy(_inreds, _inreds+number_of_elements, d_sourceR.begin()); thrust::copy(_ingrns, _ingrns+number_of_elements, d_sourceG.begin()); thrust::copy(_inblus, _inblus+number_of_elements, d_sourceB.begin()); thrust::copy(_inalps, _inalps+number_of_elements, d_sourceA.begin()); thrust::device_vector<float> d_meansR(k); thrust::device_vector<float> d_meansG(k); thrust::device_vector<float> d_meansB(k); thrust::device_vector<float> d_meansA(k); rfunc.setNumericLimitsL(0, number_of_elements - 1); /*for(auto cluster=0; cluster<k; ++cluster) { size_t num = rfunc.MT19937RandL(); d_meansR[cluster] = _inreds[num]; d_meansG[cluster] = _ingrns[num]; d_meansB[cluster] = _inblus[num]; d_meansA[cluster] = _inalps[num]; }*/ //Pick Centroids according to kmeans++ method by getting distances to all points size_t number = rfunc.MT19937RandL(); d_meansR[0] = d_sourceR[number]; //first mean is random d_meansG[0] = d_sourceG[number]; d_meansB[0] = d_sourceB[number]; d_meansA[0] = d_sourceA[number]; thrust::device_vector<float> d_distances(number_of_elements, 0.f); thrust::device_vector<size_t> d_weights(number_of_elements, 0); //thrust::device_vector<float> d_totalDistance(k, 0.f); std::vector<size_t> tWeights(number_of_elements); for(auto centroid=1; centroid<k; ++centroid) { calculateDistancesToCentroids_4V<<<blocks, numThreads>>>(d_sourceR.data(), d_sourceG.data(), d_sourceB.data(), d_sourceA.data(), number_of_elements, d_meansR.data(), d_meansG.data(), d_meansB.data(), d_meansA.data(), centroid, d_distances.data(), d_weights.data()); cudaDeviceSynchronize(); thrust::copy(d_weights.begin(), d_weights.end(), tWeights.begin()); size_t randomIDx = rfunc.weightedRand(tWeights); d_meansR[centroid]=d_sourceR[randomIDx]; d_meansG[centroid]=d_sourceG[randomIDx]; d_meansB[centroid]=d_sourceB[randomIDx]; d_meansA[centroid]=d_sourceA[randomIDx]; thrust::fill(d_distances.begin(), d_distances.end(), 0.f); } //end of centoid picking thrust::device_vector<int> d_assignments(number_of_elements); thrust::device_vector<float> d_filteredR(number_of_elements); thrust::device_vector<float> d_filteredG(number_of_elements); thrust::device_vector<float> d_filteredB(number_of_elements); thrust::device_vector<float> d_filteredA(number_of_elements); thrust::device_vector<float> d_sumsR(k); thrust::device_vector<float> d_sumsG(k); thrust::device_vector<float> d_sumsB(k); thrust::device_vector<float> d_sumsA(k); thrust::device_vector<int> d_counts(k, 0); for (size_t iteration = 0; iteration < number_of_iterations; ++iteration) { thrust::fill(d_sumsR.begin(), d_sumsR.end(), float{0.0f}); thrust::fill(d_sumsG.begin(), d_sumsG.end(), float{0.0f}); thrust::fill(d_sumsB.begin(), d_sumsB.end(), float{0.0f}); thrust::fill(d_sumsA.begin(), d_sumsA.end(), float{0.0f}); thrust::fill(d_counts.begin(), d_counts.end(), 0); assignClusters_parallel_4F<<<blocks, numThreads>>>(d_sourceR.data(), d_sourceG.data(), d_sourceB.data(), d_sourceA.data(), number_of_elements, d_meansR.data(), d_meansG.data(), d_meansB.data(), d_meansA.data(), d_sumsR.data(), d_sumsG.data(), d_sumsB.data(), d_sumsA.data(), k, d_counts.data(), d_assignments.data()); cudaDeviceSynchronize(); computeNewMeans_parallel_4F<<<1, k>>>(d_meansR.data(), d_meansG.data(), d_meansB.data(), d_meansA.data(), d_sumsR.data(), d_sumsG.data(), d_sumsB.data(), d_sumsA.data(), d_counts.data()); cudaDeviceSynchronize(); } writeNewColors_parallel_4F<<<blocks, numThreads>>>(d_meansR.data(), d_meansG.data(), d_meansB.data(), d_meansA.data(), number_of_elements, d_assignments.data(), 
d_filteredR.data(), d_filteredG.data(), d_filteredB.data(), d_filteredA.data()); cudaDeviceSynchronize(); thrust::copy(d_filteredR.begin(), d_filteredR.end(), _outreds); thrust::copy(d_filteredG.begin(), d_filteredG.end(), _outgrns); thrust::copy(d_filteredB.begin(), d_filteredB.end(), _outblus); thrust::copy(d_filteredA.begin(), d_filteredA.end(), _outalps); return; }
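// Note: the kmeans++ seeding loops above compute per-point squared distances on the GPU, copy them
// back as integer weights (tWeights), and let RandomFn::weightedRand pick the next centroid index
// with probability proportional to its weight. That helper's implementation is not shown in this
// file; a minimal host-side sketch of such a weighted draw, assuming C++11 <random> is available
// (weightedPick is a hypothetical name, illustrative only):
#include <random>
#include <vector>

size_t weightedPick(const std::vector<size_t>& weights, std::mt19937_64& gen)
{
    // Points far from every centroid chosen so far carry proportionally larger weights,
    // which is exactly the k-means++ selection rule.
    std::discrete_distribution<size_t> dist(weights.begin(), weights.end());
    return dist(gen);
}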
14b982cb5febeb6fd517d52e1088fa7a3c116bda.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <vector> #include "caffe/layer.hpp" #include "caffe/util/math_functions.hpp" #include "caffe/util/gpu_util.cuh" #include "caffe/st_pt_layer.hpp" #include "caffe/util/benchmark.hpp" namespace caffe { template <typename Dtype> __global__ void set_value_to_constant(const int nthreads, Dtype value, int size, int i, Dtype* dst) { CUDA_KERNEL_LOOP(index, nthreads) { dst[index * size + i] = value; } } template <typename Dtype> __global__ void copy_values(const int nthreads, int size_src, int k, const Dtype* src, int size_dst, int i, Dtype* dst) { CUDA_KERNEL_LOOP(index, nthreads) { dst[index * size_dst + i] = src[index * size_src + k]; } } template <typename Dtype> __global__ void SpatialTransformerPTForwardGPU(const int nthreads, int N, int C, int output_H_, int output_W_, int H, int W, const Dtype* input_grid_data, const Dtype* U, Dtype* V) { CUDA_KERNEL_LOOP(index, nthreads) { const int t = index % output_W_; const int s = (index / output_W_) % output_H_; const int j = (index / (output_W_ * output_H_)) % C; const int i = index / (output_W_ * output_H_ * C); const Dtype* coordinates = input_grid_data + (output_H_ * output_W_ * 3) * i; const int row_idx = output_W_ * s + t; const Dtype pw = coordinates[row_idx * 3 + 2]; const Dtype px = coordinates[row_idx * 3] / pw; const Dtype py = coordinates[row_idx * 3 + 1] / pw; const int V_offset = index; V[V_offset] = (Dtype)0.; const Dtype x = (px + 1) / 2 * H; const Dtype y = (py + 1) / 2 * W; int m, n; Dtype w; const Dtype* pic = U + i * (C * H * W) + j * (H * W); m = floor(x); n = floor(y); w = 0; if(m >= 0 && m < H && n >= 0 && n < W) { w = (1 - (x - m)) * (1 - (y - n)); V[V_offset] += w * pic[m * W + n]; } m = floor(x) + 1; n = floor(y); w = 0; if(m >= 0 && m < H && n >= 0 && n < W) { w = (1 - (m - x)) * (1 - (y - n)); V[V_offset] += w * pic[m * W + n]; } m = floor(x); n = floor(y) + 1; w = 0; if(m >= 0 && m < H && n >= 0 && n < W) { w = (1 - (x - m)) * (1 - (n - y)); V[V_offset] += w * pic[m * W + n]; } m = floor(x) + 1; n = floor(y) + 1; w = 0; if(m >= 0 && m < H && n >= 0 && n < W) { w = (1 - (m - x)) * (1 - (n - y)); V[V_offset] += w * pic[m * W + n]; } //m = floor(x); n = floor(y); w = (Dtype)0; //if (m >= 0 && m < H && n >= 0 && n < W) { // w = max((Dtype)0, (Dtype)1 - abs(x - m)) * max((Dtype)0, (Dtype)1 - abs(y - n)); // V[V_offset] += w * pic[m * W + n]; //} //m = floor(x) + 1; n = floor(y); w = (Dtype)0; //if (m >= 0 && m < H && n >= 0 && n < W) { // w = max((Dtype)0, (Dtype)1 - abs(x - m)) * max((Dtype)0, (Dtype)1 - abs(y - n)); // V[V_offset] += w * pic[m * W + n]; //} //m = floor(x); n = floor(y) + 1; w = (Dtype)0; //if (m >= 0 && m < H && n >= 0 && n < W) { // w = max((Dtype)0, (Dtype)1 - abs(x - m)) * max((Dtype)0, (Dtype)1 - abs(y - n)); // V[V_offset] += w * pic[m * W + n]; //} //m = floor(x) + 1; n = floor(y) + 1; w = (Dtype)0; //if (m >= 0 && m < H && n >= 0 && n < W) { // w = max((Dtype)0, (Dtype)1 - abs(x - m)) * max((Dtype)0, (Dtype)1 - abs(y - n)); // V[V_offset] += w * pic[m * W + n]; //} } } template <typename Dtype> __global__ void overflow_test(const int nthreads, int N, int output_H_, int output_W_, Dtype* input_grid_data) { CUDA_KERNEL_LOOP(index, nthreads) { const int t = index % output_W_; const int s = (index / output_W_) % output_H_; const int i = index / (output_W_ * output_H_); //Dtype pw = input_grid_data[3 * index + 2]; //if (pw < 0.000001 && pw > -0.000001) { // if (pw > 0) { // input_grid_data[3 * index + 2] = 
0.0001; // } // else { // input_grid_data[3 * index + 2] = -0.0001; // } //} Dtype pw = input_grid_data[index * 3 + 2]; input_grid_data[index * 3] = input_grid_data[index * 3] / pw; input_grid_data[index * 3 + 1] = input_grid_data[index * 3 + 1] / pw; } } template <typename Dtype> void SpatialTransformerPTLayer<Dtype>::Forward_gpu( const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) { string prefix = "SpatialTransformerPTLayer::Forward_gpu::\t"; //Forward_cpu(bottom, top); const Dtype* U = bottom[0]->gpu_data(); const Dtype* theta = bottom[1]->gpu_data(); Dtype* output_grid_data = output_grid.mutable_gpu_data(); //std::cout << "output_grid data sync end " << std::endl; Dtype* full_theta_data = full_theta.mutable_gpu_data(); Dtype* input_grid_data = input_grid.mutable_gpu_data(); Dtype* V = top[0]->mutable_gpu_data(); caffe_gpu_set(input_grid.count(), (Dtype)0, input_grid_data); caffe_gpu_set(top[0]->count(), (Dtype)0, V); // compute full_theta int k = 0; const int num_threads = N; for(int i=0; i<9; ++i) { if (is_pre_defined_theta[i]) { set_value_to_constant<Dtype> << <CAFFE_GET_BLOCKS(num_threads), CAFFE_CUDA_NUM_THREADS >> >( num_threads, pre_defined_theta[i], 9, i, full_theta_data); //std::cout << "Setting value " << pre_defined_theta[i] << " to "<< i << // "/9 of full_theta_data" << std::endl; } else { copy_values<Dtype> << <CAFFE_GET_BLOCKS(num_threads), CAFFE_CUDA_NUM_THREADS >> >(num_threads, 9 - pre_defined_count, k, theta, 9, i, full_theta_data); //std::cout << "Copying " << k << "/" << 9 - pre_defined_count << " of theta to " // << i << "/9 of full_theta_data" << std::endl; ++k; } } // compute out input_grid_data for(int i = 0; i < N; ++i) { caffe_gpu_gemm<Dtype>(CblasNoTrans, CblasTrans, output_H_ * output_W_, 3, 3, (Dtype)1., output_grid_data, full_theta_data + 9 * i, (Dtype)0., input_grid_data + (output_H_ * output_W_ * 3) * i); } test_defined_count = test_defined_count + 1; ////if (test_defined_count == 1000) //{ // std::cout << "dw dw dw" << std::endl; // const Dtype* full_theta_test = full_theta.cpu_data(); // for (int index = 0; index < full_theta.count(); ++index) { // Dtype theta = full_theta_test[index]; // std::cout << theta << " "; // } // std::cout << std::endl << std::endl; //} //******be care overfitting.******** no bug //const int gpu_nthreads = N * output_H_ * output_W_; //overflow_test<Dtype> << <CAFFE_GET_BLOCKS(gpu_nthreads), // CAFFE_CUDA_NUM_THREADS >> >(gpu_nthreads, N, output_H_, output_W_, input_grid_data); //if (test_defined_count == 1) //{ // std::cout << "dw dw dw" << std::endl; // const Dtype* input_grid_data_test = input_grid.cpu_data(); // for (int index = 0; index < output_H_ * output_W_; ++index) { // Dtype pw = input_grid_data_test[3 * index + 2]; // std::cout << pw << " "; // } // std::cout << std::endl << std::endl; // std::cout << "dx dx dx" << std::endl; // for (int index = 0; index < output_H_ * output_W_; ++index) { // Dtype pw = input_grid_data_test[3 * index]; // std::cout << pw << " "; // } // std::cout << std::endl << std::endl; // std::cout << "dy dy dy" << std::endl; // for (int index = 0; index < output_H_ * output_W_; ++index) { // Dtype pw = input_grid_data_test[3 * index + 1]; // std::cout << pw << " "; // } // std::cout << std::endl << std::endl; // //Dtype* break_ptr = 0; // //*break_ptr = 1; //} //std::cout << output_H_ << " " << output_W_ << " " << H << " " << W << " " << N << " " << C << " "; #if 1 const int nthreads = N * C * output_H_ * output_W_; hipLaunchKernelGGL(( SpatialTransformerPTForwardGPU<Dtype>), 
dim3(CAFFE_GET_BLOCKS(nthreads)), dim3( CAFFE_CUDA_NUM_THREADS), 0, 0, nthreads, N, C, output_H_, output_W_, H, W, input_grid_data, U, V); #endif #if 0 const Dtype* input_grid_cpu_data = input_grid.cpu_data(); const Dtype* cpu_U = bottom[0]->cpu_data(); Dtype* cpu_V = top[0]->mutable_cpu_data(); for (int i = 0; i < N; ++i) { const Dtype* coordinates = input_grid_cpu_data + (output_H_ * output_W_ * 3) * i; int row_idx; Dtype px, py; for (int j = 0; j < C; ++j) for (int s = 0; s < output_H_; ++s) for (int t = 0; t < output_W_; ++t) { row_idx = output_W_ * s + t; px = coordinates[row_idx * 3] / coordinates[row_idx * 3 + 2]; py = coordinates[row_idx * 3 + 1] / coordinates[row_idx * 3 + 2]; cpu_V[top[0]->offset(i, j, s, t)] = transform_forward_cpu( cpu_U + bottom[0]->offset(i, j, 0, 0), px, py); } } #endif } template <typename Dtype> __global__ void SpatialTransformerPTBackwardGPU_dTheta(const int nthreads, int C, int output_H_, int output_W_, int H, int W, const Dtype* input_grid_data, const Dtype* dV_array, const Dtype* U_array, Dtype* dTheta_tmp_diff) { CUDA_KERNEL_LOOP(index, nthreads) { const int t = index % output_W_; const int s = (index / output_W_) % output_H_; const int j = (index / (output_W_ * output_H_)) % C; const int i = index / (output_W_ * output_H_ * C); const Dtype* coordinates = input_grid_data + (output_H_ * output_W_ * 3) * i; const int row_idx = output_W_ * s + t; const Dtype pw = coordinates[row_idx * 3 + 2]; const Dtype px = coordinates[row_idx * 3] / pw; const Dtype py = coordinates[row_idx * 3 + 1] / pw; Dtype delta_dpx = (Dtype)0.; Dtype delta_dpy = (Dtype)0.; Dtype delta_dpw = (Dtype)0.; const Dtype x = (px + 1) / 2 * H; const Dtype y = (py + 1) / 2 * W; const int dV_offset = index; const Dtype dV = dV_array[dV_offset]; int m, n; const Dtype* U = U_array + i * (C * H * W) + j * (H * W); // left-bottom neighbor m = floor(x); n = floor(y); if(m >= 0 && m < H && n >= 0 && n < W) { delta_dpx -= (1 - (y - n)) * U[m * W + n] * dV * H / 2; delta_dpy -= (1 - (x - m)) * U[m * W + n] * dV * W / 2; } // left-top neighbor m = floor(x); n = floor(y) + 1; if(m >= 0 && m < H && n >= 0 && n < W) { delta_dpx -= (1 - (n - y)) * U[m * W + n] * dV * H / 2; delta_dpy += (1 - (x - m)) * U[m * W + n] * dV * W / 2; } // right-bottom neighbor m = floor(x) + 1; n = floor(y); if(m >= 0 && m < H && n >= 0 && n < W) { delta_dpx += (1 - (y - n)) * U[m * W + n] * dV * H / 2; delta_dpy -= (1 - (m - x)) * U[m * W + n] * dV * W / 2; } // right-top neighbor m = floor(x) + 1; n = floor(y) + 1; if(m >= 0 && m < H && n >= 0 && n < W) { delta_dpx += (1 - (n - y)) * U[m * W + n] * dV * H / 2; delta_dpy += (1 - (m - x)) * U[m * W + n] * dV * W / 2; } delta_dpw = delta_dpx*(-coordinates[row_idx * 3])/ (pw*pw) + delta_dpy*(-coordinates[row_idx * 3+1]) / (pw*pw); //******be care overfitting.******** //delta_dpw = delta_dpx*(-px)/pw + delta_dpy*(-py) / pw; int idx = j * (output_H_ * output_W_) + s * output_W_ + t; dTheta_tmp_diff[(9 * i) * (output_H_ * output_W_ * C) + idx] += delta_dpx * (s * 1.0 / output_H_ * 2 - 1) / pw; dTheta_tmp_diff[(9 * i + 1) * (output_H_ * output_W_ * C) + idx] += delta_dpx * (t * 1.0 / output_W_ * 2 - 1) / pw; dTheta_tmp_diff[(9 * i + 2) * (output_H_ * output_W_ * C) + idx] += delta_dpx / pw; dTheta_tmp_diff[(9 * i + 3) * (output_H_ * output_W_ * C) + idx] += delta_dpy * (s * 1.0 / output_H_ * 2 - 1) / pw; dTheta_tmp_diff[(9 * i + 4) * (output_H_ * output_W_ * C) + idx] += delta_dpy * (t * 1.0 / output_W_ * 2 - 1) / pw; dTheta_tmp_diff[(9 * i + 5) * (output_H_ * output_W_ * C) + 
idx] += delta_dpy / pw; dTheta_tmp_diff[(9 * i + 6) * (output_H_ * output_W_ * C) + idx] += delta_dpw * (s * 1.0 / output_H_ * 2 - 1); dTheta_tmp_diff[(9 * i + 7) * (output_H_ * output_W_ * C) + idx] += delta_dpw * (t * 1.0 / output_W_ * 2 - 1); dTheta_tmp_diff[(9 * i + 8) * (output_H_ * output_W_ * C) + idx] += delta_dpw; } } template <typename Dtype> __global__ void SpatialTransformerPTBackwardGPU_dU(const int nthreads, const int C, const int W, const int H, const int output_H_, const int output_W_, const Dtype* input_grid_data, const Dtype* dV, Dtype* dU) { CUDA_KERNEL_LOOP(index, nthreads) { const int t = index % output_W_; const int s = (index / output_W_) % output_H_; const int j = (index / (output_W_ * output_H_)) % C; const int i = index / (output_W_ * output_H_ * C); const Dtype* coordinates = input_grid_data + (output_H_ * output_W_ * 3) * i; const int row_idx = output_W_ * s + t; const Dtype pw = coordinates[row_idx * 3 + 2]; const Dtype px = coordinates[row_idx * 3] / pw; const Dtype py = coordinates[row_idx * 3 + 1] / pw; const int V_offset = index; const Dtype x = (px + 1) / 2 * H; const Dtype y = (py + 1) / 2 * W; int m, n; Dtype w; Dtype* pic = dU + i * (C * H * W) + j * (H * W); m = floor(x); n = floor(y); w = 0; if(m >= 0 && m < H && n >= 0 && n < W) { w = (1 - (x - m)) * (1 - (y - n)); caffe_gpu_atomic_add(w * dV[V_offset], pic + (m * W + n)); } m = floor(x) + 1; n = floor(y); w = 0; if(m >= 0 && m < H && n >= 0 && n < W) { w = (1 - (m - x)) * (1 - (y - n)); caffe_gpu_atomic_add(w * dV[V_offset], pic + (m * W + n)); } m = floor(x); n = floor(y) + 1; w = 0; if(m >= 0 && m < H && n >= 0 && n < W) { w = (1 - (x - m)) * (1 - (n - y)); caffe_gpu_atomic_add(w * dV[V_offset], pic + (m * W + n)); } m = floor(x) + 1; n = floor(y) + 1; w = 0; if(m >= 0 && m < H && n >= 0 && n < W) { w = (1 - (m - x)) * (1 - (n - y)); caffe_gpu_atomic_add(w * dV[V_offset], pic + (m * W + n)); } } } template <typename Dtype> void SpatialTransformerPTLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top, const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) { string prefix = "SpatialTransformerPTLayer::Backward_GPU::\t"; const Dtype* dV = top[0]->gpu_diff(); const Dtype* input_grid_data = input_grid.gpu_data(); const Dtype* U = bottom[0]->gpu_data(); Dtype* dFull_theta = full_theta.mutable_gpu_diff(); Dtype* dTheta = bottom[1]->mutable_gpu_diff(); Dtype* dTheta_tmp_diff = dTheta_tmp.mutable_gpu_diff(); caffe_gpu_set(dTheta_tmp.count(), (Dtype)0., dTheta_tmp_diff); const int nthreads = N * C * output_H_ * output_W_; hipLaunchKernelGGL(( SpatialTransformerPTBackwardGPU_dTheta<Dtype>), dim3(CAFFE_GET_BLOCKS(nthreads)), dim3( CAFFE_CUDA_NUM_THREADS), 0, 0, nthreads, C, output_H_, output_W_, H, W, input_grid_data, dV, U, dTheta_tmp_diff); Dtype* all_ones_2_data = all_ones_2.mutable_gpu_data(); caffe_gpu_set(all_ones_2.count(), (Dtype)1., all_ones_2_data); caffe_gpu_gemm<Dtype>(CblasNoTrans, CblasNoTrans, full_theta.count(), 1, output_H_ * output_W_ * C, (Dtype)1., dTheta_tmp_diff, all_ones_2_data, (Dtype)0., dFull_theta); /*const Dtype* db_dFull_theta = full_theta.cpu_diff(); for(int i=0; i<full_theta.count(); ++i) { std::cout << db_dFull_theta[i] << " "; } std::cout<<std::endl;*/ int k = 0; const int num_threads = N; for(int i=0; i<9; ++i) { if (!is_pre_defined_theta[i]) { copy_values<Dtype> << <CAFFE_GET_BLOCKS(num_threads), CAFFE_CUDA_NUM_THREADS >> >(num_threads, 9, i, dFull_theta, 9 - pre_defined_count, k, dTheta); //std::cout << "Copying " << i << "/9 of dFull_theta to " << k 
<< "/" << // 9 - pre_defined_count << " of dTheta" << std::endl; ++k; } } /*const Dtype* db_dtheta = bottom[1]->cpu_diff(); for(int i=0; i<bottom[1]->count(); ++i) { std::cout << db_dtheta[i] << " "; } std::cout<<std::endl;*/ if(to_compute_dU_) { Dtype* dU = bottom[0]->mutable_gpu_diff(); caffe_gpu_set(bottom[0]->count(), (Dtype)0., dU); const int nthreads = N * C * output_H_ * output_W_; hipLaunchKernelGGL(( SpatialTransformerPTBackwardGPU_dU<Dtype>), dim3(CAFFE_GET_BLOCKS(nthreads)), dim3( CAFFE_CUDA_NUM_THREADS), 0, 0, nthreads, C, W, H, output_H_, output_W_, input_grid_data, dV, dU); } } INSTANTIATE_LAYER_GPU_FUNCS(SpatialTransformerPTLayer); } // namespace caffe
14b982cb5febeb6fd517d52e1088fa7a3c116bda.cu
#include <vector> #include "caffe/layer.hpp" #include "caffe/util/math_functions.hpp" #include "caffe/util/gpu_util.cuh" #include "caffe/st_pt_layer.hpp" #include "caffe/util/benchmark.hpp" namespace caffe { template <typename Dtype> __global__ void set_value_to_constant(const int nthreads, Dtype value, int size, int i, Dtype* dst) { CUDA_KERNEL_LOOP(index, nthreads) { dst[index * size + i] = value; } } template <typename Dtype> __global__ void copy_values(const int nthreads, int size_src, int k, const Dtype* src, int size_dst, int i, Dtype* dst) { CUDA_KERNEL_LOOP(index, nthreads) { dst[index * size_dst + i] = src[index * size_src + k]; } } template <typename Dtype> __global__ void SpatialTransformerPTForwardGPU(const int nthreads, int N, int C, int output_H_, int output_W_, int H, int W, const Dtype* input_grid_data, const Dtype* U, Dtype* V) { CUDA_KERNEL_LOOP(index, nthreads) { const int t = index % output_W_; const int s = (index / output_W_) % output_H_; const int j = (index / (output_W_ * output_H_)) % C; const int i = index / (output_W_ * output_H_ * C); const Dtype* coordinates = input_grid_data + (output_H_ * output_W_ * 3) * i; const int row_idx = output_W_ * s + t; const Dtype pw = coordinates[row_idx * 3 + 2]; const Dtype px = coordinates[row_idx * 3] / pw; const Dtype py = coordinates[row_idx * 3 + 1] / pw; const int V_offset = index; V[V_offset] = (Dtype)0.; const Dtype x = (px + 1) / 2 * H; const Dtype y = (py + 1) / 2 * W; int m, n; Dtype w; const Dtype* pic = U + i * (C * H * W) + j * (H * W); m = floor(x); n = floor(y); w = 0; if(m >= 0 && m < H && n >= 0 && n < W) { w = (1 - (x - m)) * (1 - (y - n)); V[V_offset] += w * pic[m * W + n]; } m = floor(x) + 1; n = floor(y); w = 0; if(m >= 0 && m < H && n >= 0 && n < W) { w = (1 - (m - x)) * (1 - (y - n)); V[V_offset] += w * pic[m * W + n]; } m = floor(x); n = floor(y) + 1; w = 0; if(m >= 0 && m < H && n >= 0 && n < W) { w = (1 - (x - m)) * (1 - (n - y)); V[V_offset] += w * pic[m * W + n]; } m = floor(x) + 1; n = floor(y) + 1; w = 0; if(m >= 0 && m < H && n >= 0 && n < W) { w = (1 - (m - x)) * (1 - (n - y)); V[V_offset] += w * pic[m * W + n]; } //m = floor(x); n = floor(y); w = (Dtype)0; //if (m >= 0 && m < H && n >= 0 && n < W) { // w = max((Dtype)0, (Dtype)1 - abs(x - m)) * max((Dtype)0, (Dtype)1 - abs(y - n)); // V[V_offset] += w * pic[m * W + n]; //} //m = floor(x) + 1; n = floor(y); w = (Dtype)0; //if (m >= 0 && m < H && n >= 0 && n < W) { // w = max((Dtype)0, (Dtype)1 - abs(x - m)) * max((Dtype)0, (Dtype)1 - abs(y - n)); // V[V_offset] += w * pic[m * W + n]; //} //m = floor(x); n = floor(y) + 1; w = (Dtype)0; //if (m >= 0 && m < H && n >= 0 && n < W) { // w = max((Dtype)0, (Dtype)1 - abs(x - m)) * max((Dtype)0, (Dtype)1 - abs(y - n)); // V[V_offset] += w * pic[m * W + n]; //} //m = floor(x) + 1; n = floor(y) + 1; w = (Dtype)0; //if (m >= 0 && m < H && n >= 0 && n < W) { // w = max((Dtype)0, (Dtype)1 - abs(x - m)) * max((Dtype)0, (Dtype)1 - abs(y - n)); // V[V_offset] += w * pic[m * W + n]; //} } } template <typename Dtype> __global__ void overflow_test(const int nthreads, int N, int output_H_, int output_W_, Dtype* input_grid_data) { CUDA_KERNEL_LOOP(index, nthreads) { const int t = index % output_W_; const int s = (index / output_W_) % output_H_; const int i = index / (output_W_ * output_H_); //Dtype pw = input_grid_data[3 * index + 2]; //if (pw < 0.000001 && pw > -0.000001) { // if (pw > 0) { // input_grid_data[3 * index + 2] = 0.0001; // } // else { // input_grid_data[3 * index + 2] = -0.0001; // } //} Dtype pw = 
input_grid_data[index * 3 + 2]; input_grid_data[index * 3] = input_grid_data[index * 3] / pw; input_grid_data[index * 3 + 1] = input_grid_data[index * 3 + 1] / pw; } } template <typename Dtype> void SpatialTransformerPTLayer<Dtype>::Forward_gpu( const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) { string prefix = "SpatialTransformerPTLayer::Forward_gpu::\t"; //Forward_cpu(bottom, top); const Dtype* U = bottom[0]->gpu_data(); const Dtype* theta = bottom[1]->gpu_data(); Dtype* output_grid_data = output_grid.mutable_gpu_data(); //std::cout << "output_grid data sync end " << std::endl; Dtype* full_theta_data = full_theta.mutable_gpu_data(); Dtype* input_grid_data = input_grid.mutable_gpu_data(); Dtype* V = top[0]->mutable_gpu_data(); caffe_gpu_set(input_grid.count(), (Dtype)0, input_grid_data); caffe_gpu_set(top[0]->count(), (Dtype)0, V); // compute full_theta int k = 0; const int num_threads = N; for(int i=0; i<9; ++i) { if (is_pre_defined_theta[i]) { set_value_to_constant<Dtype> << <CAFFE_GET_BLOCKS(num_threads), CAFFE_CUDA_NUM_THREADS >> >( num_threads, pre_defined_theta[i], 9, i, full_theta_data); //std::cout << "Setting value " << pre_defined_theta[i] << " to "<< i << // "/9 of full_theta_data" << std::endl; } else { copy_values<Dtype> << <CAFFE_GET_BLOCKS(num_threads), CAFFE_CUDA_NUM_THREADS >> >(num_threads, 9 - pre_defined_count, k, theta, 9, i, full_theta_data); //std::cout << "Copying " << k << "/" << 9 - pre_defined_count << " of theta to " // << i << "/9 of full_theta_data" << std::endl; ++k; } } // compute out input_grid_data for(int i = 0; i < N; ++i) { caffe_gpu_gemm<Dtype>(CblasNoTrans, CblasTrans, output_H_ * output_W_, 3, 3, (Dtype)1., output_grid_data, full_theta_data + 9 * i, (Dtype)0., input_grid_data + (output_H_ * output_W_ * 3) * i); } test_defined_count = test_defined_count + 1; ////if (test_defined_count == 1000) //{ // std::cout << "dw dw dw" << std::endl; // const Dtype* full_theta_test = full_theta.cpu_data(); // for (int index = 0; index < full_theta.count(); ++index) { // Dtype theta = full_theta_test[index]; // std::cout << theta << " "; // } // std::cout << std::endl << std::endl; //} //******be care overfitting.******** no bug //const int gpu_nthreads = N * output_H_ * output_W_; //overflow_test<Dtype> << <CAFFE_GET_BLOCKS(gpu_nthreads), // CAFFE_CUDA_NUM_THREADS >> >(gpu_nthreads, N, output_H_, output_W_, input_grid_data); //if (test_defined_count == 1) //{ // std::cout << "dw dw dw" << std::endl; // const Dtype* input_grid_data_test = input_grid.cpu_data(); // for (int index = 0; index < output_H_ * output_W_; ++index) { // Dtype pw = input_grid_data_test[3 * index + 2]; // std::cout << pw << " "; // } // std::cout << std::endl << std::endl; // std::cout << "dx dx dx" << std::endl; // for (int index = 0; index < output_H_ * output_W_; ++index) { // Dtype pw = input_grid_data_test[3 * index]; // std::cout << pw << " "; // } // std::cout << std::endl << std::endl; // std::cout << "dy dy dy" << std::endl; // for (int index = 0; index < output_H_ * output_W_; ++index) { // Dtype pw = input_grid_data_test[3 * index + 1]; // std::cout << pw << " "; // } // std::cout << std::endl << std::endl; // //Dtype* break_ptr = 0; // //*break_ptr = 1; //} //std::cout << output_H_ << " " << output_W_ << " " << H << " " << W << " " << N << " " << C << " "; #if 1 const int nthreads = N * C * output_H_ * output_W_; SpatialTransformerPTForwardGPU<Dtype><<<CAFFE_GET_BLOCKS(nthreads), CAFFE_CUDA_NUM_THREADS>>>(nthreads, N, C, output_H_, output_W_, H, W, 
input_grid_data, U, V); #endif #if 0 const Dtype* input_grid_cpu_data = input_grid.cpu_data(); const Dtype* cpu_U = bottom[0]->cpu_data(); Dtype* cpu_V = top[0]->mutable_cpu_data(); for (int i = 0; i < N; ++i) { const Dtype* coordinates = input_grid_cpu_data + (output_H_ * output_W_ * 3) * i; int row_idx; Dtype px, py; for (int j = 0; j < C; ++j) for (int s = 0; s < output_H_; ++s) for (int t = 0; t < output_W_; ++t) { row_idx = output_W_ * s + t; px = coordinates[row_idx * 3] / coordinates[row_idx * 3 + 2]; py = coordinates[row_idx * 3 + 1] / coordinates[row_idx * 3 + 2]; cpu_V[top[0]->offset(i, j, s, t)] = transform_forward_cpu( cpu_U + bottom[0]->offset(i, j, 0, 0), px, py); } } #endif } template <typename Dtype> __global__ void SpatialTransformerPTBackwardGPU_dTheta(const int nthreads, int C, int output_H_, int output_W_, int H, int W, const Dtype* input_grid_data, const Dtype* dV_array, const Dtype* U_array, Dtype* dTheta_tmp_diff) { CUDA_KERNEL_LOOP(index, nthreads) { const int t = index % output_W_; const int s = (index / output_W_) % output_H_; const int j = (index / (output_W_ * output_H_)) % C; const int i = index / (output_W_ * output_H_ * C); const Dtype* coordinates = input_grid_data + (output_H_ * output_W_ * 3) * i; const int row_idx = output_W_ * s + t; const Dtype pw = coordinates[row_idx * 3 + 2]; const Dtype px = coordinates[row_idx * 3] / pw; const Dtype py = coordinates[row_idx * 3 + 1] / pw; Dtype delta_dpx = (Dtype)0.; Dtype delta_dpy = (Dtype)0.; Dtype delta_dpw = (Dtype)0.; const Dtype x = (px + 1) / 2 * H; const Dtype y = (py + 1) / 2 * W; const int dV_offset = index; const Dtype dV = dV_array[dV_offset]; int m, n; const Dtype* U = U_array + i * (C * H * W) + j * (H * W); // left-bottom neighbor m = floor(x); n = floor(y); if(m >= 0 && m < H && n >= 0 && n < W) { delta_dpx -= (1 - (y - n)) * U[m * W + n] * dV * H / 2; delta_dpy -= (1 - (x - m)) * U[m * W + n] * dV * W / 2; } // left-top neighbor m = floor(x); n = floor(y) + 1; if(m >= 0 && m < H && n >= 0 && n < W) { delta_dpx -= (1 - (n - y)) * U[m * W + n] * dV * H / 2; delta_dpy += (1 - (x - m)) * U[m * W + n] * dV * W / 2; } // right-bottom neighbor m = floor(x) + 1; n = floor(y); if(m >= 0 && m < H && n >= 0 && n < W) { delta_dpx += (1 - (y - n)) * U[m * W + n] * dV * H / 2; delta_dpy -= (1 - (m - x)) * U[m * W + n] * dV * W / 2; } // right-top neighbor m = floor(x) + 1; n = floor(y) + 1; if(m >= 0 && m < H && n >= 0 && n < W) { delta_dpx += (1 - (n - y)) * U[m * W + n] * dV * H / 2; delta_dpy += (1 - (m - x)) * U[m * W + n] * dV * W / 2; } delta_dpw = delta_dpx*(-coordinates[row_idx * 3])/ (pw*pw) + delta_dpy*(-coordinates[row_idx * 3+1]) / (pw*pw); //******be care overfitting.******** //delta_dpw = delta_dpx*(-px)/pw + delta_dpy*(-py) / pw; int idx = j * (output_H_ * output_W_) + s * output_W_ + t; dTheta_tmp_diff[(9 * i) * (output_H_ * output_W_ * C) + idx] += delta_dpx * (s * 1.0 / output_H_ * 2 - 1) / pw; dTheta_tmp_diff[(9 * i + 1) * (output_H_ * output_W_ * C) + idx] += delta_dpx * (t * 1.0 / output_W_ * 2 - 1) / pw; dTheta_tmp_diff[(9 * i + 2) * (output_H_ * output_W_ * C) + idx] += delta_dpx / pw; dTheta_tmp_diff[(9 * i + 3) * (output_H_ * output_W_ * C) + idx] += delta_dpy * (s * 1.0 / output_H_ * 2 - 1) / pw; dTheta_tmp_diff[(9 * i + 4) * (output_H_ * output_W_ * C) + idx] += delta_dpy * (t * 1.0 / output_W_ * 2 - 1) / pw; dTheta_tmp_diff[(9 * i + 5) * (output_H_ * output_W_ * C) + idx] += delta_dpy / pw; dTheta_tmp_diff[(9 * i + 6) * (output_H_ * output_W_ * C) + idx] += delta_dpw * (s * 1.0 / 
output_H_ * 2 - 1); dTheta_tmp_diff[(9 * i + 7) * (output_H_ * output_W_ * C) + idx] += delta_dpw * (t * 1.0 / output_W_ * 2 - 1); dTheta_tmp_diff[(9 * i + 8) * (output_H_ * output_W_ * C) + idx] += delta_dpw; } } template <typename Dtype> __global__ void SpatialTransformerPTBackwardGPU_dU(const int nthreads, const int C, const int W, const int H, const int output_H_, const int output_W_, const Dtype* input_grid_data, const Dtype* dV, Dtype* dU) { CUDA_KERNEL_LOOP(index, nthreads) { const int t = index % output_W_; const int s = (index / output_W_) % output_H_; const int j = (index / (output_W_ * output_H_)) % C; const int i = index / (output_W_ * output_H_ * C); const Dtype* coordinates = input_grid_data + (output_H_ * output_W_ * 3) * i; const int row_idx = output_W_ * s + t; const Dtype pw = coordinates[row_idx * 3 + 2]; const Dtype px = coordinates[row_idx * 3] / pw; const Dtype py = coordinates[row_idx * 3 + 1] / pw; const int V_offset = index; const Dtype x = (px + 1) / 2 * H; const Dtype y = (py + 1) / 2 * W; int m, n; Dtype w; Dtype* pic = dU + i * (C * H * W) + j * (H * W); m = floor(x); n = floor(y); w = 0; if(m >= 0 && m < H && n >= 0 && n < W) { w = (1 - (x - m)) * (1 - (y - n)); caffe_gpu_atomic_add(w * dV[V_offset], pic + (m * W + n)); } m = floor(x) + 1; n = floor(y); w = 0; if(m >= 0 && m < H && n >= 0 && n < W) { w = (1 - (m - x)) * (1 - (y - n)); caffe_gpu_atomic_add(w * dV[V_offset], pic + (m * W + n)); } m = floor(x); n = floor(y) + 1; w = 0; if(m >= 0 && m < H && n >= 0 && n < W) { w = (1 - (x - m)) * (1 - (n - y)); caffe_gpu_atomic_add(w * dV[V_offset], pic + (m * W + n)); } m = floor(x) + 1; n = floor(y) + 1; w = 0; if(m >= 0 && m < H && n >= 0 && n < W) { w = (1 - (m - x)) * (1 - (n - y)); caffe_gpu_atomic_add(w * dV[V_offset], pic + (m * W + n)); } } } template <typename Dtype> void SpatialTransformerPTLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top, const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) { string prefix = "SpatialTransformerPTLayer::Backward_GPU::\t"; const Dtype* dV = top[0]->gpu_diff(); const Dtype* input_grid_data = input_grid.gpu_data(); const Dtype* U = bottom[0]->gpu_data(); Dtype* dFull_theta = full_theta.mutable_gpu_diff(); Dtype* dTheta = bottom[1]->mutable_gpu_diff(); Dtype* dTheta_tmp_diff = dTheta_tmp.mutable_gpu_diff(); caffe_gpu_set(dTheta_tmp.count(), (Dtype)0., dTheta_tmp_diff); const int nthreads = N * C * output_H_ * output_W_; SpatialTransformerPTBackwardGPU_dTheta<Dtype><<<CAFFE_GET_BLOCKS(nthreads), CAFFE_CUDA_NUM_THREADS>>>(nthreads, C, output_H_, output_W_, H, W, input_grid_data, dV, U, dTheta_tmp_diff); Dtype* all_ones_2_data = all_ones_2.mutable_gpu_data(); caffe_gpu_set(all_ones_2.count(), (Dtype)1., all_ones_2_data); caffe_gpu_gemm<Dtype>(CblasNoTrans, CblasNoTrans, full_theta.count(), 1, output_H_ * output_W_ * C, (Dtype)1., dTheta_tmp_diff, all_ones_2_data, (Dtype)0., dFull_theta); /*const Dtype* db_dFull_theta = full_theta.cpu_diff(); for(int i=0; i<full_theta.count(); ++i) { std::cout << db_dFull_theta[i] << " "; } std::cout<<std::endl;*/ int k = 0; const int num_threads = N; for(int i=0; i<9; ++i) { if (!is_pre_defined_theta[i]) { copy_values<Dtype> << <CAFFE_GET_BLOCKS(num_threads), CAFFE_CUDA_NUM_THREADS >> >(num_threads, 9, i, dFull_theta, 9 - pre_defined_count, k, dTheta); //std::cout << "Copying " << i << "/9 of dFull_theta to " << k << "/" << // 9 - pre_defined_count << " of dTheta" << std::endl; ++k; } } /*const Dtype* db_dtheta = bottom[1]->cpu_diff(); for(int i=0; 
i<bottom[1]->count(); ++i) { std::cout << db_dtheta[i] << " "; } std::cout<<std::endl;*/ if(to_compute_dU_) { Dtype* dU = bottom[0]->mutable_gpu_diff(); caffe_gpu_set(bottom[0]->count(), (Dtype)0., dU); const int nthreads = N * C * output_H_ * output_W_; SpatialTransformerPTBackwardGPU_dU<Dtype><<<CAFFE_GET_BLOCKS(nthreads), CAFFE_CUDA_NUM_THREADS>>>(nthreads, C, W, H, output_H_, output_W_, input_grid_data, dV, dU); } } INSTANTIATE_LAYER_GPU_FUNCS(SpatialTransformerPTLayer); } // namespace caffe
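// Note: SpatialTransformerPTForwardGPU above divides the projected homogeneous coordinates by pw,
// maps them to pixel space with x = (px + 1) / 2 * H and y = (py + 1) / 2 * W, and then blends the
// four integer neighbours of (x, y) with weights (1 - |x - m|) * (1 - |y - n|), i.e. bilinear
// interpolation with zero padding outside the image. A scalar host-side reference of that sampling
// step (illustrative sketch; pic is an H x W row-major single-channel image):
#include <cmath>

float bilinearSample(const float* pic, int H, int W, float x, float y)
{
    float v = 0.f;
    int m0 = (int)std::floor(x), n0 = (int)std::floor(y);
    for (int m = m0; m <= m0 + 1; ++m)
        for (int n = n0; n <= n0 + 1; ++n)
            if (m >= 0 && m < H && n >= 0 && n < W)
                v += (1.f - std::fabs(x - m)) * (1.f - std::fabs(y - n)) * pic[m * W + n];
    return v;
}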
8be56a05c90f236e237672e2b3016fea873e2d72.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* * Copyright 1993-2007 NVIDIA Corporation. All rights reserved. * * NOTICE TO USER: * * This source code is subject to NVIDIA ownership rights under U.S. and * international Copyright laws. Users and possessors of this source code * are hereby granted a nonexclusive, royalty-free license to use this code * in individual and commercial software. * * NVIDIA MAKES NO REPRESENTATION ABOUT THE SUITABILITY OF THIS SOURCE * CODE FOR ANY PURPOSE. IT IS PROVIDED "AS IS" WITHOUT EXPRESS OR * IMPLIED WARRANTY OF ANY KIND. NVIDIA DISCLAIMS ALL WARRANTIES WITH * REGARD TO THIS SOURCE CODE, INCLUDING ALL IMPLIED WARRANTIES OF * MERCHANTABILITY, NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE. * IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY SPECIAL, INDIRECT, INCIDENTAL, * OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS * OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE * OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE * OR PERFORMANCE OF THIS SOURCE CODE. * * U.S. Government End Users. This source code is a "commercial item" as * that term is defined at 48 C.F.R. 2.101 (OCT 1995), consisting of * "commercial computer software" and "commercial computer software * documentation" as such terms are used in 48 C.F.R. 12.212 (SEPT 1995) * and is provided to the U.S. Government only as a commercial end item. * Consistent with 48 C.F.R.12.212 and 48 C.F.R. 227.7202-1 through * 227.7202-4 (JUNE 1995), all U.S. Government End Users acquire the * source code with only those rights set forth herein. * * Any use of this source code in individual and commercial software must * include, in the user documentation and internal comments to the code, * the above Disclaimer and U.S. Government End Users Notice. */ /* Simple kernel demonstrating atomic functions in device code. */ #ifndef _SIMPLEATOMICS_KERNEL_H_ #define _SIMPLEATOMICS_KERNEL_H_ //////////////////////////////////////////////////////////////////////////////// //! Simple test kernel for atomic instructions //! @param g_idata input data in global memory //! @param g_odata output data in global memory //////////////////////////////////////////////////////////////////////////////// __global__ void testKernel(int* g_odata) { // access thread id const unsigned int tid = blockDim.x * blockIdx.x + threadIdx.x; // Test various atomic instructions // Arithmetic atomic instructions // Atomic addition atomicAdd(&g_odata[0], 10); // Atomic subtraction (final should be 0) atomicSub(&g_odata[1], 10); // Atomic exchange atomicExch(&g_odata[2], tid); // Atomic maximum atomicMax(&g_odata[3], tid); // Atomic minimum atomicMin(&g_odata[4], tid); // Atomic increment (modulo 17+1) atomicInc((unsigned int*)&g_odata[5], 17); // Atomic decrement atomicDec((unsigned int*)&g_odata[6], 137); // Atomic compare-and-swap atomicCAS(&g_odata[7], tid-1, tid); // Bitwise atomic instructions // Atomic AND atomicAnd(&g_odata[8], 2*tid+7); // Atomic OR atomicOr(&g_odata[9], 1 << tid); // Atomic XOR atomicXor(&g_odata[10], tid); } #endif // #ifndef _SIMPLEATOMICS_KERNEL_H_
8be56a05c90f236e237672e2b3016fea873e2d72.cu
/* * Copyright 1993-2007 NVIDIA Corporation. All rights reserved. * * NOTICE TO USER: * * This source code is subject to NVIDIA ownership rights under U.S. and * international Copyright laws. Users and possessors of this source code * are hereby granted a nonexclusive, royalty-free license to use this code * in individual and commercial software. * * NVIDIA MAKES NO REPRESENTATION ABOUT THE SUITABILITY OF THIS SOURCE * CODE FOR ANY PURPOSE. IT IS PROVIDED "AS IS" WITHOUT EXPRESS OR * IMPLIED WARRANTY OF ANY KIND. NVIDIA DISCLAIMS ALL WARRANTIES WITH * REGARD TO THIS SOURCE CODE, INCLUDING ALL IMPLIED WARRANTIES OF * MERCHANTABILITY, NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE. * IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY SPECIAL, INDIRECT, INCIDENTAL, * OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS * OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE * OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE * OR PERFORMANCE OF THIS SOURCE CODE. * * U.S. Government End Users. This source code is a "commercial item" as * that term is defined at 48 C.F.R. 2.101 (OCT 1995), consisting of * "commercial computer software" and "commercial computer software * documentation" as such terms are used in 48 C.F.R. 12.212 (SEPT 1995) * and is provided to the U.S. Government only as a commercial end item. * Consistent with 48 C.F.R.12.212 and 48 C.F.R. 227.7202-1 through * 227.7202-4 (JUNE 1995), all U.S. Government End Users acquire the * source code with only those rights set forth herein. * * Any use of this source code in individual and commercial software must * include, in the user documentation and internal comments to the code, * the above Disclaimer and U.S. Government End Users Notice. */ /* Simple kernel demonstrating atomic functions in device code. */ #ifndef _SIMPLEATOMICS_KERNEL_H_ #define _SIMPLEATOMICS_KERNEL_H_ //////////////////////////////////////////////////////////////////////////////// //! Simple test kernel for atomic instructions //! @param g_idata input data in global memory //! @param g_odata output data in global memory //////////////////////////////////////////////////////////////////////////////// __global__ void testKernel(int* g_odata) { // access thread id const unsigned int tid = blockDim.x * blockIdx.x + threadIdx.x; // Test various atomic instructions // Arithmetic atomic instructions // Atomic addition atomicAdd(&g_odata[0], 10); // Atomic subtraction (final should be 0) atomicSub(&g_odata[1], 10); // Atomic exchange atomicExch(&g_odata[2], tid); // Atomic maximum atomicMax(&g_odata[3], tid); // Atomic minimum atomicMin(&g_odata[4], tid); // Atomic increment (modulo 17+1) atomicInc((unsigned int*)&g_odata[5], 17); // Atomic decrement atomicDec((unsigned int*)&g_odata[6], 137); // Atomic compare-and-swap atomicCAS(&g_odata[7], tid-1, tid); // Bitwise atomic instructions // Atomic AND atomicAnd(&g_odata[8], 2*tid+7); // Atomic OR atomicOr(&g_odata[9], 1 << tid); // Atomic XOR atomicXor(&g_odata[10], tid); } #endif // #ifndef _SIMPLEATOMICS_KERNEL_H_
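// Note: unlike atomicAdd/atomicSub, the atomicInc/atomicDec calls exercised above wrap around a
// bound. With old = *address:
//   atomicInc(address, val) stores (old >= val) ? 0 : old + 1   -- so the test counts modulo 18 for val = 17
//   atomicDec(address, val) stores (old == 0 || old > val) ? val : old - 1
// A common use of that wrap-around is claiming slots in a fixed-size ring buffer
// (hypothetical kernel, illustrative only):
__global__ void ringBufferClaim(unsigned int* head, int* slots, int value)
{
    // atomicInc returns the previous value, so each thread receives a slot index in 0..255,
    // wrapping back to 0 once the 256-entry buffer has been cycled through.
    unsigned int slot = atomicInc(head, 255u);
    slots[slot] = value;
}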
225159338c4ba331f4d3203d7119bf5b111f47d9.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "includes.h" __global__ void shuffleGene(float *gene, float *fit, float *rSeed, int* metaData) { const int idx = threadIdx.x + blockDim.x*blockIdx.x; int nGene = metaData[1]; int nHalf = nGene / 2; if(idx >= nHalf) return; /* only threads for first-half genes do work */ int Offset = int(nHalf/5.3); int j = nHalf + (idx + Offset)%nHalf; for(int k=0; k<6; k++) { /* swap the 6 components of gene idx with gene j */ float t = gene[idx*6+k]; gene[idx*6+k] = gene[j*6+k]; gene[j*6+k] = t; } /* swap the fitness once per pair; doing it inside the 6-iteration loop performs an even number of swaps and leaves fit unchanged */ float t = fit[idx]; fit[idx] = fit[j]; fit[j] = t; }
225159338c4ba331f4d3203d7119bf5b111f47d9.cu
#include "includes.h" __global__ void shuffleGene(float *gene, float *fit, float *rSeed, int* metaData) { const int idx = threadIdx.x + blockDim.x*blockIdx.x; int nGene = metaData[1]; int nHalf = nGene / 2; if(idx> nHalf) return; int Offset = int(nHalf/5.3); int j = nHalf + (idx + Offset)%nHalf; for(int k=0; k<6; k++) { float t = gene[idx*6+k]; gene[idx*6+k] = gene[j*6+k]; gene[j*6+k] = t; t = fit[idx]; fit[idx] = fit[j]; fit[j] = t; } }
2e645d4106d105792d33e253ba04d41b28d80f69.hip
// !!! This is a file automatically generated by hipify!!! #include "kernel_VoxelMap.cuh" #include <cassert> #include <hip/hip_runtime.h> #include <hip/hip_runtime.h> #include <device_launch_parameters.h> #include "cuda_analyticGeometry.h" #include "cuda_error_check.h" #include "cuda_math_utils.h" namespace sfs { namespace cuda { #define MAX_NUM_CLUSTER 254 // Index Type is uchar and 0 is a marker for "no singular view ray" #define MAX_NUM_FACES 256 // TODO: Multiple kernel passes when this number is exceeded. Maybe not using constant memory is faster then #define MAX_NUM_CAMERAS 16 // TODO: An zentralerer Stelle festlegen? __constant__ DeviceVoxelCluster const_deviceCluster[MAX_NUM_CLUSTER]; __constant__ Face const_deviceFaces[MAX_NUM_FACES]; __constant__ float3 const_deviceCameraCenters[MAX_NUM_CAMERAS]; __host__ __device__ bool intersectsInPlane(const Roi3DF & roi, const DeviceViewRay & ray) { float3 points[4]; float3 directions[4]; points[0] = make_float3(roi.x1, roi.y1, 0); points[1] = make_float3(roi.x1, roi.y1, 0); points[2] = make_float3(roi.x2, roi.y2, 0); points[3] = make_float3(roi.x2, roi.y2, 0); directions[0] = make_float3(roi.x2 - roi.x1, roi.y1 - roi.y1, 0); directions[1] = make_float3(roi.x1 - roi.x1, roi.y2 - roi.y1, 0); directions[2] = make_float3(roi.x2 - roi.x2, roi.y1 - roi.y2, 0); directions[3] = make_float3(roi.x1 - roi.x2, roi.y2 - roi.y2, 0); const float3 rayOrigin = make_float3(ray.origin.x, ray.origin.y, 0); const float3 rayDirection = make_float3(ray.ray.x, ray.ray.y, 0); //#pragma unroll 4 for (int i = 0; i < 4; ++i) { float3 closestPoint; float scalar1, scalar2; intersectLineAndLine(&closestPoint, rayOrigin, rayDirection, points[i], directions[i], &scalar1, &scalar2); if (scalar2 >= 0 && scalar2 <= norm(directions[i])) // scalar2 is for x = p2 + scala2 * (v2 / v2.norm()) { return true; } } return false; } __device__ inline bool isInsideImage(uint x, uint y, uint2 imgSize) { return x < imgSize.x && y < imgSize.y; } __device__ inline bool isValidPixelMapOffset(uint idxVoxel, uint idxCorner, uint idxCamera, uint3 numVoxel, uint numCorners, uint numCameras) { return idxVoxel < prod(numVoxel) && idxCorner < numCorners && idxCamera < numCameras; } __global__ void checkForSingularViewRays(DeviceViewRay * pViewRays, unsigned char * pImagesSegmentation, unsigned char * viewRayMaps, uint2 imgSize, uint numImages, uint numCluster) { const uint idxX = blockIdx.x * blockDim.x + threadIdx.x; const uint idxY = blockIdx.y * blockDim.y + threadIdx.y; const uint idxCam = blockIdx.z; /* * Erzeugt ein Ausgabebild wo jeder Pixel markiert ist mit dem Index des Clusters den der Sichtstrahl einzigartig schneidet. 
* -> Wir brauchen auch eine HostVoxelMap um die Pixel dann den Sichtstrahlen zuzuordnen */ if (isInsideImage(idxX, idxY, imgSize)) { const uint offsetImage = idxCam * imgSize.x * imgSize.y; const uint offsetPixel = idxY * imgSize.x + idxX; uint counterIntersections = 0; for (uint idxCluster = 0; idxCluster < numCluster; ++idxCluster) { const bool intersectsCluster = intersectsInPlane(const_deviceCluster[idxCluster].boundingBox, pViewRays[offsetImage + offsetPixel]); if (intersectsCluster && pImagesSegmentation[offsetImage + offsetPixel] != 0) { ++counterIntersections; if (counterIntersections == 1) { viewRayMaps[offsetImage + offsetPixel] = idxCluster + 1; // 0 means no cluster therefore we have to start at 1 } } } } } __global__ void removePixelMapEntriesToNonVisiblePoints(int2 * pPixelMaps, Voxel * pVoxel, uint3 numVoxel, uint numCorners, uint numCameras, uint numFaces) { const uint idxVoxel = blockIdx.x * blockDim.x + threadIdx.x; const uint idxCorner = blockIdx.y * blockDim.y + threadIdx.y; const uint idxCamera = blockIdx.z; const uint pixelMapOffset = idxVoxel * numCorners * numCameras + idxCorner * numCameras + idxCamera; if (isValidPixelMapOffset(idxVoxel, idxCorner, idxCamera, numVoxel, numCorners, numCameras)) { const float3 & cameraCenter = const_deviceCameraCenters[idxCamera]; const float3 & voxelCorner = pVoxel[idxVoxel].getCorners()[idxCorner]; bool occluded = false; for (uint idxFace = 0; idxFace < numFaces; ++idxFace) { const Face & face = const_deviceFaces[idxFace]; if (face.isOccluded(voxelCorner, cameraCenter)) { occluded = true; } } if (occluded && pPixelMaps[pixelMapOffset].x != INT_MIN && pPixelMaps[pixelMapOffset].y != INT_MIN) { pPixelMaps[pixelMapOffset].x = -pPixelMaps[pixelMapOffset].x; // A negative value != INT_MIN means occluded pPixelMaps[pixelMapOffset].y = -pPixelMaps[pixelMapOffset].y; } } } void call_checkForSingularViewRays(const std::vector<DeviceVoxelCluster> & voxelCluster, DeviceViewRay * p_dev_viewRays, unsigned char * p_dev_imagesSegmentation, unsigned char * p_dev_viewRayMaps, uint numImages, uint2 imageSize) { const uint numCluster = static_cast<uint>(voxelCluster.size()); assert(numCluster < MAX_NUM_CLUSTER); if (numCluster > MAX_NUM_CLUSTER) { throw std::runtime_error("Too many clusters. Are you sure the previous processing works correctly?"); } cudaSafeCall(hipMemcpyToSymbol(const_deviceCluster, voxelCluster.data(), numCluster * sizeof(DeviceVoxelCluster))); dim3 blockSize(32, 32); dim3 gridSize((imageSize.x + 31) / 32, (imageSize.y + 31) / 32, numImages); checkForSingularViewRays << <gridSize, blockSize >> > (p_dev_viewRays, p_dev_imagesSegmentation, p_dev_viewRayMaps, imageSize, numImages, numCluster); cudaCheckError(); } void call_removePixelMapEntriesToNonVisiblePoints(int2 * p_dev_pixelMap, Voxel * p_dev_voxel, const std::vector<Face> & faces, std::vector<float3> cameraCenters, uint3 numVoxel) { const uint numCameras = static_cast<uint>(cameraCenters.size()); if (numCameras > MAX_NUM_CAMERAS) { throw std::runtime_error("Too much cameras."); } cudaSafeCall(hipMemcpyToSymbol(const_deviceCameraCenters, cameraCenters.data(), numCameras * sizeof(float3))); dim3 blockSize(128, 8); // Voxels have 8 Corners. I don't think this will change anytime soon. dim3 gridSize((prod(numVoxel) + 127) / 128, 1, numCameras); uint numFaces = static_cast<uint>(faces.size()); size_t batchOffset = 0; std::cout << "Calculating Voxel occlusions with " << numFaces << " Faces in batches of " << MAX_NUM_FACES << "." 
<< std::endl; while (numFaces > 0) { const uint batchSize = numFaces > MAX_NUM_FACES ? MAX_NUM_FACES : numFaces; cudaSafeCall(hipMemcpyToSymbol(const_deviceFaces, faces.data() + batchOffset, batchSize * sizeof(Face))); removePixelMapEntriesToNonVisiblePoints << <gridSize, blockSize >> > (p_dev_pixelMap, p_dev_voxel, numVoxel, 8, numCameras, batchSize); auto e = hipDeviceSynchronize(); if( e != hipSuccess) { std::cout << "\nError on launch:\n" << hipGetErrorString(e); } cudaCheckError(); numFaces -= batchSize; batchOffset += batchSize; std::cout << "\r" << batchOffset << " | " << numFaces << " done. "; } std::cout << "\rDone " << std::endl; } } }
2e645d4106d105792d33e253ba04d41b28d80f69.cu
#include "kernel_VoxelMap.cuh" #include <cassert> #include <cuda.h> #include <cuda_runtime.h> #include <device_launch_parameters.h> #include "cuda_analyticGeometry.h" #include "cuda_error_check.h" #include "cuda_math_utils.h" namespace sfs { namespace cuda { #define MAX_NUM_CLUSTER 254 // Index Type is uchar and 0 is a marker for "no singular view ray" #define MAX_NUM_FACES 256 // TODO: Multiple kernel passes when this number is exceeded. Maybe not using constant memory is faster then #define MAX_NUM_CAMERAS 16 // TODO: An zentralerer Stelle festlegen? __constant__ DeviceVoxelCluster const_deviceCluster[MAX_NUM_CLUSTER]; __constant__ Face const_deviceFaces[MAX_NUM_FACES]; __constant__ float3 const_deviceCameraCenters[MAX_NUM_CAMERAS]; __host__ __device__ bool intersectsInPlane(const Roi3DF & roi, const DeviceViewRay & ray) { float3 points[4]; float3 directions[4]; points[0] = make_float3(roi.x1, roi.y1, 0); points[1] = make_float3(roi.x1, roi.y1, 0); points[2] = make_float3(roi.x2, roi.y2, 0); points[3] = make_float3(roi.x2, roi.y2, 0); directions[0] = make_float3(roi.x2 - roi.x1, roi.y1 - roi.y1, 0); directions[1] = make_float3(roi.x1 - roi.x1, roi.y2 - roi.y1, 0); directions[2] = make_float3(roi.x2 - roi.x2, roi.y1 - roi.y2, 0); directions[3] = make_float3(roi.x1 - roi.x2, roi.y2 - roi.y2, 0); const float3 rayOrigin = make_float3(ray.origin.x, ray.origin.y, 0); const float3 rayDirection = make_float3(ray.ray.x, ray.ray.y, 0); //#pragma unroll 4 for (int i = 0; i < 4; ++i) { float3 closestPoint; float scalar1, scalar2; intersectLineAndLine(&closestPoint, rayOrigin, rayDirection, points[i], directions[i], &scalar1, &scalar2); if (scalar2 >= 0 && scalar2 <= norm(directions[i])) // scalar2 is for x = p2 + scala2 * (v2 / v2.norm()) { return true; } } return false; } __device__ inline bool isInsideImage(uint x, uint y, uint2 imgSize) { return x < imgSize.x && y < imgSize.y; } __device__ inline bool isValidPixelMapOffset(uint idxVoxel, uint idxCorner, uint idxCamera, uint3 numVoxel, uint numCorners, uint numCameras) { return idxVoxel < prod(numVoxel) && idxCorner < numCorners && idxCamera < numCameras; } __global__ void checkForSingularViewRays(DeviceViewRay * pViewRays, unsigned char * pImagesSegmentation, unsigned char * viewRayMaps, uint2 imgSize, uint numImages, uint numCluster) { const uint idxX = blockIdx.x * blockDim.x + threadIdx.x; const uint idxY = blockIdx.y * blockDim.y + threadIdx.y; const uint idxCam = blockIdx.z; /* * Erzeugt ein Ausgabebild wo jeder Pixel markiert ist mit dem Index des Clusters den der Sichtstrahl einzigartig schneidet. 
* -> Wir brauchen auch eine HostVoxelMap um die Pixel dann den Sichtstrahlen zuzuordnen */ if (isInsideImage(idxX, idxY, imgSize)) { const uint offsetImage = idxCam * imgSize.x * imgSize.y; const uint offsetPixel = idxY * imgSize.x + idxX; uint counterIntersections = 0; for (uint idxCluster = 0; idxCluster < numCluster; ++idxCluster) { const bool intersectsCluster = intersectsInPlane(const_deviceCluster[idxCluster].boundingBox, pViewRays[offsetImage + offsetPixel]); if (intersectsCluster && pImagesSegmentation[offsetImage + offsetPixel] != 0) { ++counterIntersections; if (counterIntersections == 1) { viewRayMaps[offsetImage + offsetPixel] = idxCluster + 1; // 0 means no cluster therefore we have to start at 1 } } } } } __global__ void removePixelMapEntriesToNonVisiblePoints(int2 * pPixelMaps, Voxel * pVoxel, uint3 numVoxel, uint numCorners, uint numCameras, uint numFaces) { const uint idxVoxel = blockIdx.x * blockDim.x + threadIdx.x; const uint idxCorner = blockIdx.y * blockDim.y + threadIdx.y; const uint idxCamera = blockIdx.z; const uint pixelMapOffset = idxVoxel * numCorners * numCameras + idxCorner * numCameras + idxCamera; if (isValidPixelMapOffset(idxVoxel, idxCorner, idxCamera, numVoxel, numCorners, numCameras)) { const float3 & cameraCenter = const_deviceCameraCenters[idxCamera]; const float3 & voxelCorner = pVoxel[idxVoxel].getCorners()[idxCorner]; bool occluded = false; for (uint idxFace = 0; idxFace < numFaces; ++idxFace) { const Face & face = const_deviceFaces[idxFace]; if (face.isOccluded(voxelCorner, cameraCenter)) { occluded = true; } } if (occluded && pPixelMaps[pixelMapOffset].x != INT_MIN && pPixelMaps[pixelMapOffset].y != INT_MIN) { pPixelMaps[pixelMapOffset].x = -pPixelMaps[pixelMapOffset].x; // A negative value != INT_MIN means occluded pPixelMaps[pixelMapOffset].y = -pPixelMaps[pixelMapOffset].y; } } } void call_checkForSingularViewRays(const std::vector<DeviceVoxelCluster> & voxelCluster, DeviceViewRay * p_dev_viewRays, unsigned char * p_dev_imagesSegmentation, unsigned char * p_dev_viewRayMaps, uint numImages, uint2 imageSize) { const uint numCluster = static_cast<uint>(voxelCluster.size()); assert(numCluster < MAX_NUM_CLUSTER); if (numCluster > MAX_NUM_CLUSTER) { throw std::runtime_error("Too many clusters. Are you sure the previous processing works correctly?"); } cudaSafeCall(cudaMemcpyToSymbol(const_deviceCluster, voxelCluster.data(), numCluster * sizeof(DeviceVoxelCluster))); dim3 blockSize(32, 32); dim3 gridSize((imageSize.x + 31) / 32, (imageSize.y + 31) / 32, numImages); checkForSingularViewRays << <gridSize, blockSize >> > (p_dev_viewRays, p_dev_imagesSegmentation, p_dev_viewRayMaps, imageSize, numImages, numCluster); cudaCheckError(); } void call_removePixelMapEntriesToNonVisiblePoints(int2 * p_dev_pixelMap, Voxel * p_dev_voxel, const std::vector<Face> & faces, std::vector<float3> cameraCenters, uint3 numVoxel) { const uint numCameras = static_cast<uint>(cameraCenters.size()); if (numCameras > MAX_NUM_CAMERAS) { throw std::runtime_error("Too much cameras."); } cudaSafeCall(cudaMemcpyToSymbol(const_deviceCameraCenters, cameraCenters.data(), numCameras * sizeof(float3))); dim3 blockSize(128, 8); // Voxels have 8 Corners. I don't think this will change anytime soon. dim3 gridSize((prod(numVoxel) + 127) / 128, 1, numCameras); uint numFaces = static_cast<uint>(faces.size()); size_t batchOffset = 0; std::cout << "Calculating Voxel occlusions with " << numFaces << " Faces in batches of " << MAX_NUM_FACES << "." 
<< std::endl; while (numFaces > 0) { const uint batchSize = numFaces > MAX_NUM_FACES ? MAX_NUM_FACES : numFaces; cudaSafeCall(cudaMemcpyToSymbol(const_deviceFaces, faces.data() + batchOffset, batchSize * sizeof(Face))); removePixelMapEntriesToNonVisiblePoints << <gridSize, blockSize >> > (p_dev_pixelMap, p_dev_voxel, numVoxel, 8, numCameras, batchSize); auto e = cudaDeviceSynchronize(); if( e != cudaSuccess) { std::cout << "\nError on launch:\n" << cudaGetErrorString(e); } cudaCheckError(); numFaces -= batchSize; batchOffset += batchSize; std::cout << "\r" << batchOffset << " | " << numFaces << " done. "; } std::cout << "\rDone " << std::endl; } } }
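The occlusion pass in the file above encodes three states into each int2 pixel-map entry: components equal to INT_MIN mean there is no entry, a negated (non-INT_MIN) coordinate pair means the voxel corner is occluded for that camera, and anything else means it is visible. The following host-side helper only illustrates how a consumer could read that convention back; classifyPixelMapEntry and the PixelState enum are made up here and are not part of the project above.

#include <climits>
#include <vector_types.h>   // int2

enum class PixelState { NoEntry, Occluded, Visible };

// Mirrors the convention used by removePixelMapEntriesToNonVisiblePoints:
// INT_MIN marks an absent entry, negated coordinates mark an occluded one.
// (Note: an entry at pixel (0, 0) cannot be flagged by negation, since -0 == 0.)
inline PixelState classifyPixelMapEntry(int2 e) {
    if (e.x == INT_MIN || e.y == INT_MIN) return PixelState::NoEntry;
    if (e.x < 0 || e.y < 0)               return PixelState::Occluded;
    return PixelState::Visible;
}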
68de8a7ac0ef4253aab3439bffa2317e6216d074.hip
// !!! This is a file automatically generated by hipify!!! /*************************************************************************************** GpuShareSat -- Copyright (c) 2020, Nicolas Prevot Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. **************************************************************************************************/ #include <boost/test/unit_test.hpp> #include <hip/hip_runtime.h> #include "gpuShareLib/ContigCopy.cuh" #include "gpuShareLib/GpuUtils.cuh" #include "gpuShareLib/Reporter.cuh" #include "../testUtils/TestHelper.cuh" namespace Glucose { inline void clearObj(int a) { // do nothing } } #include "../gpu/GpuHelpedSolver.h" namespace GpuShare { BOOST_AUTO_TEST_SUITE(OtherTest) __device__ void incrArr(DArr<int> arr) { for (int i = 0; i < arr.size(); i++) { arr[i] ++; } } __global__ void dTestIncrease(DArr<int> arr1, DArr<int> arr2) { incrArr(arr1); incrArr(arr2); } __global__ void dTestIncrease(DArr<int> arr) { incrArr(arr); } __global__ void dTestIncrease(int *v) { atomicAdd(v, 1); } __global__ void dTestSetAt(int *v, int val) { *v = val; } BOOST_AUTO_TEST_CASE(testContigCopyArr) { StreamPointer sp; Logger logger {2, directPrint}; ContigCopier copier(logger); ArrPair<int> ap1 = copier.buildArrPair<int>(4, NULL); ArrPair<int> ap2 = copier.buildArrPair<int>(3, NULL); BOOST_CHECK_EQUAL(4, ap1.getHArr().size()); BOOST_CHECK_EQUAL(4, ap1.getDArr().size()); BOOST_CHECK_EQUAL(3, ap2.getHArr().size()); BOOST_CHECK_EQUAL(3, ap2.getDArr().size()); ap1.getHArr()[3] = 2; ap2.getHArr()[1] = 7; BOOST_CHECK(copier.tryCopyAsync(hipMemcpyHostToDevice, sp.get())); hipLaunchKernelGGL(( dTestIncrease), dim3(1), dim3(1), 0, sp.get(), ap1.getDArr(), ap2.getDArr()); BOOST_CHECK(copier.tryCopyAsync(hipMemcpyDeviceToHost, sp.get())); exitIfError(hipStreamSynchronize(sp.get()), POSITION); BOOST_CHECK_EQUAL(3, ap1.getHArr()[3]); BOOST_CHECK_EQUAL(8, ap2.getHArr()[1]); } BOOST_AUTO_TEST_CASE(testContigCopyDeviceToHostOnly) { StreamPointer sp; Logger logger {2, directPrint}; ContigCopier copier(logger); ArrPair<int> ap = copier.buildArrPair<int>(1, NULL); hipLaunchKernelGGL(( dTestSetAt), dim3(1), dim3(1), 0, sp.get(), ap.getDArr().getPtr(), 3); BOOST_CHECK(copier.tryCopyAsync(hipMemcpyDeviceToHost, sp.get())); exitIfError(hipStreamSynchronize(sp.get()), POSITION); BOOST_CHECK_EQUAL(3, ap.getHArr()[0]); } // CUDA says that to read from a pointer of size 4, the address must be a multiple of 4 // Test that we don't get a misalignment if we have sizes 1 and 4 BOOST_AUTO_TEST_CASE(testContigAlignment) { StreamPointer sp; Logger logger 
{2, directPrint}; ContigCopier copier(logger); ArrPair<bool> opb = copier.buildArrPair<bool>(1, NULL); ArrPair<int> opi = copier.buildArrPair<int>(1, NULL); opi.getHArr()[0] = 6; BOOST_CHECK(copier.tryCopyAsync(hipMemcpyHostToDevice, sp.get())); hipLaunchKernelGGL(( dTestIncrease), dim3(1), dim3(1), 0, sp.get(), opi.getDArr().getPtr()); BOOST_CHECK(copier.tryCopyAsync(hipMemcpyDeviceToHost, sp.get())); exitIfError(hipStreamSynchronize(sp.get()), POSITION); BOOST_CHECK_EQUAL(7, opi.getHArr()[0]); } BOOST_AUTO_TEST_CASE(testContigResize) { StreamPointer sp; Logger logger {2, directPrint}; ContigCopier copier(logger); ArrPair<int> opi = copier.buildArrPair<int>(1, NULL); opi.increaseSize(2); opi.getHArr()[0] = 6; opi.getHArr()[1] = 7; BOOST_CHECK(copier.tryCopyAsync(hipMemcpyHostToDevice, sp.get())); hipLaunchKernelGGL(( dTestIncrease), dim3(1), dim3(1), 0, sp.get(), opi.getDArr()); BOOST_CHECK(copier.tryCopyAsync(hipMemcpyDeviceToHost, sp.get())); exitIfError(hipStreamSynchronize(sp.get()), POSITION); printf("s is %d\n", copier.getSize()); BOOST_CHECK_EQUAL(7, opi.getHArr()[0]); BOOST_CHECK_EQUAL(8, opi.getHArr()[1]); } __global__ void dClear(DReporter<int> rep) { rep.clear(); } __global__ void dReport(int v, DReporter<int> rep) { rep.report(v, getThreadId()); } BOOST_AUTO_TEST_CASE(RollingReportTestOne) { StreamPointer sp; Logger logger {2, directPrint}; ContigCopier cc(logger); { Reporter<int> rr(cc, sp.get(), 3, 1); auto dReporter = rr.getDReporter(); hipLaunchKernelGGL(( dClear), dim3(1), dim3(1), 0, sp.get(), dReporter); hipLaunchKernelGGL(( dReport), dim3(1), dim3(1), 0, sp.get(), 2, dReporter); hipLaunchKernelGGL(( dReport), dim3(1), dim3(1), 0, sp.get(), 5, dReporter); exitIfFalse(cc.tryCopyAsync(hipMemcpyDeviceToHost, sp.get()), POSITION); exitIfError(hipStreamSynchronize(sp.get()), POSITION); std::vector<int> l; rr.getCopiedToHost(l); BOOST_CHECK_EQUAL(2, l.size()); BOOST_CHECK_EQUAL(2, l[0]); BOOST_CHECK_EQUAL(5, l[1]); } cc.clear(false); { Reporter<int> rr(cc, sp.get(), 3, 1); auto dReporter = rr.getDReporter(); hipLaunchKernelGGL(( dClear), dim3(1), dim3(1), 0, sp.get(), dReporter); hipLaunchKernelGGL(( dReport), dim3(1), dim3(1), 0, sp.get(), 1, dReporter); hipLaunchKernelGGL(( dReport), dim3(1), dim3(1), 0, sp.get(), 7, dReporter); exitIfFalse(cc.tryCopyAsync(hipMemcpyDeviceToHost, sp.get()), POSITION); exitIfError(hipStreamSynchronize(sp.get()), POSITION); std::vector<int> l; rr.getCopiedToHost(l); BOOST_CHECK_EQUAL(2, l.size()); BOOST_CHECK_EQUAL(1, l[0]); BOOST_CHECK_EQUAL(7, l[1]); } } /* Failing test which checks the destr checks work fine BOOST_AUTO_TEST_CASE(destrCheckFail) { CorrespArr<int> car(4, false, false); DArr<int> darr = car.getDArr(); car.resize(2000, false); // we only resize the device once we get a darr DArr<int> darr2 = car.getDArr(); dTestIncrease<<<1, 1>>>(darr); exitIfError(hipDeviceSynchronize(), POSITION); } */ BOOST_AUTO_TEST_SUITE_END() }
68de8a7ac0ef4253aab3439bffa2317e6216d074.cu
/*************************************************************************************** GpuShareSat -- Copyright (c) 2020, Nicolas Prevot Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. **************************************************************************************************/ #include <boost/test/unit_test.hpp> #include <cuda.h> #include "gpuShareLib/ContigCopy.cuh" #include "gpuShareLib/GpuUtils.cuh" #include "gpuShareLib/Reporter.cuh" #include "../testUtils/TestHelper.cuh" namespace Glucose { inline void clearObj(int a) { // do nothing } } #include "../gpu/GpuHelpedSolver.h" namespace GpuShare { BOOST_AUTO_TEST_SUITE(OtherTest) __device__ void incrArr(DArr<int> arr) { for (int i = 0; i < arr.size(); i++) { arr[i] ++; } } __global__ void dTestIncrease(DArr<int> arr1, DArr<int> arr2) { incrArr(arr1); incrArr(arr2); } __global__ void dTestIncrease(DArr<int> arr) { incrArr(arr); } __global__ void dTestIncrease(int *v) { atomicAdd(v, 1); } __global__ void dTestSetAt(int *v, int val) { *v = val; } BOOST_AUTO_TEST_CASE(testContigCopyArr) { StreamPointer sp; Logger logger {2, directPrint}; ContigCopier copier(logger); ArrPair<int> ap1 = copier.buildArrPair<int>(4, NULL); ArrPair<int> ap2 = copier.buildArrPair<int>(3, NULL); BOOST_CHECK_EQUAL(4, ap1.getHArr().size()); BOOST_CHECK_EQUAL(4, ap1.getDArr().size()); BOOST_CHECK_EQUAL(3, ap2.getHArr().size()); BOOST_CHECK_EQUAL(3, ap2.getDArr().size()); ap1.getHArr()[3] = 2; ap2.getHArr()[1] = 7; BOOST_CHECK(copier.tryCopyAsync(cudaMemcpyHostToDevice, sp.get())); dTestIncrease<<<1, 1, 0, sp.get()>>>(ap1.getDArr(), ap2.getDArr()); BOOST_CHECK(copier.tryCopyAsync(cudaMemcpyDeviceToHost, sp.get())); exitIfError(cudaStreamSynchronize(sp.get()), POSITION); BOOST_CHECK_EQUAL(3, ap1.getHArr()[3]); BOOST_CHECK_EQUAL(8, ap2.getHArr()[1]); } BOOST_AUTO_TEST_CASE(testContigCopyDeviceToHostOnly) { StreamPointer sp; Logger logger {2, directPrint}; ContigCopier copier(logger); ArrPair<int> ap = copier.buildArrPair<int>(1, NULL); dTestSetAt<<<1, 1, 0, sp.get()>>>(ap.getDArr().getPtr(), 3); BOOST_CHECK(copier.tryCopyAsync(cudaMemcpyDeviceToHost, sp.get())); exitIfError(cudaStreamSynchronize(sp.get()), POSITION); BOOST_CHECK_EQUAL(3, ap.getHArr()[0]); } // CUDA says that to read from a pointer of size 4, the address must be a multiple of 4 // Test that we don't get a misalignment if we have sizes 1 and 4 BOOST_AUTO_TEST_CASE(testContigAlignment) { StreamPointer sp; Logger logger {2, directPrint}; ContigCopier copier(logger); ArrPair<bool> opb = copier.buildArrPair<bool>(1, NULL); ArrPair<int> opi = 
copier.buildArrPair<int>(1, NULL); opi.getHArr()[0] = 6; BOOST_CHECK(copier.tryCopyAsync(cudaMemcpyHostToDevice, sp.get())); dTestIncrease<<<1, 1, 0, sp.get()>>>(opi.getDArr().getPtr()); BOOST_CHECK(copier.tryCopyAsync(cudaMemcpyDeviceToHost, sp.get())); exitIfError(cudaStreamSynchronize(sp.get()), POSITION); BOOST_CHECK_EQUAL(7, opi.getHArr()[0]); } BOOST_AUTO_TEST_CASE(testContigResize) { StreamPointer sp; Logger logger {2, directPrint}; ContigCopier copier(logger); ArrPair<int> opi = copier.buildArrPair<int>(1, NULL); opi.increaseSize(2); opi.getHArr()[0] = 6; opi.getHArr()[1] = 7; BOOST_CHECK(copier.tryCopyAsync(cudaMemcpyHostToDevice, sp.get())); dTestIncrease<<<1, 1, 0, sp.get()>>>(opi.getDArr()); BOOST_CHECK(copier.tryCopyAsync(cudaMemcpyDeviceToHost, sp.get())); exitIfError(cudaStreamSynchronize(sp.get()), POSITION); printf("s is %d\n", copier.getSize()); BOOST_CHECK_EQUAL(7, opi.getHArr()[0]); BOOST_CHECK_EQUAL(8, opi.getHArr()[1]); } __global__ void dClear(DReporter<int> rep) { rep.clear(); } __global__ void dReport(int v, DReporter<int> rep) { rep.report(v, getThreadId()); } BOOST_AUTO_TEST_CASE(RollingReportTestOne) { StreamPointer sp; Logger logger {2, directPrint}; ContigCopier cc(logger); { Reporter<int> rr(cc, sp.get(), 3, 1); auto dReporter = rr.getDReporter(); dClear<<<1, 1, 0, sp.get()>>>(dReporter); dReport<<<1, 1, 0, sp.get()>>>(2, dReporter); dReport<<<1, 1, 0, sp.get()>>>(5, dReporter); exitIfFalse(cc.tryCopyAsync(cudaMemcpyDeviceToHost, sp.get()), POSITION); exitIfError(cudaStreamSynchronize(sp.get()), POSITION); std::vector<int> l; rr.getCopiedToHost(l); BOOST_CHECK_EQUAL(2, l.size()); BOOST_CHECK_EQUAL(2, l[0]); BOOST_CHECK_EQUAL(5, l[1]); } cc.clear(false); { Reporter<int> rr(cc, sp.get(), 3, 1); auto dReporter = rr.getDReporter(); dClear<<<1, 1, 0, sp.get()>>>(dReporter); dReport<<<1, 1, 0, sp.get()>>>(1, dReporter); dReport<<<1, 1, 0, sp.get()>>>(7, dReporter); exitIfFalse(cc.tryCopyAsync(cudaMemcpyDeviceToHost, sp.get()), POSITION); exitIfError(cudaStreamSynchronize(sp.get()), POSITION); std::vector<int> l; rr.getCopiedToHost(l); BOOST_CHECK_EQUAL(2, l.size()); BOOST_CHECK_EQUAL(1, l[0]); BOOST_CHECK_EQUAL(7, l[1]); } } /* Failing test which checks the destr checks work fine BOOST_AUTO_TEST_CASE(destrCheckFail) { CorrespArr<int> car(4, false, false); DArr<int> darr = car.getDArr(); car.resize(2000, false); // we only resize the device once we get a darr DArr<int> darr2 = car.getDArr(); dTestIncrease<<<1, 1>>>(darr); exitIfError(cudaDeviceSynchronize(), POSITION); } */ BOOST_AUTO_TEST_SUITE_END() }
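The comment in testContigAlignment states the constraint that makes padding necessary when values of different sizes are packed into one contiguous buffer: a 4-byte load must happen at an address that is a multiple of 4. A minimal sketch of that rule in plain C++ (alignUp is a hypothetical helper, not part of GpuShareLib): placing a 1-byte bool at offset 0 forces the int that follows to start at offset alignof(int) (4 on the platforms this code targets), not at offset 1.

#include <cstddef>

// Round `offset` up to the next multiple of `alignment` (assumed non-zero).
constexpr std::size_t alignUp(std::size_t offset, std::size_t alignment) {
    return (offset + alignment - 1) / alignment * alignment;
}

// A bool occupies byte 0; the int that follows must not start at byte 1.
static_assert(alignUp(1, alignof(int)) == alignof(int), "the int is placed at an aligned offset, not at 1");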
22cff4e0c50b0904dc919f6c288836b659bdabdb.hip
// !!! This is a file automatically generated by hipify!!! #include "backend/gpu_tensor.h" #include <hip/hip_runtime.h> #include <hip/hip_runtime_api.h> #include <thrust/host_vector.h> #include <thrust/device_vector.h> namespace blitz { template<typename DType> GPUTensor<DType>::~GPUTensor() { hipFree(this->data_); } template<typename DType> inline void GPUTensor<DType>::Fill(DType value) { if (value == 0) { hipMemset(this->data_, 0, sizeof(DType) * this->size()); } else { // TODO(Keren) thrust::device_ptr<DType> dptr = thrust::device_pointer_cast(this->data_); thrust::fill(dptr, dptr + this->size(), value); } } template<typename DType> inline void GPUTensor<DType>::Reshape() { } template<typename DType> inline DType* GPUTensor<DType>::Slice(size_t index) { // TODO(keren) error return this->data_ + index; } template<typename DType> inline const DType* GPUTensor<DType>::Slice(size_t index) const { // TODO(keren) error return this->data_ + index; } template<typename DType> inline void GPUTensor<DType>::Allocate() { hipMalloc(&(this->data_), sizeof(DType) * this->size()); this->Fill(0); } template<typename DType> inline void GPUTensor<DType>::OutputCSV(ofstream& ofs) const { } INSTANTIATE_TENSOR(GPUTensor); } // namespace blitz
22cff4e0c50b0904dc919f6c288836b659bdabdb.cu
#include "backend/gpu_tensor.h" #include <cuda.h> #include <cuda_runtime_api.h> #include <thrust/host_vector.h> #include <thrust/device_vector.h> namespace blitz { template<typename DType> GPUTensor<DType>::~GPUTensor() { cudaFree(this->data_); } template<typename DType> inline void GPUTensor<DType>::Fill(DType value) { if (value == 0) { cudaMemset(this->data_, 0, sizeof(DType) * this->size()); } else { // TODO(Keren) thrust::device_ptr<DType> dptr = thrust::device_pointer_cast(this->data_); thrust::fill(dptr, dptr + this->size(), value); } } template<typename DType> inline void GPUTensor<DType>::Reshape() { } template<typename DType> inline DType* GPUTensor<DType>::Slice(size_t index) { // TODO(keren) error return this->data_ + index; } template<typename DType> inline const DType* GPUTensor<DType>::Slice(size_t index) const { // TODO(keren) error return this->data_ + index; } template<typename DType> inline void GPUTensor<DType>::Allocate() { cudaMalloc(&(this->data_), sizeof(DType) * this->size()); this->Fill(0); } template<typename DType> inline void GPUTensor<DType>::OutputCSV(ofstream& ofs) const { } INSTANTIATE_TENSOR(GPUTensor); } // namespace blitz
74a3ca23485515577adb6065df6a28207080fdcc.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "binarize.cuh" #include <catboost/cuda/gpu_data/gpu_structures.h> #include <catboost/cuda/cuda_util/kernel/kernel_helpers.cuh> #include <catboost/cuda/cuda_util/kernel/random_gen.cuh> #include <catboost/cuda/cuda_util/kernel/fill.cuh> #include <contrib/libs/cub/cub/block/block_radix_sort.cuh> namespace NKernel { template <bool ATOMIC_UPDATE, int BLOCK_SIZE, int DOCS_PER_THREAD> __launch_bounds__(BLOCK_SIZE, 2) __global__ void BinarizeFloatFeatureImpl(TCFeature feature, const float* values, ui32 docCount, const float* borders, const ui32* gatherIndex, ui32* dst) { const ui32 i = (blockIdx.x * BLOCK_SIZE * DOCS_PER_THREAD + threadIdx.x); __shared__ float sharedBorders[256]; sharedBorders[0] = borders[0]; __syncthreads(); const int bordersCount = static_cast<int>(sharedBorders[0]); __syncthreads(); dst += feature.Offset * (ui64) docCount; if (threadIdx.x < bordersCount) { sharedBorders[threadIdx.x] = LdgWithFallback(borders, threadIdx.x + 1); } __syncthreads(); ui32 index[DOCS_PER_THREAD]; float featureValues[DOCS_PER_THREAD]; #pragma unroll for (int j = 0; j < DOCS_PER_THREAD; ++j) { index[j] = 0; const int idx = i + j * BLOCK_SIZE; if (idx < docCount) { const ui32 readIdx = gatherIndex ? StreamLoad(gatherIndex + idx) : idx; featureValues[j] = StreamLoad(values + readIdx); } } #pragma unroll for (int border = 0; border < bordersCount; ++border) { const float borderValue = sharedBorders[border]; #pragma unroll for (int j = 0; j < DOCS_PER_THREAD; ++j) { if (featureValues[j] > borderValue) { ++index[j]; } } } #pragma unroll for (int j = 0; j < DOCS_PER_THREAD; ++j) { const int idx = i + j * BLOCK_SIZE; if (idx < docCount) { if (ATOMIC_UPDATE) { atomicOr(dst + idx, (index[j] & feature.Mask) << feature.Shift); } else { ui32 bin = dst[idx]; bin |= (index[j] & feature.Mask) << feature.Shift; dst[idx] = bin; } } } } //smth like bootstrap for quantiles estimation template <ui32 BLOCK_SIZE> __global__ void FastGpuBordersImpl(const float* values, ui32 size, float* borders, ui32 bordersCount) { const int valuesPerThread = 2; using BlockRadixSort = cub::BlockRadixSort<float, BLOCK_SIZE, 2>; const int tid = threadIdx.x; float vals[valuesPerThread]; if (tid == 0 && blockIdx.x == 0) { borders[0] = bordersCount; } ui64 seed = (blockIdx.x * 6364136223846793005 + 1442695040888963407) + (1664525 * threadIdx.x + 1013904223) & 0xFFFFFF; for (int i = 0; i < valuesPerThread; ++i) { const int idx = static_cast<const int>(AdvanceSeed(&seed) % size); vals[i] = StreamLoad(values + idx); } { using TTempStorage = typename BlockRadixSort::TempStorage; __shared__ TTempStorage temp; BlockRadixSort(temp).Sort(vals); } float sum = 0; float weight = 0; for (int i = 0; i < valuesPerThread; ++i) { sum += vals[i]; weight += 1.0f; } __shared__ float localBorders[BLOCK_SIZE]; localBorders[tid] = sum / weight; __syncthreads(); if (tid < bordersCount) { const ui32 offset = static_cast<ui32>((tid + 1.0f) * BLOCK_SIZE / (bordersCount + 1.0f)); atomicAdd(borders + tid + 1, localBorders[offset] / gridDim.x); } } __global__ void SortBordersImpl(float* borders, ui32 bordersCount) { using BlockRadixSort = cub::BlockRadixSort<float, 256, 1>; ui32 tid = threadIdx.x; float val[1]; val[0] = tid < bordersCount ? 
borders[tid] : PositiveInfty(); using TTempStorage = typename BlockRadixSort::TempStorage; __shared__ TTempStorage temp; BlockRadixSort(temp).Sort(val); if (tid < bordersCount) { borders[tid] = val[0]; } } void FastGpuBorders(const float* values, ui32 size, float* borders, ui32 bordersCount, TCudaStream stream) { FillBuffer(borders, 0.0f, bordersCount + 1, stream); const ui32 blockSize = 1024; const ui32 valuesPerBlock = 2 * blockSize; const ui32 numBlocks = min(CeilDivide(size, valuesPerBlock), 15); hipLaunchKernelGGL(( FastGpuBordersImpl<blockSize>), dim3(numBlocks), dim3(blockSize), 0, stream, values, size, borders, bordersCount); hipLaunchKernelGGL(( SortBordersImpl), dim3(1), dim3(256), 0, stream, borders + 1, bordersCount); } __global__ void QuantileBordersImpl(const float* sortedValues, ui32 size, float* borders, ui32 bordersCount) { const ui32 tid = threadIdx.x; __shared__ float localBorders[256]; if (tid < bordersCount) { const ui32 offset = static_cast<ui32>((tid + 1.0) * size / (bordersCount + 1)); localBorders[tid] = LdgWithFallback(sortedValues, offset); } __syncthreads(); if (tid <(bordersCount + 1)) { borders[tid] = tid == 0 ? bordersCount : localBorders[tid - 1]; } } __global__ void UniformBordersImpl(const float* values, ui32 size, float* borders, ui32 bordersCount) { const ui32 tid = threadIdx.x; const int blockSize = 1024; __shared__ float localMin[blockSize]; __shared__ float localMax[blockSize]; float minValue = PositiveInfty(); float maxValue = NegativeInfty(); ui64 seed = (1664525 * threadIdx.x + 1013904223) & 0xFFFFFF; #pragma unroll 32 for (int i = 0; i < 32; ++i) { const int idx = static_cast<const int>(AdvanceSeed(&seed) % size); float val = StreamLoad(values + idx); minValue = val < minValue ? val : minValue; maxValue = val > maxValue ? val : maxValue; } localMin[tid] = minValue * 0.999; localMax[tid] = maxValue * 1.001; __syncthreads(); for (ui32 s = blockSize >> 1; s > 0; s >>= 1) { if (tid < s) { localMin[tid] = min(localMin[tid], localMin[tid + s]); localMax[tid] = max(localMax[tid], localMax[tid + s]); } __syncthreads(); } minValue = localMin[0]; maxValue = localMax[0]; if (tid < (bordersCount + 1)) { const float borderIdx = (tid / (bordersCount + 1.0)); borders[tid] = tid == 0 ? bordersCount : minValue + borderIdx * (maxValue - minValue); } } void ComputeQuantileBorders(const float* values, ui32 size, float* borders, ui32 bordersCount, TCudaStream stream) { hipLaunchKernelGGL(( QuantileBordersImpl), dim3(1), dim3(256), 0, stream , values, size, borders, bordersCount); } void ComputeUniformBorders(const float* values, ui32 size, float* borders, ui32 bordersCount, TCudaStream stream) { hipLaunchKernelGGL(( UniformBordersImpl), dim3(1), dim3(1024), 0, stream , values, size, borders, bordersCount); } void BinarizeFloatFeature(const float* values, ui32 docCount, const float* borders, TCFeature feature, ui32* dst, const ui32* gatherIndex, bool atomicUpdate, TCudaStream stream) { const ui32 blockSize = 1024; const ui32 docsPerThread = 8; const ui32 numBlocks = (docCount + docsPerThread * blockSize - 1) / (docsPerThread * blockSize); if (atomicUpdate) { BinarizeFloatFeatureImpl<true, blockSize, docsPerThread> << < numBlocks, blockSize, 0, stream >> > (feature, values, docCount, borders, gatherIndex, dst); } else { BinarizeFloatFeatureImpl<false, blockSize, docsPerThread> << < numBlocks, blockSize, 0, stream >> > (feature, values, docCount, borders, gatherIndex, dst); } } }
74a3ca23485515577adb6065df6a28207080fdcc.cu
#include "binarize.cuh" #include <catboost/cuda/gpu_data/gpu_structures.h> #include <catboost/cuda/cuda_util/kernel/kernel_helpers.cuh> #include <catboost/cuda/cuda_util/kernel/random_gen.cuh> #include <catboost/cuda/cuda_util/kernel/fill.cuh> #include <contrib/libs/cub/cub/block/block_radix_sort.cuh> namespace NKernel { template <bool ATOMIC_UPDATE, int BLOCK_SIZE, int DOCS_PER_THREAD> __launch_bounds__(BLOCK_SIZE, 2) __global__ void BinarizeFloatFeatureImpl(TCFeature feature, const float* values, ui32 docCount, const float* borders, const ui32* gatherIndex, ui32* dst) { const ui32 i = (blockIdx.x * BLOCK_SIZE * DOCS_PER_THREAD + threadIdx.x); __shared__ float sharedBorders[256]; sharedBorders[0] = borders[0]; __syncthreads(); const int bordersCount = static_cast<int>(sharedBorders[0]); __syncthreads(); dst += feature.Offset * (ui64) docCount; if (threadIdx.x < bordersCount) { sharedBorders[threadIdx.x] = LdgWithFallback(borders, threadIdx.x + 1); } __syncthreads(); ui32 index[DOCS_PER_THREAD]; float featureValues[DOCS_PER_THREAD]; #pragma unroll for (int j = 0; j < DOCS_PER_THREAD; ++j) { index[j] = 0; const int idx = i + j * BLOCK_SIZE; if (idx < docCount) { const ui32 readIdx = gatherIndex ? StreamLoad(gatherIndex + idx) : idx; featureValues[j] = StreamLoad(values + readIdx); } } #pragma unroll for (int border = 0; border < bordersCount; ++border) { const float borderValue = sharedBorders[border]; #pragma unroll for (int j = 0; j < DOCS_PER_THREAD; ++j) { if (featureValues[j] > borderValue) { ++index[j]; } } } #pragma unroll for (int j = 0; j < DOCS_PER_THREAD; ++j) { const int idx = i + j * BLOCK_SIZE; if (idx < docCount) { if (ATOMIC_UPDATE) { atomicOr(dst + idx, (index[j] & feature.Mask) << feature.Shift); } else { ui32 bin = dst[idx]; bin |= (index[j] & feature.Mask) << feature.Shift; dst[idx] = bin; } } } } //smth like bootstrap for quantiles estimation template <ui32 BLOCK_SIZE> __global__ void FastGpuBordersImpl(const float* values, ui32 size, float* borders, ui32 bordersCount) { const int valuesPerThread = 2; using BlockRadixSort = cub::BlockRadixSort<float, BLOCK_SIZE, 2>; const int tid = threadIdx.x; float vals[valuesPerThread]; if (tid == 0 && blockIdx.x == 0) { borders[0] = bordersCount; } ui64 seed = (blockIdx.x * 6364136223846793005 + 1442695040888963407) + (1664525 * threadIdx.x + 1013904223) & 0xFFFFFF; for (int i = 0; i < valuesPerThread; ++i) { const int idx = static_cast<const int>(AdvanceSeed(&seed) % size); vals[i] = StreamLoad(values + idx); } { using TTempStorage = typename BlockRadixSort::TempStorage; __shared__ TTempStorage temp; BlockRadixSort(temp).Sort(vals); } float sum = 0; float weight = 0; for (int i = 0; i < valuesPerThread; ++i) { sum += vals[i]; weight += 1.0f; } __shared__ float localBorders[BLOCK_SIZE]; localBorders[tid] = sum / weight; __syncthreads(); if (tid < bordersCount) { const ui32 offset = static_cast<ui32>((tid + 1.0f) * BLOCK_SIZE / (bordersCount + 1.0f)); atomicAdd(borders + tid + 1, localBorders[offset] / gridDim.x); } } __global__ void SortBordersImpl(float* borders, ui32 bordersCount) { using BlockRadixSort = cub::BlockRadixSort<float, 256, 1>; ui32 tid = threadIdx.x; float val[1]; val[0] = tid < bordersCount ? 
borders[tid] : PositiveInfty(); using TTempStorage = typename BlockRadixSort::TempStorage; __shared__ TTempStorage temp; BlockRadixSort(temp).Sort(val); if (tid < bordersCount) { borders[tid] = val[0]; } } void FastGpuBorders(const float* values, ui32 size, float* borders, ui32 bordersCount, TCudaStream stream) { FillBuffer(borders, 0.0f, bordersCount + 1, stream); const ui32 blockSize = 1024; const ui32 valuesPerBlock = 2 * blockSize; const ui32 numBlocks = min(CeilDivide(size, valuesPerBlock), 15); FastGpuBordersImpl<blockSize><<<numBlocks, blockSize, 0, stream>>>(values, size, borders, bordersCount); SortBordersImpl<<<1, 256, 0, stream>>>(borders + 1, bordersCount); } __global__ void QuantileBordersImpl(const float* sortedValues, ui32 size, float* borders, ui32 bordersCount) { const ui32 tid = threadIdx.x; __shared__ float localBorders[256]; if (tid < bordersCount) { const ui32 offset = static_cast<ui32>((tid + 1.0) * size / (bordersCount + 1)); localBorders[tid] = LdgWithFallback(sortedValues, offset); } __syncthreads(); if (tid <(bordersCount + 1)) { borders[tid] = tid == 0 ? bordersCount : localBorders[tid - 1]; } } __global__ void UniformBordersImpl(const float* values, ui32 size, float* borders, ui32 bordersCount) { const ui32 tid = threadIdx.x; const int blockSize = 1024; __shared__ float localMin[blockSize]; __shared__ float localMax[blockSize]; float minValue = PositiveInfty(); float maxValue = NegativeInfty(); ui64 seed = (1664525 * threadIdx.x + 1013904223) & 0xFFFFFF; #pragma unroll 32 for (int i = 0; i < 32; ++i) { const int idx = static_cast<const int>(AdvanceSeed(&seed) % size); float val = StreamLoad(values + idx); minValue = val < minValue ? val : minValue; maxValue = val > maxValue ? val : maxValue; } localMin[tid] = minValue * 0.999; localMax[tid] = maxValue * 1.001; __syncthreads(); for (ui32 s = blockSize >> 1; s > 0; s >>= 1) { if (tid < s) { localMin[tid] = min(localMin[tid], localMin[tid + s]); localMax[tid] = max(localMax[tid], localMax[tid + s]); } __syncthreads(); } minValue = localMin[0]; maxValue = localMax[0]; if (tid < (bordersCount + 1)) { const float borderIdx = (tid / (bordersCount + 1.0)); borders[tid] = tid == 0 ? bordersCount : minValue + borderIdx * (maxValue - minValue); } } void ComputeQuantileBorders(const float* values, ui32 size, float* borders, ui32 bordersCount, TCudaStream stream) { QuantileBordersImpl<<< 1, 256, 0, stream >>> (values, size, borders, bordersCount); } void ComputeUniformBorders(const float* values, ui32 size, float* borders, ui32 bordersCount, TCudaStream stream) { UniformBordersImpl<<< 1, 1024, 0, stream >>> (values, size, borders, bordersCount); } void BinarizeFloatFeature(const float* values, ui32 docCount, const float* borders, TCFeature feature, ui32* dst, const ui32* gatherIndex, bool atomicUpdate, TCudaStream stream) { const ui32 blockSize = 1024; const ui32 docsPerThread = 8; const ui32 numBlocks = (docCount + docsPerThread * blockSize - 1) / (docsPerThread * blockSize); if (atomicUpdate) { BinarizeFloatFeatureImpl<true, blockSize, docsPerThread> << < numBlocks, blockSize, 0, stream >> > (feature, values, docCount, borders, gatherIndex, dst); } else { BinarizeFloatFeatureImpl<false, blockSize, docsPerThread> << < numBlocks, blockSize, 0, stream >> > (feature, values, docCount, borders, gatherIndex, dst); } } }
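For each input value, BinarizeFloatFeatureImpl above counts how many borders the value strictly exceeds and packs that count into the output word via the feature's Mask and Shift; note that the kernel keeps the number of borders in borders[0] and the actual thresholds starting at borders[1]. A host-side reference of just the per-value counting step (illustrative only; binarizeValue is not a CatBoost function and takes the thresholds directly):

#include <cstdint>

// Bin index = number of thresholds the value strictly exceeds,
// the same comparison the kernel's inner loop performs per document.
static std::uint32_t binarizeValue(float value, const float* thresholds, int thresholdCount) {
    std::uint32_t bin = 0;
    for (int i = 0; i < thresholdCount; ++i) {
        if (value > thresholds[i]) {
            ++bin;
        }
    }
    return bin;
}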
4df62a63e99501cd8a906206f86924e1363fbf30.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"

__global__ void zupdate2_dummy(float *z1, float *z2, float *f, float tau, int nx, int ny)
{
    int px = blockIdx.x * blockDim.x + threadIdx.x;
    int py = blockIdx.y * blockDim.y + threadIdx.y;
    int idx = px + py*nx;
    float a, b, t;

    if (px<nx && py<ny) {
        // compute the gradient
        a = 0;
        b = 0;
        float fc = f[idx];
        // float fr=f[idx+1];
        // float fu=f[idx+nx];
        // if (!(px==(nx-1))) a = fr - fc;
        // if (!(py==(ny-1))) b = fu - fc;
        a = fc;
        b = fc;

        // update z
        t = 1 / (1 + tau*sqrtf(a*a + b*b));
        z1[idx] = (z1[idx] + tau*a)*t;
        z2[idx] = (z2[idx] + tau*b)*t;
    }
}
4df62a63e99501cd8a906206f86924e1363fbf30.cu
#include "includes.h" __global__ void zupdate2_dummy(float *z1, float *z2, float *f, float tau, int nx, int ny) { int px = blockIdx.x * blockDim.x + threadIdx.x; int py = blockIdx.y * blockDim.y + threadIdx.y; int idx = px + py*nx; float a, b, t; if (px<nx && py<ny) { // compute the gradient a = 0; b = 0; float fc = f[idx]; // float fr=f[idx+1]; // float fu=f[idx+nx]; // if (!(px==(nx-1))) a = fr - fc; // if (!(py==(ny-1))) b = fu - fc; a = fc; b = fc; // update z t = 1 / (1 + tau*sqrtf(a*a + b*b)); z1[idx] = (z1[idx] + tau*a)*t; z2[idx] = (z2[idx] + tau*b)*t; } }
236b333dd66c3833ec283808ff0c4eb493afba80.hip
// !!! This is a file automatically generated by hipify!!! /*! * \file exprb43.cu * * \author Nicholas J. Curtis * \date 09/02/2014 * * \brief A krylov subspace integrator using a 4th order (3rd-order embedded) * exponential Rosenbrock method of Hochbruck et al. (2009) * * See full reference: * M. Hochbruck, A. Ostermann, J. Schweitzer, Exponential Rosenbrock-type methods, SIAM J. Numer. Anal. 47 (1) (2009) 786803. doi:10.1137/080717717. * * NOTE: all matricies stored in column major format! * */ /** Include common code. */ #include <stdlib.h> #include <stdio.h> #include <math.h> #include <stdbool.h> #include <hip/hip_complex.h> //various mechanism/solver defns //these should be included first #include "header_hip.cuh" #include "solver_options.cuh" #include "solver_props.cuh" #include "dydt.cuh" #ifndef FINITE_DIFFERENCE #include "jacob_hip.cuh" #else #include "fd_jacob.cuh" #endif #include "exprb43_props.cuh" #include "arnoldi.cuh" #include "exponential_linear_algebra.cuh" #include "solver_init.cuh" #include "gpu_macros.cuh" #ifdef GENERATE_DOCS namespace exprb43cu { #endif #ifdef LOG_KRYLOV_AND_STEPSIZES extern __device__ double err_log[MAX_STEPS]; extern __device__ int m_log[MAX_STEPS]; extern __device__ int m1_log[MAX_STEPS]; extern __device__ int m2_log[MAX_STEPS]; extern __device__ double t_log[MAX_STEPS]; extern __device__ double h_log[MAX_STEPS]; extern __device__ bool reject_log[MAX_STEPS]; extern __device__ int num_integrator_steps; #endif #ifdef DIVERGENCE_TEST extern __device__ int integrator_steps[DIVERGENCE_TEST]; #endif /////////////////////////////////////////////////////////////////////////////// /*! * \fn int integrate(const double t_start, const double t_end, const double pr, double* y) * \param t_start The initial integration time * \param t_end The final integration timestep * \param pr User data passed to the RHS function dydt() - commonly used for the Pressure term * \param y The state vector * \param mech The mechanism memory struct * \param solver The solver memory struct * \brief 4th-order exponential integrator function w/ adaptive Kyrlov subspace approximation * \returns The result of this integration step @see exprb43cu_ErrCodes */ __device__ void integrate (const double t_start, const double t_end, const double pr, double* __restrict__ y, const mechanism_memory* __restrict__ mech, const solver_memory* __restrict__ solver) { //initial time #ifdef CONST_TIME_STEP double h = t_end - t_start; #else double h = fmin(1.0e-8, t_end - t_start); #endif double h_new; double err_old = 1.0; double h_old = h; bool reject = false; double t = t_start; // get scaling for weighted norm double * const __restrict__ sc = solver->sc; scale_init(y, sc); #ifdef LOG_KRYLOV_AND_STEPSIZES if (T_ID == 0) { num_integrator_steps = 0; } #endif double beta = 0; //arrays double * const __restrict__ work1 = solver->work1; double * const __restrict__ work2 = solver->work2; double * const __restrict__ y1 = solver->work3; hipDoubleComplex * const __restrict__ work4 = solver->work4; double * const __restrict__ fy = mech->dy; double * const __restrict__ A = mech->jac; double * const __restrict__ Vm = solver->Vm; double * const __restrict__ phiHm = solver->phiHm; double * const __restrict__ savedActions = solver->savedActions; double * const __restrict__ gy = solver->gy; int * const __restrict__ result = solver->result; //vectors for scaling operations double * in[5] = {0, 0, 0, savedActions, y}; double * out[3] = {0, 0, work1}; double scale_vec[3] = {0, 0, 0}; double err = 0.0; int failures = 0; 
int steps = 0; while (t < t_end) { //error checking if (failures >= MAX_CONSECUTIVE_ERRORS) { result[T_ID] = EC_consecutive_steps; return; } if (steps++ >= MAX_STEPS) { result[T_ID] = EC_max_steps_exceeded; return; } if (t + h <= t) { result[T_ID] = EC_h_plus_t_equals_h; return; } if (!reject) { dydt (t, pr, y, fy, mech); #ifdef FINITE_DIFFERENCE eval_jacob (t, pr, y, A, mech, work1, work2); #else eval_jacob (t, pr, y, A, mech); #endif //gy = fy - A * y sparse_multiplier(A, y, gy); #pragma unroll for (int i = 0; i < NSP; ++i) { gy[INDEX(i)] = fy[INDEX(i)] - gy[INDEX(i)]; } } #ifdef DIVERGENCE_TEST integrator_steps[T_ID]++; #endif int m = arnoldi(0.5, 1, h, A, solver, fy, &beta, work2, work4); if (m + 1 >= STRIDE || m < 0) { //failure: too many krylov vectors required or singular matrix encountered //need to reduce h and try again h /= 5.0; reject = true; failures++; continue; } // Un2 to be stored in work1 //Un2 is partially in the mth column of phiHm //Un2 = y + ** 0.5 * h * phi_1(0.5 * h * A)*fy ** //Un2 = y + ** beta * Vm * phiHm(:, m) ** //store h * beta * Vm * phi_1(h * Hm) * e1 in savedActions matvec_m_by_m_plusequal(m, phiHm, &phiHm[GRID_DIM * (m * STRIDE)], work1); matvec_n_by_m_scale(m, beta, Vm, work1, savedActions); //store 0.5 * h * beta * Vm * phi_1(0.5 * h * Hm) * fy + y in work1 matvec_n_by_m_scale_add(m, beta, Vm, &phiHm[GRID_DIM * (m * STRIDE)], work1, y); //work1 is now equal to Un2 //next compute Dn2 //Dn2 = (F(Un2) - Jn * Un2) - gy dydt(t, pr, work1, &savedActions[GRID_DIM * NSP], mech); sparse_multiplier(A, work1, work2); #pragma unroll for (int i = 0; i < NSP; ++i) { work1[INDEX(i)] = savedActions[INDEX(NSP + i)] - work2[INDEX(i)] - gy[INDEX(i)]; } //work1 is now equal to Dn2 //partially compute Un3 as: //Un3 = y + ** h * phi_1(hA) * fy ** + h * phi_1(hA) * Dn2 //Un3 = y + ** h * beta * Vm * phiHm(:, m) ** //now we need the action of the exponential on Dn2 int m1 = arnoldi(1.0, 4, h, A, solver, work1, &beta, work2, work4); if (m1 + 4 >= STRIDE || m1 < 0) { //need to reduce h and try again h /= 5.0; reject = true; failures++; continue; } //save Phi3(h * A) * Dn2 to savedActions[0] //save Phi4(h * A) * Dn2 to savedActions[NSP] //add the action of phi_1 on Dn2 to y and hn * phi_1(hA) * fy to get Un3 in[0] = &phiHm[GRID_DIM * ((m1 + 2) * STRIDE)]; in[1] = &phiHm[GRID_DIM * ((m1 + 3) * STRIDE)]; in[2] = &phiHm[GRID_DIM * ((m1) * STRIDE)]; out[0] = &savedActions[GRID_DIM * NSP]; out[1] = &savedActions[GRID_DIM * 2 * NSP]; scale_vec[0] = beta / (h * h); scale_vec[1] = beta / (h * h * h); scale_vec[2] = beta; matvec_n_by_m_scale_special(m1, scale_vec, Vm, in, out); //Un3 is now in work1 //next compute Dn3 //Dn3 = F(Un3) - A * Un3 - gy dydt(t, pr, work1, &savedActions[GRID_DIM * 3 * NSP], mech); sparse_multiplier(A, work1, work2); #pragma unroll for (int i = 0; i < NSP; ++i) { work1[INDEX(i)] = savedActions[INDEX(3 * NSP + i)] - work2[INDEX(i)] - gy[INDEX(i)]; } //work1 is now equal to Dn3 //finally we need the action of the exponential on Dn3 int m2 = arnoldi(1.0, 4, h, A, solver, work1, &beta, work2, work4); if (m2 + 4 >= STRIDE || m2 < 0) { //need to reduce h and try again h /= 5.0; reject = true; failures++; continue; } out[0] = &savedActions[GRID_DIM * 3 * NSP]; out[1] = &savedActions[GRID_DIM * 4 * NSP]; in[0] = &phiHm[GRID_DIM * (m2 + 2) * STRIDE]; in[1] = &phiHm[GRID_DIM * (m2 + 3) * STRIDE]; scale_vec[0] = beta / (h * h); scale_vec[1] = beta / (h * h * h); matvec_n_by_m_scale_special2(m2, scale_vec, Vm, in, out); //construct y1 and error vector #pragma unroll for (int 
i = 0; i < NSP; ++i) { //y1 = y + h * phi1(h * A) * fy + h * sum(bi * Dni) y1[INDEX(i)] = y[INDEX(i)] + savedActions[INDEX(i)] + 16.0 * savedActions[INDEX(NSP + i)] - 48.0 * savedActions[INDEX(2 * NSP + i)] + -2.0 * savedActions[INDEX(3 * NSP + i)] + 12.0 * savedActions[INDEX(4 * NSP + i)]; //error vec work1[INDEX(i)] = 48.0 * savedActions[INDEX(2 * NSP + i)] - 12.0 * savedActions[INDEX(4 * NSP + i)]; } //scale and find err scale (y, y1, work2); err = fmax(EPS, sc_norm(work1, work2)); // classical step size calculation h_new = pow(err, -1.0 / ORD); #ifdef LOG_KRYLOV_AND_STEPSIZES if (T_ID == 0 && num_integrator_steps >= 0) { err_log[num_integrator_steps] = err; m_log[num_integrator_steps] = m; m1_log[num_integrator_steps] = m1; m2_log[num_integrator_steps] = m2; t_log[num_integrator_steps] = t; h_log[num_integrator_steps] = h; reject_log[num_integrator_steps] = err > 1.0; num_integrator_steps++; if (num_integrator_steps >= MAX_STEPS) { printf("Number of steps out of bounds! Overwriting\n"); num_integrator_steps = -1; } } #endif #ifndef CONST_TIME_STEP failures = 0; if (err <= 1.0) { // update y, scale vector and t #pragma unroll for (int i = 0; i < NSP; ++i) { sc[INDEX(i)] = work2[INDEX(i)]; y[INDEX(i)] = y1[INDEX(i)]; } t += h; // minimum of classical and Gustafsson step size prediction h_new = fmin(h_new, (h / h_old) * pow((err_old / (err * err)), (1.0 / ORD))); // limit to 0.2 <= (h_new/8) <= 8.0 h_new = h * fmax(fmin(0.9 * h_new, 8.0), 0.2); // store time step and error err_old = fmax(1.0e-2, err); h_old = h; // check if last step rejected if (reject) { h_new = fmin(h, h_new); reject = false; } h = fmin(h_new, t_end - t); } else { // limit to 0.2 <= (h_new/8) <= 8.0 h_new = h * fmax(fmin(0.9 * h_new, 8.0), 0.2); h_new = fmin(h_new, t_end - t); reject = true; h = fmin(h, h_new); } #else //constant time stepping //update y & t #pragma unroll for (int i = 0; i < NSP; ++i) { y[INDEX(i)] = y1[INDEX(i)]; } t += h; #endif } // end while result[T_ID] = EC_success; } #ifdef GENERATE_DOCS } #endif
236b333dd66c3833ec283808ff0c4eb493afba80.cu
/*! * \file exprb43.cu * * \author Nicholas J. Curtis * \date 09/02/2014 * * \brief A krylov subspace integrator using a 4th order (3rd-order embedded) * exponential Rosenbrock method of Hochbruck et al. (2009) * * See full reference: * M. Hochbruck, A. Ostermann, J. Schweitzer, Exponential Rosenbrock-type methods, SIAM J. Numer. Anal. 47 (1) (2009) 786–803. doi:10.1137/080717717. * * NOTE: all matricies stored in column major format! * */ /** Include common code. */ #include <stdlib.h> #include <stdio.h> #include <math.h> #include <stdbool.h> #include <cuComplex.h> //various mechanism/solver defns //these should be included first #include "header.cuh" #include "solver_options.cuh" #include "solver_props.cuh" #include "dydt.cuh" #ifndef FINITE_DIFFERENCE #include "jacob.cuh" #else #include "fd_jacob.cuh" #endif #include "exprb43_props.cuh" #include "arnoldi.cuh" #include "exponential_linear_algebra.cuh" #include "solver_init.cuh" #include "gpu_macros.cuh" #ifdef GENERATE_DOCS namespace exprb43cu { #endif #ifdef LOG_KRYLOV_AND_STEPSIZES extern __device__ double err_log[MAX_STEPS]; extern __device__ int m_log[MAX_STEPS]; extern __device__ int m1_log[MAX_STEPS]; extern __device__ int m2_log[MAX_STEPS]; extern __device__ double t_log[MAX_STEPS]; extern __device__ double h_log[MAX_STEPS]; extern __device__ bool reject_log[MAX_STEPS]; extern __device__ int num_integrator_steps; #endif #ifdef DIVERGENCE_TEST extern __device__ int integrator_steps[DIVERGENCE_TEST]; #endif /////////////////////////////////////////////////////////////////////////////// /*! * \fn int integrate(const double t_start, const double t_end, const double pr, double* y) * \param t_start The initial integration time * \param t_end The final integration timestep * \param pr User data passed to the RHS function dydt() - commonly used for the Pressure term * \param y The state vector * \param mech The mechanism memory struct * \param solver The solver memory struct * \brief 4th-order exponential integrator function w/ adaptive Kyrlov subspace approximation * \returns The result of this integration step @see exprb43cu_ErrCodes */ __device__ void integrate (const double t_start, const double t_end, const double pr, double* __restrict__ y, const mechanism_memory* __restrict__ mech, const solver_memory* __restrict__ solver) { //initial time #ifdef CONST_TIME_STEP double h = t_end - t_start; #else double h = fmin(1.0e-8, t_end - t_start); #endif double h_new; double err_old = 1.0; double h_old = h; bool reject = false; double t = t_start; // get scaling for weighted norm double * const __restrict__ sc = solver->sc; scale_init(y, sc); #ifdef LOG_KRYLOV_AND_STEPSIZES if (T_ID == 0) { num_integrator_steps = 0; } #endif double beta = 0; //arrays double * const __restrict__ work1 = solver->work1; double * const __restrict__ work2 = solver->work2; double * const __restrict__ y1 = solver->work3; cuDoubleComplex * const __restrict__ work4 = solver->work4; double * const __restrict__ fy = mech->dy; double * const __restrict__ A = mech->jac; double * const __restrict__ Vm = solver->Vm; double * const __restrict__ phiHm = solver->phiHm; double * const __restrict__ savedActions = solver->savedActions; double * const __restrict__ gy = solver->gy; int * const __restrict__ result = solver->result; //vectors for scaling operations double * in[5] = {0, 0, 0, savedActions, y}; double * out[3] = {0, 0, work1}; double scale_vec[3] = {0, 0, 0}; double err = 0.0; int failures = 0; int steps = 0; while (t < t_end) { //error checking if (failures >= 
MAX_CONSECUTIVE_ERRORS) { result[T_ID] = EC_consecutive_steps; return; } if (steps++ >= MAX_STEPS) { result[T_ID] = EC_max_steps_exceeded; return; } if (t + h <= t) { result[T_ID] = EC_h_plus_t_equals_h; return; } if (!reject) { dydt (t, pr, y, fy, mech); #ifdef FINITE_DIFFERENCE eval_jacob (t, pr, y, A, mech, work1, work2); #else eval_jacob (t, pr, y, A, mech); #endif //gy = fy - A * y sparse_multiplier(A, y, gy); #pragma unroll for (int i = 0; i < NSP; ++i) { gy[INDEX(i)] = fy[INDEX(i)] - gy[INDEX(i)]; } } #ifdef DIVERGENCE_TEST integrator_steps[T_ID]++; #endif int m = arnoldi(0.5, 1, h, A, solver, fy, &beta, work2, work4); if (m + 1 >= STRIDE || m < 0) { //failure: too many krylov vectors required or singular matrix encountered //need to reduce h and try again h /= 5.0; reject = true; failures++; continue; } // Un2 to be stored in work1 //Un2 is partially in the mth column of phiHm //Un2 = y + ** 0.5 * h * phi_1(0.5 * h * A)*fy ** //Un2 = y + ** beta * Vm * phiHm(:, m) ** //store h * beta * Vm * phi_1(h * Hm) * e1 in savedActions matvec_m_by_m_plusequal(m, phiHm, &phiHm[GRID_DIM * (m * STRIDE)], work1); matvec_n_by_m_scale(m, beta, Vm, work1, savedActions); //store 0.5 * h * beta * Vm * phi_1(0.5 * h * Hm) * fy + y in work1 matvec_n_by_m_scale_add(m, beta, Vm, &phiHm[GRID_DIM * (m * STRIDE)], work1, y); //work1 is now equal to Un2 //next compute Dn2 //Dn2 = (F(Un2) - Jn * Un2) - gy dydt(t, pr, work1, &savedActions[GRID_DIM * NSP], mech); sparse_multiplier(A, work1, work2); #pragma unroll for (int i = 0; i < NSP; ++i) { work1[INDEX(i)] = savedActions[INDEX(NSP + i)] - work2[INDEX(i)] - gy[INDEX(i)]; } //work1 is now equal to Dn2 //partially compute Un3 as: //Un3 = y + ** h * phi_1(hA) * fy ** + h * phi_1(hA) * Dn2 //Un3 = y + ** h * beta * Vm * phiHm(:, m) ** //now we need the action of the exponential on Dn2 int m1 = arnoldi(1.0, 4, h, A, solver, work1, &beta, work2, work4); if (m1 + 4 >= STRIDE || m1 < 0) { //need to reduce h and try again h /= 5.0; reject = true; failures++; continue; } //save Phi3(h * A) * Dn2 to savedActions[0] //save Phi4(h * A) * Dn2 to savedActions[NSP] //add the action of phi_1 on Dn2 to y and hn * phi_1(hA) * fy to get Un3 in[0] = &phiHm[GRID_DIM * ((m1 + 2) * STRIDE)]; in[1] = &phiHm[GRID_DIM * ((m1 + 3) * STRIDE)]; in[2] = &phiHm[GRID_DIM * ((m1) * STRIDE)]; out[0] = &savedActions[GRID_DIM * NSP]; out[1] = &savedActions[GRID_DIM * 2 * NSP]; scale_vec[0] = beta / (h * h); scale_vec[1] = beta / (h * h * h); scale_vec[2] = beta; matvec_n_by_m_scale_special(m1, scale_vec, Vm, in, out); //Un3 is now in work1 //next compute Dn3 //Dn3 = F(Un3) - A * Un3 - gy dydt(t, pr, work1, &savedActions[GRID_DIM * 3 * NSP], mech); sparse_multiplier(A, work1, work2); #pragma unroll for (int i = 0; i < NSP; ++i) { work1[INDEX(i)] = savedActions[INDEX(3 * NSP + i)] - work2[INDEX(i)] - gy[INDEX(i)]; } //work1 is now equal to Dn3 //finally we need the action of the exponential on Dn3 int m2 = arnoldi(1.0, 4, h, A, solver, work1, &beta, work2, work4); if (m2 + 4 >= STRIDE || m2 < 0) { //need to reduce h and try again h /= 5.0; reject = true; failures++; continue; } out[0] = &savedActions[GRID_DIM * 3 * NSP]; out[1] = &savedActions[GRID_DIM * 4 * NSP]; in[0] = &phiHm[GRID_DIM * (m2 + 2) * STRIDE]; in[1] = &phiHm[GRID_DIM * (m2 + 3) * STRIDE]; scale_vec[0] = beta / (h * h); scale_vec[1] = beta / (h * h * h); matvec_n_by_m_scale_special2(m2, scale_vec, Vm, in, out); //construct y1 and error vector #pragma unroll for (int i = 0; i < NSP; ++i) { //y1 = y + h * phi1(h * A) * fy + h * sum(bi 
* Dni) y1[INDEX(i)] = y[INDEX(i)] + savedActions[INDEX(i)] + 16.0 * savedActions[INDEX(NSP + i)] - 48.0 * savedActions[INDEX(2 * NSP + i)] + -2.0 * savedActions[INDEX(3 * NSP + i)] + 12.0 * savedActions[INDEX(4 * NSP + i)]; //error vec work1[INDEX(i)] = 48.0 * savedActions[INDEX(2 * NSP + i)] - 12.0 * savedActions[INDEX(4 * NSP + i)]; } //scale and find err scale (y, y1, work2); err = fmax(EPS, sc_norm(work1, work2)); // classical step size calculation h_new = pow(err, -1.0 / ORD); #ifdef LOG_KRYLOV_AND_STEPSIZES if (T_ID == 0 && num_integrator_steps >= 0) { err_log[num_integrator_steps] = err; m_log[num_integrator_steps] = m; m1_log[num_integrator_steps] = m1; m2_log[num_integrator_steps] = m2; t_log[num_integrator_steps] = t; h_log[num_integrator_steps] = h; reject_log[num_integrator_steps] = err > 1.0; num_integrator_steps++; if (num_integrator_steps >= MAX_STEPS) { printf("Number of steps out of bounds! Overwriting\n"); num_integrator_steps = -1; } } #endif #ifndef CONST_TIME_STEP failures = 0; if (err <= 1.0) { // update y, scale vector and t #pragma unroll for (int i = 0; i < NSP; ++i) { sc[INDEX(i)] = work2[INDEX(i)]; y[INDEX(i)] = y1[INDEX(i)]; } t += h; // minimum of classical and Gustafsson step size prediction h_new = fmin(h_new, (h / h_old) * pow((err_old / (err * err)), (1.0 / ORD))); // limit to 0.2 <= (h_new/8) <= 8.0 h_new = h * fmax(fmin(0.9 * h_new, 8.0), 0.2); // store time step and error err_old = fmax(1.0e-2, err); h_old = h; // check if last step rejected if (reject) { h_new = fmin(h, h_new); reject = false; } h = fmin(h_new, t_end - t); } else { // limit to 0.2 <= (h_new/8) <= 8.0 h_new = h * fmax(fmin(0.9 * h_new, 8.0), 0.2); h_new = fmin(h_new, t_end - t); reject = true; h = fmin(h, h_new); } #else //constant time stepping //update y & t #pragma unroll for (int i = 0; i < NSP; ++i) { y[INDEX(i)] = y1[INDEX(i)]; } t += h; #endif } // end while result[T_ID] = EC_success; } #ifdef GENERATE_DOCS } #endif
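The adaptive step-size logic at the end of integrate above can be restated compactly (this is only a summary of the code, not an addition to it). With $\mathrm{err}$ the scaled error norm (floored at EPS) and ORD the controller order:

\[
\mathrm{fac} = \min\!\left(\mathrm{err}^{-1/\mathrm{ORD}},\; \frac{h}{h_{\mathrm{old}}}\left(\frac{\mathrm{err}_{\mathrm{old}}}{\mathrm{err}^{2}}\right)^{1/\mathrm{ORD}}\right),
\qquad
h_{\mathrm{new}} = h \cdot \max\!\bigl(0.2,\; \min(0.9\,\mathrm{fac},\; 8)\bigr),
\]

where the second argument of the outer $\min$ (the Gustafsson predictor) is only used on accepted steps; a rejected step just clamps $0.9\,\mathrm{err}^{-1/\mathrm{ORD}}$ and never increases $h$, and in both branches the new step is additionally capped at the remaining interval $t_{\mathrm{end}} - t$.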
40747d356388c0fb626e3b926e850870c3883037.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* * CudaSolverKernels.cu * * Created on: 10 Feb 2018 * Author: konstantin */ #include "CudaSolverKernels.h" #include "Util.h" #include "Constants.h" //Compute F component. X and Y and block local #define BLOCK_SIZE 16 //Device Constants __constant__ float w; __constant__ float reynold; __constant__ float deltaTime; __constant__ float cellSize; __constant__ float gravityX; __constant__ float gravityY; /* * Optimisation notes: * In computeF, computeG if we unwrap uGrid(x, y) + deltaTime*((1.0f/reynold)*(duuxx+duuyy) - duux - duvy + gravityX); * and calculate each element and add it. It is possible to reduce the number of registers in use. * Don't know if this matters. * */ class Grid{ public: __device__ Grid(float * ptr, int size){ ptr_ = ptr; size_ = size; } __device__ inline float& operator()(int x, int y){ return ptr_[y * size_ + x]; } __device__ inline const float& operator()(int x, int y)const{ return ptr_[y * size_ + x]; } private: int size_; float * ptr_; }; /** Copies a grid from global to local memory Warning: Should only be called on a thread which corresponds to a poin inside the grid @param global Pointer to global memory @param local Pointer to local memory block @param globalSize True Size of the edge the global grid */ //Note gridSize + 2 = globalSize __device__ inline float fsquare(float val){ return val*val; } __device__ inline float computeF(int x, int y, const Grid& uGrid, const Grid& vGrid){ float duux = (1.0f/cellSize)* ( fsquare(((uGrid(x, y) + uGrid(x+1, y) )/ 2.0f)) - fsquare(((uGrid(x-1, y) + uGrid(x, y)) / 2.0f)) ) + GAMMA*(1.0f/cellSize)* ( fabsf((uGrid(x, y)+uGrid(x+1, y))/2.0f)*((uGrid(x, y)-uGrid(x+1,y))/2.0f) - fabsf((uGrid(x-1, y)+uGrid(x, y))/2.0f)*((uGrid(x -1, y)-uGrid(x,y))/2.0f)); float duvy = (1.0f/cellSize)* ( ((vGrid(x, y)+vGrid(x+1, y))/2.0f)*((uGrid(x, y)+uGrid(x,y+1))/2.0f) - ((vGrid(x, y -1)+vGrid(x+1, y-1))/2.0f)*((uGrid(x, y -1)+uGrid(x,y))/2.0f) ) + GAMMA*(1.0f/cellSize)* ( fabsf((vGrid(x, y)+vGrid(x+1, y))/2.0f)*((uGrid(x, y)-uGrid(x,y+1))/2.0f) - fabsf((vGrid(x, y-1)+vGrid(x+1, y-1))/2.0f)*((uGrid(x, y-1)-uGrid(x,y))/2.0f) ); float duuxx = ( uGrid(x+1, y) - 2*uGrid(x, y) + uGrid(x-1, y) ) / fsquare(cellSize); float duuyy = ( uGrid(x, y+1) - 2*uGrid(x, y) + uGrid(x, y-1) ) / fsquare(cellSize); float F = uGrid(x, y) + deltaTime*((1.0f/reynold)*(duuxx+duuyy) - duux - duvy + gravityX); return F; } __device__ inline float computeG(int x, int y, const Grid& uGrid, const Grid& vGrid ){ float duvx= (1.0f/cellSize)* ( ((uGrid(x,y)+uGrid(x,y+1))/2.0f)*((vGrid(x,y)+vGrid(x+1,y))/2.0f) - ((uGrid(x-1,y)+uGrid(x-1,y+1))/2.0f)*((vGrid(x-1,y )+vGrid(x,y))/2.0f) ) + GAMMA*(1.0f/cellSize)* ( fabsf((uGrid(x,y)+uGrid(x,y+1))/2.0f)*((vGrid(x,y)-vGrid(x+1,y))/2.0f) - fabsf((uGrid(x-1,y)+uGrid(x-1,y+1))/2.0f)*((vGrid(x-1,y)-vGrid(x,y))/2.0f) ); float dvvy=(1.0f/cellSize)* ( fsquare(((vGrid(x,y)+vGrid(x,y+1))/2.0f)) - fsquare(((vGrid(x,y-1)+vGrid(x,y))/2.0f)) ) + GAMMA*(1.0f/cellSize)* ( fabsf((vGrid(x,y)+vGrid(x,y+1))/2.0f)*((vGrid(x,y)-vGrid(x,y+1))/2.0f) - fabsf((vGrid(x,y-1)+vGrid(x,y))/2.0f)*((vGrid(x,y-1)-vGrid(x,y))/2.0f) ); float dvvxx = ( vGrid(x+1, y) - 2*vGrid(x, y) + vGrid(x-1, y) ) / fsquare(cellSize); float dvvyy = ( vGrid(x, y+1) - 2*vGrid(x, y) + vGrid(x, y-1) ) / fsquare(cellSize); float G = vGrid(x, y) + deltaTime*((1.0f/reynold)*(dvvxx+dvvyy) - dvvy - duvx + gravityY); return G; } __device__ inline float computeRHS(int x, int y, const Grid &fGrid, const Grid 
&gGrid){ float rhs = ( (fGrid(x,y)-fGrid(x-1,y)) / cellSize + (gGrid(x,y)-gGrid(x,y-1)) / cellSize) /deltaTime; return rhs; } __device__ inline float computeU(int x, int y, const Grid &fGrid, const Grid &pGrid){ float u = fGrid(x, y) - (deltaTime/cellSize) * (pGrid(x+1,y) - pGrid(x,y)); return u; } __device__ inline float computeV(int x, int y, const Grid &gGrid, const Grid &pGrid){ float v = gGrid(x, y) - (deltaTime/cellSize) * (pGrid(x,y+1) - pGrid(x,y)); return v; } __device__ inline float relax(int x, int y, const Grid pGrid, const Grid &rhsGrid){ float cellsq = cellSize*cellSize; return (1-w)*pGrid(x,y) + w * (cellsq/4.0f)* ( (pGrid(x+1,y)+pGrid(x-1,y))/cellsq + (pGrid(x,y+1)+pGrid(x,y-1))/cellsq - pGrid(x,y) ); /* float residual = ( (pg(x+1,y) - pg(x,y)) - (pg(x,y) - pg(x-1,y)) )/(cellsq) + ( (pg(x,y+1) - pg(x,y)) - (pg(x,y) - pg(x,y-1)) )/(cellsq) - rh(x,y); */ } __device__ inline void load_local_grid(float * global, float * local, int globalEdgeSize){ int gridX = blockIdx.x * blockDim.x + threadIdx.x; //Coordinates inside the kernel grid int gridY = blockIdx.y * blockDim.y + threadIdx.y; //Copy Global memory to correspoding local memory inisde the work group //How local memory should look (assumuing blockDim.x == blockDim.y == 8 //Local memory should be 10 x 10 grid local[(threadIdx.y +1) * (blockDim.x + 2) + threadIdx.x + 1] = global[(gridY + 1)* globalEdgeSize + gridX + 1]; //Local memory now // 0 0 0 0 0 0 0 0 0 0 // 0 X X X X X X X X 0 // 0 X X X X X X X X 0 // 0 X X X X X X X X 0 // 0 X X X X X X X X 0 // 0 X X X X X X X X 0 // 0 X X X X X X X X 0 // 0 X X X X X X X X 0 // 0 X X X X X X X X 0 // 0 0 0 0 0 0 0 0 0 0 //Now copy memory from boundary conditions //----COPY BOUNDARY CONDITIONS if(threadIdx.x == 0 ){ local[(threadIdx.y +1) * (blockDim.x + 2) ] = global[(gridY + 1)* globalEdgeSize + gridX]; } //Local memory now // 0 0 0 0 0 0 0 0 0 0 // x X X X X X X X X 0 // x X X X X X X X X 0 // x X X X X X X X X 0 // x X X X X X X X X 0 // x X X X X X X X X 0 // x X X X X X X X X 0 // x X X X X X X X X 0 // x X X X X X X X X 0 // 0 0 0 0 0 0 0 0 0 0 if(threadIdx.x == blockDim.x - 1 ){ local[(threadIdx.y +1) * (blockDim.x + 2) + threadIdx.x + 2] = global[(gridY + 1)* globalEdgeSize + gridX + 2]; } //Local memory now // 0 0 0 0 0 0 0 0 0 0 // x X X X X X X X X x // x X X X X X X X X x // x X X X X X X X X x // x X X X X X X X X x // x X X X X X X X X x // x X X X X X X X X x // x X X X X X X X X x // x X X X X X X X X x // 0 0 0 0 0 0 0 0 0 0 if(threadIdx.y == 0 ){ local[ threadIdx.x + 1] = global[(gridY)* globalEdgeSize + gridX + 1]; } //Local memory now // 0 x x x x x x x x 0 // x X X X X X X X X x // x X X X X X X X X x // x X X X X X X X X x // x X X X X X X X X x // x X X X X X X X X x // x X X X X X X X X x // x X X X X X X X X x // x X X X X X X X X x // 0 0 0 0 0 0 0 0 0 0 if(threadIdx.y == blockDim.y -1 ){ local[(threadIdx.y + 2) * (blockDim.x + 2) + threadIdx.x + 1] = global[(gridY + 2)* globalEdgeSize + gridX + 1]; } //Local memory now // 0 x x x x x x x x 0 // x X X X X X X X X x // x X X X X X X X X x // x X X X X X X X X x // x X X X X X X X X x // x X X X X X X X X x // x X X X X X X X X x // x X X X X X X X X x // x X X X X X X X X x // 0 x x x x x x x x 0 if(threadIdx.x == 1 and threadIdx.y == 1){ local[0] = global[(gridY - 1)* globalEdgeSize + gridX - 1]; } //Local memory now // X x x x x x x x x 0 // x X X X X X X X X x // x X X X X X X X X x // x X X X X X X X X x // x X X X X X X X X x // x X X X X X X X X x // x X X X X X X X X x // x X X X X X X X X x // x X X 
X X X X X X x // 0 x x x x x x x x 0 if(threadIdx.x == blockDim.x-2 and threadIdx.y == blockDim.y-2){ local[(blockDim.y + 2) * (blockDim.x + 2) -1] = global[(gridY + 2)* globalEdgeSize + gridX + 1]; } //Local memory now // X x x x x x x x x 0 // x X X X X X X X X x // x X X X X X X X X x // x X X X X X X X X x // x X X X X X X X X x // x X X X X X X X X x // x X X X X X X X X x // x X X X X X X X X x // x X X X X X X X X x // 0 x x x x x x x x x if(threadIdx.x == 1 and threadIdx.y == blockDim.y-2){ local[(blockDim.y + 1) * (blockDim.x + 2)] = global[(gridY + 1)* globalEdgeSize]; } //Local memory now // X x x x x x x x x 0 // x X X X X X X X X x // x X X X X X X X X x // x X X X X X X X X x // x X X X X X X X X x // x X X X X X X X X x // x X X X X X X X X x // x X X X X X X X X x // x X X X X X X X X x // x x x x x x x x x X if(threadIdx.x == blockDim.x-2 and threadIdx.y == 1){ local[blockDim.x + 1] = global[globalEdgeSize - 1]; } //Local memory now // X x x x x x x x x x // x X X X X X X X X x // x X X X X X X X X x // x X X X X X X X X x // x X X X X X X X X x // x X X X X X X X X x // x X X X X X X X X x // x X X X X X X X X x // x X X X X X X X X x // 0 x x x x x x x x x __syncthreads(); } __global__ void clear_staggered_grid_kernel(float * UGrid, float * VGrid, float * FGrid, float * GGrid, float * RHSGrid, float * PressureGrid, int edgeSize){ int gridX = blockIdx.x * blockDim.x + threadIdx.x; int gridY = blockIdx.y * blockDim.y + threadIdx.y; int location = gridY * edgeSize + gridX; if(location >= edgeSize*edgeSize){ return ; } UGrid[gridY * edgeSize + gridX] = 0.0f; VGrid[gridY * edgeSize + gridX] = 0.0f; FGrid[gridY * edgeSize + gridX] = 0.0f; GGrid[gridY * edgeSize + gridX] = 0.0f; RHSGrid[gridY * edgeSize + gridX] = 0.0f; PressureGrid[gridY * edgeSize + gridX] = 0.0f; } __global__ void null_boundary_kernel(float * grid, int globalEdgeSize){ //Note: kernel is 1 dismentional int gridX = blockIdx.x * blockDim.x + threadIdx.x; int location; location = gridX; if(location < globalEdgeSize * globalEdgeSize) grid[location] = 0.0f; //North boundary location = globalEdgeSize * (globalEdgeSize - 1) + gridX; if(location < globalEdgeSize * globalEdgeSize) grid[location] = 0.0f; //South boundary location = globalEdgeSize * gridX; if(location < globalEdgeSize*globalEdgeSize) grid[location] = 0.0f; //West boundary location = globalEdgeSize * (gridX+1) - 1; if(location < globalEdgeSize * globalEdgeSize) grid[location] = 0.0f; //East boundary } /* Grid size is the size of the non edge-grid (aka 2 smaller than gridSize) * We are executing for innerEdgeSize x innerEdgeSize grid * */ __global__ void computeFGGrid_kernel(float * uGridPtr, float * vGridPtr, float * fGridPtr, float * gGridPtr, int innerEdgeSize){ //Shared mem should be size (blockDim.x + 2)^2 extern __shared__ float shared_mem[]; float * sharedUGrid = (float*) shared_mem; float * sharedVGrid = (float*) &sharedUGrid[(blockDim.x+2)*(blockDim.x+2)]; int gridX = blockIdx.x * blockDim.x + threadIdx.x; //Coordinates inside the kernel grid int gridY = blockIdx.y * blockDim.y + threadIdx.y; if(gridY > innerEdgeSize || gridX > innerEdgeSize){ return; } load_local_grid(uGridPtr, sharedUGrid, innerEdgeSize + 2); load_local_grid(vGridPtr, sharedVGrid, innerEdgeSize + 2); Grid uGrid(sharedUGrid, blockDim.x + 2); Grid vGrid(sharedVGrid, blockDim.x + 2); int globalId = (gridY + 1)* (innerEdgeSize+2) + gridX + 1; //sharedUGrid and sharedVGrid should be (gridDim.x + 2)^2 fGridPtr[globalId] = computeF( threadIdx.x + 1, threadIdx.y + 1, uGrid, vGrid); 
gGridPtr[globalId] = computeG( threadIdx.x + 1, threadIdx.y + 1, uGrid, vGrid); } //Copies the velocities from the edges of the ibber grid to the edges of the outer grid __global__ void copyUVBoundaries_kernel(float *uGrid, float *vGrid, float * fGrid, float *gGrid, int globalEdgeSize){ int gridX = blockIdx.x * blockDim.x + threadIdx.x; int location; //North boundary location = gridX; if(location < globalEdgeSize * globalEdgeSize){ fGrid[location] = uGrid[location + globalEdgeSize]; gGrid[location] = vGrid[location + globalEdgeSize]; } //South boundary location = globalEdgeSize * (globalEdgeSize - 1) + gridX; if(location < globalEdgeSize*globalEdgeSize){ fGrid[location] = uGrid[location - globalEdgeSize]; gGrid[location] = vGrid[location - globalEdgeSize]; } //West boundary location = globalEdgeSize * gridX; if(location < globalEdgeSize*globalEdgeSize){ fGrid[location] = uGrid[location + 1]; gGrid[location] = vGrid[location + 1]; } //East boundary location = globalEdgeSize*(gridX+1) - 1; if(location < globalEdgeSize*globalEdgeSize){ fGrid[location] = uGrid[location - 1]; gGrid[location] = vGrid[location - 1]; } } __global__ void copyPressureBoundaries(float *pGrid, int globalEdgeSize){ int gridX = blockIdx.x * blockDim.x + threadIdx.x; int location; //North boundary location = gridX; if(location < globalEdgeSize * globalEdgeSize){ pGrid[location] = pGrid[location + globalEdgeSize]; pGrid[location] = pGrid[location + globalEdgeSize]; } //South boundary location = globalEdgeSize * (globalEdgeSize - 1) + gridX; if(location < globalEdgeSize*globalEdgeSize){ pGrid[location] = pGrid[location - globalEdgeSize]; pGrid[location] = pGrid[location - globalEdgeSize]; } //West boundary location = globalEdgeSize * gridX; if(location < globalEdgeSize*globalEdgeSize){ pGrid[location] = pGrid[location + 1]; pGrid[location] = pGrid[location + 1]; } //East boundary location = globalEdgeSize*(gridX+1) - 1; if(location < globalEdgeSize*globalEdgeSize){ pGrid[location] = pGrid[location - 1]; pGrid[location] = pGrid[location - 1]; } } __global__ void computeRHSGrid_kernel(float * fGridPtr, float * gGridPtr, float * rGridPtr, int innerEdgeSize){ extern __shared__ float shared_mem[]; float * sharedFGrid = (float*) shared_mem; float * sharedGGrid = (float*) &sharedFGrid[(blockDim.x+2)*(blockDim.x+2)]; int gridX = blockIdx.x * blockDim.x + threadIdx.x; //Coordinates inside the kernel grid int gridY = blockIdx.y * blockDim.y + threadIdx.y; if(gridY > innerEdgeSize || gridX > innerEdgeSize){ return; } load_local_grid(fGridPtr, sharedFGrid, innerEdgeSize + 2); load_local_grid(gGridPtr, sharedGGrid, innerEdgeSize + 2); Grid fGrid(sharedFGrid, blockDim.x + 2); Grid gGrid(sharedGGrid, blockDim.x + 2); int globalId = (gridY + 1)* (innerEdgeSize+2) + (gridX + 1); //sharedUGrid and sharedVGrid should be (gridDim.x + 2)^2 rGridPtr[globalId] = computeRHS(threadIdx.x + 1, threadIdx.y + 1, fGrid, gGrid); } __global__ void computeRedCells_kernel(float * pGridPtr, float * rhsGridPtr, int innerEdgeSize){ extern __shared__ float pressure_cache[]; //For pressure grid only int gridX = blockIdx.x * blockDim.x + threadIdx.x; //Coordinates inside the kernel grid int gridY = blockIdx.y * blockDim.y + threadIdx.y; if(gridX>=innerEdgeSize || gridY >= innerEdgeSize){ return; } load_local_grid(pGridPtr, pressure_cache, innerEdgeSize + 2); //First copy all the black cells in a grid with 2x size of the block Grid pressureGrid(pressure_cache, blockDim.x + 2); Grid rhsGrid(rhsGridPtr, blockDim.x + 2); //If even if(!((gridY * innerEdgeSize + 
gridX) & 1)){ int globalId = (gridY + 1)* (innerEdgeSize+2) + (gridX + 1); pGridPtr[globalId] = relax(threadIdx.x + 1, threadIdx.y + 1, pressureGrid, rhsGrid); } //Naive implementation } __global__ void computeBlackCells_kernel(float * pGridPtr, float * rhsGrdiPtr, int innerEdgeSize){ extern __shared__ float pressure_cache[]; //For pressure grid only int gridX = blockIdx.x * blockDim.x + threadIdx.x; //Coordinates inside the kernel grid int gridY = blockIdx.y * blockDim.y + threadIdx.y; if(gridX>=innerEdgeSize || gridY >= innerEdgeSize){ return; } load_local_grid(pGridPtr, pressure_cache, innerEdgeSize + 2); //First copy all the black cells in a grid with 2x size of the block Grid pressureGrid(pressure_cache, blockDim.x + 2); Grid rhsGrid(rhsGrdiPtr, blockDim.x + 2); //If even if(((gridY * innerEdgeSize + gridX) & 1)){ int globalId = (gridY + 1)* (innerEdgeSize+2) + (gridX + 1); pGridPtr[globalId] = relax(threadIdx.x + 1, threadIdx.y + 1, pressureGrid, rhsGrid); } //Naive implementation } __global__ void computeUV_kernel(float *pGridPtr, float* uGridPtr, float * vGridPtr, float * fGridPtr, float * gGridPtr , int innerEdgeSize){ extern __shared__ float pressure_cache[]; //For pressure grid only int gridX = blockIdx.x * blockDim.x + threadIdx.x; //Coordinates inside the kernel grid int gridY = blockIdx.y * blockDim.y + threadIdx.y; if(gridX>=innerEdgeSize || gridY >= innerEdgeSize){ return; } load_local_grid(pGridPtr, pressure_cache, innerEdgeSize + 2); //First copy all the black cells in a grid with 2x size of the block Grid pGrid(pressure_cache, blockDim.x + 2); Grid uGrid(uGridPtr, blockDim.x + 2); Grid vGrid(vGridPtr, blockDim.x + 2); Grid fGrid(fGridPtr, blockDim.x + 2); Grid gGrid(gGridPtr, blockDim.x + 2); //No point in making separete inline functions for simple computations so add +1 for easier indexing //Note: pGrid is in shared memory and all the other grids are global memory uGrid(gridX, gridY) = fGrid(gridX, gridY) - ((deltaTime)/cellSize)* ( pGrid(threadIdx.x+1,threadIdx.y)-pGrid(threadIdx.x,threadIdx.y) ); vGrid(gridX, gridY) = gGrid(gridX,gridY) - ((deltaTime)/cellSize)* ( pGrid(threadIdx.x,threadIdx.y+1)-pGrid(threadIdx.x,threadIdx.y) ); } __global__ void setGridPoint_kernel(float * grid, float value, int point){ grid[point] = value; } //HOST FUNCTIONS //Note this is the true edge size void clear_staggered_grid_host( float * UGrid, float * VGrid, float * FGrid, float * GGrid, float * RHSGrid, float * PressureGrid, int globalEdgeSize){ dim3 threadsPerBlock(BLOCK_SIZE, BLOCK_SIZE, 1); dim3 numBlocks(globalEdgeSize / threadsPerBlock.x + 1, globalEdgeSize / threadsPerBlock.y + 1, 1); hipLaunchKernelGGL(( clear_staggered_grid_kernel), dim3(numBlocks), dim3(threadsPerBlock), 0, 0, UGrid, VGrid, FGrid, GGrid, RHSGrid, PressureGrid, globalEdgeSize); CUDA_CHECK_RETURN(hipDeviceSynchronize()); CUDA_CHECK_RETURN(hipGetLastError()); } /*Sets the edges of the grid to null*/ void null_boundary_host(float * grid, int innerEdgeSize){ dim3 threadsPerBlock(BLOCK_SIZE * BLOCK_SIZE, 1 , 1); dim3 numBlocks((innerEdgeSize+2) / threadsPerBlock.x + 1, 1, 1); hipLaunchKernelGGL(( null_boundary_kernel), dim3(numBlocks), dim3(threadsPerBlock), 0, 0, grid, (innerEdgeSize+2)); CUDA_CHECK_RETURN(hipDeviceSynchronize()); CUDA_CHECK_RETURN(hipGetLastError()); } /* * Only runs on the cells not on the edges * */ void computeFGGrid_host(float * uGrid, float * vGrid, float * fGrid, float * gGrid, int innerEdgeSize){ //Copy all the boundaries dim3 threadsPerBlock(BLOCK_SIZE * BLOCK_SIZE, 1 , 1); dim3 
numBlocks((innerEdgeSize+2) / threadsPerBlock.x + 1, 1, 1); hipLaunchKernelGGL(( copyUVBoundaries_kernel), dim3(numBlocks), dim3(threadsPerBlock), 0, 0, uGrid, vGrid, fGrid, gGrid, innerEdgeSize + 2); CUDA_CHECK_RETURN(hipDeviceSynchronize()); CUDA_CHECK_RETURN(hipGetLastError()); //Reuse threadsPerBlock and numBlocks threadsPerBlock.x = BLOCK_SIZE; threadsPerBlock.y = BLOCK_SIZE; numBlocks.x = innerEdgeSize / threadsPerBlock.x + 1; numBlocks.y = innerEdgeSize / threadsPerBlock.y + 1; hipLaunchKernelGGL(( computeFGGrid_kernel) , dim3(numBlocks), dim3(threadsPerBlock), 2*(BLOCK_SIZE+2)*(BLOCK_SIZE+2)*sizeof(float), 0, uGrid, vGrid, fGrid, gGrid, innerEdgeSize); CUDA_CHECK_RETURN(hipDeviceSynchronize()); CUDA_CHECK_RETURN(hipGetLastError()); } void computeRHSGrid_host(float *fGrid, float *gGrid, float * rGrid, int innerEdgeSize){ dim3 threadsPerBlock(BLOCK_SIZE, BLOCK_SIZE, 1); dim3 numBlocks(innerEdgeSize / threadsPerBlock.x + 1, innerEdgeSize / threadsPerBlock.y + 1, 1); hipLaunchKernelGGL(( computeRHSGrid_kernel) , dim3(numBlocks), dim3(threadsPerBlock), 2*(BLOCK_SIZE+2)*(BLOCK_SIZE+2)*sizeof(float), 0, fGrid, gGrid, rGrid, innerEdgeSize); CUDA_CHECK_RETURN(hipDeviceSynchronize()); CUDA_CHECK_RETURN(hipGetLastError()); //Compute RHS } void doSorStep_host(float * pGrid, float * rhsGrid, int innerEdgeSize){ //Copy the edges of the inner grid to the edges of the outergrid //Aka set boundary conditions dim3 threadsPerBlock(BLOCK_SIZE * BLOCK_SIZE, 1 , 1); dim3 numBlocks((innerEdgeSize+2) / threadsPerBlock.x + 1, 1, 1); //__global__ void copyPressureBoundaries(float *pGrid, int globalEdgeSize){ hipLaunchKernelGGL(( copyPressureBoundaries), dim3(numBlocks), dim3(threadsPerBlock), 0, 0, pGrid, innerEdgeSize + 2); CUDA_CHECK_RETURN(hipDeviceSynchronize()); CUDA_CHECK_RETURN(hipGetLastError()); //Reuse threadsPerBlock and numBlocks threadsPerBlock.x = BLOCK_SIZE; threadsPerBlock.y = BLOCK_SIZE; numBlocks.x = innerEdgeSize / threadsPerBlock.x + 1; numBlocks.y = innerEdgeSize / threadsPerBlock.y + 1; //Do the red checkerboard hipLaunchKernelGGL(( computeRedCells_kernel) , dim3(numBlocks), dim3(threadsPerBlock), (BLOCK_SIZE+2)*(BLOCK_SIZE+2)*sizeof(float), 0, pGrid, rhsGrid, innerEdgeSize); CUDA_CHECK_RETURN(hipDeviceSynchronize()); CUDA_CHECK_RETURN(hipGetLastError()); //Do the red checkerboard hipLaunchKernelGGL(( computeBlackCells_kernel) , dim3(numBlocks), dim3(threadsPerBlock), (BLOCK_SIZE+2)*(BLOCK_SIZE+2)*sizeof(float), 0, pGrid, rhsGrid, innerEdgeSize); CUDA_CHECK_RETURN(hipDeviceSynchronize()); CUDA_CHECK_RETURN(hipGetLastError()); } void computeUV_host(float *pGrid, float *uGrid, float * vGrid, float *fGrid, float *gGrid, int innerEdgeSize){ //__global__ void computeUV_kernel(float *pGridPtr, float* uGridPtr, float * vGridPtr, float * fGridPtr, float * gGridPtr , int innerEdgeSize){ dim3 threadsPerBlock(BLOCK_SIZE, BLOCK_SIZE, 1); dim3 numBlocks(innerEdgeSize / threadsPerBlock.x + 1, innerEdgeSize / threadsPerBlock.y + 1, 1); hipLaunchKernelGGL(( computeUV_kernel) , dim3(numBlocks), dim3(threadsPerBlock), (BLOCK_SIZE+2)*(BLOCK_SIZE+2)*sizeof(float), 0, pGrid, uGrid, vGrid, fGrid, gGrid, innerEdgeSize); CUDA_CHECK_RETURN(hipDeviceSynchronize()); CUDA_CHECK_RETURN(hipGetLastError()); } void setGridPoint_host(float * grid, float value, int point){ //Most inefficient way to set a single value(sadly there isn't any other way) //must launch a single thread kernel hipLaunchKernelGGL(( setGridPoint_kernel), dim3(1), dim3(1), 0, 0, grid, value, point); CUDA_CHECK_RETURN(hipDeviceSynchronize()); 
CUDA_CHECK_RETURN(hipGetLastError()); } void devSetOmega(float omega_){ hipMemcpyToSymbol(w, &omega_, sizeof(float)); } void devSetReynold(float reynold_){ hipMemcpyToSymbol(reynold, &reynold_, sizeof(float)); } void devSetDeltaTime(float deltaTime_){ hipMemcpyToSymbol(deltaTime, &deltaTime_, sizeof(float)); } void devSetCellSize(float cellSize_){ hipMemcpyToSymbol(cellSize, &cellSize_, sizeof(float)); } void devSetGravity(float gravityX_, float gravityY_){ hipMemcpyToSymbol(gravityX, &gravityX_, sizeof(float)); hipMemcpyToSymbol(gravityY, &gravityY_, sizeof(float)); }
40747d356388c0fb626e3b926e850870c3883037.cu
/* * CudaSolverKernels.cu * * Created on: 10 Feb 2018 * Author: konstantin */ #include "CudaSolverKernels.h" #include "Util.h" #include "Constants.h" //Compute F component. X and Y and block local #define BLOCK_SIZE 16 //Device Constants __constant__ float w; __constant__ float reynold; __constant__ float deltaTime; __constant__ float cellSize; __constant__ float gravityX; __constant__ float gravityY; /* * Optimisation notes: * In computeF, computeG if we unwrap uGrid(x, y) + deltaTime*((1.0f/reynold)*(duuxx+duuyy) - duux - duvy + gravityX); * and calculate each element and add it. It is possible to reduce the number of registers in use. * Don't know if this matters. * */ class Grid{ public: __device__ Grid(float * ptr, int size){ ptr_ = ptr; size_ = size; } __device__ inline float& operator()(int x, int y){ return ptr_[y * size_ + x]; } __device__ inline const float& operator()(int x, int y)const{ return ptr_[y * size_ + x]; } private: int size_; float * ptr_; }; /** Copies a grid from global to local memory Warning: Should only be called on a thread which corresponds to a poin inside the grid @param global Pointer to global memory @param local Pointer to local memory block @param globalSize True Size of the edge the global grid */ //Note gridSize + 2 = globalSize __device__ inline float fsquare(float val){ return val*val; } __device__ inline float computeF(int x, int y, const Grid& uGrid, const Grid& vGrid){ float duux = (1.0f/cellSize)* ( fsquare(((uGrid(x, y) + uGrid(x+1, y) )/ 2.0f)) - fsquare(((uGrid(x-1, y) + uGrid(x, y)) / 2.0f)) ) + GAMMA*(1.0f/cellSize)* ( fabsf((uGrid(x, y)+uGrid(x+1, y))/2.0f)*((uGrid(x, y)-uGrid(x+1,y))/2.0f) - fabsf((uGrid(x-1, y)+uGrid(x, y))/2.0f)*((uGrid(x -1, y)-uGrid(x,y))/2.0f)); float duvy = (1.0f/cellSize)* ( ((vGrid(x, y)+vGrid(x+1, y))/2.0f)*((uGrid(x, y)+uGrid(x,y+1))/2.0f) - ((vGrid(x, y -1)+vGrid(x+1, y-1))/2.0f)*((uGrid(x, y -1)+uGrid(x,y))/2.0f) ) + GAMMA*(1.0f/cellSize)* ( fabsf((vGrid(x, y)+vGrid(x+1, y))/2.0f)*((uGrid(x, y)-uGrid(x,y+1))/2.0f) - fabsf((vGrid(x, y-1)+vGrid(x+1, y-1))/2.0f)*((uGrid(x, y-1)-uGrid(x,y))/2.0f) ); float duuxx = ( uGrid(x+1, y) - 2*uGrid(x, y) + uGrid(x-1, y) ) / fsquare(cellSize); float duuyy = ( uGrid(x, y+1) - 2*uGrid(x, y) + uGrid(x, y-1) ) / fsquare(cellSize); float F = uGrid(x, y) + deltaTime*((1.0f/reynold)*(duuxx+duuyy) - duux - duvy + gravityX); return F; } __device__ inline float computeG(int x, int y, const Grid& uGrid, const Grid& vGrid ){ float duvx= (1.0f/cellSize)* ( ((uGrid(x,y)+uGrid(x,y+1))/2.0f)*((vGrid(x,y)+vGrid(x+1,y))/2.0f) - ((uGrid(x-1,y)+uGrid(x-1,y+1))/2.0f)*((vGrid(x-1,y )+vGrid(x,y))/2.0f) ) + GAMMA*(1.0f/cellSize)* ( fabsf((uGrid(x,y)+uGrid(x,y+1))/2.0f)*((vGrid(x,y)-vGrid(x+1,y))/2.0f) - fabsf((uGrid(x-1,y)+uGrid(x-1,y+1))/2.0f)*((vGrid(x-1,y)-vGrid(x,y))/2.0f) ); float dvvy=(1.0f/cellSize)* ( fsquare(((vGrid(x,y)+vGrid(x,y+1))/2.0f)) - fsquare(((vGrid(x,y-1)+vGrid(x,y))/2.0f)) ) + GAMMA*(1.0f/cellSize)* ( fabsf((vGrid(x,y)+vGrid(x,y+1))/2.0f)*((vGrid(x,y)-vGrid(x,y+1))/2.0f) - fabsf((vGrid(x,y-1)+vGrid(x,y))/2.0f)*((vGrid(x,y-1)-vGrid(x,y))/2.0f) ); float dvvxx = ( vGrid(x+1, y) - 2*vGrid(x, y) + vGrid(x-1, y) ) / fsquare(cellSize); float dvvyy = ( vGrid(x, y+1) - 2*vGrid(x, y) + vGrid(x, y-1) ) / fsquare(cellSize); float G = vGrid(x, y) + deltaTime*((1.0f/reynold)*(dvvxx+dvvyy) - dvvy - duvx + gravityY); return G; } __device__ inline float computeRHS(int x, int y, const Grid &fGrid, const Grid &gGrid){ float rhs = ( (fGrid(x,y)-fGrid(x-1,y)) / cellSize + (gGrid(x,y)-gGrid(x,y-1)) / 
cellSize) /deltaTime; return rhs; } __device__ inline float computeU(int x, int y, const Grid &fGrid, const Grid &pGrid){ float u = fGrid(x, y) - (deltaTime/cellSize) * (pGrid(x+1,y) - pGrid(x,y)); return u; } __device__ inline float computeV(int x, int y, const Grid &gGrid, const Grid &pGrid){ float v = gGrid(x, y) - (deltaTime/cellSize) * (pGrid(x,y+1) - pGrid(x,y)); return v; } __device__ inline float relax(int x, int y, const Grid pGrid, const Grid &rhsGrid){ float cellsq = cellSize*cellSize; return (1-w)*pGrid(x,y) + w * (cellsq/4.0f)* ( (pGrid(x+1,y)+pGrid(x-1,y))/cellsq + (pGrid(x,y+1)+pGrid(x,y-1))/cellsq - pGrid(x,y) ); /* float residual = ( (pg(x+1,y) - pg(x,y)) - (pg(x,y) - pg(x-1,y)) )/(cellsq) + ( (pg(x,y+1) - pg(x,y)) - (pg(x,y) - pg(x,y-1)) )/(cellsq) - rh(x,y); */ } __device__ inline void load_local_grid(float * global, float * local, int globalEdgeSize){ int gridX = blockIdx.x * blockDim.x + threadIdx.x; //Coordinates inside the kernel grid int gridY = blockIdx.y * blockDim.y + threadIdx.y; //Copy Global memory to correspoding local memory inisde the work group //How local memory should look (assumuing blockDim.x == blockDim.y == 8 //Local memory should be 10 x 10 grid local[(threadIdx.y +1) * (blockDim.x + 2) + threadIdx.x + 1] = global[(gridY + 1)* globalEdgeSize + gridX + 1]; //Local memory now // 0 0 0 0 0 0 0 0 0 0 // 0 X X X X X X X X 0 // 0 X X X X X X X X 0 // 0 X X X X X X X X 0 // 0 X X X X X X X X 0 // 0 X X X X X X X X 0 // 0 X X X X X X X X 0 // 0 X X X X X X X X 0 // 0 X X X X X X X X 0 // 0 0 0 0 0 0 0 0 0 0 //Now copy memory from boundary conditions //----COPY BOUNDARY CONDITIONS if(threadIdx.x == 0 ){ local[(threadIdx.y +1) * (blockDim.x + 2) ] = global[(gridY + 1)* globalEdgeSize + gridX]; } //Local memory now // 0 0 0 0 0 0 0 0 0 0 // x X X X X X X X X 0 // x X X X X X X X X 0 // x X X X X X X X X 0 // x X X X X X X X X 0 // x X X X X X X X X 0 // x X X X X X X X X 0 // x X X X X X X X X 0 // x X X X X X X X X 0 // 0 0 0 0 0 0 0 0 0 0 if(threadIdx.x == blockDim.x - 1 ){ local[(threadIdx.y +1) * (blockDim.x + 2) + threadIdx.x + 2] = global[(gridY + 1)* globalEdgeSize + gridX + 2]; } //Local memory now // 0 0 0 0 0 0 0 0 0 0 // x X X X X X X X X x // x X X X X X X X X x // x X X X X X X X X x // x X X X X X X X X x // x X X X X X X X X x // x X X X X X X X X x // x X X X X X X X X x // x X X X X X X X X x // 0 0 0 0 0 0 0 0 0 0 if(threadIdx.y == 0 ){ local[ threadIdx.x + 1] = global[(gridY)* globalEdgeSize + gridX + 1]; } //Local memory now // 0 x x x x x x x x 0 // x X X X X X X X X x // x X X X X X X X X x // x X X X X X X X X x // x X X X X X X X X x // x X X X X X X X X x // x X X X X X X X X x // x X X X X X X X X x // x X X X X X X X X x // 0 0 0 0 0 0 0 0 0 0 if(threadIdx.y == blockDim.y -1 ){ local[(threadIdx.y + 2) * (blockDim.x + 2) + threadIdx.x + 1] = global[(gridY + 2)* globalEdgeSize + gridX + 1]; } //Local memory now // 0 x x x x x x x x 0 // x X X X X X X X X x // x X X X X X X X X x // x X X X X X X X X x // x X X X X X X X X x // x X X X X X X X X x // x X X X X X X X X x // x X X X X X X X X x // x X X X X X X X X x // 0 x x x x x x x x 0 if(threadIdx.x == 1 and threadIdx.y == 1){ local[0] = global[(gridY - 1)* globalEdgeSize + gridX - 1]; } //Local memory now // X x x x x x x x x 0 // x X X X X X X X X x // x X X X X X X X X x // x X X X X X X X X x // x X X X X X X X X x // x X X X X X X X X x // x X X X X X X X X x // x X X X X X X X X x // x X X X X X X X X x // 0 x x x x x x x x 0 if(threadIdx.x == blockDim.x-2 and threadIdx.y == 
blockDim.y-2){ local[(blockDim.y + 2) * (blockDim.x + 2) -1] = global[(gridY + 2)* globalEdgeSize + gridX + 1]; } //Local memory now // X x x x x x x x x 0 // x X X X X X X X X x // x X X X X X X X X x // x X X X X X X X X x // x X X X X X X X X x // x X X X X X X X X x // x X X X X X X X X x // x X X X X X X X X x // x X X X X X X X X x // 0 x x x x x x x x x if(threadIdx.x == 1 and threadIdx.y == blockDim.y-2){ local[(blockDim.y + 1) * (blockDim.x + 2)] = global[(gridY + 1)* globalEdgeSize]; } //Local memory now // X x x x x x x x x 0 // x X X X X X X X X x // x X X X X X X X X x // x X X X X X X X X x // x X X X X X X X X x // x X X X X X X X X x // x X X X X X X X X x // x X X X X X X X X x // x X X X X X X X X x // x x x x x x x x x X if(threadIdx.x == blockDim.x-2 and threadIdx.y == 1){ local[blockDim.x + 1] = global[globalEdgeSize - 1]; } //Local memory now // X x x x x x x x x x // x X X X X X X X X x // x X X X X X X X X x // x X X X X X X X X x // x X X X X X X X X x // x X X X X X X X X x // x X X X X X X X X x // x X X X X X X X X x // x X X X X X X X X x // 0 x x x x x x x x x __syncthreads(); } __global__ void clear_staggered_grid_kernel(float * UGrid, float * VGrid, float * FGrid, float * GGrid, float * RHSGrid, float * PressureGrid, int edgeSize){ int gridX = blockIdx.x * blockDim.x + threadIdx.x; int gridY = blockIdx.y * blockDim.y + threadIdx.y; int location = gridY * edgeSize + gridX; if(location >= edgeSize*edgeSize){ return ; } UGrid[gridY * edgeSize + gridX] = 0.0f; VGrid[gridY * edgeSize + gridX] = 0.0f; FGrid[gridY * edgeSize + gridX] = 0.0f; GGrid[gridY * edgeSize + gridX] = 0.0f; RHSGrid[gridY * edgeSize + gridX] = 0.0f; PressureGrid[gridY * edgeSize + gridX] = 0.0f; } __global__ void null_boundary_kernel(float * grid, int globalEdgeSize){ //Note: kernel is 1 dismentional int gridX = blockIdx.x * blockDim.x + threadIdx.x; int location; location = gridX; if(location < globalEdgeSize * globalEdgeSize) grid[location] = 0.0f; //North boundary location = globalEdgeSize * (globalEdgeSize - 1) + gridX; if(location < globalEdgeSize * globalEdgeSize) grid[location] = 0.0f; //South boundary location = globalEdgeSize * gridX; if(location < globalEdgeSize*globalEdgeSize) grid[location] = 0.0f; //West boundary location = globalEdgeSize * (gridX+1) - 1; if(location < globalEdgeSize * globalEdgeSize) grid[location] = 0.0f; //East boundary } /* Grid size is the size of the non edge-grid (aka 2 smaller than gridSize) * We are executing for innerEdgeSize x innerEdgeSize grid * */ __global__ void computeFGGrid_kernel(float * uGridPtr, float * vGridPtr, float * fGridPtr, float * gGridPtr, int innerEdgeSize){ //Shared mem should be size (blockDim.x + 2)^2 extern __shared__ float shared_mem[]; float * sharedUGrid = (float*) shared_mem; float * sharedVGrid = (float*) &sharedUGrid[(blockDim.x+2)*(blockDim.x+2)]; int gridX = blockIdx.x * blockDim.x + threadIdx.x; //Coordinates inside the kernel grid int gridY = blockIdx.y * blockDim.y + threadIdx.y; if(gridY > innerEdgeSize || gridX > innerEdgeSize){ return; } load_local_grid(uGridPtr, sharedUGrid, innerEdgeSize + 2); load_local_grid(vGridPtr, sharedVGrid, innerEdgeSize + 2); Grid uGrid(sharedUGrid, blockDim.x + 2); Grid vGrid(sharedVGrid, blockDim.x + 2); int globalId = (gridY + 1)* (innerEdgeSize+2) + gridX + 1; //sharedUGrid and sharedVGrid should be (gridDim.x + 2)^2 fGridPtr[globalId] = computeF( threadIdx.x + 1, threadIdx.y + 1, uGrid, vGrid); gGridPtr[globalId] = computeG( threadIdx.x + 1, threadIdx.y + 1, uGrid, vGrid); } //Copies 
the velocities from the edges of the ibber grid to the edges of the outer grid __global__ void copyUVBoundaries_kernel(float *uGrid, float *vGrid, float * fGrid, float *gGrid, int globalEdgeSize){ int gridX = blockIdx.x * blockDim.x + threadIdx.x; int location; //North boundary location = gridX; if(location < globalEdgeSize * globalEdgeSize){ fGrid[location] = uGrid[location + globalEdgeSize]; gGrid[location] = vGrid[location + globalEdgeSize]; } //South boundary location = globalEdgeSize * (globalEdgeSize - 1) + gridX; if(location < globalEdgeSize*globalEdgeSize){ fGrid[location] = uGrid[location - globalEdgeSize]; gGrid[location] = vGrid[location - globalEdgeSize]; } //West boundary location = globalEdgeSize * gridX; if(location < globalEdgeSize*globalEdgeSize){ fGrid[location] = uGrid[location + 1]; gGrid[location] = vGrid[location + 1]; } //East boundary location = globalEdgeSize*(gridX+1) - 1; if(location < globalEdgeSize*globalEdgeSize){ fGrid[location] = uGrid[location - 1]; gGrid[location] = vGrid[location - 1]; } } __global__ void copyPressureBoundaries(float *pGrid, int globalEdgeSize){ int gridX = blockIdx.x * blockDim.x + threadIdx.x; int location; //North boundary location = gridX; if(location < globalEdgeSize * globalEdgeSize){ pGrid[location] = pGrid[location + globalEdgeSize]; pGrid[location] = pGrid[location + globalEdgeSize]; } //South boundary location = globalEdgeSize * (globalEdgeSize - 1) + gridX; if(location < globalEdgeSize*globalEdgeSize){ pGrid[location] = pGrid[location - globalEdgeSize]; pGrid[location] = pGrid[location - globalEdgeSize]; } //West boundary location = globalEdgeSize * gridX; if(location < globalEdgeSize*globalEdgeSize){ pGrid[location] = pGrid[location + 1]; pGrid[location] = pGrid[location + 1]; } //East boundary location = globalEdgeSize*(gridX+1) - 1; if(location < globalEdgeSize*globalEdgeSize){ pGrid[location] = pGrid[location - 1]; pGrid[location] = pGrid[location - 1]; } } __global__ void computeRHSGrid_kernel(float * fGridPtr, float * gGridPtr, float * rGridPtr, int innerEdgeSize){ extern __shared__ float shared_mem[]; float * sharedFGrid = (float*) shared_mem; float * sharedGGrid = (float*) &sharedFGrid[(blockDim.x+2)*(blockDim.x+2)]; int gridX = blockIdx.x * blockDim.x + threadIdx.x; //Coordinates inside the kernel grid int gridY = blockIdx.y * blockDim.y + threadIdx.y; if(gridY > innerEdgeSize || gridX > innerEdgeSize){ return; } load_local_grid(fGridPtr, sharedFGrid, innerEdgeSize + 2); load_local_grid(gGridPtr, sharedGGrid, innerEdgeSize + 2); Grid fGrid(sharedFGrid, blockDim.x + 2); Grid gGrid(sharedGGrid, blockDim.x + 2); int globalId = (gridY + 1)* (innerEdgeSize+2) + (gridX + 1); //sharedUGrid and sharedVGrid should be (gridDim.x + 2)^2 rGridPtr[globalId] = computeRHS(threadIdx.x + 1, threadIdx.y + 1, fGrid, gGrid); } __global__ void computeRedCells_kernel(float * pGridPtr, float * rhsGridPtr, int innerEdgeSize){ extern __shared__ float pressure_cache[]; //For pressure grid only int gridX = blockIdx.x * blockDim.x + threadIdx.x; //Coordinates inside the kernel grid int gridY = blockIdx.y * blockDim.y + threadIdx.y; if(gridX>=innerEdgeSize || gridY >= innerEdgeSize){ return; } load_local_grid(pGridPtr, pressure_cache, innerEdgeSize + 2); //First copy all the black cells in a grid with 2x size of the block Grid pressureGrid(pressure_cache, blockDim.x + 2); Grid rhsGrid(rhsGridPtr, blockDim.x + 2); //If even if(!((gridY * innerEdgeSize + gridX) & 1)){ int globalId = (gridY + 1)* (innerEdgeSize+2) + (gridX + 1); pGridPtr[globalId] = 
relax(threadIdx.x + 1, threadIdx.y + 1, pressureGrid, rhsGrid); } //Naive implementation } __global__ void computeBlackCells_kernel(float * pGridPtr, float * rhsGrdiPtr, int innerEdgeSize){ extern __shared__ float pressure_cache[]; //For pressure grid only int gridX = blockIdx.x * blockDim.x + threadIdx.x; //Coordinates inside the kernel grid int gridY = blockIdx.y * blockDim.y + threadIdx.y; if(gridX>=innerEdgeSize || gridY >= innerEdgeSize){ return; } load_local_grid(pGridPtr, pressure_cache, innerEdgeSize + 2); //First copy all the black cells in a grid with 2x size of the block Grid pressureGrid(pressure_cache, blockDim.x + 2); Grid rhsGrid(rhsGrdiPtr, blockDim.x + 2); //If even if(((gridY * innerEdgeSize + gridX) & 1)){ int globalId = (gridY + 1)* (innerEdgeSize+2) + (gridX + 1); pGridPtr[globalId] = relax(threadIdx.x + 1, threadIdx.y + 1, pressureGrid, rhsGrid); } //Naive implementation } __global__ void computeUV_kernel(float *pGridPtr, float* uGridPtr, float * vGridPtr, float * fGridPtr, float * gGridPtr , int innerEdgeSize){ extern __shared__ float pressure_cache[]; //For pressure grid only int gridX = blockIdx.x * blockDim.x + threadIdx.x; //Coordinates inside the kernel grid int gridY = blockIdx.y * blockDim.y + threadIdx.y; if(gridX>=innerEdgeSize || gridY >= innerEdgeSize){ return; } load_local_grid(pGridPtr, pressure_cache, innerEdgeSize + 2); //First copy all the black cells in a grid with 2x size of the block Grid pGrid(pressure_cache, blockDim.x + 2); Grid uGrid(uGridPtr, blockDim.x + 2); Grid vGrid(vGridPtr, blockDim.x + 2); Grid fGrid(fGridPtr, blockDim.x + 2); Grid gGrid(gGridPtr, blockDim.x + 2); //No point in making separete inline functions for simple computations so add +1 for easier indexing //Note: pGrid is in shared memory and all the other grids are global memory uGrid(gridX, gridY) = fGrid(gridX, gridY) - ((deltaTime)/cellSize)* ( pGrid(threadIdx.x+1,threadIdx.y)-pGrid(threadIdx.x,threadIdx.y) ); vGrid(gridX, gridY) = gGrid(gridX,gridY) - ((deltaTime)/cellSize)* ( pGrid(threadIdx.x,threadIdx.y+1)-pGrid(threadIdx.x,threadIdx.y) ); } __global__ void setGridPoint_kernel(float * grid, float value, int point){ grid[point] = value; } //HOST FUNCTIONS //Note this is the true edge size void clear_staggered_grid_host( float * UGrid, float * VGrid, float * FGrid, float * GGrid, float * RHSGrid, float * PressureGrid, int globalEdgeSize){ dim3 threadsPerBlock(BLOCK_SIZE, BLOCK_SIZE, 1); dim3 numBlocks(globalEdgeSize / threadsPerBlock.x + 1, globalEdgeSize / threadsPerBlock.y + 1, 1); clear_staggered_grid_kernel<<<numBlocks, threadsPerBlock>>>(UGrid, VGrid, FGrid, GGrid, RHSGrid, PressureGrid, globalEdgeSize); CUDA_CHECK_RETURN(cudaDeviceSynchronize()); CUDA_CHECK_RETURN(cudaGetLastError()); } /*Sets the edges of the grid to null*/ void null_boundary_host(float * grid, int innerEdgeSize){ dim3 threadsPerBlock(BLOCK_SIZE * BLOCK_SIZE, 1 , 1); dim3 numBlocks((innerEdgeSize+2) / threadsPerBlock.x + 1, 1, 1); null_boundary_kernel<<<numBlocks, threadsPerBlock>>>(grid, (innerEdgeSize+2)); CUDA_CHECK_RETURN(cudaDeviceSynchronize()); CUDA_CHECK_RETURN(cudaGetLastError()); } /* * Only runs on the cells not on the edges * */ void computeFGGrid_host(float * uGrid, float * vGrid, float * fGrid, float * gGrid, int innerEdgeSize){ //Copy all the boundaries dim3 threadsPerBlock(BLOCK_SIZE * BLOCK_SIZE, 1 , 1); dim3 numBlocks((innerEdgeSize+2) / threadsPerBlock.x + 1, 1, 1); copyUVBoundaries_kernel<<<numBlocks, threadsPerBlock>>>(uGrid, vGrid, fGrid, gGrid, innerEdgeSize + 2); 
CUDA_CHECK_RETURN(cudaDeviceSynchronize()); CUDA_CHECK_RETURN(cudaGetLastError()); //Reuse threadsPerBlock and numBlocks threadsPerBlock.x = BLOCK_SIZE; threadsPerBlock.y = BLOCK_SIZE; numBlocks.x = innerEdgeSize / threadsPerBlock.x + 1; numBlocks.y = innerEdgeSize / threadsPerBlock.y + 1; computeFGGrid_kernel <<<numBlocks, threadsPerBlock, 2*(BLOCK_SIZE+2)*(BLOCK_SIZE+2)*sizeof(float)>>>(uGrid, vGrid, fGrid, gGrid, innerEdgeSize); CUDA_CHECK_RETURN(cudaDeviceSynchronize()); CUDA_CHECK_RETURN(cudaGetLastError()); } void computeRHSGrid_host(float *fGrid, float *gGrid, float * rGrid, int innerEdgeSize){ dim3 threadsPerBlock(BLOCK_SIZE, BLOCK_SIZE, 1); dim3 numBlocks(innerEdgeSize / threadsPerBlock.x + 1, innerEdgeSize / threadsPerBlock.y + 1, 1); computeRHSGrid_kernel <<<numBlocks, threadsPerBlock, 2*(BLOCK_SIZE+2)*(BLOCK_SIZE+2)*sizeof(float)>>>(fGrid, gGrid, rGrid, innerEdgeSize); CUDA_CHECK_RETURN(cudaDeviceSynchronize()); CUDA_CHECK_RETURN(cudaGetLastError()); //Compute RHS } void doSorStep_host(float * pGrid, float * rhsGrid, int innerEdgeSize){ //Copy the edges of the inner grid to the edges of the outergrid //Aka set boundary conditions dim3 threadsPerBlock(BLOCK_SIZE * BLOCK_SIZE, 1 , 1); dim3 numBlocks((innerEdgeSize+2) / threadsPerBlock.x + 1, 1, 1); //__global__ void copyPressureBoundaries(float *pGrid, int globalEdgeSize){ copyPressureBoundaries<<<numBlocks, threadsPerBlock>>>(pGrid, innerEdgeSize + 2); CUDA_CHECK_RETURN(cudaDeviceSynchronize()); CUDA_CHECK_RETURN(cudaGetLastError()); //Reuse threadsPerBlock and numBlocks threadsPerBlock.x = BLOCK_SIZE; threadsPerBlock.y = BLOCK_SIZE; numBlocks.x = innerEdgeSize / threadsPerBlock.x + 1; numBlocks.y = innerEdgeSize / threadsPerBlock.y + 1; //Do the red checkerboard computeRedCells_kernel <<<numBlocks, threadsPerBlock, (BLOCK_SIZE+2)*(BLOCK_SIZE+2)*sizeof(float)>>>(pGrid, rhsGrid, innerEdgeSize); CUDA_CHECK_RETURN(cudaDeviceSynchronize()); CUDA_CHECK_RETURN(cudaGetLastError()); //Do the red checkerboard computeBlackCells_kernel <<<numBlocks, threadsPerBlock, (BLOCK_SIZE+2)*(BLOCK_SIZE+2)*sizeof(float)>>>(pGrid, rhsGrid, innerEdgeSize); CUDA_CHECK_RETURN(cudaDeviceSynchronize()); CUDA_CHECK_RETURN(cudaGetLastError()); } void computeUV_host(float *pGrid, float *uGrid, float * vGrid, float *fGrid, float *gGrid, int innerEdgeSize){ //__global__ void computeUV_kernel(float *pGridPtr, float* uGridPtr, float * vGridPtr, float * fGridPtr, float * gGridPtr , int innerEdgeSize){ dim3 threadsPerBlock(BLOCK_SIZE, BLOCK_SIZE, 1); dim3 numBlocks(innerEdgeSize / threadsPerBlock.x + 1, innerEdgeSize / threadsPerBlock.y + 1, 1); computeUV_kernel <<<numBlocks, threadsPerBlock, (BLOCK_SIZE+2)*(BLOCK_SIZE+2)*sizeof(float)>>>(pGrid, uGrid, vGrid, fGrid, gGrid, innerEdgeSize); CUDA_CHECK_RETURN(cudaDeviceSynchronize()); CUDA_CHECK_RETURN(cudaGetLastError()); } void setGridPoint_host(float * grid, float value, int point){ //Most inefficient way to set a single value(sadly there isn't any other way) //must launch a single thread kernel setGridPoint_kernel<<<1, 1>>>(grid, value, point); CUDA_CHECK_RETURN(cudaDeviceSynchronize()); CUDA_CHECK_RETURN(cudaGetLastError()); } void devSetOmega(float omega_){ cudaMemcpyToSymbol(w, &omega_, sizeof(float)); } void devSetReynold(float reynold_){ cudaMemcpyToSymbol(reynold, &reynold_, sizeof(float)); } void devSetDeltaTime(float deltaTime_){ cudaMemcpyToSymbol(deltaTime, &deltaTime_, sizeof(float)); } void devSetCellSize(float cellSize_){ cudaMemcpyToSymbol(cellSize, &cellSize_, sizeof(float)); } void 
devSetGravity(float gravityX_, float gravityY_){ cudaMemcpyToSymbol(gravityX, &gravityX_, sizeof(float)); cudaMemcpyToSymbol(gravityY, &gravityY_, sizeof(float)); }
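The pair above (40747d356388c0fb626e3b926e850870c3883037 .cu and its hipified counterpart) differs only in runtime spelling: triple-chevron launches become hipLaunchKernelGGL calls and cuda* runtime functions become hip* functions. Below is a minimal sketch of that mechanical mapping. The kernel name scale_kernel, the buffer, and the sizes are invented for illustration and are not part of either file.

#include <cuda_runtime.h>
#include <cstdio>

// Hypothetical kernel, used only to show the launch syntax on both sides.
__global__ void scale_kernel(float* data, float factor, int n) {
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n) data[i] *= factor;   // guard the padded last block
}

int main() {
    const int n = 1024;
    float* d_data = nullptr;
    cudaMalloc(&d_data, n * sizeof(float));
    cudaMemset(d_data, 0, n * sizeof(float));

    dim3 block(256);
    dim3 grid((n + block.x - 1) / block.x);

    // CUDA spelling, as in the .cu files of this collection:
    scale_kernel<<<grid, block, 0, 0>>>(d_data, 2.0f, n);
    // hipify rewrites the same launch in the .hip files as:
    //   hipLaunchKernelGGL(( scale_kernel), dim3(grid), dim3(block), 0, 0, d_data, 2.0f, n);
    // and maps cudaMalloc/cudaMemset/cudaDeviceSynchronize to hipMalloc/hipMemset/hipDeviceSynchronize.

    cudaDeviceSynchronize();
    printf("launch status: %s\n", cudaGetErrorString(cudaGetLastError()));
    cudaFree(d_data);
    return 0;
}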
501b3694865e9d9ae1c99d7e2628aeb50b79bd93.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <ATen/ATen.h> #include <ATen/hip/HIPContext.h> #include <ATen/hip/impl/HIPGuardImplMasqueradingAsCUDA.h> #include <ATen/hip/HIPApplyUtils.cuh> #include "cuda_helpers.h" #include <iostream> #include <vector> int const threadsPerBlock = sizeof(unsigned long long) * 8; template <typename T> __device__ inline bool devIoU(T const* const a, T const* const b, const float threshold) { T left = max(a[0], b[0]), right = min(a[2], b[2]); T top = max(a[1], b[1]), bottom = min(a[3], b[3]); T width = max(right - left, (T)0), height = max(bottom - top, (T)0); T interS = width * height; T Sa = (a[2] - a[0]) * (a[3] - a[1]); T Sb = (b[2] - b[0]) * (b[3] - b[1]); return interS > threshold * (Sa + Sb - interS); } template <typename T> __global__ void nms_kernel( const int n_boxes, const float iou_threshold, const T* dev_boxes, unsigned long long* dev_mask) { const int row_start = blockIdx.y; const int col_start = blockIdx.x; if (row_start > col_start) return; const int row_size = min(n_boxes - row_start * threadsPerBlock, threadsPerBlock); const int col_size = min(n_boxes - col_start * threadsPerBlock, threadsPerBlock); __shared__ T block_boxes[threadsPerBlock * 4]; if (threadIdx.x < col_size) { block_boxes[threadIdx.x * 4 + 0] = dev_boxes[(threadsPerBlock * col_start + threadIdx.x) * 4 + 0]; block_boxes[threadIdx.x * 4 + 1] = dev_boxes[(threadsPerBlock * col_start + threadIdx.x) * 4 + 1]; block_boxes[threadIdx.x * 4 + 2] = dev_boxes[(threadsPerBlock * col_start + threadIdx.x) * 4 + 2]; block_boxes[threadIdx.x * 4 + 3] = dev_boxes[(threadsPerBlock * col_start + threadIdx.x) * 4 + 3]; } __syncthreads(); if (threadIdx.x < row_size) { const int cur_box_idx = threadsPerBlock * row_start + threadIdx.x; const T* cur_box = dev_boxes + cur_box_idx * 4; int i = 0; unsigned long long t = 0; int start = 0; if (row_start == col_start) { start = threadIdx.x + 1; } for (i = start; i < col_size; i++) { if (devIoU<T>(cur_box, block_boxes + i * 4, iou_threshold)) { t |= 1ULL << i; } } const int col_blocks = at::cuda::ATenCeilDiv(n_boxes, threadsPerBlock); dev_mask[cur_box_idx * col_blocks + col_start] = t; } } at::Tensor nms_cuda(const at::Tensor& dets, const at::Tensor& scores, float iou_threshold) { AT_ASSERTM(dets.type().is_cuda(), "dets must be a CUDA tensor"); AT_ASSERTM(scores.type().is_cuda(), "scores must be a CUDA tensor"); at::hip::HIPGuardMasqueradingAsCUDA device_guard(dets.device()); auto order_t = std::get<1>(scores.sort(0, /* descending=*/true)); auto dets_sorted = dets.index_select(0, order_t); int dets_num = dets.size(0); const int col_blocks = at::cuda::ATenCeilDiv(dets_num, threadsPerBlock); at::Tensor mask = at::empty({dets_num * col_blocks}, dets.options().dtype(at::kLong)); dim3 blocks(col_blocks, col_blocks); dim3 threads(threadsPerBlock); hipStream_t stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA(); AT_DISPATCH_FLOATING_TYPES_AND_HALF( dets_sorted.type(), "nms_kernel_cuda", [&] { hipLaunchKernelGGL(( nms_kernel<scalar_t>), dim3(blocks), dim3(threads), 0, stream, dets_num, iou_threshold, dets_sorted.data_ptr<scalar_t>(), (unsigned long long*)mask.data_ptr<int64_t>()); }); at::Tensor mask_cpu = mask.to(at::kCPU); unsigned long long* mask_host = (unsigned long long*)mask_cpu.data_ptr<int64_t>(); std::vector<unsigned long long> remv(col_blocks); memset(&remv[0], 0, sizeof(unsigned long long) * col_blocks); at::Tensor keep = at::empty({dets_num}, dets.options().dtype(at::kLong).device(at::kCPU)); 
int64_t* keep_out = keep.data_ptr<int64_t>(); int num_to_keep = 0; for (int i = 0; i < dets_num; i++) { int nblock = i / threadsPerBlock; int inblock = i % threadsPerBlock; if (!(remv[nblock] & (1ULL << inblock))) { keep_out[num_to_keep++] = i; unsigned long long* p = mask_host + i * col_blocks; for (int j = nblock; j < col_blocks; j++) { remv[j] |= p[j]; } } } AT_CUDA_CHECK(hipGetLastError()); return order_t.index( {keep.narrow(/*dim=*/0, /*start=*/0, /*length=*/num_to_keep) .to(order_t.device(), keep.scalar_type())}); }
501b3694865e9d9ae1c99d7e2628aeb50b79bd93.cu
#include <ATen/ATen.h> #include <ATen/cuda/CUDAContext.h> #include <c10/cuda/CUDAGuard.h> #include <ATen/cuda/CUDAApplyUtils.cuh> #include "cuda_helpers.h" #include <iostream> #include <vector> int const threadsPerBlock = sizeof(unsigned long long) * 8; template <typename T> __device__ inline bool devIoU(T const* const a, T const* const b, const float threshold) { T left = max(a[0], b[0]), right = min(a[2], b[2]); T top = max(a[1], b[1]), bottom = min(a[3], b[3]); T width = max(right - left, (T)0), height = max(bottom - top, (T)0); T interS = width * height; T Sa = (a[2] - a[0]) * (a[3] - a[1]); T Sb = (b[2] - b[0]) * (b[3] - b[1]); return interS > threshold * (Sa + Sb - interS); } template <typename T> __global__ void nms_kernel( const int n_boxes, const float iou_threshold, const T* dev_boxes, unsigned long long* dev_mask) { const int row_start = blockIdx.y; const int col_start = blockIdx.x; if (row_start > col_start) return; const int row_size = min(n_boxes - row_start * threadsPerBlock, threadsPerBlock); const int col_size = min(n_boxes - col_start * threadsPerBlock, threadsPerBlock); __shared__ T block_boxes[threadsPerBlock * 4]; if (threadIdx.x < col_size) { block_boxes[threadIdx.x * 4 + 0] = dev_boxes[(threadsPerBlock * col_start + threadIdx.x) * 4 + 0]; block_boxes[threadIdx.x * 4 + 1] = dev_boxes[(threadsPerBlock * col_start + threadIdx.x) * 4 + 1]; block_boxes[threadIdx.x * 4 + 2] = dev_boxes[(threadsPerBlock * col_start + threadIdx.x) * 4 + 2]; block_boxes[threadIdx.x * 4 + 3] = dev_boxes[(threadsPerBlock * col_start + threadIdx.x) * 4 + 3]; } __syncthreads(); if (threadIdx.x < row_size) { const int cur_box_idx = threadsPerBlock * row_start + threadIdx.x; const T* cur_box = dev_boxes + cur_box_idx * 4; int i = 0; unsigned long long t = 0; int start = 0; if (row_start == col_start) { start = threadIdx.x + 1; } for (i = start; i < col_size; i++) { if (devIoU<T>(cur_box, block_boxes + i * 4, iou_threshold)) { t |= 1ULL << i; } } const int col_blocks = at::cuda::ATenCeilDiv(n_boxes, threadsPerBlock); dev_mask[cur_box_idx * col_blocks + col_start] = t; } } at::Tensor nms_cuda(const at::Tensor& dets, const at::Tensor& scores, float iou_threshold) { AT_ASSERTM(dets.type().is_cuda(), "dets must be a CUDA tensor"); AT_ASSERTM(scores.type().is_cuda(), "scores must be a CUDA tensor"); at::cuda::CUDAGuard device_guard(dets.device()); auto order_t = std::get<1>(scores.sort(0, /* descending=*/true)); auto dets_sorted = dets.index_select(0, order_t); int dets_num = dets.size(0); const int col_blocks = at::cuda::ATenCeilDiv(dets_num, threadsPerBlock); at::Tensor mask = at::empty({dets_num * col_blocks}, dets.options().dtype(at::kLong)); dim3 blocks(col_blocks, col_blocks); dim3 threads(threadsPerBlock); cudaStream_t stream = at::cuda::getCurrentCUDAStream(); AT_DISPATCH_FLOATING_TYPES_AND_HALF( dets_sorted.type(), "nms_kernel_cuda", [&] { nms_kernel<scalar_t><<<blocks, threads, 0, stream>>>( dets_num, iou_threshold, dets_sorted.data_ptr<scalar_t>(), (unsigned long long*)mask.data_ptr<int64_t>()); }); at::Tensor mask_cpu = mask.to(at::kCPU); unsigned long long* mask_host = (unsigned long long*)mask_cpu.data_ptr<int64_t>(); std::vector<unsigned long long> remv(col_blocks); memset(&remv[0], 0, sizeof(unsigned long long) * col_blocks); at::Tensor keep = at::empty({dets_num}, dets.options().dtype(at::kLong).device(at::kCPU)); int64_t* keep_out = keep.data_ptr<int64_t>(); int num_to_keep = 0; for (int i = 0; i < dets_num; i++) { int nblock = i / threadsPerBlock; int inblock = i % threadsPerBlock; if 
(!(remv[nblock] & (1ULL << inblock))) { keep_out[num_to_keep++] = i; unsigned long long* p = mask_host + i * col_blocks; for (int j = nblock; j < col_blocks; j++) { remv[j] |= p[j]; } } } AT_CUDA_CHECK(cudaGetLastError()); return order_t.index( {keep.narrow(/*dim=*/0, /*start=*/0, /*length=*/num_to_keep) .to(order_t.device(), keep.scalar_type())}); }
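In nms_kernel above, bit j of mask[i * col_blocks + col] records that box i (in descending score order) suppresses box col*64 + j. The host loop in nms_cuda then walks the boxes greedily and ORs rows of that mask into a running "removed" set. The sketch below restates that greedy pass on plain vectors, with hypothetical inputs and no ATen types, for readers who want to reuse the mask layout elsewhere; it mirrors, but is not copied from, the loop in the file.

#include <cstdint>
#include <vector>

// Greedy keep-list construction over the 64-bit suppression mask produced by an
// NMS kernel like the one above. `mask` has n_boxes * col_blocks words, boxes are
// assumed already sorted by score.
std::vector<int64_t> greedy_keep(const std::vector<uint64_t>& mask, int n_boxes) {
    const int threads_per_block = 64;   // sizeof(unsigned long long) * 8
    const int col_blocks = (n_boxes + threads_per_block - 1) / threads_per_block;
    std::vector<uint64_t> removed(col_blocks, 0);   // running OR of suppression bits
    std::vector<int64_t> keep;
    for (int i = 0; i < n_boxes; ++i) {
        int nblock = i / threads_per_block;
        int inblock = i % threads_per_block;
        if (!(removed[nblock] & (1ULL << inblock))) {        // box i survives
            keep.push_back(i);
            const uint64_t* row = mask.data() + static_cast<size_t>(i) * col_blocks;
            for (int j = nblock; j < col_blocks; ++j)         // mark everything box i suppresses
                removed[j] |= row[j];
        }
    }
    return keep;
}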
a2c7571eaaab1dada641e56440dabeeb9c2e8dfb.hip
// !!! This is a file automatically generated by hipify!!! #include<stdio.h> #include<stdlib.h> #include "Matrix_Test.h" #include "load_matrix.h" #include "dynamic_buffer.h" #include "spine.h" void Transitive_closure(const std::string &filename); void Matrix_Test(const std::string filename); void createStreams(); void createStreams(const int ID); void destroyStreams(); void destroyStreams(const int ID); #include "Transitive_Closure.inl" void createStreams(const int ID) { for(int i=0; i<NUM_STREAMS; i++) hipStreamCreate(&__multiStreams[ID][i]); } void destroyStreams(const int ID) { for(int i=0; i<NUM_STREAMS; i++) hipStreamDestroy(__multiStreams[ID][i]); } void createStreams() { for(int i=0; i<NUM_STREAMS; i++) hipStreamCreate(&__streams[i]); } void destroyStreams() { for(int i=0; i<NUM_STREAMS; i++) hipStreamDestroy(__streams[i]); } void Matrix_Test(const std::string filename, int table1_size, int table2_size, int ps1, int ps2, int q) { #if(MULTI_GPU == 1) //FillTests(filename); #else //Transitive_closure(filename, table1_size, table2_size, ps1, ps2, q); //Transitive_closure(filename, table1_size); #endif } //////////////////////////////////////////////////////////////////////////////// // Parse input file and run test //////////////////////////////////////////////////////////////////////////////// void runTest(int argc, char** argv) { if(argc != 4) { fprintf(stderr, "Invalid input...\n"); //fprintf(stderr, "Usage: TC <filename template> table_size1 table_size2 partiiton_size1 partition_size2 query_value\n"); fprintf(stderr, "Usage: TC <input .csv file> tuple_count\n"); exit(1); } std::string filename(argv[1]); Transitive_closure(filename, atoi(argv[2]), atoi(argv[3])); //Transitive_closure(filename, atoi(argv[2]), atoi(argv[3]), atoi(argv[4]), atoi(argv[5]), atoi(argv[6])); } int main(int argc, char **argv) { fprintf(stderr, "TEST START\n"); runTest(argc, argv); fprintf(stderr, "TEST COMPLETE\n"); return 0; }
a2c7571eaaab1dada641e56440dabeeb9c2e8dfb.cu
#include<stdio.h> #include<stdlib.h> #include "Matrix_Test.h" #include "load_matrix.h" #include "dynamic_buffer.h" #include "spine.h" void Transitive_closure(const std::string &filename); void Matrix_Test(const std::string filename); void createStreams(); void createStreams(const int ID); void destroyStreams(); void destroyStreams(const int ID); #include "Transitive_Closure.inl" void createStreams(const int ID) { for(int i=0; i<NUM_STREAMS; i++) cudaStreamCreate(&__multiStreams[ID][i]); } void destroyStreams(const int ID) { for(int i=0; i<NUM_STREAMS; i++) cudaStreamDestroy(__multiStreams[ID][i]); } void createStreams() { for(int i=0; i<NUM_STREAMS; i++) cudaStreamCreate(&__streams[i]); } void destroyStreams() { for(int i=0; i<NUM_STREAMS; i++) cudaStreamDestroy(__streams[i]); } void Matrix_Test(const std::string filename, int table1_size, int table2_size, int ps1, int ps2, int q) { #if(MULTI_GPU == 1) //FillTests(filename); #else //Transitive_closure(filename, table1_size, table2_size, ps1, ps2, q); //Transitive_closure(filename, table1_size); #endif } //////////////////////////////////////////////////////////////////////////////// // Parse input file and run test //////////////////////////////////////////////////////////////////////////////// void runTest(int argc, char** argv) { if(argc != 4) { fprintf(stderr, "Invalid input...\n"); //fprintf(stderr, "Usage: TC <filename template> table_size1 table_size2 partiiton_size1 partition_size2 query_value\n"); fprintf(stderr, "Usage: TC <input .csv file> tuple_count\n"); exit(1); } std::string filename(argv[1]); Transitive_closure(filename, atoi(argv[2]), atoi(argv[3])); //Transitive_closure(filename, atoi(argv[2]), atoi(argv[3]), atoi(argv[4]), atoi(argv[5]), atoi(argv[6])); } int main(int argc, char **argv) { fprintf(stderr, "TEST START\n"); runTest(argc, argv); fprintf(stderr, "TEST COMPLETE\n"); return 0; }
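The Matrix_Test file above only creates and destroys a pool of NUM_STREAMS streams; the code that actually uses them (Transitive_closure, in Transitive_Closure.inl) is not included here. As a rough illustration of why such a pool exists, here is a hypothetical chunked pipeline that overlaps transfers and kernels across streams. The kernel name, data layout, and chunking are invented, and real copy/compute overlap additionally requires pinned host memory (cudaMallocHost).

#include <cuda_runtime.h>

#define NUM_STREAMS 4

// Hypothetical per-chunk kernel; stands in for whatever work the real code enqueues.
__global__ void process_chunk(float* chunk, int n) {
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n) chunk[i] += 1.0f;
}

// Split the data into NUM_STREAMS chunks and give each chunk its own
// copy-in / kernel / copy-out chain, so work in different streams can overlap.
void process_async(float* h_data, float* d_data, int n, cudaStream_t* streams) {
    const int chunk = (n + NUM_STREAMS - 1) / NUM_STREAMS;
    for (int s = 0; s < NUM_STREAMS; ++s) {
        int offset = s * chunk;
        int count = (offset + chunk <= n) ? chunk : (n - offset);
        if (count <= 0) break;
        cudaMemcpyAsync(d_data + offset, h_data + offset, count * sizeof(float),
                        cudaMemcpyHostToDevice, streams[s]);
        process_chunk<<<(count + 255) / 256, 256, 0, streams[s]>>>(d_data + offset, count);
        cudaMemcpyAsync(h_data + offset, d_data + offset, count * sizeof(float),
                        cudaMemcpyDeviceToHost, streams[s]);
    }
    for (int s = 0; s < NUM_STREAMS; ++s) cudaStreamSynchronize(streams[s]);
}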
4a20d1f46d14b3fe5b78f8c279cb313251dea3c2.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" //xfail:TIMEOUT //--gridDim=64 --blockDim=128 --warp-sync=32 #include "common.h" template <unsigned int blockSize, bool nIsPow2> __global__ void reduceMultiPass(const float *g_idata, float *g_odata, unsigned int n); template __global__ void reduceMultiPass<128, true>(const float *g_idata, float *g_odata, unsigned int n); template <unsigned int blockSize, bool nIsPow2> __global__ void reduceMultiPass(const float *g_idata, float *g_odata, unsigned int n) { reduceBlocks<blockSize, nIsPow2>(g_idata, g_odata, n); }
4a20d1f46d14b3fe5b78f8c279cb313251dea3c2.cu
//xfail:TIMEOUT //--gridDim=64 --blockDim=128 --warp-sync=32 #include "common.h" template <unsigned int blockSize, bool nIsPow2> __global__ void reduceMultiPass(const float *g_idata, float *g_odata, unsigned int n); template __global__ void reduceMultiPass<128, true>(const float *g_idata, float *g_odata, unsigned int n); template <unsigned int blockSize, bool nIsPow2> __global__ void reduceMultiPass(const float *g_idata, float *g_odata, unsigned int n) { reduceBlocks<blockSize, nIsPow2>(g_idata, g_odata, n); }
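The reduceMultiPass pair above is a GPUVerify test stub: the //--gridDim and --blockDim lines are harness annotations, and the kernel only forwards to reduceBlocks<blockSize, nIsPow2>() from common.h, which is not shown. For context, the sketch below is a generic shared-memory block reduction of the kind such a helper usually implements; it is a reference version written for this note, not the project's reduceBlocks.

#include <cuda_runtime.h>

// Each block accumulates a grid-stride slice of g_idata into shared memory,
// tree-reduces it, and writes one partial sum per block to g_odata; a second
// pass (or a CPU sum over gridDim.x values) finishes the reduction.
template <unsigned int blockSize>
__global__ void reduce_partial(const float* g_idata, float* g_odata, unsigned int n) {
    __shared__ float sdata[blockSize];
    unsigned int tid = threadIdx.x;
    unsigned int i = blockIdx.x * blockSize * 2 + tid;
    unsigned int gridSize = blockSize * 2 * gridDim.x;

    float sum = 0.0f;
    while (i < n) {                                   // two loads per iteration
        sum += g_idata[i];
        if (i + blockSize < n) sum += g_idata[i + blockSize];
        i += gridSize;
    }
    sdata[tid] = sum;
    __syncthreads();

    for (unsigned int s = blockSize / 2; s > 0; s >>= 1) {   // tree reduction
        if (tid < s) sdata[tid] += sdata[tid + s];
        __syncthreads();
    }
    if (tid == 0) g_odata[blockIdx.x] = sdata[0];
}

template __global__ void reduce_partial<128>(const float*, float*, unsigned int);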
410284b193a3aa31ec024983733adc9c5a9ac9f0.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "common/device_intrinsics.h" #include "imgproc/segmentation/foreground_permutohedral_deduplicate.h" #include "imgproc/segmentation/crf_config.h" #include "imgproc/segmentation/crf_common.h" #include "imgproc/segmentation/permutohedral_common.h" #include <device_launch_parameters.h> #include <hip/device_functions.h> #include <math_functions.h> namespace surfelwarp { namespace device { enum { num_threads = 32 }; __global__ void foregroundDeduplicateHashedLatticeKernel( const unsigned* compacted_hash_offset, unsigned* sorted_lattice_index, const LatticeCoordKey<image_permutohedral_dim>* lattice_coord_key_array, ForegroundPermutohedralLatticePerHash* compacted_lattice_record, unsigned* global_duplicate_flag, unsigned* local_reduce_buffer ) { //Do not need check the range const auto hashed_key_index = blockIdx.x; //The begin and end in sorted_lattice_index const auto begin = compacted_hash_offset[hashed_key_index + 0]; const auto end = compacted_hash_offset[hashed_key_index + 1]; //The shared lattice coord is init by the first threads __shared__ LatticeCoordKey<image_permutohedral_dim> unique_lattice[max_lattice_per_hash]; __shared__ short num_unique_lattice_shared; if (threadIdx.x == 0) { for (auto i = 0; i < max_lattice_per_hash; i++) { unique_lattice[i].set_null(); } } //Sync here __syncthreads(); //Shared lattice for cooperative loading __shared__ LatticeCoordKey<image_permutohedral_dim> lattice[num_threads]; //Only thread 0 hold a valid value short num_unique_lattice = 0; //First loop, load all possible lattice in this range, check if unique for (auto i = begin; i < end; i += num_threads) { //Cooperative loading const auto thread_i = i + threadIdx.x; if (thread_i < end) { const auto index = sorted_lattice_index[thread_i]; lattice[threadIdx.x] = lattice_coord_key_array[index]; } else { lattice[threadIdx.x].set_null(); } //Sync here for cooperative loading __syncthreads(); //Let one thread check the uniqueness if (threadIdx.x == 0) { for (auto j = 0; j < num_threads; j++) { const auto lattice_for_check = lattice[j]; //bool null_lattice = lattice_for_check.is_null(); bool new_lattice = !(lattice_for_check.is_null()); //Checking loop for (auto k = 0; k < min(num_unique_lattice, max_lattice_per_hash); k++) { const auto existing_lattice = unique_lattice[k]; if (existing_lattice.less_than(lattice_for_check) == 0) { new_lattice = false; break; } } //Update it if (new_lattice) { unique_lattice[num_unique_lattice] = lattice_for_check; num_unique_lattice++; } } //End of checking loop } } //End of checking loop if (threadIdx.x == 0) { num_unique_lattice_shared = num_unique_lattice; } __syncthreads(); //Store the result and return if(num_unique_lattice_shared == 1) { //Construct the result ForegroundPermutohedralLatticePerHash lattice_record; lattice_record.num_lattice = 1; lattice_record.lattice_coord_key[0] = unique_lattice[0]; lattice_record.lattice_coord_key[1].set_null(); lattice_record.lattice_coord_offset[0].x = begin; lattice_record.lattice_coord_offset[0].y = end; if(threadIdx.x == 0) { compacted_lattice_record[hashed_key_index] = lattice_record; } return; //All threads will return } //At least 2 elements, set the flag *global_duplicate_flag = 1; __shared__ int index_offset; if(threadIdx.x == 0) { index_offset = 0; } //Construct the result ForegroundPermutohedralLatticePerHash lattice_record; lattice_record.num_lattice = num_unique_lattice_shared; //The main deduplicate loop for(auto lattice_idx = 
0; lattice_idx < num_unique_lattice_shared; lattice_idx++) { //The lattice for this index const LatticeCoordKey<image_permutohedral_dim> curr_unique_lattice = unique_lattice[lattice_idx]; //Store input record lattice_record.lattice_coord_key[lattice_idx] = curr_unique_lattice; lattice_record.lattice_coord_offset[lattice_idx].x = begin + index_offset; //The main processing loop for(auto i = begin; i < end; i += num_threads) { //Cooperative loading const auto thread_i = i + threadIdx.x; LatticeCoordKey<image_permutohedral_dim> lattice_thread; int lattice_matched = 0; if (thread_i < end) { const auto index = sorted_lattice_index[thread_i]; lattice_thread = lattice_coord_key_array[index]; lattice_matched = (curr_unique_lattice.less_than(lattice_thread) == 0); } //Do a warp scan on matched int scanned_matched = lattice_matched; scanned_matched = warp_scan(scanned_matched); //Store it if(lattice_matched) { const auto thread_offset = begin + index_offset + scanned_matched - 1; local_reduce_buffer[thread_offset] = sorted_lattice_index[thread_i]; } //Increase on the global offset if(threadIdx.x == 31) { index_offset += scanned_matched; } __syncthreads(); } //Store the result lattice_record.lattice_coord_offset[lattice_idx].y = begin + index_offset; } //Copy the local reduce buffer back to sorted buffer for(auto i = begin; i < end; i += num_threads) { //Cooperative loading const auto thread_i = i + threadIdx.x; if(thread_i < end) { sorted_lattice_index[thread_i] = local_reduce_buffer[thread_i]; } } } }; /* End of namespace device */ }; /* End of namespace surfelwarp */ void surfelwarp::foregroundDeduplicateHashedLattice( const DeviceArray<unsigned>& compacted_hash_offset, DeviceArray<unsigned>& sorted_lattice_index, const DeviceArray<LatticeCoordKey<image_permutohedral_dim>>& lattice_coord_key_array, DeviceArray<ForegroundPermutohedralLatticePerHash>& compacted_lattice_record, unsigned* d_duplicate_flag, DeviceArray<unsigned>& deduplicate_reduce_buffer, hipStream_t stream ) { dim3 blk(32); dim3 grid(compacted_hash_offset.size() - 1); hipLaunchKernelGGL(( device::foregroundDeduplicateHashedLatticeKernel), dim3(grid), dim3(blk), 0, stream, compacted_hash_offset, sorted_lattice_index, lattice_coord_key_array, compacted_lattice_record, d_duplicate_flag, deduplicate_reduce_buffer ); //Sync and check error for sorting #if defined(CUDA_DEBUG_SYNC_CHECK) cudaSafeCall(hipStreamSynchronize(stream)); cudaSafeCall(hipGetLastError()); #endif }
410284b193a3aa31ec024983733adc9c5a9ac9f0.cu
#include "common/device_intrinsics.h" #include "imgproc/segmentation/foreground_permutohedral_deduplicate.h" #include "imgproc/segmentation/crf_config.h" #include "imgproc/segmentation/crf_common.h" #include "imgproc/segmentation/permutohedral_common.h" #include <device_launch_parameters.h> #include <device_functions.h> #include <math_functions.h> namespace surfelwarp { namespace device { enum { num_threads = 32 }; __global__ void foregroundDeduplicateHashedLatticeKernel( const unsigned* compacted_hash_offset, unsigned* sorted_lattice_index, const LatticeCoordKey<image_permutohedral_dim>* lattice_coord_key_array, ForegroundPermutohedralLatticePerHash* compacted_lattice_record, unsigned* global_duplicate_flag, unsigned* local_reduce_buffer ) { //Do not need check the range const auto hashed_key_index = blockIdx.x; //The begin and end in sorted_lattice_index const auto begin = compacted_hash_offset[hashed_key_index + 0]; const auto end = compacted_hash_offset[hashed_key_index + 1]; //The shared lattice coord is init by the first threads __shared__ LatticeCoordKey<image_permutohedral_dim> unique_lattice[max_lattice_per_hash]; __shared__ short num_unique_lattice_shared; if (threadIdx.x == 0) { for (auto i = 0; i < max_lattice_per_hash; i++) { unique_lattice[i].set_null(); } } //Sync here __syncthreads(); //Shared lattice for cooperative loading __shared__ LatticeCoordKey<image_permutohedral_dim> lattice[num_threads]; //Only thread 0 hold a valid value short num_unique_lattice = 0; //First loop, load all possible lattice in this range, check if unique for (auto i = begin; i < end; i += num_threads) { //Cooperative loading const auto thread_i = i + threadIdx.x; if (thread_i < end) { const auto index = sorted_lattice_index[thread_i]; lattice[threadIdx.x] = lattice_coord_key_array[index]; } else { lattice[threadIdx.x].set_null(); } //Sync here for cooperative loading __syncthreads(); //Let one thread check the uniqueness if (threadIdx.x == 0) { for (auto j = 0; j < num_threads; j++) { const auto lattice_for_check = lattice[j]; //bool null_lattice = lattice_for_check.is_null(); bool new_lattice = !(lattice_for_check.is_null()); //Checking loop for (auto k = 0; k < min(num_unique_lattice, max_lattice_per_hash); k++) { const auto existing_lattice = unique_lattice[k]; if (existing_lattice.less_than(lattice_for_check) == 0) { new_lattice = false; break; } } //Update it if (new_lattice) { unique_lattice[num_unique_lattice] = lattice_for_check; num_unique_lattice++; } } //End of checking loop } } //End of checking loop if (threadIdx.x == 0) { num_unique_lattice_shared = num_unique_lattice; } __syncthreads(); //Store the result and return if(num_unique_lattice_shared == 1) { //Construct the result ForegroundPermutohedralLatticePerHash lattice_record; lattice_record.num_lattice = 1; lattice_record.lattice_coord_key[0] = unique_lattice[0]; lattice_record.lattice_coord_key[1].set_null(); lattice_record.lattice_coord_offset[0].x = begin; lattice_record.lattice_coord_offset[0].y = end; if(threadIdx.x == 0) { compacted_lattice_record[hashed_key_index] = lattice_record; } return; //All threads will return } //At least 2 elements, set the flag *global_duplicate_flag = 1; __shared__ int index_offset; if(threadIdx.x == 0) { index_offset = 0; } //Construct the result ForegroundPermutohedralLatticePerHash lattice_record; lattice_record.num_lattice = num_unique_lattice_shared; //The main deduplicate loop for(auto lattice_idx = 0; lattice_idx < num_unique_lattice_shared; lattice_idx++) { //The lattice for this index 
const LatticeCoordKey<image_permutohedral_dim> curr_unique_lattice = unique_lattice[lattice_idx]; //Store input record lattice_record.lattice_coord_key[lattice_idx] = curr_unique_lattice; lattice_record.lattice_coord_offset[lattice_idx].x = begin + index_offset; //The main processing loop for(auto i = begin; i < end; i += num_threads) { //Cooperative loading const auto thread_i = i + threadIdx.x; LatticeCoordKey<image_permutohedral_dim> lattice_thread; int lattice_matched = 0; if (thread_i < end) { const auto index = sorted_lattice_index[thread_i]; lattice_thread = lattice_coord_key_array[index]; lattice_matched = (curr_unique_lattice.less_than(lattice_thread) == 0); } //Do a warp scan on matched int scanned_matched = lattice_matched; scanned_matched = warp_scan(scanned_matched); //Store it if(lattice_matched) { const auto thread_offset = begin + index_offset + scanned_matched - 1; local_reduce_buffer[thread_offset] = sorted_lattice_index[thread_i]; } //Increase on the global offset if(threadIdx.x == 31) { index_offset += scanned_matched; } __syncthreads(); } //Store the result lattice_record.lattice_coord_offset[lattice_idx].y = begin + index_offset; } //Copy the local reduce buffer back to sorted buffer for(auto i = begin; i < end; i += num_threads) { //Cooperative loading const auto thread_i = i + threadIdx.x; if(thread_i < end) { sorted_lattice_index[thread_i] = local_reduce_buffer[thread_i]; } } } }; /* End of namespace device */ }; /* End of namespace surfelwarp */ void surfelwarp::foregroundDeduplicateHashedLattice( const DeviceArray<unsigned>& compacted_hash_offset, DeviceArray<unsigned>& sorted_lattice_index, const DeviceArray<LatticeCoordKey<image_permutohedral_dim>>& lattice_coord_key_array, DeviceArray<ForegroundPermutohedralLatticePerHash>& compacted_lattice_record, unsigned* d_duplicate_flag, DeviceArray<unsigned>& deduplicate_reduce_buffer, cudaStream_t stream ) { dim3 blk(32); dim3 grid(compacted_hash_offset.size() - 1); device::foregroundDeduplicateHashedLatticeKernel<<<grid, blk, 0, stream>>>( compacted_hash_offset, sorted_lattice_index, lattice_coord_key_array, compacted_lattice_record, d_duplicate_flag, deduplicate_reduce_buffer ); //Sync and check error for sorting #if defined(CUDA_DEBUG_SYNC_CHECK) cudaSafeCall(cudaStreamSynchronize(stream)); cudaSafeCall(cudaGetLastError()); #endif }
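The deduplication kernel above compacts matching lattice indices with warp_scan() from common/device_intrinsics.h, whose definition is not included here. A plausible stand-in is an inclusive warp prefix sum over the 0/1 match flags, so each lane learns its write offset within the 32-thread block; the sketch below uses CUDA 9+ shuffle intrinsics, while the project's actual helper may differ (for example, using the older __shfl_up).

// Inclusive prefix sum across one warp. Fed the lattice_matched flag (0 or 1),
// lane i receives the count of matches in lanes 0..i, which is exactly the
// 1-based offset the compaction step above stores into local_reduce_buffer.
__device__ inline int warp_inclusive_scan(int value) {
    const unsigned full_mask = 0xffffffffu;
    for (int offset = 1; offset < 32; offset <<= 1) {
        int up = __shfl_up_sync(full_mask, value, offset);
        if ((threadIdx.x & 31) >= offset) value += up;   // lanes below `offset` keep their own value
    }
    return value;
}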
0bbc550c29dd5a9243c919d996dc503a5c5645f2.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"

#define GRID_HEIGHT (MATRIX_SIZE / BLOCK_SIZE)
#define GRID_WIDTH (GRID_HEIGHT)

__global__ void matrixMulCUDA(float *C, float *A, float *B)
{
    int bx = blockIdx.x;
    int by = blockIdx.y;
    int tx = threadIdx.x;
    int ty = threadIdx.y;

    int aBegin = MATRIX_SIZE * BLOCK_SIZE * by;
    int aEnd = aBegin + MATRIX_SIZE - 1;
    int aStep = BLOCK_SIZE;
    int bBegin = BLOCK_SIZE * bx;
    int bStep = BLOCK_SIZE * MATRIX_SIZE;

    float C_local = 0;

    // tiles the current computation works on
    __shared__ float As[BLOCK_SIZE][BLOCK_SIZE];
    __shared__ float Bs[BLOCK_SIZE][BLOCK_SIZE];

    // staging tiles into which the next blocks' data are loaded in parallel with the computation
    __shared__ float A_shared[BLOCK_SIZE][BLOCK_SIZE];
    __shared__ float B_shared[BLOCK_SIZE][BLOCK_SIZE];

    A_shared[ty][tx] = A[aBegin + MATRIX_SIZE * ty + tx];
    B_shared[ty][tx] = B[bBegin + MATRIX_SIZE * ty + tx];

    for (int a = aBegin, b = bBegin; a <= aEnd; a += aStep, b += bStep)
    {
        As[ty][tx] = A_shared[ty][tx];
        Bs[ty][tx] = B_shared[ty][tx];

        __syncthreads();

        //Load data into the threads' shared memory (needed for the next iteration)
        if (a + aStep <= aEnd)
        {
            A_shared[ty][tx] = A[a + aStep + MATRIX_SIZE * ty + tx];
            B_shared[ty][tx] = B[b + bStep + MATRIX_SIZE * ty + tx];
        }

#pragma unroll
        for (int k = 0; k < BLOCK_SIZE; ++k)
        {
            //Column times row
            //Accumulate the partial result
            C_local += As[ty][k] * Bs[k][tx];
        }

        __syncthreads();
    }

    //Write the result to memory
    int c = MATRIX_SIZE * BLOCK_SIZE * by + BLOCK_SIZE * bx;
    C[c + MATRIX_SIZE * ty + tx] = C_local;
}

// invocation
dim3 threads(BLOCK_SIZE, BLOCK_SIZE);
dim3 grid(GRID_WIDTH, GRID_HEIGHT);
hipLaunchKernelGGL(( matrixMulCUDA), dim3(grid), dim3(threads) , 0, 0, d_C, d_A, d_B);
0bbc550c29dd5a9243c919d996dc503a5c5645f2.cu
#define GRID_HEIGHT (MATRIX_SIZE / BLOCK_SIZE)
#define GRID_WIDTH (GRID_HEIGHT)

__global__ void matrixMulCUDA(float *C, float *A, float *B)
{
    int bx = blockIdx.x;
    int by = blockIdx.y;
    int tx = threadIdx.x;
    int ty = threadIdx.y;

    int aBegin = MATRIX_SIZE * BLOCK_SIZE * by;
    int aEnd = aBegin + MATRIX_SIZE - 1;
    int aStep = BLOCK_SIZE;
    int bBegin = BLOCK_SIZE * bx;
    int bStep = BLOCK_SIZE * MATRIX_SIZE;

    float C_local = 0;

    // tiles the current computation works on
    __shared__ float As[BLOCK_SIZE][BLOCK_SIZE];
    __shared__ float Bs[BLOCK_SIZE][BLOCK_SIZE];

    // staging tiles into which the next blocks' data are loaded in parallel with the computation
    __shared__ float A_shared[BLOCK_SIZE][BLOCK_SIZE];
    __shared__ float B_shared[BLOCK_SIZE][BLOCK_SIZE];

    A_shared[ty][tx] = A[aBegin + MATRIX_SIZE * ty + tx];
    B_shared[ty][tx] = B[bBegin + MATRIX_SIZE * ty + tx];

    for (int a = aBegin, b = bBegin; a <= aEnd; a += aStep, b += bStep)
    {
        As[ty][tx] = A_shared[ty][tx];
        Bs[ty][tx] = B_shared[ty][tx];

        __syncthreads();

        //Load data into the threads' shared memory (needed for the next iteration)
        if (a + aStep <= aEnd)
        {
            A_shared[ty][tx] = A[a + aStep + MATRIX_SIZE * ty + tx];
            B_shared[ty][tx] = B[b + bStep + MATRIX_SIZE * ty + tx];
        }

#pragma unroll
        for (int k = 0; k < BLOCK_SIZE; ++k)
        {
            //Column times row
            //Accumulate the partial result
            C_local += As[ty][k] * Bs[k][tx];
        }

        __syncthreads();
    }

    //Write the result to memory
    int c = MATRIX_SIZE * BLOCK_SIZE * by + BLOCK_SIZE * bx;
    C[c + MATRIX_SIZE * ty + tx] = C_local;
}

// invocation
dim3 threads(BLOCK_SIZE, BLOCK_SIZE);
dim3 grid(GRID_WIDTH, GRID_HEIGHT);
matrixMulCUDA<<< grid, threads >>>(d_C, d_A, d_B);
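Neither file above defines MATRIX_SIZE, BLOCK_SIZE, or the device buffers d_A, d_B, d_C used by the launch snippet, so the pair is not compilable on its own. A minimal host-side sketch, assuming illustrative values (BLOCK_SIZE 16, MATRIX_SIZE 512), row-major float storage, and that these defines appear before the kernel source above in the same translation unit; it is not taken from the original sources:

// Hypothetical driver for matrixMulCUDA; BLOCK_SIZE and MATRIX_SIZE values are assumptions.
#include <cuda_runtime.h>
#include <cstdio>
#include <vector>

#define BLOCK_SIZE 16
#define MATRIX_SIZE 512   // must be a multiple of BLOCK_SIZE for this kernel
// GRID_WIDTH / GRID_HEIGHT and matrixMulCUDA come from the kernel file above,
// which must be compiled after these defines in the same translation unit.

int main()
{
    const size_t bytes = (size_t)MATRIX_SIZE * MATRIX_SIZE * sizeof(float);
    std::vector<float> h_A(MATRIX_SIZE * MATRIX_SIZE, 1.0f);
    std::vector<float> h_B(MATRIX_SIZE * MATRIX_SIZE, 2.0f);
    std::vector<float> h_C(MATRIX_SIZE * MATRIX_SIZE, 0.0f);

    float *d_A, *d_B, *d_C;
    cudaMalloc(&d_A, bytes);
    cudaMalloc(&d_B, bytes);
    cudaMalloc(&d_C, bytes);
    cudaMemcpy(d_A, h_A.data(), bytes, cudaMemcpyHostToDevice);
    cudaMemcpy(d_B, h_B.data(), bytes, cudaMemcpyHostToDevice);

    dim3 threads(BLOCK_SIZE, BLOCK_SIZE);
    dim3 grid(GRID_WIDTH, GRID_HEIGHT);
    matrixMulCUDA<<<grid, threads>>>(d_C, d_A, d_B);
    cudaDeviceSynchronize();

    cudaMemcpy(h_C.data(), d_C, bytes, cudaMemcpyDeviceToHost);
    // With the fill values above, every entry of C should equal MATRIX_SIZE * 2.0f.
    printf("C[0] = %f\n", h_C[0]);

    cudaFree(d_A);
    cudaFree(d_B);
    cudaFree(d_C);
    return 0;
}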
fdd2cfb14e8021127286f80dc44263a6ef87da83.hip
// !!! This is a file automatically generated by hipify!!! #include <hip/hip_runtime.h> //#include <cutil.h> #include <iostream> #include <ostream> #include <fstream> //#include "/home/yusuke/NVIDIA_GPU_Computing_SDK/C/common/inc/cutil.h" using namespace std; #define CASENAME "Stest" #define BLOCKSIZEX 128 #define BLOCKSIZEY 1 #define BLOCKSIZEZ 1 #define BLOCKSIZELRX 64 #define BLOCKSIZELRY 1 #define BLOCKSIZELRZ 1 #define XDIM 128 #define YDIM 160 #define ZDIM 1 #define TMAX 4000 #define STARTF 0 #define OBSTR1 4.f #define OBSTX1 63.5f #define OBSTY1 47.5f #define OBSTZ1 31.5f #define OBSTR2 4.f #define OBSTX2 63.5f #define OBSTY2 31.5f #define OBSTZ2 31.5f #define LRFACTOR 0.5f #define LRLEVEL 2 #define LRX0 31.75f //minimum x coord of LR #define XLRDIM 128 //number of nodes in x #define LRY0 23.75f #define YLRDIM 128 #define LRZ0 -0.25f #define ZLRDIM 2 //#define LRFACTOR 0.25f //#define LRLEVEL 4 //#define LRX0 31.625f //minimum x coord of LR //#define XLRDIM 256 //number of nodes in x //#define LRY0 23.625f //#define YLRDIM 256 //#define LRZ0 -0.375f //#define ZLRDIM 4 #define RE 100.f//2000.f//100.f; #define UMAX 0.08f #define METHOD "SINGLE" //SINGLE,HYB,TEXT,SHARED,CACHE #define REFINEMENT "YES" //YES,NO #define SmagLES "NO" //YES,NO #define MODEL "MRT" //BGK,MRT,STREAM #define ZPERIODIC "YES" #define VELAV "YES" #define START_VELAV 10000 #define START_VELFLUC 100000 #define CS 0.01f //#define CHARLENGTH = XDIM-2.f; //#define BLOCKSIZE 16; //int const XDIM = 32; //int const YDIM = 32; #include <sys/time.h> #include <time.h> /* Image List: 0 fluid 1 BB 2 3 DirichletWest(simple) 10 BB(force) 13 DirichletWest_Reg 14 NeumannEast_Reg 15 DirichletNorth_Reg 16 DirichletSouth_Reg 21 ysymmetry_top 22 ysymmetry_bot 23 zsymmetry_top 24 zsymmetry_bot 25 xsymmetry_top 26 xsymmetry_bot */ inline __device__ int ImageFcn(float x, float y, float z){ // if(((x-OBSTX1)*(x-OBSTX1)+(y-OBSTY1)*(y-OBSTY1))<OBSTR1*OBSTR1) // return 10; // else if(((x-OBSTX2)*(x-OBSTX2)+(y-OBSTY2)*(y-OBSTY2))<OBSTR2*OBSTR2) // return 10; //if(((x-OBSTX)*(x-OBSTX)+(y-OBSTY1)*(y-OBSTY1))<OBSTR1*OBSTR1) // if(((x-OBSTX1)*(x-OBSTX1)+(y-OBSTY1)*(y-OBSTY1)+(z-OBSTZ1)*(z-OBSTZ1))<OBSTR1*OBSTR1) // { // return 10; // } // else // //if(y < 0.1f || z < 0.1f || (XDIM-x) < 0.1f || (YDIM-y) < 0.1f || (ZDIM-z) < 0.1f) // if(y < 17.5f || z < 17.5f || y > 46.5f || z > 46.5f) // return 1; // else if(x < 17.5f) // return 13; // else if(x > 78.5f) // return 14; // else if(abs(x-OBSTX1) < OBSTR1 && abs(y-OBSTY1) < OBSTR1) return 10; else return 0; } inline __device__ int ImageFcn(int x, int y, int z){ int value = 0; //Cylinder // if(((x-OBSTX1)*(x-OBSTX1)+(y-OBSTY1)*(y-OBSTY1))<OBSTR1*OBSTR1) // value = 10; // else if(((x-OBSTX2)*(x-OBSTX2)+(y-OBSTY2)*(y-OBSTY2))<OBSTR2*OBSTR2) // value = 10; //Sphere // if(((x-OBSTX1)*(x-OBSTX1)+(y-OBSTY1)*(y-OBSTY1)+(z-OBSTZ1)*(z-OBSTZ1))<OBSTR1*OBSTR1) // { //// if(z == 0 || z == ZDIM-1) //// return 1; //// else // return 10; // } // if(z == 0) // value = 0; // else if(z == ZDIM-1) // value = 0; if(abs(x-OBSTX1) < OBSTR1 && abs(y-OBSTY1) < OBSTR1) value = 10; else if(y == 0) value = 200;//22; else if(y == YDIM-1) value = 100; else if(x == 0) value = 26; else if(x == XDIM-1) value = 25; else if(z == 0) value = 0; else if(z == ZDIM-1) value = 0; return value; //Lid Driven Cavity // if(x == XDIM-1 || y == 0 || y == YDIM-1 || z == 0 || z == ZDIM-1) // return 1; // else if(x == XDIM-2 || y == 1 || y == YDIM-2 || z == 1 || z == ZDIM-2) // return 1; // else if(x == 0) // return 1; // else if(x == 1) // return 
53; // else // return 0; } inline __device__ float PoisProf (float x){ float radius = (YDIM-1-1)*0.5f; float result = -1.0f*(((1.0f-(x-0.5f)/radius))*((1.0f-(x-0.5f)/radius))-1.0f); return (result); // return 1.f; } __device__ void DirichletWest(float& f0, float& f1, float& f2, float& f3 , float& f4 , float& f5 , float& f6 , float& f7 , float& f8 , float& f9, float& f10, float& f11, float& f12, float& f13, float& f14, float& f15, float& f16, float& f17, float& f18, int y, int z) { // if(y == 0){ // f2 = f4; // f6 = f7; // f11 = f13; // f16 = f18; // } // else if(y == YDIM-1){ // f4 = f2; // f7 = f6; // f13 = f11; // f18 = f16; // } // if(z == 0){ // f9 = f14; // f10 = f15; // f11 = f16; // f12 = f17; // f13 = f18; // } // else if(z == ZDIM-1){ // f14 = f9; // f15 = f10; // f16 = f11; // f17 = f12; // f18 = f13; // } if(y == 0 && z == 0){ f2 = f4; f13=f18; f11=f18; f16=f18; f6 =f7; f9 =f14; f12=f17; } else if(y == 0 && z == ZDIM-1){ f4 = f2; f11=f13; f18=f13; f16=f13; f6 =f7; f14=f9; f17=f12; } else if(y == YDIM-1 && z == 0){ f4 = f2; f11=f16; f18=f16; f13=f16; f7 =f6; f9 =f14; f12=f17; } else if(y == YDIM-1 && z == ZDIM-1){ f4 = f2; f16=f11; f18=f11; f13=f11; f7 =f6; f14=f9; f17=f12; } else{ if(y == 0){ f2 = f4; f11=f13; f16=f18; f8 = f5; } else if(y == YDIM-1){ f4=f2 ; f13=f11; f18=f16; f5=f8 ; } if(z == 0){ f9 = f14; f10 = f15; f11 = f16; f13 = f18; } else if(z == ZDIM-1){ f14 = f9; f15 = f10; f16 = f11; f18 = f13; } } float u,v,w;//,rho; u = UMAX;//*PoisProf(zcoord)*1.5; v = 0.0f; w = 0.0f; // float rho = u+(f0+f2+f4+f9+f11+f13+f14+f16+f18+2.0f*(f3+f6+f7+f12+f17)); //D2Q9i // float usqr = u*u+v*v+w*w; f1 = fma(0.0555555556f,6.0f*u,f3);//0.0555555556f*(6.0f*u)+f3;//-0.0555555556f*(rho-3.0f*u+4.5f*u*u-1.5f*usqr);; f5 = fma(0.0277777778f,6.0f*(u+v),f7 );// -0.0277777778f*(rho+3.0f*(-u-v)+4.5f*(-u-v)*(-u-v)-1.5f*usqr); f8 = fma(0.0277777778f,6.0f*(u-v),f6 );// -0.0277777778f*(rho+3.0f*(-u+v)+4.5f*(-u+v)*(-u+v)-1.5f*usqr); f10= fma(0.0277777778f,6.0f*(u+w),f17);//-0.0277777778f*(rho+3.0f*(-u-w)+4.5f*(-u-w)*(-u-w)-1.5f*usqr); f15= fma(0.0277777778f,6.0f*(u-w),f12);//-0.0277777778f*(rho+3.0f*(-u+w)+4.5f*(-u+w)*(-u+w)-1.5f*usqr); //// f0 = 1.0f/3.0f*(rho-1.5f*usqr); // f1 = 1.0f/18.0f*(rho+3.0f*u+4.5f*u*u-1.5f*usqr); //// f2 = 1.0f/18.0f*(rho+3.0f*v+4.5f*v*v-1.5f*usqr); //// f3 = 1.0f/18.0f*(rho-3.0f*u+4.5f*u*u-1.5f*usqr); //// f4 = 1.0f/18.0f*(rho-3.0f*v+4.5f*v*v-1.5f*usqr); // f5 = 1.0f/36.0f*(rho+3.0f*(u+v)+4.5f*(u+v)*(u+v)-1.5f*usqr); //// f6 = 1.0f/36.0f*(rho+3.0f*(-u+v)+4.5f*(-u+v)*(-u+v)-1.5f*usqr); //// f7 = 1.0f/36.0f*(rho+3.0f*(-u-v)+4.5f*(-u-v)*(-u-v)-1.5f*usqr); // f8 = 1.0f/36.0f*(rho+3.0f*(u-v)+4.5f*(u-v)*(u-v)-1.5f*usqr); //// f9 = 1.0f/18.0f*(rho+3.0f*w+4.5f*w*w-1.5f*usqr); // f10= 1.0f/36.0f*(rho+3.0f*(u+w)+4.5f*(u+w)*(u+w)-1.5f*usqr); //// f11= 1.0f/36.0f*(rho+3.0f*(v+w)+4.5f*(v+w)*(u+w)-1.5f*usqr); //// f12= 1.0f/36.0f*(rho+3.0f*(-u+w)+4.5f*(-u+w)*(-u+w)-1.5f*usqr); //// f13= 1.0f/36.0f*(rho+3.0f*(-v+w)+4.5f*(-v+w)*(u+w)-1.5f*usqr); //// f14= 1.0f/18.0f*(rho-3.0f*w+4.5f*w*w-1.5f*usqr); // f15= 1.0f/36.0f*(rho+3.0f*(u-w)+4.5f*(u-w)*(u-w)-1.5f*usqr); //// f16= 1.0f/36.0f*(rho+3.0f*(v-w)+4.5f*(v-w)*(v-w)-1.5f*usqr); //// f17= 1.0f/36.0f*(rho+3.0f*(-u-w)+4.5f*(-u-w)*(-u-w)-1.5f*usqr); //// f18= 1.0f/36.0f*(rho+3.0f*(-v-w)+4.5f*(-v-w)*(-v-w)-1.5f*usqr); } __device__ void DirichletWest_Reg(float& f0, float& f1, float& f2, float& f3 , float& f4 , float& f5 , float& f6 , float& f7 , float& f8 , float& f9, float& f10, float& f11, float& f12, float& f13, float& f14, float& f15, float& 
f16, float& f17, float& f18, int y, int z) { if(y == 0){ f2 = f4; f6 = f7; f11 = f13; f16 = f18; } else if(y == YDIM-1){ f4 = f2; f7 = f6; f13 = f11; f18 = f16; } if(z == 0){ f9 = f14; f10 = f15; f11 = f16; f12 = f17; f13 = f18; } else if(z == ZDIM-1){ f14 = f9; f15 = f10; f16 = f11; f17 = f12; f18 = f13; } float u,v,w;//,rho; u = UMAX;//*PoisProf(y)*1.5; v = 0.0f;//0.0; w = 0.0f; // float rho = u+(f0+f2+f4+f9+f11+f13+f14+f16+f18+2.0f*(f3+f6+f7+f12+f17)); //D2Q9i // float u2 = u*u; // float v2 = v*v; // float w2 = w*w; // float usqr = u2+v2+w2; // f1 =(0.166666667f*u)+ // (f3-(-(0.166666667f*u))); f1 = f3+0.33333333f*u; // f5 =(0.0833333333f*( u+v))+ // (f7-(0.0833333333f*(-u-v))); f5 = f7+0.166666667f*(u+v); // f8 =(0.0833333333f*( u-v ))+ // (f6-(0.0833333333f*(-u+v ))); f8 = f6+0.166666667f*(u-v); // f10=(0.0833333333f*( u+w))+ // (f17-(0.0833333333f*(-u-w))); f10= f17+0.166666667f*(u+w); // f15=(0.0833333333f*( u-w))+ // (f12-(0.0833333333f*(-u+w))); f15= f12+0.166666667f*(u-w); // f1 =(0.1031746045f*rho+ -0.0231796391f*usqr+ (0.166666667f*u) + 0.16666667f*u2)+ // (f3-(0.1031746045f*rho+ -0.0231796391f*usqr+-(0.166666667f*u) + 0.16666667f*u2)); // f5 =(0.0158730149f*rho+ 0.00579491071f*usqr+ 0.0833333333f*( u+v +u2+(v2-w2))+ 0.25f*u*v)+ // (f7-(0.0158730149f*rho+ 0.00579491071f*usqr+ 0.0833333333f*(-u-v +u2+(v2-w2))+ 0.25f*u*v)); // f8 =(0.0158730149f*rho+ 0.00579491071f*usqr+ 0.0833333333f*( u-v +u2+(v2-w2))+ -0.25f*u*v)+ // (f6-(0.0158730149f*rho+ 0.00579491071f*usqr+ 0.0833333333f*(-u+v +u2+(v2-w2))+ -0.25f*u*v)); // f10=(0.0158730149f*rho+ 0.00579491071f*usqr+ 0.0833333333f*( u+w +u2+(v2-w2))+ 0.25f*u*w)+ // (f17-(0.0158730149f*rho+ 0.00579491071f*usqr+ 0.0833333333f*(-u-w +u2+(v2-w2))+ 0.25f*u*w)); // f15=(0.0158730149f*rho+ 0.00579491071f*usqr+ 0.0833333333f*( u-w +u2+(v2-w2))+ -0.25f*u*w)+ // (f12-(0.0158730149f*rho+ 0.00579491071f*usqr+ 0.0833333333f*(-u+w +u2+(v2-w2))+ -0.25f*u*w)); // float PI11 = f1 +f3 +f5 +f6 +f7 +f8 +f10+f12+f15+f17; // float PI22 = f2 +f4 +f5 +f6 +f7 +f8 +f11+f13+f16+f18; // float PI33 = f9 +f10+f11+f12+f13+f14+f15+f16+f17+f18; } void __device__ DirichletWest_Regularized(float& f0, float& f1, float& f2, float& f3 , float& f4 , float& f5 , float& f6 , float& f7 , float& f8 , float& f9, float& f10, float& f11, float& f12, float& f13, float& f14, float& f15, float& f16, float& f17, float& f18, int y, int z) { if(y == 0 && z == 0){ f2 = f4; f13=f18; f11=f18; f16=f18; f6 =f7; f9 =f14; f12=f17; } else if(y == 0 && z == ZDIM-1){ f4 = f2; f11=f13; f18=f13; f16=f13; f6 =f7; f14=f9; f17=f12; } else if(y == YDIM-1 && z == 0){ f4 = f2; f11=f16; f18=f16; f13=f16; f7 =f6; f9 =f14; f12=f17; } else if(y == YDIM-1 && z == ZDIM-1){ f4 = f2; f16=f11; f18=f11; f13=f11; f7 =f6; f14=f9; f17=f12; } else{ if(y == 0){ f2 = f4; f11=f13; f16=f18; f8 = f5; } else if(y == YDIM-1){ f4=f2 ; f13=f11; f18=f16; f5=f8 ; } if(z == 0){ f9 = f14; f10 = f15; f11 = f16; f13 = f18; } else if(z == ZDIM-1){ f14 = f9; f15 = f10; f16 = f11; f18 = f13; } } float PI11 = 0; float PI12 = 0; float PI22 = 0; float PI33 = 0; float PI13 = 0; float PI23 = 0; float u;//,v;//,w;//,rho; u = UMAX;//*PoisProf(z)*1.5; //v = 0.0f; //w = 0.0f; float usqr = u*u;//+v*v+w*w; float rho = u+(f0+f2+f4+f9+f11+f13+f14+f16+f18+2.0f*(f3+f6+f7+f12+f17)); //D2Q9i float feq0 = 0.3333333333f*(rho-1.5f*usqr); float feq1 = 0.0555555556f*(rho+3.0f*u+4.5f*u*u-1.5f*usqr); float feq2 = 0.0555555556f*(rho -1.5f*usqr); float feq3 = 0.0555555556f*(rho-3.0f*u+4.5f*u*u-1.5f*usqr); float feq4 = 0.0555555556f*(rho -1.5f*usqr); float feq9 = 
0.0555555556f*(rho -1.5f*usqr); float feq14 = 0.0555555556f*(rho -1.5f*usqr); float feq5 = 0.0277777778f*(rho+3.0f*( u)+4.5f*( u)*( u)-1.5f*usqr); float feq6 = 0.0277777778f*(rho+3.0f*(-u)+4.5f*(-u)*(-u)-1.5f*usqr); float feq7 = 0.0277777778f*(rho+3.0f*(-u)+4.5f*(-u)*(-u)-1.5f*usqr); float feq8 = 0.0277777778f*(rho+3.0f*( u)+4.5f*( u)*( u)-1.5f*usqr); float feq10 = 0.0277777778f*(rho+3.0f*( u)+4.5f*( u)*( u)-1.5f*usqr); float feq11 = 0.0277777778f*(rho -1.5f*usqr); float feq12 = 0.0277777778f*(rho+3.0f*(-u)+4.5f*(-u)*(-u)-1.5f*usqr); float feq13 = 0.0277777778f*(rho -1.5f*usqr); float feq15 = 0.0277777778f*(rho+3.0f*( u)+4.5f*( u)*( u)-1.5f*usqr); float feq16 = 0.0277777778f*(rho -1.5f*usqr); float feq17 = 0.0277777778f*(rho+3.0f*(-u)+4.5f*(-u)*(-u)-1.5f*usqr); float feq18 = 0.0277777778f*(rho -1.5f*usqr); // float feq0 = 0.3333333333f*(rho-1.5f*usqr); // float feq1 = 0.0555555556f*(rho+3.0f*u+4.5f*u*u-1.5f*usqr); // float feq2 = 0.0555555556f*(rho+3.0f*v+4.5f*v*v-1.5f*usqr); // float feq3 = 0.0555555556f*(rho-3.0f*u+4.5f*u*u-1.5f*usqr); // float feq4 = 0.0555555556f*(rho-3.0f*v+4.5f*v*v-1.5f*usqr); // float feq5 = 0.0277777778f*(rho+3.0f*(u+v)+4.5f*(u+v)*(u+v)-1.5f*usqr); // float feq6 = 0.0277777778f*(rho+3.0f*(-u+v)+4.5f*(-u+v)*(-u+v)-1.5f*usqr); // float feq7 = 0.0277777778f*(rho+3.0f*(-u-v)+4.5f*(-u-v)*(-u-v)-1.5f*usqr); // float feq8 = 0.0277777778f*(rho+3.0f*(u-v)+4.5f*(u-v)*(u-v)-1.5f*usqr); // float feq9 = 0.0555555556f*(rho+3.0f*w+4.5f*w*w-1.5f*usqr); // float feq10 = 0.0277777778f*(rho+3.0f*(u+w)+4.5f*(u+w)*(u+w)-1.5f*usqr); // float feq11 = 0.0277777778f*(rho+3.0f*(v+w)+4.5f*(v+w)*(v+w)-1.5f*usqr); // float feq12 = 0.0277777778f*(rho+3.0f*(-u+w)+4.5f*(-u+w)*(-u+w)-1.5f*usqr); // float feq13 = 0.0277777778f*(rho+3.0f*(-v+w)+4.5f*(-v+w)*(-v+w)-1.5f*usqr); // float feq14 = 0.0555555556f*(rho-3.0f*w+4.5f*w*w-1.5f*usqr); // float feq15 = 0.0277777778f*(rho+3.0f*(u-w)+4.5f*(u-w)*(u-w)-1.5f*usqr); // float feq16 = 0.0277777778f*(rho+3.0f*(v-w)+4.5f*(v-w)*(v-w)-1.5f*usqr); // float feq17 = 0.0277777778f*(rho+3.0f*(-u-w)+4.5f*(-u-w)*(-u-w)-1.5f*usqr); // float feq18 = 0.0277777778f*(rho+3.0f*(-v-w)+4.5f*(-v-w)*(-v-w)-1.5f*usqr); f1 = feq1 +f3 -feq3 ; f5 = feq5 +f7 -feq7 ; f8 = feq8 +f6 -feq6 ; f10= feq10+f17-feq17; f15= feq15+f12-feq12; PI11 = (f1-feq1)+(f3-feq3)+(f5-feq5)+(f6-feq6)+(f7-feq7)+(f8-feq8)+(f10-feq10)+(f12-feq12)+(f15-feq15)+(f17-feq17); PI22 = (f2-feq2)+(f4-feq4)+(f5-feq5)+(f6-feq6)+(f7-feq7)+(f8-feq8)+(f11-feq11)+(f13-feq13)+(f16-feq16)+(f18-feq18); PI33 = (f9-feq9)+(f14-feq14)+(f10-feq10)+(f12-feq12)+(f15-feq15)+(f17-feq17)+(f11-feq11)+(f13-feq13)+(f16-feq16)+(f18-feq18); PI12 = (f5-feq5)+(f7-feq7)-(f6-feq6)-(f8-feq8); PI13 = (f10-feq10)+(f17-feq17)-(f12-feq12)-(f15-feq15); PI23 = (f11-feq11)+(f18-feq18)-(f13-feq13)-(f16-feq16); f0 = feq0 +1.5f *(( -0.333333333f)*PI11 +( -0.333333333f)*PI22+( -0.333333333f)*PI33) ; f1 = feq1 +0.25f *(( 0.666666667f)*PI11 +( -0.333333333f)*PI22+( -0.333333333f)*PI33) ; f2 = feq2 +0.25f *(( -0.333333333f)*PI11 +( 0.666666667f)*PI22+( -0.333333333f)*PI33) ; f3 = feq3 +0.25f *(( 0.666666667f)*PI11 +( -0.333333333f)*PI22+( -0.333333333f)*PI33) ; f4 = feq4 +0.25f *(( -0.333333333f)*PI11 +( 0.666666667f)*PI22+( -0.333333333f)*PI33) ; f5 = feq5 +0.125f*(( 0.666666667f)*PI11+2.0f*( PI12 )+( 0.666666667f)*PI22+( -0.333333333f)*PI33) ; f6 = feq6 +0.125f*(( 0.666666667f)*PI11+2.0f*(-PI12 )+( 0.666666667f)*PI22+( -0.333333333f)*PI33) ; f7 = feq7 +0.125f*(( 0.666666667f)*PI11+2.0f*( PI12 )+( 0.666666667f)*PI22+( -0.333333333f)*PI33) ; f8 = feq8 
+0.125f*(( 0.666666667f)*PI11+2.0f*(-PI12 )+( 0.666666667f)*PI22+( -0.333333333f)*PI33) ; f9 = feq9 +0.25f *(( -0.333333333f)*PI11 +( -0.333333333f)*PI22+( 0.666666667f)*PI33) ; f10 = feq10+0.125f*(( 0.666666667f)*PI11+2.0f*( + PI13 )+( -0.333333333f)*PI22+( 0.666666667f)*PI33) ; f11 = feq11+0.125f*(( -0.333333333f)*PI11+2.0f*( + PI23)+( 0.666666667f)*PI22+( 0.666666667f)*PI33) ; f12 = feq12+0.125f*(( 0.666666667f)*PI11+2.0f*( +-PI13 )+( -0.333333333f)*PI22+( 0.666666667f)*PI33) ; f13 = feq13+0.125f*(( -0.333333333f)*PI11+2.0f*( +-PI23)+( 0.666666667f)*PI22+( 0.666666667f)*PI33) ; f14 = feq14+0.25f *(( -0.333333333f)*PI11 +( -0.333333333f)*PI22+( 0.666666667f)*PI33) ; f15 = feq15+0.125f*(( 0.666666667f)*PI11+2.0f*( +-PI13 )+( -0.333333333f)*PI22+( 0.666666667f)*PI33) ; f16 = feq16+0.125f*(( -0.333333333f)*PI11+2.0f*( +-PI23)+( 0.666666667f)*PI22+( 0.666666667f)*PI33) ; f17 = feq17+0.125f*(( 0.666666667f)*PI11+2.0f*( + PI13 )+( -0.333333333f)*PI22+( 0.666666667f)*PI33) ; f18 = feq18+0.125f*(( -0.333333333f)*PI11+2.0f*( + PI23)+( 0.666666667f)*PI22+( 0.666666667f)*PI33) ; } void __device__ NeumannEast_Regularized(float& f0, float& f1, float& f2, float& f3 , float& f4 , float& f5 , float& f6 , float& f7 , float& f8 , float& f9, float& f10, float& f11, float& f12, float& f13, float& f14, float& f15, float& f16, float& f17, float& f18, int y, int z) { if(y == 0 && z == 0){ f2 = f4; f13 = f18; f11 = f18; f16 = f18; f5 = f8; f9 = f14; f10 = f15; } else if(y == 0 && z == ZDIM-1){ f2 = f4; f11 = f13; f18 = f13; f16 = f13; f5 = f8; f14 = f9; f15 = f10; } else if(y == YDIM-1 && z == 0){ f4 = f2; f18 = f16; f11 = f16; f13 = f16; f8 = f5; f9 = f14; f10 = f15; } else if(y == YDIM-1 && z == ZDIM-1){ f4 = f2; f13 = f11; f16 = f11; f18 = f11; f8 = f5; f14 = f9; f15 = f10; } else{ if(y == 0){ f2 = f4; f11 = f13; f16 = f18; f5 = f8; } else if(y == YDIM-1){ f4 = f2; f13 = f11; f18 = f16; f8 = f5; } else if(z == 0){ f9 = f14; f10 = f15; f11 = f16; f13 = f18; } else if(z == ZDIM-1){ f14 = f9; f15 = f10; f16 = f11; f18 = f13; } } float PI11 = 0; float PI12 = 0; float PI22 = 0; float PI33 = 0; float PI13 = 0; float PI23 = 0; float u;//,v;//,w;//,rho; float rho = 1.0f; //v = 0.0f; //w = 0.0f; u = -rho+(f0+f2+f4+f9+f11+f13+f14+f16+f18+2.0f*(f1+f8+f5+f10+f15)); //D2Q9i float usqr = u*u;//+v*v+w*w; float feq0 = 0.3333333333f*(rho-1.5f*usqr); float feq1 = 0.0555555556f*(rho+3.0f*u+4.5f*u*u-1.5f*usqr); float feq2 = 0.0555555556f*(rho -1.5f*usqr); float feq3 = 0.0555555556f*(rho-3.0f*u+4.5f*u*u-1.5f*usqr); float feq4 = 0.0555555556f*(rho -1.5f*usqr); float feq9 = 0.0555555556f*(rho -1.5f*usqr); float feq14 = 0.0555555556f*(rho -1.5f*usqr); float feq5 = 0.0277777778f*(rho+3.0f*( u)+4.5f*( u)*( u)-1.5f*usqr); float feq6 = 0.0277777778f*(rho+3.0f*(-u)+4.5f*(-u)*(-u)-1.5f*usqr); float feq7 = 0.0277777778f*(rho+3.0f*(-u)+4.5f*(-u)*(-u)-1.5f*usqr); float feq8 = 0.0277777778f*(rho+3.0f*( u)+4.5f*( u)*( u)-1.5f*usqr); float feq10 = 0.0277777778f*(rho+3.0f*( u)+4.5f*( u)*( u)-1.5f*usqr); float feq11 = 0.0277777778f*(rho -1.5f*usqr); float feq12 = 0.0277777778f*(rho+3.0f*(-u)+4.5f*(-u)*(-u)-1.5f*usqr); float feq13 = 0.0277777778f*(rho -1.5f*usqr); float feq15 = 0.0277777778f*(rho+3.0f*( u)+4.5f*( u)*( u)-1.5f*usqr); float feq16 = 0.0277777778f*(rho -1.5f*usqr); float feq17 = 0.0277777778f*(rho+3.0f*(-u)+4.5f*(-u)*(-u)-1.5f*usqr); float feq18 = 0.0277777778f*(rho -1.5f*usqr); // float feq0 = 0.3333333333f*(rho-1.5f*usqr); // float feq1 = 0.0555555556f*(rho+3.0f*u+4.5f*u*u-1.5f*usqr); // float feq2 = 
0.0555555556f*(rho+3.0f*v+4.5f*v*v-1.5f*usqr); // float feq3 = 0.0555555556f*(rho-3.0f*u+4.5f*u*u-1.5f*usqr); // float feq4 = 0.0555555556f*(rho-3.0f*v+4.5f*v*v-1.5f*usqr); // float feq9 = 0.0555555556f*(rho+3.0f*w+4.5f*w*w-1.5f*usqr); // float feq14 = 0.0555555556f*(rho-3.0f*w+4.5f*w*w-1.5f*usqr); // float feq5 = 0.0277777778f*(rho+3.0f*( u+v)+4.5f*( u+v)*( u+v)-1.5f*usqr); // float feq6 = 0.0277777778f*(rho+3.0f*(-u+v)+4.5f*(-u+v)*(-u+v)-1.5f*usqr); // float feq7 = 0.0277777778f*(rho+3.0f*(-u-v)+4.5f*(-u-v)*(-u-v)-1.5f*usqr); // float feq8 = 0.0277777778f*(rho+3.0f*( u-v)+4.5f*( u-v)*( u-v)-1.5f*usqr); // float feq10 = 0.0277777778f*(rho+3.0f*( u+w)+4.5f*( u+w)*( u+w)-1.5f*usqr); // float feq11 = 0.0277777778f*(rho+3.0f*( v+w)+4.5f*( v+w)*( v+w)-1.5f*usqr); // float feq12 = 0.0277777778f*(rho+3.0f*(-u+w)+4.5f*(-u+w)*(-u+w)-1.5f*usqr); // float feq13 = 0.0277777778f*(rho+3.0f*(-v+w)+4.5f*(-v+w)*(-v+w)-1.5f*usqr); // float feq15 = 0.0277777778f*(rho+3.0f*( u-w)+4.5f*( u-w)*( u-w)-1.5f*usqr); // float feq16 = 0.0277777778f*(rho+3.0f*( v-w)+4.5f*( v-w)*( v-w)-1.5f*usqr); // float feq17 = 0.0277777778f*(rho+3.0f*(-u-w)+4.5f*(-u-w)*(-u-w)-1.5f*usqr); // float feq18 = 0.0277777778f*(rho+3.0f*(-v-w)+4.5f*(-v-w)*(-v-w)-1.5f*usqr); f3 = feq3 +f1 -feq1 ; f7 = feq7 +f5 -feq5 ; f6 = feq6 +f8 -feq8 ; f17= feq17+f10-feq10; f12= feq12+f15-feq15; PI11 = (f1-feq1)+(f3-feq3)+(f5-feq5)+(f6-feq6)+(f7-feq7)+(f8-feq8)+(f10-feq10)+(f12-feq12)+(f15-feq15)+(f17-feq17); PI22 = (f2-feq2)+(f4-feq4)+(f5-feq5)+(f6-feq6)+(f7-feq7)+(f8-feq8)+(f11-feq11)+(f13-feq13)+(f16-feq16)+(f18-feq18); PI33 = (f9-feq9)+(f14-feq14)+(f10-feq10)+(f12-feq12)+(f15-feq15)+(f17-feq17)+(f11-feq11)+(f13-feq13)+(f16-feq16)+(f18-feq18); PI12 = (f5-feq5)+(f7-feq7)-(f6-feq6)-(f8-feq8); PI13 = (f10-feq10)+(f17-feq17)-(f12-feq12)-(f15-feq15); PI23 = (f11-feq11)+(f18-feq18)-(f13-feq13)-(f16-feq16); f0 = feq0 +1.5f *(( -0.333333333f)*PI11 +( -0.333333333f)*PI22+( -0.333333333f)*PI33) ; f1 = feq1 +0.25f *(( 0.666666667f)*PI11 +( -0.333333333f)*PI22+( -0.333333333f)*PI33) ; f2 = feq2 +0.25f *(( -0.333333333f)*PI11 +( 0.666666667f)*PI22+( -0.333333333f)*PI33) ; f3 = feq3 +0.25f *(( 0.666666667f)*PI11 +( -0.333333333f)*PI22+( -0.333333333f)*PI33) ; f4 = feq4 +0.25f *(( -0.333333333f)*PI11 +( 0.666666667f)*PI22+( -0.333333333f)*PI33) ; f5 = feq5 +0.125f*(( 0.666666667f)*PI11+2.0f*( PI12 )+( 0.666666667f)*PI22+( -0.333333333f)*PI33) ; f6 = feq6 +0.125f*(( 0.666666667f)*PI11+2.0f*(-PI12 )+( 0.666666667f)*PI22+( -0.333333333f)*PI33) ; f7 = feq7 +0.125f*(( 0.666666667f)*PI11+2.0f*( PI12 )+( 0.666666667f)*PI22+( -0.333333333f)*PI33) ; f8 = feq8 +0.125f*(( 0.666666667f)*PI11+2.0f*(-PI12 )+( 0.666666667f)*PI22+( -0.333333333f)*PI33) ; f9 = feq9 +0.25f *(( -0.333333333f)*PI11 +( -0.333333333f)*PI22+( 0.666666667f)*PI33) ; f10 = feq10+0.125f*(( 0.666666667f)*PI11+2.0f*( + PI13 )+( -0.333333333f)*PI22+( 0.666666667f)*PI33) ; f11 = feq11+0.125f*(( -0.333333333f)*PI11+2.0f*( + PI23)+( 0.666666667f)*PI22+( 0.666666667f)*PI33) ; f12 = feq12+0.125f*(( 0.666666667f)*PI11+2.0f*( +-PI13 )+( -0.333333333f)*PI22+( 0.666666667f)*PI33) ; f13 = feq13+0.125f*(( -0.333333333f)*PI11+2.0f*( +-PI23)+( 0.666666667f)*PI22+( 0.666666667f)*PI33) ; f14 = feq14+0.25f *(( -0.333333333f)*PI11 +( -0.333333333f)*PI22+( 0.666666667f)*PI33) ; f15 = feq15+0.125f*(( 0.666666667f)*PI11+2.0f*( +-PI13 )+( -0.333333333f)*PI22+( 0.666666667f)*PI33) ; f16 = feq16+0.125f*(( -0.333333333f)*PI11+2.0f*( +-PI23)+( 0.666666667f)*PI22+( 0.666666667f)*PI33) ; f17 = feq17+0.125f*(( 0.666666667f)*PI11+2.0f*( + 
PI13 )+( -0.333333333f)*PI22+( 0.666666667f)*PI33) ; f18 = feq18+0.125f*(( -0.333333333f)*PI11+2.0f*( + PI23)+( 0.666666667f)*PI22+( 0.666666667f)*PI33) ; } __device__ void NeumannEast(float& f0, float& f1, float& f2, float& f3 , float& f4 , float& f5 , float& f6 , float& f7 , float& f8 , float& f9, float& f10, float& f11, float& f12, float& f13, float& f14, float& f15, float& f16, float& f17, float& f18, int y, int z) { if(y == 0 && z == 0){ f2 = f4; f13 = f18; f11 = f18; f16 = f18; f5 = f8; f9 = f14; f10 = f15; } else if(y == 0 && z == ZDIM-1){ f2 = f4; f11 = f13; f18 = f13; f16 = f13; f5 = f8; f14 = f9; f15 = f10; } else if(y == YDIM-1 && z == 0){ f4 = f2; f18 = f16; f11 = f16; f13 = f16; f8 = f5; f9 = f14; f10 = f15; } else if(y == YDIM-1 && z == ZDIM-1){ f4 = f2; f13 = f11; f16 = f11; f18 = f11; f8 = f5; f14 = f9; f15 = f10; } else{ if(y == 0){ f2 = f4; // f6 = f7; f11 = f13; f16 = f18; f5 = f8; } else if(y == YDIM-1){ f4 = f2; // f7 = f6; f13 = f11; f18 = f16; f8 = f5; } if(z == 0){ f9 = f14; f10 = f15; f11 = f16; // f12 = f17; f13 = f18; } else if(z == ZDIM-1){ f14 = f9; f15 = f10; f16 = f11; // f17 = f12; f18 = f13; } } float u,v,w;//,rho; float rho = 1.0f; v = 0.0f; w = 0.0f; u = -rho+(f0+f2+f4+f9+f11+f13+f14+f16+f18+2.0f*(f1+f8+f5+f10+f15)); //D2Q9i float u2 = u*u; float v2 = v*v; float w2 = w*w; float usqr = u2+v2+w2; // f3 = f1 -0.333333333f*u; // f7 = f5 -0.166666667f*(u+v); // f6 = f8 -0.166666667f*(u-v); // f17= f10-0.166666667f*(u+w); // f12= f15-0.166666667f*(u-w); f0 = 1.0f/3.0f*(rho-1.5f*usqr); f1 = 1.0f/18.0f*(rho+3.0f*u+4.5f*u*u-1.5f*usqr); f2 = 1.0f/18.0f*(rho+3.0f*v+4.5f*v*v-1.5f*usqr); f3 = 1.0f/18.0f*(rho-3.0f*u+4.5f*u*u-1.5f*usqr); f4 = 1.0f/18.0f*(rho-3.0f*v+4.5f*v*v-1.5f*usqr); f5 = 1.0f/36.0f*(rho+3.0f*(u+v)+4.5f*(u+v)*(u+v)-1.5f*usqr); f6 = 1.0f/36.0f*(rho+3.0f*(-u+v)+4.5f*(-u+v)*(-u+v)-1.5f*usqr); f7 = 1.0f/36.0f*(rho+3.0f*(-u-v)+4.5f*(-u-v)*(-u-v)-1.5f*usqr); f8 = 1.0f/36.0f*(rho+3.0f*(u-v)+4.5f*(u-v)*(u-v)-1.5f*usqr); f9 = 1.0f/18.0f*(rho+3.0f*w+4.5f*w*w-1.5f*usqr); f10= 1.0f/36.0f*(rho+3.0f*(u+w)+4.5f*(u+w)*(u+w)-1.5f*usqr); f11= 1.0f/36.0f*(rho+3.0f*(v+w)+4.5f*(v+w)*(u+w)-1.5f*usqr); f12= 1.0f/36.0f*(rho+3.0f*(-u+w)+4.5f*(-u+w)*(-u+w)-1.5f*usqr); f13= 1.0f/36.0f*(rho+3.0f*(-v+w)+4.5f*(-v+w)*(u+w)-1.5f*usqr); f14= 1.0f/18.0f*(rho-3.0f*w+4.5f*w*w-1.5f*usqr); f15= 1.0f/36.0f*(rho+3.0f*(u-w)+4.5f*(u-w)*(u-w)-1.5f*usqr); f16= 1.0f/36.0f*(rho+3.0f*(v-w)+4.5f*(v-w)*(v-w)-1.5f*usqr); f17= 1.0f/36.0f*(rho+3.0f*(-u-w)+4.5f*(-u-w)*(-u-w)-1.5f*usqr); f18= 1.0f/36.0f*(rho+3.0f*(-v-w)+4.5f*(-v-w)*(-v-w)-1.5f*usqr); } __device__ void NeumannEast_Reg(float& f0, float& f1, float& f2, float& f3 , float& f4 , float& f5 , float& f6 , float& f7 , float& f8 , float& f9, float& f10, float& f11, float& f12, float& f13, float& f14, float& f15, float& f16, float& f17, float& f18, int y, int z) { if(y == 0 && z == 0){ f2 = f4; f13 = f18; f11 = f18; f16 = f18; f5 = f8; f9 = f14; f10 = f15; } else if(y == 0 && z == ZDIM-1){ f2 = f4; f11 = f13; f18 = f13; f16 = f13; f5 = f8; f14 = f9; f15 = f10; } else if(y == YDIM-1 && z == 0){ f4 = f2; f18 = f16; f11 = f16; f13 = f16; f8 = f5; f9 = f14; f10 = f15; } else if(y == YDIM-1 && z == ZDIM-1){ f4 = f2; f13 = f11; f16 = f11; f18 = f11; f8 = f5; f14 = f9; f15 = f10; } else{ if(y == 0){ f2 = f4; // f6 = f7; f11 = f13; f16 = f18; f5 = f8; } else if(y == YDIM-1){ f4 = f2; // f7 = f6; f13 = f11; f18 = f16; f8 = f5; } if(z == 0){ f9 = f14; f10 = f15; f11 = f16; // f12 = f17; f13 = f18; } else if(z == ZDIM-1){ f14 = f9; f15 = f10; f16 = f11; // f17 
= f12; f18 = f13; } } float u,v,w;//,rho; float rho = 1.0f; v = 0.0f; w = 0.0f; u = -rho+(f0+f2+f4+f9+f11+f13+f14+f16+f18+2.0f*(f1+f8+f5+f10+f15)); //D2Q9i // float u2 = u*u; // float v2 = v*v; // float w2 = w*w; // float usqr = u2+v2+w2; f3 = f1 -0.333333333f*u; f7 = f5 -0.166666667f*(u+v); f6 = f8 -0.166666667f*(u-v); f17= f10-0.166666667f*(u+w); f12= f15-0.166666667f*(u-w); // f3 =(0.1031746045f*rho+ -0.0231796391f*usqr+-(0.166666667f*u) + 0.16666667f*u2)+ // (f1-(0.1031746045f*rho+ -0.0231796391f*usqr+ (0.166666667f*u) + 0.16666667f*u2)); // f7 =(0.0158730149f*rho+ 0.00579491071f*usqr+ 0.0833333333f*(-u-v +u2+(v2-w2))+ 0.25f*u*v)+ // (f5-(0.0158730149f*rho+ 0.00579491071f*usqr+ 0.0833333333f*( u+v +u2+(v2-w2))+ 0.25f*u*v)); // f6 =(0.0158730149f*rho+ 0.00579491071f*usqr+ 0.0833333333f*(-u+v +u2+(v2-w2))+ -0.25f*u*v)+ // (f8-(0.0158730149f*rho+ 0.00579491071f*usqr+ 0.0833333333f*( u-v +u2+(v2-w2))+ -0.25f*u*v)); // f17=(0.0158730149f*rho+ 0.00579491071f*usqr+ 0.0833333333f*(-u-w +u2+(v2-w2))+ 0.25f*u*w)+ // (f10-(0.0158730149f*rho+ 0.00579491071f*usqr+ 0.0833333333f*( u+w +u2+(v2-w2))+ 0.25f*u*w)); // f12=(0.0158730149f*rho+ 0.00579491071f*usqr+ 0.0833333333f*(-u+w +u2+(v2-w2))+ -0.25f*u*w)+ // (f15-(0.0158730149f*rho+ 0.00579491071f*usqr+ 0.0833333333f*( u-w +u2+(v2-w2))+ -0.25f*u*w)); // f1 =(0.1031746045f*rho+ -0.0231796391f*usqr+ (0.166666667f*u) + 0.16666667f*u2)+ // (f3-(0.1031746045f*rho+ -0.0231796391f*usqr+-(0.166666667f*u) + 0.16666667f*u2)); // f5 =(0.0158730149f*rho+ 0.00579491071f*usqr+ 0.0833333333f*( u+v +u2+(v2-w2))+ 0.25f*u*v)+ // (f7-(0.0158730149f*rho+ 0.00579491071f*usqr+ 0.0833333333f*(-u-v +u2+(v2-w2))+ 0.25f*u*v)); // f8 =(0.0158730149f*rho+ 0.00579491071f*usqr+ 0.0833333333f*( u-v +u2+(v2-w2))+ -0.25f*u*v)+ // (f6-(0.0158730149f*rho+ 0.00579491071f*usqr+ 0.0833333333f*(-u+v +u2+(v2-w2))+ -0.25f*u*v)); // f10=(0.0158730149f*rho+ 0.00579491071f*usqr+ 0.0833333333f*( u+w +u2+(v2-w2))+ 0.25f*u*w)+ // (f17-(0.0158730149f*rho+ 0.00579491071f*usqr+ 0.0833333333f*(-u-w +u2+(v2-w2))+ 0.25f*u*w)); // f15=(0.0158730149f*rho+ 0.00579491071f*usqr+ 0.0833333333f*( u-w +u2+(v2-w2))+ -0.25f*u*w)+ // (f12-(0.0158730149f*rho+ 0.00579491071f*usqr+ 0.0833333333f*(-u+w +u2+(v2-w2))+ -0.25f*u*w)); // float PI11 = f1 +f3 +f5 +f6 +f7 +f8 +f10+f12+f15+f17; // float PI22 = f2 +f4 +f5 +f6 +f7 +f8 +f11+f13+f16+f18; // float PI33 = f9 +f10+f11+f12+f13+f14+f15+f16+f17+f18; } __device__ void DirichletNorth_Reg(float& f0, float& f1, float& f2, float& f3 , float& f4 , float& f5 , float& f6 , float& f7 , float& f8 , float& f9, float& f10, float& f11, float& f12, float& f13, float& f14, float& f15, float& f16, float& f17, float& f18, int y, int z) { // if(x == 0){ // f2 = f4; // f6 = f7; // f11 = f13; // f16 = f18; // } // else if(x == XDIM-1){ // f4 = f2; // f7 = f6; // f13 = f11; // f18 = f16; // } if(z == 0){ f9 = f14; f10 = f15; f11 = f16; f12 = f17; f13 = f18; } else if(z == ZDIM-1){ f14 = f9; f15 = f10; f16 = f11; f17 = f12; f18 = f13; } float u,v,w;//,rho; u = UMAX; v = 0.0f;//0.0; w = 0.0f; // float rho = u+(f0+f2+f4+f9+f11+f13+f14+f16+f18+2.0f*(f3+f6+f7+f12+f17)); //D2Q9i // float u2 = u*u; // float v2 = v*v; // float w2 = w*w; // float usqr = u2+v2+w2; // f1 =(0.166666667f*u)+ // (f3-(-(0.166666667f*u))); f4 = f2-0.33333333f*v; // f5 =(0.0833333333f*( u+v))+ // (f7-(0.0833333333f*(-u-v))); f7 = f5-0.166666667f*(u+v); // f8 =(0.0833333333f*( u-v ))+ // (f6-(0.0833333333f*(-u+v ))); f8 = f6+0.166666667f*(u-v); // f10=(0.0833333333f*( u+w))+ // (f17-(0.0833333333f*(-u-w))); f13= 
f16-0.166666667f*(v-w); // f15=(0.0833333333f*( u-w))+ // (f12-(0.0833333333f*(-u+w))); f18= f11-0.166666667f*(v+w); // //float feq0 = 0.1904761791f*rho+-0.597127747f*usqr //float feq1 = 0.1031746045f*rho+ 0.032375918f*usqr+ 0.1666666667f*u + 0.055555556f*(2.f*u*u-(v*v+w*w)); //float feq2 = 0.1031746045f*rho+ 0.032375918f*usqr+ 0.1666666667f*v +-0.027777778f*(2.f*u*u-(v*v+w*w))+ 0.083333333f*(v*v-w*w); //float feq3 = 0.1031746045f*rho+ 0.032375918f*usqr+ -0.1666666667f*u + 0.055555556f*(2.f*u*u-(v*v+w*w)); //float feq4 = 0.1031746045f*rho+ 0.032375918f*usqr+ -0.1666666667f*v +-0.027777778f*(2.f*u*u-(v*v+w*w))+ 0.083333333f*(v*v-w*w); //float feq5 = 0.0158730149f*rho+ 0.033572690f*usqr+ 0.083333333f*( u+v)+ 0.027777778f*(2.f*u*u-(v*v+w*w))+ 0.083333333f*(v*v-w*w)+ 0.25f*u*v ; //float feq6 = 0.0158730149f*rho+ 0.033572690f*usqr+ -0.083333333f*( u-v)+ 0.027777778f*(2.f*u*u-(v*v+w*w))+ 0.083333333f*(v*v-w*w)+ -0.25f*u*v ; //float feq7 = 0.0158730149f*rho+ 0.033572690f*usqr+ -0.083333333f*( u+v)+ 0.027777778f*(2.f*u*u-(v*v+w*w))+ 0.083333333f*(v*v-w*w)+ 0.25f*u*v ; //float feq8 = 0.0158730149f*rho+ 0.033572690f*usqr+ 0.083333333f*( u-v)+ 0.027777778f*(2.f*u*u-(v*v+w*w))+ 0.083333333f*(v*v-w*w)+ -0.25f*u*v ; //float feq9 = 0.1031746045f*rho+ 0.032375918f*usqr+ 0.1666666667f*w; +-0.027777778f*(2.f*u*u-(v*v+w*w))+ -0.083333333f*(v*v-w*w) //float feq10= 0.0158730149f*rho+ 0.033572690f*usqr+ 0.083333333f*( u+w)+ 0.027777778f*(2.f*u*u-(v*v+w*w))+ -0.083333333f*(v*v-w*w) + 0.25f*u*w; //float feq11= 0.0158730149f*rho+ 0.033572690f*usqr+ 0.083333333f*( v+w)+-0.055555556f*(2.f*u*u-(v*v+w*w)) + 0.25f*v*w ; //float feq12= 0.0158730149f*rho+ 0.033572690f*usqr+ -0.083333333f*( u-w)+ 0.027777778f*(2.f*u*u-(v*v+w*w))+ -0.083333333f*(v*v-w*w) + -0.25f*u*w; //float feq13= 0.0158730149f*rho+ 0.033572690f*usqr+ -0.083333333f*( v-w)+-0.055555556f*(2.f*u*u-(v*v+w*w)) -0.25f*v*w ; //float feq14= 0.1031746045f*rho+ 0.032375918f*usqr+ -0.1666666667f*w +-0.027777778f*(2.f*u*u-(v*v+w*w))+ -0.083333333f*(v*v-w*w) //float feq15= 0.0158730149f*rho+ 0.033572690f*usqr+ 0.083333333f*(u-w) + 0.027777778f*(2.f*u*u-(v*v+w*w))+ -0.083333333f*(v*v-w*w) + -0.25f*u*w; //float feq16= 0.0158730149f*rho+ 0.033572690f*usqr+ 0.083333333f*(v-w) +-0.055555556f*(2.f*u*u-(v*v+w*w)) + -0.25f*v*w ; //float feq17= 0.0158730149f*rho+ 0.033572690f*usqr+ -0.083333333f*(u+w) + 0.027777778f*(2.f*u*u-(v*v+w*w))+ -0.083333333f*(v*v-w*w) + 0.25f*u*w; //float feq18= 0.0158730149f*rho+ 0.033572690f*usqr+ -0.083333333f*(v+w) +-0.055555556f*(2.f*u*u-(v*v+w*w)) + 0.25f*v*w ; // // float PI11 = f1 +f3 +f5 +f6 +f7 +f8 +f10+f12+f15+f17; // float PI22 = f2 +f4 +f5 +f6 +f7 +f8 +f11+f13+f16+f18; // float PI33 = f9 +f10+f11+f12+f13+f14+f15+f16+f17+f18; } __device__ void DirichletSouth_Reg(float& f0, float& f1, float& f2, float& f3 , float& f4 , float& f5 , float& f6 , float& f7 , float& f8 , float& f9, float& f10, float& f11, float& f12, float& f13, float& f14, float& f15, float& f16, float& f17, float& f18, int y, int z) { // if(x == 0){ // f2 = f4; // f6 = f7; // f11 = f13; // f16 = f18; // } // else if(x == XDIM-1){ // f4 = f2; // f7 = f6; // f13 = f11; // f18 = f16; // } if(z == 0){ f9 = f14; f10 = f15; f11 = f16; f12 = f17; f13 = f18; } else if(z == ZDIM-1){ f14 = f9; f15 = f10; f16 = f11; f17 = f12; f18 = f13; } float u,v,w;//,rho; u = UMAX; v = 0.0f;//0.0; w = 0.0f; // float rho = u+(f0+f2+f4+f9+f11+f13+f14+f16+f18+2.0f*(f3+f6+f7+f12+f17)); //D2Q9i // float u2 = u*u; // float v2 = v*v; // float w2 = w*w; // float usqr = u2+v2+w2; f2 = f4 +0.33333333f*v; f5 = 
f7 +0.166666667f*(u+v); f6 = f8 -0.166666667f*(u-v); f16= f13+0.166666667f*(v-w); f11= f18+0.166666667f*(v+w); // //float feq0 = 0.1904761791f*rho+-0.597127747f*usqr //float feq1 = 0.1031746045f*rho+ 0.032375918f*usqr+ 0.1666666667f*u + 0.055555556f*(2.f*u*u-(v*v+w*w)); //float feq2 = 0.1031746045f*rho+ 0.032375918f*usqr+ 0.1666666667f*v +-0.027777778f*(2.f*u*u-(v*v+w*w))+ 0.083333333f*(v*v-w*w); //float feq3 = 0.1031746045f*rho+ 0.032375918f*usqr+ -0.1666666667f*u + 0.055555556f*(2.f*u*u-(v*v+w*w)); //float feq4 = 0.1031746045f*rho+ 0.032375918f*usqr+ -0.1666666667f*v +-0.027777778f*(2.f*u*u-(v*v+w*w))+ 0.083333333f*(v*v-w*w); //float feq5 = 0.0158730149f*rho+ 0.033572690f*usqr+ 0.083333333f*( u+v)+ 0.027777778f*(2.f*u*u-(v*v+w*w))+ 0.083333333f*(v*v-w*w)+ 0.25f*u*v ; //float feq6 = 0.0158730149f*rho+ 0.033572690f*usqr+ -0.083333333f*( u-v)+ 0.027777778f*(2.f*u*u-(v*v+w*w))+ 0.083333333f*(v*v-w*w)+ -0.25f*u*v ; //float feq7 = 0.0158730149f*rho+ 0.033572690f*usqr+ -0.083333333f*( u+v)+ 0.027777778f*(2.f*u*u-(v*v+w*w))+ 0.083333333f*(v*v-w*w)+ 0.25f*u*v ; //float feq8 = 0.0158730149f*rho+ 0.033572690f*usqr+ 0.083333333f*( u-v)+ 0.027777778f*(2.f*u*u-(v*v+w*w))+ 0.083333333f*(v*v-w*w)+ -0.25f*u*v ; //float feq9 = 0.1031746045f*rho+ 0.032375918f*usqr+ 0.1666666667f*w; +-0.027777778f*(2.f*u*u-(v*v+w*w))+ -0.083333333f*(v*v-w*w) //float feq10= 0.0158730149f*rho+ 0.033572690f*usqr+ 0.083333333f*( u+w)+ 0.027777778f*(2.f*u*u-(v*v+w*w))+ -0.083333333f*(v*v-w*w) + 0.25f*u*w; //float feq11= 0.0158730149f*rho+ 0.033572690f*usqr+ 0.083333333f*( v+w)+-0.055555556f*(2.f*u*u-(v*v+w*w)) + 0.25f*v*w ; //float feq12= 0.0158730149f*rho+ 0.033572690f*usqr+ -0.083333333f*( u-w)+ 0.027777778f*(2.f*u*u-(v*v+w*w))+ -0.083333333f*(v*v-w*w) + -0.25f*u*w; //float feq13= 0.0158730149f*rho+ 0.033572690f*usqr+ -0.083333333f*( v-w)+-0.055555556f*(2.f*u*u-(v*v+w*w)) -0.25f*v*w ; //float feq14= 0.1031746045f*rho+ 0.032375918f*usqr+ -0.1666666667f*w +-0.027777778f*(2.f*u*u-(v*v+w*w))+ -0.083333333f*(v*v-w*w) //float feq15= 0.0158730149f*rho+ 0.033572690f*usqr+ 0.083333333f*(u-w) + 0.027777778f*(2.f*u*u-(v*v+w*w))+ -0.083333333f*(v*v-w*w) + -0.25f*u*w; //float feq16= 0.0158730149f*rho+ 0.033572690f*usqr+ 0.083333333f*(v-w) +-0.055555556f*(2.f*u*u-(v*v+w*w)) + -0.25f*v*w ; //float feq17= 0.0158730149f*rho+ 0.033572690f*usqr+ -0.083333333f*(u+w) + 0.027777778f*(2.f*u*u-(v*v+w*w))+ -0.083333333f*(v*v-w*w) + 0.25f*u*w; //float feq18= 0.0158730149f*rho+ 0.033572690f*usqr+ -0.083333333f*(v+w) +-0.055555556f*(2.f*u*u-(v*v+w*w)) + 0.25f*v*w ; // // float PI11 = f1 +f3 +f5 +f6 +f7 +f8 +f10+f12+f15+f17; // float PI22 = f2 +f4 +f5 +f6 +f7 +f8 +f11+f13+f16+f18; // float PI33 = f9 +f10+f11+f12+f13+f14+f15+f16+f17+f18; } __device__ void xsymmetry_bot(float& f0, float& f1, float& f2, float& f3 , float& f4 , float& f5 , float& f6 , float& f7 , float& f8 , float& f9, float& f10, float& f11, float& f12, float& f13, float& f14, float& f15, float& f16, float& f17, float& f18, int y, int z) { if(y == 0 && z == 0){ f2 = f4; f13=f18; f11=f18; f16=f18; f6 =f7; f9 =f14; f12=f17; } else if(y == 0 && z == ZDIM-1){ f4 = f2; f11=f13; f18=f13; f16=f13; f6 =f7; f14=f9; f17=f12; } else if(y == YDIM-1 && z == 0){ f4 = f2; f11=f16; f18=f16; f13=f16; f7 =f6; f9 =f14; f12=f17; } else if(y == YDIM-1 && z == ZDIM-1){ f4 = f2; f16=f11; f18=f11; f13=f11; f7 =f6; f14=f9; f17=f12; } else{ if(y == 0){ f2 = f4; f11=f13; f16=f18; f8 = f5; } else if(y == YDIM-1){ f4=f2 ; f13=f11; f18=f16; f5=f8 ; } // if(z == 0){ // f9 = f14; // f10 = f15; // f11 = f16; // f12 = f17; 
// f13 = f18; // } // else if(z == ZDIM-1){ // f14 = f9; // f15 = f10; // f16 = f11; // f17 = f12; // f18 = f13; // } } f1 = f3 ; f5 = f6 ; f8 = f7 ; f10= f12; f15= f17; } __device__ void xsymmetry_top(float& f0, float& f1, float& f2, float& f3 , float& f4 , float& f5 , float& f6 , float& f7 , float& f8 , float& f9, float& f10, float& f11, float& f12, float& f13, float& f14, float& f15, float& f16, float& f17, float& f18, int y, int z) { if(y == 0 && z == 0){ f2 = f4; f13 = f18; f11 = f18; f16 = f18; f5 = f8; f9 = f14; f10 = f15; } else if(y == 0 && z == ZDIM-1){ f2 = f4; f11 = f13; f18 = f13; f16 = f13; f5 = f8; f14 = f9; f15 = f10; } else if(y == YDIM-1 && z == 0){ f4 = f2; f18 = f16; f11 = f16; f13 = f16; f8 = f5; f9 = f14; f10 = f15; } else if(y == YDIM-1 && z == ZDIM-1){ f4 = f2; f13 = f11; f16 = f11; f18 = f11; f8 = f5; f14 = f9; f15 = f10; } else{ if(y == 0){ f2 = f4; f11 = f13; f16 = f18; f5 = f8; } else if(y == YDIM-1){ f4 = f2; f13 = f11; f18 = f16; f8 = f5; } // else if(z == 0){ // f9 = f14; // f10 = f15; // f11 = f16; // f12 = f17; // f13 = f18; // } // else if(z == ZDIM-1){ // f14 = f9; // f15 = f10; // f16 = f11; // f17 = f12; // f18 = f13; // } } f3 = f1 ; f6 = f5 ; f7 = f8 ; f12= f10; f17= f15; } __device__ void ysymmetry_top(float& f0, float& f1, float& f2, float& f3 , float& f4 , float& f5 , float& f6 , float& f7 , float& f8 , float& f9, float& f10, float& f11, float& f12, float& f13, float& f14, float& f15, float& f16, float& f17, float& f18, int z) { if(z == 0){ f9 = f14; f10= f15; f11= f16; f12= f17; f13= f18; } if(z == ZDIM-1){ f14= f9 ; f15= f10; f16= f11; f17= f12; f18= f13; } f4 = f2 ; f7 = f6 ; f8 = f5 ; f13= f11; f18= f16; } __device__ void ysymmetry_bot(float& f0, float& f1, float& f2, float& f3 , float& f4 , float& f5 , float& f6 , float& f7 , float& f8 , float& f9, float& f10, float& f11, float& f12, float& f13, float& f14, float& f15, float& f16, float& f17, float& f18, int z) { if(z == 0){ f9 = f14; f10= f15; f11= f16; f12= f17; f13= f18; } if(z == ZDIM-1){ f14= f9 ; f15= f10; f16= f11; f17= f12; f18= f13; } f2 = f4 ; f6 = f7 ; f5 = f8 ; f11= f13; f16= f18; } __device__ void zsymmetry_top(float& f0, float& f1, float& f2, float& f3 , float& f4 , float& f5 , float& f6 , float& f7 , float& f8 , float& f9, float& f10, float& f11, float& f12, float& f13, float& f14, float& f15, float& f16, float& f17, float& f18, int y) { if(y == 0){ f2 = f4 ; f6 = f7 ; f5 = f8 ; f11= f13; f16= f18; } if(y == YDIM-1){ f4 = f2 ; f7 = f6 ; f8 = f5 ; f13= f11; f18= f16; } f14= f9 ; f15= f10; f16= f11; f17= f12; f18= f13; } __device__ void zsymmetry_bot(float& f0, float& f1, float& f2, float& f3 , float& f4 , float& f5 , float& f6 , float& f7 , float& f8 , float& f9, float& f10, float& f11, float& f12, float& f13, float& f14, float& f15, float& f16, float& f17, float& f18, int y) { if(y == 0){ f2 = f4 ; f6 = f7 ; f5 = f8 ; f11= f13; f16= f18; } if(y == YDIM-1){ f4 = f2 ; f7 = f6 ; f8 = f5 ; f13= f11; f18= f16; } f9 = f14; f10= f15; f11= f16; f12= f17; f13= f18; } inline __device__ void boundaries(float& f0, float& f1, float& f2, float& f3 , float& f4 , float& f5 , float& f6 , float& f7 , float& f8 , float& f9, float& f10, float& f11, float& f12, float& f13, float& f14, float& f15, float& f16, float& f17, float& f18, int y, int z, int im) { // if(im == 3)//DirichletWest // { // DirichletWest(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,y,z); // } if(im == 53)//DirichletWest { 
//DirichletWest_Reg(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,y,z); DirichletWest_Regularized(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,y,z); } else if(im == 54)//DirichletWest { //NeumannEast(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,y,z); NeumannEast_Regularized(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,y,z); } // if(im == 4)//DirichletWest // { // NeumannEast(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,y,z); // } // if(im == 13)//DirichletWest // { // DirichletWest_Reg(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,y,z); // } // else if(im == 14)//DirichletWest // { // NeumannEast_Reg(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,y,z); // } // else if(im == 15)//DirichletNorth // { // DirichletNorth_Reg(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,y,z); // } // if(im == 16)//DirichletSouth // { // DirichletSouth_Reg(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,y,z); // } if(im == 21)//ysymm top { ysymmetry_top(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,z); } else if(im == 22)//ysymm bot { ysymmetry_bot(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,z); } else if(im == 23)//zsymm top { zsymmetry_top(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,y); } else if(im == 24)//zsymm bot { zsymmetry_bot(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,y); } } inline __device__ void boundaries_force(float& f0, float& f1, float& f2, float& f3 , float& f4 , float& f5 , float& f6 , float& f7 , float& f8 , float& f9, float& f10, float& f11, float& f12, float& f13, float& f14, float& f15, float& f16, float& f17, float& f18, int y, int z, int im) { // if(im == 3)//DirichletWest // { // DirichletWest(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,y,z); // } if(im == 53)//DirichletWest { DirichletWest_Reg(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,y,z); //DirichletWest_Regularized(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,y,z); } else if(im == 54)//DirichletWest { NeumannEast(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,y,z); //NeumannEast_Regularized(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,y,z); } // else if(im == 15)//DirichletNorth // { // DirichletNorth_Reg(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,y,z); // } // else if(im == 16)//DirichletSouth // { // DirichletSouth_Reg(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,y,z); // } else if(im == 21)//ysymm top { ysymmetry_top(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,z); } else if(im == 22)//ysymm bot { ysymmetry_bot(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,z); } else if(im == 23)//zsymm top { zsymmetry_top(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,y); } else if(im == 24)//zsymm bot { zsymmetry_bot(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,y); } else if(im == 25)//zsymm top { xsymmetry_top(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,y,z); } else if(im == 26)//zsymm bot { xsymmetry_bot(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,y,z); } } texture<float,2,hipReadModeElementType> texRef_f0A; texture<float,2,hipReadModeElementType> texRef_f1A; texture<float,2,hipReadModeElementType> texRef_f2A; 
texture<float,2,hipReadModeElementType> texRef_f3A; texture<float,2,hipReadModeElementType> texRef_f4A; texture<float,2,hipReadModeElementType> texRef_f5A; texture<float,2,hipReadModeElementType> texRef_f6A; texture<float,2,hipReadModeElementType> texRef_f7A; texture<float,2,hipReadModeElementType> texRef_f8A; texture<float,2,hipReadModeElementType> texRef_f9A; texture<float,2,hipReadModeElementType> texRef_f10A; texture<float,2,hipReadModeElementType> texRef_f11A; texture<float,2,hipReadModeElementType> texRef_f12A; texture<float,2,hipReadModeElementType> texRef_f13A; texture<float,2,hipReadModeElementType> texRef_f14A; texture<float,2,hipReadModeElementType> texRef_f15A; texture<float,2,hipReadModeElementType> texRef_f16A; texture<float,2,hipReadModeElementType> texRef_f17A; texture<float,2,hipReadModeElementType> texRef_f18A; texture<float,2,hipReadModeElementType> texRef_f0B; texture<float,2,hipReadModeElementType> texRef_f1B; texture<float,2,hipReadModeElementType> texRef_f2B; texture<float,2,hipReadModeElementType> texRef_f3B; texture<float,2,hipReadModeElementType> texRef_f4B; texture<float,2,hipReadModeElementType> texRef_f5B; texture<float,2,hipReadModeElementType> texRef_f6B; texture<float,2,hipReadModeElementType> texRef_f7B; texture<float,2,hipReadModeElementType> texRef_f8B; texture<float,2,hipReadModeElementType> texRef_f9B; texture<float,2,hipReadModeElementType> texRef_f10B; texture<float,2,hipReadModeElementType> texRef_f11B; texture<float,2,hipReadModeElementType> texRef_f12B; texture<float,2,hipReadModeElementType> texRef_f13B; texture<float,2,hipReadModeElementType> texRef_f14B; texture<float,2,hipReadModeElementType> texRef_f15B; texture<float,2,hipReadModeElementType> texRef_f16B; texture<float,2,hipReadModeElementType> texRef_f17B; texture<float,2,hipReadModeElementType> texRef_f18B; texture<float,2,hipReadModeElementType> texRef_f0C; texture<float,2,hipReadModeElementType> texRef_f1C; texture<float,2,hipReadModeElementType> texRef_f2C; texture<float,2,hipReadModeElementType> texRef_f3C; texture<float,2,hipReadModeElementType> texRef_f4C; texture<float,2,hipReadModeElementType> texRef_f5C; texture<float,2,hipReadModeElementType> texRef_f6C; texture<float,2,hipReadModeElementType> texRef_f7C; texture<float,2,hipReadModeElementType> texRef_f8C; texture<float,2,hipReadModeElementType> texRef_f9C; texture<float,2,hipReadModeElementType> texRef_f10C; texture<float,2,hipReadModeElementType> texRef_f11C; texture<float,2,hipReadModeElementType> texRef_f12C; texture<float,2,hipReadModeElementType> texRef_f13C; texture<float,2,hipReadModeElementType> texRef_f14C; texture<float,2,hipReadModeElementType> texRef_f15C; texture<float,2,hipReadModeElementType> texRef_f16C; texture<float,2,hipReadModeElementType> texRef_f17C; texture<float,2,hipReadModeElementType> texRef_f18C; texture<float,2,hipReadModeElementType> texRef_f0D; texture<float,2,hipReadModeElementType> texRef_f1D; texture<float,2,hipReadModeElementType> texRef_f2D; texture<float,2,hipReadModeElementType> texRef_f3D; texture<float,2,hipReadModeElementType> texRef_f4D; texture<float,2,hipReadModeElementType> texRef_f5D; texture<float,2,hipReadModeElementType> texRef_f6D; texture<float,2,hipReadModeElementType> texRef_f7D; texture<float,2,hipReadModeElementType> texRef_f8D; texture<float,2,hipReadModeElementType> texRef_f9D; texture<float,2,hipReadModeElementType> texRef_f10D; texture<float,2,hipReadModeElementType> texRef_f11D; texture<float,2,hipReadModeElementType> texRef_f12D; texture<float,2,hipReadModeElementType> 
texRef_f13D; texture<float,2,hipReadModeElementType> texRef_f14D; texture<float,2,hipReadModeElementType> texRef_f15D; texture<float,2,hipReadModeElementType> texRef_f16D; texture<float,2,hipReadModeElementType> texRef_f17D; texture<float,2,hipReadModeElementType> texRef_f18D; int timeval_subtract (double *result, struct timeval *x, struct timeval *y) { struct timeval result0; /* Perform the carry for the later subtraction by updating y. */ if (x->tv_usec < y->tv_usec) { int nsec = (y->tv_usec - x->tv_usec) / 1000000 + 1; y->tv_usec -= 1000000 * nsec; y->tv_sec += nsec; } if (x->tv_usec - y->tv_usec > 1000000) { int nsec = (y->tv_usec - x->tv_usec) / 1000000; y->tv_usec += 1000000 * nsec; y->tv_sec -= nsec; } /* Compute the time remaining to wait. tv_usec is certainly positive. */ result0.tv_sec = x->tv_sec - y->tv_sec; result0.tv_usec = x->tv_usec - y->tv_usec; *result = ((double)result0.tv_usec)/1e6 + (double)result0.tv_sec; /* Return 1 if result is negative. */ return x->tv_sec < y->tv_sec; } inline __device__ void bgk_collide(float& f0, float& f1, float& f2, float& f3 , float& f4 , float& f5 , float& f6 , float& f7 , float& f8 , float& f9, float& f10, float& f11, float& f12, float& f13, float& f14, float& f15, float& f16, float& f17, float& f18, float omega) { float rho,u,v,w; rho = f0+f1+f2+f3+f4+f5+f6+f7+f8+f9; rho +=f10+f11+f12+f13+f14+f15+f16+f17+f18; u = f1-f3+f5-f6-f7+f8+f10-f12+f15-f17; v = f2-f4+f5+f6-f7-f8+f11-f13+f16-f18; w = f9+f10+f11+f12+f13-f14-f15-f16-f17-f18; float usqr = u*u+v*v+w*w; // f0 =(1.f-omega)*f0 +omega*(0.3333333333f*(rho-1.5f*usqr)); // f1 =(1.f-omega)*f1 +omega*(0.0555555556f*(rho+3.0f*u+4.5f*u*u-1.5f*usqr)); // f2 =(1.f-omega)*f2 +omega*(0.0555555556f*(rho+3.0f*v+4.5f*v*v-1.5f*usqr)); // f3 =(1.f-omega)*f3 +omega*(0.0555555556f*(rho-3.0f*u+4.5f*u*u-1.5f*usqr)); // f4 =(1.f-omega)*f4 +omega*(0.0555555556f*(rho-3.0f*v+4.5f*v*v-1.5f*usqr)); // f5 =(1.f-omega)*f5 +omega*(0.0277777778f*(rho+3.0f*(u+v)+4.5f*(u+v)*(u+v)-1.5f*usqr)); // f6 =(1.f-omega)*f6 +omega*(0.0277777778f*(rho+3.0f*(-u+v)+4.5f*(-u+v)*(-u+v)-1.5f*usqr)); // f7 =(1.f-omega)*f7 +omega*(0.0277777778f*(rho+3.0f*(-u-v)+4.5f*(-u-v)*(-u-v)-1.5f*usqr)); // f8 =(1.f-omega)*f8 +omega*(0.0277777778f*(rho+3.0f*(u-v)+4.5f*(u-v)*(u-v)-1.5f*usqr)); // f9 =(1.f-omega)*f9 +omega*(0.0555555556f*(rho+3.0f*w+4.5f*w*w-1.5f*usqr)); // f10=(1.f-omega)*f10+omega*(0.0277777778f*(rho+3.0f*(u+w)+4.5f*(u+w)*(u+w)-1.5f*usqr)); // f11=(1.f-omega)*f11+omega*(0.0277777778f*(rho+3.0f*(v+w)+4.5f*(v+w)*(u+w)-1.5f*usqr)); // f12=(1.f-omega)*f12+omega*(0.0277777778f*(rho+3.0f*(-u+w)+4.5f*(-u+w)*(-u+w)-1.5f*usqr)); // f13=(1.f-omega)*f13+omega*(0.0277777778f*(rho+3.0f*(-v+w)+4.5f*(-v+w)*(u+w)-1.5f*usqr)); // f14=(1.f-omega)*f14+omega*(0.0555555556f*(rho-3.0f*w+4.5f*w*w-1.5f*usqr)); // f15=(1.f-omega)*f15+omega*(0.0277777778f*(rho+3.0f*(u-w)+4.5f*(u-w)*(u-w)-1.5f*usqr)); // f16=(1.f-omega)*f16+omega*(0.0277777778f*(rho+3.0f*(v-w)+4.5f*(v-w)*(v-w)-1.5f*usqr)); // f17=(1.f-omega)*f17+omega*(0.0277777778f*(rho+3.0f*(-u-w)+4.5f*(-u-w)*(-u-w)-1.5f*usqr)); // f18=(1.f-omega)*f18+omega*(0.0277777778f*(rho+3.0f*(-v-w)+4.5f*(-v-w)*(-v-w)-1.5f*usqr)); f0 -=omega*(f0 -0.3333333333f*(rho-1.5f*usqr)); f1 -=omega*(f1 -0.0555555556f*(rho+3.0f*u+4.5f*u*u-1.5f*usqr)); f2 -=omega*(f2 -0.0555555556f*(rho+3.0f*v+4.5f*v*v-1.5f*usqr)); f3 -=omega*(f3 -0.0555555556f*(rho-3.0f*u+4.5f*u*u-1.5f*usqr)); f4 -=omega*(f4 -0.0555555556f*(rho-3.0f*v+4.5f*v*v-1.5f*usqr)); f5 -=omega*(f5 -0.0277777778f*(rho+3.0f*(u+v)+4.5f*(u+v)*(u+v)-1.5f*usqr)); f6 -=omega*(f6 
-0.0277777778f*(rho+3.0f*(-u+v)+4.5f*(-u+v)*(-u+v)-1.5f*usqr)); f7 -=omega*(f7 -0.0277777778f*(rho+3.0f*(-u-v)+4.5f*(-u-v)*(-u-v)-1.5f*usqr)); f8 -=omega*(f8 -0.0277777778f*(rho+3.0f*(u-v)+4.5f*(u-v)*(u-v)-1.5f*usqr)); f9 -=omega*(f9 -0.0555555556f*(rho+3.0f*w+4.5f*w*w-1.5f*usqr)); f10-=omega*(f10-0.0277777778f*(rho+3.0f*(u+w)+4.5f*(u+w)*(u+w)-1.5f*usqr)); f11-=omega*(f11-0.0277777778f*(rho+3.0f*(v+w)+4.5f*(v+w)*(v+w)-1.5f*usqr)); f12-=omega*(f12-0.0277777778f*(rho+3.0f*(-u+w)+4.5f*(-u+w)*(-u+w)-1.5f*usqr)); f13-=omega*(f13-0.0277777778f*(rho+3.0f*(-v+w)+4.5f*(-v+w)*(-v+w)-1.5f*usqr)); f14-=omega*(f14-0.0555555556f*(rho-3.0f*w+4.5f*w*w-1.5f*usqr)); f15-=omega*(f15-0.0277777778f*(rho+3.0f*(u-w)+4.5f*(u-w)*(u-w)-1.5f*usqr)); f16-=omega*(f16-0.0277777778f*(rho+3.0f*(v-w)+4.5f*(v-w)*(v-w)-1.5f*usqr)); f17-=omega*(f17-0.0277777778f*(rho+3.0f*(-u-w)+4.5f*(-u-w)*(-u-w)-1.5f*usqr)); f18-=omega*(f18-0.0277777778f*(rho+3.0f*(-v-w)+4.5f*(-v-w)*(-v-w)-1.5f*usqr)); } inline __device__ void mrt_collide(float& f0, float& f1, float& f2, float& f3 , float& f4 , float& f5 , float& f6 , float& f7 , float& f8 , float& f9, float& f10, float& f11, float& f12, float& f13, float& f14, float& f15, float& f16, float& f17, float& f18, float omega) { float u,v,w; u = f1-f3+f5-f6-f7+f8+f10-f12+f15-f17; v = f2-f4+f5+f6-f7-f8+f11-f13+f16-f18; w = f9+f10+f11+f12+f13-f14-f15-f16-f17-f18; float rho = f0+f1+f2+f3+f4+f5+f6+f7+f8+f9+ f10+f11+f12+f13+f14+f15+f16+f17+f18; float usqr = u*u+v*v+w*w; // u = rho*u; // v = rho*v; // w = rho*w; float m1,m2,m4,m6,m8,m9,m10,m11,m12,m13,m14,m15,m16,m17,m18; //COMPUTE M-MEQ //m1 = -19.f*f0+ 19.f*f5+19.f*f6+19.f*f7+19.f*f8+19.f*f10+19.f*f11+19.f*f12+19.f*f13+19.f*f15+19.f*f16+19.f*f17+19.f*f18 -19.f*(u*u+v*v+w*w);//+8.f*(f5+f6+f7+f8+f10+f11+f12+f13+f15+f16+f17+f18); //m4 = -3.33333333f*f1+3.33333333f*f3+1.66666667f*f5-1.66666667f*f6-1.66666667f*f7+1.66666667f*f8+1.66666667f*f10-1.66666667f*f12+1.66666667f*f15-1.66666667f*f17; //m6 = -3.33333333f*f2+3.33333333f*f4+1.66666667f*f5+1.66666667f*f6-1.66666667f*f7-1.66666667f*f8+1.66666667f*f11-1.66666667f*f13+1.66666667f*f16-1.66666667f*f18; //m8 = -3.33333333f*f9+1.66666667f*f10+1.66666667f*f11+1.66666667f*f12+1.66666667f*f13+3.33333333f*f14-1.66666667f*f15-1.66666667f*f16-1.66666667f*f17-1.66666667f*f18; m1 = 19.f*(-f0+ f5+f6+f7+f8+f10+f11+f12+f13+f15+f16+f17+f18 -(u*u+v*v+w*w));//+8.f*(f5+f6+f7+f8+f10+f11+f12+f13+f15+f16+f17+f18); m2 = 12.f*f0+ -4.f*f1+ -4.f*f2+ -4.f*f3+ -4.f*f4+ f5+ f6+ f7+ f8+ -4.f*f9+ f10+ f11+ f12+ f13+ -4.f*f14+ f15+ f16+ f17+ f18 +7.53968254f*(u*u+v*v+w*w); // m4 = 1.666666667f*(-2.f*f1+2.f*f3+f5-f6-f7+f8+f10-f12+f15-f17); // m6 = 1.666666667f*(-2.f*f2+2.f*f4+f5+f6-f7-f8+f11-f13+f16-f18); // m8 = 1.666666667f*(-2.f*f9+f10+f11+f12+f13+2.f*f14-f15-f16-f17-f18); m4 = 1.666666667f*(-3.f*f1+3.f*f3+u); m6 = 1.666666667f*(-3.f*f2+3.f*f4+v); m8 = 1.666666667f*(-3.f*f9+3.f*f14+w); m9 = 2.f*f1+ - f2+ 2.f*f3+ - f4+ f5+ f6+ f7+ f8+- f9+ f10+ -2.f*f11+ f12+-2.f*f13+- f14+ f15+ -2.f*f16+ f17+-2.f*f18 -(2.f*u*u-(v*v+w*w)); m10 =-4.f*f1+ 2.f*f2+ -4.f*f3+ 2.f*f4+ f5+ f6+ f7+ f8+ 2.f*f9+ f10+ -2.f*f11+ f12+-2.f*f13+ 2.f*f14+ f15+ -2.f*f16+ f17+-2.f*f18; m11 = f2 + f4+ f5+ f6+ f7+ f8+- f9+-f10 +-f12 +- f14+-f15 +-f17 -(v*v-w*w); m12 = -2.f*f2 -2.f*f4+ f5+ f6+ f7+ f8+ 2.f*f9+-f10 +-f12 + 2.f*f14+-f15 +-f17 ; m13 = f5+-f6+ f7+-f8 -u*v; m14 = f11 +- f13 + - f16 + f18 -v*w; m15 = f10 + - f12 +-f15 + f17 -u*w; m16 = f5+-f6+-f7+ f8 -f10 + f12 +-f15 + f17 ; m17 = -f5+-f6+ f7+ f8 + f11 +- f13 + f16 +- f18; m18 = f10+- f11+ f12+- f13 +-f15+ 
f16+-f17+ f18; if(SmagLES == "YES"){ //// float PI11 = -1.0f/38.0f*( (m1)+19.0f*omega* (m9)); //// float PI22 = -1.0f/76.0f*(2.0f*(m1)-19.0f*(omega*(m9)-3.0f*omega*(m11))); //// float PI33 = -1.0f/76.0f*(2.0f*(m1)-19.0f*(omega*(m9)+3.0f*omega*(m11))); // float PI11 = LRLEVEL*-0.026315789f*m1-0.5f *omega*m9; // float PI22 = LRLEVEL*-0.026315789f*m1+0.25f*omega*(m9-3.0f*m11); // float PI33 = LRLEVEL*-0.026315789f*m1+0.25f*omega*(m9+3.0f*m11); // float PI12 = LRLEVEL*-1.5f*omega*m13; // float PI23 = LRLEVEL*-1.5f*omega*m14; // float PI13 = LRLEVEL*-1.5f*omega*m15; // float nu0 = ((1.0f/omega)-0.5f)*LRFACTOR/3.0f; // float Smag = sqrt(2.f*(PI11*PI11+PI22*PI22+PI33*PI33+2.f*PI12*PI12+2.f*PI23*PI23+2.f*PI13*PI13)); // //float Cs = 0.01f; // omega = 1.0f/(3.0f*(nu0+CS*Smag*LRFACTOR*LRFACTOR)*LRLEVEL+0.5f); // //omega = 1.0f/(1.0f/omega+3.f*CS*Smag*LRFACTOR*LRFACTOR); // //omega = 1.0f/(1.0f*LRLEVEL/1.99983f-1.f+0.5f+3.f*CS*Smag*LRFACTOR); float feq0 = 0.1904761791f*rho+-0.597127747f*usqr ; float feq1 = 0.1031746045f*rho+ 0.032375918f*usqr+ 0.1666666667f*u ; float feq2 = 0.1031746045f*rho+ 0.032375918f*usqr+ 0.1666666667f*v ; float feq3 = 0.1031746045f*rho+ 0.032375918f*usqr+ -0.1666666667f*u ; float feq4 = 0.1031746045f*rho+ 0.032375918f*usqr+ -0.1666666667f*v ; float feq5 = 0.0158730149f*rho+ 0.033572690f*usqr+ 0.083333333f*( u+v) ; float feq6 = 0.0158730149f*rho+ 0.033572690f*usqr+ -0.083333333f*( u-v) ; float feq7 = 0.0158730149f*rho+ 0.033572690f*usqr+ -0.083333333f*( u+v) ; float feq8 = 0.0158730149f*rho+ 0.033572690f*usqr+ 0.083333333f*( u-v) ; float feq9 = 0.1031746045f*rho+ 0.032375918f*usqr+ 0.1666666667f*w; float feq10= 0.0158730149f*rho+ 0.033572690f*usqr+ 0.083333333f*( u+w) ; float feq11= 0.0158730149f*rho+ 0.033572690f*usqr+ 0.083333333f*( v+w); float feq12= 0.0158730149f*rho+ 0.033572690f*usqr+ -0.083333333f*( u-w) ; float feq13= 0.0158730149f*rho+ 0.033572690f*usqr+ -0.083333333f*( v-w); float feq14= 0.1031746045f*rho+ 0.032375918f*usqr+ -0.1666666667f*w; float feq15= 0.0158730149f*rho+ 0.033572690f*usqr+ 0.083333333f*(u-w) ; float feq16= 0.0158730149f*rho+ 0.033572690f*usqr+ 0.083333333f*(v-w); float feq17= 0.0158730149f*rho+ 0.033572690f*usqr+ -0.083333333f*(u+w) ; float feq18= 0.0158730149f*rho+ 0.033572690f*usqr+ -0.083333333f*(v+w); feq1 += 0.055555556f*(2.f*u*u-(v*v+w*w)); feq2 += -0.027777778f*(2.f*u*u-(v*v+w*w))+ 0.083333333f*(v*v-w*w); feq3 += 0.055555556f*(2.f*u*u-(v*v+w*w)); feq4 += -0.027777778f*(2.f*u*u-(v*v+w*w))+ 0.083333333f*(v*v-w*w); feq5 += 0.027777778f*(2.f*u*u-(v*v+w*w))+ 0.083333333f*(v*v-w*w)+ 0.25f*u*v ; feq6 += 0.027777778f*(2.f*u*u-(v*v+w*w))+ 0.083333333f*(v*v-w*w)+ -0.25f*u*v ; feq7 += 0.027777778f*(2.f*u*u-(v*v+w*w))+ 0.083333333f*(v*v-w*w)+ 0.25f*u*v ; feq8 += 0.027777778f*(2.f*u*u-(v*v+w*w))+ 0.083333333f*(v*v-w*w)+ -0.25f*u*v ; feq9 += -0.027777778f*(2.f*u*u-(v*v+w*w))+ -0.083333333f*(v*v-w*w) ; feq10+= 0.027777778f*(2.f*u*u-(v*v+w*w))+ -0.083333333f*(v*v-w*w) + 0.25f*u*w; feq11+= -0.055555556f*(2.f*u*u-(v*v+w*w)) + 0.25f*v*w ; feq12+= 0.027777778f*(2.f*u*u-(v*v+w*w))+ -0.083333333f*(v*v-w*w) + -0.25f*u*w; feq13+= -0.055555556f*(2.f*u*u-(v*v+w*w)) -0.25f*v*w ; feq14+= -0.027777778f*(2.f*u*u-(v*v+w*w))+ -0.083333333f*(v*v-w*w) ; feq15+= 0.027777778f*(2.f*u*u-(v*v+w*w))+ -0.083333333f*(v*v-w*w) + -0.25f*u*w; feq16+= -0.055555556f*(2.f*u*u-(v*v+w*w)) + -0.25f*v*w ; feq17+= 0.027777778f*(2.f*u*u-(v*v+w*w))+ -0.083333333f*(v*v-w*w) + 0.25f*u*w; feq18+= -0.055555556f*(2.f*u*u-(v*v+w*w)) + 0.25f*v*w ; float PI11 = 
(f1-feq1)+(f3-feq3)+(f5-feq5)+(f6-feq6)+(f7-feq7)+(f8-feq8)+(f10-feq10)+(f12-feq12)+(f15-feq15)+(f17-feq17); float PI22 = (f2-feq2)+(f4-feq4)+(f5-feq5)+(f6-feq6)+(f7-feq7)+(f8-feq8)+(f11-feq11)+(f13-feq13)+(f16-feq16)+(f18-feq18); float PI33 = (f9-feq9)+(f14-feq14)+(f10-feq10)+(f12-feq12)+(f15-feq15)+(f17-feq17)+(f11-feq11)+(f13-feq13)+(f16-feq16)+(f18-feq18); float PI12 = (f5-feq5)+(f7-feq7)-(f6-feq6)-(f8-feq8); float PI13 = (f10-feq10)+(f17-feq17)-(f12-feq12)-(f15-feq15); float PI23 = (f11-feq11)+(f18-feq18)-(f13-feq13)-(f16-feq16); float Q = sqrt(PI11*PI11+PI22*PI22+PI33*PI33+2.f*PI12*PI12+2.f*PI23*PI23+2.f*PI13*PI13); float nu0 = ((1.0f/omega)-0.5f)*LRFACTOR/3.0f; float tau0 = 1.f/omega; //float Smag = (sqrt(nu0*nu0+18.f*CS*LRFACTOR*LRFACTOR*Q)-nu0)/(6.f*CS*LRFACTOR*LRFACTOR); //float Smag = LRFACTOR*(sqrt(4.f/9.f*tau0*tau0+8.f*CS*LRFACTOR*Q)-2.f/3.f*tau0)/(4.f*CS*LRFACTOR*LRFACTOR); //omega = 1.0f/(3.0f*(nu0+CS*Smag*LRFACTOR*LRFACTOR)*LRLEVEL+0.5f); //float tau = tau0+0.5*(-tau0+sqrt(tau0*tau0+18.f*CS*LRFACTOR*Q)); float tau = tau0+0.5f*(-tau0+sqrt(tau0*tau0+18.f*CS*sqrt(2.f)*Q)); omega = 1.f/tau; //float tau = 3.f*nu0*LRFACTOR+0.5f+(sqrt(tau0*tau0+18.f*CS*CS*LRFACTOR*LRFACTOR*Q)-tau0)*0.5f; //omega = 1.f/tau; } f0 -=- 0.012531328f*(m1)+ 0.047619048f*(m2); f1 -=-0.0045948204f*(m1)+-0.015873016f*(m2)+ -0.1f*(m4) + 0.055555556f*((m9)*omega-m10); f2 -=-0.0045948204f*(m1)+-0.015873016f*(m2) + -0.1f*(m6) +-0.027777778f*((m9)*omega-m10)+ 0.083333333f*((m11)*omega-m12); f3 -=-0.0045948204f*(m1)+-0.015873016f*(m2)+ 0.1f*(m4) + 0.055555556f*((m9)*omega-m10); f4 -=-0.0045948204f*(m1)+-0.015873016f*(m2) + 0.1f*(m6) +-0.027777778f*((m9)*omega-m10)+ 0.083333333f*((m11)*omega-m12); f5 -= 0.0033416876f*(m1)+ 0.003968254f*(m2)+ 0.025f*(m4+m6) +0.013888889f*(m10)+0.041666667f*(m12)+0.125f*( m16-m17)+ omega*(0.027777778f*(m9) +0.083333333f*(m11)+( 0.25f*(m13))); f6 -= 0.0033416876f*(m1)+ 0.003968254f*(m2)+-0.025f*(m4-m6) +0.013888889f*(m10)+0.041666667f*(m12)+0.125f*(-m16-m17)+ omega*(0.027777778f*(m9) +0.083333333f*(m11)+(-0.25f*(m13))); f7 -= 0.0033416876f*(m1)+ 0.003968254f*(m2)+-0.025f*(m4+m6) +0.013888889f*(m10)+0.041666667f*(m12)+0.125f*(-m16+m17)+ omega*(0.027777778f*(m9) +0.083333333f*(m11)+( 0.25f*(m13))); f8 -= 0.0033416876f*(m1)+ 0.003968254f*(m2)+ 0.025f*(m4-m6) +0.013888889f*(m10)+0.041666667f*(m12)+0.125f*( m16+m17)+ omega*(0.027777778f*(m9) +0.083333333f*(m11)+(-0.25f*(m13))); f9 -=-0.0045948204f*(m1)+-0.015873016f*(m2) + -0.1f*(m8)+-0.027777778f*((m9)*omega-m10)+-0.083333333f*((m11)*omega-m12); f10-= 0.0033416876f*(m1)+ 0.003968254f*(m2)+ 0.025f*(m4+m8) +0.013888889f*(m10)-0.041666667f*(m12)+0.125f*(-m16+m18)+ omega*(0.027777778f*(m9) -0.083333333f*(m11)+( 0.25f*(m15))); f11-= 0.0033416876f*(m1)+ 0.003968254f*(m2) + 0.025f*(m6+m8)+0.125f*( m17-m18)-0.027777778f*(m10)+omega*(-0.055555556f*(m9) +( 0.25f*(m14))); f12-= 0.0033416876f*(m1)+ 0.003968254f*(m2)+-0.025f*(m4-m8) +0.013888889f*(m10)-0.041666667f*(m12)+0.125f*( m16+m18)+ omega*(0.027777778f*(m9) -0.083333333f*(m11)+(-0.25f*(m15))); f13-= 0.0033416876f*(m1)+ 0.003968254f*(m2) + -0.025f*(m6-m8)+0.125f*(-m17-m18)-0.027777778f*(m10)+omega*(-0.055555556f*(m9) +(-0.25f*(m14))); f14-=-0.0045948204f*(m1)+-0.015873016f*(m2) + 0.1f*(m8)+-0.027777778f*((m9)*omega-m10)+-0.083333333f*((m11)*omega-m12); f15-= 0.0033416876f*(m1)+ 0.003968254f*(m2)+ 0.025f*(m4-m8) +0.013888889f*(m10)-0.041666667f*(m12)+0.125f*(-m16-m18)+ omega*(0.027777778f*(m9) -0.083333333f*(m11)+(-0.25f*(m15))); f16-= 0.0033416876f*(m1)+ 0.003968254f*(m2) + 
0.025f*(m6-m8)+0.125f*( m17+m18)-0.027777778f*(m10)+omega*(-0.055555556f*(m9) +(-0.25f*(m14))); f17-= 0.0033416876f*(m1)+ 0.003968254f*(m2)+-0.025f*(m4+m8) +0.013888889f*(m10)-0.041666667f*(m12)+0.125f*( m16-m18)+ omega*(0.027777778f*(m9) -0.083333333f*(m11)+( 0.25f*(m15))); f18-= 0.0033416876f*(m1)+ 0.003968254f*(m2) + -0.025f*(m6+m8)+0.125f*(-m17+m18)-0.027777778f*(m10)+omega*(-0.055555556f*(m9) +( 0.25f*(m14))); } inline __device__ void mrt_collide_LES(float& f0, float& f1, float& f2, float& f3 , float& f4 , float& f5 , float& f6 , float& f7 , float& f8 , float& f9, float& f10, float& f11, float& f12, float& f13, float& f14, float& f15, float& f16, float& f17, float& f18, float omega, float Cs) { float u,v,w; u = f1-f3+f5-f6-f7+f8+f10-f12+f15-f17; v = f2-f4+f5+f6-f7-f8+f11-f13+f16-f18; w = f9+f10+f11+f12+f13-f14-f15-f16-f17-f18; float rho = f0+f1+f2+f3+f4+f5+f6+f7+f8+f9+ f10+f11+f12+f13+f14+f15+f16+f17+f18; float usqr = u*u+v*v+w*w; // u = rho*u; // v = rho*v; // w = rho*w; float m1,m2,m4,m6,m8,m9,m10,m11,m12,m13,m14,m15,m16,m17,m18; //COMPUTE M-MEQ //m1 = -19.f*f0+ 19.f*f5+19.f*f6+19.f*f7+19.f*f8+19.f*f10+19.f*f11+19.f*f12+19.f*f13+19.f*f15+19.f*f16+19.f*f17+19.f*f18 -19.f*(u*u+v*v+w*w);//+8.f*(f5+f6+f7+f8+f10+f11+f12+f13+f15+f16+f17+f18); //m4 = -3.33333333f*f1+3.33333333f*f3+1.66666667f*f5-1.66666667f*f6-1.66666667f*f7+1.66666667f*f8+1.66666667f*f10-1.66666667f*f12+1.66666667f*f15-1.66666667f*f17; //m6 = -3.33333333f*f2+3.33333333f*f4+1.66666667f*f5+1.66666667f*f6-1.66666667f*f7-1.66666667f*f8+1.66666667f*f11-1.66666667f*f13+1.66666667f*f16-1.66666667f*f18; //m8 = -3.33333333f*f9+1.66666667f*f10+1.66666667f*f11+1.66666667f*f12+1.66666667f*f13+3.33333333f*f14-1.66666667f*f15-1.66666667f*f16-1.66666667f*f17-1.66666667f*f18; m1 = 19.f*(-f0+ f5+f6+f7+f8+f10+f11+f12+f13+f15+f16+f17+f18 -(u*u+v*v+w*w));//+8.f*(f5+f6+f7+f8+f10+f11+f12+f13+f15+f16+f17+f18); m2 = 12.f*f0+ -4.f*f1+ -4.f*f2+ -4.f*f3+ -4.f*f4+ f5+ f6+ f7+ f8+ -4.f*f9+ f10+ f11+ f12+ f13+ -4.f*f14+ f15+ f16+ f17+ f18 +7.53968254f*(u*u+v*v+w*w); // m4 = 1.666666667f*(-2.f*f1+2.f*f3+f5-f6-f7+f8+f10-f12+f15-f17); // m6 = 1.666666667f*(-2.f*f2+2.f*f4+f5+f6-f7-f8+f11-f13+f16-f18); // m8 = 1.666666667f*(-2.f*f9+f10+f11+f12+f13+2.f*f14-f15-f16-f17-f18); m4 = 1.666666667f*(-3.f*f1+3.f*f3+u); m6 = 1.666666667f*(-3.f*f2+3.f*f4+v); m8 = 1.666666667f*(-3.f*f9+3.f*f14+w); m9 = 2.f*f1+ - f2+ 2.f*f3+ - f4+ f5+ f6+ f7+ f8+- f9+ f10+ -2.f*f11+ f12+-2.f*f13+- f14+ f15+ -2.f*f16+ f17+-2.f*f18 -(2.f*u*u-(v*v+w*w)); m10 =-4.f*f1+ 2.f*f2+ -4.f*f3+ 2.f*f4+ f5+ f6+ f7+ f8+ 2.f*f9+ f10+ -2.f*f11+ f12+-2.f*f13+ 2.f*f14+ f15+ -2.f*f16+ f17+-2.f*f18; m11 = f2 + f4+ f5+ f6+ f7+ f8+- f9+-f10 +-f12 +- f14+-f15 +-f17 -(v*v-w*w); m12 = -2.f*f2 -2.f*f4+ f5+ f6+ f7+ f8+ 2.f*f9+-f10 +-f12 + 2.f*f14+-f15 +-f17 ; m13 = f5+-f6+ f7+-f8 -u*v; m14 = f11 +- f13 + - f16 + f18 -v*w; m15 = f10 + - f12 +-f15 + f17 -u*w; m16 = f5+-f6+-f7+ f8 -f10 + f12 +-f15 + f17 ; m17 = -f5+-f6+ f7+ f8 + f11 +- f13 + f16 +- f18; m18 = f10+- f11+ f12+- f13 +-f15+ f16+-f17+ f18; if(SmagLES == "YES"){ // float PI11 = -0.026315789f*m1-0.5f *omega*m9; // float PI22 = -0.026315789f*m1+0.25f*omega*(m9-3.0f*m11); // float PI33 = -0.026315789f*m1+0.25f*omega*(m9+3.0f*m11); // // float PI12 = -1.5f*omega*m13; // float PI23 = -1.5f*omega*m14; // float PI13 = -1.5f*omega*m15; // float Smag = sqrt(2.f*(PI11*PI11+PI22*PI22+PI33*PI33+2.f*PI12*PI12+2.f*PI23*PI23+2.f*PI13*PI13)); // omega = 1.0f/(1.0f/omega+3.f*CS*Smag); // float PI11 = LRLEVEL*-1.0f/38.0f*( (m1)+19.0f*omega* (m9)); // float PI22 = 
LRLEVEL*-1.0f/76.0f*(2.0f*(m1)-19.0f*(omega*(m9)-3.0f*omega*(m11))); // float PI33 = LRLEVEL*-1.0f/76.0f*(2.0f*(m1)-19.0f*(omega*(m9)+3.0f*omega*(m11))); // float PI12 = LRLEVEL*-1.5f*omega*m13; // float PI23 = LRLEVEL*-1.5f*omega*m14; // float PI13 = LRLEVEL*-1.5f*omega*m15; // float nu0 = ((1.0f/omega)-0.5f)/3.0f; // float Smag = sqrt(PI11*PI11+PI22*PI22+PI33*PI33+PI12*PI12+PI23*PI23+PI13*PI13); // omega = 1.0f/(3.0f*(nu0+Cs*Smag*LRLEVEL*LRLEVEL)+0.5f); float feq0 = 0.1904761791f*rho+-0.597127747f*usqr ; float feq1 = 0.1031746045f*rho+ 0.032375918f*usqr+ 0.1666666667f*u ; float feq2 = 0.1031746045f*rho+ 0.032375918f*usqr+ 0.1666666667f*v ; float feq3 = 0.1031746045f*rho+ 0.032375918f*usqr+ -0.1666666667f*u ; float feq4 = 0.1031746045f*rho+ 0.032375918f*usqr+ -0.1666666667f*v ; float feq5 = 0.0158730149f*rho+ 0.033572690f*usqr+ 0.083333333f*( u+v) ; float feq6 = 0.0158730149f*rho+ 0.033572690f*usqr+ -0.083333333f*( u-v) ; float feq7 = 0.0158730149f*rho+ 0.033572690f*usqr+ -0.083333333f*( u+v) ; float feq8 = 0.0158730149f*rho+ 0.033572690f*usqr+ 0.083333333f*( u-v) ; float feq9 = 0.1031746045f*rho+ 0.032375918f*usqr+ 0.1666666667f*w; float feq10= 0.0158730149f*rho+ 0.033572690f*usqr+ 0.083333333f*( u+w) ; float feq11= 0.0158730149f*rho+ 0.033572690f*usqr+ 0.083333333f*( v+w); float feq12= 0.0158730149f*rho+ 0.033572690f*usqr+ -0.083333333f*( u-w) ; float feq13= 0.0158730149f*rho+ 0.033572690f*usqr+ -0.083333333f*( v-w); float feq14= 0.1031746045f*rho+ 0.032375918f*usqr+ -0.1666666667f*w; float feq15= 0.0158730149f*rho+ 0.033572690f*usqr+ 0.083333333f*(u-w) ; float feq16= 0.0158730149f*rho+ 0.033572690f*usqr+ 0.083333333f*(v-w); float feq17= 0.0158730149f*rho+ 0.033572690f*usqr+ -0.083333333f*(u+w) ; float feq18= 0.0158730149f*rho+ 0.033572690f*usqr+ -0.083333333f*(v+w); feq1 += 0.055555556f*(2.f*u*u-(v*v+w*w)); feq2 += -0.027777778f*(2.f*u*u-(v*v+w*w))+ 0.083333333f*(v*v-w*w); feq3 += 0.055555556f*(2.f*u*u-(v*v+w*w)); feq4 += -0.027777778f*(2.f*u*u-(v*v+w*w))+ 0.083333333f*(v*v-w*w); feq5 += 0.027777778f*(2.f*u*u-(v*v+w*w))+ 0.083333333f*(v*v-w*w)+ 0.25f*u*v ; feq6 += 0.027777778f*(2.f*u*u-(v*v+w*w))+ 0.083333333f*(v*v-w*w)+ -0.25f*u*v ; feq7 += 0.027777778f*(2.f*u*u-(v*v+w*w))+ 0.083333333f*(v*v-w*w)+ 0.25f*u*v ; feq8 += 0.027777778f*(2.f*u*u-(v*v+w*w))+ 0.083333333f*(v*v-w*w)+ -0.25f*u*v ; feq9 += -0.027777778f*(2.f*u*u-(v*v+w*w))+ -0.083333333f*(v*v-w*w) ; feq10+= 0.027777778f*(2.f*u*u-(v*v+w*w))+ -0.083333333f*(v*v-w*w) + 0.25f*u*w; feq11+= -0.055555556f*(2.f*u*u-(v*v+w*w)) + 0.25f*v*w ; feq12+= 0.027777778f*(2.f*u*u-(v*v+w*w))+ -0.083333333f*(v*v-w*w) + -0.25f*u*w; feq13+= -0.055555556f*(2.f*u*u-(v*v+w*w)) -0.25f*v*w ; feq14+= -0.027777778f*(2.f*u*u-(v*v+w*w))+ -0.083333333f*(v*v-w*w) ; feq15+= 0.027777778f*(2.f*u*u-(v*v+w*w))+ -0.083333333f*(v*v-w*w) + -0.25f*u*w; feq16+= -0.055555556f*(2.f*u*u-(v*v+w*w)) + -0.25f*v*w ; feq17+= 0.027777778f*(2.f*u*u-(v*v+w*w))+ -0.083333333f*(v*v-w*w) + 0.25f*u*w; feq18+= -0.055555556f*(2.f*u*u-(v*v+w*w)) + 0.25f*v*w ; float PI11 = (f1-feq1)+(f3-feq3)+(f5-feq5)+(f6-feq6)+(f7-feq7)+(f8-feq8)+(f10-feq10)+(f12-feq12)+(f15-feq15)+(f17-feq17); float PI22 = (f2-feq2)+(f4-feq4)+(f5-feq5)+(f6-feq6)+(f7-feq7)+(f8-feq8)+(f11-feq11)+(f13-feq13)+(f16-feq16)+(f18-feq18); float PI33 = (f9-feq9)+(f14-feq14)+(f10-feq10)+(f12-feq12)+(f15-feq15)+(f17-feq17)+(f11-feq11)+(f13-feq13)+(f16-feq16)+(f18-feq18); float PI12 = (f5-feq5)+(f7-feq7)-(f6-feq6)-(f8-feq8); float PI13 = (f10-feq10)+(f17-feq17)-(f12-feq12)-(f15-feq15); float PI23 = 
(f11-feq11)+(f18-feq18)-(f13-feq13)-(f16-feq16); //float Q = sqrt(PI11*PI11+PI22*PI22+PI33*PI33+2.f*PI12*PI12+2.f*PI23*PI23+2.f*PI13*PI13); //float nu0 = ((1.0f/omega)-0.5f)/3.0f; // //float Smag = (sqrt(nu0*nu0+18.f*CS*Q)-nu0)/(6.f*CS); // ////omega = 1.0f/(1.0f/omega+3.f*CS*Smag); // //float tau0 = 1.f/omega; //float tau = 3.f*nu0+0.5f+(sqrt(tau0*tau0+18.f*CS*CS*Q)-tau0)*0.5f; //omega = 1.f/tau; float Q = sqrt(PI11*PI11+PI22*PI22+PI33*PI33+2.f*PI12*PI12+2.f*PI23*PI23+2.f*PI13*PI13); float nu0 = ((1.0f/omega)-0.5f)/3.0f; float tau0 = 1.f/omega; //float Smag = (sqrt(nu0*nu0+18.f*CS*LRFACTOR*LRFACTOR*Q)-nu0)/(6.f*CS*LRFACTOR*LRFACTOR); //float Smag = (sqrt(4.f/9.f*tau0*tau0+8.f*CS*Q)-2.f/3.f*tau0)/(4.f*CS); //omega = 1.0f/(3.0f*(nu0+CS*Smag)+0.5f); float tau = tau0+0.5f*(-tau0+sqrt(tau0*tau0+18.f*sqrt(2.f)*CS*Q)); omega = 1.f/tau; } f0 -=- 0.012531328f*(m1)+ 0.047619048f*(m2); f1 -=-0.0045948204f*(m1)+-0.015873016f*(m2)+ -0.1f*(m4) + 0.055555556f*((m9)*omega-m10); f2 -=-0.0045948204f*(m1)+-0.015873016f*(m2) + -0.1f*(m6) +-0.027777778f*((m9)*omega-m10)+ 0.083333333f*((m11)*omega-m12); f3 -=-0.0045948204f*(m1)+-0.015873016f*(m2)+ 0.1f*(m4) + 0.055555556f*((m9)*omega-m10); f4 -=-0.0045948204f*(m1)+-0.015873016f*(m2) + 0.1f*(m6) +-0.027777778f*((m9)*omega-m10)+ 0.083333333f*((m11)*omega-m12); f5 -= 0.0033416876f*(m1)+ 0.003968254f*(m2)+ 0.025f*(m4+m6) +0.013888889f*(m10)+0.041666667f*(m12)+0.125f*( m16-m17)+ omega*(0.027777778f*(m9) +0.083333333f*(m11)+( 0.25f*(m13))); f6 -= 0.0033416876f*(m1)+ 0.003968254f*(m2)+-0.025f*(m4-m6) +0.013888889f*(m10)+0.041666667f*(m12)+0.125f*(-m16-m17)+ omega*(0.027777778f*(m9) +0.083333333f*(m11)+(-0.25f*(m13))); f7 -= 0.0033416876f*(m1)+ 0.003968254f*(m2)+-0.025f*(m4+m6) +0.013888889f*(m10)+0.041666667f*(m12)+0.125f*(-m16+m17)+ omega*(0.027777778f*(m9) +0.083333333f*(m11)+( 0.25f*(m13))); f8 -= 0.0033416876f*(m1)+ 0.003968254f*(m2)+ 0.025f*(m4-m6) +0.013888889f*(m10)+0.041666667f*(m12)+0.125f*( m16+m17)+ omega*(0.027777778f*(m9) +0.083333333f*(m11)+(-0.25f*(m13))); f9 -=-0.0045948204f*(m1)+-0.015873016f*(m2) + -0.1f*(m8)+-0.027777778f*((m9)*omega-m10)+-0.083333333f*((m11)*omega-m12); f10-= 0.0033416876f*(m1)+ 0.003968254f*(m2)+ 0.025f*(m4+m8) +0.013888889f*(m10)-0.041666667f*(m12)+0.125f*(-m16+m18)+ omega*(0.027777778f*(m9) -0.083333333f*(m11)+( 0.25f*(m15))); f11-= 0.0033416876f*(m1)+ 0.003968254f*(m2) + 0.025f*(m6+m8)+0.125f*( m17-m18)-0.027777778f*(m10)+omega*(-0.055555556f*(m9) +( 0.25f*(m14))); f12-= 0.0033416876f*(m1)+ 0.003968254f*(m2)+-0.025f*(m4-m8) +0.013888889f*(m10)-0.041666667f*(m12)+0.125f*( m16+m18)+ omega*(0.027777778f*(m9) -0.083333333f*(m11)+(-0.25f*(m15))); f13-= 0.0033416876f*(m1)+ 0.003968254f*(m2) + -0.025f*(m6-m8)+0.125f*(-m17-m18)-0.027777778f*(m10)+omega*(-0.055555556f*(m9) +(-0.25f*(m14))); f14-=-0.0045948204f*(m1)+-0.015873016f*(m2) + 0.1f*(m8)+-0.027777778f*((m9)*omega-m10)+-0.083333333f*((m11)*omega-m12); f15-= 0.0033416876f*(m1)+ 0.003968254f*(m2)+ 0.025f*(m4-m8) +0.013888889f*(m10)-0.041666667f*(m12)+0.125f*(-m16-m18)+ omega*(0.027777778f*(m9) -0.083333333f*(m11)+(-0.25f*(m15))); f16-= 0.0033416876f*(m1)+ 0.003968254f*(m2) + 0.025f*(m6-m8)+0.125f*( m17+m18)-0.027777778f*(m10)+omega*(-0.055555556f*(m9) +(-0.25f*(m14))); f17-= 0.0033416876f*(m1)+ 0.003968254f*(m2)+-0.025f*(m4+m8) +0.013888889f*(m10)-0.041666667f*(m12)+0.125f*( m16-m18)+ omega*(0.027777778f*(m9) -0.083333333f*(m11)+( 0.25f*(m15))); f18-= 0.0033416876f*(m1)+ 0.003968254f*(m2) + -0.025f*(m6+m8)+0.125f*(-m17+m18)-0.027777778f*(m10)+omega*(-0.055555556f*(m9) +( 
0.25f*(m14))); } inline __device__ void vel_av(float& f0, float& f1, float& f2, float& f3 , float& f4 , float& f5 , float& f6 , float& f7 , float& f8 , float& f9, float& f10, float& f11, float& f12, float& f13, float& f14, float& f15, float& f16, float& f17, float& f18, float& uAv, float& vAv, int t) { float u,v;//,w; u = f1-f3+f5-f6-f7+f8+f10-f12+f15-f17; v = f2-f4+f5+f6-f7-f8+f11-f13+f16-f18; uAv = (uAv*(t-START_VELAV)+u)/((t-START_VELAV)+1); vAv = (vAv*(t-START_VELAV)+v)/((t-START_VELAV)+1); } inline __device__ void vel_avLR(float& f0, float& f1, float& f2, float& f3 , float& f4 , float& f5 , float& f6 , float& f7 , float& f8 , float& f9, float& f10, float& f11, float& f12, float& f13, float& f14, float& f15, float& f16, float& f17, float& f18, float& uAv, float& vAv, float t) { float u,v;//,w; u = f1-f3+f5-f6-f7+f8+f10-f12+f15-f17; v = f2-f4+f5+f6-f7-f8+f11-f13+f16-f18; uAv = (uAv*(t-START_VELAV)+u*LRFACTOR)/((t-START_VELAV)+LRFACTOR); vAv = (vAv*(t-START_VELAV)+v*LRFACTOR)/((t-START_VELAV)+LRFACTOR); } inline __device__ void vel_fluc(float& f0, float& f1, float& f2, float& f3 , float& f4 , float& f5 , float& f6 , float& f7 , float& f8 , float& f9, float& f10, float& f11, float& f12, float& f13, float& f14, float& f15, float& f16, float& f17, float& f18, float& uAv, float& vAv, float& ufluc, float& vfluc, int t) { float u,v;//,w; u = f1-f3+f5-f6-f7+f8+f10-f12+f15-f17; v = f2-f4+f5+f6-f7-f8+f11-f13+f16-f18; u = (u-uAv)*(u-uAv); v = (v-vAv)*(v-vAv); ufluc = (ufluc*(t-START_VELFLUC)+u)/((t-START_VELFLUC)+1); vfluc = (vfluc*(t-START_VELFLUC)+v)/((t-START_VELFLUC)+1); } inline __device__ void vel_flucLR(float& f0, float& f1, float& f2, float& f3 , float& f4 , float& f5 , float& f6 , float& f7 , float& f8 , float& f9, float& f10, float& f11, float& f12, float& f13, float& f14, float& f15, float& f16, float& f17, float& f18, float& uAv, float& vAv, float& ufluc, float& vfluc, float t) { float u,v;//,w; u = f1-f3+f5-f6-f7+f8+f10-f12+f15-f17; v = f2-f4+f5+f6-f7-f8+f11-f13+f16-f18; u = (u-uAv)*(u-uAv); v = (v-vAv)*(v-vAv); ufluc = (ufluc*(t-START_VELFLUC)+u*LRFACTOR)/((t-START_VELFLUC)+LRFACTOR); vfluc = (vfluc*(t-START_VELFLUC)+v*LRFACTOR)/((t-START_VELFLUC)+LRFACTOR); } inline __device__ void bgk_scale_cf(float& f0, float& f1, float& f2, float& f3 , float& f4 , float& f5 , float& f6 , float& f7 , float& f8 , float& f9, float& f10, float& f11, float& f12, float& f13, float& f14, float& f15, float& f16, float& f17, float& f18, float SF) { float rho,u,v,w; rho = f0+f1+f2+f3+f4+f5+f6+f7+f8+f9+ f10+f11+f12+f13+f14+f15+f16+f17+f18; u = f1-f3+f5-f6-f7+f8+f10-f12+f15-f17; v = f2-f4+f5+f6-f7-f8+f11-f13+f16-f18; w = f9+f10+f11+f12+f13-f14-f15-f16-f17-f18; float usqr = u*u+v*v+w*w; float feq0 = 0.3333333333f*(rho-1.5f*usqr); float feq1 = 0.0555555556f*(rho+3.0f*u+4.5f*u*u-1.5f*usqr); float feq2 = 0.0555555556f*(rho+3.0f*v+4.5f*v*v-1.5f*usqr); float feq3 = 0.0555555556f*(rho-3.0f*u+4.5f*u*u-1.5f*usqr); float feq4 = 0.0555555556f*(rho-3.0f*v+4.5f*v*v-1.5f*usqr); float feq5 = 0.0277777778f*(rho+3.0f*(u+v)+4.5f*(u+v)*(u+v)-1.5f*usqr); float feq6 = 0.0277777778f*(rho+3.0f*(-u+v)+4.5f*(-u+v)*(-u+v)-1.5f*usqr); float feq7 = 0.0277777778f*(rho+3.0f*(-u-v)+4.5f*(-u-v)*(-u-v)-1.5f*usqr); float feq8 = 0.0277777778f*(rho+3.0f*(u-v)+4.5f*(u-v)*(u-v)-1.5f*usqr); float feq9 = 0.0555555556f*(rho+3.0f*w+4.5f*w*w-1.5f*usqr); float feq10 = 0.0277777778f*(rho+3.0f*(u+w)+4.5f*(u+w)*(u+w)-1.5f*usqr); float feq11 = 0.0277777778f*(rho+3.0f*(v+w)+4.5f*(v+w)*(v+w)-1.5f*usqr); float feq12 = 
0.0277777778f*(rho+3.0f*(-u+w)+4.5f*(-u+w)*(-u+w)-1.5f*usqr); float feq13 = 0.0277777778f*(rho+3.0f*(-v+w)+4.5f*(-v+w)*(-v+w)-1.5f*usqr); float feq14 = 0.0555555556f*(rho-3.0f*w+4.5f*w*w-1.5f*usqr); float feq15 = 0.0277777778f*(rho+3.0f*(u-w)+4.5f*(u-w)*(u-w)-1.5f*usqr); float feq16 = 0.0277777778f*(rho+3.0f*(v-w)+4.5f*(v-w)*(v-w)-1.5f*usqr); float feq17 = 0.0277777778f*(rho+3.0f*(-u-w)+4.5f*(-u-w)*(-u-w)-1.5f*usqr); float feq18 = 0.0277777778f*(rho+3.0f*(-v-w)+4.5f*(-v-w)*(-v-w)-1.5f*usqr); f0 =SF*f0 +(1.0f-SF)*feq0 ; f1 =SF*f1 +(1.0f-SF)*feq1 ; f2 =SF*f2 +(1.0f-SF)*feq2 ; f3 =SF*f3 +(1.0f-SF)*feq3 ; f4 =SF*f4 +(1.0f-SF)*feq4 ; f5 =SF*f5 +(1.0f-SF)*feq5 ; f6 =SF*f6 +(1.0f-SF)*feq6 ; f7 =SF*f7 +(1.0f-SF)*feq7 ; f8 =SF*f8 +(1.0f-SF)*feq8 ; f9 =SF*f9 +(1.0f-SF)*feq9 ; f10=SF*f10+(1.0f-SF)*feq10; f11=SF*f11+(1.0f-SF)*feq11; f12=SF*f12+(1.0f-SF)*feq12; f13=SF*f13+(1.0f-SF)*feq13; f14=SF*f14+(1.0f-SF)*feq14; f15=SF*f15+(1.0f-SF)*feq15; f16=SF*f16+(1.0f-SF)*feq16; f17=SF*f17+(1.0f-SF)*feq17; f18=SF*f18+(1.0f-SF)*feq18; } inline __device__ void mrt_scale_cf(float& f0, float& f1, float& f2, float& f3 , float& f4 , float& f5 , float& f6 , float& f7 , float& f8 , float& f9, float& f10, float& f11, float& f12, float& f13, float& f14, float& f15, float& f16, float& f17, float& f18, float SF) { float rho,u,v,w; rho = f0+f1+f2+f3+f4+f5+f6+f7+f8+f9+ f10+f11+f12+f13+f14+f15+f16+f17+f18; u = f1-f3+f5-f6-f7+f8+f10-f12+f15-f17; v = f2-f4+f5+f6-f7-f8+f11-f13+f16-f18; w = f9+f10+f11+f12+f13-f14-f15-f16-f17-f18; float usqr = u*u+v*v+w*w; float feq0 = 0.1904761791f*rho+-0.597127747f*usqr ; float feq1 = 0.1031746045f*rho+ 0.032375918f*usqr+ 0.1666666667f*u ; float feq2 = 0.1031746045f*rho+ 0.032375918f*usqr+ 0.1666666667f*v ; float feq3 = 0.1031746045f*rho+ 0.032375918f*usqr+ -0.1666666667f*u ; float feq4 = 0.1031746045f*rho+ 0.032375918f*usqr+ -0.1666666667f*v ; float feq5 = 0.0158730149f*rho+ 0.033572690f*usqr+ 0.083333333f*( u+v) ; float feq6 = 0.0158730149f*rho+ 0.033572690f*usqr+ -0.083333333f*( u-v) ; float feq7 = 0.0158730149f*rho+ 0.033572690f*usqr+ -0.083333333f*( u+v) ; float feq8 = 0.0158730149f*rho+ 0.033572690f*usqr+ 0.083333333f*( u-v) ; float feq9 = 0.1031746045f*rho+ 0.032375918f*usqr+ 0.1666666667f*w; float feq10= 0.0158730149f*rho+ 0.033572690f*usqr+ 0.083333333f*( u+w) ; float feq11= 0.0158730149f*rho+ 0.033572690f*usqr+ 0.083333333f*( v+w); float feq12= 0.0158730149f*rho+ 0.033572690f*usqr+ -0.083333333f*( u-w) ; float feq13= 0.0158730149f*rho+ 0.033572690f*usqr+ -0.083333333f*( v-w); float feq14= 0.1031746045f*rho+ 0.032375918f*usqr+ -0.1666666667f*w; float feq15= 0.0158730149f*rho+ 0.033572690f*usqr+ 0.083333333f*(u-w) ; float feq16= 0.0158730149f*rho+ 0.033572690f*usqr+ 0.083333333f*(v-w); float feq17= 0.0158730149f*rho+ 0.033572690f*usqr+ -0.083333333f*(u+w) ; float feq18= 0.0158730149f*rho+ 0.033572690f*usqr+ -0.083333333f*(v+w); feq1 += 0.055555556f*(2.f*u*u-(v*v+w*w)); feq2 += -0.027777778f*(2.f*u*u-(v*v+w*w))+ 0.083333333f*(v*v-w*w); feq3 += 0.055555556f*(2.f*u*u-(v*v+w*w)); feq4 += -0.027777778f*(2.f*u*u-(v*v+w*w))+ 0.083333333f*(v*v-w*w); feq5 += 0.027777778f*(2.f*u*u-(v*v+w*w))+ 0.083333333f*(v*v-w*w)+ 0.25f*u*v ; feq6 += 0.027777778f*(2.f*u*u-(v*v+w*w))+ 0.083333333f*(v*v-w*w)+ -0.25f*u*v ; feq7 += 0.027777778f*(2.f*u*u-(v*v+w*w))+ 0.083333333f*(v*v-w*w)+ 0.25f*u*v ; feq8 += 0.027777778f*(2.f*u*u-(v*v+w*w))+ 0.083333333f*(v*v-w*w)+ -0.25f*u*v ; feq9 += -0.027777778f*(2.f*u*u-(v*v+w*w))+ -0.083333333f*(v*v-w*w) ; feq10+= 0.027777778f*(2.f*u*u-(v*v+w*w))+ -0.083333333f*(v*v-w*w) + 
0.25f*u*w; feq11+= -0.055555556f*(2.f*u*u-(v*v+w*w)) + 0.25f*v*w ; feq12+= 0.027777778f*(2.f*u*u-(v*v+w*w))+ -0.083333333f*(v*v-w*w) + -0.25f*u*w; feq13+= -0.055555556f*(2.f*u*u-(v*v+w*w)) -0.25f*v*w ; feq14+= -0.027777778f*(2.f*u*u-(v*v+w*w))+ -0.083333333f*(v*v-w*w) ; feq15+= 0.027777778f*(2.f*u*u-(v*v+w*w))+ -0.083333333f*(v*v-w*w) + -0.25f*u*w; feq16+= -0.055555556f*(2.f*u*u-(v*v+w*w)) + -0.25f*v*w ; feq17+= 0.027777778f*(2.f*u*u-(v*v+w*w))+ -0.083333333f*(v*v-w*w) + 0.25f*u*w; feq18+= -0.055555556f*(2.f*u*u-(v*v+w*w)) + 0.25f*v*w ; //float m1 = 19.f*(-f0+ f5+f6+f7+f8+f10+f11+f12+f13+f15+f16+f17+f18) -19.f*(u*u+v*v+w*w); //float m2 = 12.f*f0+-4.f*f1+-4.f*f2+-4.f*f3+-4.f*f4+f5+f6+f7+f8+-4.f*f9+f10+f11+f12+f13+-4.f*f14+f15+f16+f17+f18 +7.53968254f*(u*u+v*v+w*w); //float m4 = 1.666666667f*(-2.f*f1+2.f*f3+f5-f6-f7+f8+f10-f12+f15-f17); //float m6 = 1.666666667f*(-2.f*f2+2.f*f4+f5+f6-f7-f8+f11-f13+f16-f18); //float m8 = 1.666666667f*(-2.f*f9+f10+f11+f12+f13+2.f*f14-f15-f16-f17-f18); //float m4 = 1.666666667f*(-3.f*f1+3.f*f3+u); //float m6 = 1.666666667f*(-3.f*f2+3.f*f4+v); //float m8 = 1.666666667f*(-3.f*f9+3.f*f14+w); //float m9 = 2.f*f1+ - f2+ 2.f*f3+ - f4+ f5+ f6+ f7+ f8+- f9+ f10+ -2.f*f11+ f12+-2.f*f13+- f14+ f15+ -2.f*f16+ f17+-2.f*f18 -(2.f*u*u-(v*v+w*w)); //float m10 =-4.f*f1+ 2.f*f2+ -4.f*f3+ 2.f*f4+ f5+ f6+ f7+ f8+ 2.f*f9+ f10+ -2.f*f11+ f12+-2.f*f13+ 2.f*f14+ f15+ -2.f*f16+ f17+-2.f*f18; //float m11 = f2 + f4+ f5+ f6+ f7+ f8+- f9+-f10 +-f12 +- f14+-f15 +-f17 -(v*v-w*w); //float m12 = -2.f*f2 -2.f*f4+ f5+ f6+ f7+ f8+ 2.f*f9+-f10 +-f12 + 2.f*f14+-f15 +-f17 ; //float m13 = f5+-f6+ f7+-f8 -u*v; //float m14 = f11 +- f13 + - f16 + f18 -v*w; //float m15 = f10 + - f12 +-f15 + f17 -u*w; //float m16 = f5+-f6+-f7+ f8 -f10 + f12 +-f15 + f17 ; //float m17 = -f5+-f6+ f7+ f8 + f11 +- f13 + f16 +- f18; //float m18 = f10+- f11+ f12+- f13 +-f15+ f16+-f17+ f18; float m1 = 19.f*(-f0+ f5+f6+f7+f8+f10+f11+f12+f13+f15+f16+f17+f18 -(u*u+v*v+w*w)); float m9 = 2.f*f1+ - f2+ 2.f*f3+ - f4+ f5+ f6+ f7+ f8+- f9+ f10+ -2.f*f11+ f12+-2.f*f13+- f14+ f15+ -2.f*f16+ f17+-2.f*f18 -(2.f*u*u-(v*v+w*w)); float m11 = f2 + f4+ f5+ f6+ f7+ f8+- f9+-f10 +-f12 +- f14+-f15 +-f17 -(v*v-w*w); float m13 = f5+-f6+ f7+-f8 -u*v; float m14 = f11 +- f13 + - f16 + f18 -v*w; float m15 = f10 + - f12 +-f15 + f17 -u*w; float omega = 1.0f/(3.0f*(UMAX*OBSTR1*2.f/RE)+0.5f); float omega2 = 2.0f/(1.0f+2.0f*(2.0f/omega-1.0f)); float PI11 = -0.026315789f*m1-0.5f *omega*m9; float PI22 = -0.026315789f*m1+0.25f*omega*(m9-3.0f*m11); float PI33 = -0.026315789f*m1+0.25f*omega*(m9+3.0f*m11); float PI12 = -1.5f*omega*m13; float PI23 = -1.5f*omega*m14; float PI13 = -1.5f*omega*m15; //we know Smag on coarse mesh float Smag = sqrt(2.f*(PI11*PI11+PI22*PI22+PI33*PI33+2.f*PI12*PI12+2.f*PI23*PI23+2.f*PI13*PI13)); //omega = 1.0f/(3.0f*(nu0+Cs*Smag*sqrt(2.f))+0.5f); //omega = 1.0f/(1.0f/omega+3.f*CS*Smag); //omega2 = 1.0f/(1.0f/omega2+3.f*CS*Smag*sqrt(2.f)*LRFACTOR*LRFACTOR); //omega = 1.0f/(1.0f/omega +3.f*CS*Smag); //omega2 = 1.0f/(1.0f/omega2+3.f*CS*Smag*sqrt(2.f)*LRFACTOR*LRFACTOR); //omega = 1.0f/(1.0f/omega +3.f*CS*Smag); //omega2 = 1.0f/(1.0f*LRLEVEL/omega2-1.f+0.5f+3.f*CS*Smag*sqrt(2.f)*LRFACTOR); //float PI11 = (f1-feq1)+(f3-feq3)+(f5-feq5)+(f6-feq6)+(f7-feq7)+(f8-feq8)+(f10-feq10)+(f12-feq12)+(f15-feq15)+(f17-feq17); //float PI22 = (f2-feq2)+(f4-feq4)+(f5-feq5)+(f6-feq6)+(f7-feq7)+(f8-feq8)+(f11-feq11)+(f13-feq13)+(f16-feq16)+(f18-feq18); //float PI33 = 
(f9-feq9)+(f14-feq14)+(f10-feq10)+(f12-feq12)+(f15-feq15)+(f17-feq17)+(f11-feq11)+(f13-feq13)+(f16-feq16)+(f18-feq18); //float PI12 = (f5-feq5)+(f7-feq7)-(f6-feq6)-(f8-feq8); //float PI13 = (f10-feq10)+(f17-feq17)-(f12-feq12)-(f15-feq15); //float PI23 = (f11-feq11)+(f18-feq18)-(f13-feq13)-(f16-feq16); // //float Q = sqrt(PI11*PI11+PI22*PI22+PI33*PI33+2.f*PI12*PI12+2.f*PI23*PI23+2.f*PI13*PI13); //float nu0 = ((1.0f/omega)-0.5f)/3.0f; //float tau0c = 1.f/omega; //float tau = tau0c+0.5*(-tau0c+sqrt(tau0c*tau0c+18.f*CS*Q));//tau_total of coarse mesh //omega = 1.f/tau;//total omega on coarse mesh //tau = tau0+0.5*(-tau0+sqrt(tau0*tau0+18.f*CS*LRFACTOR*Q)); //omega2= 1.f/tau; SF = (omega*(1.0f-omega2))/((1.0f-omega)*omega2/LRFACTOR);//for post-collision //SF = omega*0.5f/omega2;//for post-streaming, pre-collision? f0 =SF*f0 +(1.0f-SF)*feq0 ; f1 =SF*f1 +(1.0f-SF)*feq1 ; f2 =SF*f2 +(1.0f-SF)*feq2 ; f3 =SF*f3 +(1.0f-SF)*feq3 ; f4 =SF*f4 +(1.0f-SF)*feq4 ; f5 =SF*f5 +(1.0f-SF)*feq5 ; f6 =SF*f6 +(1.0f-SF)*feq6 ; f7 =SF*f7 +(1.0f-SF)*feq7 ; f8 =SF*f8 +(1.0f-SF)*feq8 ; f9 =SF*f9 +(1.0f-SF)*feq9 ; f10=SF*f10+(1.0f-SF)*feq10; f11=SF*f11+(1.0f-SF)*feq11; f12=SF*f12+(1.0f-SF)*feq12; f13=SF*f13+(1.0f-SF)*feq13; f14=SF*f14+(1.0f-SF)*feq14; f15=SF*f15+(1.0f-SF)*feq15; f16=SF*f16+(1.0f-SF)*feq16; f17=SF*f17+(1.0f-SF)*feq17; f18=SF*f18+(1.0f-SF)*feq18; } inline __device__ void mrt_scale_fc_LES(float& f0, float& f1, float& f2, float& f3 , float& f4 , float& f5 , float& f6 , float& f7 , float& f8 , float& f9, float& f10, float& f11, float& f12, float& f13, float& f14, float& f15, float& f16, float& f17, float& f18, float omega, float omega2) { float rho,u,v,w; rho = f0+f1+f2+f3+f4+f5+f6+f7+f8+f9+ f10+f11+f12+f13+f14+f15+f16+f17+f18; u = f1-f3+f5-f6-f7+f8+f10-f12+f15-f17; v = f2-f4+f5+f6-f7-f8+f11-f13+f16-f18; w = f9+f10+f11+f12+f13-f14-f15-f16-f17-f18; float usqr = u*u+v*v+w*w; float feq0 = 0.1904761791f*rho+-0.597127747f*usqr ; float feq1 = 0.1031746045f*rho+ 0.032375918f*usqr+ 0.1666666667f*u ; float feq2 = 0.1031746045f*rho+ 0.032375918f*usqr+ 0.1666666667f*v ; float feq3 = 0.1031746045f*rho+ 0.032375918f*usqr+ -0.1666666667f*u ; float feq4 = 0.1031746045f*rho+ 0.032375918f*usqr+ -0.1666666667f*v ; float feq5 = 0.0158730149f*rho+ 0.033572690f*usqr+ 0.083333333f*( u+v) ; float feq6 = 0.0158730149f*rho+ 0.033572690f*usqr+ -0.083333333f*( u-v) ; float feq7 = 0.0158730149f*rho+ 0.033572690f*usqr+ -0.083333333f*( u+v) ; float feq8 = 0.0158730149f*rho+ 0.033572690f*usqr+ 0.083333333f*( u-v) ; float feq9 = 0.1031746045f*rho+ 0.032375918f*usqr+ 0.1666666667f*w; float feq10= 0.0158730149f*rho+ 0.033572690f*usqr+ 0.083333333f*( u+w) ; float feq11= 0.0158730149f*rho+ 0.033572690f*usqr+ 0.083333333f*( v+w); float feq12= 0.0158730149f*rho+ 0.033572690f*usqr+ -0.083333333f*( u-w) ; float feq13= 0.0158730149f*rho+ 0.033572690f*usqr+ -0.083333333f*( v-w); float feq14= 0.1031746045f*rho+ 0.032375918f*usqr+ -0.1666666667f*w; float feq15= 0.0158730149f*rho+ 0.033572690f*usqr+ 0.083333333f*(u-w) ; float feq16= 0.0158730149f*rho+ 0.033572690f*usqr+ 0.083333333f*(v-w); float feq17= 0.0158730149f*rho+ 0.033572690f*usqr+ -0.083333333f*(u+w) ; float feq18= 0.0158730149f*rho+ 0.033572690f*usqr+ -0.083333333f*(v+w); feq1 += 0.055555556f*(2.f*u*u-(v*v+w*w)); feq2 += -0.027777778f*(2.f*u*u-(v*v+w*w))+ 0.083333333f*(v*v-w*w); feq3 += 0.055555556f*(2.f*u*u-(v*v+w*w)); feq4 += -0.027777778f*(2.f*u*u-(v*v+w*w))+ 0.083333333f*(v*v-w*w); feq5 += 0.027777778f*(2.f*u*u-(v*v+w*w))+ 0.083333333f*(v*v-w*w)+ 0.25f*u*v ; feq6 += 
0.027777778f*(2.f*u*u-(v*v+w*w))+ 0.083333333f*(v*v-w*w)+ -0.25f*u*v ; feq7 += 0.027777778f*(2.f*u*u-(v*v+w*w))+ 0.083333333f*(v*v-w*w)+ 0.25f*u*v ; feq8 += 0.027777778f*(2.f*u*u-(v*v+w*w))+ 0.083333333f*(v*v-w*w)+ -0.25f*u*v ; feq9 += -0.027777778f*(2.f*u*u-(v*v+w*w))+ -0.083333333f*(v*v-w*w) ; feq10+= 0.027777778f*(2.f*u*u-(v*v+w*w))+ -0.083333333f*(v*v-w*w) + 0.25f*u*w; feq11+= -0.055555556f*(2.f*u*u-(v*v+w*w)) + 0.25f*v*w ; feq12+= 0.027777778f*(2.f*u*u-(v*v+w*w))+ -0.083333333f*(v*v-w*w) + -0.25f*u*w; feq13+= -0.055555556f*(2.f*u*u-(v*v+w*w)) -0.25f*v*w ; feq14+= -0.027777778f*(2.f*u*u-(v*v+w*w))+ -0.083333333f*(v*v-w*w) ; feq15+= 0.027777778f*(2.f*u*u-(v*v+w*w))+ -0.083333333f*(v*v-w*w) + -0.25f*u*w; feq16+= -0.055555556f*(2.f*u*u-(v*v+w*w)) + -0.25f*v*w ; feq17+= 0.027777778f*(2.f*u*u-(v*v+w*w))+ -0.083333333f*(v*v-w*w) + 0.25f*u*w; feq18+= -0.055555556f*(2.f*u*u-(v*v+w*w)) + 0.25f*v*w ; float m1 = 19.f*(-f0+ f5+f6+f7+f8+f10+f11+f12+f13+f15+f16+f17+f18 -(u*u+v*v+w*w)); float m9 = 2.f*f1+ - f2+ 2.f*f3+ - f4+ f5+ f6+ f7+ f8+- f9+ f10+ -2.f*f11+ f12+-2.f*f13+- f14+ f15+ -2.f*f16+ f17+-2.f*f18 -(2.f*u*u-(v*v+w*w)); float m11 = f2 + f4+ f5+ f6+ f7+ f8+- f9+-f10 +-f12 +- f14+-f15 +-f17 -(v*v-w*w); float m13 = f5+-f6+ f7+-f8 -u*v; float m14 = f11 +- f13 + - f16 + f18 -v*w; float m15 = f10 + - f12 +-f15 + f17 -u*w; //float PI11 = -0.026315789f*m1-0.5f *omega*m9; //float PI22 = -0.026315789f*m1+0.25f*omega*(m9-3.0f*m11); //float PI33 = -0.026315789f*m1+0.25f*omega*(m9+3.0f*m11); //float PI12 = -1.5f*omega*m13; //float PI23 = -1.5f*omega*m14; //float PI13 = -1.5f*omega*m15; ////we know Smag on fine mesh. Smag_c=Smag_f*sqrt(2) //float Smag = sqrt(2.f*(PI11*PI11+PI22*PI22+PI33*PI33+2.f*PI12*PI12+2.f*PI23*PI23+2.f*PI13*PI13)); //float nu0 = ((1.0f/omega)-0.5f)/3.0f; ////omega = 1.0f/(3.0f*(nu0+CS*Smag*sqrt(2.f))+0.5f); ////omega2 = 1.0f/(1.0f/omega2+3.f*CS*Smag*LRFACTOR); ////omega = 1.0f/(1.0f/omega+3.f*CS*Smag/sqrt(2.f)); ////omega2 = 1.0f/(1.0f*LRLEVEL/omega2-1.f+0.5f+3.f*CS*Smag*LRFACTOR); ////omega = 1.0f/(1.0f/omega+3.f*CS*Smag/sqrt(2.f)); //float PI11 = (f1-feq1)+(f3-feq3)+(f5-feq5)+(f6-feq6)+(f7-feq7)+(f8-feq8)+(f10-feq10)+(f12-feq12)+(f15-feq15)+(f17-feq17); //float PI22 = (f2-feq2)+(f4-feq4)+(f5-feq5)+(f6-feq6)+(f7-feq7)+(f8-feq8)+(f11-feq11)+(f13-feq13)+(f16-feq16)+(f18-feq18); //float PI33 = (f9-feq9)+(f14-feq14)+(f10-feq10)+(f12-feq12)+(f15-feq15)+(f17-feq17)+(f11-feq11)+(f13-feq13)+(f16-feq16)+(f18-feq18); //float PI12 = (f5-feq5)+(f7-feq7)-(f6-feq6)-(f8-feq8); //float PI13 = (f10-feq10)+(f17-feq17)-(f12-feq12)-(f15-feq15); //float PI23 = (f11-feq11)+(f18-feq18)-(f13-feq13)-(f16-feq16); // //float Q = sqrt(PI11*PI11+PI22*PI22+PI33*PI33+2.f*PI12*PI12+2.f*PI23*PI23+2.f*PI13*PI13); //float nu0 = ((1.0f/omega)-0.5f)/3.0f; //float tau0f = 1.f/omega2; //float tau0c = 1.f/omega; //float tau = tau0f+0.5*(-tau0f+sqrt(tau0f*tau0f+18.f*CS*sqrt(2.f)*Q));//tau_total of fine //omega2 = 1.f/tau;//total omega on fine mesh //tau = LRLEVEL*(tau-tau0f)+tau0c; //omega= 1.f/tau; //tau = tau0+0.5*(-tau0+sqrt(tau0*tau0+18.f*CS*Q)); float SF = (omega*(1.0f-omega2))/((1.0f-omega)*omega2/LRFACTOR); //float SF = omega2*2.f/omega; //float SF = ((1.0f-omega)*omega2/LRFACTOR)/(omega*(1.0f-omega2)); //SF = omega*2.f/omega2; f0 =SF*f0 +(1.0f-SF)*feq0 ; f1 =SF*f1 +(1.0f-SF)*feq1 ; f2 =SF*f2 +(1.0f-SF)*feq2 ; f3 =SF*f3 +(1.0f-SF)*feq3 ; f4 =SF*f4 +(1.0f-SF)*feq4 ; f5 =SF*f5 +(1.0f-SF)*feq5 ; f6 =SF*f6 +(1.0f-SF)*feq6 ; f7 =SF*f7 +(1.0f-SF)*feq7 ; f8 =SF*f8 +(1.0f-SF)*feq8 ; f9 =SF*f9 +(1.0f-SF)*feq9 ; 
f10=SF*f10+(1.0f-SF)*feq10;
f11=SF*f11+(1.0f-SF)*feq11;
f12=SF*f12+(1.0f-SF)*feq12;
f13=SF*f13+(1.0f-SF)*feq13;
f14=SF*f14+(1.0f-SF)*feq14;
f15=SF*f15+(1.0f-SF)*feq15;
f16=SF*f16+(1.0f-SF)*feq16;
f17=SF*f17+(1.0f-SF)*feq17;
f18=SF*f18+(1.0f-SF)*feq18;
}
inline __device__ int f_mem(int f_num, int x, int y, int z, size_t pitch)
{
  return (x+y*pitch+z*YDIM*pitch)+f_num*pitch*YDIM*ZDIM;
}
inline __device__ int f_memLR(int f_num, int x, int y, int z, size_t pitch)
{
  return (x+y*pitch+z*YLRDIM*pitch)+f_num*pitch*YLRDIM*ZLRDIM;
}
__device__ int dmin(int a, int b)
{
  if (a<b) return a;
  else return b-1;
}
__device__ int dmax(int a)
{
  if (a>-1) return a;
  else return 0;
}
__device__ int dmin_p(int a, int b)
{
  if (a<b) return a;
  else return 0;
}
__device__ int dmax_p(int a, int b)
{
  if (a>-1) return a;
  else return b-1;
}
__global__ void simple_copy(float* fA, float* fB, size_t pitch)//pitch in elements
{
  int x = threadIdx.x+blockIdx.x*blockDim.x;//coord in linear mem
  int y = threadIdx.y+blockIdx.y*blockDim.y;
  int z = threadIdx.z+blockIdx.z*blockDim.z;
  int j = x+y*pitch+z*YDIM*pitch;//index on padded mem (pitch in elements)
  int k = dmin(x+1,XDIM)+y*pitch+z*YDIM*pitch;//index on padded mem (pitch in elements)
  fB[j] = fA[k];//+0.01f;
}
__global__ void simple_text(float* fA, float* fB, size_t pitch)//pitch in elements
{
  int x = threadIdx.x+blockIdx.x*blockDim.x;//coord in linear mem
  int y = threadIdx.y+blockIdx.y*blockDim.y;
  int z = threadIdx.z+blockIdx.z*blockDim.z;
  int j = x+y*pitch+z*YDIM*pitch;//index on padded mem (pitch in elements)
  fB[j] = tex2D(texRef_f0A,x+1,y);//+0.01f;
}
__global__ void ExtractFromC_d(float* fout, size_t pitch, float omega, float omega2)//pitch in elements
//size_t pitch, float SF)//pitch in elements
{
  int x = threadIdx.x+blockIdx.x*blockDim.x;//coord in linear mem
  int y = threadIdx.y+blockIdx.y*blockDim.y;
  int z = threadIdx.z+blockIdx.z*blockDim.z;
  float f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18;
//  if(x < LRX0+1 || x > LRX0+XLRDIM-2 || y < LRY0+1 || y > LRY0+YLRDIM-2 || z < LRZ0+1 || z > LRZ0+ZLRDIM-2)
//  //if(x < LRX0+2 || x > LRX0+XLRDIM-3 || y < LRY0+2 || y > LRY0+YLRDIM-3 || z < LRZ0+2 || z > LRZ0+ZLRDIM-3)
//  {
//    //do nothing
//  }
//  else{
//  if( (x > LRX0+1 && x < LRX0+XLRDIM*LRFACTOR-1 && y > LRY0+1 && y < LRY0+YLRDIM*LRFACTOR-1 && z > LRZ0+1 && z < LRZ0+ZLRDIM*LRFACTOR-1) &&
//      (x == int(LRX0+2) || x == int(LRX0+XLRDIM*LRFACTOR-1) || y == int(LRY0+2) || y == int(LRY0+YLRDIM*LRFACTOR-1) || z == int(LRZ0+2) || z == int(LRY0+ZLRDIM*LRFACTOR-1)) )
  //if( (x > LRX0+3 && x < LRX0+XLRDIM*LRFACTOR-3 && y > LRY0+3 && y < LRY0+YLRDIM*LRFACTOR-3))// &&
  if( (x > LRX0+1 && x < LRX0+XLRDIM*LRFACTOR-1 && y > LRY0+1 && y < LRY0+YLRDIM*LRFACTOR-1))// &&
    //(x == int(LRX0+2) || x == int(LRX0+XLRDIM*LRFACTOR-1) || y == int(LRY0+2) || y == int(LRY0+YLRDIM*LRFACTOR-1)) )
  {
//  if(x > 10 && y > 10 && z > 10 && x < 20 && y < 20 && z < 20)
//  {
    float xcoord = LRLEVEL*(x-LRX0)+0.5f;
    float ycoord = LRLEVEL*(y-LRY0)+0.5f;
    float zcoord = LRLEVEL*(z-LRZ0);
    int zminus = int(zcoord);
    int zplus = zminus+1;
    f0 = (zplus-zcoord)*tex2D(texRef_f0C ,xcoord,ycoord+YLRDIM*(zminus))+(zcoord-zminus)*tex2D(texRef_f0C ,xcoord,ycoord+YLRDIM*(zplus));
    f2 = (zplus-zcoord)*tex2D(texRef_f2C ,xcoord,ycoord+YLRDIM*(zminus))+(zcoord-zminus)*tex2D(texRef_f2C ,xcoord,ycoord+YLRDIM*(zplus));
    f4 = (zplus-zcoord)*tex2D(texRef_f4C ,xcoord,ycoord+YLRDIM*(zminus))+(zcoord-zminus)*tex2D(texRef_f4C ,xcoord,ycoord+YLRDIM*(zplus));
    f9 = (zplus-zcoord)*tex2D(texRef_f9C
,xcoord,ycoord+YLRDIM*(zminus))+(zcoord-zminus)*tex2D(texRef_f9C ,xcoord,ycoord+YLRDIM*(zplus)); f11= (zplus-zcoord)*tex2D(texRef_f11C,xcoord,ycoord+YLRDIM*(zminus))+(zcoord-zminus)*tex2D(texRef_f11C,xcoord,ycoord+YLRDIM*(zplus)); f13= (zplus-zcoord)*tex2D(texRef_f13C,xcoord,ycoord+YLRDIM*(zminus))+(zcoord-zminus)*tex2D(texRef_f13C,xcoord,ycoord+YLRDIM*(zplus)); f14= (zplus-zcoord)*tex2D(texRef_f14C,xcoord,ycoord+YLRDIM*(zminus))+(zcoord-zminus)*tex2D(texRef_f14C,xcoord,ycoord+YLRDIM*(zplus)); f16= (zplus-zcoord)*tex2D(texRef_f16C,xcoord,ycoord+YLRDIM*(zminus))+(zcoord-zminus)*tex2D(texRef_f16C,xcoord,ycoord+YLRDIM*(zplus)); f18= (zplus-zcoord)*tex2D(texRef_f18C,xcoord,ycoord+YLRDIM*(zminus))+(zcoord-zminus)*tex2D(texRef_f18C,xcoord,ycoord+YLRDIM*(zplus)); f1 = (zplus-zcoord)*tex2D(texRef_f1C ,xcoord,ycoord+YLRDIM*(zminus))+(zcoord-zminus)*tex2D(texRef_f1C ,xcoord,ycoord+YLRDIM*(zplus)); f3 = (zplus-zcoord)*tex2D(texRef_f3C ,xcoord,ycoord+YLRDIM*(zminus))+(zcoord-zminus)*tex2D(texRef_f3C ,xcoord,ycoord+YLRDIM*(zplus)); f5 = (zplus-zcoord)*tex2D(texRef_f5C ,xcoord,ycoord+YLRDIM*(zminus))+(zcoord-zminus)*tex2D(texRef_f5C ,xcoord,ycoord+YLRDIM*(zplus)); f6 = (zplus-zcoord)*tex2D(texRef_f6C ,xcoord,ycoord+YLRDIM*(zminus))+(zcoord-zminus)*tex2D(texRef_f6C ,xcoord,ycoord+YLRDIM*(zplus)); f7 = (zplus-zcoord)*tex2D(texRef_f7C ,xcoord,ycoord+YLRDIM*(zminus))+(zcoord-zminus)*tex2D(texRef_f7C ,xcoord,ycoord+YLRDIM*(zplus)); f8 = (zplus-zcoord)*tex2D(texRef_f8C ,xcoord,ycoord+YLRDIM*(zminus))+(zcoord-zminus)*tex2D(texRef_f8C ,xcoord,ycoord+YLRDIM*(zplus)); f15= (zplus-zcoord)*tex2D(texRef_f15C,xcoord,ycoord+YLRDIM*(zminus))+(zcoord-zminus)*tex2D(texRef_f15C,xcoord,ycoord+YLRDIM*(zplus)); f17= (zplus-zcoord)*tex2D(texRef_f17C,xcoord,ycoord+YLRDIM*(zminus))+(zcoord-zminus)*tex2D(texRef_f17C,xcoord,ycoord+YLRDIM*(zplus)); f10= (zplus-zcoord)*tex2D(texRef_f10C,xcoord,ycoord+YLRDIM*(zminus))+(zcoord-zminus)*tex2D(texRef_f10C,xcoord,ycoord+YLRDIM*(zplus)); f12= (zplus-zcoord)*tex2D(texRef_f12C,xcoord,ycoord+YLRDIM*(zminus))+(zcoord-zminus)*tex2D(texRef_f12C,xcoord,ycoord+YLRDIM*(zplus)); if(MODEL == "MRT") mrt_scale_fc_LES(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,omega,omega2); //mrt_scale_cf(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,SF); // else // bgk_scale_cf(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,SF); fout[f_mem(0 ,x,y,z,pitch)] = f0 ; fout[f_mem(1 ,x,y,z,pitch)] = f1 ; fout[f_mem(2 ,x,y,z,pitch)] = f2 ; fout[f_mem(3 ,x,y,z,pitch)] = f3 ; fout[f_mem(4 ,x,y,z,pitch)] = f4 ; fout[f_mem(5 ,x,y,z,pitch)] = f5 ; fout[f_mem(6 ,x,y,z,pitch)] = f6 ; fout[f_mem(7 ,x,y,z,pitch)] = f7 ; fout[f_mem(8 ,x,y,z,pitch)] = f8 ; fout[f_mem(9 ,x,y,z,pitch)] = f9 ; fout[f_mem(10,x,y,z,pitch)] = f10; fout[f_mem(11,x,y,z,pitch)] = f11; fout[f_mem(12,x,y,z,pitch)] = f12; fout[f_mem(13,x,y,z,pitch)] = f13; fout[f_mem(14,x,y,z,pitch)] = f14; fout[f_mem(15,x,y,z,pitch)] = f15; fout[f_mem(16,x,y,z,pitch)] = f16; fout[f_mem(17,x,y,z,pitch)] = f17; fout[f_mem(18,x,y,z,pitch)] = f18; } } __global__ void LR_d_ABCD_force(float* fin, float* fout, float omega, size_t pitch, float *FX, float *FY, float *FZ, int t, float *uAv, float *vAv, float *ufluc, float *vfluc)//pitch in elements { int x = threadIdx.x+blockIdx.x*blockDim.x;//coord in linear mem int y = threadIdx.y+blockIdx.y*blockDim.y; int z = threadIdx.z+blockIdx.z*blockDim.z; float xcoord = LRX0+x*LRFACTOR; float ycoord = LRY0+y*LRFACTOR; float zcoord = LRZ0+z*LRFACTOR; int j = 
x+y*pitch+z*YLRDIM*pitch;//index on padded mem (pitch in elements) float f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18; int im = ImageFcn(xcoord,ycoord,zcoord); float u_Av, v_Av, u_fluc, v_fluc; __shared__ float sumX[BLOCKSIZEX], sumY[BLOCKSIZEX], sumZ[BLOCKSIZEX]; __shared__ int check[1]; check[0] = 0; syncthreads(); // if(x < 1 || x > XLRDIM-2 || y < 1 || y > YLRDIM-2 || z < 1 || z > ZLRDIM-2) // { // //dont do anything // sumX[threadIdx.x]=0.f; // sumY[threadIdx.x]=0.f; // sumZ[threadIdx.x]=0.f; // } // else{ f0 = fin[j]; f1 = fin[f_memLR(1 ,x-1,y ,z ,pitch)]; f3 = fin[f_memLR(3 ,x+1,y ,z ,pitch)]; f2 = fin[f_memLR(2 ,x ,y-1,z ,pitch)]; f5 = fin[f_memLR(5 ,x-1,y-1,z ,pitch)]; f6 = fin[f_memLR(6 ,x+1,y-1,z ,pitch)]; f4 = fin[f_memLR(4 ,x ,y+1,z ,pitch)]; f7 = fin[f_memLR(7 ,x+1,y+1,z ,pitch)]; f8 = fin[f_memLR(8 ,x-1,y+1,z ,pitch)]; if(z != 0){ f9 = fin[f_memLR(9 ,x ,y ,z-1,pitch)]; f10= fin[f_memLR(10,x-1,y ,z-1,pitch)]; f11= fin[f_memLR(11,x ,y-1,z-1,pitch)]; f12= fin[f_memLR(12,x+1,y ,z-1,pitch)]; f13= fin[f_memLR(13,x ,y+1,z-1,pitch)]; } else{ f9 = fin[f_memLR(9 ,x ,y ,ZLRDIM-1,pitch)]; f10= fin[f_memLR(10,dmax_p(x-1,XLRDIM),y ,ZLRDIM-1,pitch)]; f11= fin[f_memLR(11,x ,dmax_p(y-1,YLRDIM),ZLRDIM-1,pitch)]; f12= fin[f_memLR(12,dmin_p(x+1,XLRDIM),y ,ZLRDIM-1,pitch)]; f13= fin[f_memLR(13,x ,dmin_p(y+1,YLRDIM),ZLRDIM-1,pitch)]; } if(z != ZLRDIM-1){ f14= fin[f_memLR(14,x ,y ,z+1,pitch)]; f15= fin[f_memLR(15,x-1,y ,z+1,pitch)]; f16= fin[f_memLR(16,x ,y-1,z+1,pitch)]; f17= fin[f_memLR(17,x+1,y ,z+1,pitch)]; f18= fin[f_memLR(18,x ,y+1,z+1,pitch)]; } else{ f14= fin[f_memLR(14,x ,y ,0 ,pitch)]; f15= fin[f_memLR(15,dmax_p(x-1,XLRDIM),y ,0 ,pitch)]; f16= fin[f_memLR(16,x ,dmax_p(y-1,YLRDIM),0 ,pitch)]; f17= fin[f_memLR(17,dmin_p(x+1,XLRDIM),y ,0 ,pitch)]; f18= fin[f_memLR(18,x ,dmin_p(y+1,YLRDIM),0 ,pitch)]; } if(im == 1 || im ==10){//BB if(im == 10){ check[0] = 1; //sumX[threadIdx.x]=2.f*f1-2.f*f3+2.f*f5+2.f*f8-2.f*f6-2.f*f7+2.f*f10-2.f*f12+2.f*f15-2.f*f17; //sumY[threadIdx.x]=2.f*f2-2.f*f4+2.f*f5-2.f*f8+2.f*f6-2.f*f7+2.f*f11-2.f*f13+2.f*f16-2.f*f18; //sumZ[threadIdx.x]=2.f*f9+2.f*f10+2.f*f11+2.f*f12+2.f*f13-2.f*f14-2.f*f15-2.f*f16-2.f*f17-2.f*f18; sumX[threadIdx.x]=2.f*f1-2.f*f3+2.f*f5+2.f*f8-2.f*f6;//-2.f*f7+2.f*f10-2.f*f12+2.f*f15-2.f*f17; sumX[threadIdx.x]+=-2.f*f7+2.f*f10-2.f*f12+2.f*f15-2.f*f17; sumY[threadIdx.x]=2.f*f2-2.f*f4+2.f*f5-2.f*f8+2.f*f6;//-2.f*f7+2.f*f11-2.f*f13+2.f*f16-2.f*f18; sumY[threadIdx.x]+=-2.f*f7+2.f*f11-2.f*f13+2.f*f16-2.f*f18; sumZ[threadIdx.x]=2.f*f9+2.f*f10+2.f*f11+2.f*f12+2.f*f13;//-2.f*f14-2.f*f15-2.f*f16-2.f*f17-2.f*f18; sumZ[threadIdx.x]+=-2.f*f14-2.f*f15-2.f*f16-2.f*f17-2.f*f18; } else{ sumX[threadIdx.x]=0.f; sumY[threadIdx.x]=0.f; sumZ[threadIdx.x]=0.f; } fout[f_memLR(1 ,x,y,z,pitch)] = f3 ; fout[f_memLR(2 ,x,y,z,pitch)] = f4 ; fout[f_memLR(3 ,x,y,z,pitch)] = f1 ; fout[f_memLR(4 ,x,y,z,pitch)] = f2 ; fout[f_memLR(5 ,x,y,z,pitch)] = f7 ; fout[f_memLR(6 ,x,y,z,pitch)] = f8 ; fout[f_memLR(7 ,x,y,z,pitch)] = f5 ; fout[f_memLR(8 ,x,y,z,pitch)] = f6 ; fout[f_memLR(9 ,x,y,z,pitch)] = f14; fout[f_memLR(10,x,y,z,pitch)] = f17; fout[f_memLR(11,x,y,z,pitch)] = f18; fout[f_memLR(12,x,y,z,pitch)] = f15; fout[f_memLR(13,x,y,z,pitch)] = f16; fout[f_memLR(14,x,y,z,pitch)] = f9 ; fout[f_memLR(15,x,y,z,pitch)] = f12; fout[f_memLR(16,x,y,z,pitch)] = f13; fout[f_memLR(17,x,y,z,pitch)] = f10; fout[f_memLR(18,x,y,z,pitch)] = f11; } else{ sumX[threadIdx.x]=0.f; sumY[threadIdx.x]=0.f; sumZ[threadIdx.x]=0.f; 
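  // fluid node: apply the boundary conditions, relax with the selected collision operator (MRT or BGK),
  // and, when VELAV is enabled, update the running velocity averages/fluctuations before writing back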
boundaries(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,y,z,im); if(MODEL == "MRT") mrt_collide(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,omega); else if(MODEL == "BGK") bgk_collide(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,omega); if(VELAV == "YES"){ if(t>=START_VELAV && t<START_VELFLUC){ u_Av = uAv[j]; v_Av = vAv[j]; vel_avLR(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,u_Av,v_Av,t); uAv[j] = u_Av; vAv[j] = v_Av; } else if(t>=START_VELFLUC){ u_Av = uAv[j]; v_Av = vAv[j]; u_fluc = ufluc[j]; v_fluc = vfluc[j]; vel_flucLR(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,u_Av,v_Av,u_fluc,v_fluc,t); ufluc[j] = u_fluc; vfluc[j] = v_fluc; } } fout[f_memLR(0 ,x,y,z,pitch)] = f0 ; fout[f_memLR(1 ,x,y,z,pitch)] = f1 ; fout[f_memLR(2 ,x,y,z,pitch)] = f2 ; fout[f_memLR(3 ,x,y,z,pitch)] = f3 ; fout[f_memLR(4 ,x,y,z,pitch)] = f4 ; fout[f_memLR(5 ,x,y,z,pitch)] = f5 ; fout[f_memLR(6 ,x,y,z,pitch)] = f6 ; fout[f_memLR(7 ,x,y,z,pitch)] = f7 ; fout[f_memLR(8 ,x,y,z,pitch)] = f8 ; fout[f_memLR(9 ,x,y,z,pitch)] = f9 ; fout[f_memLR(10,x,y,z,pitch)] = f10; fout[f_memLR(11,x,y,z,pitch)] = f11; fout[f_memLR(12,x,y,z,pitch)] = f12; fout[f_memLR(13,x,y,z,pitch)] = f13; fout[f_memLR(14,x,y,z,pitch)] = f14; fout[f_memLR(15,x,y,z,pitch)] = f15; fout[f_memLR(16,x,y,z,pitch)] = f16; fout[f_memLR(17,x,y,z,pitch)] = f17; fout[f_memLR(18,x,y,z,pitch)] = f18; } // }//end else (not at edge of LR) syncthreads(); if(check[0] == 1 && t>=STARTF){ //reduction for force int nTotalThreads = blockDim.x; while(nTotalThreads > 1){ int halfPoint = (nTotalThreads >> 1); if(threadIdx.x < halfPoint){ sumX[threadIdx.x] += sumX[threadIdx.x+halfPoint]; sumY[threadIdx.x] += sumY[threadIdx.x+halfPoint]; sumZ[threadIdx.x] += sumZ[threadIdx.x+halfPoint]; } syncthreads(); nTotalThreads = halfPoint; } if(threadIdx.x == 0){ atomicAdd(&FX[t],sumX[0]); atomicAdd(&FY[t],sumY[0]); atomicAdd(&FZ[t],sumZ[0]); } } } __global__ void LR_d_BACD_force(float* fin, float* fout, float omega, size_t pitch, float *FX, float *FY, float *FZ, int t,float *uAv, float *vAv, float *ufluc, float *vfluc) { int x = threadIdx.x+blockIdx.x*blockDim.x;//coord in linear mem int y = threadIdx.y+blockIdx.y*blockDim.y; int z = threadIdx.z+blockIdx.z*blockDim.z; float xcoord = LRX0+x*LRFACTOR; float ycoord = LRY0+y*LRFACTOR; float zcoord = LRZ0+z*LRFACTOR; int j = x+y*pitch+z*YLRDIM*pitch;//index on padded mem (pitch in elements) float f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18; int im = ImageFcn(xcoord,ycoord,zcoord); __shared__ float sumX[BLOCKSIZEX], sumY[BLOCKSIZEX], sumZ[BLOCKSIZEX]; __shared__ int check[1]; check[0] = 0; syncthreads(); // if(x < 1 || x > XLRDIM-2 || y < 1 || y > YLRDIM-2 || z < 1 || z > ZLRDIM-2) // { // //dont do anything // sumX[threadIdx.x]=0.f; // sumY[threadIdx.x]=0.f; // sumZ[threadIdx.x]=0.f; // } // else{ f0 = fin[j]; f1 = fin[f_memLR(1 ,x-1,y ,z ,pitch)]; f3 = fin[f_memLR(3 ,x+1,y ,z ,pitch)]; f2 = fin[f_memLR(2 ,x ,y-1,z ,pitch)]; f5 = fin[f_memLR(5 ,x-1,y-1,z ,pitch)]; f6 = fin[f_memLR(6 ,x+1,y-1,z ,pitch)]; f4 = fin[f_memLR(4 ,x ,y+1,z ,pitch)]; f7 = fin[f_memLR(7 ,x+1,y+1,z ,pitch)]; f8 = fin[f_memLR(8 ,x-1,y+1,z ,pitch)]; if(z != 0){ f9 = fin[f_memLR(9 ,x ,y ,z-1,pitch)]; f10= fin[f_memLR(10,x-1,y ,z-1,pitch)]; f11= fin[f_memLR(11,x ,y-1,z-1,pitch)]; f12= fin[f_memLR(12,x+1,y ,z-1,pitch)]; f13= fin[f_memLR(13,x ,y+1,z-1,pitch)]; } else{ f9 = fin[f_memLR(9 ,x ,y ,ZLRDIM-1,pitch)]; f10= 
fin[f_memLR(10,dmax_p(x-1,XLRDIM),y ,ZLRDIM-1,pitch)]; f11= fin[f_memLR(11,x ,dmax_p(y-1,YLRDIM),ZLRDIM-1,pitch)]; f12= fin[f_memLR(12,dmin_p(x+1,XLRDIM),y ,ZLRDIM-1,pitch)]; f13= fin[f_memLR(13,x ,dmin_p(y+1,YLRDIM),ZLRDIM-1,pitch)]; } if(z != ZLRDIM-1){ f14= fin[f_memLR(14,x ,y ,z+1,pitch)]; f15= fin[f_memLR(15,x-1,y ,z+1,pitch)]; f16= fin[f_memLR(16,x ,y-1,z+1,pitch)]; f17= fin[f_memLR(17,x+1,y ,z+1,pitch)]; f18= fin[f_memLR(18,x ,y+1,z+1,pitch)]; } else{ f14= fin[f_memLR(14,x ,y ,0 ,pitch)]; f15= fin[f_memLR(15,dmax_p(x-1,XLRDIM),y ,0 ,pitch)]; f16= fin[f_memLR(16,x ,dmax_p(y-1,YLRDIM),0 ,pitch)]; f17= fin[f_memLR(17,dmin_p(x+1,XLRDIM),y ,0 ,pitch)]; f18= fin[f_memLR(18,x ,dmin_p(y+1,YLRDIM),0 ,pitch)]; } if(im == 1 || im ==10){//BB if(im == 10){ check[0] = 1; //sumX[threadIdx.x]=2.f*f1-2.f*f3+2.f*f5+2.f*f8-2.f*f6-2.f*f7+2.f*f10-2.f*f12+2.f*f15-2.f*f17; //sumY[threadIdx.x]=2.f*f2-2.f*f4+2.f*f5-2.f*f8+2.f*f6-2.f*f7+2.f*f11-2.f*f13+2.f*f16-2.f*f18; //sumZ[threadIdx.x]=2.f*f9+2.f*f10+2.f*f11+2.f*f12+2.f*f13-2.f*f14-2.f*f15-2.f*f16-2.f*f17-2.f*f18; sumX[threadIdx.x]=2.f*f1-2.f*f3+2.f*f5+2.f*f8-2.f*f6;//-2.f*f7+2.f*f10-2.f*f12+2.f*f15-2.f*f17; sumX[threadIdx.x]+=-2.f*f7+2.f*f10-2.f*f12+2.f*f15-2.f*f17; sumY[threadIdx.x]=2.f*f2-2.f*f4+2.f*f5-2.f*f8+2.f*f6;//-2.f*f7+2.f*f11-2.f*f13+2.f*f16-2.f*f18; sumY[threadIdx.x]+=-2.f*f7+2.f*f11-2.f*f13+2.f*f16-2.f*f18; sumZ[threadIdx.x]=2.f*f9+2.f*f10+2.f*f11+2.f*f12+2.f*f13;//-2.f*f14-2.f*f15-2.f*f16-2.f*f17-2.f*f18; sumZ[threadIdx.x]+=-2.f*f14-2.f*f15-2.f*f16-2.f*f17-2.f*f18; } else{ sumX[threadIdx.x]=0.f; sumY[threadIdx.x]=0.f; sumZ[threadIdx.x]=0.f; } fout[f_memLR(1 ,x,y,z,pitch)] = f3 ; fout[f_memLR(2 ,x,y,z,pitch)] = f4 ; fout[f_memLR(3 ,x,y,z,pitch)] = f1 ; fout[f_memLR(4 ,x,y,z,pitch)] = f2 ; fout[f_memLR(5 ,x,y,z,pitch)] = f7 ; fout[f_memLR(6 ,x,y,z,pitch)] = f8 ; fout[f_memLR(7 ,x,y,z,pitch)] = f5 ; fout[f_memLR(8 ,x,y,z,pitch)] = f6 ; fout[f_memLR(9 ,x,y,z,pitch)] = f14; fout[f_memLR(10,x,y,z,pitch)] = f17; fout[f_memLR(11,x,y,z,pitch)] = f18; fout[f_memLR(12,x,y,z,pitch)] = f15; fout[f_memLR(13,x,y,z,pitch)] = f16; fout[f_memLR(14,x,y,z,pitch)] = f9 ; fout[f_memLR(15,x,y,z,pitch)] = f12; fout[f_memLR(16,x,y,z,pitch)] = f13; fout[f_memLR(17,x,y,z,pitch)] = f10; fout[f_memLR(18,x,y,z,pitch)] = f11; } else{ sumX[threadIdx.x]=0.f; sumY[threadIdx.x]=0.f; sumZ[threadIdx.x]=0.f; boundaries(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,y,z,im); if(MODEL == "MRT") mrt_collide(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,omega); else if(MODEL == "BGK") bgk_collide(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,omega); if(VELAV == "YES"){ float u_Av, v_Av, u_fluc, v_fluc; if(t>=START_VELAV && t<START_VELFLUC){ u_Av = uAv[j]; v_Av = vAv[j]; vel_avLR(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,u_Av,v_Av,t); uAv[j] = u_Av; vAv[j] = v_Av; } else if(t>=START_VELFLUC){ u_Av = uAv[j]; v_Av = vAv[j]; u_fluc = ufluc[j]; v_fluc = vfluc[j]; vel_flucLR(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,u_Av,v_Av,u_fluc,v_fluc,t); ufluc[j] = u_fluc; vfluc[j] = v_fluc; } } fout[f_memLR(0 ,x,y,z,pitch)] = f0 ; fout[f_memLR(1 ,x,y,z,pitch)] = f1 ; fout[f_memLR(2 ,x,y,z,pitch)] = f2 ; fout[f_memLR(3 ,x,y,z,pitch)] = f3 ; fout[f_memLR(4 ,x,y,z,pitch)] = f4 ; fout[f_memLR(5 ,x,y,z,pitch)] = f5 ; fout[f_memLR(6 ,x,y,z,pitch)] = f6 ; fout[f_memLR(7 ,x,y,z,pitch)] = f7 ; fout[f_memLR(8 ,x,y,z,pitch)] = f8 ; fout[f_memLR(9 ,x,y,z,pitch)] = f9 ; fout[f_memLR(10,x,y,z,pitch)] = f10; 
fout[f_memLR(11,x,y,z,pitch)] = f11; fout[f_memLR(12,x,y,z,pitch)] = f12; fout[f_memLR(13,x,y,z,pitch)] = f13; fout[f_memLR(14,x,y,z,pitch)] = f14; fout[f_memLR(15,x,y,z,pitch)] = f15; fout[f_memLR(16,x,y,z,pitch)] = f16; fout[f_memLR(17,x,y,z,pitch)] = f17; fout[f_memLR(18,x,y,z,pitch)] = f18; } // }//end else (not at edge of LR) syncthreads(); if(check[0] == 1 && t>=STARTF){ //reduction for force int nTotalThreads = blockDim.x; while(nTotalThreads > 1){ int halfPoint = (nTotalThreads >> 1); if(threadIdx.x < halfPoint){ sumX[threadIdx.x] += sumX[threadIdx.x+halfPoint]; sumY[threadIdx.x] += sumY[threadIdx.x+halfPoint]; sumZ[threadIdx.x] += sumZ[threadIdx.x+halfPoint]; } syncthreads(); nTotalThreads = halfPoint; } if(threadIdx.x == 0){ atomicAdd(&FX[t],sumX[0]); atomicAdd(&FY[t],sumY[0]); atomicAdd(&FZ[t],sumZ[0]); } } } __global__ void LR_d_ABCD2(float* fin, float* fout, float omega, size_t pitch, int n, int t,//pitch in elements float *uAv, float *vAv, float *ufluc, float *vfluc)//pitch in elements { int x = threadIdx.x+blockIdx.x*blockDim.x;//coord in linear mem int y = threadIdx.y+blockIdx.y*blockDim.y; int z = threadIdx.z+blockIdx.z*blockDim.z; float xcoord = LRX0+x*LRFACTOR; float ycoord = LRY0+y*LRFACTOR; float zcoord = LRZ0+z*LRFACTOR; int j = x+y*pitch+z*YLRDIM*pitch;//index on padded mem (pitch in elements) float f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18; int im = ImageFcn(xcoord,ycoord,zcoord); float u_Av, v_Av, u_fluc, v_fluc; // if(x < n || x > XLRDIM-1-n || y < n || y > YLRDIM-1-n || z < n || z > ZLRDIM-1-n) // { // //dont do anything // } // else{ f0 = fin[j]; f1 = fin[f_memLR(1 ,x-1,y ,z ,pitch)]; f3 = fin[f_memLR(3 ,x+1,y ,z ,pitch)]; f2 = fin[f_memLR(2 ,x ,y-1,z ,pitch)]; f5 = fin[f_memLR(5 ,x-1,y-1,z ,pitch)]; f6 = fin[f_memLR(6 ,x+1,y-1,z ,pitch)]; f4 = fin[f_memLR(4 ,x ,y+1,z ,pitch)]; f7 = fin[f_memLR(7 ,x+1,y+1,z ,pitch)]; f8 = fin[f_memLR(8 ,x-1,y+1,z ,pitch)]; if(z != 0){ f9 = fin[f_memLR(9 ,x ,y ,z-1,pitch)]; f10= fin[f_memLR(10,x-1,y ,z-1,pitch)]; f11= fin[f_memLR(11,x ,y-1,z-1,pitch)]; f12= fin[f_memLR(12,x+1,y ,z-1,pitch)]; f13= fin[f_memLR(13,x ,y+1,z-1,pitch)]; } else{ f9 = fin[f_memLR(9 ,x ,y ,ZLRDIM-1,pitch)]; f10= fin[f_memLR(10,dmax_p(x-1,XLRDIM),y ,ZLRDIM-1,pitch)]; f11= fin[f_memLR(11,x ,dmax_p(y-1,YLRDIM),ZLRDIM-1,pitch)]; f12= fin[f_memLR(12,dmin_p(x+1,XLRDIM),y ,ZLRDIM-1,pitch)]; f13= fin[f_memLR(13,x ,dmin_p(y+1,YLRDIM),ZLRDIM-1,pitch)]; } if(z != ZLRDIM-1){ f14= fin[f_memLR(14,x ,y ,z+1,pitch)]; f15= fin[f_memLR(15,x-1,y ,z+1,pitch)]; f16= fin[f_memLR(16,x ,y-1,z+1,pitch)]; f17= fin[f_memLR(17,x+1,y ,z+1,pitch)]; f18= fin[f_memLR(18,x ,y+1,z+1,pitch)]; } else{ f14= fin[f_memLR(14,x ,y ,0 ,pitch)]; f15= fin[f_memLR(15,dmax_p(x-1,XLRDIM),y ,0 ,pitch)]; f16= fin[f_memLR(16,x ,dmax_p(y-1,YLRDIM),0 ,pitch)]; f17= fin[f_memLR(17,dmin_p(x+1,XLRDIM),y ,0 ,pitch)]; f18= fin[f_memLR(18,x ,dmin_p(y+1,YLRDIM),0 ,pitch)]; } if(im == 1 || im ==10){//BB fout[f_memLR(1 ,x,y,z,pitch)] = f3 ; fout[f_memLR(2 ,x,y,z,pitch)] = f4 ; fout[f_memLR(3 ,x,y,z,pitch)] = f1 ; fout[f_memLR(4 ,x,y,z,pitch)] = f2 ; fout[f_memLR(5 ,x,y,z,pitch)] = f7 ; fout[f_memLR(6 ,x,y,z,pitch)] = f8 ; fout[f_memLR(7 ,x,y,z,pitch)] = f5 ; fout[f_memLR(8 ,x,y,z,pitch)] = f6 ; fout[f_memLR(9 ,x,y,z,pitch)] = f14; fout[f_memLR(10,x,y,z,pitch)] = f17; fout[f_memLR(11,x,y,z,pitch)] = f18; fout[f_memLR(12,x,y,z,pitch)] = f15; fout[f_memLR(13,x,y,z,pitch)] = f16; fout[f_memLR(14,x,y,z,pitch)] = f9 ; fout[f_memLR(15,x,y,z,pitch)] = f12; fout[f_memLR(16,x,y,z,pitch)] = f13; 
fout[f_memLR(17,x,y,z,pitch)] = f10; fout[f_memLR(18,x,y,z,pitch)] = f11; } else{ if(MODEL == "MRT") mrt_collide(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,omega); else if(MODEL == "BGK") bgk_collide(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,omega); if(VELAV == "YES"){ if(t>=START_VELAV && t<START_VELFLUC){ u_Av = uAv[j]; v_Av = vAv[j]; vel_avLR(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,u_Av,v_Av,t+LRFACTOR*n); uAv[j] = u_Av; vAv[j] = v_Av; } else if(t>=START_VELFLUC){ u_Av = uAv[j]; v_Av = vAv[j]; u_fluc = ufluc[j]; v_fluc = vfluc[j]; vel_flucLR(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,u_Av,v_Av,u_fluc,v_fluc,t+LRFACTOR*n); ufluc[j] = u_fluc; vfluc[j] = v_fluc; } } fout[f_memLR(0 ,x,y,z,pitch)] = f0 ; fout[f_memLR(1 ,x,y,z,pitch)] = f1 ; fout[f_memLR(2 ,x,y,z,pitch)] = f2 ; fout[f_memLR(3 ,x,y,z,pitch)] = f3 ; fout[f_memLR(4 ,x,y,z,pitch)] = f4 ; fout[f_memLR(5 ,x,y,z,pitch)] = f5 ; fout[f_memLR(6 ,x,y,z,pitch)] = f6 ; fout[f_memLR(7 ,x,y,z,pitch)] = f7 ; fout[f_memLR(8 ,x,y,z,pitch)] = f8 ; fout[f_memLR(9 ,x,y,z,pitch)] = f9 ; fout[f_memLR(10,x,y,z,pitch)] = f10; fout[f_memLR(11,x,y,z,pitch)] = f11; fout[f_memLR(12,x,y,z,pitch)] = f12; fout[f_memLR(13,x,y,z,pitch)] = f13; fout[f_memLR(14,x,y,z,pitch)] = f14; fout[f_memLR(15,x,y,z,pitch)] = f15; fout[f_memLR(16,x,y,z,pitch)] = f16; fout[f_memLR(17,x,y,z,pitch)] = f17; fout[f_memLR(18,x,y,z,pitch)] = f18; } // }//end else (not at edge of LR) } __global__ void LR_d_ABDC2(float* fin, float* fout, float omega, size_t pitch, float SF, int n, int t,float *uAv, float *vAv, float *ufluc, float *vfluc) { int x = threadIdx.x+blockIdx.x*blockDim.x;//coord in linear mem int y = threadIdx.y+blockIdx.y*blockDim.y; int z = threadIdx.z+blockIdx.z*blockDim.z; float xcoord = LRX0+x*LRFACTOR;//+0.5f is because textures are stored in a voxel centered fashion. we need to change this to vertex centered. 
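  // xcoord/ycoord/zcoord below: position of this fine-grid (LR) node in coarse-grid units,
  // offset from the refined-patch origin (LRX0,LRY0,LRZ0) and scaled by LRFACTOR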
float ycoord = LRY0+y*LRFACTOR; float zcoord = LRZ0+z*LRFACTOR;//dont need to +0.5f because z is not using texture interpolation // int zminus = int(zcoord); // int zplus = zminus+1; int j = x+y*pitch+z*YLRDIM*pitch;//index on padded mem (pitch in elements) float f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18; int im = ImageFcn(xcoord,ycoord,zcoord); float u_Av, v_Av, u_fluc, v_fluc; // if(x < n || x > XLRDIM-1-n || y < n || y > YLRDIM-1-n || z < n || z > ZLRDIM-1-n) // { // //no interp // } // else{ f0 = fin[j]; f1 = fin[f_memLR(1 ,x-1,y ,z ,pitch)]; f3 = fin[f_memLR(3 ,x+1,y ,z ,pitch)]; f2 = fin[f_memLR(2 ,x ,y-1,z ,pitch)]; f5 = fin[f_memLR(5 ,x-1,y-1,z ,pitch)]; f6 = fin[f_memLR(6 ,x+1,y-1,z ,pitch)]; f4 = fin[f_memLR(4 ,x ,y+1,z ,pitch)]; f7 = fin[f_memLR(7 ,x+1,y+1,z ,pitch)]; f8 = fin[f_memLR(8 ,x-1,y+1,z ,pitch)]; if(z != 0){ f9 = fin[f_memLR(9 ,x ,y ,z-1,pitch)]; f10= fin[f_memLR(10,x-1,y ,z-1,pitch)]; f11= fin[f_memLR(11,x ,y-1,z-1,pitch)]; f12= fin[f_memLR(12,x+1,y ,z-1,pitch)]; f13= fin[f_memLR(13,x ,y+1,z-1,pitch)]; } else{ f9 = fin[f_memLR(9 ,x ,y ,ZLRDIM-1,pitch)]; f10= fin[f_memLR(10,dmax_p(x-1,XLRDIM),y ,ZLRDIM-1,pitch)]; f11= fin[f_memLR(11,x ,dmax_p(y-1,YLRDIM),ZLRDIM-1,pitch)]; f12= fin[f_memLR(12,dmin_p(x+1,XLRDIM),y ,ZLRDIM-1,pitch)]; f13= fin[f_memLR(13,x ,dmin_p(y+1,YLRDIM),ZLRDIM-1,pitch)]; } if(z != ZLRDIM-1){ f14= fin[f_memLR(14,x ,y ,z+1,pitch)]; f15= fin[f_memLR(15,x-1,y ,z+1,pitch)]; f16= fin[f_memLR(16,x ,y-1,z+1,pitch)]; f17= fin[f_memLR(17,x+1,y ,z+1,pitch)]; f18= fin[f_memLR(18,x ,y+1,z+1,pitch)]; } else{ f14= fin[f_memLR(14,x ,y ,0 ,pitch)]; f15= fin[f_memLR(15,dmax_p(x-1,XLRDIM),y ,0 ,pitch)]; f16= fin[f_memLR(16,x ,dmax_p(y-1,YLRDIM),0 ,pitch)]; f17= fin[f_memLR(17,dmin_p(x+1,XLRDIM),y ,0 ,pitch)]; f18= fin[f_memLR(18,x ,dmin_p(y+1,YLRDIM),0 ,pitch)]; } //else f18 = 0.1f; if(im == 1 || im ==10){//BB fout[f_memLR(1 ,x,y,z,pitch)] = f3 ; fout[f_memLR(2 ,x,y,z,pitch)] = f4 ; fout[f_memLR(3 ,x,y,z,pitch)] = f1 ; fout[f_memLR(4 ,x,y,z,pitch)] = f2 ; fout[f_memLR(5 ,x,y,z,pitch)] = f7 ; fout[f_memLR(6 ,x,y,z,pitch)] = f8 ; fout[f_memLR(7 ,x,y,z,pitch)] = f5 ; fout[f_memLR(8 ,x,y,z,pitch)] = f6 ; fout[f_memLR(9 ,x,y,z,pitch)] = f14; fout[f_memLR(10,x,y,z,pitch)] = f17; fout[f_memLR(11,x,y,z,pitch)] = f18; fout[f_memLR(12,x,y,z,pitch)] = f15; fout[f_memLR(13,x,y,z,pitch)] = f16; fout[f_memLR(14,x,y,z,pitch)] = f9 ; fout[f_memLR(15,x,y,z,pitch)] = f12; fout[f_memLR(16,x,y,z,pitch)] = f13; fout[f_memLR(17,x,y,z,pitch)] = f10; fout[f_memLR(18,x,y,z,pitch)] = f11; } else{ if(MODEL == "MRT") mrt_collide(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,omega); else if(MODEL == "BGK") bgk_collide(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,omega); if(VELAV == "YES"){ if(t>=START_VELAV && t<START_VELFLUC){ u_Av = uAv[j]; v_Av = vAv[j]; vel_avLR(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,u_Av,v_Av,t+LRFACTOR*n); uAv[j] = u_Av; vAv[j] = v_Av; } else if(t>=START_VELFLUC){ u_Av = uAv[j]; v_Av = vAv[j]; u_fluc = ufluc[j]; v_fluc = vfluc[j]; vel_flucLR(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,u_Av,v_Av,u_fluc,v_fluc,t+LRFACTOR*n); ufluc[j] = u_fluc; vfluc[j] = v_fluc; } } fout[f_memLR(0 ,x,y,z,pitch)] = f0 ; fout[f_memLR(1 ,x,y,z,pitch)] = f1 ; fout[f_memLR(2 ,x,y,z,pitch)] = f2 ; fout[f_memLR(3 ,x,y,z,pitch)] = f3 ; fout[f_memLR(4 ,x,y,z,pitch)] = f4 ; fout[f_memLR(5 ,x,y,z,pitch)] = f5 ; fout[f_memLR(6 ,x,y,z,pitch)] = f6 ; fout[f_memLR(7 ,x,y,z,pitch)] = f7 ; 
fout[f_memLR(8 ,x,y,z,pitch)] = f8 ; fout[f_memLR(9 ,x,y,z,pitch)] = f9 ; fout[f_memLR(10,x,y,z,pitch)] = f10; fout[f_memLR(11,x,y,z,pitch)] = f11; fout[f_memLR(12,x,y,z,pitch)] = f12; fout[f_memLR(13,x,y,z,pitch)] = f13; fout[f_memLR(14,x,y,z,pitch)] = f14; fout[f_memLR(15,x,y,z,pitch)] = f15; fout[f_memLR(16,x,y,z,pitch)] = f16; fout[f_memLR(17,x,y,z,pitch)] = f17; fout[f_memLR(18,x,y,z,pitch)] = f18; } // }//end else (not at edge of LR) } __global__ void LR_d_ABDC_Interp(float* fin, float* fout, float omega, size_t pitch, float SF, int t, float *uAv, float *vAv, float *ufluc, float *vfluc)//pitch in elements { int x = threadIdx.x+blockIdx.x*blockDim.x;//coord in linear mem int y = threadIdx.y+blockIdx.y*blockDim.y; int z = threadIdx.z+blockIdx.z*blockDim.z; float xcoord = LRX0+x*LRFACTOR;//+0.5f is because textures are stored in a voxel centered fashion. we need to change this to vertex centered. float ycoord = LRY0+y*LRFACTOR; float zcoord = LRZ0+z*LRFACTOR;//dont need to +0.5f because z is not using texture interpolation int zminus = int(zcoord); int zplus = zminus+1; int j = x+y*pitch+z*YLRDIM*pitch;//index on padded mem (pitch in elements) float f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18; int im = ImageFcn(xcoord,ycoord,zcoord); float u_Av, v_Av, u_fluc, v_fluc; if(((x < LRLEVEL || x > XLRDIM-1-LRLEVEL || y < LRLEVEL || y > YLRDIM-1-LRLEVEL || z < LRLEVEL || z > ZLRDIM-1-LRLEVEL) && ZPERIODIC == "NO") ||((x < LRLEVEL || x > XLRDIM-1-LRLEVEL || y < LRLEVEL || y > YLRDIM-1-LRLEVEL) && ZPERIODIC == "YES")) { if(ZPERIODIC == "YES"){ if(zcoord < 0){ //if zcoord=-0.25f, 1+zcoord=0.75f f0 = (1.f+zcoord)*tex2D(texRef_f0B ,xcoord+0.5f,ycoord+0.5f+YDIM*(zminus))+(-zcoord)*tex2D(texRef_f0B ,xcoord+0.5f,ycoord+0.5f+YDIM*(ZDIM-1)); f2 = (1.f+zcoord)*tex2D(texRef_f2B ,xcoord+0.5f,ycoord+0.5f+YDIM*(zminus))+(-zcoord)*tex2D(texRef_f2B ,xcoord+0.5f,ycoord+0.5f+YDIM*(ZDIM-1)); f4 = (1.f+zcoord)*tex2D(texRef_f4B ,xcoord+0.5f,ycoord+0.5f+YDIM*(zminus))+(-zcoord)*tex2D(texRef_f4B ,xcoord+0.5f,ycoord+0.5f+YDIM*(ZDIM-1)); f9 = (1.f+zcoord)*tex2D(texRef_f9B ,xcoord+0.5f,ycoord+0.5f+YDIM*(zminus))+(-zcoord)*tex2D(texRef_f9B ,xcoord+0.5f,ycoord+0.5f+YDIM*(ZDIM-1)); f11= (1.f+zcoord)*tex2D(texRef_f11B,xcoord+0.5f,ycoord+0.5f+YDIM*(zminus))+(-zcoord)*tex2D(texRef_f11B,xcoord+0.5f,ycoord+0.5f+YDIM*(ZDIM-1)); f13= (1.f+zcoord)*tex2D(texRef_f13B,xcoord+0.5f,ycoord+0.5f+YDIM*(zminus))+(-zcoord)*tex2D(texRef_f13B,xcoord+0.5f,ycoord+0.5f+YDIM*(ZDIM-1)); f14= (1.f+zcoord)*tex2D(texRef_f14B,xcoord+0.5f,ycoord+0.5f+YDIM*(zminus))+(-zcoord)*tex2D(texRef_f14B,xcoord+0.5f,ycoord+0.5f+YDIM*(ZDIM-1)); f16= (1.f+zcoord)*tex2D(texRef_f16B,xcoord+0.5f,ycoord+0.5f+YDIM*(zminus))+(-zcoord)*tex2D(texRef_f16B,xcoord+0.5f,ycoord+0.5f+YDIM*(ZDIM-1)); f18= (1.f+zcoord)*tex2D(texRef_f18B,xcoord+0.5f,ycoord+0.5f+YDIM*(zminus))+(-zcoord)*tex2D(texRef_f18B,xcoord+0.5f,ycoord+0.5f+YDIM*(ZDIM-1)); f1 = (1.f+zcoord)*tex2D(texRef_f1B ,xcoord+0.5f,ycoord+0.5f+YDIM*(zminus))+(-zcoord)*tex2D(texRef_f1B ,xcoord+0.5f,ycoord+0.5f+YDIM*(ZDIM-1)); f3 = (1.f+zcoord)*tex2D(texRef_f3B ,xcoord+0.5f,ycoord+0.5f+YDIM*(zminus))+(-zcoord)*tex2D(texRef_f3B ,xcoord+0.5f,ycoord+0.5f+YDIM*(ZDIM-1)); f5 = (1.f+zcoord)*tex2D(texRef_f5B ,xcoord+0.5f,ycoord+0.5f+YDIM*(zminus))+(-zcoord)*tex2D(texRef_f5B ,xcoord+0.5f,ycoord+0.5f+YDIM*(ZDIM-1)); f6 = (1.f+zcoord)*tex2D(texRef_f6B ,xcoord+0.5f,ycoord+0.5f+YDIM*(zminus))+(-zcoord)*tex2D(texRef_f6B ,xcoord+0.5f,ycoord+0.5f+YDIM*(ZDIM-1)); f7 = (1.f+zcoord)*tex2D(texRef_f7B 
,xcoord+0.5f,ycoord+0.5f+YDIM*(zminus))+(-zcoord)*tex2D(texRef_f7B ,xcoord+0.5f,ycoord+0.5f+YDIM*(ZDIM-1)); f8 = (1.f+zcoord)*tex2D(texRef_f8B ,xcoord+0.5f,ycoord+0.5f+YDIM*(zminus))+(-zcoord)*tex2D(texRef_f8B ,xcoord+0.5f,ycoord+0.5f+YDIM*(ZDIM-1)); f15= (1.f+zcoord)*tex2D(texRef_f15B,xcoord+0.5f,ycoord+0.5f+YDIM*(zminus))+(-zcoord)*tex2D(texRef_f15B,xcoord+0.5f,ycoord+0.5f+YDIM*(ZDIM-1)); f17= (1.f+zcoord)*tex2D(texRef_f17B,xcoord+0.5f,ycoord+0.5f+YDIM*(zminus))+(-zcoord)*tex2D(texRef_f17B,xcoord+0.5f,ycoord+0.5f+YDIM*(ZDIM-1)); f10= (1.f+zcoord)*tex2D(texRef_f10B,xcoord+0.5f,ycoord+0.5f+YDIM*(zminus))+(-zcoord)*tex2D(texRef_f10B,xcoord+0.5f,ycoord+0.5f+YDIM*(ZDIM-1)); f12= (1.f+zcoord)*tex2D(texRef_f12B,xcoord+0.5f,ycoord+0.5f+YDIM*(zminus))+(-zcoord)*tex2D(texRef_f12B,xcoord+0.5f,ycoord+0.5f+YDIM*(ZDIM-1)); } else if(zcoord > ZDIM-1){ f0 = (zplus-zcoord)*tex2D(texRef_f0B ,xcoord+0.5f,ycoord+0.5f+YDIM*(zminus))+(zcoord-zminus)*tex2D(texRef_f0B ,xcoord+0.5f,ycoord+0.5f+YDIM*(0)); f2 = (zplus-zcoord)*tex2D(texRef_f2B ,xcoord+0.5f,ycoord+0.5f+YDIM*(zminus))+(zcoord-zminus)*tex2D(texRef_f2B ,xcoord+0.5f,ycoord+0.5f+YDIM*(0)); f4 = (zplus-zcoord)*tex2D(texRef_f4B ,xcoord+0.5f,ycoord+0.5f+YDIM*(zminus))+(zcoord-zminus)*tex2D(texRef_f4B ,xcoord+0.5f,ycoord+0.5f+YDIM*(0)); f9 = (zplus-zcoord)*tex2D(texRef_f9B ,xcoord+0.5f,ycoord+0.5f+YDIM*(zminus))+(zcoord-zminus)*tex2D(texRef_f9B ,xcoord+0.5f,ycoord+0.5f+YDIM*(0)); f11= (zplus-zcoord)*tex2D(texRef_f11B,xcoord+0.5f,ycoord+0.5f+YDIM*(zminus))+(zcoord-zminus)*tex2D(texRef_f11B,xcoord+0.5f,ycoord+0.5f+YDIM*(0)); f13= (zplus-zcoord)*tex2D(texRef_f13B,xcoord+0.5f,ycoord+0.5f+YDIM*(zminus))+(zcoord-zminus)*tex2D(texRef_f13B,xcoord+0.5f,ycoord+0.5f+YDIM*(0)); f14= (zplus-zcoord)*tex2D(texRef_f14B,xcoord+0.5f,ycoord+0.5f+YDIM*(zminus))+(zcoord-zminus)*tex2D(texRef_f14B,xcoord+0.5f,ycoord+0.5f+YDIM*(0)); f16= (zplus-zcoord)*tex2D(texRef_f16B,xcoord+0.5f,ycoord+0.5f+YDIM*(zminus))+(zcoord-zminus)*tex2D(texRef_f16B,xcoord+0.5f,ycoord+0.5f+YDIM*(0)); f18= (zplus-zcoord)*tex2D(texRef_f18B,xcoord+0.5f,ycoord+0.5f+YDIM*(zminus))+(zcoord-zminus)*tex2D(texRef_f18B,xcoord+0.5f,ycoord+0.5f+YDIM*(0)); f1 = (zplus-zcoord)*tex2D(texRef_f1B ,xcoord+0.5f,ycoord+0.5f+YDIM*(zminus))+(zcoord-zminus)*tex2D(texRef_f1B ,xcoord+0.5f,ycoord+0.5f+YDIM*(0)); f3 = (zplus-zcoord)*tex2D(texRef_f3B ,xcoord+0.5f,ycoord+0.5f+YDIM*(zminus))+(zcoord-zminus)*tex2D(texRef_f3B ,xcoord+0.5f,ycoord+0.5f+YDIM*(0)); f5 = (zplus-zcoord)*tex2D(texRef_f5B ,xcoord+0.5f,ycoord+0.5f+YDIM*(zminus))+(zcoord-zminus)*tex2D(texRef_f5B ,xcoord+0.5f,ycoord+0.5f+YDIM*(0)); f6 = (zplus-zcoord)*tex2D(texRef_f6B ,xcoord+0.5f,ycoord+0.5f+YDIM*(zminus))+(zcoord-zminus)*tex2D(texRef_f6B ,xcoord+0.5f,ycoord+0.5f+YDIM*(0)); f7 = (zplus-zcoord)*tex2D(texRef_f7B ,xcoord+0.5f,ycoord+0.5f+YDIM*(zminus))+(zcoord-zminus)*tex2D(texRef_f7B ,xcoord+0.5f,ycoord+0.5f+YDIM*(0)); f8 = (zplus-zcoord)*tex2D(texRef_f8B ,xcoord+0.5f,ycoord+0.5f+YDIM*(zminus))+(zcoord-zminus)*tex2D(texRef_f8B ,xcoord+0.5f,ycoord+0.5f+YDIM*(0)); f15= (zplus-zcoord)*tex2D(texRef_f15B,xcoord+0.5f,ycoord+0.5f+YDIM*(zminus))+(zcoord-zminus)*tex2D(texRef_f15B,xcoord+0.5f,ycoord+0.5f+YDIM*(0)); f17= (zplus-zcoord)*tex2D(texRef_f17B,xcoord+0.5f,ycoord+0.5f+YDIM*(zminus))+(zcoord-zminus)*tex2D(texRef_f17B,xcoord+0.5f,ycoord+0.5f+YDIM*(0)); f10= (zplus-zcoord)*tex2D(texRef_f10B,xcoord+0.5f,ycoord+0.5f+YDIM*(zminus))+(zcoord-zminus)*tex2D(texRef_f10B,xcoord+0.5f,ycoord+0.5f+YDIM*(0)); f12= 
(zplus-zcoord)*tex2D(texRef_f12B,xcoord+0.5f,ycoord+0.5f+YDIM*(zminus))+(zcoord-zminus)*tex2D(texRef_f12B,xcoord+0.5f,ycoord+0.5f+YDIM*(0)); } else{ //interpolate for next time step. from B //YDIM and not YLRDIM f0 = (zplus-zcoord)*tex2D(texRef_f0B ,xcoord+0.5f,ycoord+0.5f+YDIM*(zminus))+(zcoord-zminus)*tex2D(texRef_f0B ,xcoord+0.5f,ycoord+0.5f+YDIM*(zplus)); f2 = (zplus-zcoord)*tex2D(texRef_f2B ,xcoord+0.5f,ycoord+0.5f+YDIM*(zminus))+(zcoord-zminus)*tex2D(texRef_f2B ,xcoord+0.5f,ycoord+0.5f+YDIM*(zplus)); f4 = (zplus-zcoord)*tex2D(texRef_f4B ,xcoord+0.5f,ycoord+0.5f+YDIM*(zminus))+(zcoord-zminus)*tex2D(texRef_f4B ,xcoord+0.5f,ycoord+0.5f+YDIM*(zplus)); f9 = (zplus-zcoord)*tex2D(texRef_f9B ,xcoord+0.5f,ycoord+0.5f+YDIM*(zminus))+(zcoord-zminus)*tex2D(texRef_f9B ,xcoord+0.5f,ycoord+0.5f+YDIM*(zplus)); f11= (zplus-zcoord)*tex2D(texRef_f11B,xcoord+0.5f,ycoord+0.5f+YDIM*(zminus))+(zcoord-zminus)*tex2D(texRef_f11B,xcoord+0.5f,ycoord+0.5f+YDIM*(zplus)); f13= (zplus-zcoord)*tex2D(texRef_f13B,xcoord+0.5f,ycoord+0.5f+YDIM*(zminus))+(zcoord-zminus)*tex2D(texRef_f13B,xcoord+0.5f,ycoord+0.5f+YDIM*(zplus)); f14= (zplus-zcoord)*tex2D(texRef_f14B,xcoord+0.5f,ycoord+0.5f+YDIM*(zminus))+(zcoord-zminus)*tex2D(texRef_f14B,xcoord+0.5f,ycoord+0.5f+YDIM*(zplus)); f16= (zplus-zcoord)*tex2D(texRef_f16B,xcoord+0.5f,ycoord+0.5f+YDIM*(zminus))+(zcoord-zminus)*tex2D(texRef_f16B,xcoord+0.5f,ycoord+0.5f+YDIM*(zplus)); f18= (zplus-zcoord)*tex2D(texRef_f18B,xcoord+0.5f,ycoord+0.5f+YDIM*(zminus))+(zcoord-zminus)*tex2D(texRef_f18B,xcoord+0.5f,ycoord+0.5f+YDIM*(zplus)); f1 = (zplus-zcoord)*tex2D(texRef_f1B ,xcoord+0.5f,ycoord+0.5f+YDIM*(zminus))+(zcoord-zminus)*tex2D(texRef_f1B ,xcoord+0.5f,ycoord+0.5f+YDIM*(zplus)); f3 = (zplus-zcoord)*tex2D(texRef_f3B ,xcoord+0.5f,ycoord+0.5f+YDIM*(zminus))+(zcoord-zminus)*tex2D(texRef_f3B ,xcoord+0.5f,ycoord+0.5f+YDIM*(zplus)); f5 = (zplus-zcoord)*tex2D(texRef_f5B ,xcoord+0.5f,ycoord+0.5f+YDIM*(zminus))+(zcoord-zminus)*tex2D(texRef_f5B ,xcoord+0.5f,ycoord+0.5f+YDIM*(zplus)); f6 = (zplus-zcoord)*tex2D(texRef_f6B ,xcoord+0.5f,ycoord+0.5f+YDIM*(zminus))+(zcoord-zminus)*tex2D(texRef_f6B ,xcoord+0.5f,ycoord+0.5f+YDIM*(zplus)); f7 = (zplus-zcoord)*tex2D(texRef_f7B ,xcoord+0.5f,ycoord+0.5f+YDIM*(zminus))+(zcoord-zminus)*tex2D(texRef_f7B ,xcoord+0.5f,ycoord+0.5f+YDIM*(zplus)); f8 = (zplus-zcoord)*tex2D(texRef_f8B ,xcoord+0.5f,ycoord+0.5f+YDIM*(zminus))+(zcoord-zminus)*tex2D(texRef_f8B ,xcoord+0.5f,ycoord+0.5f+YDIM*(zplus)); f15= (zplus-zcoord)*tex2D(texRef_f15B,xcoord+0.5f,ycoord+0.5f+YDIM*(zminus))+(zcoord-zminus)*tex2D(texRef_f15B,xcoord+0.5f,ycoord+0.5f+YDIM*(zplus)); f17= (zplus-zcoord)*tex2D(texRef_f17B,xcoord+0.5f,ycoord+0.5f+YDIM*(zminus))+(zcoord-zminus)*tex2D(texRef_f17B,xcoord+0.5f,ycoord+0.5f+YDIM*(zplus)); f10= (zplus-zcoord)*tex2D(texRef_f10B,xcoord+0.5f,ycoord+0.5f+YDIM*(zminus))+(zcoord-zminus)*tex2D(texRef_f10B,xcoord+0.5f,ycoord+0.5f+YDIM*(zplus)); f12= (zplus-zcoord)*tex2D(texRef_f12B,xcoord+0.5f,ycoord+0.5f+YDIM*(zminus))+(zcoord-zminus)*tex2D(texRef_f12B,xcoord+0.5f,ycoord+0.5f+YDIM*(zplus)); } } else{ //interpolate for next time step. 
from B //YDIM and not YLRDIM f0 = (zplus-zcoord)*tex2D(texRef_f0B ,xcoord+0.5f,ycoord+0.5f+YDIM*(zminus))+(zcoord-zminus)*tex2D(texRef_f0B ,xcoord+0.5f,ycoord+0.5f+YDIM*(zplus)); f2 = (zplus-zcoord)*tex2D(texRef_f2B ,xcoord+0.5f,ycoord+0.5f+YDIM*(zminus))+(zcoord-zminus)*tex2D(texRef_f2B ,xcoord+0.5f,ycoord+0.5f+YDIM*(zplus)); f4 = (zplus-zcoord)*tex2D(texRef_f4B ,xcoord+0.5f,ycoord+0.5f+YDIM*(zminus))+(zcoord-zminus)*tex2D(texRef_f4B ,xcoord+0.5f,ycoord+0.5f+YDIM*(zplus)); f9 = (zplus-zcoord)*tex2D(texRef_f9B ,xcoord+0.5f,ycoord+0.5f+YDIM*(zminus))+(zcoord-zminus)*tex2D(texRef_f9B ,xcoord+0.5f,ycoord+0.5f+YDIM*(zplus)); f11= (zplus-zcoord)*tex2D(texRef_f11B,xcoord+0.5f,ycoord+0.5f+YDIM*(zminus))+(zcoord-zminus)*tex2D(texRef_f11B,xcoord+0.5f,ycoord+0.5f+YDIM*(zplus)); f13= (zplus-zcoord)*tex2D(texRef_f13B,xcoord+0.5f,ycoord+0.5f+YDIM*(zminus))+(zcoord-zminus)*tex2D(texRef_f13B,xcoord+0.5f,ycoord+0.5f+YDIM*(zplus)); f14= (zplus-zcoord)*tex2D(texRef_f14B,xcoord+0.5f,ycoord+0.5f+YDIM*(zminus))+(zcoord-zminus)*tex2D(texRef_f14B,xcoord+0.5f,ycoord+0.5f+YDIM*(zplus)); f16= (zplus-zcoord)*tex2D(texRef_f16B,xcoord+0.5f,ycoord+0.5f+YDIM*(zminus))+(zcoord-zminus)*tex2D(texRef_f16B,xcoord+0.5f,ycoord+0.5f+YDIM*(zplus)); f18= (zplus-zcoord)*tex2D(texRef_f18B,xcoord+0.5f,ycoord+0.5f+YDIM*(zminus))+(zcoord-zminus)*tex2D(texRef_f18B,xcoord+0.5f,ycoord+0.5f+YDIM*(zplus)); f1 = (zplus-zcoord)*tex2D(texRef_f1B ,xcoord+0.5f,ycoord+0.5f+YDIM*(zminus))+(zcoord-zminus)*tex2D(texRef_f1B ,xcoord+0.5f,ycoord+0.5f+YDIM*(zplus)); f3 = (zplus-zcoord)*tex2D(texRef_f3B ,xcoord+0.5f,ycoord+0.5f+YDIM*(zminus))+(zcoord-zminus)*tex2D(texRef_f3B ,xcoord+0.5f,ycoord+0.5f+YDIM*(zplus)); f5 = (zplus-zcoord)*tex2D(texRef_f5B ,xcoord+0.5f,ycoord+0.5f+YDIM*(zminus))+(zcoord-zminus)*tex2D(texRef_f5B ,xcoord+0.5f,ycoord+0.5f+YDIM*(zplus)); f6 = (zplus-zcoord)*tex2D(texRef_f6B ,xcoord+0.5f,ycoord+0.5f+YDIM*(zminus))+(zcoord-zminus)*tex2D(texRef_f6B ,xcoord+0.5f,ycoord+0.5f+YDIM*(zplus)); f7 = (zplus-zcoord)*tex2D(texRef_f7B ,xcoord+0.5f,ycoord+0.5f+YDIM*(zminus))+(zcoord-zminus)*tex2D(texRef_f7B ,xcoord+0.5f,ycoord+0.5f+YDIM*(zplus)); f8 = (zplus-zcoord)*tex2D(texRef_f8B ,xcoord+0.5f,ycoord+0.5f+YDIM*(zminus))+(zcoord-zminus)*tex2D(texRef_f8B ,xcoord+0.5f,ycoord+0.5f+YDIM*(zplus)); f15= (zplus-zcoord)*tex2D(texRef_f15B,xcoord+0.5f,ycoord+0.5f+YDIM*(zminus))+(zcoord-zminus)*tex2D(texRef_f15B,xcoord+0.5f,ycoord+0.5f+YDIM*(zplus)); f17= (zplus-zcoord)*tex2D(texRef_f17B,xcoord+0.5f,ycoord+0.5f+YDIM*(zminus))+(zcoord-zminus)*tex2D(texRef_f17B,xcoord+0.5f,ycoord+0.5f+YDIM*(zplus)); f10= (zplus-zcoord)*tex2D(texRef_f10B,xcoord+0.5f,ycoord+0.5f+YDIM*(zminus))+(zcoord-zminus)*tex2D(texRef_f10B,xcoord+0.5f,ycoord+0.5f+YDIM*(zplus)); f12= (zplus-zcoord)*tex2D(texRef_f12B,xcoord+0.5f,ycoord+0.5f+YDIM*(zminus))+(zcoord-zminus)*tex2D(texRef_f12B,xcoord+0.5f,ycoord+0.5f+YDIM*(zplus)); } if(MODEL == "MRT") mrt_scale_cf(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,SF); else bgk_scale_cf(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,SF); fout[f_memLR(0 ,x,y,z,pitch)] = f0 ; fout[f_memLR(1 ,x,y,z,pitch)] = f1 ; fout[f_memLR(2 ,x,y,z,pitch)] = f2 ; fout[f_memLR(3 ,x,y,z,pitch)] = f3 ; fout[f_memLR(4 ,x,y,z,pitch)] = f4 ; fout[f_memLR(5 ,x,y,z,pitch)] = f5 ; fout[f_memLR(6 ,x,y,z,pitch)] = f6 ; fout[f_memLR(7 ,x,y,z,pitch)] = f7 ; fout[f_memLR(8 ,x,y,z,pitch)] = f8 ; fout[f_memLR(9 ,x,y,z,pitch)] = f9 ; fout[f_memLR(10,x,y,z,pitch)] = f10; fout[f_memLR(11,x,y,z,pitch)] = f11; fout[f_memLR(12,x,y,z,pitch)] = f12; 
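	// Note on the fringe reconstruction above: the 19 populations were rebuilt from the
	// coarse-grid "B" textures by linear interpolation in z, with weights (zplus-zcoord)
	// and (zcoord-zminus); e.g. zcoord = 2.25f gives 0.75f on slice zminus=2 and 0.25f on
	// slice zplus=3 (special-cased when zcoord falls outside [0, ZDIM-1] under ZPERIODIC).
	// mrt_scale_cf/bgk_scale_cf presumably rescale the non-equilibrium part of these
	// coarse values by SF so they are consistent with the finer spacing and omega.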
fout[f_memLR(13,x,y,z,pitch)] = f13; fout[f_memLR(14,x,y,z,pitch)] = f14; fout[f_memLR(15,x,y,z,pitch)] = f15; fout[f_memLR(16,x,y,z,pitch)] = f16; fout[f_memLR(17,x,y,z,pitch)] = f17; fout[f_memLR(18,x,y,z,pitch)] = f18; } else{ f0 = fin[j]; f1 = fin[f_memLR(1 ,x-1,y ,z ,pitch)]; f3 = fin[f_memLR(3 ,x+1,y ,z ,pitch)]; f2 = fin[f_memLR(2 ,x ,y-1,z ,pitch)]; f5 = fin[f_memLR(5 ,x-1,y-1,z ,pitch)]; f6 = fin[f_memLR(6 ,x+1,y-1,z ,pitch)]; f4 = fin[f_memLR(4 ,x ,y+1,z ,pitch)]; f7 = fin[f_memLR(7 ,x+1,y+1,z ,pitch)]; f8 = fin[f_memLR(8 ,x-1,y+1,z ,pitch)]; if(z != 0){ f9 = fin[f_memLR(9 ,x ,y ,z-1,pitch)]; f10= fin[f_memLR(10,x-1,y ,z-1,pitch)]; f11= fin[f_memLR(11,x ,y-1,z-1,pitch)]; f12= fin[f_memLR(12,x+1,y ,z-1,pitch)]; f13= fin[f_memLR(13,x ,y+1,z-1,pitch)]; } else{ f9 = fin[f_memLR(9 ,x ,y ,ZLRDIM-1,pitch)]; f10= fin[f_memLR(10,dmax_p(x-1,XLRDIM),y ,ZLRDIM-1,pitch)]; f11= fin[f_memLR(11,x ,dmax_p(y-1,YLRDIM),ZLRDIM-1,pitch)]; f12= fin[f_memLR(12,dmin_p(x+1,XLRDIM),y ,ZLRDIM-1,pitch)]; f13= fin[f_memLR(13,x ,dmin_p(y+1,YLRDIM),ZLRDIM-1,pitch)]; } if(z != ZLRDIM-1){ f14= fin[f_memLR(14,x ,y ,z+1,pitch)]; f15= fin[f_memLR(15,x-1,y ,z+1,pitch)]; f16= fin[f_memLR(16,x ,y-1,z+1,pitch)]; f17= fin[f_memLR(17,x+1,y ,z+1,pitch)]; f18= fin[f_memLR(18,x ,y+1,z+1,pitch)]; } else{ f14= fin[f_memLR(14,x ,y ,0 ,pitch)]; f15= fin[f_memLR(15,dmax_p(x-1,XLRDIM),y ,0 ,pitch)]; f16= fin[f_memLR(16,x ,dmax_p(y-1,YLRDIM),0 ,pitch)]; f17= fin[f_memLR(17,dmin_p(x+1,XLRDIM),y ,0 ,pitch)]; f18= fin[f_memLR(18,x ,dmin_p(y+1,YLRDIM),0 ,pitch)]; } //else f18 = 0.1f; if(im == 1 || im ==10){//BB fout[f_memLR(1 ,x,y,z,pitch)] = f3 ; fout[f_memLR(2 ,x,y,z,pitch)] = f4 ; fout[f_memLR(3 ,x,y,z,pitch)] = f1 ; fout[f_memLR(4 ,x,y,z,pitch)] = f2 ; fout[f_memLR(5 ,x,y,z,pitch)] = f7 ; fout[f_memLR(6 ,x,y,z,pitch)] = f8 ; fout[f_memLR(7 ,x,y,z,pitch)] = f5 ; fout[f_memLR(8 ,x,y,z,pitch)] = f6 ; fout[f_memLR(9 ,x,y,z,pitch)] = f14; fout[f_memLR(10,x,y,z,pitch)] = f17; fout[f_memLR(11,x,y,z,pitch)] = f18; fout[f_memLR(12,x,y,z,pitch)] = f15; fout[f_memLR(13,x,y,z,pitch)] = f16; fout[f_memLR(14,x,y,z,pitch)] = f9 ; fout[f_memLR(15,x,y,z,pitch)] = f12; fout[f_memLR(16,x,y,z,pitch)] = f13; fout[f_memLR(17,x,y,z,pitch)] = f10; fout[f_memLR(18,x,y,z,pitch)] = f11; } else{ if(MODEL == "MRT") mrt_collide(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,omega); else if(MODEL == "BGK") bgk_collide(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,omega); if(VELAV == "YES"){ if(t>=START_VELAV && t<START_VELFLUC){ u_Av = uAv[j]; v_Av = vAv[j]; vel_avLR(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,u_Av,v_Av,t+1-LRFACTOR); uAv[j] = u_Av; vAv[j] = v_Av; } else if(t>=START_VELFLUC){ u_Av = uAv[j]; v_Av = vAv[j]; u_fluc = ufluc[j]; v_fluc = vfluc[j]; vel_flucLR(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,u_Av,v_Av,u_fluc,v_fluc,t+1-LRFACTOR); ufluc[j] = u_fluc; vfluc[j] = v_fluc; } } fout[f_memLR(0 ,x,y,z,pitch)] = f0 ; fout[f_memLR(1 ,x,y,z,pitch)] = f1 ; fout[f_memLR(2 ,x,y,z,pitch)] = f2 ; fout[f_memLR(3 ,x,y,z,pitch)] = f3 ; fout[f_memLR(4 ,x,y,z,pitch)] = f4 ; fout[f_memLR(5 ,x,y,z,pitch)] = f5 ; fout[f_memLR(6 ,x,y,z,pitch)] = f6 ; fout[f_memLR(7 ,x,y,z,pitch)] = f7 ; fout[f_memLR(8 ,x,y,z,pitch)] = f8 ; fout[f_memLR(9 ,x,y,z,pitch)] = f9 ; fout[f_memLR(10,x,y,z,pitch)] = f10; fout[f_memLR(11,x,y,z,pitch)] = f11; fout[f_memLR(12,x,y,z,pitch)] = f12; fout[f_memLR(13,x,y,z,pitch)] = f13; fout[f_memLR(14,x,y,z,pitch)] = f14; fout[f_memLR(15,x,y,z,pitch)] = f15; 
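	// (write-back continues below) Both the fringe branch and this interior branch end by
	// storing all 19 post-collision populations to fout, which appears to be the opposite
	// half of an A/B ping-pong buffer pair; the ABDC/BADC/ABCD/BACD kernel names seem to
	// encode which buffer is read and which is written on a given sub-step.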
fout[f_memLR(16,x,y,z,pitch)] = f16; fout[f_memLR(17,x,y,z,pitch)] = f17; fout[f_memLR(18,x,y,z,pitch)] = f18; } }//end else (not at edge of LR) } __global__ void LR_d_BADC2(float* fin, float* fout, float omega, size_t pitch, float SF, int n, int t,float *uAv, float *vAv, float *ufluc, float *vfluc) { int x = threadIdx.x+blockIdx.x*blockDim.x;//coord in linear mem int y = threadIdx.y+blockIdx.y*blockDim.y; int z = threadIdx.z+blockIdx.z*blockDim.z; float xcoord = LRX0+x*LRFACTOR;//+0.5f is because textures are stored in a voxel centered fashion. we need to change this to vertex centered. float ycoord = LRY0+y*LRFACTOR; float zcoord = LRZ0+z*LRFACTOR;//dont need to +0.5f because z is not using texture interpolation // int zminus = int(zcoord); // int zplus = zminus+1; int j = x+y*pitch+z*YLRDIM*pitch;//index on padded mem (pitch in elements) float f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18; int im = ImageFcn(xcoord,ycoord,zcoord); // if(x < n || x > XLRDIM-1-n || y < n || y > YLRDIM-1-n || z < n || z > ZLRDIM-1-n) // { // //no interp // } // else{ f0 = fin[j]; f1 = fin[f_memLR(1 ,x-1,y ,z ,pitch)]; f3 = fin[f_memLR(3 ,x+1,y ,z ,pitch)]; f2 = fin[f_memLR(2 ,x ,y-1,z ,pitch)]; f5 = fin[f_memLR(5 ,x-1,y-1,z ,pitch)]; f6 = fin[f_memLR(6 ,x+1,y-1,z ,pitch)]; f4 = fin[f_memLR(4 ,x ,y+1,z ,pitch)]; f7 = fin[f_memLR(7 ,x+1,y+1,z ,pitch)]; f8 = fin[f_memLR(8 ,x-1,y+1,z ,pitch)]; if(z != 0){ f9 = fin[f_memLR(9 ,x ,y ,z-1,pitch)]; f10= fin[f_memLR(10,x-1,y ,z-1,pitch)]; f11= fin[f_memLR(11,x ,y-1,z-1,pitch)]; f12= fin[f_memLR(12,x+1,y ,z-1,pitch)]; f13= fin[f_memLR(13,x ,y+1,z-1,pitch)]; } else{ f9 = fin[f_memLR(9 ,x ,y ,ZLRDIM-1,pitch)]; f10= fin[f_memLR(10,dmax_p(x-1,XLRDIM),y ,ZLRDIM-1,pitch)]; f11= fin[f_memLR(11,x ,dmax_p(y-1,YLRDIM),ZLRDIM-1,pitch)]; f12= fin[f_memLR(12,dmin_p(x+1,XLRDIM),y ,ZLRDIM-1,pitch)]; f13= fin[f_memLR(13,x ,dmin_p(y+1,YLRDIM),ZLRDIM-1,pitch)]; } if(z != ZLRDIM-1){ f14= fin[f_memLR(14,x ,y ,z+1,pitch)]; f15= fin[f_memLR(15,x-1,y ,z+1,pitch)]; f16= fin[f_memLR(16,x ,y-1,z+1,pitch)]; f17= fin[f_memLR(17,x+1,y ,z+1,pitch)]; f18= fin[f_memLR(18,x ,y+1,z+1,pitch)]; } else{ f14= fin[f_memLR(14,x ,y ,0 ,pitch)]; f15= fin[f_memLR(15,dmax_p(x-1,XLRDIM),y ,0 ,pitch)]; f16= fin[f_memLR(16,x ,dmax_p(y-1,YLRDIM),0 ,pitch)]; f17= fin[f_memLR(17,dmin_p(x+1,XLRDIM),y ,0 ,pitch)]; f18= fin[f_memLR(18,x ,dmin_p(y+1,YLRDIM),0 ,pitch)]; } if(im == 1 || im ==10){//BB fout[f_memLR(1 ,x,y,z,pitch)] = f3 ; fout[f_memLR(2 ,x,y,z,pitch)] = f4 ; fout[f_memLR(3 ,x,y,z,pitch)] = f1 ; fout[f_memLR(4 ,x,y,z,pitch)] = f2 ; fout[f_memLR(5 ,x,y,z,pitch)] = f7 ; fout[f_memLR(6 ,x,y,z,pitch)] = f8 ; fout[f_memLR(7 ,x,y,z,pitch)] = f5 ; fout[f_memLR(8 ,x,y,z,pitch)] = f6 ; fout[f_memLR(9 ,x,y,z,pitch)] = f14; fout[f_memLR(10,x,y,z,pitch)] = f17; fout[f_memLR(11,x,y,z,pitch)] = f18; fout[f_memLR(12,x,y,z,pitch)] = f15; fout[f_memLR(13,x,y,z,pitch)] = f16; fout[f_memLR(14,x,y,z,pitch)] = f9 ; fout[f_memLR(15,x,y,z,pitch)] = f12; fout[f_memLR(16,x,y,z,pitch)] = f13; fout[f_memLR(17,x,y,z,pitch)] = f10; fout[f_memLR(18,x,y,z,pitch)] = f11; } else{ if(MODEL == "MRT") mrt_collide(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,omega); else if(MODEL == "BGK") bgk_collide(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,omega); if(VELAV == "YES"){ float u_Av, v_Av, u_fluc, v_fluc; if(t>=START_VELAV && t<START_VELFLUC){ u_Av = uAv[j]; v_Av = vAv[j]; vel_avLR(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,u_Av,v_Av,t+LRFACTOR*n); uAv[j] = 
u_Av; vAv[j] = v_Av; } else if(t>=START_VELFLUC){ u_Av = uAv[j]; v_Av = vAv[j]; u_fluc = ufluc[j]; v_fluc = vfluc[j]; vel_flucLR(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,u_Av,v_Av,u_fluc,v_fluc,t+LRFACTOR*n); ufluc[j] = u_fluc; vfluc[j] = v_fluc; } } fout[f_memLR(0 ,x,y,z,pitch)] = f0 ; fout[f_memLR(1 ,x,y,z,pitch)] = f1 ; fout[f_memLR(2 ,x,y,z,pitch)] = f2 ; fout[f_memLR(3 ,x,y,z,pitch)] = f3 ; fout[f_memLR(4 ,x,y,z,pitch)] = f4 ; fout[f_memLR(5 ,x,y,z,pitch)] = f5 ; fout[f_memLR(6 ,x,y,z,pitch)] = f6 ; fout[f_memLR(7 ,x,y,z,pitch)] = f7 ; fout[f_memLR(8 ,x,y,z,pitch)] = f8 ; fout[f_memLR(9 ,x,y,z,pitch)] = f9 ; fout[f_memLR(10,x,y,z,pitch)] = f10; fout[f_memLR(11,x,y,z,pitch)] = f11; fout[f_memLR(12,x,y,z,pitch)] = f12; fout[f_memLR(13,x,y,z,pitch)] = f13; fout[f_memLR(14,x,y,z,pitch)] = f14; fout[f_memLR(15,x,y,z,pitch)] = f15; fout[f_memLR(16,x,y,z,pitch)] = f16; fout[f_memLR(17,x,y,z,pitch)] = f17; fout[f_memLR(18,x,y,z,pitch)] = f18; } // }//end else (not at edge of LR) } __global__ void LR_d_BADC_Interp(float* fin, float* fout, float omega, size_t pitch, float SF, int t,float *uAv, float *vAv, float *ufluc, float *vfluc) { int x = threadIdx.x+blockIdx.x*blockDim.x;//coord in linear mem int y = threadIdx.y+blockIdx.y*blockDim.y; int z = threadIdx.z+blockIdx.z*blockDim.z; float xcoord = LRX0+x*LRFACTOR;//+0.5f is because textures are stored in a voxel centered fashion. we need to change this to vertex centered. float ycoord = LRY0+y*LRFACTOR; float zcoord = LRZ0+z*LRFACTOR;//dont need to +0.5f because z is not using texture interpolation int zminus = int(zcoord); int zplus = zminus+1; int j = x+y*pitch+z*YLRDIM*pitch;//index on padded mem (pitch in elements) float f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18; int im = ImageFcn(xcoord,ycoord,zcoord); if(((x < LRLEVEL || x > XLRDIM-1-LRLEVEL || y < LRLEVEL || y > YLRDIM-1-LRLEVEL || z < LRLEVEL || z > ZLRDIM-1-LRLEVEL) && ZPERIODIC == "NO") ||((x < LRLEVEL || x > XLRDIM-1-LRLEVEL || y < LRLEVEL || y > YLRDIM-1-LRLEVEL) && ZPERIODIC == "YES")) { if(ZPERIODIC == "YES"){ if(zcoord < 0){ //if zcoord=-0.25f, 1+zcoord=0.75f f0 = (1.f+zcoord)*tex2D(texRef_f0A ,xcoord+0.5f,ycoord+0.5f+YDIM*(zminus))+(-zcoord)*tex2D(texRef_f0A ,xcoord+0.5f,ycoord+0.5f+YDIM*(ZDIM-1)); f2 = (1.f+zcoord)*tex2D(texRef_f2A ,xcoord+0.5f,ycoord+0.5f+YDIM*(zminus))+(-zcoord)*tex2D(texRef_f2A ,xcoord+0.5f,ycoord+0.5f+YDIM*(ZDIM-1)); f4 = (1.f+zcoord)*tex2D(texRef_f4A ,xcoord+0.5f,ycoord+0.5f+YDIM*(zminus))+(-zcoord)*tex2D(texRef_f4A ,xcoord+0.5f,ycoord+0.5f+YDIM*(ZDIM-1)); f9 = (1.f+zcoord)*tex2D(texRef_f9A ,xcoord+0.5f,ycoord+0.5f+YDIM*(zminus))+(-zcoord)*tex2D(texRef_f9A ,xcoord+0.5f,ycoord+0.5f+YDIM*(ZDIM-1)); f11= (1.f+zcoord)*tex2D(texRef_f11A,xcoord+0.5f,ycoord+0.5f+YDIM*(zminus))+(-zcoord)*tex2D(texRef_f11A,xcoord+0.5f,ycoord+0.5f+YDIM*(ZDIM-1)); f13= (1.f+zcoord)*tex2D(texRef_f13A,xcoord+0.5f,ycoord+0.5f+YDIM*(zminus))+(-zcoord)*tex2D(texRef_f13A,xcoord+0.5f,ycoord+0.5f+YDIM*(ZDIM-1)); f14= (1.f+zcoord)*tex2D(texRef_f14A,xcoord+0.5f,ycoord+0.5f+YDIM*(zminus))+(-zcoord)*tex2D(texRef_f14A,xcoord+0.5f,ycoord+0.5f+YDIM*(ZDIM-1)); f16= (1.f+zcoord)*tex2D(texRef_f16A,xcoord+0.5f,ycoord+0.5f+YDIM*(zminus))+(-zcoord)*tex2D(texRef_f16A,xcoord+0.5f,ycoord+0.5f+YDIM*(ZDIM-1)); f18= (1.f+zcoord)*tex2D(texRef_f18A,xcoord+0.5f,ycoord+0.5f+YDIM*(zminus))+(-zcoord)*tex2D(texRef_f18A,xcoord+0.5f,ycoord+0.5f+YDIM*(ZDIM-1)); f1 = (1.f+zcoord)*tex2D(texRef_f1A ,xcoord+0.5f,ycoord+0.5f+YDIM*(zminus))+(-zcoord)*tex2D(texRef_f1A 
,xcoord+0.5f,ycoord+0.5f+YDIM*(ZDIM-1)); f3 = (1.f+zcoord)*tex2D(texRef_f3A ,xcoord+0.5f,ycoord+0.5f+YDIM*(zminus))+(-zcoord)*tex2D(texRef_f3A ,xcoord+0.5f,ycoord+0.5f+YDIM*(ZDIM-1)); f5 = (1.f+zcoord)*tex2D(texRef_f5A ,xcoord+0.5f,ycoord+0.5f+YDIM*(zminus))+(-zcoord)*tex2D(texRef_f5A ,xcoord+0.5f,ycoord+0.5f+YDIM*(ZDIM-1)); f6 = (1.f+zcoord)*tex2D(texRef_f6A ,xcoord+0.5f,ycoord+0.5f+YDIM*(zminus))+(-zcoord)*tex2D(texRef_f6A ,xcoord+0.5f,ycoord+0.5f+YDIM*(ZDIM-1)); f7 = (1.f+zcoord)*tex2D(texRef_f7A ,xcoord+0.5f,ycoord+0.5f+YDIM*(zminus))+(-zcoord)*tex2D(texRef_f7A ,xcoord+0.5f,ycoord+0.5f+YDIM*(ZDIM-1)); f8 = (1.f+zcoord)*tex2D(texRef_f8A ,xcoord+0.5f,ycoord+0.5f+YDIM*(zminus))+(-zcoord)*tex2D(texRef_f8A ,xcoord+0.5f,ycoord+0.5f+YDIM*(ZDIM-1)); f15= (1.f+zcoord)*tex2D(texRef_f15A,xcoord+0.5f,ycoord+0.5f+YDIM*(zminus))+(-zcoord)*tex2D(texRef_f15A,xcoord+0.5f,ycoord+0.5f+YDIM*(ZDIM-1)); f17= (1.f+zcoord)*tex2D(texRef_f17A,xcoord+0.5f,ycoord+0.5f+YDIM*(zminus))+(-zcoord)*tex2D(texRef_f17A,xcoord+0.5f,ycoord+0.5f+YDIM*(ZDIM-1)); f10= (1.f+zcoord)*tex2D(texRef_f10A,xcoord+0.5f,ycoord+0.5f+YDIM*(zminus))+(-zcoord)*tex2D(texRef_f10A,xcoord+0.5f,ycoord+0.5f+YDIM*(ZDIM-1)); f12= (1.f+zcoord)*tex2D(texRef_f12A,xcoord+0.5f,ycoord+0.5f+YDIM*(zminus))+(-zcoord)*tex2D(texRef_f12A,xcoord+0.5f,ycoord+0.5f+YDIM*(ZDIM-1)); } else if(zcoord > ZDIM-1){ f0 = (zplus-zcoord)*tex2D(texRef_f0A ,xcoord+0.5f,ycoord+0.5f+YDIM*(zminus))+(zcoord-zminus)*tex2D(texRef_f0A ,xcoord+0.5f,ycoord+0.5f+YDIM*(0)); f2 = (zplus-zcoord)*tex2D(texRef_f2A ,xcoord+0.5f,ycoord+0.5f+YDIM*(zminus))+(zcoord-zminus)*tex2D(texRef_f2A ,xcoord+0.5f,ycoord+0.5f+YDIM*(0)); f4 = (zplus-zcoord)*tex2D(texRef_f4A ,xcoord+0.5f,ycoord+0.5f+YDIM*(zminus))+(zcoord-zminus)*tex2D(texRef_f4A ,xcoord+0.5f,ycoord+0.5f+YDIM*(0)); f9 = (zplus-zcoord)*tex2D(texRef_f9A ,xcoord+0.5f,ycoord+0.5f+YDIM*(zminus))+(zcoord-zminus)*tex2D(texRef_f9A ,xcoord+0.5f,ycoord+0.5f+YDIM*(0)); f11= (zplus-zcoord)*tex2D(texRef_f11A,xcoord+0.5f,ycoord+0.5f+YDIM*(zminus))+(zcoord-zminus)*tex2D(texRef_f11A,xcoord+0.5f,ycoord+0.5f+YDIM*(0)); f13= (zplus-zcoord)*tex2D(texRef_f13A,xcoord+0.5f,ycoord+0.5f+YDIM*(zminus))+(zcoord-zminus)*tex2D(texRef_f13A,xcoord+0.5f,ycoord+0.5f+YDIM*(0)); f14= (zplus-zcoord)*tex2D(texRef_f14A,xcoord+0.5f,ycoord+0.5f+YDIM*(zminus))+(zcoord-zminus)*tex2D(texRef_f14A,xcoord+0.5f,ycoord+0.5f+YDIM*(0)); f16= (zplus-zcoord)*tex2D(texRef_f16A,xcoord+0.5f,ycoord+0.5f+YDIM*(zminus))+(zcoord-zminus)*tex2D(texRef_f16A,xcoord+0.5f,ycoord+0.5f+YDIM*(0)); f18= (zplus-zcoord)*tex2D(texRef_f18A,xcoord+0.5f,ycoord+0.5f+YDIM*(zminus))+(zcoord-zminus)*tex2D(texRef_f18A,xcoord+0.5f,ycoord+0.5f+YDIM*(0)); f1 = (zplus-zcoord)*tex2D(texRef_f1A ,xcoord+0.5f,ycoord+0.5f+YDIM*(zminus))+(zcoord-zminus)*tex2D(texRef_f1A ,xcoord+0.5f,ycoord+0.5f+YDIM*(0)); f3 = (zplus-zcoord)*tex2D(texRef_f3A ,xcoord+0.5f,ycoord+0.5f+YDIM*(zminus))+(zcoord-zminus)*tex2D(texRef_f3A ,xcoord+0.5f,ycoord+0.5f+YDIM*(0)); f5 = (zplus-zcoord)*tex2D(texRef_f5A ,xcoord+0.5f,ycoord+0.5f+YDIM*(zminus))+(zcoord-zminus)*tex2D(texRef_f5A ,xcoord+0.5f,ycoord+0.5f+YDIM*(0)); f6 = (zplus-zcoord)*tex2D(texRef_f6A ,xcoord+0.5f,ycoord+0.5f+YDIM*(zminus))+(zcoord-zminus)*tex2D(texRef_f6A ,xcoord+0.5f,ycoord+0.5f+YDIM*(0)); f7 = (zplus-zcoord)*tex2D(texRef_f7A ,xcoord+0.5f,ycoord+0.5f+YDIM*(zminus))+(zcoord-zminus)*tex2D(texRef_f7A ,xcoord+0.5f,ycoord+0.5f+YDIM*(0)); f8 = (zplus-zcoord)*tex2D(texRef_f8A ,xcoord+0.5f,ycoord+0.5f+YDIM*(zminus))+(zcoord-zminus)*tex2D(texRef_f8A ,xcoord+0.5f,ycoord+0.5f+YDIM*(0)); f15= 
(zplus-zcoord)*tex2D(texRef_f15A,xcoord+0.5f,ycoord+0.5f+YDIM*(zminus))+(zcoord-zminus)*tex2D(texRef_f15A,xcoord+0.5f,ycoord+0.5f+YDIM*(0)); f17= (zplus-zcoord)*tex2D(texRef_f17A,xcoord+0.5f,ycoord+0.5f+YDIM*(zminus))+(zcoord-zminus)*tex2D(texRef_f17A,xcoord+0.5f,ycoord+0.5f+YDIM*(0)); f10= (zplus-zcoord)*tex2D(texRef_f10A,xcoord+0.5f,ycoord+0.5f+YDIM*(zminus))+(zcoord-zminus)*tex2D(texRef_f10A,xcoord+0.5f,ycoord+0.5f+YDIM*(0)); f12= (zplus-zcoord)*tex2D(texRef_f12A,xcoord+0.5f,ycoord+0.5f+YDIM*(zminus))+(zcoord-zminus)*tex2D(texRef_f12A,xcoord+0.5f,ycoord+0.5f+YDIM*(0)); } else{ //interpolate for next time step. from A //YDIM and not YLRDIM f0 = (zplus-zcoord)*tex2D(texRef_f0A ,xcoord+0.5f,ycoord+0.5f+YDIM*(zminus))+(zcoord-zminus)*tex2D(texRef_f0A ,xcoord+0.5f,ycoord+0.5f+YDIM*(zplus)); f2 = (zplus-zcoord)*tex2D(texRef_f2A ,xcoord+0.5f,ycoord+0.5f+YDIM*(zminus))+(zcoord-zminus)*tex2D(texRef_f2A ,xcoord+0.5f,ycoord+0.5f+YDIM*(zplus)); f4 = (zplus-zcoord)*tex2D(texRef_f4A ,xcoord+0.5f,ycoord+0.5f+YDIM*(zminus))+(zcoord-zminus)*tex2D(texRef_f4A ,xcoord+0.5f,ycoord+0.5f+YDIM*(zplus)); f9 = (zplus-zcoord)*tex2D(texRef_f9A ,xcoord+0.5f,ycoord+0.5f+YDIM*(zminus))+(zcoord-zminus)*tex2D(texRef_f9A ,xcoord+0.5f,ycoord+0.5f+YDIM*(zplus)); f11= (zplus-zcoord)*tex2D(texRef_f11A,xcoord+0.5f,ycoord+0.5f+YDIM*(zminus))+(zcoord-zminus)*tex2D(texRef_f11A,xcoord+0.5f,ycoord+0.5f+YDIM*(zplus)); f13= (zplus-zcoord)*tex2D(texRef_f13A,xcoord+0.5f,ycoord+0.5f+YDIM*(zminus))+(zcoord-zminus)*tex2D(texRef_f13A,xcoord+0.5f,ycoord+0.5f+YDIM*(zplus)); f14= (zplus-zcoord)*tex2D(texRef_f14A,xcoord+0.5f,ycoord+0.5f+YDIM*(zminus))+(zcoord-zminus)*tex2D(texRef_f14A,xcoord+0.5f,ycoord+0.5f+YDIM*(zplus)); f16= (zplus-zcoord)*tex2D(texRef_f16A,xcoord+0.5f,ycoord+0.5f+YDIM*(zminus))+(zcoord-zminus)*tex2D(texRef_f16A,xcoord+0.5f,ycoord+0.5f+YDIM*(zplus)); f18= (zplus-zcoord)*tex2D(texRef_f18A,xcoord+0.5f,ycoord+0.5f+YDIM*(zminus))+(zcoord-zminus)*tex2D(texRef_f18A,xcoord+0.5f,ycoord+0.5f+YDIM*(zplus)); f1 = (zplus-zcoord)*tex2D(texRef_f1A ,xcoord+0.5f,ycoord+0.5f+YDIM*(zminus))+(zcoord-zminus)*tex2D(texRef_f1A ,xcoord+0.5f,ycoord+0.5f+YDIM*(zplus)); f3 = (zplus-zcoord)*tex2D(texRef_f3A ,xcoord+0.5f,ycoord+0.5f+YDIM*(zminus))+(zcoord-zminus)*tex2D(texRef_f3A ,xcoord+0.5f,ycoord+0.5f+YDIM*(zplus)); f5 = (zplus-zcoord)*tex2D(texRef_f5A ,xcoord+0.5f,ycoord+0.5f+YDIM*(zminus))+(zcoord-zminus)*tex2D(texRef_f5A ,xcoord+0.5f,ycoord+0.5f+YDIM*(zplus)); f6 = (zplus-zcoord)*tex2D(texRef_f6A ,xcoord+0.5f,ycoord+0.5f+YDIM*(zminus))+(zcoord-zminus)*tex2D(texRef_f6A ,xcoord+0.5f,ycoord+0.5f+YDIM*(zplus)); f7 = (zplus-zcoord)*tex2D(texRef_f7A ,xcoord+0.5f,ycoord+0.5f+YDIM*(zminus))+(zcoord-zminus)*tex2D(texRef_f7A ,xcoord+0.5f,ycoord+0.5f+YDIM*(zplus)); f8 = (zplus-zcoord)*tex2D(texRef_f8A ,xcoord+0.5f,ycoord+0.5f+YDIM*(zminus))+(zcoord-zminus)*tex2D(texRef_f8A ,xcoord+0.5f,ycoord+0.5f+YDIM*(zplus)); f15= (zplus-zcoord)*tex2D(texRef_f15A,xcoord+0.5f,ycoord+0.5f+YDIM*(zminus))+(zcoord-zminus)*tex2D(texRef_f15A,xcoord+0.5f,ycoord+0.5f+YDIM*(zplus)); f17= (zplus-zcoord)*tex2D(texRef_f17A,xcoord+0.5f,ycoord+0.5f+YDIM*(zminus))+(zcoord-zminus)*tex2D(texRef_f17A,xcoord+0.5f,ycoord+0.5f+YDIM*(zplus)); f10= (zplus-zcoord)*tex2D(texRef_f10A,xcoord+0.5f,ycoord+0.5f+YDIM*(zminus))+(zcoord-zminus)*tex2D(texRef_f10A,xcoord+0.5f,ycoord+0.5f+YDIM*(zplus)); f12= (zplus-zcoord)*tex2D(texRef_f12A,xcoord+0.5f,ycoord+0.5f+YDIM*(zminus))+(zcoord-zminus)*tex2D(texRef_f12A,xcoord+0.5f,ycoord+0.5f+YDIM*(zplus)); } } else{ //interpolate for next time step. 
from A //YDIM and not YLRDIM f0 = (zplus-zcoord)*tex2D(texRef_f0A ,xcoord+0.5f,ycoord+0.5f+YDIM*(zminus))+(zcoord-zminus)*tex2D(texRef_f0A ,xcoord+0.5f,ycoord+0.5f+YDIM*(zplus)); f2 = (zplus-zcoord)*tex2D(texRef_f2A ,xcoord+0.5f,ycoord+0.5f+YDIM*(zminus))+(zcoord-zminus)*tex2D(texRef_f2A ,xcoord+0.5f,ycoord+0.5f+YDIM*(zplus)); f4 = (zplus-zcoord)*tex2D(texRef_f4A ,xcoord+0.5f,ycoord+0.5f+YDIM*(zminus))+(zcoord-zminus)*tex2D(texRef_f4A ,xcoord+0.5f,ycoord+0.5f+YDIM*(zplus)); f9 = (zplus-zcoord)*tex2D(texRef_f9A ,xcoord+0.5f,ycoord+0.5f+YDIM*(zminus))+(zcoord-zminus)*tex2D(texRef_f9A ,xcoord+0.5f,ycoord+0.5f+YDIM*(zplus)); f11= (zplus-zcoord)*tex2D(texRef_f11A,xcoord+0.5f,ycoord+0.5f+YDIM*(zminus))+(zcoord-zminus)*tex2D(texRef_f11A,xcoord+0.5f,ycoord+0.5f+YDIM*(zplus)); f13= (zplus-zcoord)*tex2D(texRef_f13A,xcoord+0.5f,ycoord+0.5f+YDIM*(zminus))+(zcoord-zminus)*tex2D(texRef_f13A,xcoord+0.5f,ycoord+0.5f+YDIM*(zplus)); f14= (zplus-zcoord)*tex2D(texRef_f14A,xcoord+0.5f,ycoord+0.5f+YDIM*(zminus))+(zcoord-zminus)*tex2D(texRef_f14A,xcoord+0.5f,ycoord+0.5f+YDIM*(zplus)); f16= (zplus-zcoord)*tex2D(texRef_f16A,xcoord+0.5f,ycoord+0.5f+YDIM*(zminus))+(zcoord-zminus)*tex2D(texRef_f16A,xcoord+0.5f,ycoord+0.5f+YDIM*(zplus)); f18= (zplus-zcoord)*tex2D(texRef_f18A,xcoord+0.5f,ycoord+0.5f+YDIM*(zminus))+(zcoord-zminus)*tex2D(texRef_f18A,xcoord+0.5f,ycoord+0.5f+YDIM*(zplus)); f1 = (zplus-zcoord)*tex2D(texRef_f1A ,xcoord+0.5f,ycoord+0.5f+YDIM*(zminus))+(zcoord-zminus)*tex2D(texRef_f1A ,xcoord+0.5f,ycoord+0.5f+YDIM*(zplus)); f3 = (zplus-zcoord)*tex2D(texRef_f3A ,xcoord+0.5f,ycoord+0.5f+YDIM*(zminus))+(zcoord-zminus)*tex2D(texRef_f3A ,xcoord+0.5f,ycoord+0.5f+YDIM*(zplus)); f5 = (zplus-zcoord)*tex2D(texRef_f5A ,xcoord+0.5f,ycoord+0.5f+YDIM*(zminus))+(zcoord-zminus)*tex2D(texRef_f5A ,xcoord+0.5f,ycoord+0.5f+YDIM*(zplus)); f6 = (zplus-zcoord)*tex2D(texRef_f6A ,xcoord+0.5f,ycoord+0.5f+YDIM*(zminus))+(zcoord-zminus)*tex2D(texRef_f6A ,xcoord+0.5f,ycoord+0.5f+YDIM*(zplus)); f7 = (zplus-zcoord)*tex2D(texRef_f7A ,xcoord+0.5f,ycoord+0.5f+YDIM*(zminus))+(zcoord-zminus)*tex2D(texRef_f7A ,xcoord+0.5f,ycoord+0.5f+YDIM*(zplus)); f8 = (zplus-zcoord)*tex2D(texRef_f8A ,xcoord+0.5f,ycoord+0.5f+YDIM*(zminus))+(zcoord-zminus)*tex2D(texRef_f8A ,xcoord+0.5f,ycoord+0.5f+YDIM*(zplus)); f15= (zplus-zcoord)*tex2D(texRef_f15A,xcoord+0.5f,ycoord+0.5f+YDIM*(zminus))+(zcoord-zminus)*tex2D(texRef_f15A,xcoord+0.5f,ycoord+0.5f+YDIM*(zplus)); f17= (zplus-zcoord)*tex2D(texRef_f17A,xcoord+0.5f,ycoord+0.5f+YDIM*(zminus))+(zcoord-zminus)*tex2D(texRef_f17A,xcoord+0.5f,ycoord+0.5f+YDIM*(zplus)); f10= (zplus-zcoord)*tex2D(texRef_f10A,xcoord+0.5f,ycoord+0.5f+YDIM*(zminus))+(zcoord-zminus)*tex2D(texRef_f10A,xcoord+0.5f,ycoord+0.5f+YDIM*(zplus)); f12= (zplus-zcoord)*tex2D(texRef_f12A,xcoord+0.5f,ycoord+0.5f+YDIM*(zminus))+(zcoord-zminus)*tex2D(texRef_f12A,xcoord+0.5f,ycoord+0.5f+YDIM*(zplus)); } if(MODEL == "MRT") mrt_scale_cf(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,SF); else bgk_scale_cf(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,SF); fout[f_memLR(0 ,x,y,z,pitch)] = f0 ; fout[f_memLR(1 ,x,y,z,pitch)] = f1 ; fout[f_memLR(2 ,x,y,z,pitch)] = f2 ; fout[f_memLR(3 ,x,y,z,pitch)] = f3 ; fout[f_memLR(4 ,x,y,z,pitch)] = f4 ; fout[f_memLR(5 ,x,y,z,pitch)] = f5 ; fout[f_memLR(6 ,x,y,z,pitch)] = f6 ; fout[f_memLR(7 ,x,y,z,pitch)] = f7 ; fout[f_memLR(8 ,x,y,z,pitch)] = f8 ; fout[f_memLR(9 ,x,y,z,pitch)] = f9 ; fout[f_memLR(10,x,y,z,pitch)] = f10; fout[f_memLR(11,x,y,z,pitch)] = f11; fout[f_memLR(12,x,y,z,pitch)] = f12; 
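	// Same fringe reconstruction as LR_d_ABDC_Interp, but sampling the "A" set of textures:
	// the two interpolation kernels appear to alternate which coarse buffer they read so the
	// refined patch always interpolates from the coarse step that was just completed.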
fout[f_memLR(13,x,y,z,pitch)] = f13; fout[f_memLR(14,x,y,z,pitch)] = f14; fout[f_memLR(15,x,y,z,pitch)] = f15; fout[f_memLR(16,x,y,z,pitch)] = f16; fout[f_memLR(17,x,y,z,pitch)] = f17; fout[f_memLR(18,x,y,z,pitch)] = f18; } else{ f0 = fin[j]; f1 = fin[f_memLR(1 ,x-1,y ,z ,pitch)]; f3 = fin[f_memLR(3 ,x+1,y ,z ,pitch)]; f2 = fin[f_memLR(2 ,x ,y-1,z ,pitch)]; f5 = fin[f_memLR(5 ,x-1,y-1,z ,pitch)]; f6 = fin[f_memLR(6 ,x+1,y-1,z ,pitch)]; f4 = fin[f_memLR(4 ,x ,y+1,z ,pitch)]; f7 = fin[f_memLR(7 ,x+1,y+1,z ,pitch)]; f8 = fin[f_memLR(8 ,x-1,y+1,z ,pitch)]; if(z != 0){ f9 = fin[f_memLR(9 ,x ,y ,z-1,pitch)]; f10= fin[f_memLR(10,x-1,y ,z-1,pitch)]; f11= fin[f_memLR(11,x ,y-1,z-1,pitch)]; f12= fin[f_memLR(12,x+1,y ,z-1,pitch)]; f13= fin[f_memLR(13,x ,y+1,z-1,pitch)]; } else{ f9 = fin[f_memLR(9 ,x ,y ,ZLRDIM-1,pitch)]; f10= fin[f_memLR(10,dmax_p(x-1,XLRDIM),y ,ZLRDIM-1,pitch)]; f11= fin[f_memLR(11,x ,dmax_p(y-1,YLRDIM),ZLRDIM-1,pitch)]; f12= fin[f_memLR(12,dmin_p(x+1,XLRDIM),y ,ZLRDIM-1,pitch)]; f13= fin[f_memLR(13,x ,dmin_p(y+1,YLRDIM),ZLRDIM-1,pitch)]; } if(z != ZLRDIM-1){ f14= fin[f_memLR(14,x ,y ,z+1,pitch)]; f15= fin[f_memLR(15,x-1,y ,z+1,pitch)]; f16= fin[f_memLR(16,x ,y-1,z+1,pitch)]; f17= fin[f_memLR(17,x+1,y ,z+1,pitch)]; f18= fin[f_memLR(18,x ,y+1,z+1,pitch)]; } else{ f14= fin[f_memLR(14,x ,y ,0 ,pitch)]; f15= fin[f_memLR(15,dmax_p(x-1,XLRDIM),y ,0 ,pitch)]; f16= fin[f_memLR(16,x ,dmax_p(y-1,YLRDIM),0 ,pitch)]; f17= fin[f_memLR(17,dmin_p(x+1,XLRDIM),y ,0 ,pitch)]; f18= fin[f_memLR(18,x ,dmin_p(y+1,YLRDIM),0 ,pitch)]; } if(im == 1 || im ==10){//BB fout[f_memLR(1 ,x,y,z,pitch)] = f3 ; fout[f_memLR(2 ,x,y,z,pitch)] = f4 ; fout[f_memLR(3 ,x,y,z,pitch)] = f1 ; fout[f_memLR(4 ,x,y,z,pitch)] = f2 ; fout[f_memLR(5 ,x,y,z,pitch)] = f7 ; fout[f_memLR(6 ,x,y,z,pitch)] = f8 ; fout[f_memLR(7 ,x,y,z,pitch)] = f5 ; fout[f_memLR(8 ,x,y,z,pitch)] = f6 ; fout[f_memLR(9 ,x,y,z,pitch)] = f14; fout[f_memLR(10,x,y,z,pitch)] = f17; fout[f_memLR(11,x,y,z,pitch)] = f18; fout[f_memLR(12,x,y,z,pitch)] = f15; fout[f_memLR(13,x,y,z,pitch)] = f16; fout[f_memLR(14,x,y,z,pitch)] = f9 ; fout[f_memLR(15,x,y,z,pitch)] = f12; fout[f_memLR(16,x,y,z,pitch)] = f13; fout[f_memLR(17,x,y,z,pitch)] = f10; fout[f_memLR(18,x,y,z,pitch)] = f11; } else{ if(MODEL == "MRT") mrt_collide(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,omega); else if(MODEL == "BGK") bgk_collide(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,omega); if(VELAV == "YES"){ float u_Av, v_Av, u_fluc, v_fluc; if(t>=START_VELAV && t<START_VELFLUC){ u_Av = uAv[j]; v_Av = vAv[j]; vel_avLR(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,u_Av,v_Av,t+1-LRFACTOR); uAv[j] = u_Av; vAv[j] = v_Av; } else if(t>=START_VELFLUC){ u_Av = uAv[j]; v_Av = vAv[j]; u_fluc = ufluc[j]; v_fluc = vfluc[j]; vel_flucLR(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,u_Av,v_Av,u_fluc,v_fluc,t+1-LRFACTOR); ufluc[j] = u_fluc; vfluc[j] = v_fluc; } } fout[f_memLR(0 ,x,y,z,pitch)] = f0 ; fout[f_memLR(1 ,x,y,z,pitch)] = f1 ; fout[f_memLR(2 ,x,y,z,pitch)] = f2 ; fout[f_memLR(3 ,x,y,z,pitch)] = f3 ; fout[f_memLR(4 ,x,y,z,pitch)] = f4 ; fout[f_memLR(5 ,x,y,z,pitch)] = f5 ; fout[f_memLR(6 ,x,y,z,pitch)] = f6 ; fout[f_memLR(7 ,x,y,z,pitch)] = f7 ; fout[f_memLR(8 ,x,y,z,pitch)] = f8 ; fout[f_memLR(9 ,x,y,z,pitch)] = f9 ; fout[f_memLR(10,x,y,z,pitch)] = f10; fout[f_memLR(11,x,y,z,pitch)] = f11; fout[f_memLR(12,x,y,z,pitch)] = f12; fout[f_memLR(13,x,y,z,pitch)] = f13; fout[f_memLR(14,x,y,z,pitch)] = f14; 
fout[f_memLR(15,x,y,z,pitch)] = f15; fout[f_memLR(16,x,y,z,pitch)] = f16; fout[f_memLR(17,x,y,z,pitch)] = f17; fout[f_memLR(18,x,y,z,pitch)] = f18; } }//end else (not at edge of LR) } __global__ void LR_d_ABCD(float* fin, float* fout, float omega, size_t pitch,//pitch in elements int t,float *uAv, float *vAv, float *ufluc, float *vfluc) { int x = threadIdx.x+blockIdx.x*blockDim.x;//coord in linear mem int y = threadIdx.y+blockIdx.y*blockDim.y; int z = threadIdx.z+blockIdx.z*blockDim.z; float xcoord = LRX0+x*LRFACTOR; float ycoord = LRY0+y*LRFACTOR; float zcoord = LRZ0+z*LRFACTOR; int j = x+y*pitch+z*YLRDIM*pitch;//index on padded mem (pitch in elements) float f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18; int im = ImageFcn(xcoord,ycoord,zcoord); float u_Av, v_Av, u_fluc, v_fluc; // if(x < 2 || x > LRX0+XLRDIM-3 || y < 2 || y > LRY0+YLRDIM-3 || z < 2 || z > LRZ0+ZLRDIM-3) // im = -1;//not valid for extraction // if(x < 1 || x > LRX0+XLRDIM-2 || y < 1 || y > LRY0+YLRDIM-2 || z < 1 || z > LRZ0+ZLRDIM-2) // { // im = -2;//not valid for second TS // } // if(x < 1 || x > XLRDIM-2 || y < 1 || y > YLRDIM-2 || z < 1 || z > ZLRDIM-2) // { // //dont do anything // } // else{ f0 = fin[j]; f1 = fin[f_memLR(1 ,x-1,y ,z ,pitch)]; f3 = fin[f_memLR(3 ,x+1,y ,z ,pitch)]; f2 = fin[f_memLR(2 ,x ,y-1,z ,pitch)]; f5 = fin[f_memLR(5 ,x-1,y-1,z ,pitch)]; f6 = fin[f_memLR(6 ,x+1,y-1,z ,pitch)]; f4 = fin[f_memLR(4 ,x ,y+1,z ,pitch)]; f7 = fin[f_memLR(7 ,x+1,y+1,z ,pitch)]; f8 = fin[f_memLR(8 ,x-1,y+1,z ,pitch)]; if(z != 0){ f9 = fin[f_memLR(9 ,x ,y ,z-1,pitch)]; f10= fin[f_memLR(10,x-1,y ,z-1,pitch)]; f11= fin[f_memLR(11,x ,y-1,z-1,pitch)]; f12= fin[f_memLR(12,x+1,y ,z-1,pitch)]; f13= fin[f_memLR(13,x ,y+1,z-1,pitch)]; } else{ f9 = fin[f_memLR(9 ,x ,y ,ZLRDIM-1,pitch)]; f10= fin[f_memLR(10,dmax_p(x-1,XLRDIM),y ,ZLRDIM-1,pitch)]; f11= fin[f_memLR(11,x ,dmax_p(y-1,YLRDIM),ZLRDIM-1,pitch)]; f12= fin[f_memLR(12,dmin_p(x+1,XLRDIM),y ,ZLRDIM-1,pitch)]; f13= fin[f_memLR(13,x ,dmin_p(y+1,YLRDIM),ZLRDIM-1,pitch)]; } if(z != ZLRDIM-1){ f14= fin[f_memLR(14,x ,y ,z+1,pitch)]; f15= fin[f_memLR(15,x-1,y ,z+1,pitch)]; f16= fin[f_memLR(16,x ,y-1,z+1,pitch)]; f17= fin[f_memLR(17,x+1,y ,z+1,pitch)]; f18= fin[f_memLR(18,x ,y+1,z+1,pitch)]; } else{ f14= fin[f_memLR(14,x ,y ,0 ,pitch)]; f15= fin[f_memLR(15,dmax_p(x-1,XLRDIM),y ,0 ,pitch)]; f16= fin[f_memLR(16,x ,dmax_p(y-1,YLRDIM),0 ,pitch)]; f17= fin[f_memLR(17,dmin_p(x+1,XLRDIM),y ,0 ,pitch)]; f18= fin[f_memLR(18,x ,dmin_p(y+1,YLRDIM),0 ,pitch)]; } if(im == 1 || im ==10){//BB fout[f_memLR(1 ,x,y,z,pitch)] = f3 ; fout[f_memLR(2 ,x,y,z,pitch)] = f4 ; fout[f_memLR(3 ,x,y,z,pitch)] = f1 ; fout[f_memLR(4 ,x,y,z,pitch)] = f2 ; fout[f_memLR(5 ,x,y,z,pitch)] = f7 ; fout[f_memLR(6 ,x,y,z,pitch)] = f8 ; fout[f_memLR(7 ,x,y,z,pitch)] = f5 ; fout[f_memLR(8 ,x,y,z,pitch)] = f6 ; fout[f_memLR(9 ,x,y,z,pitch)] = f14; fout[f_memLR(10,x,y,z,pitch)] = f17; fout[f_memLR(11,x,y,z,pitch)] = f18; fout[f_memLR(12,x,y,z,pitch)] = f15; fout[f_memLR(13,x,y,z,pitch)] = f16; fout[f_memLR(14,x,y,z,pitch)] = f9 ; fout[f_memLR(15,x,y,z,pitch)] = f12; fout[f_memLR(16,x,y,z,pitch)] = f13; fout[f_memLR(17,x,y,z,pitch)] = f10; fout[f_memLR(18,x,y,z,pitch)] = f11; } else{ if(MODEL == "MRT") mrt_collide(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,omega); else if(MODEL == "BGK") bgk_collide(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,omega); if(VELAV == "YES"){ if(t>=START_VELAV && t<START_VELFLUC){ u_Av = uAv[j]; v_Av = vAv[j]; 
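	// vel_avLR presumably folds the instantaneous velocity at this node into the running
	// time averages stored in uAv/vAv, using t+LRFACTOR as the effective sample time of
	// this LR sub-step; the updated averages are written back right after the call.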
vel_avLR(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,u_Av,v_Av,t+LRFACTOR); uAv[j] = u_Av; vAv[j] = v_Av; } else if(t>=START_VELFLUC){ u_Av = uAv[j]; v_Av = vAv[j]; u_fluc = ufluc[j]; v_fluc = vfluc[j]; vel_flucLR(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,u_Av,v_Av,u_fluc,v_fluc,t+LRFACTOR); ufluc[j] = u_fluc; vfluc[j] = v_fluc; } } fout[f_memLR(0 ,x,y,z,pitch)] = f0 ; fout[f_memLR(1 ,x,y,z,pitch)] = f1 ; fout[f_memLR(2 ,x,y,z,pitch)] = f2 ; fout[f_memLR(3 ,x,y,z,pitch)] = f3 ; fout[f_memLR(4 ,x,y,z,pitch)] = f4 ; fout[f_memLR(5 ,x,y,z,pitch)] = f5 ; fout[f_memLR(6 ,x,y,z,pitch)] = f6 ; fout[f_memLR(7 ,x,y,z,pitch)] = f7 ; fout[f_memLR(8 ,x,y,z,pitch)] = f8 ; fout[f_memLR(9 ,x,y,z,pitch)] = f9 ; fout[f_memLR(10,x,y,z,pitch)] = f10; fout[f_memLR(11,x,y,z,pitch)] = f11; fout[f_memLR(12,x,y,z,pitch)] = f12; fout[f_memLR(13,x,y,z,pitch)] = f13; fout[f_memLR(14,x,y,z,pitch)] = f14; fout[f_memLR(15,x,y,z,pitch)] = f15; fout[f_memLR(16,x,y,z,pitch)] = f16; fout[f_memLR(17,x,y,z,pitch)] = f17; fout[f_memLR(18,x,y,z,pitch)] = f18; } // }//end else (not at edge of LR) } __global__ void LR_d_BACD(float* fin, float* fout, float omega, size_t pitch, int t,float *uAv, float *vAv, float *ufluc, float *vfluc) { int x = threadIdx.x+blockIdx.x*blockDim.x;//coord in linear mem int y = threadIdx.y+blockIdx.y*blockDim.y; int z = threadIdx.z+blockIdx.z*blockDim.z; float xcoord = LRX0+x*LRFACTOR; float ycoord = LRY0+y*LRFACTOR; float zcoord = LRZ0+z*LRFACTOR; int j = x+y*pitch+z*YLRDIM*pitch;//index on padded mem (pitch in elements) float f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18; int im = ImageFcn(xcoord,ycoord,zcoord); // if(x < 2 || x > LRX0+XLRDIM-3 || y < 2 || y > LRY0+YLRDIM-3 || z < 2 || z > LRZ0+ZLRDIM-3) // im = -1;//not valid for extraction // if(x < 1 || x > LRX0+XLRDIM-2 || y < 1 || y > LRY0+YLRDIM-2 || z < 1 || z > LRZ0+ZLRDIM-2) // { // im = -2;//not valid for second TS // } // if(x < 1 || x > XLRDIM-2 || y < 1 || y > YLRDIM-2 || z < 1 || z > ZLRDIM-2) // { // //dont do anything // } // else{ f0 = fin[j]; f1 = fin[f_memLR(1 ,x-1,y ,z ,pitch)]; f3 = fin[f_memLR(3 ,x+1,y ,z ,pitch)]; f2 = fin[f_memLR(2 ,x ,y-1,z ,pitch)]; f5 = fin[f_memLR(5 ,x-1,y-1,z ,pitch)]; f6 = fin[f_memLR(6 ,x+1,y-1,z ,pitch)]; f4 = fin[f_memLR(4 ,x ,y+1,z ,pitch)]; f7 = fin[f_memLR(7 ,x+1,y+1,z ,pitch)]; f8 = fin[f_memLR(8 ,x-1,y+1,z ,pitch)]; if(z != 0){ f9 = fin[f_memLR(9 ,x ,y ,z-1,pitch)]; f10= fin[f_memLR(10,x-1,y ,z-1,pitch)]; f11= fin[f_memLR(11,x ,y-1,z-1,pitch)]; f12= fin[f_memLR(12,x+1,y ,z-1,pitch)]; f13= fin[f_memLR(13,x ,y+1,z-1,pitch)]; } else{ f9 = fin[f_memLR(9 ,x ,y ,ZLRDIM-1,pitch)]; f10= fin[f_memLR(10,dmax_p(x-1,XLRDIM),y ,ZLRDIM-1,pitch)]; f11= fin[f_memLR(11,x ,dmax_p(y-1,YLRDIM),ZLRDIM-1,pitch)]; f12= fin[f_memLR(12,dmin_p(x+1,XLRDIM),y ,ZLRDIM-1,pitch)]; f13= fin[f_memLR(13,x ,dmin_p(y+1,YLRDIM),ZLRDIM-1,pitch)]; } if(z != ZLRDIM-1){ f14= fin[f_memLR(14,x ,y ,z+1,pitch)]; f15= fin[f_memLR(15,x-1,y ,z+1,pitch)]; f16= fin[f_memLR(16,x ,y-1,z+1,pitch)]; f17= fin[f_memLR(17,x+1,y ,z+1,pitch)]; f18= fin[f_memLR(18,x ,y+1,z+1,pitch)]; } else{ f14= fin[f_memLR(14,x ,y ,0 ,pitch)]; f15= fin[f_memLR(15,dmax_p(x-1,XLRDIM),y ,0 ,pitch)]; f16= fin[f_memLR(16,x ,dmax_p(y-1,YLRDIM),0 ,pitch)]; f17= fin[f_memLR(17,dmin_p(x+1,XLRDIM),y ,0 ,pitch)]; f18= fin[f_memLR(18,x ,dmin_p(y+1,YLRDIM),0 ,pitch)]; } if(im == 1 || im ==10){//BB fout[f_memLR(1 ,x,y,z,pitch)] = f3 ; fout[f_memLR(2 ,x,y,z,pitch)] = f4 ; fout[f_memLR(3 ,x,y,z,pitch)] = f1 ; 
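	// Bounce-back (im == 1 or im == 10): each incoming population is written back along its
	// opposite D3Q19 direction, i.e. the pairs 1<->3, 2<->4, 5<->7, 6<->8, 9<->14, 10<->17,
	// 11<->18, 12<->15 and 13<->16 are swapped; the remaining swaps follow below.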
fout[f_memLR(4 ,x,y,z,pitch)] = f2 ; fout[f_memLR(5 ,x,y,z,pitch)] = f7 ; fout[f_memLR(6 ,x,y,z,pitch)] = f8 ; fout[f_memLR(7 ,x,y,z,pitch)] = f5 ; fout[f_memLR(8 ,x,y,z,pitch)] = f6 ; fout[f_memLR(9 ,x,y,z,pitch)] = f14; fout[f_memLR(10,x,y,z,pitch)] = f17; fout[f_memLR(11,x,y,z,pitch)] = f18; fout[f_memLR(12,x,y,z,pitch)] = f15; fout[f_memLR(13,x,y,z,pitch)] = f16; fout[f_memLR(14,x,y,z,pitch)] = f9 ; fout[f_memLR(15,x,y,z,pitch)] = f12; fout[f_memLR(16,x,y,z,pitch)] = f13; fout[f_memLR(17,x,y,z,pitch)] = f10; fout[f_memLR(18,x,y,z,pitch)] = f11; } else{ if(MODEL == "MRT") mrt_collide(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,omega); else if(MODEL == "BGK") bgk_collide(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,omega); if(VELAV == "YES"){ float u_Av, v_Av, u_fluc, v_fluc; if(t>=START_VELAV && t<START_VELFLUC){ u_Av = uAv[j]; v_Av = vAv[j]; vel_avLR(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,u_Av,v_Av,t+LRFACTOR); uAv[j] = u_Av; vAv[j] = v_Av; } else if(t>=START_VELFLUC){ u_Av = uAv[j]; v_Av = vAv[j]; u_fluc = ufluc[j]; v_fluc = vfluc[j]; vel_flucLR(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,u_Av,v_Av,u_fluc,v_fluc,t+LRFACTOR); ufluc[j] = u_fluc; vfluc[j] = v_fluc; } } fout[f_memLR(0 ,x,y,z,pitch)] = f0 ; fout[f_memLR(1 ,x,y,z,pitch)] = f1 ; fout[f_memLR(2 ,x,y,z,pitch)] = f2 ; fout[f_memLR(3 ,x,y,z,pitch)] = f3 ; fout[f_memLR(4 ,x,y,z,pitch)] = f4 ; fout[f_memLR(5 ,x,y,z,pitch)] = f5 ; fout[f_memLR(6 ,x,y,z,pitch)] = f6 ; fout[f_memLR(7 ,x,y,z,pitch)] = f7 ; fout[f_memLR(8 ,x,y,z,pitch)] = f8 ; fout[f_memLR(9 ,x,y,z,pitch)] = f9 ; fout[f_memLR(10,x,y,z,pitch)] = f10; fout[f_memLR(11,x,y,z,pitch)] = f11; fout[f_memLR(12,x,y,z,pitch)] = f12; fout[f_memLR(13,x,y,z,pitch)] = f13; fout[f_memLR(14,x,y,z,pitch)] = f14; fout[f_memLR(15,x,y,z,pitch)] = f15; fout[f_memLR(16,x,y,z,pitch)] = f16; fout[f_memLR(17,x,y,z,pitch)] = f17; fout[f_memLR(18,x,y,z,pitch)] = f18; } // }//end else (not at edge of LR) } __global__ void mrt_d_single_force(float* fin, float* fout, float omega, size_t pitch, float *FX, float *FY, float *FZ, int t,float *uAv, float *vAv, float *ufluc, float *vfluc) { int x = threadIdx.x+blockIdx.x*blockDim.x;//coord in linear mem int y = threadIdx.y+blockIdx.y*blockDim.y;//; int z = threadIdx.z+blockIdx.z*blockDim.z; int j = x+y*pitch+z*YDIM*pitch;//index on padded mem (pitch in elements) float f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18; __shared__ float sumX[BLOCKSIZEX], sumY[BLOCKSIZEX], sumZ[BLOCKSIZEX]; __shared__ int check[1]; check[0] = 0; syncthreads(); float u_Av, v_Av, u_fluc, v_fluc; // if((REFINEMENT == "YES" && x > LRX0+1 && x < LRX0+(XLRDIM-1)*LRFACTOR-1 && // y > LRY0+1 && y < LRY0+(YLRDIM-1)*LRFACTOR-1 && // z > LRZ0+1 && z < LRZ0+(ZLRDIM-1)*LRFACTOR-1 || // (x>XDIM-1))) // { // } // else{ int im = ImageFcn(x,y,z); f0 = fin[j]; f1 = fin[f_mem(1 ,x-1,y ,z ,pitch)]; f3 = fin[f_mem(3 ,x+1,y ,z ,pitch)]; f2 = fin[f_mem(2 ,x ,y-1,z ,pitch)]; f5 = fin[f_mem(5 ,x-1,y-1,z ,pitch)]; f6 = fin[f_mem(6 ,x+1,y-1,z ,pitch)]; f4 = fin[f_mem(4 ,x ,y+1,z ,pitch)]; f7 = fin[f_mem(7 ,x+1,y+1,z ,pitch)]; f8 = fin[f_mem(8 ,x-1,y+1,z ,pitch)]; if(z != 0){ f9 = fin[f_mem(9 ,x ,y ,z-1,pitch)]; f10= fin[f_mem(10,x-1,y ,z-1,pitch)]; f11= fin[f_mem(11,x ,y-1,z-1,pitch)]; f12= fin[f_mem(12,x+1,y ,z-1,pitch)]; f13= fin[f_mem(13,x ,y+1,z-1,pitch)]; } else{ f9 = fin[f_mem(9 ,x ,y ,ZDIM-1,pitch)]; f10= fin[f_mem(10,dmax_p(x-1,XDIM),y ,ZDIM-1,pitch)]; f11= fin[f_mem(11,x 
,dmax_p(y-1,YDIM),ZDIM-1,pitch)]; f12= fin[f_mem(12,dmin_p(x+1,XDIM),y ,ZDIM-1,pitch)]; f13= fin[f_mem(13,x ,dmin_p(y+1,YDIM),ZDIM-1,pitch)]; } if(z != ZDIM-1){ f14= fin[f_mem(14,x ,y ,z+1,pitch)]; f15= fin[f_mem(15,x-1,y ,z+1,pitch)]; f16= fin[f_mem(16,x ,y-1,z+1,pitch)]; f17= fin[f_mem(17,x+1,y ,z+1,pitch)]; f18= fin[f_mem(18,x ,y+1,z+1,pitch)]; } else{ f14= fin[f_mem(14,x ,y ,0 ,pitch)]; f15= fin[f_mem(15,dmax_p(x-1,XDIM),y ,0 ,pitch)]; f16= fin[f_mem(16,x ,dmax_p(y-1,YDIM),0 ,pitch)]; f17= fin[f_mem(17,dmin_p(x+1,XDIM),y ,0 ,pitch)]; f18= fin[f_mem(18,x ,dmin_p(y+1,YDIM),0 ,pitch)]; } if(im == 1 || im == 10){//BB if(im == 10){ check[0] = 1; sumX[threadIdx.x]=2.f*f1-2.f*f3+2.f*f5+2.f*f8-2.f*f6-2.f*f7+2.f*f10-2.f*f12+2.f*f15-2.f*f17; sumY[threadIdx.x]=2.f*f2-2.f*f4+2.f*f5-2.f*f8+2.f*f6-2.f*f7+2.f*f11-2.f*f13+2.f*f16-2.f*f18; sumZ[threadIdx.x]=2.f*f9+2.f*f10+2.f*f11+2.f*f12+2.f*f13-2.f*f14-2.f*f15-2.f*f16-2.f*f17-2.f*f18; } else{ sumX[threadIdx.x]=0.f; sumY[threadIdx.x]=0.f; sumZ[threadIdx.x]=0.f; } fout[j+pitch*YDIM*ZDIM*1 ] = f3 ; fout[j+pitch*YDIM*ZDIM*2 ] = f4 ; fout[j+pitch*YDIM*ZDIM*3 ] = f1 ; fout[j+pitch*YDIM*ZDIM*4 ] = f2 ; fout[j+pitch*YDIM*ZDIM*5 ] = f7 ; fout[j+pitch*YDIM*ZDIM*6 ] = f8 ; fout[j+pitch*YDIM*ZDIM*7 ] = f5 ; fout[j+pitch*YDIM*ZDIM*8 ] = f6 ; fout[j+pitch*YDIM*ZDIM*9 ] = f14; fout[j+pitch*YDIM*ZDIM*10] = f17; fout[j+pitch*YDIM*ZDIM*11] = f18; fout[j+pitch*YDIM*ZDIM*12] = f15; fout[j+pitch*YDIM*ZDIM*13] = f16; fout[j+pitch*YDIM*ZDIM*14] = f9 ; fout[j+pitch*YDIM*ZDIM*15] = f12; fout[j+pitch*YDIM*ZDIM*16] = f13; fout[j+pitch*YDIM*ZDIM*17] = f10; fout[j+pitch*YDIM*ZDIM*18] = f11; } else{ sumX[threadIdx.x]=0.f; sumY[threadIdx.x]=0.f; sumZ[threadIdx.x]=0.f; boundaries_force(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,y,z,im); if(im == 100)//north outlet { f0 = fin[f_mem(0 ,x,y-1,z,pitch)]; f1 = fin[f_mem(1 ,x,y-1,z,pitch)]; f3 = fin[f_mem(3 ,x,y-1,z,pitch)]; f2 = fin[f_mem(2 ,x,y-1,z,pitch)]; f5 = fin[f_mem(5 ,x,y-1,z,pitch)]; f6 = fin[f_mem(6 ,x,y-1,z,pitch)]; f4 = fin[f_mem(4 ,x,y-1,z,pitch)]; f7 = fin[f_mem(7 ,x,y-1,z,pitch)]; f8 = fin[f_mem(8 ,x,y-1,z,pitch)]; f9 = fin[f_mem(9 ,x,y-1,z,pitch)]; f10= fin[f_mem(10,x,y-1,z,pitch)]; f11= fin[f_mem(11,x,y-1,z,pitch)]; f12= fin[f_mem(12,x,y-1,z,pitch)]; f13= fin[f_mem(13,x,y-1,z,pitch)]; f14= fin[f_mem(14,x,y-1,z,pitch)]; f15= fin[f_mem(15,x,y-1,z,pitch)]; f16= fin[f_mem(16,x,y-1,z,pitch)]; f17= fin[f_mem(17,x,y-1,z,pitch)]; f18= fin[f_mem(18,x,y-1,z,pitch)]; float rho,u,v,w; rho = 1.0f; u = f1-f3+f5-f6-f7+f8+f10-f12+f15-f17; v = f2-f4+f5+f6-f7-f8+f11-f13+f16-f18; w = f9+f10+f11+f12+f13-f14-f15-f16-f17-f18; float m1,m2,m4,m6,m8,m9,m10,m11,m12,m13,m14,m15,m16,m17,m18; m1 = -30.f*f0+-11.f*f1+-11.f*f2+-11.f*f3+-11.f*f4+ 8.f*f5+ 8.f*f6+ 8.f*f7+ 8.f*f8+-11.f*f9+ 8.f*f10+ 8.f*f11+ 8.f*f12+ 8.f*f13+-11.f*f14+ 8.f*f15+ 8.f*f16+ 8.f*f17+ 8.f*f18; m2 = 12.f*f0+ -4.f*f1+ -4.f*f2+ -4.f*f3+ -4.f*f4+ f5+ f6+ f7+ f8+ -4.f*f9+ f10+ f11+ f12+ f13+ -4.f*f14+ f15+ f16+ f17+ f18; m4 = -4.f*f1 + 4.f*f3 + f5+ - f6+ - f7+ f8 + f10 + - f12 + f15 + - f17 ; m6 = -4.f*f2 + 4.f*f4+ f5+ f6+ - f7+ - f8 + f11 + - f13 + f16 + - f18; m8 = + -4.f*f9+ f10+ f11+ f12+ f13+ 4.f*f14+ - f15+ - f16+ - f17+ - f18; m9 = 2.f*f1+ - f2+ 2.f*f3+ - f4+ f5+ f6+ f7+ f8+ - f9+ f10+ -2.f*f11+ f12+ -2.f*f13+ - f14+ f15+ -2.f*f16+ f17+ -2.f*f18; m10 = -4.f*f1+ 2.f*f2+ -4.f*f3+ 2.f*f4+ f5+ f6+ f7+ f8+ 2.f*f9+ f10+ -2.f*f11+ f12+ -2.f*f13+ 2.f*f14+ f15+ -2.f*f16+ f17+ -2.f*f18; m11 = f2 + f4+ f5+ f6+ f7+ f8+ - f9+ - f10 + - f12 + - f14+ - f15 + - f17 ; m12 
= -2.f*f2 -2.f*f4+ f5+ f6+ f7+ f8+ 2.f*f9+ - f10 + - f12 + 2.f*f14+ - f15 + - f17 ; m13 = f5+ - f6+ f7+ - f8 ; m14 = f11 + - f13 + - f16 + f18; m15 = f10 + - f12 + - f15 + f17 ; m16 = f5+ - f6+ - f7+ f8 - f10 + f12 + - f15 + f17 ; m17 = - f5+ - f6+ f7+ f8 + f11 + - f13 + f16 + - f18; m18 = f10+ - f11+ f12+ - f13 + - f15+ f16+ - f17+ f18; f0 =(0.052631579f*rho +- 0.012531328f*(m1)+ 0.047619048f*(m2)); f1 =(0.052631579f*rho+ 0.1f*u +-0.0045948204f*(m1)+-0.015873016f*(m2)+ -0.1f*(m4) + 0.055555556f*((m9)-m10)); f2 =(0.052631579f*rho + 0.1f*v +-0.0045948204f*(m1)+-0.015873016f*(m2) + -0.1f*(m6) +-0.027777778f*((m9)-m10)+ 0.083333333f*((m11)-m12)); f3 =(0.052631579f*rho+ -0.1f*u +-0.0045948204f*(m1)+-0.015873016f*(m2)+ 0.1f*(m4) + 0.055555556f*((m9)-m10)); f4 =(0.052631579f*rho + -0.1f*v +-0.0045948204f*(m1)+-0.015873016f*(m2) + 0.1f*(m6) +-0.027777778f*((m9)-m10)+ 0.083333333f*((m11)-m12)); f5 =(0.052631579f*rho+ 0.1f*u+ 0.1f*v + 0.0033416876f*(m1)+ 0.003968254f*(m2)+ 0.025f*(m4+m6) +0.013888889f*(m10)+0.041666667f*(m12)+0.125f*( m16-m17)+ (0.027777778f*(m9) +0.083333333f*(m11)+( 0.25f*(m13)))); f6 =(0.052631579f*rho+ -0.1f*u+ 0.1f*v + 0.0033416876f*(m1)+ 0.003968254f*(m2)+-0.025f*(m4-m6) +0.013888889f*(m10)+0.041666667f*(m12)+0.125f*(-m16-m17)+ (0.027777778f*(m9) +0.083333333f*(m11)+(-0.25f*(m13)))); f7 =(0.052631579f*rho+ -0.1f*u+ -0.1f*v + 0.0033416876f*(m1)+ 0.003968254f*(m2)+-0.025f*(m4+m6) +0.013888889f*(m10)+0.041666667f*(m12)+0.125f*(-m16+m17)+ (0.027777778f*(m9) +0.083333333f*(m11)+( 0.25f*(m13)))); f8 =(0.052631579f*rho+ 0.1f*u+ -0.1f*v + 0.0033416876f*(m1)+ 0.003968254f*(m2)+ 0.025f*(m4-m6) +0.013888889f*(m10)+0.041666667f*(m12)+0.125f*( m16+m17)+ (0.027777778f*(m9) +0.083333333f*(m11)+(-0.25f*(m13)))); f9 =(0.052631579f*rho + 0.1f*w+-0.0045948204f*(m1)+-0.015873016f*(m2) + -0.1f*(m8)+-0.027777778f*((m9)-m10)+-0.083333333f*((m11)-m12)); f10=(0.052631579f*rho+ 0.1f*u + 0.1f*w+ 0.0033416876f*(m1)+ 0.003968254f*(m2)+ 0.025f*(m4+m8) +0.013888889f*(m10)-0.041666667f*(m12)+0.125f*(-m16+m18)+ (0.027777778f*(m9) -0.083333333f*(m11)+( 0.25f*(m15)))); f11=(0.052631579f*rho + 0.1f*v+ 0.1f*w+ 0.0033416876f*(m1)+ 0.003968254f*(m2) + 0.025f*(m6+m8)+0.125f*( m17-m18)-0.027777778f*(m10)+(-0.055555556f*(m9) +( 0.25f*(m14)))); f12=(0.052631579f*rho+ -0.1f*u + 0.1f*w+ 0.0033416876f*(m1)+ 0.003968254f*(m2)+-0.025f*(m4-m8) +0.013888889f*(m10)-0.041666667f*(m12)+0.125f*( m16+m18)+ (0.027777778f*(m9) -0.083333333f*(m11)+(-0.25f*(m15)))); f13=(0.052631579f*rho + -0.1f*v+ 0.1f*w+ 0.0033416876f*(m1)+ 0.003968254f*(m2) + -0.025f*(m6-m8)+0.125f*(-m17-m18)-0.027777778f*(m10)+(-0.055555556f*(m9) +(-0.25f*(m14)))); f14=(0.052631579f*rho + -0.1f*w+-0.0045948204f*(m1)+-0.015873016f*(m2) + 0.1f*(m8)+-0.027777778f*((m9)-m10)+-0.083333333f*((m11)-m12)); f15=(0.052631579f*rho+ 0.1f*u + -0.1f*w+ 0.0033416876f*(m1)+ 0.003968254f*(m2)+ 0.025f*(m4-m8) +0.013888889f*(m10)-0.041666667f*(m12)+0.125f*(-m16-m18)+ (0.027777778f*(m9) -0.083333333f*(m11)+(-0.25f*(m15)))); f16=(0.052631579f*rho + 0.1f*v+ -0.1f*w+ 0.0033416876f*(m1)+ 0.003968254f*(m2) + 0.025f*(m6-m8)+0.125f*( m17+m18)-0.027777778f*(m10)+(-0.055555556f*(m9) +(-0.25f*(m14)))); f17=(0.052631579f*rho+ -0.1f*u + -0.1f*w+ 0.0033416876f*(m1)+ 0.003968254f*(m2)+-0.025f*(m4+m8) +0.013888889f*(m10)-0.041666667f*(m12)+0.125f*( m16-m18)+ (0.027777778f*(m9) -0.083333333f*(m11)+( 0.25f*(m15)))); f18=(0.052631579f*rho + -0.1f*v+ -0.1f*w+ 0.0033416876f*(m1)+ 0.003968254f*(m2) + -0.025f*(m6+m8)+0.125f*(-m17+m18)-0.027777778f*(m10)+(-0.055555556f*(m9) +( 0.25f*(m14)))); } if(im == 
200)//south inlet { f0 = fin[f_mem(0 ,x,y+1,z,pitch)]; f1 = fin[f_mem(1 ,x,y+1,z,pitch)]; f3 = fin[f_mem(3 ,x,y+1,z,pitch)]; f2 = fin[f_mem(2 ,x,y+1,z,pitch)]; f5 = fin[f_mem(5 ,x,y+1,z,pitch)]; f6 = fin[f_mem(6 ,x,y+1,z,pitch)]; f4 = fin[f_mem(4 ,x,y+1,z,pitch)]; f7 = fin[f_mem(7 ,x,y+1,z,pitch)]; f8 = fin[f_mem(8 ,x,y+1,z,pitch)]; f9 = fin[f_mem(9 ,x,y+1,z,pitch)]; f10= fin[f_mem(10,x,y+1,z,pitch)]; f11= fin[f_mem(11,x,y+1,z,pitch)]; f12= fin[f_mem(12,x,y+1,z,pitch)]; f13= fin[f_mem(13,x,y+1,z,pitch)]; f14= fin[f_mem(14,x,y+1,z,pitch)]; f15= fin[f_mem(15,x,y+1,z,pitch)]; f16= fin[f_mem(16,x,y+1,z,pitch)]; f17= fin[f_mem(17,x,y+1,z,pitch)]; f18= fin[f_mem(18,x,y+1,z,pitch)]; float rho,u,v,w; rho = f0+f1+f2+f3+f4+f5+f6+f7+f8+f9+f10+f11+f12+f13+f14+f15+f16+f17+f18; u = 0.f;//f1-f3+f5-f6-f7+f8+f10-f12+f15-f17; v = UMAX;//f2-f4+f5+f6-f7-f8+f11-f13+f16-f18; w = 0.f;//f9+f10+f11+f12+f13-f14-f15-f16-f17-f18; float m1,m2,m4,m6,m8,m9,m10,m11,m12,m13,m14,m15,m16,m17,m18; m1 = -30.f*f0+-11.f*f1+-11.f*f2+-11.f*f3+-11.f*f4+ 8.f*f5+ 8.f*f6+ 8.f*f7+ 8.f*f8+-11.f*f9+ 8.f*f10+ 8.f*f11+ 8.f*f12+ 8.f*f13+-11.f*f14+ 8.f*f15+ 8.f*f16+ 8.f*f17+ 8.f*f18; m2 = 12.f*f0+ -4.f*f1+ -4.f*f2+ -4.f*f3+ -4.f*f4+ f5+ f6+ f7+ f8+ -4.f*f9+ f10+ f11+ f12+ f13+ -4.f*f14+ f15+ f16+ f17+ f18; m4 = -4.f*f1 + 4.f*f3 + f5+ - f6+ - f7+ f8 + f10 + - f12 + f15 + - f17 ; m6 = -4.f*f2 + 4.f*f4+ f5+ f6+ - f7+ - f8 + f11 + - f13 + f16 + - f18; m8 = + -4.f*f9+ f10+ f11+ f12+ f13+ 4.f*f14+ - f15+ - f16+ - f17+ - f18; m9 = 2.f*f1+ - f2+ 2.f*f3+ - f4+ f5+ f6+ f7+ f8+ - f9+ f10+ -2.f*f11+ f12+ -2.f*f13+ - f14+ f15+ -2.f*f16+ f17+ -2.f*f18; m10 = -4.f*f1+ 2.f*f2+ -4.f*f3+ 2.f*f4+ f5+ f6+ f7+ f8+ 2.f*f9+ f10+ -2.f*f11+ f12+ -2.f*f13+ 2.f*f14+ f15+ -2.f*f16+ f17+ -2.f*f18; m11 = f2 + f4+ f5+ f6+ f7+ f8+ - f9+ - f10 + - f12 + - f14+ - f15 + - f17 ; m12 = -2.f*f2 -2.f*f4+ f5+ f6+ f7+ f8+ 2.f*f9+ - f10 + - f12 + 2.f*f14+ - f15 + - f17 ; m13 = f5+ - f6+ f7+ - f8 ; m14 = f11 + - f13 + - f16 + f18; m15 = f10 + - f12 + - f15 + f17 ; m16 = f5+ - f6+ - f7+ f8 - f10 + f12 + - f15 + f17 ; m17 = - f5+ - f6+ f7+ f8 + f11 + - f13 + f16 + - f18; m18 = f10+ - f11+ f12+ - f13 + - f15+ f16+ - f17+ f18; f0 =(0.052631579f*rho +- 0.012531328f*(m1)+ 0.047619048f*(m2)); f1 =(0.052631579f*rho+ 0.1f*u +-0.0045948204f*(m1)+-0.015873016f*(m2)+ -0.1f*(m4) + 0.055555556f*((m9)-m10)); f2 =(0.052631579f*rho + 0.1f*v +-0.0045948204f*(m1)+-0.015873016f*(m2) + -0.1f*(m6) +-0.027777778f*((m9)-m10)+ 0.083333333f*((m11)-m12)); f3 =(0.052631579f*rho+ -0.1f*u +-0.0045948204f*(m1)+-0.015873016f*(m2)+ 0.1f*(m4) + 0.055555556f*((m9)-m10)); f4 =(0.052631579f*rho + -0.1f*v +-0.0045948204f*(m1)+-0.015873016f*(m2) + 0.1f*(m6) +-0.027777778f*((m9)-m10)+ 0.083333333f*((m11)-m12)); f5 =(0.052631579f*rho+ 0.1f*u+ 0.1f*v + 0.0033416876f*(m1)+ 0.003968254f*(m2)+ 0.025f*(m4+m6) +0.013888889f*(m10)+0.041666667f*(m12)+0.125f*( m16-m17)+ (0.027777778f*(m9) +0.083333333f*(m11)+( 0.25f*(m13)))); f6 =(0.052631579f*rho+ -0.1f*u+ 0.1f*v + 0.0033416876f*(m1)+ 0.003968254f*(m2)+-0.025f*(m4-m6) +0.013888889f*(m10)+0.041666667f*(m12)+0.125f*(-m16-m17)+ (0.027777778f*(m9) +0.083333333f*(m11)+(-0.25f*(m13)))); f7 =(0.052631579f*rho+ -0.1f*u+ -0.1f*v + 0.0033416876f*(m1)+ 0.003968254f*(m2)+-0.025f*(m4+m6) +0.013888889f*(m10)+0.041666667f*(m12)+0.125f*(-m16+m17)+ (0.027777778f*(m9) +0.083333333f*(m11)+( 0.25f*(m13)))); f8 =(0.052631579f*rho+ 0.1f*u+ -0.1f*v + 0.0033416876f*(m1)+ 0.003968254f*(m2)+ 0.025f*(m4-m6) +0.013888889f*(m10)+0.041666667f*(m12)+0.125f*( m16+m17)+ (0.027777778f*(m9) 
+0.083333333f*(m11)+(-0.25f*(m13)))); f9 =(0.052631579f*rho + 0.1f*w+-0.0045948204f*(m1)+-0.015873016f*(m2) + -0.1f*(m8)+-0.027777778f*((m9)-m10)+-0.083333333f*((m11)-m12)); f10=(0.052631579f*rho+ 0.1f*u + 0.1f*w+ 0.0033416876f*(m1)+ 0.003968254f*(m2)+ 0.025f*(m4+m8) +0.013888889f*(m10)-0.041666667f*(m12)+0.125f*(-m16+m18)+ (0.027777778f*(m9) -0.083333333f*(m11)+( 0.25f*(m15)))); f11=(0.052631579f*rho + 0.1f*v+ 0.1f*w+ 0.0033416876f*(m1)+ 0.003968254f*(m2) + 0.025f*(m6+m8)+0.125f*( m17-m18)-0.027777778f*(m10)+(-0.055555556f*(m9) +( 0.25f*(m14)))); f12=(0.052631579f*rho+ -0.1f*u + 0.1f*w+ 0.0033416876f*(m1)+ 0.003968254f*(m2)+-0.025f*(m4-m8) +0.013888889f*(m10)-0.041666667f*(m12)+0.125f*( m16+m18)+ (0.027777778f*(m9) -0.083333333f*(m11)+(-0.25f*(m15)))); f13=(0.052631579f*rho + -0.1f*v+ 0.1f*w+ 0.0033416876f*(m1)+ 0.003968254f*(m2) + -0.025f*(m6-m8)+0.125f*(-m17-m18)-0.027777778f*(m10)+(-0.055555556f*(m9) +(-0.25f*(m14)))); f14=(0.052631579f*rho + -0.1f*w+-0.0045948204f*(m1)+-0.015873016f*(m2) + 0.1f*(m8)+-0.027777778f*((m9)-m10)+-0.083333333f*((m11)-m12)); f15=(0.052631579f*rho+ 0.1f*u + -0.1f*w+ 0.0033416876f*(m1)+ 0.003968254f*(m2)+ 0.025f*(m4-m8) +0.013888889f*(m10)-0.041666667f*(m12)+0.125f*(-m16-m18)+ (0.027777778f*(m9) -0.083333333f*(m11)+(-0.25f*(m15)))); f16=(0.052631579f*rho + 0.1f*v+ -0.1f*w+ 0.0033416876f*(m1)+ 0.003968254f*(m2) + 0.025f*(m6-m8)+0.125f*( m17+m18)-0.027777778f*(m10)+(-0.055555556f*(m9) +(-0.25f*(m14)))); f17=(0.052631579f*rho+ -0.1f*u + -0.1f*w+ 0.0033416876f*(m1)+ 0.003968254f*(m2)+-0.025f*(m4+m8) +0.013888889f*(m10)-0.041666667f*(m12)+0.125f*( m16-m18)+ (0.027777778f*(m9) -0.083333333f*(m11)+( 0.25f*(m15)))); f18=(0.052631579f*rho + -0.1f*v+ -0.1f*w+ 0.0033416876f*(m1)+ 0.003968254f*(m2) + -0.025f*(m6+m8)+0.125f*(-m17+m18)-0.027777778f*(m10)+(-0.055555556f*(m9) +( 0.25f*(m14)))); } float Cs = 0.01f; //if(XDIM-x < 64.f) ////Cs = 0.01f+(x-64.f)/64.f*(x-64.f)/64.f*0.1f; //Cs = 0.01f*pow(2.f,((x-448.f)/16.f)); if(MODEL == "MRT") mrt_collide_LES(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,omega,CS); else if(MODEL == "BGK") bgk_collide(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,omega); if(VELAV == "YES"){ if(t>=START_VELAV && t<START_VELFLUC){ u_Av = uAv[j]; v_Av = vAv[j]; vel_av(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,u_Av,v_Av,t); uAv[j] = u_Av; vAv[j] = v_Av; } else if(t>=START_VELFLUC){ u_Av = uAv[j]; v_Av = vAv[j]; u_fluc = ufluc[j]; v_fluc = vfluc[j]; vel_fluc(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,u_Av,v_Av,u_fluc,v_fluc,t); ufluc[j] = u_fluc; vfluc[j] = v_fluc; } } fout[f_mem(0 ,x,y,z,pitch)] = f0 ; fout[f_mem(1 ,x,y,z,pitch)] = f1 ; fout[f_mem(2 ,x,y,z,pitch)] = f2 ; fout[f_mem(3 ,x,y,z,pitch)] = f3 ; fout[f_mem(4 ,x,y,z,pitch)] = f4 ; fout[f_mem(5 ,x,y,z,pitch)] = f5 ; fout[f_mem(6 ,x,y,z,pitch)] = f6 ; fout[f_mem(7 ,x,y,z,pitch)] = f7 ; fout[f_mem(8 ,x,y,z,pitch)] = f8 ; fout[f_mem(9 ,x,y,z,pitch)] = f9 ; fout[f_mem(10,x,y,z,pitch)] = f10; fout[f_mem(11,x,y,z,pitch)] = f11; fout[f_mem(12,x,y,z,pitch)] = f12; fout[f_mem(13,x,y,z,pitch)] = f13; fout[f_mem(14,x,y,z,pitch)] = f14; fout[f_mem(15,x,y,z,pitch)] = f15; fout[f_mem(16,x,y,z,pitch)] = f16; fout[f_mem(17,x,y,z,pitch)] = f17; fout[f_mem(18,x,y,z,pitch)] = f18; } syncthreads(); if(check[0] == 1 && t>=STARTF && REFINEMENT == "NO"){ //reduction for force int nTotalThreads = blockDim.x; while(nTotalThreads > 1){ int halfPoint = (nTotalThreads >> 1); if(threadIdx.x < halfPoint){ sumX[threadIdx.x] += 
sumX[threadIdx.x+halfPoint]; sumY[threadIdx.x] += sumY[threadIdx.x+halfPoint]; sumZ[threadIdx.x] += sumZ[threadIdx.x+halfPoint]; } syncthreads(); nTotalThreads = halfPoint; } if(threadIdx.x == 0){ atomicAdd(&FX[t],sumX[0]); atomicAdd(&FY[t],sumY[0]); atomicAdd(&FZ[t],sumZ[0]); } } // } } __global__ void mrt_d_single(float* fA, float* fB, float omega, size_t pitch)//pitch in elements { int x = threadIdx.x+blockIdx.x*blockDim.x;//coord in linear mem int y = threadIdx.y+blockIdx.y*blockDim.y; int z = threadIdx.z+blockIdx.z*blockDim.z; int j = x+y*pitch+z*YDIM*pitch;//index on padded mem (pitch in elements) int im = ImageFcn(x,y,z); float f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18; // if(REFINEMENT == "YES" && x > LRX0+1 && x < LRX0+(XLRDIM-1)*LRFACTOR-1 // && y > LRY0+1 && y < LRY0+(YLRDIM-1)*LRFACTOR-1 && z > LRZ0+1 && z < LRZ0+(ZLRDIM-1)*LRFACTOR-1 || // (x>XDIM-1)){ // } // else{ f0 = fA[j]; f1 = fA[f_mem(1 ,x-1,y ,z ,pitch)]; f3 = fA[f_mem(3 ,x+1,y ,z ,pitch)]; f2 = fA[f_mem(2 ,x ,y-1,z ,pitch)]; f5 = fA[f_mem(5 ,x-1,y-1,z ,pitch)]; f6 = fA[f_mem(6 ,x+1,y-1,z ,pitch)]; f4 = fA[f_mem(4 ,x ,y+1,z ,pitch)]; f7 = fA[f_mem(7 ,x+1,y+1,z ,pitch)]; f8 = fA[f_mem(8 ,x-1,y+1,z ,pitch)]; f9 = fA[f_mem(9 ,x ,y ,z-1,pitch)]; f10= fA[f_mem(10,x-1,y ,z-1,pitch)]; f11= fA[f_mem(11,x ,y-1,z-1,pitch)]; f12= fA[f_mem(12,x+1,y ,z-1,pitch)]; f13= fA[f_mem(13,x ,y+1,z-1,pitch)]; f14= fA[f_mem(14,x ,y ,z+1,pitch)]; f15= fA[f_mem(15,x-1,y ,z+1,pitch)]; f16= fA[f_mem(16,x ,y-1,z+1,pitch)]; f17= fA[f_mem(17,x+1,y ,z+1,pitch)]; //f18= fA[f_mem(18,x ,y+1,dmin(z+1,ZDIM),pitch)]; if(z != ZDIM-1) f18= fA[f_mem(18,x ,y+1,z+1,pitch)]; if(im == 1 || im ==10){//BB fB[f_mem(1 ,x,y,z,pitch)] = f3 ; fB[f_mem(2 ,x,y,z,pitch)] = f4 ; fB[f_mem(3 ,x,y,z,pitch)] = f1 ; fB[f_mem(4 ,x,y,z,pitch)] = f2 ; fB[f_mem(5 ,x,y,z,pitch)] = f7 ; fB[f_mem(6 ,x,y,z,pitch)] = f8 ; fB[f_mem(7 ,x,y,z,pitch)] = f5 ; fB[f_mem(8 ,x,y,z,pitch)] = f6 ; fB[f_mem(9 ,x,y,z,pitch)] = f14; fB[f_mem(10,x,y,z,pitch)] = f17; fB[f_mem(11,x,y,z,pitch)] = f18; fB[f_mem(12,x,y,z,pitch)] = f15; fB[f_mem(13,x,y,z,pitch)] = f16; fB[f_mem(14,x,y,z,pitch)] = f9 ; fB[f_mem(15,x,y,z,pitch)] = f12; fB[f_mem(16,x,y,z,pitch)] = f13; fB[f_mem(17,x,y,z,pitch)] = f10; fB[f_mem(18,x,y,z,pitch)] = f11; } else{ boundaries(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,y,z,im); if(MODEL == "MRT") mrt_collide(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,omega); else if(MODEL == "BGK") bgk_collide(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,omega); fB[f_mem(0 ,x,y,z,pitch)] = f0 ; fB[f_mem(1 ,x,y,z,pitch)] = f1 ; fB[f_mem(2 ,x,y,z,pitch)] = f2 ; fB[f_mem(3 ,x,y,z,pitch)] = f3 ; fB[f_mem(4 ,x,y,z,pitch)] = f4 ; fB[f_mem(5 ,x,y,z,pitch)] = f5 ; fB[f_mem(6 ,x,y,z,pitch)] = f6 ; fB[f_mem(7 ,x,y,z,pitch)] = f7 ; fB[f_mem(8 ,x,y,z,pitch)] = f8 ; fB[f_mem(9 ,x,y,z,pitch)] = f9 ; fB[f_mem(10,x,y,z,pitch)] = f10; fB[f_mem(11,x,y,z,pitch)] = f11; fB[f_mem(12,x,y,z,pitch)] = f12; fB[f_mem(13,x,y,z,pitch)] = f13; fB[f_mem(14,x,y,z,pitch)] = f14; fB[f_mem(15,x,y,z,pitch)] = f15; fB[f_mem(16,x,y,z,pitch)] = f16; fB[f_mem(17,x,y,z,pitch)] = f17; fB[f_mem(18,x,y,z,pitch)] = f18; } // } } __device__ __inline__ float ld_gb1_cg(const float *addr){ float return_value; asm("ld.global.cg.f32 %0, [%1];" : "=f"(return_value) : "l"(addr)); return return_value; } __global__ void initialize_single(float *f, size_t pitch)//pitch in elements { int x = threadIdx.x+blockIdx.x*blockDim.x;//coord in linear mem int y = 
threadIdx.y+blockIdx.y*blockDim.y; int z = threadIdx.z+blockIdx.z*blockDim.z; int j = x+y*pitch+z*YDIM*pitch;//index on padded mem (pitch in elements) int im = ImageFcn(x,y,z); float u,v,w,rho,usqr; rho = 1.f; u = 0.0f; v = UMAX; w = 0.0f; if(im == 10 || im == 1){ u = 0.0f; v = 0.0f; w = 0.0f; } //if(x == 3 ) u = 0.1f; usqr = u*u+v*v+w*w; if(MODEL == "BGK"){ f[j+0 *pitch*YDIM*ZDIM]= 1.0f/3.0f*(rho-1.5f*usqr); f[j+1 *pitch*YDIM*ZDIM]= 1.0f/18.0f*(rho+3.0f*u+4.5f*u*u-1.5f*usqr); f[j+2 *pitch*YDIM*ZDIM]= 1.0f/18.0f*(rho+3.0f*v+4.5f*v*v-1.5f*usqr); f[j+3 *pitch*YDIM*ZDIM]= 1.0f/18.0f*(rho-3.0f*u+4.5f*u*u-1.5f*usqr); f[j+4 *pitch*YDIM*ZDIM]= 1.0f/18.0f*(rho-3.0f*v+4.5f*v*v-1.5f*usqr); f[j+5 *pitch*YDIM*ZDIM]= 1.0f/36.0f*(rho+3.0f*(u+v)+4.5f*(u+v)*(u+v)-1.5f*usqr); f[j+6 *pitch*YDIM*ZDIM]= 1.0f/36.0f*(rho+3.0f*(-u+v)+4.5f*(-u+v)*(-u+v)-1.5f*usqr); f[j+7 *pitch*YDIM*ZDIM]= 1.0f/36.0f*(rho+3.0f*(-u-v)+4.5f*(-u-v)*(-u-v)-1.5f*usqr); f[j+8 *pitch*YDIM*ZDIM]= 1.0f/36.0f*(rho+3.0f*(u-v)+4.5f*(u-v)*(u-v)-1.5f*usqr); f[j+9 *pitch*YDIM*ZDIM]= 1.0f/18.0f*(rho+3.0f*w+4.5f*w*w-1.5f*usqr); f[j+10*pitch*YDIM*ZDIM]= 1.0f/36.0f*(rho+3.0f*(u+w)+4.5f*(u+w)*(u+w)-1.5f*usqr); f[j+11*pitch*YDIM*ZDIM]= 1.0f/36.0f*(rho+3.0f*(v+w)+4.5f*(v+w)*(v+w)-1.5f*usqr); f[j+12*pitch*YDIM*ZDIM]= 1.0f/36.0f*(rho+3.0f*(-u+w)+4.5f*(-u+w)*(-u+w)-1.5f*usqr); f[j+13*pitch*YDIM*ZDIM]= 1.0f/36.0f*(rho+3.0f*(-v+w)+4.5f*(-v+w)*(-v+w)-1.5f*usqr); f[j+14*pitch*YDIM*ZDIM]= 1.0f/18.0f*(rho-3.0f*w+4.5f*w*w-1.5f*usqr); f[j+15*pitch*YDIM*ZDIM]= 1.0f/36.0f*(rho+3.0f*(u-w)+4.5f*(u-w)*(u-w)-1.5f*usqr); f[j+16*pitch*YDIM*ZDIM]= 1.0f/36.0f*(rho+3.0f*(v-w)+4.5f*(v-w)*(v-w)-1.5f*usqr); f[j+17*pitch*YDIM*ZDIM]= 1.0f/36.0f*(rho+3.0f*(-u-w)+4.5f*(-u-w)*(-u-w)-1.5f*usqr); f[j+18*pitch*YDIM*ZDIM]= 1.0f/36.0f*(rho+3.0f*(-v-w)+4.5f*(-v-w)*(-v-w)-1.5f*usqr); } else{ float f0 = 0.1904761791f*rho+-0.597127747f*usqr ; float f1 = 0.1031746045f*rho+ 0.032375918f*usqr+ 0.1666666667f*u ; float f2 = 0.1031746045f*rho+ 0.032375918f*usqr+ 0.1666666667f*v ; float f3 = 0.1031746045f*rho+ 0.032375918f*usqr+ -0.1666666667f*u ; float f4 = 0.1031746045f*rho+ 0.032375918f*usqr+ -0.1666666667f*v ; float f5 = 0.0158730149f*rho+ 0.033572690f*usqr+ 0.083333333f*( u+v) ; float f6 = 0.0158730149f*rho+ 0.033572690f*usqr+ -0.083333333f*( u-v) ; float f7 = 0.0158730149f*rho+ 0.033572690f*usqr+ -0.083333333f*( u+v) ; float f8 = 0.0158730149f*rho+ 0.033572690f*usqr+ 0.083333333f*( u-v) ; float f9 = 0.1031746045f*rho+ 0.032375918f*usqr+ 0.1666666667f*w; float f10= 0.0158730149f*rho+ 0.033572690f*usqr+ 0.083333333f*( u+w) ; float f11= 0.0158730149f*rho+ 0.033572690f*usqr+ 0.083333333f*( v+w); float f12= 0.0158730149f*rho+ 0.033572690f*usqr+ -0.083333333f*( u-w) ; float f13= 0.0158730149f*rho+ 0.033572690f*usqr+ -0.083333333f*( v-w); float f14= 0.1031746045f*rho+ 0.032375918f*usqr+ -0.1666666667f*w; float f15= 0.0158730149f*rho+ 0.033572690f*usqr+ 0.083333333f*(u-w) ; float f16= 0.0158730149f*rho+ 0.033572690f*usqr+ 0.083333333f*(v-w); float f17= 0.0158730149f*rho+ 0.033572690f*usqr+ -0.083333333f*(u+w) ; float f18= 0.0158730149f*rho+ 0.033572690f*usqr+ -0.083333333f*(v+w); f1 += 0.055555556f*(2.f*u*u-(v*v+w*w)); f2 += -0.027777778f*(2.f*u*u-(v*v+w*w))+ 0.083333333f*(v*v-w*w); f3 += 0.055555556f*(2.f*u*u-(v*v+w*w)); f4 += -0.027777778f*(2.f*u*u-(v*v+w*w))+ 0.083333333f*(v*v-w*w); f5 += 0.027777778f*(2.f*u*u-(v*v+w*w))+ 0.083333333f*(v*v-w*w)+ 0.25f*u*v ; f6 += 0.027777778f*(2.f*u*u-(v*v+w*w))+ 0.083333333f*(v*v-w*w)+ -0.25f*u*v ; f7 += 0.027777778f*(2.f*u*u-(v*v+w*w))+ 0.083333333f*(v*v-w*w)+ 
0.25f*u*v ; f8 += 0.027777778f*(2.f*u*u-(v*v+w*w))+ 0.083333333f*(v*v-w*w)+ -0.25f*u*v ; f9 += -0.027777778f*(2.f*u*u-(v*v+w*w))+ -0.083333333f*(v*v-w*w) ; f10+= 0.027777778f*(2.f*u*u-(v*v+w*w))+ -0.083333333f*(v*v-w*w) + 0.25f*u*w; f11+= -0.055555556f*(2.f*u*u-(v*v+w*w)) + 0.25f*v*w ; f12+= 0.027777778f*(2.f*u*u-(v*v+w*w))+ -0.083333333f*(v*v-w*w) + -0.25f*u*w; f13+= -0.055555556f*(2.f*u*u-(v*v+w*w)) -0.25f*v*w ; f14+= -0.027777778f*(2.f*u*u-(v*v+w*w))+ -0.083333333f*(v*v-w*w) ; f15+= 0.027777778f*(2.f*u*u-(v*v+w*w))+ -0.083333333f*(v*v-w*w) + -0.25f*u*w; f16+= -0.055555556f*(2.f*u*u-(v*v+w*w)) + -0.25f*v*w ; f17+= 0.027777778f*(2.f*u*u-(v*v+w*w))+ -0.083333333f*(v*v-w*w) + 0.25f*u*w; f18+= -0.055555556f*(2.f*u*u-(v*v+w*w)) + 0.25f*v*w ; f[j+0 *pitch*YDIM*ZDIM]=f0 ; f[j+1 *pitch*YDIM*ZDIM]=f1 ; f[j+2 *pitch*YDIM*ZDIM]=f2 ; f[j+3 *pitch*YDIM*ZDIM]=f3 ; f[j+4 *pitch*YDIM*ZDIM]=f4 ; f[j+5 *pitch*YDIM*ZDIM]=f5 ; f[j+6 *pitch*YDIM*ZDIM]=f6 ; f[j+7 *pitch*YDIM*ZDIM]=f7 ; f[j+8 *pitch*YDIM*ZDIM]=f8 ; f[j+9 *pitch*YDIM*ZDIM]=f9 ; f[j+10*pitch*YDIM*ZDIM]=f10; f[j+11*pitch*YDIM*ZDIM]=f11; f[j+12*pitch*YDIM*ZDIM]=f12; f[j+13*pitch*YDIM*ZDIM]=f13; f[j+14*pitch*YDIM*ZDIM]=f14; f[j+15*pitch*YDIM*ZDIM]=f15; f[j+16*pitch*YDIM*ZDIM]=f16; f[j+17*pitch*YDIM*ZDIM]=f17; f[j+18*pitch*YDIM*ZDIM]=f18; } if(x == XDIM-1){ for(int i = XDIM; i<pitch; i++){ j = i+y*pitch+z*YDIM*pitch;//index on padded mem (pitch in elements) f[j+0 *pitch*YDIM*ZDIM]=0.f; f[j+1 *pitch*YDIM*ZDIM]=0.f; f[j+2 *pitch*YDIM*ZDIM]=0.f; f[j+3 *pitch*YDIM*ZDIM]=0.f; f[j+4 *pitch*YDIM*ZDIM]=0.f; f[j+5 *pitch*YDIM*ZDIM]=0.f; f[j+6 *pitch*YDIM*ZDIM]=0.f; f[j+7 *pitch*YDIM*ZDIM]=0.f; f[j+8 *pitch*YDIM*ZDIM]=0.f; f[j+9 *pitch*YDIM*ZDIM]=0.f; f[j+10*pitch*YDIM*ZDIM]=0.f; f[j+11*pitch*YDIM*ZDIM]=0.f; f[j+12*pitch*YDIM*ZDIM]=0.f; f[j+13*pitch*YDIM*ZDIM]=0.f; f[j+14*pitch*YDIM*ZDIM]=0.f; f[j+15*pitch*YDIM*ZDIM]=0.f; f[j+16*pitch*YDIM*ZDIM]=0.f; f[j+17*pitch*YDIM*ZDIM]=0.f; f[j+18*pitch*YDIM*ZDIM]=0.f; } } } __global__ void initialize_LR(float *f, size_t pitch)//pitch in elements { int x = threadIdx.x+blockIdx.x*blockDim.x;//coord in linear mem int y = threadIdx.y+blockIdx.y*blockDim.y; int z = threadIdx.z+blockIdx.z*blockDim.z; int j = x+y*pitch+z*YLRDIM*pitch;//index on padded mem (pitch in elements) float xcoord = LRX0+x*LRFACTOR; float ycoord = LRY0+y*LRFACTOR; float zcoord = LRZ0+z*LRFACTOR; int im = ImageFcn(xcoord,ycoord,zcoord); float u,v,w,rho,usqr; rho = 1.f; u = 0.0f; v = UMAX;//0.0f; w = 0.0f; if(im == 10 || im == 1){ u = 0.0f; v = 0.0f; w = 0.0f; } //if(x == 3 ) u = 0.1f; usqr = u*u+v*v+w*w; if(MODEL == "BGK"){ f[j+0 *pitch*YLRDIM*ZLRDIM]= 1.0f/3.0f*(rho-1.5f*usqr); f[j+1 *pitch*YLRDIM*ZLRDIM]= 1.0f/18.0f*(rho+3.0f*u+4.5f*u*u-1.5f*usqr); f[j+2 *pitch*YLRDIM*ZLRDIM]= 1.0f/18.0f*(rho+3.0f*v+4.5f*v*v-1.5f*usqr); f[j+3 *pitch*YLRDIM*ZLRDIM]= 1.0f/18.0f*(rho-3.0f*u+4.5f*u*u-1.5f*usqr); f[j+4 *pitch*YLRDIM*ZLRDIM]= 1.0f/18.0f*(rho-3.0f*v+4.5f*v*v-1.5f*usqr); f[j+5 *pitch*YLRDIM*ZLRDIM]= 1.0f/36.0f*(rho+3.0f*(u+v)+4.5f*(u+v)*(u+v)-1.5f*usqr); f[j+6 *pitch*YLRDIM*ZLRDIM]= 1.0f/36.0f*(rho+3.0f*(-u+v)+4.5f*(-u+v)*(-u+v)-1.5f*usqr); f[j+7 *pitch*YLRDIM*ZLRDIM]= 1.0f/36.0f*(rho+3.0f*(-u-v)+4.5f*(-u-v)*(-u-v)-1.5f*usqr); f[j+8 *pitch*YLRDIM*ZLRDIM]= 1.0f/36.0f*(rho+3.0f*(u-v)+4.5f*(u-v)*(u-v)-1.5f*usqr); f[j+9 *pitch*YLRDIM*ZLRDIM]= 1.0f/18.0f*(rho+3.0f*w+4.5f*w*w-1.5f*usqr); f[j+10*pitch*YLRDIM*ZLRDIM]= 1.0f/36.0f*(rho+3.0f*(u+w)+4.5f*(u+w)*(u+w)-1.5f*usqr); f[j+11*pitch*YLRDIM*ZLRDIM]= 1.0f/36.0f*(rho+3.0f*(v+w)+4.5f*(v+w)*(v+w)-1.5f*usqr); 
f[j+12*pitch*YLRDIM*ZLRDIM]= 1.0f/36.0f*(rho+3.0f*(-u+w)+4.5f*(-u+w)*(-u+w)-1.5f*usqr); f[j+13*pitch*YLRDIM*ZLRDIM]= 1.0f/36.0f*(rho+3.0f*(-v+w)+4.5f*(-v+w)*(-v+w)-1.5f*usqr); f[j+14*pitch*YLRDIM*ZLRDIM]= 1.0f/18.0f*(rho-3.0f*w+4.5f*w*w-1.5f*usqr); f[j+15*pitch*YLRDIM*ZLRDIM]= 1.0f/36.0f*(rho+3.0f*(u-w)+4.5f*(u-w)*(u-w)-1.5f*usqr); f[j+16*pitch*YLRDIM*ZLRDIM]= 1.0f/36.0f*(rho+3.0f*(v-w)+4.5f*(v-w)*(v-w)-1.5f*usqr); f[j+17*pitch*YLRDIM*ZLRDIM]= 1.0f/36.0f*(rho+3.0f*(-u-w)+4.5f*(-u-w)*(-u-w)-1.5f*usqr); f[j+18*pitch*YLRDIM*ZLRDIM]= 1.0f/36.0f*(rho+3.0f*(-v-w)+4.5f*(-v-w)*(-v-w)-1.5f*usqr); } else{ float f0 = 0.1904761791f*rho+-0.597127747f*usqr ; float f1 = 0.1031746045f*rho+ 0.032375918f*usqr+ 0.1666666667f*u ; float f2 = 0.1031746045f*rho+ 0.032375918f*usqr+ 0.1666666667f*v ; float f3 = 0.1031746045f*rho+ 0.032375918f*usqr+ -0.1666666667f*u ; float f4 = 0.1031746045f*rho+ 0.032375918f*usqr+ -0.1666666667f*v ; float f5 = 0.0158730149f*rho+ 0.033572690f*usqr+ 0.083333333f*( u+v) ; float f6 = 0.0158730149f*rho+ 0.033572690f*usqr+ -0.083333333f*( u-v) ; float f7 = 0.0158730149f*rho+ 0.033572690f*usqr+ -0.083333333f*( u+v) ; float f8 = 0.0158730149f*rho+ 0.033572690f*usqr+ 0.083333333f*( u-v) ; float f9 = 0.1031746045f*rho+ 0.032375918f*usqr+ 0.1666666667f*w; float f10= 0.0158730149f*rho+ 0.033572690f*usqr+ 0.083333333f*( u+w) ; float f11= 0.0158730149f*rho+ 0.033572690f*usqr+ 0.083333333f*( v+w); float f12= 0.0158730149f*rho+ 0.033572690f*usqr+ -0.083333333f*( u-w) ; float f13= 0.0158730149f*rho+ 0.033572690f*usqr+ -0.083333333f*( v-w); float f14= 0.1031746045f*rho+ 0.032375918f*usqr+ -0.1666666667f*w; float f15= 0.0158730149f*rho+ 0.033572690f*usqr+ 0.083333333f*(u-w) ; float f16= 0.0158730149f*rho+ 0.033572690f*usqr+ 0.083333333f*(v-w); float f17= 0.0158730149f*rho+ 0.033572690f*usqr+ -0.083333333f*(u+w) ; float f18= 0.0158730149f*rho+ 0.033572690f*usqr+ -0.083333333f*(v+w); f1 += 0.055555556f*(2.f*u*u-(v*v+w*w)); f2 += -0.027777778f*(2.f*u*u-(v*v+w*w))+ 0.083333333f*(v*v-w*w); f3 += 0.055555556f*(2.f*u*u-(v*v+w*w)); f4 += -0.027777778f*(2.f*u*u-(v*v+w*w))+ 0.083333333f*(v*v-w*w); f5 += 0.027777778f*(2.f*u*u-(v*v+w*w))+ 0.083333333f*(v*v-w*w)+ 0.25f*u*v ; f6 += 0.027777778f*(2.f*u*u-(v*v+w*w))+ 0.083333333f*(v*v-w*w)+ -0.25f*u*v ; f7 += 0.027777778f*(2.f*u*u-(v*v+w*w))+ 0.083333333f*(v*v-w*w)+ 0.25f*u*v ; f8 += 0.027777778f*(2.f*u*u-(v*v+w*w))+ 0.083333333f*(v*v-w*w)+ -0.25f*u*v ; f9 += -0.027777778f*(2.f*u*u-(v*v+w*w))+ -0.083333333f*(v*v-w*w) ; f10+= 0.027777778f*(2.f*u*u-(v*v+w*w))+ -0.083333333f*(v*v-w*w) + 0.25f*u*w; f11+= -0.055555556f*(2.f*u*u-(v*v+w*w)) + 0.25f*v*w ; f12+= 0.027777778f*(2.f*u*u-(v*v+w*w))+ -0.083333333f*(v*v-w*w) + -0.25f*u*w; f13+= -0.055555556f*(2.f*u*u-(v*v+w*w)) -0.25f*v*w ; f14+= -0.027777778f*(2.f*u*u-(v*v+w*w))+ -0.083333333f*(v*v-w*w) ; f15+= 0.027777778f*(2.f*u*u-(v*v+w*w))+ -0.083333333f*(v*v-w*w) + -0.25f*u*w; f16+= -0.055555556f*(2.f*u*u-(v*v+w*w)) + -0.25f*v*w ; f17+= 0.027777778f*(2.f*u*u-(v*v+w*w))+ -0.083333333f*(v*v-w*w) + 0.25f*u*w; f18+= -0.055555556f*(2.f*u*u-(v*v+w*w)) + 0.25f*v*w ; f[j+0 *pitch*YLRDIM*ZLRDIM]=f0 ; f[j+1 *pitch*YLRDIM*ZLRDIM]=f1 ; f[j+2 *pitch*YLRDIM*ZLRDIM]=f2 ; f[j+3 *pitch*YLRDIM*ZLRDIM]=f3 ; f[j+4 *pitch*YLRDIM*ZLRDIM]=f4 ; f[j+5 *pitch*YLRDIM*ZLRDIM]=f5 ; f[j+6 *pitch*YLRDIM*ZLRDIM]=f6 ; f[j+7 *pitch*YLRDIM*ZLRDIM]=f7 ; f[j+8 *pitch*YLRDIM*ZLRDIM]=f8 ; f[j+9 *pitch*YLRDIM*ZLRDIM]=f9 ; f[j+10*pitch*YLRDIM*ZLRDIM]=f10; f[j+11*pitch*YLRDIM*ZLRDIM]=f11; f[j+12*pitch*YLRDIM*ZLRDIM]=f12; f[j+13*pitch*YLRDIM*ZLRDIM]=f13; 
f[j+14*pitch*YLRDIM*ZLRDIM]=f14;
 f[j+15*pitch*YLRDIM*ZLRDIM]=f15;
 f[j+16*pitch*YLRDIM*ZLRDIM]=f16;
 f[j+17*pitch*YLRDIM*ZLRDIM]=f17;
 f[j+18*pitch*YLRDIM*ZLRDIM]=f18;
 }
}
__global__ void initialize(float* f0, float* f1, float* f2,
 float* f3, float* f4, float* f5, float* f6, float* f7, float* f8, float* f9,
 float* f10, float* f11, float* f12, float* f13, float* f14, float* f15,
 float* f16, float* f17, float* f18, size_t pitch)//pitch in elements
//__global__ void initialize(void** f0in, void** f1in,
// int w, int h, int pitch)//pitch in elements
{
 int x = threadIdx.x+blockIdx.x*blockDim.x;//coord in linear mem
 int y = threadIdx.y+blockIdx.y*blockDim.y;
 int z = threadIdx.z+blockIdx.z*blockDim.z;
// int i = x+y*XDIM+z*XDIM*YDIM;//index on linear mem
 int j = x+y*pitch+z*YDIM*pitch;//index on padded mem (pitch in elements)
// f1out[j] = tex2D(texRef_f2A,x,y+h*z);
 float u,v,w,rho,feq,usqr;
 rho = 1.0f;
 u = 0.0f;
 v = 0.0f;
 w = 0.0f;
 //if(x == 3 ) u = 0.1f;
 usqr = u*u+v*v+w*w;
 feq = 1.0f/3.0f*(rho-1.5f*usqr);
 f0[j] = feq;
 feq = 1.0f/18.0f*(rho+3.0f*u+4.5f*u*u-1.5f*usqr);
 f1[j] = feq;
 feq = 1.0f/18.0f*(rho+3.0f*v+4.5f*v*v-1.5f*usqr);
 f2[j] = feq;
 feq = 1.0f/18.0f*(rho-3.0f*u+4.5f*u*u-1.5f*usqr);
 f3[j] = feq;
 feq = 1.0f/18.0f*(rho-3.0f*v+4.5f*v*v-1.5f*usqr);
 f4[j] = feq;
 feq = 1.0f/36.0f*(rho+3.0f*(u+v)+4.5f*(u+v)*(u+v)-1.5f*usqr);
 f5[j] = feq;
 feq = 1.0f/36.0f*(rho+3.0f*(-u+v)+4.5f*(-u+v)*(-u+v)-1.5f*usqr);
 f6[j] = feq;
 feq = 1.0f/36.0f*(rho+3.0f*(-u-v)+4.5f*(-u-v)*(-u-v)-1.5f*usqr);
 f7[j] = feq;
 feq = 1.0f/36.0f*(rho+3.0f*(u-v)+4.5f*(u-v)*(u-v)-1.5f*usqr);
 f8[j] = feq;
 feq = 1.0f/18.0f*(rho+3.0f*w+4.5f*w*w-1.5f*usqr);
 f9[j] = feq;
 feq = 1.0f/36.0f*(rho+3.0f*(u+w)+4.5f*(u+w)*(u+w)-1.5f*usqr);
 f10[j] = feq;
 feq = 1.0f/36.0f*(rho+3.0f*(v+w)+4.5f*(v+w)*(v+w)-1.5f*usqr);
 f11[j] = feq;
 feq = 1.0f/36.0f*(rho+3.0f*(-u+w)+4.5f*(-u+w)*(-u+w)-1.5f*usqr);
 f12[j] = feq;
 feq = 1.0f/36.0f*(rho+3.0f*(-v+w)+4.5f*(-v+w)*(-v+w)-1.5f*usqr);
 f13[j] = feq;
 feq = 1.0f/18.0f*(rho-3.0f*w+4.5f*w*w-1.5f*usqr);
 f14[j] = feq;
 feq = 1.0f/36.0f*(rho+3.0f*(u-w)+4.5f*(u-w)*(u-w)-1.5f*usqr);
 f15[j] = feq;
 feq = 1.0f/36.0f*(rho+3.0f*(v-w)+4.5f*(v-w)*(v-w)-1.5f*usqr);
 f16[j] = feq;
 feq = 1.0f/36.0f*(rho+3.0f*(-u-w)+4.5f*(-u-w)*(-u-w)-1.5f*usqr);
 f17[j] = feq;
 feq = 1.0f/36.0f*(rho+3.0f*(-v-w)+4.5f*(-v-w)*(-v-w)-1.5f*usqr);
 f18[j] = feq;
}
int main(int argc, char *argv[])
{
 //int *image_d, *image_h;
 ofstream output;
 ofstream output2;
 string FileName = CASENAME;
 //output.open ("LBM1_out.dat");
 output.open ((FileName+".dat").c_str());
 output2.open ((FileName+".force").c_str());
 size_t memsize, memsize2;
 size_t pitch = 0;
 size_t pitch2 = 0;
 int i, n, nBlocks, nBlocks2, n2;
 float omega, CharLength, omega2;
 if(abs(LRFACTOR-1.f/LRLEVEL)>0.001f){
 cout<<"LRLEVEL and LRFACTOR don't match! 
Exiting..."<<endl; return 0; } CharLength = OBSTR1*2.f; omega = 1.0f/(3.0f*(UMAX*CharLength/RE)+0.5f); omega2 = 2.0f/(1.0f+2.0f*(2.0f/omega-1.0f)); if(LRFACTOR == 0.25f){ omega2 = 2.0f/(1.0f+2.0f*(2.0f/omega2-1.0f)); } float SF_cf = omega*(1.0f-omega2)/((1.0f-omega)*omega2/LRFACTOR); float SF_fc = 1.f/SF_cf; cout<<"omega : "<<omega<<endl; cout<<"omega2: "<<omega2<<endl; cout<<"blocksize: "<<BLOCKSIZEX<<"x"<<BLOCKSIZEY<<"x"<<BLOCKSIZEZ<<endl; cout<<"grid: "<<XDIM<<"x"<<YDIM<<"x"<<ZDIM<<endl; cout<<"LRblocksize: "<<BLOCKSIZELRX<<"x"<<BLOCKSIZELRY<<"x"<<BLOCKSIZELRZ<<endl; cout<<"LRgrid: "<<XLRDIM<<"x"<<YLRDIM<<"x"<<ZLRDIM<<endl; cout<<"TMAX: "<<TMAX<<endl; cout<<"Method: "<<METHOD<<endl; cout<<"Model: "<<MODEL<<endl; nBlocks = ((XDIM+BLOCKSIZEX-1)/BLOCKSIZEX)*(YDIM/BLOCKSIZEY+YDIM%BLOCKSIZEY) *(ZDIM/BLOCKSIZEZ+ZDIM%BLOCKSIZEZ); nBlocks2 = (XLRDIM/BLOCKSIZELRX+XLRDIM%BLOCKSIZELRX)*(YLRDIM/BLOCKSIZELRY+YLRDIM%BLOCKSIZELRY) *(ZLRDIM/BLOCKSIZELRZ+ZLRDIM%BLOCKSIZELRZ); int B = BLOCKSIZEX*BLOCKSIZEY*BLOCKSIZEZ; int B2 = BLOCKSIZELRX*BLOCKSIZELRY*BLOCKSIZELRZ; n = nBlocks*B; n2 = nBlocks2*B2; cout<<"nBlocks:"<<nBlocks<<endl; dim3 threads(BLOCKSIZEX, BLOCKSIZEY, BLOCKSIZEZ); dim3 grid(((XDIM+BLOCKSIZEX-1)/BLOCKSIZEX),YDIM/BLOCKSIZEY,ZDIM/BLOCKSIZEZ); dim3 threads2(BLOCKSIZELRX, BLOCKSIZELRY, BLOCKSIZELRZ); dim3 grid2(XLRDIM/BLOCKSIZELRX,YLRDIM/BLOCKSIZELRY,ZLRDIM/BLOCKSIZELRZ); memsize = n*sizeof(float); //memsize_int = n*sizeof(int); memsize2 = n2*sizeof(float); //hipExtent extent = make_hipExtent(XDIM*sizeof(float),YDIM,ZDIM); //image_h = (int *)malloc(memsize_int); float *fA_h,*fA_d,*fB_d,*fC_h,*fC_d,*fD_d; float *FX_h,*FY_h,*FZ_h,*FX_d,*FY_d,*FZ_d; float *uAv_h,*vAv_h,*wAv_h,*uAv_d,*vAv_d,*wAv_d; float *uAvLR_h,*vAvLR_h,*wAvLR_h,*uAvLR_d,*vAvLR_d,*wAvLR_d; float *ufluc_h,*vfluc_h,*wfluc_h,*ufluc_d,*vfluc_d,*wfluc_d; float *uflucLR_h,*vflucLR_h,*wflucLR_h,*uflucLR_d,*vflucLR_d,*wflucLR_d; fA_h = (float *)malloc(memsize*19); fC_h = (float *)malloc(memsize2*19); FX_h = (float *)malloc(TMAX*sizeof(float)); FY_h = (float *)malloc(TMAX*sizeof(float)); FZ_h = (float *)malloc(TMAX*sizeof(float)); uAv_h = (float *)malloc(XDIM*YDIM*ZDIM*sizeof(float)); vAv_h = (float *)malloc(XDIM*YDIM*ZDIM*sizeof(float)); wAv_h = (float *)malloc(XDIM*YDIM*ZDIM*sizeof(float)); uAvLR_h = (float *)malloc(XLRDIM*YLRDIM*ZLRDIM*sizeof(float)); vAvLR_h = (float *)malloc(XLRDIM*YLRDIM*ZLRDIM*sizeof(float)); wAvLR_h = (float *)malloc(XLRDIM*YLRDIM*ZLRDIM*sizeof(float)); ufluc_h = (float *)malloc(XDIM*YDIM*ZDIM*sizeof(float)); vfluc_h = (float *)malloc(XDIM*YDIM*ZDIM*sizeof(float)); wfluc_h = (float *)malloc(XDIM*YDIM*ZDIM*sizeof(float)); uflucLR_h = (float *)malloc(XLRDIM*YLRDIM*ZLRDIM*sizeof(float)); vflucLR_h = (float *)malloc(XLRDIM*YLRDIM*ZLRDIM*sizeof(float)); wflucLR_h = (float *)malloc(XLRDIM*YLRDIM*ZLRDIM*sizeof(float)); hipMallocPitch((void **) &fA_d, &pitch, XDIM*sizeof(float), YDIM*ZDIM*19); hipMallocPitch((void **) &fB_d, &pitch, XDIM*sizeof(float), YDIM*ZDIM*19); hipMallocPitch((void **) &uAv_d,&pitch, XDIM*sizeof(float), YDIM*ZDIM); hipMallocPitch((void **) &vAv_d,&pitch, XDIM*sizeof(float), YDIM*ZDIM); hipMallocPitch((void **) &wAv_d,&pitch, XDIM*sizeof(float), YDIM*ZDIM); hipMallocPitch((void **) &ufluc_d,&pitch, XDIM*sizeof(float), YDIM*ZDIM); hipMallocPitch((void **) &vfluc_d,&pitch, XDIM*sizeof(float), YDIM*ZDIM); hipMallocPitch((void **) &wfluc_d,&pitch, XDIM*sizeof(float), YDIM*ZDIM); if(REFINEMENT == "YES"){ hipMallocPitch((void **) &fC_d, &pitch2, XLRDIM*sizeof(float), YLRDIM*ZLRDIM*19); 
hipMallocPitch((void **) &fD_d, &pitch2, XLRDIM*sizeof(float), YLRDIM*ZLRDIM*19); if(VELAV == "YES"){ hipMallocPitch((void **) &uAvLR_d,&pitch2, XLRDIM*sizeof(float), YLRDIM*ZLRDIM); hipMallocPitch((void **) &vAvLR_d,&pitch2, XLRDIM*sizeof(float), YLRDIM*ZLRDIM); hipMallocPitch((void **) &wAvLR_d,&pitch2, XLRDIM*sizeof(float), YLRDIM*ZLRDIM); hipMallocPitch((void **) &uflucLR_d,&pitch2, XLRDIM*sizeof(float), YLRDIM*ZLRDIM); hipMallocPitch((void **) &vflucLR_d,&pitch2, XLRDIM*sizeof(float), YLRDIM*ZLRDIM); hipMallocPitch((void **) &wflucLR_d,&pitch2, XLRDIM*sizeof(float), YLRDIM*ZLRDIM); } } hipMalloc((void **) &FX_d, TMAX*sizeof(float)); hipMalloc((void **) &FY_d, TMAX*sizeof(float)); hipMalloc((void **) &FZ_d, TMAX*sizeof(float)); cout<<pitch<<", "<<pitch2<<endl; size_t pitch_elements = pitch/sizeof(float); size_t pitch_elements2 = pitch2/sizeof(float); hipChannelFormatDesc desc = hipCreateChannelDesc<float>(); for (i = 0; i < n*19; i++) { fA_h[i] = i; } for (i = 0; i < n2*19; i++) { fC_h[i] = 0; } for (i = 0; i < TMAX; i++){ FX_h[i] = 0.f; FY_h[i] = 0.f; FZ_h[i] = 0.f; } for (i = 0; i < n; i++) { uAv_h[i] = 0; vAv_h[i] = 0; wAv_h[i] = 0; ufluc_h[i] = 0; vfluc_h[i] = 0; wfluc_h[i] = 0; } for (i = 0; i < n2; i++) { uAvLR_h[i] = 0; vAvLR_h[i] = 0; wAvLR_h[i] = 0; uflucLR_h[i] = 0; vflucLR_h[i] = 0; wflucLR_h[i] = 0; } hipMemcpy(FX_d, FX_h, TMAX*sizeof(float), hipMemcpyHostToDevice); hipMemcpy(FY_d, FY_h, TMAX*sizeof(float), hipMemcpyHostToDevice); hipMemcpy(FZ_d, FZ_h, TMAX*sizeof(float), hipMemcpyHostToDevice); if(VELAV == "YES"){ hipMemcpy2D(uAv_d, pitch, uAv_h, XDIM*sizeof(float), XDIM*sizeof(float), YDIM*ZDIM, hipMemcpyHostToDevice); hipMemcpy2D(vAv_d, pitch, vAv_h, XDIM*sizeof(float), XDIM*sizeof(float), YDIM*ZDIM, hipMemcpyHostToDevice); hipMemcpy2D(wAv_d, pitch, wAv_h, XDIM*sizeof(float), XDIM*sizeof(float), YDIM*ZDIM, hipMemcpyHostToDevice); hipMemcpy2D(ufluc_d, pitch, uAv_h, XDIM*sizeof(float), XDIM*sizeof(float), YDIM*ZDIM, hipMemcpyHostToDevice); hipMemcpy2D(vfluc_d, pitch, vAv_h, XDIM*sizeof(float), XDIM*sizeof(float), YDIM*ZDIM, hipMemcpyHostToDevice); hipMemcpy2D(wfluc_d, pitch, wAv_h, XDIM*sizeof(float), XDIM*sizeof(float), YDIM*ZDIM, hipMemcpyHostToDevice); if(REFINEMENT == "YES"){ hipMemcpy2D(uAvLR_d, pitch2, uAvLR_h, XLRDIM*sizeof(float), XLRDIM*sizeof(float), YLRDIM*ZLRDIM, hipMemcpyHostToDevice); hipMemcpy2D(vAvLR_d, pitch2, vAvLR_h, XLRDIM*sizeof(float), XLRDIM*sizeof(float), YLRDIM*ZLRDIM, hipMemcpyHostToDevice); hipMemcpy2D(wAvLR_d, pitch2, wAvLR_h, XLRDIM*sizeof(float), XLRDIM*sizeof(float), YLRDIM*ZLRDIM, hipMemcpyHostToDevice); hipMemcpy2D(uflucLR_d,pitch2, uAvLR_h, XLRDIM*sizeof(float), XLRDIM*sizeof(float), YLRDIM*ZLRDIM, hipMemcpyHostToDevice); hipMemcpy2D(vflucLR_d,pitch2, vAvLR_h, XLRDIM*sizeof(float), XLRDIM*sizeof(float), YLRDIM*ZLRDIM, hipMemcpyHostToDevice); hipMemcpy2D(wflucLR_d,pitch2, wAvLR_h, XLRDIM*sizeof(float), XLRDIM*sizeof(float), YLRDIM*ZLRDIM, hipMemcpyHostToDevice); } } // for (i = 0; i < n; i++) // { // int x = i%XDIM; // int y = (i/XDIM)%YDIM; // int z = (i/XDIM)/YDIM; //// image_h[i] = 0; //// if(x < 1) image_h[i] = 1;//DirichletWest //// if(x > XDIM-2) image_h[i] = 1;//BB //// if(y < 1) image_h[i] = 1;//BB //// if(y > YDIM-2) image_h[i] = 1;//BB //// if(z < 1) image_h[i] = 1;//DirichletWest //// if(z > ZDIM-2) image_h[i] = 1;//BB // } //hipMemcpy(image_d, image_h, memsize_int, hipMemcpyHostToDevice); if(true)//texture settings { texRef_f0B.normalized = false; texRef_f1B.normalized = false; texRef_f2B.normalized = false; 
texRef_f3B.normalized = false; texRef_f4B.normalized = false; texRef_f5B.normalized = false; texRef_f6B.normalized = false; texRef_f7B.normalized = false; texRef_f8B.normalized = false; texRef_f9B.normalized = false; texRef_f10B.normalized = false; texRef_f11B.normalized = false; texRef_f12B.normalized = false; texRef_f13B.normalized = false; texRef_f14B.normalized = false; texRef_f15B.normalized = false; texRef_f16B.normalized = false; texRef_f17B.normalized = false; texRef_f18B.normalized = false; texRef_f0B.filterMode = hipFilterModeLinear; texRef_f1B.filterMode = hipFilterModeLinear; texRef_f2B.filterMode = hipFilterModeLinear; texRef_f3B.filterMode = hipFilterModeLinear; texRef_f4B.filterMode = hipFilterModeLinear; texRef_f5B.filterMode = hipFilterModeLinear; texRef_f6B.filterMode = hipFilterModeLinear; texRef_f7B.filterMode = hipFilterModeLinear; texRef_f8B.filterMode = hipFilterModeLinear; texRef_f9B.filterMode = hipFilterModeLinear; texRef_f10B.filterMode = hipFilterModeLinear; texRef_f11B.filterMode = hipFilterModeLinear; texRef_f12B.filterMode = hipFilterModeLinear; texRef_f13B.filterMode = hipFilterModeLinear; texRef_f14B.filterMode = hipFilterModeLinear; texRef_f15B.filterMode = hipFilterModeLinear; texRef_f16B.filterMode = hipFilterModeLinear; texRef_f17B.filterMode = hipFilterModeLinear; texRef_f18B.filterMode = hipFilterModeLinear; texRef_f0A.normalized = false; texRef_f1A.normalized = false; texRef_f2A.normalized = false; texRef_f3A.normalized = false; texRef_f4A.normalized = false; texRef_f5A.normalized = false; texRef_f6A.normalized = false; texRef_f7A.normalized = false; texRef_f8A.normalized = false; texRef_f9A.normalized = false; texRef_f10A.normalized = false; texRef_f11A.normalized = false; texRef_f12A.normalized = false; texRef_f13A.normalized = false; texRef_f14A.normalized = false; texRef_f15A.normalized = false; texRef_f16A.normalized = false; texRef_f17A.normalized = false; texRef_f18A.normalized = false; texRef_f0A.filterMode = hipFilterModeLinear; texRef_f1A.filterMode = hipFilterModeLinear; texRef_f2A.filterMode = hipFilterModeLinear; texRef_f3A.filterMode = hipFilterModeLinear; texRef_f4A.filterMode = hipFilterModeLinear; texRef_f5A.filterMode = hipFilterModeLinear; texRef_f6A.filterMode = hipFilterModeLinear; texRef_f7A.filterMode = hipFilterModeLinear; texRef_f8A.filterMode = hipFilterModeLinear; texRef_f9A.filterMode = hipFilterModeLinear; texRef_f10A.filterMode = hipFilterModeLinear; texRef_f11A.filterMode = hipFilterModeLinear; texRef_f12A.filterMode = hipFilterModeLinear; texRef_f13A.filterMode = hipFilterModeLinear; texRef_f14A.filterMode = hipFilterModeLinear; texRef_f15A.filterMode = hipFilterModeLinear; texRef_f16A.filterMode = hipFilterModeLinear; texRef_f17A.filterMode = hipFilterModeLinear; texRef_f18A.filterMode = hipFilterModeLinear; // if(REFINEMENT == "YES"){ texRef_f0C.normalized = false; texRef_f1C.normalized = false; texRef_f2C.normalized = false; texRef_f3C.normalized = false; texRef_f4C.normalized = false; texRef_f5C.normalized = false; texRef_f6C.normalized = false; texRef_f7C.normalized = false; texRef_f8C.normalized = false; texRef_f9C.normalized = false; texRef_f10C.normalized = false; texRef_f11C.normalized = false; texRef_f12C.normalized = false; texRef_f13C.normalized = false; texRef_f14C.normalized = false; texRef_f15C.normalized = false; texRef_f16C.normalized = false; texRef_f17C.normalized = false; texRef_f18C.normalized = false; texRef_f0C.filterMode = hipFilterModeLinear; texRef_f1C.filterMode = hipFilterModeLinear; 
texRef_f2C.filterMode = hipFilterModeLinear; texRef_f3C.filterMode = hipFilterModeLinear; texRef_f4C.filterMode = hipFilterModeLinear; texRef_f5C.filterMode = hipFilterModeLinear; texRef_f6C.filterMode = hipFilterModeLinear; texRef_f7C.filterMode = hipFilterModeLinear; texRef_f8C.filterMode = hipFilterModeLinear; texRef_f9C.filterMode = hipFilterModeLinear; texRef_f10C.filterMode = hipFilterModeLinear; texRef_f11C.filterMode = hipFilterModeLinear; texRef_f12C.filterMode = hipFilterModeLinear; texRef_f13C.filterMode = hipFilterModeLinear; texRef_f14C.filterMode = hipFilterModeLinear; texRef_f15C.filterMode = hipFilterModeLinear; texRef_f16C.filterMode = hipFilterModeLinear; texRef_f17C.filterMode = hipFilterModeLinear; texRef_f18C.filterMode = hipFilterModeLinear; texRef_f0D.normalized = false; texRef_f1D.normalized = false; texRef_f2D.normalized = false; texRef_f3D.normalized = false; texRef_f4D.normalized = false; texRef_f5D.normalized = false; texRef_f6D.normalized = false; texRef_f7D.normalized = false; texRef_f8D.normalized = false; texRef_f9D.normalized = false; texRef_f10D.normalized = false; texRef_f11D.normalized = false; texRef_f12D.normalized = false; texRef_f13D.normalized = false; texRef_f14D.normalized = false; texRef_f15D.normalized = false; texRef_f16D.normalized = false; texRef_f17D.normalized = false; texRef_f18D.normalized = false; texRef_f0D.filterMode = hipFilterModeLinear; texRef_f1D.filterMode = hipFilterModeLinear; texRef_f2D.filterMode = hipFilterModeLinear; texRef_f3D.filterMode = hipFilterModeLinear; texRef_f4D.filterMode = hipFilterModeLinear; texRef_f5D.filterMode = hipFilterModeLinear; texRef_f6D.filterMode = hipFilterModeLinear; texRef_f7D.filterMode = hipFilterModeLinear; texRef_f8D.filterMode = hipFilterModeLinear; texRef_f9D.filterMode = hipFilterModeLinear; texRef_f10D.filterMode = hipFilterModeLinear; texRef_f11D.filterMode = hipFilterModeLinear; texRef_f12D.filterMode = hipFilterModeLinear; texRef_f13D.filterMode = hipFilterModeLinear; texRef_f14D.filterMode = hipFilterModeLinear; texRef_f15D.filterMode = hipFilterModeLinear; texRef_f16D.filterMode = hipFilterModeLinear; texRef_f17D.filterMode = hipFilterModeLinear; texRef_f18D.filterMode = hipFilterModeLinear; // } for(int i = 0; i<2; i++){ texRef_f0A.addressMode[i] = hipAddressModeClamp; texRef_f1A.addressMode[i] = hipAddressModeClamp; texRef_f2A.addressMode[i] = hipAddressModeClamp; texRef_f3A.addressMode[i] = hipAddressModeClamp; texRef_f4A.addressMode[i] = hipAddressModeClamp; texRef_f5A.addressMode[i] = hipAddressModeClamp; texRef_f6A.addressMode[i] = hipAddressModeClamp; texRef_f7A.addressMode[i] = hipAddressModeClamp; texRef_f8A.addressMode[i] = hipAddressModeClamp; texRef_f9A.addressMode[i] = hipAddressModeClamp; texRef_f10A.addressMode[i] = hipAddressModeClamp; texRef_f11A.addressMode[i] = hipAddressModeClamp; texRef_f12A.addressMode[i] = hipAddressModeClamp; texRef_f13A.addressMode[i] = hipAddressModeClamp; texRef_f14A.addressMode[i] = hipAddressModeClamp; texRef_f15A.addressMode[i] = hipAddressModeClamp; texRef_f16A.addressMode[i] = hipAddressModeClamp; texRef_f17A.addressMode[i] = hipAddressModeClamp; texRef_f18A.addressMode[i] = hipAddressModeClamp; texRef_f0B.addressMode[i] = hipAddressModeClamp; texRef_f1B.addressMode[i] = hipAddressModeClamp; texRef_f2B.addressMode[i] = hipAddressModeClamp; texRef_f3B.addressMode[i] = hipAddressModeClamp; texRef_f4B.addressMode[i] = hipAddressModeClamp; texRef_f5B.addressMode[i] = hipAddressModeClamp; texRef_f6B.addressMode[i] = hipAddressModeClamp; 
texRef_f7B.addressMode[i] = hipAddressModeClamp; texRef_f8B.addressMode[i] = hipAddressModeClamp; texRef_f9B.addressMode[i] = hipAddressModeClamp; texRef_f10B.addressMode[i] = hipAddressModeClamp; texRef_f11B.addressMode[i] = hipAddressModeClamp; texRef_f12B.addressMode[i] = hipAddressModeClamp; texRef_f13B.addressMode[i] = hipAddressModeClamp; texRef_f14B.addressMode[i] = hipAddressModeClamp; texRef_f15B.addressMode[i] = hipAddressModeClamp; texRef_f16B.addressMode[i] = hipAddressModeClamp; texRef_f17B.addressMode[i] = hipAddressModeClamp; texRef_f18B.addressMode[i] = hipAddressModeClamp; } } hipMemcpy2D(fA_d,pitch ,fA_h,XDIM*sizeof(float),XDIM*sizeof(float),YDIM*ZDIM*19,hipMemcpyHostToDevice); hipMemcpy2D(fB_d,pitch ,fA_h,XDIM*sizeof(float),XDIM*sizeof(float),YDIM*ZDIM*19,hipMemcpyHostToDevice); if(REFINEMENT == "YES"){ hipMemcpy2D(fC_d,pitch2,fC_h,XLRDIM*sizeof(float),XLRDIM*sizeof(float),YLRDIM*ZLRDIM*19,hipMemcpyHostToDevice); hipMemcpy2D(fD_d,pitch2,fC_h,XLRDIM*sizeof(float),XLRDIM*sizeof(float),YLRDIM*ZLRDIM*19,hipMemcpyHostToDevice); } // for (i = 0; i < n*19; i++) // { // fA_h[i] = 0; // fC_h[i] = 1; // } if(true)//bind texture { hipBindTexture2D(0,&texRef_f0A, fA_d ,&desc,XDIM,YDIM*ZDIM,pitch); hipBindTexture2D(0,&texRef_f1A, fA_d+pitch_elements*YDIM*ZDIM ,&desc,XDIM,YDIM*ZDIM,pitch); hipBindTexture2D(0,&texRef_f2A, fA_d+pitch_elements*YDIM*ZDIM*2 ,&desc,XDIM,YDIM*ZDIM,pitch); hipBindTexture2D(0,&texRef_f3A, fA_d+pitch_elements*YDIM*ZDIM*3 ,&desc,XDIM,YDIM*ZDIM,pitch); hipBindTexture2D(0,&texRef_f4A, fA_d+pitch_elements*YDIM*ZDIM*4 ,&desc,XDIM,YDIM*ZDIM,pitch); hipBindTexture2D(0,&texRef_f5A, fA_d+pitch_elements*YDIM*ZDIM*5 ,&desc,XDIM,YDIM*ZDIM,pitch); hipBindTexture2D(0,&texRef_f6A, fA_d+pitch_elements*YDIM*ZDIM*6 ,&desc,XDIM,YDIM*ZDIM,pitch); hipBindTexture2D(0,&texRef_f7A, fA_d+pitch_elements*YDIM*ZDIM*7 ,&desc,XDIM,YDIM*ZDIM,pitch); hipBindTexture2D(0,&texRef_f8A, fA_d+pitch_elements*YDIM*ZDIM*8 ,&desc,XDIM,YDIM*ZDIM,pitch); hipBindTexture2D(0,&texRef_f9A, fA_d+pitch_elements*YDIM*ZDIM*9 ,&desc,XDIM,YDIM*ZDIM,pitch); hipBindTexture2D(0,&texRef_f10A,fA_d+pitch_elements*YDIM*ZDIM*10,&desc,XDIM,YDIM*ZDIM,pitch); hipBindTexture2D(0,&texRef_f11A,fA_d+pitch_elements*YDIM*ZDIM*11,&desc,XDIM,YDIM*ZDIM,pitch); hipBindTexture2D(0,&texRef_f12A,fA_d+pitch_elements*YDIM*ZDIM*12,&desc,XDIM,YDIM*ZDIM,pitch); hipBindTexture2D(0,&texRef_f13A,fA_d+pitch_elements*YDIM*ZDIM*13,&desc,XDIM,YDIM*ZDIM,pitch); hipBindTexture2D(0,&texRef_f14A,fA_d+pitch_elements*YDIM*ZDIM*14,&desc,XDIM,YDIM*ZDIM,pitch); hipBindTexture2D(0,&texRef_f15A,fA_d+pitch_elements*YDIM*ZDIM*15,&desc,XDIM,YDIM*ZDIM,pitch); hipBindTexture2D(0,&texRef_f16A,fA_d+pitch_elements*YDIM*ZDIM*16,&desc,XDIM,YDIM*ZDIM,pitch); hipBindTexture2D(0,&texRef_f17A,fA_d+pitch_elements*YDIM*ZDIM*17,&desc,XDIM,YDIM*ZDIM,pitch); hipBindTexture2D(0,&texRef_f18A,fA_d+pitch_elements*YDIM*ZDIM*18,&desc,XDIM,YDIM*ZDIM,pitch); hipBindTexture2D(0,&texRef_f0B, fB_d ,&desc,XDIM,YDIM*ZDIM,pitch); hipBindTexture2D(0,&texRef_f1B, fB_d+pitch_elements*YDIM*ZDIM ,&desc,XDIM,YDIM*ZDIM,pitch); hipBindTexture2D(0,&texRef_f2B, fB_d+pitch_elements*YDIM*ZDIM*2 ,&desc,XDIM,YDIM*ZDIM,pitch); hipBindTexture2D(0,&texRef_f3B, fB_d+pitch_elements*YDIM*ZDIM*3 ,&desc,XDIM,YDIM*ZDIM,pitch); hipBindTexture2D(0,&texRef_f4B, fB_d+pitch_elements*YDIM*ZDIM*4 ,&desc,XDIM,YDIM*ZDIM,pitch); hipBindTexture2D(0,&texRef_f5B, fB_d+pitch_elements*YDIM*ZDIM*5 ,&desc,XDIM,YDIM*ZDIM,pitch); hipBindTexture2D(0,&texRef_f6B, fB_d+pitch_elements*YDIM*ZDIM*6 ,&desc,XDIM,YDIM*ZDIM,pitch); 
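 // (as above: each remaining direction k of a distribution array is bound as an XDIM x (YDIM*ZDIM)
 // 2D texture starting k*pitch_elements*YDIM*ZDIM floats into that array)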
hipBindTexture2D(0,&texRef_f7B, fB_d+pitch_elements*YDIM*ZDIM*7 ,&desc,XDIM,YDIM*ZDIM,pitch); hipBindTexture2D(0,&texRef_f8B, fB_d+pitch_elements*YDIM*ZDIM*8 ,&desc,XDIM,YDIM*ZDIM,pitch); hipBindTexture2D(0,&texRef_f9B, fB_d+pitch_elements*YDIM*ZDIM*9 ,&desc,XDIM,YDIM*ZDIM,pitch); hipBindTexture2D(0,&texRef_f10B,fB_d+pitch_elements*YDIM*ZDIM*10,&desc,XDIM,YDIM*ZDIM,pitch); hipBindTexture2D(0,&texRef_f11B,fB_d+pitch_elements*YDIM*ZDIM*11,&desc,XDIM,YDIM*ZDIM,pitch); hipBindTexture2D(0,&texRef_f12B,fB_d+pitch_elements*YDIM*ZDIM*12,&desc,XDIM,YDIM*ZDIM,pitch); hipBindTexture2D(0,&texRef_f13B,fB_d+pitch_elements*YDIM*ZDIM*13,&desc,XDIM,YDIM*ZDIM,pitch); hipBindTexture2D(0,&texRef_f14B,fB_d+pitch_elements*YDIM*ZDIM*14,&desc,XDIM,YDIM*ZDIM,pitch); hipBindTexture2D(0,&texRef_f15B,fB_d+pitch_elements*YDIM*ZDIM*15,&desc,XDIM,YDIM*ZDIM,pitch); hipBindTexture2D(0,&texRef_f16B,fB_d+pitch_elements*YDIM*ZDIM*16,&desc,XDIM,YDIM*ZDIM,pitch); hipBindTexture2D(0,&texRef_f17B,fB_d+pitch_elements*YDIM*ZDIM*17,&desc,XDIM,YDIM*ZDIM,pitch); hipBindTexture2D(0,&texRef_f18B,fB_d+pitch_elements*YDIM*ZDIM*18,&desc,XDIM,YDIM*ZDIM,pitch); // if(REFINEMENT == "YES"){ hipBindTexture2D(0,&texRef_f0C, fC_d ,&desc,XLRDIM,YLRDIM*ZLRDIM,pitch2); hipBindTexture2D(0,&texRef_f1C, fC_d+pitch_elements2*YLRDIM*ZLRDIM ,&desc,XLRDIM,YLRDIM*ZLRDIM,pitch2); hipBindTexture2D(0,&texRef_f2C, fC_d+pitch_elements2*YLRDIM*ZLRDIM*2 ,&desc,XLRDIM,YLRDIM*ZLRDIM,pitch2); hipBindTexture2D(0,&texRef_f3C, fC_d+pitch_elements2*YLRDIM*ZLRDIM*3 ,&desc,XLRDIM,YLRDIM*ZLRDIM,pitch2); hipBindTexture2D(0,&texRef_f4C, fC_d+pitch_elements2*YLRDIM*ZLRDIM*4 ,&desc,XLRDIM,YLRDIM*ZLRDIM,pitch2); hipBindTexture2D(0,&texRef_f5C, fC_d+pitch_elements2*YLRDIM*ZLRDIM*5 ,&desc,XLRDIM,YLRDIM*ZLRDIM,pitch2); hipBindTexture2D(0,&texRef_f6C, fC_d+pitch_elements2*YLRDIM*ZLRDIM*6 ,&desc,XLRDIM,YLRDIM*ZLRDIM,pitch2); hipBindTexture2D(0,&texRef_f7C, fC_d+pitch_elements2*YLRDIM*ZLRDIM*7 ,&desc,XLRDIM,YLRDIM*ZLRDIM,pitch2); hipBindTexture2D(0,&texRef_f8C, fC_d+pitch_elements2*YLRDIM*ZLRDIM*8 ,&desc,XLRDIM,YLRDIM*ZLRDIM,pitch2); hipBindTexture2D(0,&texRef_f9C, fC_d+pitch_elements2*YLRDIM*ZLRDIM*9 ,&desc,XLRDIM,YLRDIM*ZLRDIM,pitch2); hipBindTexture2D(0,&texRef_f10C,fC_d+pitch_elements2*YLRDIM*ZLRDIM*10,&desc,XLRDIM,YLRDIM*ZLRDIM,pitch2); hipBindTexture2D(0,&texRef_f11C,fC_d+pitch_elements2*YLRDIM*ZLRDIM*11,&desc,XLRDIM,YLRDIM*ZLRDIM,pitch2); hipBindTexture2D(0,&texRef_f12C,fC_d+pitch_elements2*YLRDIM*ZLRDIM*12,&desc,XLRDIM,YLRDIM*ZLRDIM,pitch2); hipBindTexture2D(0,&texRef_f13C,fC_d+pitch_elements2*YLRDIM*ZLRDIM*13,&desc,XLRDIM,YLRDIM*ZLRDIM,pitch2); hipBindTexture2D(0,&texRef_f14C,fC_d+pitch_elements2*YLRDIM*ZLRDIM*14,&desc,XLRDIM,YLRDIM*ZLRDIM,pitch2); hipBindTexture2D(0,&texRef_f15C,fC_d+pitch_elements2*YLRDIM*ZLRDIM*15,&desc,XLRDIM,YLRDIM*ZLRDIM,pitch2); hipBindTexture2D(0,&texRef_f16C,fC_d+pitch_elements2*YLRDIM*ZLRDIM*16,&desc,XLRDIM,YLRDIM*ZLRDIM,pitch2); hipBindTexture2D(0,&texRef_f17C,fC_d+pitch_elements2*YLRDIM*ZLRDIM*17,&desc,XLRDIM,YLRDIM*ZLRDIM,pitch2); hipBindTexture2D(0,&texRef_f18C,fC_d+pitch_elements2*YLRDIM*ZLRDIM*18,&desc,XLRDIM,YLRDIM*ZLRDIM,pitch2); hipBindTexture2D(0,&texRef_f0D, fD_d ,&desc,XLRDIM,YLRDIM*ZLRDIM,pitch2); hipBindTexture2D(0,&texRef_f1D, fD_d+pitch_elements2*YLRDIM*ZLRDIM ,&desc,XLRDIM,YLRDIM*ZLRDIM,pitch2); hipBindTexture2D(0,&texRef_f2D, fD_d+pitch_elements2*YLRDIM*ZLRDIM*2 ,&desc,XLRDIM,YLRDIM*ZLRDIM,pitch2); hipBindTexture2D(0,&texRef_f3D, fD_d+pitch_elements2*YLRDIM*ZLRDIM*3 ,&desc,XLRDIM,YLRDIM*ZLRDIM,pitch2); 
hipBindTexture2D(0,&texRef_f4D, fD_d+pitch_elements2*YLRDIM*ZLRDIM*4 ,&desc,XLRDIM,YLRDIM*ZLRDIM,pitch2); hipBindTexture2D(0,&texRef_f5D, fD_d+pitch_elements2*YLRDIM*ZLRDIM*5 ,&desc,XLRDIM,YLRDIM*ZLRDIM,pitch2); hipBindTexture2D(0,&texRef_f6D, fD_d+pitch_elements2*YLRDIM*ZLRDIM*6 ,&desc,XLRDIM,YLRDIM*ZLRDIM,pitch2); hipBindTexture2D(0,&texRef_f7D, fD_d+pitch_elements2*YLRDIM*ZLRDIM*7 ,&desc,XLRDIM,YLRDIM*ZLRDIM,pitch2); hipBindTexture2D(0,&texRef_f8D, fD_d+pitch_elements2*YLRDIM*ZLRDIM*8 ,&desc,XLRDIM,YLRDIM*ZLRDIM,pitch2); hipBindTexture2D(0,&texRef_f9D, fD_d+pitch_elements2*YLRDIM*ZLRDIM*9 ,&desc,XLRDIM,YLRDIM*ZLRDIM,pitch2); hipBindTexture2D(0,&texRef_f10D,fD_d+pitch_elements2*YLRDIM*ZLRDIM*10,&desc,XLRDIM,YLRDIM*ZLRDIM,pitch2); hipBindTexture2D(0,&texRef_f11D,fD_d+pitch_elements2*YLRDIM*ZLRDIM*11,&desc,XLRDIM,YLRDIM*ZLRDIM,pitch2); hipBindTexture2D(0,&texRef_f12D,fD_d+pitch_elements2*YLRDIM*ZLRDIM*12,&desc,XLRDIM,YLRDIM*ZLRDIM,pitch2); hipBindTexture2D(0,&texRef_f13D,fD_d+pitch_elements2*YLRDIM*ZLRDIM*13,&desc,XLRDIM,YLRDIM*ZLRDIM,pitch2); hipBindTexture2D(0,&texRef_f14D,fD_d+pitch_elements2*YLRDIM*ZLRDIM*14,&desc,XLRDIM,YLRDIM*ZLRDIM,pitch2); hipBindTexture2D(0,&texRef_f15D,fD_d+pitch_elements2*YLRDIM*ZLRDIM*15,&desc,XLRDIM,YLRDIM*ZLRDIM,pitch2); hipBindTexture2D(0,&texRef_f16D,fD_d+pitch_elements2*YLRDIM*ZLRDIM*16,&desc,XLRDIM,YLRDIM*ZLRDIM,pitch2); hipBindTexture2D(0,&texRef_f17D,fD_d+pitch_elements2*YLRDIM*ZLRDIM*17,&desc,XLRDIM,YLRDIM*ZLRDIM,pitch2); hipBindTexture2D(0,&texRef_f18D,fD_d+pitch_elements2*YLRDIM*ZLRDIM*18,&desc,XLRDIM,YLRDIM*ZLRDIM,pitch2); // } } hipLaunchKernelGGL(( initialize_single), dim3(grid), dim3(threads), 0, 0, fA_d,pitch_elements); hipLaunchKernelGGL(( initialize_single), dim3(grid), dim3(threads), 0, 0, fB_d,pitch_elements); if(REFINEMENT == "YES"){ hipLaunchKernelGGL(( initialize_LR), dim3(grid2), dim3(threads2), 0, 0, fC_d,pitch_elements2); hipLaunchKernelGGL(( initialize_LR), dim3(grid2), dim3(threads2), 0, 0, fD_d,pitch_elements2); } hipFuncSetCacheConfig(mrt_d_single,hipFuncCachePreferL1); hipFuncSetCacheConfig(LR_d_ABCD_force,hipFuncCachePreferL1); hipFuncSetCacheConfig(LR_d_ABCD,hipFuncCachePreferL1); hipFuncSetCacheConfig(LR_d_ABCD2,hipFuncCachePreferL1); hipFuncSetCacheConfig(LR_d_ABDC2,hipFuncCachePreferL1); hipFuncSetCacheConfig(LR_d_ABDC_Interp,hipFuncCachePreferL1); hipFuncSetCacheConfig(LR_d_BACD_force,hipFuncCachePreferL1); hipFuncSetCacheConfig(LR_d_BACD,hipFuncCachePreferL1); hipFuncSetCacheConfig(LR_d_BADC2,hipFuncCachePreferL1); hipFuncSetCacheConfig(LR_d_BADC_Interp,hipFuncCachePreferL1); hipFuncSetCacheConfig(ExtractFromC_d,hipFuncCachePreferL1); hipFuncSetCacheConfig(simple_copy,hipFuncCachePreferL1); struct timeval tdr0,tdr1; double restime; hipDeviceSynchronize(); gettimeofday (&tdr0,NULL); for(int t = 0; t<TMAX; t=t+2){ if(METHOD == "SINGLE"){ if(t >= STARTF) hipLaunchKernelGGL(( mrt_d_single_force), dim3(grid), dim3(threads), 0, 0, fA_d,fB_d,omega,pitch_elements,FX_d,FY_d,FZ_d,t,uAv_d,vAv_d,ufluc_d,vfluc_d); else hipLaunchKernelGGL(( mrt_d_single), dim3(grid), dim3(threads), 0, 0, fA_d,fB_d,omega,pitch_elements); if(REFINEMENT == "YES"){ if(LRFACTOR == 0.5f) { if(t >= STARTF) hipLaunchKernelGGL(( LR_d_ABCD_force), dim3(grid2), dim3(threads2), 0, 0, fC_d,fD_d,omega2,pitch_elements2,FX_d,FY_d,FZ_d,t,uAvLR_d,vAvLR_d,uflucLR_d,vflucLR_d); else hipLaunchKernelGGL(( LR_d_ABCD), dim3(grid2), dim3(threads2), 0, 0, fC_d,fD_d,omega2,pitch_elements2,t,uAvLR_d,vAvLR_d,uflucLR_d,vflucLR_d); //LR_d_ABDC<<<grid2, 
threads2>>>(fD_d,fC_d,omega2,pitch_elements2,SF_cf); hipLaunchKernelGGL(( LR_d_ABDC_Interp), dim3(grid2), dim3(threads2), 0, 0, fD_d,fC_d,omega2,pitch_elements2,SF_cf,t,uAvLR_d,vAvLR_d,uflucLR_d,vflucLR_d); } else if(LRFACTOR == 0.25f) { if(t >= STARTF) hipLaunchKernelGGL(( LR_d_ABCD_force), dim3(grid2), dim3(threads2), 0, 0, fC_d,fD_d,omega2,pitch_elements2,FX_d,FY_d,FZ_d,t,uAvLR_d,vAvLR_d,uflucLR_d,vflucLR_d); else hipLaunchKernelGGL(( LR_d_ABCD), dim3(grid2), dim3(threads2), 0, 0, fC_d,fD_d,omega2,pitch_elements2,t,uAvLR_d,vAvLR_d,uflucLR_d,vflucLR_d); hipLaunchKernelGGL(( LR_d_ABDC2), dim3(grid2), dim3(threads2), 0, 0, fD_d,fC_d,omega2,pitch_elements2,SF_cf,2,t,uAvLR_d,vAvLR_d,uflucLR_d,vflucLR_d); hipLaunchKernelGGL(( LR_d_ABCD2), dim3(grid2), dim3(threads2), 0, 0, fC_d,fD_d,omega2,pitch_elements2,3,t,uAvLR_d,vAvLR_d,uflucLR_d,vflucLR_d); hipLaunchKernelGGL(( LR_d_ABDC_Interp), dim3(grid2), dim3(threads2), 0, 0, fD_d,fC_d,omega2,pitch_elements2,SF_cf,t,uAvLR_d,vAvLR_d,uflucLR_d,vflucLR_d); } //ExtractFromC_d<<<grid, threads>>>(fB_d,pitch_elements,SF_fc); hipLaunchKernelGGL(( ExtractFromC_d), dim3(grid), dim3(threads), 0, 0, fB_d,pitch_elements,omega,omega2); } if(t >= STARTF) hipLaunchKernelGGL(( mrt_d_single_force), dim3(grid), dim3(threads), 0, 0, fB_d,fA_d,omega,pitch_elements,FX_d,FY_d,FZ_d,t+1,uAv_d,vAv_d,ufluc_d,vfluc_d); else hipLaunchKernelGGL(( mrt_d_single), dim3(grid), dim3(threads), 0, 0, fB_d,fA_d,omega,pitch_elements); if(REFINEMENT == "YES"){ if(LRFACTOR == 0.5f) { if(t >= STARTF) hipLaunchKernelGGL(( LR_d_BACD_force), dim3(grid2), dim3(threads2), 0, 0, fC_d,fD_d,omega2,pitch_elements2,FX_d,FY_d,FZ_d,t+1,uAvLR_d,vAvLR_d,uflucLR_d,vflucLR_d); else hipLaunchKernelGGL(( LR_d_BACD), dim3(grid2), dim3(threads2), 0, 0, fC_d,fD_d,omega2,pitch_elements2,t+1,uAvLR_d,vAvLR_d,uflucLR_d,vflucLR_d); hipLaunchKernelGGL(( LR_d_BADC_Interp), dim3(grid2), dim3(threads2), 0, 0, fD_d,fC_d,omega2,pitch_elements2,SF_cf,t+1,uAvLR_d,vAvLR_d,uflucLR_d,vflucLR_d); } else if(LRFACTOR == 0.25f) { if(t >= STARTF) hipLaunchKernelGGL(( LR_d_BACD_force), dim3(grid2), dim3(threads2), 0, 0, fC_d,fD_d,omega2,pitch_elements2,FX_d,FY_d,FZ_d,t+1,uAvLR_d,vAvLR_d,uflucLR_d,vflucLR_d); else hipLaunchKernelGGL(( LR_d_BACD), dim3(grid2), dim3(threads2), 0, 0, fC_d,fD_d,omega2,pitch_elements2,t+1,uAvLR_d,vAvLR_d,uflucLR_d,vflucLR_d); hipLaunchKernelGGL(( LR_d_BADC2), dim3(grid2), dim3(threads2), 0, 0, fD_d,fC_d,omega2,pitch_elements2,SF_cf,2,t+1,uAvLR_d,vAvLR_d,uflucLR_d,vflucLR_d); hipLaunchKernelGGL(( LR_d_ABCD2), dim3(grid2), dim3(threads2), 0, 0, fC_d,fD_d,omega2,pitch_elements2,3,t+1,uAvLR_d,vAvLR_d,uflucLR_d,vflucLR_d); hipLaunchKernelGGL(( LR_d_BADC_Interp), dim3(grid2), dim3(threads2), 0, 0, fD_d,fC_d,omega2,pitch_elements2,SF_cf,t+1,uAvLR_d,vAvLR_d,uflucLR_d,vflucLR_d); } //ExtractFromC_d<<<grid, threads>>>(fA_d,pitch_elements,SF_fc); hipLaunchKernelGGL(( ExtractFromC_d), dim3(grid), dim3(threads), 0, 0, fA_d,pitch_elements,omega,omega2); } } // else if(METHOD == "CACHE"){ // mrt_d_cache<<<grid, threads>>>(fA_d,fB_d,omega,pitch_elements); // mrt_d_cache<<<grid, threads>>>(fB_d,fA_d,omega,pitch_elements); // } // // else if(METHOD == "HYB"){ // if(t >= STARTF && REFINEMENT == "NO") // mrt_d_hybAB_force<<<grid, threads>>>(fA_d,fB_d,omega,pitch_elements,FX_d,FY_d,FZ_d,t); // else // mrt_d_hybAB<<<grid, threads>>>(fA_d,fB_d,omega,pitch_elements); // // if(REFINEMENT == "YES"){ // if(LRFACTOR == 0.5f) // { // if(t >= STARTF) // LR_d_hybABCD_force<<<grid2, 
threads2>>>(fC_d,fD_d,omega2,pitch_elements2,FX_d,FY_d,FZ_d,t); // else // LR_d_hybABCD<<<grid2, threads2>>>(fC_d,fD_d,omega2,pitch_elements2); // // LR_d_hybABDC_Interp<<<grid2, threads2>>>(fD_d,fC_d,omega2,pitch_elements2,SF_cf); // } // else if(LRFACTOR == 0.25f) // { // if(t >= STARTF) // LR_d_hybABCD_force<<<grid2, threads2>>>(fC_d,fD_d,omega2,pitch_elements2,FX_d,FY_d,FZ_d,t); // else // LR_d_hybABCD<<<grid2, threads2>>>(fC_d,fD_d,omega2,pitch_elements2); // // LR_d_hybABDC2<<<grid2, threads2>>>(fD_d,fC_d,omega2,pitch_elements2,SF_cf,2); // // LR_d_hybABCD2<<<grid2, threads2>>>(fC_d,fD_d,omega2,pitch_elements2,3); // LR_d_hybABDC_Interp<<<grid2, threads2>>>(fD_d,fC_d,omega2,pitch_elements2,SF_cf); // } // // hipLaunchKernelGGL(( ExtractFromC_d), dim3(grid), dim3(threads), 0, 0, fB_d,pitch_elements,SF_fc); // } // // if(t >= STARTF && REFINEMENT == "NO") // mrt_d_hybBA_force<<<grid, threads>>>(fB_d,fA_d,omega,pitch_elements,FX_d,FY_d,FZ_d,t+1); // else // mrt_d_hybBA<<<grid, threads>>>(fB_d,fA_d,omega,pitch_elements); // // if(REFINEMENT == "YES"){ // if(LRFACTOR == 0.5f) // { // if(t >= STARTF) // LR_d_hybABCD_force<<<grid2, threads2>>>(fC_d,fD_d,omega2,pitch_elements2,FX_d,FY_d,FZ_d,t+1); // else // LR_d_hybABCD<<<grid2, threads2>>>(fC_d,fD_d,omega2,pitch_elements2); // // LR_d_hybBADC_Interp<<<grid2, threads2>>>(fD_d,fC_d,omega2,pitch_elements2,SF_cf); // } // else if(LRFACTOR == 0.25f) // { // if(t >= STARTF) // LR_d_hybABCD_force<<<grid2, threads2>>>(fC_d,fD_d,omega2,pitch_elements2,FX_d,FY_d,FZ_d,t+1); // else // LR_d_hybABCD<<<grid2, threads2>>>(fC_d,fD_d,omega2,pitch_elements2); // // LR_d_hybBADC2<<<grid2, threads2>>>(fD_d,fC_d,omega2,pitch_elements2,SF_cf,2); // // LR_d_hybABCD2<<<grid2, threads2>>>(fC_d,fD_d,omega2,pitch_elements2,3); // LR_d_hybBADC_Interp<<<grid2, threads2>>>(fD_d,fC_d,omega2,pitch_elements2,SF_cf); // } // // hipLaunchKernelGGL(( ExtractFromC_d), dim3(grid), dim3(threads), 0, 0, fA_d,pitch_elements,SF_fc); // } // } // else if(METHOD == "TEXT"){ // mrt_d_textAB<<<grid, threads>>>(fA_d,fB_d,omega,pitch_elements); // mrt_d_textBA<<<grid, threads>>>(fB_d,fA_d,omega,pitch_elements); // } // // else if(METHOD == "SHARED"){ // mrt_d_shared<<<grid, threads>>>(fA_d,fB_d,omega,pitch_elements); // mrt_d_shared<<<grid, threads>>>(fB_d,fA_d,omega,pitch_elements); // } // simple_copy<<<grid, threads>>>(fA_d,fB_d,pitch_elements); // simple_copy<<<grid, threads>>>(fB_d,fA_d,pitch_elements); // // simple_text<<<grid, threads>>>(fA_d,fB_d,pitch_elements); // simple_text<<<grid, threads>>>(fB_d,fA_d,pitch_elements); if(t%1000 == 0 && t>0) cout<<"finished "<<t<<" timesteps\n"; } hipDeviceSynchronize(); gettimeofday (&tdr1,NULL); timeval_subtract (&restime, &tdr1, &tdr0); int Nodes; if(REFINEMENT == "YES"){ Nodes = (XDIM*YDIM*ZDIM+XLRDIM*YLRDIM*ZLRDIM*LRLEVEL); } else{ Nodes = XDIM*YDIM*ZDIM; } cout<<"Time taken for main kernel: "<<restime<<" (" <<double(Nodes*double(TMAX/1000000.f))/restime<<"MLUPS)"; if(REFINEMENT == "YES"){ int effNodes = (XDIM*YDIM*ZDIM+XLRDIM*YLRDIM*ZLRDIM*LRLEVEL -(XLRDIM/LRLEVEL)*(YLRDIM/LRLEVEL)*(YLRDIM/LRLEVEL)); cout<<" (eff: "<<double(effNodes*double(TMAX/1000000.f))/restime<<"MLUPS)"; } cout<<endl; cout<<XDIM<<","<<YDIM<<","<<ZDIM<<","<<TMAX<<","<<restime<<endl; if(true){ hipUnbindTexture(texRef_f0A); hipUnbindTexture(texRef_f1A); hipUnbindTexture(texRef_f2A); hipUnbindTexture(texRef_f3A); hipUnbindTexture(texRef_f4A); hipUnbindTexture(texRef_f5A); hipUnbindTexture(texRef_f6A); hipUnbindTexture(texRef_f7A); hipUnbindTexture(texRef_f8A); 
hipUnbindTexture(texRef_f9A); hipUnbindTexture(texRef_f10A); hipUnbindTexture(texRef_f11A); hipUnbindTexture(texRef_f12A); hipUnbindTexture(texRef_f13A); hipUnbindTexture(texRef_f14A); hipUnbindTexture(texRef_f15A); hipUnbindTexture(texRef_f16A); hipUnbindTexture(texRef_f17A); hipUnbindTexture(texRef_f18A); hipUnbindTexture(texRef_f0B); hipUnbindTexture(texRef_f1B); hipUnbindTexture(texRef_f2B); hipUnbindTexture(texRef_f3B); hipUnbindTexture(texRef_f4B); hipUnbindTexture(texRef_f5B); hipUnbindTexture(texRef_f6B); hipUnbindTexture(texRef_f7B); hipUnbindTexture(texRef_f8B); hipUnbindTexture(texRef_f9B); hipUnbindTexture(texRef_f10B); hipUnbindTexture(texRef_f11B); hipUnbindTexture(texRef_f12B); hipUnbindTexture(texRef_f13B); hipUnbindTexture(texRef_f14B); hipUnbindTexture(texRef_f15B); hipUnbindTexture(texRef_f16B); hipUnbindTexture(texRef_f17B); hipUnbindTexture(texRef_f18B); hipUnbindTexture(texRef_f0C); hipUnbindTexture(texRef_f1C); hipUnbindTexture(texRef_f2C); hipUnbindTexture(texRef_f3C); hipUnbindTexture(texRef_f4C); hipUnbindTexture(texRef_f5C); hipUnbindTexture(texRef_f6C); hipUnbindTexture(texRef_f7C); hipUnbindTexture(texRef_f8C); hipUnbindTexture(texRef_f9C); hipUnbindTexture(texRef_f10C); hipUnbindTexture(texRef_f11C); hipUnbindTexture(texRef_f12C); hipUnbindTexture(texRef_f13C); hipUnbindTexture(texRef_f14C); hipUnbindTexture(texRef_f15C); hipUnbindTexture(texRef_f16C); hipUnbindTexture(texRef_f17C); hipUnbindTexture(texRef_f18C); hipUnbindTexture(texRef_f0D); hipUnbindTexture(texRef_f1D); hipUnbindTexture(texRef_f2D); hipUnbindTexture(texRef_f3D); hipUnbindTexture(texRef_f4D); hipUnbindTexture(texRef_f5D); hipUnbindTexture(texRef_f6D); hipUnbindTexture(texRef_f7D); hipUnbindTexture(texRef_f8D); hipUnbindTexture(texRef_f9D); hipUnbindTexture(texRef_f10D); hipUnbindTexture(texRef_f11D); hipUnbindTexture(texRef_f12D); hipUnbindTexture(texRef_f13D); hipUnbindTexture(texRef_f14D); hipUnbindTexture(texRef_f15D); hipUnbindTexture(texRef_f16D); hipUnbindTexture(texRef_f17D); hipUnbindTexture(texRef_f18D); } hipMemcpy2D(fA_h,XDIM*sizeof(float),fA_d,pitch,XDIM*sizeof(float),YDIM*ZDIM*19,hipMemcpyDeviceToHost); if(REFINEMENT == "YES"){ hipMemcpy2D(fC_h,XLRDIM*sizeof(float),fC_d,pitch2,XLRDIM*sizeof(float),YLRDIM*ZLRDIM*19,hipMemcpyDeviceToHost); } if(VELAV == "YES"){ hipMemcpy2D(uAv_h,XDIM*sizeof(float),uAv_d,pitch,XDIM*sizeof(float),YDIM*ZDIM,hipMemcpyDeviceToHost); hipMemcpy2D(vAv_h,XDIM*sizeof(float),vAv_d,pitch,XDIM*sizeof(float),YDIM*ZDIM,hipMemcpyDeviceToHost); hipMemcpy2D(wAv_h,XDIM*sizeof(float),wAv_d,pitch,XDIM*sizeof(float),YDIM*ZDIM,hipMemcpyDeviceToHost); hipMemcpy2D(ufluc_h,XDIM*sizeof(float),ufluc_d,pitch,XDIM*sizeof(float),YDIM*ZDIM,hipMemcpyDeviceToHost); hipMemcpy2D(vfluc_h,XDIM*sizeof(float),vfluc_d,pitch,XDIM*sizeof(float),YDIM*ZDIM,hipMemcpyDeviceToHost); hipMemcpy2D(wfluc_h,XDIM*sizeof(float),wfluc_d,pitch,XDIM*sizeof(float),YDIM*ZDIM,hipMemcpyDeviceToHost); if(REFINEMENT == "YES"){ hipMemcpy2D(uAvLR_h,XLRDIM*sizeof(float),uAvLR_d,pitch2,XLRDIM*sizeof(float),YLRDIM*ZLRDIM,hipMemcpyDeviceToHost); hipMemcpy2D(vAvLR_h,XLRDIM*sizeof(float),vAvLR_d,pitch2,XLRDIM*sizeof(float),YLRDIM*ZLRDIM,hipMemcpyDeviceToHost); hipMemcpy2D(wAvLR_h,XLRDIM*sizeof(float),wAvLR_d,pitch2,XLRDIM*sizeof(float),YLRDIM*ZLRDIM,hipMemcpyDeviceToHost); hipMemcpy2D(uflucLR_h,XLRDIM*sizeof(float),uflucLR_d,pitch2,XLRDIM*sizeof(float),YLRDIM*ZLRDIM,hipMemcpyDeviceToHost); hipMemcpy2D(vflucLR_h,XLRDIM*sizeof(float),vflucLR_d,pitch2,XLRDIM*sizeof(float),YLRDIM*ZLRDIM,hipMemcpyDeviceToHost); 
hipMemcpy2D(wflucLR_h,XLRDIM*sizeof(float),wflucLR_d,pitch2,XLRDIM*sizeof(float),YLRDIM*ZLRDIM,hipMemcpyDeviceToHost); } } hipMemcpy(FX_h, FX_d, TMAX*sizeof(float), hipMemcpyDeviceToHost); hipMemcpy(FY_h, FY_d, TMAX*sizeof(float), hipMemcpyDeviceToHost); hipMemcpy(FZ_h, FZ_d, TMAX*sizeof(float), hipMemcpyDeviceToHost); //hipMemcpy(image_h, image_d, memsize_int, hipMemcpyDeviceToHost); output<<"VARIABLES = \"X\",\"Y\",\"Z\",\"u\",\"v\",\"w\",\"rho\",\"uAv\",\"vAv\",\"ufluc\",\"vfluc\"\n"; output<<"ZONE F=POINT, I="<<XDIM<<", J="<<YDIM<<", K="<<ZDIM<<"\n"; int row = 0; int col = 0; int dep = 0; i = 0; float rho, u, v, w;//, usqr; //int j; int check = 0; float f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18; for(dep = 0; dep<ZDIM; dep++){ for(row = 0; row<YDIM; row++){ for(col = 0; col<XDIM; col++){ i = dep*XDIM*YDIM+row*XDIM+col; f0 = fA_h[i+XDIM*YDIM*ZDIM*0 ]; f1 = fA_h[i+XDIM*YDIM*ZDIM*1 ]; f2 = fA_h[i+XDIM*YDIM*ZDIM*2 ]; f3 = fA_h[i+XDIM*YDIM*ZDIM*3 ]; f4 = fA_h[i+XDIM*YDIM*ZDIM*4 ]; f5 = fA_h[i+XDIM*YDIM*ZDIM*5 ]; f6 = fA_h[i+XDIM*YDIM*ZDIM*6 ]; f7 = fA_h[i+XDIM*YDIM*ZDIM*7 ]; f8 = fA_h[i+XDIM*YDIM*ZDIM*8 ]; f9 = fA_h[i+XDIM*YDIM*ZDIM*9 ]; f10= fA_h[i+XDIM*YDIM*ZDIM*10]; f11= fA_h[i+XDIM*YDIM*ZDIM*11]; f12= fA_h[i+XDIM*YDIM*ZDIM*12]; f13= fA_h[i+XDIM*YDIM*ZDIM*13]; f14= fA_h[i+XDIM*YDIM*ZDIM*14]; f15= fA_h[i+XDIM*YDIM*ZDIM*15]; f16= fA_h[i+XDIM*YDIM*ZDIM*16]; f17= fA_h[i+XDIM*YDIM*ZDIM*17]; f18= fA_h[i+XDIM*YDIM*ZDIM*18]; rho = f0+f1+f2+f3+f4+f5+f6+f7+f8+f9; rho +=f10+f11+f12+f13+f14+f15+f16+f17+f18; u = f1-f3+f5-f6-f7+f8+f10-f12+f15-f17; v = f2-f4+f5+f6-f7-f8+f11-f13+f16-f18; w = f9+f10+f11+f12+f13-f14-f15-f16-f17-f18; float usqr = u*u+v*v+w*w; // float m1 = 19.f*(-f0+ f5+f6+f7+f8+f10+f11+f12+f13+f15+f16+f17+f18 -(u*u+v*v+w*w)); // float m9 = 2.f*f1+ - f2+ 2.f*f3+ - f4+ f5+ f6+ f7+ f8+- f9+ f10+ -2.f*f11+ f12+-2.f*f13+- f14+ f15+ -2.f*f16+ f17+-2.f*f18 -(2.f*u*u-(v*v+w*w)); // float m11 = f2 + f4+ f5+ f6+ f7+ f8+- f9+-f10 +-f12 +- f14+-f15 +-f17 -(v*v-w*w); // float m13 = f5+-f6+ f7+-f8 -u*v; // float m14 = f11 +- f13 + - f16 + f18 -v*w; // float m15 = f10 + - f12 +-f15 + f17 -u*w; // float PI11 = -0.026315789f*m1-0.5f *omega*m9; // float PI22 = -0.026315789f*m1+0.25f*omega*(m9-3.0f*m11); // float PI33 = -0.026315789f*m1+0.25f*omega*(m9+3.0f*m11); // // float PI12 = -1.5f*omega*m13; // float PI23 = -1.5f*omega*m14; // float PI13 = -1.5f*omega*m15; // //float nu0 = ((1.0f/omega)-0.5f)/3.0f; // float Smag = sqrt(2.f*(PI11*PI11+PI22*PI22+PI33*PI33+2.f*PI12*PI12+2.f*PI23*PI23+2.f*PI13*PI13)); float feq0 = 0.1904761791f*rho+-0.597127747f*usqr ; float feq1 = 0.1031746045f*rho+ 0.032375918f*usqr+ 0.1666666667f*u ; float feq2 = 0.1031746045f*rho+ 0.032375918f*usqr+ 0.1666666667f*v ; float feq3 = 0.1031746045f*rho+ 0.032375918f*usqr+ -0.1666666667f*u ; float feq4 = 0.1031746045f*rho+ 0.032375918f*usqr+ -0.1666666667f*v ; float feq5 = 0.0158730149f*rho+ 0.033572690f*usqr+ 0.083333333f*( u+v) ; float feq6 = 0.0158730149f*rho+ 0.033572690f*usqr+ -0.083333333f*( u-v) ; float feq7 = 0.0158730149f*rho+ 0.033572690f*usqr+ -0.083333333f*( u+v) ; float feq8 = 0.0158730149f*rho+ 0.033572690f*usqr+ 0.083333333f*( u-v) ; float feq9 = 0.1031746045f*rho+ 0.032375918f*usqr+ 0.1666666667f*w; float feq10= 0.0158730149f*rho+ 0.033572690f*usqr+ 0.083333333f*( u+w) ; float feq11= 0.0158730149f*rho+ 0.033572690f*usqr+ 0.083333333f*( v+w); float feq12= 0.0158730149f*rho+ 0.033572690f*usqr+ -0.083333333f*( u-w) ; float feq13= 0.0158730149f*rho+ 0.033572690f*usqr+ -0.083333333f*( v-w); float feq14= 
0.1031746045f*rho+ 0.032375918f*usqr+ -0.1666666667f*w; float feq15= 0.0158730149f*rho+ 0.033572690f*usqr+ 0.083333333f*(u-w) ; float feq16= 0.0158730149f*rho+ 0.033572690f*usqr+ 0.083333333f*(v-w); float feq17= 0.0158730149f*rho+ 0.033572690f*usqr+ -0.083333333f*(u+w) ; float feq18= 0.0158730149f*rho+ 0.033572690f*usqr+ -0.083333333f*(v+w); feq1 += 0.055555556f*(2.f*u*u-(v*v+w*w)); feq2 += -0.027777778f*(2.f*u*u-(v*v+w*w))+ 0.083333333f*(v*v-w*w); feq3 += 0.055555556f*(2.f*u*u-(v*v+w*w)); feq4 += -0.027777778f*(2.f*u*u-(v*v+w*w))+ 0.083333333f*(v*v-w*w); feq5 += 0.027777778f*(2.f*u*u-(v*v+w*w))+ 0.083333333f*(v*v-w*w)+ 0.25f*u*v ; feq6 += 0.027777778f*(2.f*u*u-(v*v+w*w))+ 0.083333333f*(v*v-w*w)+ -0.25f*u*v ; feq7 += 0.027777778f*(2.f*u*u-(v*v+w*w))+ 0.083333333f*(v*v-w*w)+ 0.25f*u*v ; feq8 += 0.027777778f*(2.f*u*u-(v*v+w*w))+ 0.083333333f*(v*v-w*w)+ -0.25f*u*v ; feq9 += -0.027777778f*(2.f*u*u-(v*v+w*w))+ -0.083333333f*(v*v-w*w) ; feq10+= 0.027777778f*(2.f*u*u-(v*v+w*w))+ -0.083333333f*(v*v-w*w) + 0.25f*u*w; feq11+= -0.055555556f*(2.f*u*u-(v*v+w*w)) + 0.25f*v*w ; feq12+= 0.027777778f*(2.f*u*u-(v*v+w*w))+ -0.083333333f*(v*v-w*w) + -0.25f*u*w; feq13+= -0.055555556f*(2.f*u*u-(v*v+w*w)) -0.25f*v*w ; feq14+= -0.027777778f*(2.f*u*u-(v*v+w*w))+ -0.083333333f*(v*v-w*w) ; feq15+= 0.027777778f*(2.f*u*u-(v*v+w*w))+ -0.083333333f*(v*v-w*w) + -0.25f*u*w; feq16+= -0.055555556f*(2.f*u*u-(v*v+w*w)) + -0.25f*v*w ; feq17+= 0.027777778f*(2.f*u*u-(v*v+w*w))+ -0.083333333f*(v*v-w*w) + 0.25f*u*w; feq18+= -0.055555556f*(2.f*u*u-(v*v+w*w)) + 0.25f*v*w ; float PI11 = (f1-feq1)+(f3-feq3)+(f5-feq5)+(f6-feq6)+(f7-feq7)+(f8-feq8)+(f10-feq10)+(f12-feq12)+(f15-feq15)+(f17-feq17); float PI22 = (f2-feq2)+(f4-feq4)+(f5-feq5)+(f6-feq6)+(f7-feq7)+(f8-feq8)+(f11-feq11)+(f13-feq13)+(f16-feq16)+(f18-feq18); float PI33 = (f9-feq9)+(f14-feq14)+(f10-feq10)+(f12-feq12)+(f15-feq15)+(f17-feq17)+(f11-feq11)+(f13-feq13)+(f16-feq16)+(f18-feq18); float PI12 = (f5-feq5)+(f7-feq7)-(f6-feq6)-(f8-feq8); float PI13 = (f10-feq10)+(f17-feq17)-(f12-feq12)-(f15-feq15); float PI23 = (f11-feq11)+(f18-feq18)-(f13-feq13)-(f16-feq16); float Q = sqrt(PI11*PI11+PI22*PI22+PI33*PI33+2.f*PI12*PI12+2.f*PI23*PI23+2.f*PI13*PI13); float Smag = Q*3.f*omega/(sqrt(2.f)); output<<col<<", "<<row<<", "<<dep<<", "<<u<<","<<v<<","<<w<<","<<Smag<<"," <<uAv_h[i]<<","<<vAv_h[i]<<", "<<ufluc_h[i]<<","<<vfluc_h[i]<<endl; if(rho>0.f && rho<2.f){ } else{ check = 1; } } } } if(check == 1) cout<<"error!"<<endl; if(REFINEMENT == "YES"){ output<<endl;//<<"VARIABLES = \"X\",\"Y\",\"Z\",\"u\",\"v\",\"w\",\"rho\",\"uAv\"\n"; output<<"VARIABLES = \"X\",\"Y\",\"Z\",\"u\",\"v\",\"w\",\"rho\",\"uAv\",\"vAv\",\"ufluc\",\"vfluc\"\n"; output<<"ZONE F=POINT, I="<<XLRDIM-0<<", J="<<YLRDIM-0<<", K="<<ZLRDIM-0<<"\n"; for(dep = 0; dep<ZLRDIM-0; dep++){ for(row = 0; row<YLRDIM-0; row++){ for(col = 0; col<XLRDIM-0; col++){ i = dep*XLRDIM*YLRDIM+row*XLRDIM+col; f0 = fC_h[i+XLRDIM*YLRDIM*ZLRDIM*0 ]; f1 = fC_h[i+XLRDIM*YLRDIM*ZLRDIM*1 ]; f2 = fC_h[i+XLRDIM*YLRDIM*ZLRDIM*2 ]; f3 = fC_h[i+XLRDIM*YLRDIM*ZLRDIM*3 ]; f4 = fC_h[i+XLRDIM*YLRDIM*ZLRDIM*4 ]; f5 = fC_h[i+XLRDIM*YLRDIM*ZLRDIM*5 ]; f6 = fC_h[i+XLRDIM*YLRDIM*ZLRDIM*6 ]; f7 = fC_h[i+XLRDIM*YLRDIM*ZLRDIM*7 ]; f8 = fC_h[i+XLRDIM*YLRDIM*ZLRDIM*8 ]; f9 = fC_h[i+XLRDIM*YLRDIM*ZLRDIM*9 ]; f10= fC_h[i+XLRDIM*YLRDIM*ZLRDIM*10]; f11= fC_h[i+XLRDIM*YLRDIM*ZLRDIM*11]; f12= fC_h[i+XLRDIM*YLRDIM*ZLRDIM*12]; f13= fC_h[i+XLRDIM*YLRDIM*ZLRDIM*13]; f14= fC_h[i+XLRDIM*YLRDIM*ZLRDIM*14]; f15= fC_h[i+XLRDIM*YLRDIM*ZLRDIM*15]; f16= fC_h[i+XLRDIM*YLRDIM*ZLRDIM*16]; 
f17= fC_h[i+XLRDIM*YLRDIM*ZLRDIM*17]; f18= fC_h[i+XLRDIM*YLRDIM*ZLRDIM*18]; rho = f0+f1+f2+f3+f4+f5+f6+f7+f8+f9; rho +=f10+f11+f12+f13+f14+f15+f16+f17+f18; u = f1-f3+f5-f6-f7+f8+f10-f12+f15-f17; v = f2-f4+f5+f6-f7-f8+f11-f13+f16-f18; w = f9+f10+f11+f12+f13-f14-f15-f16-f17-f18; float usqr = u*u+v*v+w*w; // float m1 = 19.f*(-f0+ f5+f6+f7+f8+f10+f11+f12+f13+f15+f16+f17+f18 -(u*u+v*v+w*w)); // float m9 = 2.f*f1+ - f2+ 2.f*f3+ - f4+ f5+ f6+ f7+ f8+- f9+ f10+ -2.f*f11+ f12+-2.f*f13+- f14+ f15+ -2.f*f16+ f17+-2.f*f18 -(2.f*u*u-(v*v+w*w)); // float m11 = f2 + f4+ f5+ f6+ f7+ f8+- f9+-f10 +-f12 +- f14+-f15 +-f17 -(v*v-w*w); // float m13 = f5+-f6+ f7+-f8 -u*v; // float m14 = f11 +- f13 + - f16 + f18 -v*w; // float m15 = f10 + - f12 +-f15 + f17 -u*w; // // float PI11 = LRLEVEL*-0.026315789f*m1-0.5f *omega*m9; // float PI22 = LRLEVEL*-0.026315789f*m1+0.25f*omega*(m9-3.0f*m11); // float PI33 = LRLEVEL*-0.026315789f*m1+0.25f*omega*(m9+3.0f*m11); // float PI12 = LRLEVEL*-1.5f*omega*m13; // float PI23 = LRLEVEL*-1.5f*omega*m14; // float PI13 = LRLEVEL*-1.5f*omega*m15; // //float nu0 = ((1.0f/omega)-0.5f)/3.0f; // float Smag = sqrt(2.f*(PI11*PI11+PI22*PI22+PI33*PI33+2.f*PI12*PI12+2.f*PI23*PI23+2.f*PI13*PI13)); float feq0 = 0.1904761791f*rho+-0.597127747f*usqr ; float feq1 = 0.1031746045f*rho+ 0.032375918f*usqr+ 0.1666666667f*u ; float feq2 = 0.1031746045f*rho+ 0.032375918f*usqr+ 0.1666666667f*v ; float feq3 = 0.1031746045f*rho+ 0.032375918f*usqr+ -0.1666666667f*u ; float feq4 = 0.1031746045f*rho+ 0.032375918f*usqr+ -0.1666666667f*v ; float feq5 = 0.0158730149f*rho+ 0.033572690f*usqr+ 0.083333333f*( u+v) ; float feq6 = 0.0158730149f*rho+ 0.033572690f*usqr+ -0.083333333f*( u-v) ; float feq7 = 0.0158730149f*rho+ 0.033572690f*usqr+ -0.083333333f*( u+v) ; float feq8 = 0.0158730149f*rho+ 0.033572690f*usqr+ 0.083333333f*( u-v) ; float feq9 = 0.1031746045f*rho+ 0.032375918f*usqr+ 0.1666666667f*w; float feq10= 0.0158730149f*rho+ 0.033572690f*usqr+ 0.083333333f*( u+w) ; float feq11= 0.0158730149f*rho+ 0.033572690f*usqr+ 0.083333333f*( v+w); float feq12= 0.0158730149f*rho+ 0.033572690f*usqr+ -0.083333333f*( u-w) ; float feq13= 0.0158730149f*rho+ 0.033572690f*usqr+ -0.083333333f*( v-w); float feq14= 0.1031746045f*rho+ 0.032375918f*usqr+ -0.1666666667f*w; float feq15= 0.0158730149f*rho+ 0.033572690f*usqr+ 0.083333333f*(u-w) ; float feq16= 0.0158730149f*rho+ 0.033572690f*usqr+ 0.083333333f*(v-w); float feq17= 0.0158730149f*rho+ 0.033572690f*usqr+ -0.083333333f*(u+w) ; float feq18= 0.0158730149f*rho+ 0.033572690f*usqr+ -0.083333333f*(v+w); feq1 += 0.055555556f*(2.f*u*u-(v*v+w*w)); feq2 += -0.027777778f*(2.f*u*u-(v*v+w*w))+ 0.083333333f*(v*v-w*w); feq3 += 0.055555556f*(2.f*u*u-(v*v+w*w)); feq4 += -0.027777778f*(2.f*u*u-(v*v+w*w))+ 0.083333333f*(v*v-w*w); feq5 += 0.027777778f*(2.f*u*u-(v*v+w*w))+ 0.083333333f*(v*v-w*w)+ 0.25f*u*v ; feq6 += 0.027777778f*(2.f*u*u-(v*v+w*w))+ 0.083333333f*(v*v-w*w)+ -0.25f*u*v ; feq7 += 0.027777778f*(2.f*u*u-(v*v+w*w))+ 0.083333333f*(v*v-w*w)+ 0.25f*u*v ; feq8 += 0.027777778f*(2.f*u*u-(v*v+w*w))+ 0.083333333f*(v*v-w*w)+ -0.25f*u*v ; feq9 += -0.027777778f*(2.f*u*u-(v*v+w*w))+ -0.083333333f*(v*v-w*w) ; feq10+= 0.027777778f*(2.f*u*u-(v*v+w*w))+ -0.083333333f*(v*v-w*w) + 0.25f*u*w; feq11+= -0.055555556f*(2.f*u*u-(v*v+w*w)) + 0.25f*v*w ; feq12+= 0.027777778f*(2.f*u*u-(v*v+w*w))+ -0.083333333f*(v*v-w*w) + -0.25f*u*w; feq13+= -0.055555556f*(2.f*u*u-(v*v+w*w)) -0.25f*v*w ; feq14+= -0.027777778f*(2.f*u*u-(v*v+w*w))+ -0.083333333f*(v*v-w*w) ; feq15+= 0.027777778f*(2.f*u*u-(v*v+w*w))+ 
-0.083333333f*(v*v-w*w) + -0.25f*u*w; feq16+= -0.055555556f*(2.f*u*u-(v*v+w*w)) + -0.25f*v*w ; feq17+= 0.027777778f*(2.f*u*u-(v*v+w*w))+ -0.083333333f*(v*v-w*w) + 0.25f*u*w; feq18+= -0.055555556f*(2.f*u*u-(v*v+w*w)) + 0.25f*v*w ; float PI11 = (f1-feq1)+(f3-feq3)+(f5-feq5)+(f6-feq6)+(f7-feq7)+(f8-feq8)+(f10-feq10)+(f12-feq12)+(f15-feq15)+(f17-feq17); float PI22 = (f2-feq2)+(f4-feq4)+(f5-feq5)+(f6-feq6)+(f7-feq7)+(f8-feq8)+(f11-feq11)+(f13-feq13)+(f16-feq16)+(f18-feq18); float PI33 = (f9-feq9)+(f14-feq14)+(f10-feq10)+(f12-feq12)+(f15-feq15)+(f17-feq17)+(f11-feq11)+(f13-feq13)+(f16-feq16)+(f18-feq18); float PI12 = (f5-feq5)+(f7-feq7)-(f6-feq6)-(f8-feq8); float PI13 = (f10-feq10)+(f17-feq17)-(f12-feq12)-(f15-feq15); float PI23 = (f11-feq11)+(f18-feq18)-(f13-feq13)-(f16-feq16); float Q = sqrt(PI11*PI11+PI22*PI22+PI33*PI33+2.f*PI12*PI12+2.f*PI23*PI23+2.f*PI13*PI13); float Smag = Q*LRLEVEL*3.f*omega2/(sqrt(2.f)); output<<LRX0+col*LRFACTOR<<", "<<LRY0+row*LRFACTOR<<", " <<LRZ0+dep*LRFACTOR<<", "<<u<<","<<v<<","<<w<<","<<Smag<<"," <<uAvLR_h[i]<<","<<vAvLR_h[i]<<", "<<uflucLR_h[i]<<","<<vflucLR_h[i]<<endl; //output<<LRX0+col*LRFACTOR<<", "<<LRY0+row*LRFACTOR<<", "<<LRZ0+dep*LRFACTOR<<", "<<u<<","<<v<<","<<w<<","<<rho<<endl; } } } } output.close(); //for(int t = STARTF-1; t<TMAX; t++){ for(int t = 0; t<TMAX; t++){ output2<<t<<", "<<FX_h[t]/(0.5f*UMAX*UMAX*2.f*OBSTR1*ZDIM)<<", " <<FY_h[t]/(0.5f*UMAX*UMAX*2.f*OBSTR1*ZDIM)<<", " <<FZ_h[t]/(0.5f*UMAX*UMAX*2.f*OBSTR1*ZDIM)<<endl; // output2<<t<<", "<<FX_h[t]/(0.5f*UMAX*UMAX*OBSTR1*OBSTR1*3.14158f)<<", " // <<FY_h[t]/(0.5f*UMAX*UMAX*OBSTR1*OBSTR1*3.14158f)<<", " // <<FZ_h[t]/(0.5f*UMAX*UMAX*OBSTR1*OBSTR1*3.14158f)<<endl; } output2.close(); //hipFree(image_d); hipFree(fA_d); hipFree(fB_d); hipFree(fC_d); hipFree(fD_d); return(0); }
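//---------------------------------------------------------------------------
// Note on the force history written to output2 above: FX/FY/FZ are divided by
// 0.5f*UMAX*UMAX*2.f*OBSTR1*ZDIM, i.e. the dynamic pressure (lattice density
// rho = 1) times the frontal area of the obstacle (width 2*OBSTR1, span ZDIM).
// That is the usual drag-coefficient normalization C_D = F/(0.5*rho*U^2*A);
// the commented-out variant instead uses a circular frontal area,
// OBSTR1*OBSTR1*3.14158f (a truncated value of pi). A minimal host-side sketch
// of the same normalization -- dragCoefficient() is illustrative only and is
// not part of the original solver:
static inline float dragCoefficient(float F, float Umax, float frontalArea)
{
    // lattice units: the density rho = 1 is omitted from the denominator
    return F / (0.5f * Umax * Umax * frontalArea);
}
// e.g. dragCoefficient(FX_h[t], UMAX, 2.f*OBSTR1*ZDIM) reproduces the factor
// used in the output loop above.
//---------------------------------------------------------------------------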
fdd2cfb14e8021127286f80dc44263a6ef87da83.cu
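////////////////////////////////////////////////////////////////////////////////
// Summary (added for orientation; not part of the original file): CUDA source
// paired with the hipified solver above -- a D3Q19 lattice Boltzmann solver
// (distributions f0..f18) with compile-time switches for BGK or MRT collision
// (MODEL), optional one-level grid refinement (REFINEMENT, LRLEVEL/LRFACTOR),
// an optional Smagorinsky eddy-viscosity correction (SmagLES, CS), several
// streaming/memory strategies (METHOD), velocity averaging (VELAV), and
// Tecplot-style point output plus a force history on the embedded obstacle.
////////////////////////////////////////////////////////////////////////////////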
#include <cuda.h> //#include <cutil.h> #include <iostream> #include <ostream> #include <fstream> //#include "/home/yusuke/NVIDIA_GPU_Computing_SDK/C/common/inc/cutil.h" using namespace std; #define CASENAME "Stest" #define BLOCKSIZEX 128 #define BLOCKSIZEY 1 #define BLOCKSIZEZ 1 #define BLOCKSIZELRX 64 #define BLOCKSIZELRY 1 #define BLOCKSIZELRZ 1 #define XDIM 128 #define YDIM 160 #define ZDIM 1 #define TMAX 4000 #define STARTF 0 #define OBSTR1 4.f #define OBSTX1 63.5f #define OBSTY1 47.5f #define OBSTZ1 31.5f #define OBSTR2 4.f #define OBSTX2 63.5f #define OBSTY2 31.5f #define OBSTZ2 31.5f #define LRFACTOR 0.5f #define LRLEVEL 2 #define LRX0 31.75f //minimum x coord of LR #define XLRDIM 128 //number of nodes in x #define LRY0 23.75f #define YLRDIM 128 #define LRZ0 -0.25f #define ZLRDIM 2 //#define LRFACTOR 0.25f //#define LRLEVEL 4 //#define LRX0 31.625f //minimum x coord of LR //#define XLRDIM 256 //number of nodes in x //#define LRY0 23.625f //#define YLRDIM 256 //#define LRZ0 -0.375f //#define ZLRDIM 4 #define RE 100.f//2000.f//100.f; #define UMAX 0.08f #define METHOD "SINGLE" //SINGLE,HYB,TEXT,SHARED,CACHE #define REFINEMENT "YES" //YES,NO #define SmagLES "NO" //YES,NO #define MODEL "MRT" //BGK,MRT,STREAM #define ZPERIODIC "YES" #define VELAV "YES" #define START_VELAV 10000 #define START_VELFLUC 100000 #define CS 0.01f //#define CHARLENGTH = XDIM-2.f; //#define BLOCKSIZE 16; //int const XDIM = 32; //int const YDIM = 32; #include <sys/time.h> #include <time.h> /* Image List: 0 fluid 1 BB 2 3 DirichletWest(simple) 10 BB(force) 13 DirichletWest_Reg 14 NeumannEast_Reg 15 DirichletNorth_Reg 16 DirichletSouth_Reg 21 ysymmetry_top 22 ysymmetry_bot 23 zsymmetry_top 24 zsymmetry_bot 25 xsymmetry_top 26 xsymmetry_bot */ inline __device__ int ImageFcn(float x, float y, float z){ // if(((x-OBSTX1)*(x-OBSTX1)+(y-OBSTY1)*(y-OBSTY1))<OBSTR1*OBSTR1) // return 10; // else if(((x-OBSTX2)*(x-OBSTX2)+(y-OBSTY2)*(y-OBSTY2))<OBSTR2*OBSTR2) // return 10; //if(((x-OBSTX)*(x-OBSTX)+(y-OBSTY1)*(y-OBSTY1))<OBSTR1*OBSTR1) // if(((x-OBSTX1)*(x-OBSTX1)+(y-OBSTY1)*(y-OBSTY1)+(z-OBSTZ1)*(z-OBSTZ1))<OBSTR1*OBSTR1) // { // return 10; // } // else // //if(y < 0.1f || z < 0.1f || (XDIM-x) < 0.1f || (YDIM-y) < 0.1f || (ZDIM-z) < 0.1f) // if(y < 17.5f || z < 17.5f || y > 46.5f || z > 46.5f) // return 1; // else if(x < 17.5f) // return 13; // else if(x > 78.5f) // return 14; // else if(abs(x-OBSTX1) < OBSTR1 && abs(y-OBSTY1) < OBSTR1) return 10; else return 0; } inline __device__ int ImageFcn(int x, int y, int z){ int value = 0; //Cylinder // if(((x-OBSTX1)*(x-OBSTX1)+(y-OBSTY1)*(y-OBSTY1))<OBSTR1*OBSTR1) // value = 10; // else if(((x-OBSTX2)*(x-OBSTX2)+(y-OBSTY2)*(y-OBSTY2))<OBSTR2*OBSTR2) // value = 10; //Sphere // if(((x-OBSTX1)*(x-OBSTX1)+(y-OBSTY1)*(y-OBSTY1)+(z-OBSTZ1)*(z-OBSTZ1))<OBSTR1*OBSTR1) // { //// if(z == 0 || z == ZDIM-1) //// return 1; //// else // return 10; // } // if(z == 0) // value = 0; // else if(z == ZDIM-1) // value = 0; if(abs(x-OBSTX1) < OBSTR1 && abs(y-OBSTY1) < OBSTR1) value = 10; else if(y == 0) value = 200;//22; else if(y == YDIM-1) value = 100; else if(x == 0) value = 26; else if(x == XDIM-1) value = 25; else if(z == 0) value = 0; else if(z == ZDIM-1) value = 0; return value; //Lid Driven Cavity // if(x == XDIM-1 || y == 0 || y == YDIM-1 || z == 0 || z == ZDIM-1) // return 1; // else if(x == XDIM-2 || y == 1 || y == YDIM-2 || z == 1 || z == ZDIM-2) // return 1; // else if(x == 0) // return 1; // else if(x == 1) // return 53; // else // return 0; } inline __device__ float PoisProf (float x){ 
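// Normalized parabolic (Poiseuille) profile across the channel: with
// R = (YDIM-2)/2 this returns 1 - (1 - (x-0.5)/R)^2, which is 0 at the
// walls (x = 0.5 and x = YDIM-1.5) and 1 on the centerline (x = 0.5 + R).
// The commented-out call sites scale it by 1.5f*UMAX, which makes the mean
// of the resulting inflow profile equal to UMAX.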
float radius = (YDIM-1-1)*0.5f; float result = -1.0f*(((1.0f-(x-0.5f)/radius))*((1.0f-(x-0.5f)/radius))-1.0f); return (result); // return 1.f; } __device__ void DirichletWest(float& f0, float& f1, float& f2, float& f3 , float& f4 , float& f5 , float& f6 , float& f7 , float& f8 , float& f9, float& f10, float& f11, float& f12, float& f13, float& f14, float& f15, float& f16, float& f17, float& f18, int y, int z) { // if(y == 0){ // f2 = f4; // f6 = f7; // f11 = f13; // f16 = f18; // } // else if(y == YDIM-1){ // f4 = f2; // f7 = f6; // f13 = f11; // f18 = f16; // } // if(z == 0){ // f9 = f14; // f10 = f15; // f11 = f16; // f12 = f17; // f13 = f18; // } // else if(z == ZDIM-1){ // f14 = f9; // f15 = f10; // f16 = f11; // f17 = f12; // f18 = f13; // } if(y == 0 && z == 0){ f2 = f4; f13=f18; f11=f18; f16=f18; f6 =f7; f9 =f14; f12=f17; } else if(y == 0 && z == ZDIM-1){ f4 = f2; f11=f13; f18=f13; f16=f13; f6 =f7; f14=f9; f17=f12; } else if(y == YDIM-1 && z == 0){ f4 = f2; f11=f16; f18=f16; f13=f16; f7 =f6; f9 =f14; f12=f17; } else if(y == YDIM-1 && z == ZDIM-1){ f4 = f2; f16=f11; f18=f11; f13=f11; f7 =f6; f14=f9; f17=f12; } else{ if(y == 0){ f2 = f4; f11=f13; f16=f18; f8 = f5; } else if(y == YDIM-1){ f4=f2 ; f13=f11; f18=f16; f5=f8 ; } if(z == 0){ f9 = f14; f10 = f15; f11 = f16; f13 = f18; } else if(z == ZDIM-1){ f14 = f9; f15 = f10; f16 = f11; f18 = f13; } } float u,v,w;//,rho; u = UMAX;//*PoisProf(zcoord)*1.5; v = 0.0f; w = 0.0f; // float rho = u+(f0+f2+f4+f9+f11+f13+f14+f16+f18+2.0f*(f3+f6+f7+f12+f17)); //D2Q9i // float usqr = u*u+v*v+w*w; f1 = fma(0.0555555556f,6.0f*u,f3);//0.0555555556f*(6.0f*u)+f3;//-0.0555555556f*(rho-3.0f*u+4.5f*u*u-1.5f*usqr);; f5 = fma(0.0277777778f,6.0f*(u+v),f7 );// -0.0277777778f*(rho+3.0f*(-u-v)+4.5f*(-u-v)*(-u-v)-1.5f*usqr); f8 = fma(0.0277777778f,6.0f*(u-v),f6 );// -0.0277777778f*(rho+3.0f*(-u+v)+4.5f*(-u+v)*(-u+v)-1.5f*usqr); f10= fma(0.0277777778f,6.0f*(u+w),f17);//-0.0277777778f*(rho+3.0f*(-u-w)+4.5f*(-u-w)*(-u-w)-1.5f*usqr); f15= fma(0.0277777778f,6.0f*(u-w),f12);//-0.0277777778f*(rho+3.0f*(-u+w)+4.5f*(-u+w)*(-u+w)-1.5f*usqr); //// f0 = 1.0f/3.0f*(rho-1.5f*usqr); // f1 = 1.0f/18.0f*(rho+3.0f*u+4.5f*u*u-1.5f*usqr); //// f2 = 1.0f/18.0f*(rho+3.0f*v+4.5f*v*v-1.5f*usqr); //// f3 = 1.0f/18.0f*(rho-3.0f*u+4.5f*u*u-1.5f*usqr); //// f4 = 1.0f/18.0f*(rho-3.0f*v+4.5f*v*v-1.5f*usqr); // f5 = 1.0f/36.0f*(rho+3.0f*(u+v)+4.5f*(u+v)*(u+v)-1.5f*usqr); //// f6 = 1.0f/36.0f*(rho+3.0f*(-u+v)+4.5f*(-u+v)*(-u+v)-1.5f*usqr); //// f7 = 1.0f/36.0f*(rho+3.0f*(-u-v)+4.5f*(-u-v)*(-u-v)-1.5f*usqr); // f8 = 1.0f/36.0f*(rho+3.0f*(u-v)+4.5f*(u-v)*(u-v)-1.5f*usqr); //// f9 = 1.0f/18.0f*(rho+3.0f*w+4.5f*w*w-1.5f*usqr); // f10= 1.0f/36.0f*(rho+3.0f*(u+w)+4.5f*(u+w)*(u+w)-1.5f*usqr); //// f11= 1.0f/36.0f*(rho+3.0f*(v+w)+4.5f*(v+w)*(u+w)-1.5f*usqr); //// f12= 1.0f/36.0f*(rho+3.0f*(-u+w)+4.5f*(-u+w)*(-u+w)-1.5f*usqr); //// f13= 1.0f/36.0f*(rho+3.0f*(-v+w)+4.5f*(-v+w)*(u+w)-1.5f*usqr); //// f14= 1.0f/18.0f*(rho-3.0f*w+4.5f*w*w-1.5f*usqr); // f15= 1.0f/36.0f*(rho+3.0f*(u-w)+4.5f*(u-w)*(u-w)-1.5f*usqr); //// f16= 1.0f/36.0f*(rho+3.0f*(v-w)+4.5f*(v-w)*(v-w)-1.5f*usqr); //// f17= 1.0f/36.0f*(rho+3.0f*(-u-w)+4.5f*(-u-w)*(-u-w)-1.5f*usqr); //// f18= 1.0f/36.0f*(rho+3.0f*(-v-w)+4.5f*(-v-w)*(-v-w)-1.5f*usqr); } __device__ void DirichletWest_Reg(float& f0, float& f1, float& f2, float& f3 , float& f4 , float& f5 , float& f6 , float& f7 , float& f8 , float& f9, float& f10, float& f11, float& f12, float& f13, float& f14, float& f15, float& f16, float& f17, float& f18, int y, int z) { if(y == 0){ f2 = f4; f6 = 
f7; f11 = f13; f16 = f18; } else if(y == YDIM-1){ f4 = f2; f7 = f6; f13 = f11; f18 = f16; } if(z == 0){ f9 = f14; f10 = f15; f11 = f16; f12 = f17; f13 = f18; } else if(z == ZDIM-1){ f14 = f9; f15 = f10; f16 = f11; f17 = f12; f18 = f13; } float u,v,w;//,rho; u = UMAX;//*PoisProf(y)*1.5; v = 0.0f;//0.0; w = 0.0f; // float rho = u+(f0+f2+f4+f9+f11+f13+f14+f16+f18+2.0f*(f3+f6+f7+f12+f17)); //D2Q9i // float u2 = u*u; // float v2 = v*v; // float w2 = w*w; // float usqr = u2+v2+w2; // f1 =(0.166666667f*u)+ // (f3-(-(0.166666667f*u))); f1 = f3+0.33333333f*u; // f5 =(0.0833333333f*( u+v))+ // (f7-(0.0833333333f*(-u-v))); f5 = f7+0.166666667f*(u+v); // f8 =(0.0833333333f*( u-v ))+ // (f6-(0.0833333333f*(-u+v ))); f8 = f6+0.166666667f*(u-v); // f10=(0.0833333333f*( u+w))+ // (f17-(0.0833333333f*(-u-w))); f10= f17+0.166666667f*(u+w); // f15=(0.0833333333f*( u-w))+ // (f12-(0.0833333333f*(-u+w))); f15= f12+0.166666667f*(u-w); // f1 =(0.1031746045f*rho+ -0.0231796391f*usqr+ (0.166666667f*u) + 0.16666667f*u2)+ // (f3-(0.1031746045f*rho+ -0.0231796391f*usqr+-(0.166666667f*u) + 0.16666667f*u2)); // f5 =(0.0158730149f*rho+ 0.00579491071f*usqr+ 0.0833333333f*( u+v +u2+(v2-w2))+ 0.25f*u*v)+ // (f7-(0.0158730149f*rho+ 0.00579491071f*usqr+ 0.0833333333f*(-u-v +u2+(v2-w2))+ 0.25f*u*v)); // f8 =(0.0158730149f*rho+ 0.00579491071f*usqr+ 0.0833333333f*( u-v +u2+(v2-w2))+ -0.25f*u*v)+ // (f6-(0.0158730149f*rho+ 0.00579491071f*usqr+ 0.0833333333f*(-u+v +u2+(v2-w2))+ -0.25f*u*v)); // f10=(0.0158730149f*rho+ 0.00579491071f*usqr+ 0.0833333333f*( u+w +u2+(v2-w2))+ 0.25f*u*w)+ // (f17-(0.0158730149f*rho+ 0.00579491071f*usqr+ 0.0833333333f*(-u-w +u2+(v2-w2))+ 0.25f*u*w)); // f15=(0.0158730149f*rho+ 0.00579491071f*usqr+ 0.0833333333f*( u-w +u2+(v2-w2))+ -0.25f*u*w)+ // (f12-(0.0158730149f*rho+ 0.00579491071f*usqr+ 0.0833333333f*(-u+w +u2+(v2-w2))+ -0.25f*u*w)); // float PI11 = f1 +f3 +f5 +f6 +f7 +f8 +f10+f12+f15+f17; // float PI22 = f2 +f4 +f5 +f6 +f7 +f8 +f11+f13+f16+f18; // float PI33 = f9 +f10+f11+f12+f13+f14+f15+f16+f17+f18; } void __device__ DirichletWest_Regularized(float& f0, float& f1, float& f2, float& f3 , float& f4 , float& f5 , float& f6 , float& f7 , float& f8 , float& f9, float& f10, float& f11, float& f12, float& f13, float& f14, float& f15, float& f16, float& f17, float& f18, int y, int z) { if(y == 0 && z == 0){ f2 = f4; f13=f18; f11=f18; f16=f18; f6 =f7; f9 =f14; f12=f17; } else if(y == 0 && z == ZDIM-1){ f4 = f2; f11=f13; f18=f13; f16=f13; f6 =f7; f14=f9; f17=f12; } else if(y == YDIM-1 && z == 0){ f4 = f2; f11=f16; f18=f16; f13=f16; f7 =f6; f9 =f14; f12=f17; } else if(y == YDIM-1 && z == ZDIM-1){ f4 = f2; f16=f11; f18=f11; f13=f11; f7 =f6; f14=f9; f17=f12; } else{ if(y == 0){ f2 = f4; f11=f13; f16=f18; f8 = f5; } else if(y == YDIM-1){ f4=f2 ; f13=f11; f18=f16; f5=f8 ; } if(z == 0){ f9 = f14; f10 = f15; f11 = f16; f13 = f18; } else if(z == ZDIM-1){ f14 = f9; f15 = f10; f16 = f11; f18 = f13; } } float PI11 = 0; float PI12 = 0; float PI22 = 0; float PI33 = 0; float PI13 = 0; float PI23 = 0; float u;//,v;//,w;//,rho; u = UMAX;//*PoisProf(z)*1.5; //v = 0.0f; //w = 0.0f; float usqr = u*u;//+v*v+w*w; float rho = u+(f0+f2+f4+f9+f11+f13+f14+f16+f18+2.0f*(f3+f6+f7+f12+f17)); //D2Q9i float feq0 = 0.3333333333f*(rho-1.5f*usqr); float feq1 = 0.0555555556f*(rho+3.0f*u+4.5f*u*u-1.5f*usqr); float feq2 = 0.0555555556f*(rho -1.5f*usqr); float feq3 = 0.0555555556f*(rho-3.0f*u+4.5f*u*u-1.5f*usqr); float feq4 = 0.0555555556f*(rho -1.5f*usqr); float feq9 = 0.0555555556f*(rho -1.5f*usqr); float feq14 = 0.0555555556f*(rho 
-1.5f*usqr); float feq5 = 0.0277777778f*(rho+3.0f*( u)+4.5f*( u)*( u)-1.5f*usqr); float feq6 = 0.0277777778f*(rho+3.0f*(-u)+4.5f*(-u)*(-u)-1.5f*usqr); float feq7 = 0.0277777778f*(rho+3.0f*(-u)+4.5f*(-u)*(-u)-1.5f*usqr); float feq8 = 0.0277777778f*(rho+3.0f*( u)+4.5f*( u)*( u)-1.5f*usqr); float feq10 = 0.0277777778f*(rho+3.0f*( u)+4.5f*( u)*( u)-1.5f*usqr); float feq11 = 0.0277777778f*(rho -1.5f*usqr); float feq12 = 0.0277777778f*(rho+3.0f*(-u)+4.5f*(-u)*(-u)-1.5f*usqr); float feq13 = 0.0277777778f*(rho -1.5f*usqr); float feq15 = 0.0277777778f*(rho+3.0f*( u)+4.5f*( u)*( u)-1.5f*usqr); float feq16 = 0.0277777778f*(rho -1.5f*usqr); float feq17 = 0.0277777778f*(rho+3.0f*(-u)+4.5f*(-u)*(-u)-1.5f*usqr); float feq18 = 0.0277777778f*(rho -1.5f*usqr); // float feq0 = 0.3333333333f*(rho-1.5f*usqr); // float feq1 = 0.0555555556f*(rho+3.0f*u+4.5f*u*u-1.5f*usqr); // float feq2 = 0.0555555556f*(rho+3.0f*v+4.5f*v*v-1.5f*usqr); // float feq3 = 0.0555555556f*(rho-3.0f*u+4.5f*u*u-1.5f*usqr); // float feq4 = 0.0555555556f*(rho-3.0f*v+4.5f*v*v-1.5f*usqr); // float feq5 = 0.0277777778f*(rho+3.0f*(u+v)+4.5f*(u+v)*(u+v)-1.5f*usqr); // float feq6 = 0.0277777778f*(rho+3.0f*(-u+v)+4.5f*(-u+v)*(-u+v)-1.5f*usqr); // float feq7 = 0.0277777778f*(rho+3.0f*(-u-v)+4.5f*(-u-v)*(-u-v)-1.5f*usqr); // float feq8 = 0.0277777778f*(rho+3.0f*(u-v)+4.5f*(u-v)*(u-v)-1.5f*usqr); // float feq9 = 0.0555555556f*(rho+3.0f*w+4.5f*w*w-1.5f*usqr); // float feq10 = 0.0277777778f*(rho+3.0f*(u+w)+4.5f*(u+w)*(u+w)-1.5f*usqr); // float feq11 = 0.0277777778f*(rho+3.0f*(v+w)+4.5f*(v+w)*(v+w)-1.5f*usqr); // float feq12 = 0.0277777778f*(rho+3.0f*(-u+w)+4.5f*(-u+w)*(-u+w)-1.5f*usqr); // float feq13 = 0.0277777778f*(rho+3.0f*(-v+w)+4.5f*(-v+w)*(-v+w)-1.5f*usqr); // float feq14 = 0.0555555556f*(rho-3.0f*w+4.5f*w*w-1.5f*usqr); // float feq15 = 0.0277777778f*(rho+3.0f*(u-w)+4.5f*(u-w)*(u-w)-1.5f*usqr); // float feq16 = 0.0277777778f*(rho+3.0f*(v-w)+4.5f*(v-w)*(v-w)-1.5f*usqr); // float feq17 = 0.0277777778f*(rho+3.0f*(-u-w)+4.5f*(-u-w)*(-u-w)-1.5f*usqr); // float feq18 = 0.0277777778f*(rho+3.0f*(-v-w)+4.5f*(-v-w)*(-v-w)-1.5f*usqr); f1 = feq1 +f3 -feq3 ; f5 = feq5 +f7 -feq7 ; f8 = feq8 +f6 -feq6 ; f10= feq10+f17-feq17; f15= feq15+f12-feq12; PI11 = (f1-feq1)+(f3-feq3)+(f5-feq5)+(f6-feq6)+(f7-feq7)+(f8-feq8)+(f10-feq10)+(f12-feq12)+(f15-feq15)+(f17-feq17); PI22 = (f2-feq2)+(f4-feq4)+(f5-feq5)+(f6-feq6)+(f7-feq7)+(f8-feq8)+(f11-feq11)+(f13-feq13)+(f16-feq16)+(f18-feq18); PI33 = (f9-feq9)+(f14-feq14)+(f10-feq10)+(f12-feq12)+(f15-feq15)+(f17-feq17)+(f11-feq11)+(f13-feq13)+(f16-feq16)+(f18-feq18); PI12 = (f5-feq5)+(f7-feq7)-(f6-feq6)-(f8-feq8); PI13 = (f10-feq10)+(f17-feq17)-(f12-feq12)-(f15-feq15); PI23 = (f11-feq11)+(f18-feq18)-(f13-feq13)-(f16-feq16); f0 = feq0 +1.5f *(( -0.333333333f)*PI11 +( -0.333333333f)*PI22+( -0.333333333f)*PI33) ; f1 = feq1 +0.25f *(( 0.666666667f)*PI11 +( -0.333333333f)*PI22+( -0.333333333f)*PI33) ; f2 = feq2 +0.25f *(( -0.333333333f)*PI11 +( 0.666666667f)*PI22+( -0.333333333f)*PI33) ; f3 = feq3 +0.25f *(( 0.666666667f)*PI11 +( -0.333333333f)*PI22+( -0.333333333f)*PI33) ; f4 = feq4 +0.25f *(( -0.333333333f)*PI11 +( 0.666666667f)*PI22+( -0.333333333f)*PI33) ; f5 = feq5 +0.125f*(( 0.666666667f)*PI11+2.0f*( PI12 )+( 0.666666667f)*PI22+( -0.333333333f)*PI33) ; f6 = feq6 +0.125f*(( 0.666666667f)*PI11+2.0f*(-PI12 )+( 0.666666667f)*PI22+( -0.333333333f)*PI33) ; f7 = feq7 +0.125f*(( 0.666666667f)*PI11+2.0f*( PI12 )+( 0.666666667f)*PI22+( -0.333333333f)*PI33) ; f8 = feq8 +0.125f*(( 0.666666667f)*PI11+2.0f*(-PI12 )+( 0.666666667f)*PI22+( 
-0.333333333f)*PI33) ; f9 = feq9 +0.25f *(( -0.333333333f)*PI11 +( -0.333333333f)*PI22+( 0.666666667f)*PI33) ; f10 = feq10+0.125f*(( 0.666666667f)*PI11+2.0f*( + PI13 )+( -0.333333333f)*PI22+( 0.666666667f)*PI33) ; f11 = feq11+0.125f*(( -0.333333333f)*PI11+2.0f*( + PI23)+( 0.666666667f)*PI22+( 0.666666667f)*PI33) ; f12 = feq12+0.125f*(( 0.666666667f)*PI11+2.0f*( +-PI13 )+( -0.333333333f)*PI22+( 0.666666667f)*PI33) ; f13 = feq13+0.125f*(( -0.333333333f)*PI11+2.0f*( +-PI23)+( 0.666666667f)*PI22+( 0.666666667f)*PI33) ; f14 = feq14+0.25f *(( -0.333333333f)*PI11 +( -0.333333333f)*PI22+( 0.666666667f)*PI33) ; f15 = feq15+0.125f*(( 0.666666667f)*PI11+2.0f*( +-PI13 )+( -0.333333333f)*PI22+( 0.666666667f)*PI33) ; f16 = feq16+0.125f*(( -0.333333333f)*PI11+2.0f*( +-PI23)+( 0.666666667f)*PI22+( 0.666666667f)*PI33) ; f17 = feq17+0.125f*(( 0.666666667f)*PI11+2.0f*( + PI13 )+( -0.333333333f)*PI22+( 0.666666667f)*PI33) ; f18 = feq18+0.125f*(( -0.333333333f)*PI11+2.0f*( + PI23)+( 0.666666667f)*PI22+( 0.666666667f)*PI33) ; } void __device__ NeumannEast_Regularized(float& f0, float& f1, float& f2, float& f3 , float& f4 , float& f5 , float& f6 , float& f7 , float& f8 , float& f9, float& f10, float& f11, float& f12, float& f13, float& f14, float& f15, float& f16, float& f17, float& f18, int y, int z) { if(y == 0 && z == 0){ f2 = f4; f13 = f18; f11 = f18; f16 = f18; f5 = f8; f9 = f14; f10 = f15; } else if(y == 0 && z == ZDIM-1){ f2 = f4; f11 = f13; f18 = f13; f16 = f13; f5 = f8; f14 = f9; f15 = f10; } else if(y == YDIM-1 && z == 0){ f4 = f2; f18 = f16; f11 = f16; f13 = f16; f8 = f5; f9 = f14; f10 = f15; } else if(y == YDIM-1 && z == ZDIM-1){ f4 = f2; f13 = f11; f16 = f11; f18 = f11; f8 = f5; f14 = f9; f15 = f10; } else{ if(y == 0){ f2 = f4; f11 = f13; f16 = f18; f5 = f8; } else if(y == YDIM-1){ f4 = f2; f13 = f11; f18 = f16; f8 = f5; } else if(z == 0){ f9 = f14; f10 = f15; f11 = f16; f13 = f18; } else if(z == ZDIM-1){ f14 = f9; f15 = f10; f16 = f11; f18 = f13; } } float PI11 = 0; float PI12 = 0; float PI22 = 0; float PI33 = 0; float PI13 = 0; float PI23 = 0; float u;//,v;//,w;//,rho; float rho = 1.0f; //v = 0.0f; //w = 0.0f; u = -rho+(f0+f2+f4+f9+f11+f13+f14+f16+f18+2.0f*(f1+f8+f5+f10+f15)); //D2Q9i float usqr = u*u;//+v*v+w*w; float feq0 = 0.3333333333f*(rho-1.5f*usqr); float feq1 = 0.0555555556f*(rho+3.0f*u+4.5f*u*u-1.5f*usqr); float feq2 = 0.0555555556f*(rho -1.5f*usqr); float feq3 = 0.0555555556f*(rho-3.0f*u+4.5f*u*u-1.5f*usqr); float feq4 = 0.0555555556f*(rho -1.5f*usqr); float feq9 = 0.0555555556f*(rho -1.5f*usqr); float feq14 = 0.0555555556f*(rho -1.5f*usqr); float feq5 = 0.0277777778f*(rho+3.0f*( u)+4.5f*( u)*( u)-1.5f*usqr); float feq6 = 0.0277777778f*(rho+3.0f*(-u)+4.5f*(-u)*(-u)-1.5f*usqr); float feq7 = 0.0277777778f*(rho+3.0f*(-u)+4.5f*(-u)*(-u)-1.5f*usqr); float feq8 = 0.0277777778f*(rho+3.0f*( u)+4.5f*( u)*( u)-1.5f*usqr); float feq10 = 0.0277777778f*(rho+3.0f*( u)+4.5f*( u)*( u)-1.5f*usqr); float feq11 = 0.0277777778f*(rho -1.5f*usqr); float feq12 = 0.0277777778f*(rho+3.0f*(-u)+4.5f*(-u)*(-u)-1.5f*usqr); float feq13 = 0.0277777778f*(rho -1.5f*usqr); float feq15 = 0.0277777778f*(rho+3.0f*( u)+4.5f*( u)*( u)-1.5f*usqr); float feq16 = 0.0277777778f*(rho -1.5f*usqr); float feq17 = 0.0277777778f*(rho+3.0f*(-u)+4.5f*(-u)*(-u)-1.5f*usqr); float feq18 = 0.0277777778f*(rho -1.5f*usqr); // float feq0 = 0.3333333333f*(rho-1.5f*usqr); // float feq1 = 0.0555555556f*(rho+3.0f*u+4.5f*u*u-1.5f*usqr); // float feq2 = 0.0555555556f*(rho+3.0f*v+4.5f*v*v-1.5f*usqr); // float feq3 = 
0.0555555556f*(rho-3.0f*u+4.5f*u*u-1.5f*usqr); // float feq4 = 0.0555555556f*(rho-3.0f*v+4.5f*v*v-1.5f*usqr); // float feq9 = 0.0555555556f*(rho+3.0f*w+4.5f*w*w-1.5f*usqr); // float feq14 = 0.0555555556f*(rho-3.0f*w+4.5f*w*w-1.5f*usqr); // float feq5 = 0.0277777778f*(rho+3.0f*( u+v)+4.5f*( u+v)*( u+v)-1.5f*usqr); // float feq6 = 0.0277777778f*(rho+3.0f*(-u+v)+4.5f*(-u+v)*(-u+v)-1.5f*usqr); // float feq7 = 0.0277777778f*(rho+3.0f*(-u-v)+4.5f*(-u-v)*(-u-v)-1.5f*usqr); // float feq8 = 0.0277777778f*(rho+3.0f*( u-v)+4.5f*( u-v)*( u-v)-1.5f*usqr); // float feq10 = 0.0277777778f*(rho+3.0f*( u+w)+4.5f*( u+w)*( u+w)-1.5f*usqr); // float feq11 = 0.0277777778f*(rho+3.0f*( v+w)+4.5f*( v+w)*( v+w)-1.5f*usqr); // float feq12 = 0.0277777778f*(rho+3.0f*(-u+w)+4.5f*(-u+w)*(-u+w)-1.5f*usqr); // float feq13 = 0.0277777778f*(rho+3.0f*(-v+w)+4.5f*(-v+w)*(-v+w)-1.5f*usqr); // float feq15 = 0.0277777778f*(rho+3.0f*( u-w)+4.5f*( u-w)*( u-w)-1.5f*usqr); // float feq16 = 0.0277777778f*(rho+3.0f*( v-w)+4.5f*( v-w)*( v-w)-1.5f*usqr); // float feq17 = 0.0277777778f*(rho+3.0f*(-u-w)+4.5f*(-u-w)*(-u-w)-1.5f*usqr); // float feq18 = 0.0277777778f*(rho+3.0f*(-v-w)+4.5f*(-v-w)*(-v-w)-1.5f*usqr); f3 = feq3 +f1 -feq1 ; f7 = feq7 +f5 -feq5 ; f6 = feq6 +f8 -feq8 ; f17= feq17+f10-feq10; f12= feq12+f15-feq15; PI11 = (f1-feq1)+(f3-feq3)+(f5-feq5)+(f6-feq6)+(f7-feq7)+(f8-feq8)+(f10-feq10)+(f12-feq12)+(f15-feq15)+(f17-feq17); PI22 = (f2-feq2)+(f4-feq4)+(f5-feq5)+(f6-feq6)+(f7-feq7)+(f8-feq8)+(f11-feq11)+(f13-feq13)+(f16-feq16)+(f18-feq18); PI33 = (f9-feq9)+(f14-feq14)+(f10-feq10)+(f12-feq12)+(f15-feq15)+(f17-feq17)+(f11-feq11)+(f13-feq13)+(f16-feq16)+(f18-feq18); PI12 = (f5-feq5)+(f7-feq7)-(f6-feq6)-(f8-feq8); PI13 = (f10-feq10)+(f17-feq17)-(f12-feq12)-(f15-feq15); PI23 = (f11-feq11)+(f18-feq18)-(f13-feq13)-(f16-feq16); f0 = feq0 +1.5f *(( -0.333333333f)*PI11 +( -0.333333333f)*PI22+( -0.333333333f)*PI33) ; f1 = feq1 +0.25f *(( 0.666666667f)*PI11 +( -0.333333333f)*PI22+( -0.333333333f)*PI33) ; f2 = feq2 +0.25f *(( -0.333333333f)*PI11 +( 0.666666667f)*PI22+( -0.333333333f)*PI33) ; f3 = feq3 +0.25f *(( 0.666666667f)*PI11 +( -0.333333333f)*PI22+( -0.333333333f)*PI33) ; f4 = feq4 +0.25f *(( -0.333333333f)*PI11 +( 0.666666667f)*PI22+( -0.333333333f)*PI33) ; f5 = feq5 +0.125f*(( 0.666666667f)*PI11+2.0f*( PI12 )+( 0.666666667f)*PI22+( -0.333333333f)*PI33) ; f6 = feq6 +0.125f*(( 0.666666667f)*PI11+2.0f*(-PI12 )+( 0.666666667f)*PI22+( -0.333333333f)*PI33) ; f7 = feq7 +0.125f*(( 0.666666667f)*PI11+2.0f*( PI12 )+( 0.666666667f)*PI22+( -0.333333333f)*PI33) ; f8 = feq8 +0.125f*(( 0.666666667f)*PI11+2.0f*(-PI12 )+( 0.666666667f)*PI22+( -0.333333333f)*PI33) ; f9 = feq9 +0.25f *(( -0.333333333f)*PI11 +( -0.333333333f)*PI22+( 0.666666667f)*PI33) ; f10 = feq10+0.125f*(( 0.666666667f)*PI11+2.0f*( + PI13 )+( -0.333333333f)*PI22+( 0.666666667f)*PI33) ; f11 = feq11+0.125f*(( -0.333333333f)*PI11+2.0f*( + PI23)+( 0.666666667f)*PI22+( 0.666666667f)*PI33) ; f12 = feq12+0.125f*(( 0.666666667f)*PI11+2.0f*( +-PI13 )+( -0.333333333f)*PI22+( 0.666666667f)*PI33) ; f13 = feq13+0.125f*(( -0.333333333f)*PI11+2.0f*( +-PI23)+( 0.666666667f)*PI22+( 0.666666667f)*PI33) ; f14 = feq14+0.25f *(( -0.333333333f)*PI11 +( -0.333333333f)*PI22+( 0.666666667f)*PI33) ; f15 = feq15+0.125f*(( 0.666666667f)*PI11+2.0f*( +-PI13 )+( -0.333333333f)*PI22+( 0.666666667f)*PI33) ; f16 = feq16+0.125f*(( -0.333333333f)*PI11+2.0f*( +-PI23)+( 0.666666667f)*PI22+( 0.666666667f)*PI33) ; f17 = feq17+0.125f*(( 0.666666667f)*PI11+2.0f*( + PI13 )+( -0.333333333f)*PI22+( 0.666666667f)*PI33) ; f18 = 
feq18+0.125f*(( -0.333333333f)*PI11+2.0f*( + PI23)+( 0.666666667f)*PI22+( 0.666666667f)*PI33) ; } __device__ void NeumannEast(float& f0, float& f1, float& f2, float& f3 , float& f4 , float& f5 , float& f6 , float& f7 , float& f8 , float& f9, float& f10, float& f11, float& f12, float& f13, float& f14, float& f15, float& f16, float& f17, float& f18, int y, int z) { if(y == 0 && z == 0){ f2 = f4; f13 = f18; f11 = f18; f16 = f18; f5 = f8; f9 = f14; f10 = f15; } else if(y == 0 && z == ZDIM-1){ f2 = f4; f11 = f13; f18 = f13; f16 = f13; f5 = f8; f14 = f9; f15 = f10; } else if(y == YDIM-1 && z == 0){ f4 = f2; f18 = f16; f11 = f16; f13 = f16; f8 = f5; f9 = f14; f10 = f15; } else if(y == YDIM-1 && z == ZDIM-1){ f4 = f2; f13 = f11; f16 = f11; f18 = f11; f8 = f5; f14 = f9; f15 = f10; } else{ if(y == 0){ f2 = f4; // f6 = f7; f11 = f13; f16 = f18; f5 = f8; } else if(y == YDIM-1){ f4 = f2; // f7 = f6; f13 = f11; f18 = f16; f8 = f5; } if(z == 0){ f9 = f14; f10 = f15; f11 = f16; // f12 = f17; f13 = f18; } else if(z == ZDIM-1){ f14 = f9; f15 = f10; f16 = f11; // f17 = f12; f18 = f13; } } float u,v,w;//,rho; float rho = 1.0f; v = 0.0f; w = 0.0f; u = -rho+(f0+f2+f4+f9+f11+f13+f14+f16+f18+2.0f*(f1+f8+f5+f10+f15)); //D2Q9i float u2 = u*u; float v2 = v*v; float w2 = w*w; float usqr = u2+v2+w2; // f3 = f1 -0.333333333f*u; // f7 = f5 -0.166666667f*(u+v); // f6 = f8 -0.166666667f*(u-v); // f17= f10-0.166666667f*(u+w); // f12= f15-0.166666667f*(u-w); f0 = 1.0f/3.0f*(rho-1.5f*usqr); f1 = 1.0f/18.0f*(rho+3.0f*u+4.5f*u*u-1.5f*usqr); f2 = 1.0f/18.0f*(rho+3.0f*v+4.5f*v*v-1.5f*usqr); f3 = 1.0f/18.0f*(rho-3.0f*u+4.5f*u*u-1.5f*usqr); f4 = 1.0f/18.0f*(rho-3.0f*v+4.5f*v*v-1.5f*usqr); f5 = 1.0f/36.0f*(rho+3.0f*(u+v)+4.5f*(u+v)*(u+v)-1.5f*usqr); f6 = 1.0f/36.0f*(rho+3.0f*(-u+v)+4.5f*(-u+v)*(-u+v)-1.5f*usqr); f7 = 1.0f/36.0f*(rho+3.0f*(-u-v)+4.5f*(-u-v)*(-u-v)-1.5f*usqr); f8 = 1.0f/36.0f*(rho+3.0f*(u-v)+4.5f*(u-v)*(u-v)-1.5f*usqr); f9 = 1.0f/18.0f*(rho+3.0f*w+4.5f*w*w-1.5f*usqr); f10= 1.0f/36.0f*(rho+3.0f*(u+w)+4.5f*(u+w)*(u+w)-1.5f*usqr); f11= 1.0f/36.0f*(rho+3.0f*(v+w)+4.5f*(v+w)*(u+w)-1.5f*usqr); f12= 1.0f/36.0f*(rho+3.0f*(-u+w)+4.5f*(-u+w)*(-u+w)-1.5f*usqr); f13= 1.0f/36.0f*(rho+3.0f*(-v+w)+4.5f*(-v+w)*(u+w)-1.5f*usqr); f14= 1.0f/18.0f*(rho-3.0f*w+4.5f*w*w-1.5f*usqr); f15= 1.0f/36.0f*(rho+3.0f*(u-w)+4.5f*(u-w)*(u-w)-1.5f*usqr); f16= 1.0f/36.0f*(rho+3.0f*(v-w)+4.5f*(v-w)*(v-w)-1.5f*usqr); f17= 1.0f/36.0f*(rho+3.0f*(-u-w)+4.5f*(-u-w)*(-u-w)-1.5f*usqr); f18= 1.0f/36.0f*(rho+3.0f*(-v-w)+4.5f*(-v-w)*(-v-w)-1.5f*usqr); } __device__ void NeumannEast_Reg(float& f0, float& f1, float& f2, float& f3 , float& f4 , float& f5 , float& f6 , float& f7 , float& f8 , float& f9, float& f10, float& f11, float& f12, float& f13, float& f14, float& f15, float& f16, float& f17, float& f18, int y, int z) { if(y == 0 && z == 0){ f2 = f4; f13 = f18; f11 = f18; f16 = f18; f5 = f8; f9 = f14; f10 = f15; } else if(y == 0 && z == ZDIM-1){ f2 = f4; f11 = f13; f18 = f13; f16 = f13; f5 = f8; f14 = f9; f15 = f10; } else if(y == YDIM-1 && z == 0){ f4 = f2; f18 = f16; f11 = f16; f13 = f16; f8 = f5; f9 = f14; f10 = f15; } else if(y == YDIM-1 && z == ZDIM-1){ f4 = f2; f13 = f11; f16 = f11; f18 = f11; f8 = f5; f14 = f9; f15 = f10; } else{ if(y == 0){ f2 = f4; // f6 = f7; f11 = f13; f16 = f18; f5 = f8; } else if(y == YDIM-1){ f4 = f2; // f7 = f6; f13 = f11; f18 = f16; f8 = f5; } if(z == 0){ f9 = f14; f10 = f15; f11 = f16; // f12 = f17; f13 = f18; } else if(z == ZDIM-1){ f14 = f9; f15 = f10; f16 = f11; // f17 = f12; f18 = f13; } } float u,v,w;//,rho; float rho = 
1.0f; v = 0.0f; w = 0.0f; u = -rho+(f0+f2+f4+f9+f11+f13+f14+f16+f18+2.0f*(f1+f8+f5+f10+f15)); //D2Q9i // float u2 = u*u; // float v2 = v*v; // float w2 = w*w; // float usqr = u2+v2+w2; f3 = f1 -0.333333333f*u; f7 = f5 -0.166666667f*(u+v); f6 = f8 -0.166666667f*(u-v); f17= f10-0.166666667f*(u+w); f12= f15-0.166666667f*(u-w); // f3 =(0.1031746045f*rho+ -0.0231796391f*usqr+-(0.166666667f*u) + 0.16666667f*u2)+ // (f1-(0.1031746045f*rho+ -0.0231796391f*usqr+ (0.166666667f*u) + 0.16666667f*u2)); // f7 =(0.0158730149f*rho+ 0.00579491071f*usqr+ 0.0833333333f*(-u-v +u2+(v2-w2))+ 0.25f*u*v)+ // (f5-(0.0158730149f*rho+ 0.00579491071f*usqr+ 0.0833333333f*( u+v +u2+(v2-w2))+ 0.25f*u*v)); // f6 =(0.0158730149f*rho+ 0.00579491071f*usqr+ 0.0833333333f*(-u+v +u2+(v2-w2))+ -0.25f*u*v)+ // (f8-(0.0158730149f*rho+ 0.00579491071f*usqr+ 0.0833333333f*( u-v +u2+(v2-w2))+ -0.25f*u*v)); // f17=(0.0158730149f*rho+ 0.00579491071f*usqr+ 0.0833333333f*(-u-w +u2+(v2-w2))+ 0.25f*u*w)+ // (f10-(0.0158730149f*rho+ 0.00579491071f*usqr+ 0.0833333333f*( u+w +u2+(v2-w2))+ 0.25f*u*w)); // f12=(0.0158730149f*rho+ 0.00579491071f*usqr+ 0.0833333333f*(-u+w +u2+(v2-w2))+ -0.25f*u*w)+ // (f15-(0.0158730149f*rho+ 0.00579491071f*usqr+ 0.0833333333f*( u-w +u2+(v2-w2))+ -0.25f*u*w)); // f1 =(0.1031746045f*rho+ -0.0231796391f*usqr+ (0.166666667f*u) + 0.16666667f*u2)+ // (f3-(0.1031746045f*rho+ -0.0231796391f*usqr+-(0.166666667f*u) + 0.16666667f*u2)); // f5 =(0.0158730149f*rho+ 0.00579491071f*usqr+ 0.0833333333f*( u+v +u2+(v2-w2))+ 0.25f*u*v)+ // (f7-(0.0158730149f*rho+ 0.00579491071f*usqr+ 0.0833333333f*(-u-v +u2+(v2-w2))+ 0.25f*u*v)); // f8 =(0.0158730149f*rho+ 0.00579491071f*usqr+ 0.0833333333f*( u-v +u2+(v2-w2))+ -0.25f*u*v)+ // (f6-(0.0158730149f*rho+ 0.00579491071f*usqr+ 0.0833333333f*(-u+v +u2+(v2-w2))+ -0.25f*u*v)); // f10=(0.0158730149f*rho+ 0.00579491071f*usqr+ 0.0833333333f*( u+w +u2+(v2-w2))+ 0.25f*u*w)+ // (f17-(0.0158730149f*rho+ 0.00579491071f*usqr+ 0.0833333333f*(-u-w +u2+(v2-w2))+ 0.25f*u*w)); // f15=(0.0158730149f*rho+ 0.00579491071f*usqr+ 0.0833333333f*( u-w +u2+(v2-w2))+ -0.25f*u*w)+ // (f12-(0.0158730149f*rho+ 0.00579491071f*usqr+ 0.0833333333f*(-u+w +u2+(v2-w2))+ -0.25f*u*w)); // float PI11 = f1 +f3 +f5 +f6 +f7 +f8 +f10+f12+f15+f17; // float PI22 = f2 +f4 +f5 +f6 +f7 +f8 +f11+f13+f16+f18; // float PI33 = f9 +f10+f11+f12+f13+f14+f15+f16+f17+f18; } __device__ void DirichletNorth_Reg(float& f0, float& f1, float& f2, float& f3 , float& f4 , float& f5 , float& f6 , float& f7 , float& f8 , float& f9, float& f10, float& f11, float& f12, float& f13, float& f14, float& f15, float& f16, float& f17, float& f18, int y, int z) { // if(x == 0){ // f2 = f4; // f6 = f7; // f11 = f13; // f16 = f18; // } // else if(x == XDIM-1){ // f4 = f2; // f7 = f6; // f13 = f11; // f18 = f16; // } if(z == 0){ f9 = f14; f10 = f15; f11 = f16; f12 = f17; f13 = f18; } else if(z == ZDIM-1){ f14 = f9; f15 = f10; f16 = f11; f17 = f12; f18 = f13; } float u,v,w;//,rho; u = UMAX; v = 0.0f;//0.0; w = 0.0f; // float rho = u+(f0+f2+f4+f9+f11+f13+f14+f16+f18+2.0f*(f3+f6+f7+f12+f17)); //D2Q9i // float u2 = u*u; // float v2 = v*v; // float w2 = w*w; // float usqr = u2+v2+w2; // f1 =(0.166666667f*u)+ // (f3-(-(0.166666667f*u))); f4 = f2-0.33333333f*v; // f5 =(0.0833333333f*( u+v))+ // (f7-(0.0833333333f*(-u-v))); f7 = f5-0.166666667f*(u+v); // f8 =(0.0833333333f*( u-v ))+ // (f6-(0.0833333333f*(-u+v ))); f8 = f6+0.166666667f*(u-v); // f10=(0.0833333333f*( u+w))+ // (f17-(0.0833333333f*(-u-w))); f13= f16-0.166666667f*(v-w); // f15=(0.0833333333f*( u-w))+ // 
(f12-(0.0833333333f*(-u+w))); f18= f11-0.166666667f*(v+w); // //float feq0 = 0.1904761791f*rho+-0.597127747f*usqr //float feq1 = 0.1031746045f*rho+ 0.032375918f*usqr+ 0.1666666667f*u + 0.055555556f*(2.f*u*u-(v*v+w*w)); //float feq2 = 0.1031746045f*rho+ 0.032375918f*usqr+ 0.1666666667f*v +-0.027777778f*(2.f*u*u-(v*v+w*w))+ 0.083333333f*(v*v-w*w); //float feq3 = 0.1031746045f*rho+ 0.032375918f*usqr+ -0.1666666667f*u + 0.055555556f*(2.f*u*u-(v*v+w*w)); //float feq4 = 0.1031746045f*rho+ 0.032375918f*usqr+ -0.1666666667f*v +-0.027777778f*(2.f*u*u-(v*v+w*w))+ 0.083333333f*(v*v-w*w); //float feq5 = 0.0158730149f*rho+ 0.033572690f*usqr+ 0.083333333f*( u+v)+ 0.027777778f*(2.f*u*u-(v*v+w*w))+ 0.083333333f*(v*v-w*w)+ 0.25f*u*v ; //float feq6 = 0.0158730149f*rho+ 0.033572690f*usqr+ -0.083333333f*( u-v)+ 0.027777778f*(2.f*u*u-(v*v+w*w))+ 0.083333333f*(v*v-w*w)+ -0.25f*u*v ; //float feq7 = 0.0158730149f*rho+ 0.033572690f*usqr+ -0.083333333f*( u+v)+ 0.027777778f*(2.f*u*u-(v*v+w*w))+ 0.083333333f*(v*v-w*w)+ 0.25f*u*v ; //float feq8 = 0.0158730149f*rho+ 0.033572690f*usqr+ 0.083333333f*( u-v)+ 0.027777778f*(2.f*u*u-(v*v+w*w))+ 0.083333333f*(v*v-w*w)+ -0.25f*u*v ; //float feq9 = 0.1031746045f*rho+ 0.032375918f*usqr+ 0.1666666667f*w; +-0.027777778f*(2.f*u*u-(v*v+w*w))+ -0.083333333f*(v*v-w*w) //float feq10= 0.0158730149f*rho+ 0.033572690f*usqr+ 0.083333333f*( u+w)+ 0.027777778f*(2.f*u*u-(v*v+w*w))+ -0.083333333f*(v*v-w*w) + 0.25f*u*w; //float feq11= 0.0158730149f*rho+ 0.033572690f*usqr+ 0.083333333f*( v+w)+-0.055555556f*(2.f*u*u-(v*v+w*w)) + 0.25f*v*w ; //float feq12= 0.0158730149f*rho+ 0.033572690f*usqr+ -0.083333333f*( u-w)+ 0.027777778f*(2.f*u*u-(v*v+w*w))+ -0.083333333f*(v*v-w*w) + -0.25f*u*w; //float feq13= 0.0158730149f*rho+ 0.033572690f*usqr+ -0.083333333f*( v-w)+-0.055555556f*(2.f*u*u-(v*v+w*w)) -0.25f*v*w ; //float feq14= 0.1031746045f*rho+ 0.032375918f*usqr+ -0.1666666667f*w +-0.027777778f*(2.f*u*u-(v*v+w*w))+ -0.083333333f*(v*v-w*w) //float feq15= 0.0158730149f*rho+ 0.033572690f*usqr+ 0.083333333f*(u-w) + 0.027777778f*(2.f*u*u-(v*v+w*w))+ -0.083333333f*(v*v-w*w) + -0.25f*u*w; //float feq16= 0.0158730149f*rho+ 0.033572690f*usqr+ 0.083333333f*(v-w) +-0.055555556f*(2.f*u*u-(v*v+w*w)) + -0.25f*v*w ; //float feq17= 0.0158730149f*rho+ 0.033572690f*usqr+ -0.083333333f*(u+w) + 0.027777778f*(2.f*u*u-(v*v+w*w))+ -0.083333333f*(v*v-w*w) + 0.25f*u*w; //float feq18= 0.0158730149f*rho+ 0.033572690f*usqr+ -0.083333333f*(v+w) +-0.055555556f*(2.f*u*u-(v*v+w*w)) + 0.25f*v*w ; // // float PI11 = f1 +f3 +f5 +f6 +f7 +f8 +f10+f12+f15+f17; // float PI22 = f2 +f4 +f5 +f6 +f7 +f8 +f11+f13+f16+f18; // float PI33 = f9 +f10+f11+f12+f13+f14+f15+f16+f17+f18; } __device__ void DirichletSouth_Reg(float& f0, float& f1, float& f2, float& f3 , float& f4 , float& f5 , float& f6 , float& f7 , float& f8 , float& f9, float& f10, float& f11, float& f12, float& f13, float& f14, float& f15, float& f16, float& f17, float& f18, int y, int z) { // if(x == 0){ // f2 = f4; // f6 = f7; // f11 = f13; // f16 = f18; // } // else if(x == XDIM-1){ // f4 = f2; // f7 = f6; // f13 = f11; // f18 = f16; // } if(z == 0){ f9 = f14; f10 = f15; f11 = f16; f12 = f17; f13 = f18; } else if(z == ZDIM-1){ f14 = f9; f15 = f10; f16 = f11; f17 = f12; f18 = f13; } float u,v,w;//,rho; u = UMAX; v = 0.0f;//0.0; w = 0.0f; // float rho = u+(f0+f2+f4+f9+f11+f13+f14+f16+f18+2.0f*(f3+f6+f7+f12+f17)); //D2Q9i // float u2 = u*u; // float v2 = v*v; // float w2 = w*w; // float usqr = u2+v2+w2; f2 = f4 +0.33333333f*v; f5 = f7 +0.166666667f*(u+v); f6 = f8 -0.166666667f*(u-v); f16= 
f13+0.166666667f*(v-w); f11= f18+0.166666667f*(v+w); // //float feq0 = 0.1904761791f*rho+-0.597127747f*usqr //float feq1 = 0.1031746045f*rho+ 0.032375918f*usqr+ 0.1666666667f*u + 0.055555556f*(2.f*u*u-(v*v+w*w)); //float feq2 = 0.1031746045f*rho+ 0.032375918f*usqr+ 0.1666666667f*v +-0.027777778f*(2.f*u*u-(v*v+w*w))+ 0.083333333f*(v*v-w*w); //float feq3 = 0.1031746045f*rho+ 0.032375918f*usqr+ -0.1666666667f*u + 0.055555556f*(2.f*u*u-(v*v+w*w)); //float feq4 = 0.1031746045f*rho+ 0.032375918f*usqr+ -0.1666666667f*v +-0.027777778f*(2.f*u*u-(v*v+w*w))+ 0.083333333f*(v*v-w*w); //float feq5 = 0.0158730149f*rho+ 0.033572690f*usqr+ 0.083333333f*( u+v)+ 0.027777778f*(2.f*u*u-(v*v+w*w))+ 0.083333333f*(v*v-w*w)+ 0.25f*u*v ; //float feq6 = 0.0158730149f*rho+ 0.033572690f*usqr+ -0.083333333f*( u-v)+ 0.027777778f*(2.f*u*u-(v*v+w*w))+ 0.083333333f*(v*v-w*w)+ -0.25f*u*v ; //float feq7 = 0.0158730149f*rho+ 0.033572690f*usqr+ -0.083333333f*( u+v)+ 0.027777778f*(2.f*u*u-(v*v+w*w))+ 0.083333333f*(v*v-w*w)+ 0.25f*u*v ; //float feq8 = 0.0158730149f*rho+ 0.033572690f*usqr+ 0.083333333f*( u-v)+ 0.027777778f*(2.f*u*u-(v*v+w*w))+ 0.083333333f*(v*v-w*w)+ -0.25f*u*v ; //float feq9 = 0.1031746045f*rho+ 0.032375918f*usqr+ 0.1666666667f*w; +-0.027777778f*(2.f*u*u-(v*v+w*w))+ -0.083333333f*(v*v-w*w) //float feq10= 0.0158730149f*rho+ 0.033572690f*usqr+ 0.083333333f*( u+w)+ 0.027777778f*(2.f*u*u-(v*v+w*w))+ -0.083333333f*(v*v-w*w) + 0.25f*u*w; //float feq11= 0.0158730149f*rho+ 0.033572690f*usqr+ 0.083333333f*( v+w)+-0.055555556f*(2.f*u*u-(v*v+w*w)) + 0.25f*v*w ; //float feq12= 0.0158730149f*rho+ 0.033572690f*usqr+ -0.083333333f*( u-w)+ 0.027777778f*(2.f*u*u-(v*v+w*w))+ -0.083333333f*(v*v-w*w) + -0.25f*u*w; //float feq13= 0.0158730149f*rho+ 0.033572690f*usqr+ -0.083333333f*( v-w)+-0.055555556f*(2.f*u*u-(v*v+w*w)) -0.25f*v*w ; //float feq14= 0.1031746045f*rho+ 0.032375918f*usqr+ -0.1666666667f*w +-0.027777778f*(2.f*u*u-(v*v+w*w))+ -0.083333333f*(v*v-w*w) //float feq15= 0.0158730149f*rho+ 0.033572690f*usqr+ 0.083333333f*(u-w) + 0.027777778f*(2.f*u*u-(v*v+w*w))+ -0.083333333f*(v*v-w*w) + -0.25f*u*w; //float feq16= 0.0158730149f*rho+ 0.033572690f*usqr+ 0.083333333f*(v-w) +-0.055555556f*(2.f*u*u-(v*v+w*w)) + -0.25f*v*w ; //float feq17= 0.0158730149f*rho+ 0.033572690f*usqr+ -0.083333333f*(u+w) + 0.027777778f*(2.f*u*u-(v*v+w*w))+ -0.083333333f*(v*v-w*w) + 0.25f*u*w; //float feq18= 0.0158730149f*rho+ 0.033572690f*usqr+ -0.083333333f*(v+w) +-0.055555556f*(2.f*u*u-(v*v+w*w)) + 0.25f*v*w ; // // float PI11 = f1 +f3 +f5 +f6 +f7 +f8 +f10+f12+f15+f17; // float PI22 = f2 +f4 +f5 +f6 +f7 +f8 +f11+f13+f16+f18; // float PI33 = f9 +f10+f11+f12+f13+f14+f15+f16+f17+f18; } __device__ void xsymmetry_bot(float& f0, float& f1, float& f2, float& f3 , float& f4 , float& f5 , float& f6 , float& f7 , float& f8 , float& f9, float& f10, float& f11, float& f12, float& f13, float& f14, float& f15, float& f16, float& f17, float& f18, int y, int z) { if(y == 0 && z == 0){ f2 = f4; f13=f18; f11=f18; f16=f18; f6 =f7; f9 =f14; f12=f17; } else if(y == 0 && z == ZDIM-1){ f4 = f2; f11=f13; f18=f13; f16=f13; f6 =f7; f14=f9; f17=f12; } else if(y == YDIM-1 && z == 0){ f4 = f2; f11=f16; f18=f16; f13=f16; f7 =f6; f9 =f14; f12=f17; } else if(y == YDIM-1 && z == ZDIM-1){ f4 = f2; f16=f11; f18=f11; f13=f11; f7 =f6; f14=f9; f17=f12; } else{ if(y == 0){ f2 = f4; f11=f13; f16=f18; f8 = f5; } else if(y == YDIM-1){ f4=f2 ; f13=f11; f18=f16; f5=f8 ; } // if(z == 0){ // f9 = f14; // f10 = f15; // f11 = f16; // f12 = f17; // f13 = f18; // } // else if(z == ZDIM-1){ // f14 = f9; // 
f15 = f10; // f16 = f11; // f17 = f12; // f18 = f13; // } } f1 = f3 ; f5 = f6 ; f8 = f7 ; f10= f12; f15= f17; } __device__ void xsymmetry_top(float& f0, float& f1, float& f2, float& f3 , float& f4 , float& f5 , float& f6 , float& f7 , float& f8 , float& f9, float& f10, float& f11, float& f12, float& f13, float& f14, float& f15, float& f16, float& f17, float& f18, int y, int z) { if(y == 0 && z == 0){ f2 = f4; f13 = f18; f11 = f18; f16 = f18; f5 = f8; f9 = f14; f10 = f15; } else if(y == 0 && z == ZDIM-1){ f2 = f4; f11 = f13; f18 = f13; f16 = f13; f5 = f8; f14 = f9; f15 = f10; } else if(y == YDIM-1 && z == 0){ f4 = f2; f18 = f16; f11 = f16; f13 = f16; f8 = f5; f9 = f14; f10 = f15; } else if(y == YDIM-1 && z == ZDIM-1){ f4 = f2; f13 = f11; f16 = f11; f18 = f11; f8 = f5; f14 = f9; f15 = f10; } else{ if(y == 0){ f2 = f4; f11 = f13; f16 = f18; f5 = f8; } else if(y == YDIM-1){ f4 = f2; f13 = f11; f18 = f16; f8 = f5; } // else if(z == 0){ // f9 = f14; // f10 = f15; // f11 = f16; // f12 = f17; // f13 = f18; // } // else if(z == ZDIM-1){ // f14 = f9; // f15 = f10; // f16 = f11; // f17 = f12; // f18 = f13; // } } f3 = f1 ; f6 = f5 ; f7 = f8 ; f12= f10; f17= f15; } __device__ void ysymmetry_top(float& f0, float& f1, float& f2, float& f3 , float& f4 , float& f5 , float& f6 , float& f7 , float& f8 , float& f9, float& f10, float& f11, float& f12, float& f13, float& f14, float& f15, float& f16, float& f17, float& f18, int z) { if(z == 0){ f9 = f14; f10= f15; f11= f16; f12= f17; f13= f18; } if(z == ZDIM-1){ f14= f9 ; f15= f10; f16= f11; f17= f12; f18= f13; } f4 = f2 ; f7 = f6 ; f8 = f5 ; f13= f11; f18= f16; } __device__ void ysymmetry_bot(float& f0, float& f1, float& f2, float& f3 , float& f4 , float& f5 , float& f6 , float& f7 , float& f8 , float& f9, float& f10, float& f11, float& f12, float& f13, float& f14, float& f15, float& f16, float& f17, float& f18, int z) { if(z == 0){ f9 = f14; f10= f15; f11= f16; f12= f17; f13= f18; } if(z == ZDIM-1){ f14= f9 ; f15= f10; f16= f11; f17= f12; f18= f13; } f2 = f4 ; f6 = f7 ; f5 = f8 ; f11= f13; f16= f18; } __device__ void zsymmetry_top(float& f0, float& f1, float& f2, float& f3 , float& f4 , float& f5 , float& f6 , float& f7 , float& f8 , float& f9, float& f10, float& f11, float& f12, float& f13, float& f14, float& f15, float& f16, float& f17, float& f18, int y) { if(y == 0){ f2 = f4 ; f6 = f7 ; f5 = f8 ; f11= f13; f16= f18; } if(y == YDIM-1){ f4 = f2 ; f7 = f6 ; f8 = f5 ; f13= f11; f18= f16; } f14= f9 ; f15= f10; f16= f11; f17= f12; f18= f13; } __device__ void zsymmetry_bot(float& f0, float& f1, float& f2, float& f3 , float& f4 , float& f5 , float& f6 , float& f7 , float& f8 , float& f9, float& f10, float& f11, float& f12, float& f13, float& f14, float& f15, float& f16, float& f17, float& f18, int y) { if(y == 0){ f2 = f4 ; f6 = f7 ; f5 = f8 ; f11= f13; f16= f18; } if(y == YDIM-1){ f4 = f2 ; f7 = f6 ; f8 = f5 ; f13= f11; f18= f16; } f9 = f14; f10= f15; f11= f16; f12= f17; f13= f18; } inline __device__ void boundaries(float& f0, float& f1, float& f2, float& f3 , float& f4 , float& f5 , float& f6 , float& f7 , float& f8 , float& f9, float& f10, float& f11, float& f12, float& f13, float& f14, float& f15, float& f16, float& f17, float& f18, int y, int z, int im) { // if(im == 3)//DirichletWest // { // DirichletWest(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,y,z); // } if(im == 53)//DirichletWest { //DirichletWest_Reg(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,y,z); 
DirichletWest_Regularized(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,y,z); } else if(im == 54)//DirichletWest { //NeumannEast(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,y,z); NeumannEast_Regularized(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,y,z); } // if(im == 4)//DirichletWest // { // NeumannEast(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,y,z); // } // if(im == 13)//DirichletWest // { // DirichletWest_Reg(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,y,z); // } // else if(im == 14)//DirichletWest // { // NeumannEast_Reg(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,y,z); // } // else if(im == 15)//DirichletNorth // { // DirichletNorth_Reg(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,y,z); // } // if(im == 16)//DirichletSouth // { // DirichletSouth_Reg(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,y,z); // } if(im == 21)//ysymm top { ysymmetry_top(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,z); } else if(im == 22)//ysymm bot { ysymmetry_bot(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,z); } else if(im == 23)//zsymm top { zsymmetry_top(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,y); } else if(im == 24)//zsymm bot { zsymmetry_bot(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,y); } } inline __device__ void boundaries_force(float& f0, float& f1, float& f2, float& f3 , float& f4 , float& f5 , float& f6 , float& f7 , float& f8 , float& f9, float& f10, float& f11, float& f12, float& f13, float& f14, float& f15, float& f16, float& f17, float& f18, int y, int z, int im) { // if(im == 3)//DirichletWest // { // DirichletWest(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,y,z); // } if(im == 53)//DirichletWest { DirichletWest_Reg(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,y,z); //DirichletWest_Regularized(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,y,z); } else if(im == 54)//DirichletWest { NeumannEast(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,y,z); //NeumannEast_Regularized(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,y,z); } // else if(im == 15)//DirichletNorth // { // DirichletNorth_Reg(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,y,z); // } // else if(im == 16)//DirichletSouth // { // DirichletSouth_Reg(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,y,z); // } else if(im == 21)//ysymm top { ysymmetry_top(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,z); } else if(im == 22)//ysymm bot { ysymmetry_bot(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,z); } else if(im == 23)//zsymm top { zsymmetry_top(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,y); } else if(im == 24)//zsymm bot { zsymmetry_bot(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,y); } else if(im == 25)//zsymm top { xsymmetry_top(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,y,z); } else if(im == 26)//zsymm bot { xsymmetry_bot(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,y,z); } } texture<float,2,cudaReadModeElementType> texRef_f0A; texture<float,2,cudaReadModeElementType> texRef_f1A; texture<float,2,cudaReadModeElementType> texRef_f2A; texture<float,2,cudaReadModeElementType> texRef_f3A; 
texture<float,2,cudaReadModeElementType> texRef_f4A; texture<float,2,cudaReadModeElementType> texRef_f5A; texture<float,2,cudaReadModeElementType> texRef_f6A; texture<float,2,cudaReadModeElementType> texRef_f7A; texture<float,2,cudaReadModeElementType> texRef_f8A; texture<float,2,cudaReadModeElementType> texRef_f9A; texture<float,2,cudaReadModeElementType> texRef_f10A; texture<float,2,cudaReadModeElementType> texRef_f11A; texture<float,2,cudaReadModeElementType> texRef_f12A; texture<float,2,cudaReadModeElementType> texRef_f13A; texture<float,2,cudaReadModeElementType> texRef_f14A; texture<float,2,cudaReadModeElementType> texRef_f15A; texture<float,2,cudaReadModeElementType> texRef_f16A; texture<float,2,cudaReadModeElementType> texRef_f17A; texture<float,2,cudaReadModeElementType> texRef_f18A; texture<float,2,cudaReadModeElementType> texRef_f0B; texture<float,2,cudaReadModeElementType> texRef_f1B; texture<float,2,cudaReadModeElementType> texRef_f2B; texture<float,2,cudaReadModeElementType> texRef_f3B; texture<float,2,cudaReadModeElementType> texRef_f4B; texture<float,2,cudaReadModeElementType> texRef_f5B; texture<float,2,cudaReadModeElementType> texRef_f6B; texture<float,2,cudaReadModeElementType> texRef_f7B; texture<float,2,cudaReadModeElementType> texRef_f8B; texture<float,2,cudaReadModeElementType> texRef_f9B; texture<float,2,cudaReadModeElementType> texRef_f10B; texture<float,2,cudaReadModeElementType> texRef_f11B; texture<float,2,cudaReadModeElementType> texRef_f12B; texture<float,2,cudaReadModeElementType> texRef_f13B; texture<float,2,cudaReadModeElementType> texRef_f14B; texture<float,2,cudaReadModeElementType> texRef_f15B; texture<float,2,cudaReadModeElementType> texRef_f16B; texture<float,2,cudaReadModeElementType> texRef_f17B; texture<float,2,cudaReadModeElementType> texRef_f18B; texture<float,2,cudaReadModeElementType> texRef_f0C; texture<float,2,cudaReadModeElementType> texRef_f1C; texture<float,2,cudaReadModeElementType> texRef_f2C; texture<float,2,cudaReadModeElementType> texRef_f3C; texture<float,2,cudaReadModeElementType> texRef_f4C; texture<float,2,cudaReadModeElementType> texRef_f5C; texture<float,2,cudaReadModeElementType> texRef_f6C; texture<float,2,cudaReadModeElementType> texRef_f7C; texture<float,2,cudaReadModeElementType> texRef_f8C; texture<float,2,cudaReadModeElementType> texRef_f9C; texture<float,2,cudaReadModeElementType> texRef_f10C; texture<float,2,cudaReadModeElementType> texRef_f11C; texture<float,2,cudaReadModeElementType> texRef_f12C; texture<float,2,cudaReadModeElementType> texRef_f13C; texture<float,2,cudaReadModeElementType> texRef_f14C; texture<float,2,cudaReadModeElementType> texRef_f15C; texture<float,2,cudaReadModeElementType> texRef_f16C; texture<float,2,cudaReadModeElementType> texRef_f17C; texture<float,2,cudaReadModeElementType> texRef_f18C; texture<float,2,cudaReadModeElementType> texRef_f0D; texture<float,2,cudaReadModeElementType> texRef_f1D; texture<float,2,cudaReadModeElementType> texRef_f2D; texture<float,2,cudaReadModeElementType> texRef_f3D; texture<float,2,cudaReadModeElementType> texRef_f4D; texture<float,2,cudaReadModeElementType> texRef_f5D; texture<float,2,cudaReadModeElementType> texRef_f6D; texture<float,2,cudaReadModeElementType> texRef_f7D; texture<float,2,cudaReadModeElementType> texRef_f8D; texture<float,2,cudaReadModeElementType> texRef_f9D; texture<float,2,cudaReadModeElementType> texRef_f10D; texture<float,2,cudaReadModeElementType> texRef_f11D; texture<float,2,cudaReadModeElementType> texRef_f12D; 
texture<float,2,cudaReadModeElementType> texRef_f13D; texture<float,2,cudaReadModeElementType> texRef_f14D; texture<float,2,cudaReadModeElementType> texRef_f15D; texture<float,2,cudaReadModeElementType> texRef_f16D; texture<float,2,cudaReadModeElementType> texRef_f17D; texture<float,2,cudaReadModeElementType> texRef_f18D; int timeval_subtract (double *result, struct timeval *x, struct timeval *y) { struct timeval result0; /* Perform the carry for the later subtraction by updating y. */ if (x->tv_usec < y->tv_usec) { int nsec = (y->tv_usec - x->tv_usec) / 1000000 + 1; y->tv_usec -= 1000000 * nsec; y->tv_sec += nsec; } if (x->tv_usec - y->tv_usec > 1000000) { int nsec = (y->tv_usec - x->tv_usec) / 1000000; y->tv_usec += 1000000 * nsec; y->tv_sec -= nsec; } /* Compute the time remaining to wait. tv_usec is certainly positive. */ result0.tv_sec = x->tv_sec - y->tv_sec; result0.tv_usec = x->tv_usec - y->tv_usec; *result = ((double)result0.tv_usec)/1e6 + (double)result0.tv_sec; /* Return 1 if result is negative. */ return x->tv_sec < y->tv_sec; } inline __device__ void bgk_collide(float& f0, float& f1, float& f2, float& f3 , float& f4 , float& f5 , float& f6 , float& f7 , float& f8 , float& f9, float& f10, float& f11, float& f12, float& f13, float& f14, float& f15, float& f16, float& f17, float& f18, float omega) { float rho,u,v,w; rho = f0+f1+f2+f3+f4+f5+f6+f7+f8+f9; rho +=f10+f11+f12+f13+f14+f15+f16+f17+f18; u = f1-f3+f5-f6-f7+f8+f10-f12+f15-f17; v = f2-f4+f5+f6-f7-f8+f11-f13+f16-f18; w = f9+f10+f11+f12+f13-f14-f15-f16-f17-f18; float usqr = u*u+v*v+w*w; // f0 =(1.f-omega)*f0 +omega*(0.3333333333f*(rho-1.5f*usqr)); // f1 =(1.f-omega)*f1 +omega*(0.0555555556f*(rho+3.0f*u+4.5f*u*u-1.5f*usqr)); // f2 =(1.f-omega)*f2 +omega*(0.0555555556f*(rho+3.0f*v+4.5f*v*v-1.5f*usqr)); // f3 =(1.f-omega)*f3 +omega*(0.0555555556f*(rho-3.0f*u+4.5f*u*u-1.5f*usqr)); // f4 =(1.f-omega)*f4 +omega*(0.0555555556f*(rho-3.0f*v+4.5f*v*v-1.5f*usqr)); // f5 =(1.f-omega)*f5 +omega*(0.0277777778f*(rho+3.0f*(u+v)+4.5f*(u+v)*(u+v)-1.5f*usqr)); // f6 =(1.f-omega)*f6 +omega*(0.0277777778f*(rho+3.0f*(-u+v)+4.5f*(-u+v)*(-u+v)-1.5f*usqr)); // f7 =(1.f-omega)*f7 +omega*(0.0277777778f*(rho+3.0f*(-u-v)+4.5f*(-u-v)*(-u-v)-1.5f*usqr)); // f8 =(1.f-omega)*f8 +omega*(0.0277777778f*(rho+3.0f*(u-v)+4.5f*(u-v)*(u-v)-1.5f*usqr)); // f9 =(1.f-omega)*f9 +omega*(0.0555555556f*(rho+3.0f*w+4.5f*w*w-1.5f*usqr)); // f10=(1.f-omega)*f10+omega*(0.0277777778f*(rho+3.0f*(u+w)+4.5f*(u+w)*(u+w)-1.5f*usqr)); // f11=(1.f-omega)*f11+omega*(0.0277777778f*(rho+3.0f*(v+w)+4.5f*(v+w)*(u+w)-1.5f*usqr)); // f12=(1.f-omega)*f12+omega*(0.0277777778f*(rho+3.0f*(-u+w)+4.5f*(-u+w)*(-u+w)-1.5f*usqr)); // f13=(1.f-omega)*f13+omega*(0.0277777778f*(rho+3.0f*(-v+w)+4.5f*(-v+w)*(u+w)-1.5f*usqr)); // f14=(1.f-omega)*f14+omega*(0.0555555556f*(rho-3.0f*w+4.5f*w*w-1.5f*usqr)); // f15=(1.f-omega)*f15+omega*(0.0277777778f*(rho+3.0f*(u-w)+4.5f*(u-w)*(u-w)-1.5f*usqr)); // f16=(1.f-omega)*f16+omega*(0.0277777778f*(rho+3.0f*(v-w)+4.5f*(v-w)*(v-w)-1.5f*usqr)); // f17=(1.f-omega)*f17+omega*(0.0277777778f*(rho+3.0f*(-u-w)+4.5f*(-u-w)*(-u-w)-1.5f*usqr)); // f18=(1.f-omega)*f18+omega*(0.0277777778f*(rho+3.0f*(-v-w)+4.5f*(-v-w)*(-v-w)-1.5f*usqr)); f0 -=omega*(f0 -0.3333333333f*(rho-1.5f*usqr)); f1 -=omega*(f1 -0.0555555556f*(rho+3.0f*u+4.5f*u*u-1.5f*usqr)); f2 -=omega*(f2 -0.0555555556f*(rho+3.0f*v+4.5f*v*v-1.5f*usqr)); f3 -=omega*(f3 -0.0555555556f*(rho-3.0f*u+4.5f*u*u-1.5f*usqr)); f4 -=omega*(f4 -0.0555555556f*(rho-3.0f*v+4.5f*v*v-1.5f*usqr)); f5 -=omega*(f5 
-0.0277777778f*(rho+3.0f*(u+v)+4.5f*(u+v)*(u+v)-1.5f*usqr)); f6 -=omega*(f6 -0.0277777778f*(rho+3.0f*(-u+v)+4.5f*(-u+v)*(-u+v)-1.5f*usqr)); f7 -=omega*(f7 -0.0277777778f*(rho+3.0f*(-u-v)+4.5f*(-u-v)*(-u-v)-1.5f*usqr)); f8 -=omega*(f8 -0.0277777778f*(rho+3.0f*(u-v)+4.5f*(u-v)*(u-v)-1.5f*usqr)); f9 -=omega*(f9 -0.0555555556f*(rho+3.0f*w+4.5f*w*w-1.5f*usqr)); f10-=omega*(f10-0.0277777778f*(rho+3.0f*(u+w)+4.5f*(u+w)*(u+w)-1.5f*usqr)); f11-=omega*(f11-0.0277777778f*(rho+3.0f*(v+w)+4.5f*(v+w)*(v+w)-1.5f*usqr)); f12-=omega*(f12-0.0277777778f*(rho+3.0f*(-u+w)+4.5f*(-u+w)*(-u+w)-1.5f*usqr)); f13-=omega*(f13-0.0277777778f*(rho+3.0f*(-v+w)+4.5f*(-v+w)*(-v+w)-1.5f*usqr)); f14-=omega*(f14-0.0555555556f*(rho-3.0f*w+4.5f*w*w-1.5f*usqr)); f15-=omega*(f15-0.0277777778f*(rho+3.0f*(u-w)+4.5f*(u-w)*(u-w)-1.5f*usqr)); f16-=omega*(f16-0.0277777778f*(rho+3.0f*(v-w)+4.5f*(v-w)*(v-w)-1.5f*usqr)); f17-=omega*(f17-0.0277777778f*(rho+3.0f*(-u-w)+4.5f*(-u-w)*(-u-w)-1.5f*usqr)); f18-=omega*(f18-0.0277777778f*(rho+3.0f*(-v-w)+4.5f*(-v-w)*(-v-w)-1.5f*usqr)); } inline __device__ void mrt_collide(float& f0, float& f1, float& f2, float& f3 , float& f4 , float& f5 , float& f6 , float& f7 , float& f8 , float& f9, float& f10, float& f11, float& f12, float& f13, float& f14, float& f15, float& f16, float& f17, float& f18, float omega) { float u,v,w; u = f1-f3+f5-f6-f7+f8+f10-f12+f15-f17; v = f2-f4+f5+f6-f7-f8+f11-f13+f16-f18; w = f9+f10+f11+f12+f13-f14-f15-f16-f17-f18; float rho = f0+f1+f2+f3+f4+f5+f6+f7+f8+f9+ f10+f11+f12+f13+f14+f15+f16+f17+f18; float usqr = u*u+v*v+w*w; // u = rho*u; // v = rho*v; // w = rho*w; float m1,m2,m4,m6,m8,m9,m10,m11,m12,m13,m14,m15,m16,m17,m18; //COMPUTE M-MEQ //m1 = -19.f*f0+ 19.f*f5+19.f*f6+19.f*f7+19.f*f8+19.f*f10+19.f*f11+19.f*f12+19.f*f13+19.f*f15+19.f*f16+19.f*f17+19.f*f18 -19.f*(u*u+v*v+w*w);//+8.f*(f5+f6+f7+f8+f10+f11+f12+f13+f15+f16+f17+f18); //m4 = -3.33333333f*f1+3.33333333f*f3+1.66666667f*f5-1.66666667f*f6-1.66666667f*f7+1.66666667f*f8+1.66666667f*f10-1.66666667f*f12+1.66666667f*f15-1.66666667f*f17; //m6 = -3.33333333f*f2+3.33333333f*f4+1.66666667f*f5+1.66666667f*f6-1.66666667f*f7-1.66666667f*f8+1.66666667f*f11-1.66666667f*f13+1.66666667f*f16-1.66666667f*f18; //m8 = -3.33333333f*f9+1.66666667f*f10+1.66666667f*f11+1.66666667f*f12+1.66666667f*f13+3.33333333f*f14-1.66666667f*f15-1.66666667f*f16-1.66666667f*f17-1.66666667f*f18; m1 = 19.f*(-f0+ f5+f6+f7+f8+f10+f11+f12+f13+f15+f16+f17+f18 -(u*u+v*v+w*w));//+8.f*(f5+f6+f7+f8+f10+f11+f12+f13+f15+f16+f17+f18); m2 = 12.f*f0+ -4.f*f1+ -4.f*f2+ -4.f*f3+ -4.f*f4+ f5+ f6+ f7+ f8+ -4.f*f9+ f10+ f11+ f12+ f13+ -4.f*f14+ f15+ f16+ f17+ f18 +7.53968254f*(u*u+v*v+w*w); // m4 = 1.666666667f*(-2.f*f1+2.f*f3+f5-f6-f7+f8+f10-f12+f15-f17); // m6 = 1.666666667f*(-2.f*f2+2.f*f4+f5+f6-f7-f8+f11-f13+f16-f18); // m8 = 1.666666667f*(-2.f*f9+f10+f11+f12+f13+2.f*f14-f15-f16-f17-f18); m4 = 1.666666667f*(-3.f*f1+3.f*f3+u); m6 = 1.666666667f*(-3.f*f2+3.f*f4+v); m8 = 1.666666667f*(-3.f*f9+3.f*f14+w); m9 = 2.f*f1+ - f2+ 2.f*f3+ - f4+ f5+ f6+ f7+ f8+- f9+ f10+ -2.f*f11+ f12+-2.f*f13+- f14+ f15+ -2.f*f16+ f17+-2.f*f18 -(2.f*u*u-(v*v+w*w)); m10 =-4.f*f1+ 2.f*f2+ -4.f*f3+ 2.f*f4+ f5+ f6+ f7+ f8+ 2.f*f9+ f10+ -2.f*f11+ f12+-2.f*f13+ 2.f*f14+ f15+ -2.f*f16+ f17+-2.f*f18; m11 = f2 + f4+ f5+ f6+ f7+ f8+- f9+-f10 +-f12 +- f14+-f15 +-f17 -(v*v-w*w); m12 = -2.f*f2 -2.f*f4+ f5+ f6+ f7+ f8+ 2.f*f9+-f10 +-f12 + 2.f*f14+-f15 +-f17 ; m13 = f5+-f6+ f7+-f8 -u*v; m14 = f11 +- f13 + - f16 + f18 -v*w; m15 = f10 + - f12 +-f15 + f17 -u*w; m16 = f5+-f6+-f7+ f8 -f10 + f12 +-f15 + f17 ; m17 = 
-f5+-f6+ f7+ f8 + f11 +- f13 + f16 +- f18; m18 = f10+- f11+ f12+- f13 +-f15+ f16+-f17+ f18; if(SmagLES == "YES"){ //// float PI11 = -1.0f/38.0f*( (m1)+19.0f*omega* (m9)); //// float PI22 = -1.0f/76.0f*(2.0f*(m1)-19.0f*(omega*(m9)-3.0f*omega*(m11))); //// float PI33 = -1.0f/76.0f*(2.0f*(m1)-19.0f*(omega*(m9)+3.0f*omega*(m11))); // float PI11 = LRLEVEL*-0.026315789f*m1-0.5f *omega*m9; // float PI22 = LRLEVEL*-0.026315789f*m1+0.25f*omega*(m9-3.0f*m11); // float PI33 = LRLEVEL*-0.026315789f*m1+0.25f*omega*(m9+3.0f*m11); // float PI12 = LRLEVEL*-1.5f*omega*m13; // float PI23 = LRLEVEL*-1.5f*omega*m14; // float PI13 = LRLEVEL*-1.5f*omega*m15; // float nu0 = ((1.0f/omega)-0.5f)*LRFACTOR/3.0f; // float Smag = sqrt(2.f*(PI11*PI11+PI22*PI22+PI33*PI33+2.f*PI12*PI12+2.f*PI23*PI23+2.f*PI13*PI13)); // //float Cs = 0.01f; // omega = 1.0f/(3.0f*(nu0+CS*Smag*LRFACTOR*LRFACTOR)*LRLEVEL+0.5f); // //omega = 1.0f/(1.0f/omega+3.f*CS*Smag*LRFACTOR*LRFACTOR); // //omega = 1.0f/(1.0f*LRLEVEL/1.99983f-1.f+0.5f+3.f*CS*Smag*LRFACTOR); float feq0 = 0.1904761791f*rho+-0.597127747f*usqr ; float feq1 = 0.1031746045f*rho+ 0.032375918f*usqr+ 0.1666666667f*u ; float feq2 = 0.1031746045f*rho+ 0.032375918f*usqr+ 0.1666666667f*v ; float feq3 = 0.1031746045f*rho+ 0.032375918f*usqr+ -0.1666666667f*u ; float feq4 = 0.1031746045f*rho+ 0.032375918f*usqr+ -0.1666666667f*v ; float feq5 = 0.0158730149f*rho+ 0.033572690f*usqr+ 0.083333333f*( u+v) ; float feq6 = 0.0158730149f*rho+ 0.033572690f*usqr+ -0.083333333f*( u-v) ; float feq7 = 0.0158730149f*rho+ 0.033572690f*usqr+ -0.083333333f*( u+v) ; float feq8 = 0.0158730149f*rho+ 0.033572690f*usqr+ 0.083333333f*( u-v) ; float feq9 = 0.1031746045f*rho+ 0.032375918f*usqr+ 0.1666666667f*w; float feq10= 0.0158730149f*rho+ 0.033572690f*usqr+ 0.083333333f*( u+w) ; float feq11= 0.0158730149f*rho+ 0.033572690f*usqr+ 0.083333333f*( v+w); float feq12= 0.0158730149f*rho+ 0.033572690f*usqr+ -0.083333333f*( u-w) ; float feq13= 0.0158730149f*rho+ 0.033572690f*usqr+ -0.083333333f*( v-w); float feq14= 0.1031746045f*rho+ 0.032375918f*usqr+ -0.1666666667f*w; float feq15= 0.0158730149f*rho+ 0.033572690f*usqr+ 0.083333333f*(u-w) ; float feq16= 0.0158730149f*rho+ 0.033572690f*usqr+ 0.083333333f*(v-w); float feq17= 0.0158730149f*rho+ 0.033572690f*usqr+ -0.083333333f*(u+w) ; float feq18= 0.0158730149f*rho+ 0.033572690f*usqr+ -0.083333333f*(v+w); feq1 += 0.055555556f*(2.f*u*u-(v*v+w*w)); feq2 += -0.027777778f*(2.f*u*u-(v*v+w*w))+ 0.083333333f*(v*v-w*w); feq3 += 0.055555556f*(2.f*u*u-(v*v+w*w)); feq4 += -0.027777778f*(2.f*u*u-(v*v+w*w))+ 0.083333333f*(v*v-w*w); feq5 += 0.027777778f*(2.f*u*u-(v*v+w*w))+ 0.083333333f*(v*v-w*w)+ 0.25f*u*v ; feq6 += 0.027777778f*(2.f*u*u-(v*v+w*w))+ 0.083333333f*(v*v-w*w)+ -0.25f*u*v ; feq7 += 0.027777778f*(2.f*u*u-(v*v+w*w))+ 0.083333333f*(v*v-w*w)+ 0.25f*u*v ; feq8 += 0.027777778f*(2.f*u*u-(v*v+w*w))+ 0.083333333f*(v*v-w*w)+ -0.25f*u*v ; feq9 += -0.027777778f*(2.f*u*u-(v*v+w*w))+ -0.083333333f*(v*v-w*w) ; feq10+= 0.027777778f*(2.f*u*u-(v*v+w*w))+ -0.083333333f*(v*v-w*w) + 0.25f*u*w; feq11+= -0.055555556f*(2.f*u*u-(v*v+w*w)) + 0.25f*v*w ; feq12+= 0.027777778f*(2.f*u*u-(v*v+w*w))+ -0.083333333f*(v*v-w*w) + -0.25f*u*w; feq13+= -0.055555556f*(2.f*u*u-(v*v+w*w)) -0.25f*v*w ; feq14+= -0.027777778f*(2.f*u*u-(v*v+w*w))+ -0.083333333f*(v*v-w*w) ; feq15+= 0.027777778f*(2.f*u*u-(v*v+w*w))+ -0.083333333f*(v*v-w*w) + -0.25f*u*w; feq16+= -0.055555556f*(2.f*u*u-(v*v+w*w)) + -0.25f*v*w ; feq17+= 0.027777778f*(2.f*u*u-(v*v+w*w))+ -0.083333333f*(v*v-w*w) + 0.25f*u*w; feq18+= 
-0.055555556f*(2.f*u*u-(v*v+w*w)) + 0.25f*v*w ; float PI11 = (f1-feq1)+(f3-feq3)+(f5-feq5)+(f6-feq6)+(f7-feq7)+(f8-feq8)+(f10-feq10)+(f12-feq12)+(f15-feq15)+(f17-feq17); float PI22 = (f2-feq2)+(f4-feq4)+(f5-feq5)+(f6-feq6)+(f7-feq7)+(f8-feq8)+(f11-feq11)+(f13-feq13)+(f16-feq16)+(f18-feq18); float PI33 = (f9-feq9)+(f14-feq14)+(f10-feq10)+(f12-feq12)+(f15-feq15)+(f17-feq17)+(f11-feq11)+(f13-feq13)+(f16-feq16)+(f18-feq18); float PI12 = (f5-feq5)+(f7-feq7)-(f6-feq6)-(f8-feq8); float PI13 = (f10-feq10)+(f17-feq17)-(f12-feq12)-(f15-feq15); float PI23 = (f11-feq11)+(f18-feq18)-(f13-feq13)-(f16-feq16); float Q = sqrt(PI11*PI11+PI22*PI22+PI33*PI33+2.f*PI12*PI12+2.f*PI23*PI23+2.f*PI13*PI13); float nu0 = ((1.0f/omega)-0.5f)*LRFACTOR/3.0f; float tau0 = 1.f/omega; //float Smag = (sqrt(nu0*nu0+18.f*CS*LRFACTOR*LRFACTOR*Q)-nu0)/(6.f*CS*LRFACTOR*LRFACTOR); //float Smag = LRFACTOR*(sqrt(4.f/9.f*tau0*tau0+8.f*CS*LRFACTOR*Q)-2.f/3.f*tau0)/(4.f*CS*LRFACTOR*LRFACTOR); //omega = 1.0f/(3.0f*(nu0+CS*Smag*LRFACTOR*LRFACTOR)*LRLEVEL+0.5f); //float tau = tau0+0.5*(-tau0+sqrt(tau0*tau0+18.f*CS*LRFACTOR*Q)); float tau = tau0+0.5f*(-tau0+sqrt(tau0*tau0+18.f*CS*sqrt(2.f)*Q)); omega = 1.f/tau; //float tau = 3.f*nu0*LRFACTOR+0.5f+(sqrt(tau0*tau0+18.f*CS*CS*LRFACTOR*LRFACTOR*Q)-tau0)*0.5f; //omega = 1.f/tau; } f0 -=- 0.012531328f*(m1)+ 0.047619048f*(m2); f1 -=-0.0045948204f*(m1)+-0.015873016f*(m2)+ -0.1f*(m4) + 0.055555556f*((m9)*omega-m10); f2 -=-0.0045948204f*(m1)+-0.015873016f*(m2) + -0.1f*(m6) +-0.027777778f*((m9)*omega-m10)+ 0.083333333f*((m11)*omega-m12); f3 -=-0.0045948204f*(m1)+-0.015873016f*(m2)+ 0.1f*(m4) + 0.055555556f*((m9)*omega-m10); f4 -=-0.0045948204f*(m1)+-0.015873016f*(m2) + 0.1f*(m6) +-0.027777778f*((m9)*omega-m10)+ 0.083333333f*((m11)*omega-m12); f5 -= 0.0033416876f*(m1)+ 0.003968254f*(m2)+ 0.025f*(m4+m6) +0.013888889f*(m10)+0.041666667f*(m12)+0.125f*( m16-m17)+ omega*(0.027777778f*(m9) +0.083333333f*(m11)+( 0.25f*(m13))); f6 -= 0.0033416876f*(m1)+ 0.003968254f*(m2)+-0.025f*(m4-m6) +0.013888889f*(m10)+0.041666667f*(m12)+0.125f*(-m16-m17)+ omega*(0.027777778f*(m9) +0.083333333f*(m11)+(-0.25f*(m13))); f7 -= 0.0033416876f*(m1)+ 0.003968254f*(m2)+-0.025f*(m4+m6) +0.013888889f*(m10)+0.041666667f*(m12)+0.125f*(-m16+m17)+ omega*(0.027777778f*(m9) +0.083333333f*(m11)+( 0.25f*(m13))); f8 -= 0.0033416876f*(m1)+ 0.003968254f*(m2)+ 0.025f*(m4-m6) +0.013888889f*(m10)+0.041666667f*(m12)+0.125f*( m16+m17)+ omega*(0.027777778f*(m9) +0.083333333f*(m11)+(-0.25f*(m13))); f9 -=-0.0045948204f*(m1)+-0.015873016f*(m2) + -0.1f*(m8)+-0.027777778f*((m9)*omega-m10)+-0.083333333f*((m11)*omega-m12); f10-= 0.0033416876f*(m1)+ 0.003968254f*(m2)+ 0.025f*(m4+m8) +0.013888889f*(m10)-0.041666667f*(m12)+0.125f*(-m16+m18)+ omega*(0.027777778f*(m9) -0.083333333f*(m11)+( 0.25f*(m15))); f11-= 0.0033416876f*(m1)+ 0.003968254f*(m2) + 0.025f*(m6+m8)+0.125f*( m17-m18)-0.027777778f*(m10)+omega*(-0.055555556f*(m9) +( 0.25f*(m14))); f12-= 0.0033416876f*(m1)+ 0.003968254f*(m2)+-0.025f*(m4-m8) +0.013888889f*(m10)-0.041666667f*(m12)+0.125f*( m16+m18)+ omega*(0.027777778f*(m9) -0.083333333f*(m11)+(-0.25f*(m15))); f13-= 0.0033416876f*(m1)+ 0.003968254f*(m2) + -0.025f*(m6-m8)+0.125f*(-m17-m18)-0.027777778f*(m10)+omega*(-0.055555556f*(m9) +(-0.25f*(m14))); f14-=-0.0045948204f*(m1)+-0.015873016f*(m2) + 0.1f*(m8)+-0.027777778f*((m9)*omega-m10)+-0.083333333f*((m11)*omega-m12); f15-= 0.0033416876f*(m1)+ 0.003968254f*(m2)+ 0.025f*(m4-m8) +0.013888889f*(m10)-0.041666667f*(m12)+0.125f*(-m16-m18)+ omega*(0.027777778f*(m9) -0.083333333f*(m11)+(-0.25f*(m15))); f16-= 
0.0033416876f*(m1)+ 0.003968254f*(m2) + 0.025f*(m6-m8)+0.125f*( m17+m18)-0.027777778f*(m10)+omega*(-0.055555556f*(m9) +(-0.25f*(m14))); f17-= 0.0033416876f*(m1)+ 0.003968254f*(m2)+-0.025f*(m4+m8) +0.013888889f*(m10)-0.041666667f*(m12)+0.125f*( m16-m18)+ omega*(0.027777778f*(m9) -0.083333333f*(m11)+( 0.25f*(m15))); f18-= 0.0033416876f*(m1)+ 0.003968254f*(m2) + -0.025f*(m6+m8)+0.125f*(-m17+m18)-0.027777778f*(m10)+omega*(-0.055555556f*(m9) +( 0.25f*(m14))); } inline __device__ void mrt_collide_LES(float& f0, float& f1, float& f2, float& f3 , float& f4 , float& f5 , float& f6 , float& f7 , float& f8 , float& f9, float& f10, float& f11, float& f12, float& f13, float& f14, float& f15, float& f16, float& f17, float& f18, float omega, float Cs) { float u,v,w; u = f1-f3+f5-f6-f7+f8+f10-f12+f15-f17; v = f2-f4+f5+f6-f7-f8+f11-f13+f16-f18; w = f9+f10+f11+f12+f13-f14-f15-f16-f17-f18; float rho = f0+f1+f2+f3+f4+f5+f6+f7+f8+f9+ f10+f11+f12+f13+f14+f15+f16+f17+f18; float usqr = u*u+v*v+w*w; // u = rho*u; // v = rho*v; // w = rho*w; float m1,m2,m4,m6,m8,m9,m10,m11,m12,m13,m14,m15,m16,m17,m18; //COMPUTE M-MEQ //m1 = -19.f*f0+ 19.f*f5+19.f*f6+19.f*f7+19.f*f8+19.f*f10+19.f*f11+19.f*f12+19.f*f13+19.f*f15+19.f*f16+19.f*f17+19.f*f18 -19.f*(u*u+v*v+w*w);//+8.f*(f5+f6+f7+f8+f10+f11+f12+f13+f15+f16+f17+f18); //m4 = -3.33333333f*f1+3.33333333f*f3+1.66666667f*f5-1.66666667f*f6-1.66666667f*f7+1.66666667f*f8+1.66666667f*f10-1.66666667f*f12+1.66666667f*f15-1.66666667f*f17; //m6 = -3.33333333f*f2+3.33333333f*f4+1.66666667f*f5+1.66666667f*f6-1.66666667f*f7-1.66666667f*f8+1.66666667f*f11-1.66666667f*f13+1.66666667f*f16-1.66666667f*f18; //m8 = -3.33333333f*f9+1.66666667f*f10+1.66666667f*f11+1.66666667f*f12+1.66666667f*f13+3.33333333f*f14-1.66666667f*f15-1.66666667f*f16-1.66666667f*f17-1.66666667f*f18; m1 = 19.f*(-f0+ f5+f6+f7+f8+f10+f11+f12+f13+f15+f16+f17+f18 -(u*u+v*v+w*w));//+8.f*(f5+f6+f7+f8+f10+f11+f12+f13+f15+f16+f17+f18); m2 = 12.f*f0+ -4.f*f1+ -4.f*f2+ -4.f*f3+ -4.f*f4+ f5+ f6+ f7+ f8+ -4.f*f9+ f10+ f11+ f12+ f13+ -4.f*f14+ f15+ f16+ f17+ f18 +7.53968254f*(u*u+v*v+w*w); // m4 = 1.666666667f*(-2.f*f1+2.f*f3+f5-f6-f7+f8+f10-f12+f15-f17); // m6 = 1.666666667f*(-2.f*f2+2.f*f4+f5+f6-f7-f8+f11-f13+f16-f18); // m8 = 1.666666667f*(-2.f*f9+f10+f11+f12+f13+2.f*f14-f15-f16-f17-f18); m4 = 1.666666667f*(-3.f*f1+3.f*f3+u); m6 = 1.666666667f*(-3.f*f2+3.f*f4+v); m8 = 1.666666667f*(-3.f*f9+3.f*f14+w); m9 = 2.f*f1+ - f2+ 2.f*f3+ - f4+ f5+ f6+ f7+ f8+- f9+ f10+ -2.f*f11+ f12+-2.f*f13+- f14+ f15+ -2.f*f16+ f17+-2.f*f18 -(2.f*u*u-(v*v+w*w)); m10 =-4.f*f1+ 2.f*f2+ -4.f*f3+ 2.f*f4+ f5+ f6+ f7+ f8+ 2.f*f9+ f10+ -2.f*f11+ f12+-2.f*f13+ 2.f*f14+ f15+ -2.f*f16+ f17+-2.f*f18; m11 = f2 + f4+ f5+ f6+ f7+ f8+- f9+-f10 +-f12 +- f14+-f15 +-f17 -(v*v-w*w); m12 = -2.f*f2 -2.f*f4+ f5+ f6+ f7+ f8+ 2.f*f9+-f10 +-f12 + 2.f*f14+-f15 +-f17 ; m13 = f5+-f6+ f7+-f8 -u*v; m14 = f11 +- f13 + - f16 + f18 -v*w; m15 = f10 + - f12 +-f15 + f17 -u*w; m16 = f5+-f6+-f7+ f8 -f10 + f12 +-f15 + f17 ; m17 = -f5+-f6+ f7+ f8 + f11 +- f13 + f16 +- f18; m18 = f10+- f11+ f12+- f13 +-f15+ f16+-f17+ f18; if(SmagLES == "YES"){ // float PI11 = -0.026315789f*m1-0.5f *omega*m9; // float PI22 = -0.026315789f*m1+0.25f*omega*(m9-3.0f*m11); // float PI33 = -0.026315789f*m1+0.25f*omega*(m9+3.0f*m11); // // float PI12 = -1.5f*omega*m13; // float PI23 = -1.5f*omega*m14; // float PI13 = -1.5f*omega*m15; // float Smag = sqrt(2.f*(PI11*PI11+PI22*PI22+PI33*PI33+2.f*PI12*PI12+2.f*PI23*PI23+2.f*PI13*PI13)); // omega = 1.0f/(1.0f/omega+3.f*CS*Smag); // float PI11 = LRLEVEL*-1.0f/38.0f*( 
(m1)+19.0f*omega* (m9)); // float PI22 = LRLEVEL*-1.0f/76.0f*(2.0f*(m1)-19.0f*(omega*(m9)-3.0f*omega*(m11))); // float PI33 = LRLEVEL*-1.0f/76.0f*(2.0f*(m1)-19.0f*(omega*(m9)+3.0f*omega*(m11))); // float PI12 = LRLEVEL*-1.5f*omega*m13; // float PI23 = LRLEVEL*-1.5f*omega*m14; // float PI13 = LRLEVEL*-1.5f*omega*m15; // float nu0 = ((1.0f/omega)-0.5f)/3.0f; // float Smag = sqrt(PI11*PI11+PI22*PI22+PI33*PI33+PI12*PI12+PI23*PI23+PI13*PI13); // omega = 1.0f/(3.0f*(nu0+Cs*Smag*LRLEVEL*LRLEVEL)+0.5f); float feq0 = 0.1904761791f*rho+-0.597127747f*usqr ; float feq1 = 0.1031746045f*rho+ 0.032375918f*usqr+ 0.1666666667f*u ; float feq2 = 0.1031746045f*rho+ 0.032375918f*usqr+ 0.1666666667f*v ; float feq3 = 0.1031746045f*rho+ 0.032375918f*usqr+ -0.1666666667f*u ; float feq4 = 0.1031746045f*rho+ 0.032375918f*usqr+ -0.1666666667f*v ; float feq5 = 0.0158730149f*rho+ 0.033572690f*usqr+ 0.083333333f*( u+v) ; float feq6 = 0.0158730149f*rho+ 0.033572690f*usqr+ -0.083333333f*( u-v) ; float feq7 = 0.0158730149f*rho+ 0.033572690f*usqr+ -0.083333333f*( u+v) ; float feq8 = 0.0158730149f*rho+ 0.033572690f*usqr+ 0.083333333f*( u-v) ; float feq9 = 0.1031746045f*rho+ 0.032375918f*usqr+ 0.1666666667f*w; float feq10= 0.0158730149f*rho+ 0.033572690f*usqr+ 0.083333333f*( u+w) ; float feq11= 0.0158730149f*rho+ 0.033572690f*usqr+ 0.083333333f*( v+w); float feq12= 0.0158730149f*rho+ 0.033572690f*usqr+ -0.083333333f*( u-w) ; float feq13= 0.0158730149f*rho+ 0.033572690f*usqr+ -0.083333333f*( v-w); float feq14= 0.1031746045f*rho+ 0.032375918f*usqr+ -0.1666666667f*w; float feq15= 0.0158730149f*rho+ 0.033572690f*usqr+ 0.083333333f*(u-w) ; float feq16= 0.0158730149f*rho+ 0.033572690f*usqr+ 0.083333333f*(v-w); float feq17= 0.0158730149f*rho+ 0.033572690f*usqr+ -0.083333333f*(u+w) ; float feq18= 0.0158730149f*rho+ 0.033572690f*usqr+ -0.083333333f*(v+w); feq1 += 0.055555556f*(2.f*u*u-(v*v+w*w)); feq2 += -0.027777778f*(2.f*u*u-(v*v+w*w))+ 0.083333333f*(v*v-w*w); feq3 += 0.055555556f*(2.f*u*u-(v*v+w*w)); feq4 += -0.027777778f*(2.f*u*u-(v*v+w*w))+ 0.083333333f*(v*v-w*w); feq5 += 0.027777778f*(2.f*u*u-(v*v+w*w))+ 0.083333333f*(v*v-w*w)+ 0.25f*u*v ; feq6 += 0.027777778f*(2.f*u*u-(v*v+w*w))+ 0.083333333f*(v*v-w*w)+ -0.25f*u*v ; feq7 += 0.027777778f*(2.f*u*u-(v*v+w*w))+ 0.083333333f*(v*v-w*w)+ 0.25f*u*v ; feq8 += 0.027777778f*(2.f*u*u-(v*v+w*w))+ 0.083333333f*(v*v-w*w)+ -0.25f*u*v ; feq9 += -0.027777778f*(2.f*u*u-(v*v+w*w))+ -0.083333333f*(v*v-w*w) ; feq10+= 0.027777778f*(2.f*u*u-(v*v+w*w))+ -0.083333333f*(v*v-w*w) + 0.25f*u*w; feq11+= -0.055555556f*(2.f*u*u-(v*v+w*w)) + 0.25f*v*w ; feq12+= 0.027777778f*(2.f*u*u-(v*v+w*w))+ -0.083333333f*(v*v-w*w) + -0.25f*u*w; feq13+= -0.055555556f*(2.f*u*u-(v*v+w*w)) -0.25f*v*w ; feq14+= -0.027777778f*(2.f*u*u-(v*v+w*w))+ -0.083333333f*(v*v-w*w) ; feq15+= 0.027777778f*(2.f*u*u-(v*v+w*w))+ -0.083333333f*(v*v-w*w) + -0.25f*u*w; feq16+= -0.055555556f*(2.f*u*u-(v*v+w*w)) + -0.25f*v*w ; feq17+= 0.027777778f*(2.f*u*u-(v*v+w*w))+ -0.083333333f*(v*v-w*w) + 0.25f*u*w; feq18+= -0.055555556f*(2.f*u*u-(v*v+w*w)) + 0.25f*v*w ; float PI11 = (f1-feq1)+(f3-feq3)+(f5-feq5)+(f6-feq6)+(f7-feq7)+(f8-feq8)+(f10-feq10)+(f12-feq12)+(f15-feq15)+(f17-feq17); float PI22 = (f2-feq2)+(f4-feq4)+(f5-feq5)+(f6-feq6)+(f7-feq7)+(f8-feq8)+(f11-feq11)+(f13-feq13)+(f16-feq16)+(f18-feq18); float PI33 = (f9-feq9)+(f14-feq14)+(f10-feq10)+(f12-feq12)+(f15-feq15)+(f17-feq17)+(f11-feq11)+(f13-feq13)+(f16-feq16)+(f18-feq18); float PI12 = (f5-feq5)+(f7-feq7)-(f6-feq6)-(f8-feq8); float PI13 = (f10-feq10)+(f17-feq17)-(f12-feq12)-(f15-feq15); float 
PI23 = (f11-feq11)+(f18-feq18)-(f13-feq13)-(f16-feq16); //float Q = sqrt(PI11*PI11+PI22*PI22+PI33*PI33+2.f*PI12*PI12+2.f*PI23*PI23+2.f*PI13*PI13); //float nu0 = ((1.0f/omega)-0.5f)/3.0f; // //float Smag = (sqrt(nu0*nu0+18.f*CS*Q)-nu0)/(6.f*CS); // ////omega = 1.0f/(1.0f/omega+3.f*CS*Smag); // //float tau0 = 1.f/omega; //float tau = 3.f*nu0+0.5f+(sqrt(tau0*tau0+18.f*CS*CS*Q)-tau0)*0.5f; //omega = 1.f/tau; float Q = sqrt(PI11*PI11+PI22*PI22+PI33*PI33+2.f*PI12*PI12+2.f*PI23*PI23+2.f*PI13*PI13); float nu0 = ((1.0f/omega)-0.5f)/3.0f; float tau0 = 1.f/omega; //float Smag = (sqrt(nu0*nu0+18.f*CS*LRFACTOR*LRFACTOR*Q)-nu0)/(6.f*CS*LRFACTOR*LRFACTOR); //float Smag = (sqrt(4.f/9.f*tau0*tau0+8.f*CS*Q)-2.f/3.f*tau0)/(4.f*CS); //omega = 1.0f/(3.0f*(nu0+CS*Smag)+0.5f); float tau = tau0+0.5f*(-tau0+sqrt(tau0*tau0+18.f*sqrt(2.f)*CS*Q)); omega = 1.f/tau; } f0 -=- 0.012531328f*(m1)+ 0.047619048f*(m2); f1 -=-0.0045948204f*(m1)+-0.015873016f*(m2)+ -0.1f*(m4) + 0.055555556f*((m9)*omega-m10); f2 -=-0.0045948204f*(m1)+-0.015873016f*(m2) + -0.1f*(m6) +-0.027777778f*((m9)*omega-m10)+ 0.083333333f*((m11)*omega-m12); f3 -=-0.0045948204f*(m1)+-0.015873016f*(m2)+ 0.1f*(m4) + 0.055555556f*((m9)*omega-m10); f4 -=-0.0045948204f*(m1)+-0.015873016f*(m2) + 0.1f*(m6) +-0.027777778f*((m9)*omega-m10)+ 0.083333333f*((m11)*omega-m12); f5 -= 0.0033416876f*(m1)+ 0.003968254f*(m2)+ 0.025f*(m4+m6) +0.013888889f*(m10)+0.041666667f*(m12)+0.125f*( m16-m17)+ omega*(0.027777778f*(m9) +0.083333333f*(m11)+( 0.25f*(m13))); f6 -= 0.0033416876f*(m1)+ 0.003968254f*(m2)+-0.025f*(m4-m6) +0.013888889f*(m10)+0.041666667f*(m12)+0.125f*(-m16-m17)+ omega*(0.027777778f*(m9) +0.083333333f*(m11)+(-0.25f*(m13))); f7 -= 0.0033416876f*(m1)+ 0.003968254f*(m2)+-0.025f*(m4+m6) +0.013888889f*(m10)+0.041666667f*(m12)+0.125f*(-m16+m17)+ omega*(0.027777778f*(m9) +0.083333333f*(m11)+( 0.25f*(m13))); f8 -= 0.0033416876f*(m1)+ 0.003968254f*(m2)+ 0.025f*(m4-m6) +0.013888889f*(m10)+0.041666667f*(m12)+0.125f*( m16+m17)+ omega*(0.027777778f*(m9) +0.083333333f*(m11)+(-0.25f*(m13))); f9 -=-0.0045948204f*(m1)+-0.015873016f*(m2) + -0.1f*(m8)+-0.027777778f*((m9)*omega-m10)+-0.083333333f*((m11)*omega-m12); f10-= 0.0033416876f*(m1)+ 0.003968254f*(m2)+ 0.025f*(m4+m8) +0.013888889f*(m10)-0.041666667f*(m12)+0.125f*(-m16+m18)+ omega*(0.027777778f*(m9) -0.083333333f*(m11)+( 0.25f*(m15))); f11-= 0.0033416876f*(m1)+ 0.003968254f*(m2) + 0.025f*(m6+m8)+0.125f*( m17-m18)-0.027777778f*(m10)+omega*(-0.055555556f*(m9) +( 0.25f*(m14))); f12-= 0.0033416876f*(m1)+ 0.003968254f*(m2)+-0.025f*(m4-m8) +0.013888889f*(m10)-0.041666667f*(m12)+0.125f*( m16+m18)+ omega*(0.027777778f*(m9) -0.083333333f*(m11)+(-0.25f*(m15))); f13-= 0.0033416876f*(m1)+ 0.003968254f*(m2) + -0.025f*(m6-m8)+0.125f*(-m17-m18)-0.027777778f*(m10)+omega*(-0.055555556f*(m9) +(-0.25f*(m14))); f14-=-0.0045948204f*(m1)+-0.015873016f*(m2) + 0.1f*(m8)+-0.027777778f*((m9)*omega-m10)+-0.083333333f*((m11)*omega-m12); f15-= 0.0033416876f*(m1)+ 0.003968254f*(m2)+ 0.025f*(m4-m8) +0.013888889f*(m10)-0.041666667f*(m12)+0.125f*(-m16-m18)+ omega*(0.027777778f*(m9) -0.083333333f*(m11)+(-0.25f*(m15))); f16-= 0.0033416876f*(m1)+ 0.003968254f*(m2) + 0.025f*(m6-m8)+0.125f*( m17+m18)-0.027777778f*(m10)+omega*(-0.055555556f*(m9) +(-0.25f*(m14))); f17-= 0.0033416876f*(m1)+ 0.003968254f*(m2)+-0.025f*(m4+m8) +0.013888889f*(m10)-0.041666667f*(m12)+0.125f*( m16-m18)+ omega*(0.027777778f*(m9) -0.083333333f*(m11)+( 0.25f*(m15))); f18-= 0.0033416876f*(m1)+ 0.003968254f*(m2) + -0.025f*(m6+m8)+0.125f*(-m17+m18)-0.027777778f*(m10)+omega*(-0.055555556f*(m9) +( 
0.25f*(m14))); } inline __device__ void vel_av(float& f0, float& f1, float& f2, float& f3 , float& f4 , float& f5 , float& f6 , float& f7 , float& f8 , float& f9, float& f10, float& f11, float& f12, float& f13, float& f14, float& f15, float& f16, float& f17, float& f18, float& uAv, float& vAv, int t) { float u,v;//,w; u = f1-f3+f5-f6-f7+f8+f10-f12+f15-f17; v = f2-f4+f5+f6-f7-f8+f11-f13+f16-f18; uAv = (uAv*(t-START_VELAV)+u)/((t-START_VELAV)+1); vAv = (vAv*(t-START_VELAV)+v)/((t-START_VELAV)+1); } inline __device__ void vel_avLR(float& f0, float& f1, float& f2, float& f3 , float& f4 , float& f5 , float& f6 , float& f7 , float& f8 , float& f9, float& f10, float& f11, float& f12, float& f13, float& f14, float& f15, float& f16, float& f17, float& f18, float& uAv, float& vAv, float t) { float u,v;//,w; u = f1-f3+f5-f6-f7+f8+f10-f12+f15-f17; v = f2-f4+f5+f6-f7-f8+f11-f13+f16-f18; uAv = (uAv*(t-START_VELAV)+u*LRFACTOR)/((t-START_VELAV)+LRFACTOR); vAv = (vAv*(t-START_VELAV)+v*LRFACTOR)/((t-START_VELAV)+LRFACTOR); } inline __device__ void vel_fluc(float& f0, float& f1, float& f2, float& f3 , float& f4 , float& f5 , float& f6 , float& f7 , float& f8 , float& f9, float& f10, float& f11, float& f12, float& f13, float& f14, float& f15, float& f16, float& f17, float& f18, float& uAv, float& vAv, float& ufluc, float& vfluc, int t) { float u,v;//,w; u = f1-f3+f5-f6-f7+f8+f10-f12+f15-f17; v = f2-f4+f5+f6-f7-f8+f11-f13+f16-f18; u = (u-uAv)*(u-uAv); v = (v-vAv)*(v-vAv); ufluc = (ufluc*(t-START_VELFLUC)+u)/((t-START_VELFLUC)+1); vfluc = (vfluc*(t-START_VELFLUC)+v)/((t-START_VELFLUC)+1); } inline __device__ void vel_flucLR(float& f0, float& f1, float& f2, float& f3 , float& f4 , float& f5 , float& f6 , float& f7 , float& f8 , float& f9, float& f10, float& f11, float& f12, float& f13, float& f14, float& f15, float& f16, float& f17, float& f18, float& uAv, float& vAv, float& ufluc, float& vfluc, float t) { float u,v;//,w; u = f1-f3+f5-f6-f7+f8+f10-f12+f15-f17; v = f2-f4+f5+f6-f7-f8+f11-f13+f16-f18; u = (u-uAv)*(u-uAv); v = (v-vAv)*(v-vAv); ufluc = (ufluc*(t-START_VELFLUC)+u*LRFACTOR)/((t-START_VELFLUC)+LRFACTOR); vfluc = (vfluc*(t-START_VELFLUC)+v*LRFACTOR)/((t-START_VELFLUC)+LRFACTOR); } inline __device__ void bgk_scale_cf(float& f0, float& f1, float& f2, float& f3 , float& f4 , float& f5 , float& f6 , float& f7 , float& f8 , float& f9, float& f10, float& f11, float& f12, float& f13, float& f14, float& f15, float& f16, float& f17, float& f18, float SF) { float rho,u,v,w; rho = f0+f1+f2+f3+f4+f5+f6+f7+f8+f9+ f10+f11+f12+f13+f14+f15+f16+f17+f18; u = f1-f3+f5-f6-f7+f8+f10-f12+f15-f17; v = f2-f4+f5+f6-f7-f8+f11-f13+f16-f18; w = f9+f10+f11+f12+f13-f14-f15-f16-f17-f18; float usqr = u*u+v*v+w*w; float feq0 = 0.3333333333f*(rho-1.5f*usqr); float feq1 = 0.0555555556f*(rho+3.0f*u+4.5f*u*u-1.5f*usqr); float feq2 = 0.0555555556f*(rho+3.0f*v+4.5f*v*v-1.5f*usqr); float feq3 = 0.0555555556f*(rho-3.0f*u+4.5f*u*u-1.5f*usqr); float feq4 = 0.0555555556f*(rho-3.0f*v+4.5f*v*v-1.5f*usqr); float feq5 = 0.0277777778f*(rho+3.0f*(u+v)+4.5f*(u+v)*(u+v)-1.5f*usqr); float feq6 = 0.0277777778f*(rho+3.0f*(-u+v)+4.5f*(-u+v)*(-u+v)-1.5f*usqr); float feq7 = 0.0277777778f*(rho+3.0f*(-u-v)+4.5f*(-u-v)*(-u-v)-1.5f*usqr); float feq8 = 0.0277777778f*(rho+3.0f*(u-v)+4.5f*(u-v)*(u-v)-1.5f*usqr); float feq9 = 0.0555555556f*(rho+3.0f*w+4.5f*w*w-1.5f*usqr); float feq10 = 0.0277777778f*(rho+3.0f*(u+w)+4.5f*(u+w)*(u+w)-1.5f*usqr); float feq11 = 0.0277777778f*(rho+3.0f*(v+w)+4.5f*(v+w)*(v+w)-1.5f*usqr); float feq12 = 
0.0277777778f*(rho+3.0f*(-u+w)+4.5f*(-u+w)*(-u+w)-1.5f*usqr); float feq13 = 0.0277777778f*(rho+3.0f*(-v+w)+4.5f*(-v+w)*(-v+w)-1.5f*usqr); float feq14 = 0.0555555556f*(rho-3.0f*w+4.5f*w*w-1.5f*usqr); float feq15 = 0.0277777778f*(rho+3.0f*(u-w)+4.5f*(u-w)*(u-w)-1.5f*usqr); float feq16 = 0.0277777778f*(rho+3.0f*(v-w)+4.5f*(v-w)*(v-w)-1.5f*usqr); float feq17 = 0.0277777778f*(rho+3.0f*(-u-w)+4.5f*(-u-w)*(-u-w)-1.5f*usqr); float feq18 = 0.0277777778f*(rho+3.0f*(-v-w)+4.5f*(-v-w)*(-v-w)-1.5f*usqr); f0 =SF*f0 +(1.0f-SF)*feq0 ; f1 =SF*f1 +(1.0f-SF)*feq1 ; f2 =SF*f2 +(1.0f-SF)*feq2 ; f3 =SF*f3 +(1.0f-SF)*feq3 ; f4 =SF*f4 +(1.0f-SF)*feq4 ; f5 =SF*f5 +(1.0f-SF)*feq5 ; f6 =SF*f6 +(1.0f-SF)*feq6 ; f7 =SF*f7 +(1.0f-SF)*feq7 ; f8 =SF*f8 +(1.0f-SF)*feq8 ; f9 =SF*f9 +(1.0f-SF)*feq9 ; f10=SF*f10+(1.0f-SF)*feq10; f11=SF*f11+(1.0f-SF)*feq11; f12=SF*f12+(1.0f-SF)*feq12; f13=SF*f13+(1.0f-SF)*feq13; f14=SF*f14+(1.0f-SF)*feq14; f15=SF*f15+(1.0f-SF)*feq15; f16=SF*f16+(1.0f-SF)*feq16; f17=SF*f17+(1.0f-SF)*feq17; f18=SF*f18+(1.0f-SF)*feq18; } inline __device__ void mrt_scale_cf(float& f0, float& f1, float& f2, float& f3 , float& f4 , float& f5 , float& f6 , float& f7 , float& f8 , float& f9, float& f10, float& f11, float& f12, float& f13, float& f14, float& f15, float& f16, float& f17, float& f18, float SF) { float rho,u,v,w; rho = f0+f1+f2+f3+f4+f5+f6+f7+f8+f9+ f10+f11+f12+f13+f14+f15+f16+f17+f18; u = f1-f3+f5-f6-f7+f8+f10-f12+f15-f17; v = f2-f4+f5+f6-f7-f8+f11-f13+f16-f18; w = f9+f10+f11+f12+f13-f14-f15-f16-f17-f18; float usqr = u*u+v*v+w*w; float feq0 = 0.1904761791f*rho+-0.597127747f*usqr ; float feq1 = 0.1031746045f*rho+ 0.032375918f*usqr+ 0.1666666667f*u ; float feq2 = 0.1031746045f*rho+ 0.032375918f*usqr+ 0.1666666667f*v ; float feq3 = 0.1031746045f*rho+ 0.032375918f*usqr+ -0.1666666667f*u ; float feq4 = 0.1031746045f*rho+ 0.032375918f*usqr+ -0.1666666667f*v ; float feq5 = 0.0158730149f*rho+ 0.033572690f*usqr+ 0.083333333f*( u+v) ; float feq6 = 0.0158730149f*rho+ 0.033572690f*usqr+ -0.083333333f*( u-v) ; float feq7 = 0.0158730149f*rho+ 0.033572690f*usqr+ -0.083333333f*( u+v) ; float feq8 = 0.0158730149f*rho+ 0.033572690f*usqr+ 0.083333333f*( u-v) ; float feq9 = 0.1031746045f*rho+ 0.032375918f*usqr+ 0.1666666667f*w; float feq10= 0.0158730149f*rho+ 0.033572690f*usqr+ 0.083333333f*( u+w) ; float feq11= 0.0158730149f*rho+ 0.033572690f*usqr+ 0.083333333f*( v+w); float feq12= 0.0158730149f*rho+ 0.033572690f*usqr+ -0.083333333f*( u-w) ; float feq13= 0.0158730149f*rho+ 0.033572690f*usqr+ -0.083333333f*( v-w); float feq14= 0.1031746045f*rho+ 0.032375918f*usqr+ -0.1666666667f*w; float feq15= 0.0158730149f*rho+ 0.033572690f*usqr+ 0.083333333f*(u-w) ; float feq16= 0.0158730149f*rho+ 0.033572690f*usqr+ 0.083333333f*(v-w); float feq17= 0.0158730149f*rho+ 0.033572690f*usqr+ -0.083333333f*(u+w) ; float feq18= 0.0158730149f*rho+ 0.033572690f*usqr+ -0.083333333f*(v+w); feq1 += 0.055555556f*(2.f*u*u-(v*v+w*w)); feq2 += -0.027777778f*(2.f*u*u-(v*v+w*w))+ 0.083333333f*(v*v-w*w); feq3 += 0.055555556f*(2.f*u*u-(v*v+w*w)); feq4 += -0.027777778f*(2.f*u*u-(v*v+w*w))+ 0.083333333f*(v*v-w*w); feq5 += 0.027777778f*(2.f*u*u-(v*v+w*w))+ 0.083333333f*(v*v-w*w)+ 0.25f*u*v ; feq6 += 0.027777778f*(2.f*u*u-(v*v+w*w))+ 0.083333333f*(v*v-w*w)+ -0.25f*u*v ; feq7 += 0.027777778f*(2.f*u*u-(v*v+w*w))+ 0.083333333f*(v*v-w*w)+ 0.25f*u*v ; feq8 += 0.027777778f*(2.f*u*u-(v*v+w*w))+ 0.083333333f*(v*v-w*w)+ -0.25f*u*v ; feq9 += -0.027777778f*(2.f*u*u-(v*v+w*w))+ -0.083333333f*(v*v-w*w) ; feq10+= 0.027777778f*(2.f*u*u-(v*v+w*w))+ -0.083333333f*(v*v-w*w) + 
0.25f*u*w; feq11+= -0.055555556f*(2.f*u*u-(v*v+w*w)) + 0.25f*v*w ; feq12+= 0.027777778f*(2.f*u*u-(v*v+w*w))+ -0.083333333f*(v*v-w*w) + -0.25f*u*w; feq13+= -0.055555556f*(2.f*u*u-(v*v+w*w)) -0.25f*v*w ; feq14+= -0.027777778f*(2.f*u*u-(v*v+w*w))+ -0.083333333f*(v*v-w*w) ; feq15+= 0.027777778f*(2.f*u*u-(v*v+w*w))+ -0.083333333f*(v*v-w*w) + -0.25f*u*w; feq16+= -0.055555556f*(2.f*u*u-(v*v+w*w)) + -0.25f*v*w ; feq17+= 0.027777778f*(2.f*u*u-(v*v+w*w))+ -0.083333333f*(v*v-w*w) + 0.25f*u*w; feq18+= -0.055555556f*(2.f*u*u-(v*v+w*w)) + 0.25f*v*w ; //float m1 = 19.f*(-f0+ f5+f6+f7+f8+f10+f11+f12+f13+f15+f16+f17+f18) -19.f*(u*u+v*v+w*w); //float m2 = 12.f*f0+-4.f*f1+-4.f*f2+-4.f*f3+-4.f*f4+f5+f6+f7+f8+-4.f*f9+f10+f11+f12+f13+-4.f*f14+f15+f16+f17+f18 +7.53968254f*(u*u+v*v+w*w); //float m4 = 1.666666667f*(-2.f*f1+2.f*f3+f5-f6-f7+f8+f10-f12+f15-f17); //float m6 = 1.666666667f*(-2.f*f2+2.f*f4+f5+f6-f7-f8+f11-f13+f16-f18); //float m8 = 1.666666667f*(-2.f*f9+f10+f11+f12+f13+2.f*f14-f15-f16-f17-f18); //float m4 = 1.666666667f*(-3.f*f1+3.f*f3+u); //float m6 = 1.666666667f*(-3.f*f2+3.f*f4+v); //float m8 = 1.666666667f*(-3.f*f9+3.f*f14+w); //float m9 = 2.f*f1+ - f2+ 2.f*f3+ - f4+ f5+ f6+ f7+ f8+- f9+ f10+ -2.f*f11+ f12+-2.f*f13+- f14+ f15+ -2.f*f16+ f17+-2.f*f18 -(2.f*u*u-(v*v+w*w)); //float m10 =-4.f*f1+ 2.f*f2+ -4.f*f3+ 2.f*f4+ f5+ f6+ f7+ f8+ 2.f*f9+ f10+ -2.f*f11+ f12+-2.f*f13+ 2.f*f14+ f15+ -2.f*f16+ f17+-2.f*f18; //float m11 = f2 + f4+ f5+ f6+ f7+ f8+- f9+-f10 +-f12 +- f14+-f15 +-f17 -(v*v-w*w); //float m12 = -2.f*f2 -2.f*f4+ f5+ f6+ f7+ f8+ 2.f*f9+-f10 +-f12 + 2.f*f14+-f15 +-f17 ; //float m13 = f5+-f6+ f7+-f8 -u*v; //float m14 = f11 +- f13 + - f16 + f18 -v*w; //float m15 = f10 + - f12 +-f15 + f17 -u*w; //float m16 = f5+-f6+-f7+ f8 -f10 + f12 +-f15 + f17 ; //float m17 = -f5+-f6+ f7+ f8 + f11 +- f13 + f16 +- f18; //float m18 = f10+- f11+ f12+- f13 +-f15+ f16+-f17+ f18; float m1 = 19.f*(-f0+ f5+f6+f7+f8+f10+f11+f12+f13+f15+f16+f17+f18 -(u*u+v*v+w*w)); float m9 = 2.f*f1+ - f2+ 2.f*f3+ - f4+ f5+ f6+ f7+ f8+- f9+ f10+ -2.f*f11+ f12+-2.f*f13+- f14+ f15+ -2.f*f16+ f17+-2.f*f18 -(2.f*u*u-(v*v+w*w)); float m11 = f2 + f4+ f5+ f6+ f7+ f8+- f9+-f10 +-f12 +- f14+-f15 +-f17 -(v*v-w*w); float m13 = f5+-f6+ f7+-f8 -u*v; float m14 = f11 +- f13 + - f16 + f18 -v*w; float m15 = f10 + - f12 +-f15 + f17 -u*w; float omega = 1.0f/(3.0f*(UMAX*OBSTR1*2.f/RE)+0.5f); float omega2 = 2.0f/(1.0f+2.0f*(2.0f/omega-1.0f)); float PI11 = -0.026315789f*m1-0.5f *omega*m9; float PI22 = -0.026315789f*m1+0.25f*omega*(m9-3.0f*m11); float PI33 = -0.026315789f*m1+0.25f*omega*(m9+3.0f*m11); float PI12 = -1.5f*omega*m13; float PI23 = -1.5f*omega*m14; float PI13 = -1.5f*omega*m15; //we know Smag on coarse mesh float Smag = sqrt(2.f*(PI11*PI11+PI22*PI22+PI33*PI33+2.f*PI12*PI12+2.f*PI23*PI23+2.f*PI13*PI13)); //omega = 1.0f/(3.0f*(nu0+Cs*Smag*sqrt(2.f))+0.5f); //omega = 1.0f/(1.0f/omega+3.f*CS*Smag); //omega2 = 1.0f/(1.0f/omega2+3.f*CS*Smag*sqrt(2.f)*LRFACTOR*LRFACTOR); //omega = 1.0f/(1.0f/omega +3.f*CS*Smag); //omega2 = 1.0f/(1.0f/omega2+3.f*CS*Smag*sqrt(2.f)*LRFACTOR*LRFACTOR); //omega = 1.0f/(1.0f/omega +3.f*CS*Smag); //omega2 = 1.0f/(1.0f*LRLEVEL/omega2-1.f+0.5f+3.f*CS*Smag*sqrt(2.f)*LRFACTOR); //float PI11 = (f1-feq1)+(f3-feq3)+(f5-feq5)+(f6-feq6)+(f7-feq7)+(f8-feq8)+(f10-feq10)+(f12-feq12)+(f15-feq15)+(f17-feq17); //float PI22 = (f2-feq2)+(f4-feq4)+(f5-feq5)+(f6-feq6)+(f7-feq7)+(f8-feq8)+(f11-feq11)+(f13-feq13)+(f16-feq16)+(f18-feq18); //float PI33 = 
(f9-feq9)+(f14-feq14)+(f10-feq10)+(f12-feq12)+(f15-feq15)+(f17-feq17)+(f11-feq11)+(f13-feq13)+(f16-feq16)+(f18-feq18); //float PI12 = (f5-feq5)+(f7-feq7)-(f6-feq6)-(f8-feq8); //float PI13 = (f10-feq10)+(f17-feq17)-(f12-feq12)-(f15-feq15); //float PI23 = (f11-feq11)+(f18-feq18)-(f13-feq13)-(f16-feq16); // //float Q = sqrt(PI11*PI11+PI22*PI22+PI33*PI33+2.f*PI12*PI12+2.f*PI23*PI23+2.f*PI13*PI13); //float nu0 = ((1.0f/omega)-0.5f)/3.0f; //float tau0c = 1.f/omega; //float tau = tau0c+0.5*(-tau0c+sqrt(tau0c*tau0c+18.f*CS*Q));//tau_total of coarse mesh //omega = 1.f/tau;//total omega on coarse mesh //tau = tau0+0.5*(-tau0+sqrt(tau0*tau0+18.f*CS*LRFACTOR*Q)); //omega2= 1.f/tau; SF = (omega*(1.0f-omega2))/((1.0f-omega)*omega2/LRFACTOR);//for post-collision //SF = omega*0.5f/omega2;//for post-streaming, pre-collision? f0 =SF*f0 +(1.0f-SF)*feq0 ; f1 =SF*f1 +(1.0f-SF)*feq1 ; f2 =SF*f2 +(1.0f-SF)*feq2 ; f3 =SF*f3 +(1.0f-SF)*feq3 ; f4 =SF*f4 +(1.0f-SF)*feq4 ; f5 =SF*f5 +(1.0f-SF)*feq5 ; f6 =SF*f6 +(1.0f-SF)*feq6 ; f7 =SF*f7 +(1.0f-SF)*feq7 ; f8 =SF*f8 +(1.0f-SF)*feq8 ; f9 =SF*f9 +(1.0f-SF)*feq9 ; f10=SF*f10+(1.0f-SF)*feq10; f11=SF*f11+(1.0f-SF)*feq11; f12=SF*f12+(1.0f-SF)*feq12; f13=SF*f13+(1.0f-SF)*feq13; f14=SF*f14+(1.0f-SF)*feq14; f15=SF*f15+(1.0f-SF)*feq15; f16=SF*f16+(1.0f-SF)*feq16; f17=SF*f17+(1.0f-SF)*feq17; f18=SF*f18+(1.0f-SF)*feq18; } inline __device__ void mrt_scale_fc_LES(float& f0, float& f1, float& f2, float& f3 , float& f4 , float& f5 , float& f6 , float& f7 , float& f8 , float& f9, float& f10, float& f11, float& f12, float& f13, float& f14, float& f15, float& f16, float& f17, float& f18, float omega, float omega2) { float rho,u,v,w; rho = f0+f1+f2+f3+f4+f5+f6+f7+f8+f9+ f10+f11+f12+f13+f14+f15+f16+f17+f18; u = f1-f3+f5-f6-f7+f8+f10-f12+f15-f17; v = f2-f4+f5+f6-f7-f8+f11-f13+f16-f18; w = f9+f10+f11+f12+f13-f14-f15-f16-f17-f18; float usqr = u*u+v*v+w*w; float feq0 = 0.1904761791f*rho+-0.597127747f*usqr ; float feq1 = 0.1031746045f*rho+ 0.032375918f*usqr+ 0.1666666667f*u ; float feq2 = 0.1031746045f*rho+ 0.032375918f*usqr+ 0.1666666667f*v ; float feq3 = 0.1031746045f*rho+ 0.032375918f*usqr+ -0.1666666667f*u ; float feq4 = 0.1031746045f*rho+ 0.032375918f*usqr+ -0.1666666667f*v ; float feq5 = 0.0158730149f*rho+ 0.033572690f*usqr+ 0.083333333f*( u+v) ; float feq6 = 0.0158730149f*rho+ 0.033572690f*usqr+ -0.083333333f*( u-v) ; float feq7 = 0.0158730149f*rho+ 0.033572690f*usqr+ -0.083333333f*( u+v) ; float feq8 = 0.0158730149f*rho+ 0.033572690f*usqr+ 0.083333333f*( u-v) ; float feq9 = 0.1031746045f*rho+ 0.032375918f*usqr+ 0.1666666667f*w; float feq10= 0.0158730149f*rho+ 0.033572690f*usqr+ 0.083333333f*( u+w) ; float feq11= 0.0158730149f*rho+ 0.033572690f*usqr+ 0.083333333f*( v+w); float feq12= 0.0158730149f*rho+ 0.033572690f*usqr+ -0.083333333f*( u-w) ; float feq13= 0.0158730149f*rho+ 0.033572690f*usqr+ -0.083333333f*( v-w); float feq14= 0.1031746045f*rho+ 0.032375918f*usqr+ -0.1666666667f*w; float feq15= 0.0158730149f*rho+ 0.033572690f*usqr+ 0.083333333f*(u-w) ; float feq16= 0.0158730149f*rho+ 0.033572690f*usqr+ 0.083333333f*(v-w); float feq17= 0.0158730149f*rho+ 0.033572690f*usqr+ -0.083333333f*(u+w) ; float feq18= 0.0158730149f*rho+ 0.033572690f*usqr+ -0.083333333f*(v+w); feq1 += 0.055555556f*(2.f*u*u-(v*v+w*w)); feq2 += -0.027777778f*(2.f*u*u-(v*v+w*w))+ 0.083333333f*(v*v-w*w); feq3 += 0.055555556f*(2.f*u*u-(v*v+w*w)); feq4 += -0.027777778f*(2.f*u*u-(v*v+w*w))+ 0.083333333f*(v*v-w*w); feq5 += 0.027777778f*(2.f*u*u-(v*v+w*w))+ 0.083333333f*(v*v-w*w)+ 0.25f*u*v ; feq6 += 
0.027777778f*(2.f*u*u-(v*v+w*w))+ 0.083333333f*(v*v-w*w)+ -0.25f*u*v ; feq7 += 0.027777778f*(2.f*u*u-(v*v+w*w))+ 0.083333333f*(v*v-w*w)+ 0.25f*u*v ; feq8 += 0.027777778f*(2.f*u*u-(v*v+w*w))+ 0.083333333f*(v*v-w*w)+ -0.25f*u*v ; feq9 += -0.027777778f*(2.f*u*u-(v*v+w*w))+ -0.083333333f*(v*v-w*w) ; feq10+= 0.027777778f*(2.f*u*u-(v*v+w*w))+ -0.083333333f*(v*v-w*w) + 0.25f*u*w; feq11+= -0.055555556f*(2.f*u*u-(v*v+w*w)) + 0.25f*v*w ; feq12+= 0.027777778f*(2.f*u*u-(v*v+w*w))+ -0.083333333f*(v*v-w*w) + -0.25f*u*w; feq13+= -0.055555556f*(2.f*u*u-(v*v+w*w)) -0.25f*v*w ; feq14+= -0.027777778f*(2.f*u*u-(v*v+w*w))+ -0.083333333f*(v*v-w*w) ; feq15+= 0.027777778f*(2.f*u*u-(v*v+w*w))+ -0.083333333f*(v*v-w*w) + -0.25f*u*w; feq16+= -0.055555556f*(2.f*u*u-(v*v+w*w)) + -0.25f*v*w ; feq17+= 0.027777778f*(2.f*u*u-(v*v+w*w))+ -0.083333333f*(v*v-w*w) + 0.25f*u*w; feq18+= -0.055555556f*(2.f*u*u-(v*v+w*w)) + 0.25f*v*w ; float m1 = 19.f*(-f0+ f5+f6+f7+f8+f10+f11+f12+f13+f15+f16+f17+f18 -(u*u+v*v+w*w)); float m9 = 2.f*f1+ - f2+ 2.f*f3+ - f4+ f5+ f6+ f7+ f8+- f9+ f10+ -2.f*f11+ f12+-2.f*f13+- f14+ f15+ -2.f*f16+ f17+-2.f*f18 -(2.f*u*u-(v*v+w*w)); float m11 = f2 + f4+ f5+ f6+ f7+ f8+- f9+-f10 +-f12 +- f14+-f15 +-f17 -(v*v-w*w); float m13 = f5+-f6+ f7+-f8 -u*v; float m14 = f11 +- f13 + - f16 + f18 -v*w; float m15 = f10 + - f12 +-f15 + f17 -u*w; //float PI11 = -0.026315789f*m1-0.5f *omega*m9; //float PI22 = -0.026315789f*m1+0.25f*omega*(m9-3.0f*m11); //float PI33 = -0.026315789f*m1+0.25f*omega*(m9+3.0f*m11); //float PI12 = -1.5f*omega*m13; //float PI23 = -1.5f*omega*m14; //float PI13 = -1.5f*omega*m15; ////we know Smag on fine mesh. Smag_c=Smag_f*sqrt(2) //float Smag = sqrt(2.f*(PI11*PI11+PI22*PI22+PI33*PI33+2.f*PI12*PI12+2.f*PI23*PI23+2.f*PI13*PI13)); //float nu0 = ((1.0f/omega)-0.5f)/3.0f; ////omega = 1.0f/(3.0f*(nu0+CS*Smag*sqrt(2.f))+0.5f); ////omega2 = 1.0f/(1.0f/omega2+3.f*CS*Smag*LRFACTOR); ////omega = 1.0f/(1.0f/omega+3.f*CS*Smag/sqrt(2.f)); ////omega2 = 1.0f/(1.0f*LRLEVEL/omega2-1.f+0.5f+3.f*CS*Smag*LRFACTOR); ////omega = 1.0f/(1.0f/omega+3.f*CS*Smag/sqrt(2.f)); //float PI11 = (f1-feq1)+(f3-feq3)+(f5-feq5)+(f6-feq6)+(f7-feq7)+(f8-feq8)+(f10-feq10)+(f12-feq12)+(f15-feq15)+(f17-feq17); //float PI22 = (f2-feq2)+(f4-feq4)+(f5-feq5)+(f6-feq6)+(f7-feq7)+(f8-feq8)+(f11-feq11)+(f13-feq13)+(f16-feq16)+(f18-feq18); //float PI33 = (f9-feq9)+(f14-feq14)+(f10-feq10)+(f12-feq12)+(f15-feq15)+(f17-feq17)+(f11-feq11)+(f13-feq13)+(f16-feq16)+(f18-feq18); //float PI12 = (f5-feq5)+(f7-feq7)-(f6-feq6)-(f8-feq8); //float PI13 = (f10-feq10)+(f17-feq17)-(f12-feq12)-(f15-feq15); //float PI23 = (f11-feq11)+(f18-feq18)-(f13-feq13)-(f16-feq16); // //float Q = sqrt(PI11*PI11+PI22*PI22+PI33*PI33+2.f*PI12*PI12+2.f*PI23*PI23+2.f*PI13*PI13); //float nu0 = ((1.0f/omega)-0.5f)/3.0f; //float tau0f = 1.f/omega2; //float tau0c = 1.f/omega; //float tau = tau0f+0.5*(-tau0f+sqrt(tau0f*tau0f+18.f*CS*sqrt(2.f)*Q));//tau_total of fine //omega2 = 1.f/tau;//total omega on fine mesh //tau = LRLEVEL*(tau-tau0f)+tau0c; //omega= 1.f/tau; //tau = tau0+0.5*(-tau0+sqrt(tau0*tau0+18.f*CS*Q)); float SF = (omega*(1.0f-omega2))/((1.0f-omega)*omega2/LRFACTOR); //float SF = omega2*2.f/omega; //float SF = ((1.0f-omega)*omega2/LRFACTOR)/(omega*(1.0f-omega2)); //SF = omega*2.f/omega2; f0 =SF*f0 +(1.0f-SF)*feq0 ; f1 =SF*f1 +(1.0f-SF)*feq1 ; f2 =SF*f2 +(1.0f-SF)*feq2 ; f3 =SF*f3 +(1.0f-SF)*feq3 ; f4 =SF*f4 +(1.0f-SF)*feq4 ; f5 =SF*f5 +(1.0f-SF)*feq5 ; f6 =SF*f6 +(1.0f-SF)*feq6 ; f7 =SF*f7 +(1.0f-SF)*feq7 ; f8 =SF*f8 +(1.0f-SF)*feq8 ; f9 =SF*f9 +(1.0f-SF)*feq9 ; 
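// Note (added comment): the blend f = SF*f + (1-SF)*feq applied here keeps the equilibrium part of each
// distribution and scales only the non-equilibrium part by SF. SF is built from the coarse and fine
// relaxation rates (omega, omega2) and the grid ratio LRFACTOR, which is the usual way non-equilibrium
// moments are rescaled when distributions are transferred between refinement levels (fine to coarse here).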
f10=SF*f10+(1.0f-SF)*feq10; f11=SF*f11+(1.0f-SF)*feq11; f12=SF*f12+(1.0f-SF)*feq12; f13=SF*f13+(1.0f-SF)*feq13; f14=SF*f14+(1.0f-SF)*feq14; f15=SF*f15+(1.0f-SF)*feq15; f16=SF*f16+(1.0f-SF)*feq16; f17=SF*f17+(1.0f-SF)*feq17; f18=SF*f18+(1.0f-SF)*feq18; } inline __device__ int f_mem(int f_num, int x, int y, int z, size_t pitch) { return (x+y*pitch+z*YDIM*pitch)+f_num*pitch*YDIM*ZDIM; } inline __device__ int f_memLR(int f_num, int x, int y, int z, size_t pitch) { return (x+y*pitch+z*YLRDIM*pitch)+f_num*pitch*YLRDIM*ZLRDIM; } __device__ int dmin(int a, int b) { if (a<b) return a; else return b-1; } __device__ int dmax(int a) { if (a>-1) return a; else return 0; } __device__ int dmin_p(int a, int b) { if (a<b) return a; else return 0; } __device__ int dmax_p(int a, int b) { if (a>-1) return a; else return b-1; } __global__ void simple_copy(float* fA, float* fB, size_t pitch)//pitch in elements { int x = threadIdx.x+blockIdx.x*blockDim.x;//coord in linear mem int y = threadIdx.y+blockIdx.y*blockDim.y; int z = threadIdx.z+blockIdx.z*blockDim.z; int j = x+y*pitch+z*YDIM*pitch;//index on padded mem (pitch in elements) int k = dmin(x+1,XDIM)+y*pitch+z*YDIM*pitch;//index on padded mem (pitch in elements) fB[j] = fA[k];//+0.01f; } __global__ void simple_text(float* fA, float* fB, size_t pitch)//pitch in elements { int x = threadIdx.x+blockIdx.x*blockDim.x;//coord in linear mem int y = threadIdx.y+blockIdx.y*blockDim.y; int z = threadIdx.z+blockIdx.z*blockDim.z; int j = x+y*pitch+z*YDIM*pitch;//index on padded mem (pitch in elements) fB[j] = tex2D(texRef_f0A,x+1,y);//+0.01f; } __global__ void ExtractFromC_d(float* fout, size_t pitch, float omega, float omega2)//pitch in elements //size_t pitch, float SF)//pitch in elements { int x = threadIdx.x+blockIdx.x*blockDim.x;//coord in linear mem int y = threadIdx.y+blockIdx.y*blockDim.y; int z = threadIdx.z+blockIdx.z*blockDim.z; float f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18; // if(x < LRX0+1 || x > LRX0+XLRDIM-2 || y < LRY0+1 || y > LRY0+YLRDIM-2 || z < LRZ0+1 || z > LRZ0+ZLRDIM-2) // //if(x < LRX0+2 || x > LRX0+XLRDIM-3 || y < LRY0+2 || y > LRY0+YLRDIM-3 || z < LRZ0+2 || z > LRZ0+ZLRDIM-3) // { // //do nothing // } // else{ // if( (x > LRX0+1 && x < LRX0+XLRDIM*LRFACTOR-1 && y > LRY0+1 && y < LRY0+YLRDIM*LRFACTOR-1 && z > LRZ0+1 && z < LRZ0+ZLRDIM*LRFACTOR-1) && // (x == int(LRX0+2) || x == int(LRX0+XLRDIM*LRFACTOR-1) || y == int(LRY0+2) || y == int(LRY0+YLRDIM*LRFACTOR-1) || z == int(LRZ0+2) || z == int(LRY0+ZLRDIM*LRFACTOR-1)) ) //if( (x > LRX0+3 && x < LRX0+XLRDIM*LRFACTOR-3 && y > LRY0+3 && y < LRY0+YLRDIM*LRFACTOR-3))// && if( (x > LRX0+1 && x < LRX0+XLRDIM*LRFACTOR-1 && y > LRY0+1 && y < LRY0+YLRDIM*LRFACTOR-1))// && //(x == int(LRX0+2) || x == int(LRX0+XLRDIM*LRFACTOR-1) || y == int(LRY0+2) || y == int(LRY0+YLRDIM*LRFACTOR-1)) ) { // if(x > 10 && y > 10 && z > 10 && x < 20 && y < 20 && z < 20) // { float xcoord = LRLEVEL*(x-LRX0)+0.5f; float ycoord = LRLEVEL*(y-LRY0)+0.5f; float zcoord = LRLEVEL*(z-LRZ0); int zminus = int(zcoord); int zplus = zminus+1; f0 = (zplus-zcoord)*tex2D(texRef_f0C ,xcoord,ycoord+YLRDIM*(zminus))+(zcoord-zminus)*tex2D(texRef_f0C ,xcoord,ycoord+YLRDIM*(zplus)); f2 = (zplus-zcoord)*tex2D(texRef_f2C ,xcoord,ycoord+YLRDIM*(zminus))+(zcoord-zminus)*tex2D(texRef_f2C ,xcoord,ycoord+YLRDIM*(zplus)); f4 = (zplus-zcoord)*tex2D(texRef_f4C ,xcoord,ycoord+YLRDIM*(zminus))+(zcoord-zminus)*tex2D(texRef_f4C ,xcoord,ycoord+YLRDIM*(zplus)); f9 = (zplus-zcoord)*tex2D(texRef_f9C 
,xcoord,ycoord+YLRDIM*(zminus))+(zcoord-zminus)*tex2D(texRef_f9C ,xcoord,ycoord+YLRDIM*(zplus)); f11= (zplus-zcoord)*tex2D(texRef_f11C,xcoord,ycoord+YLRDIM*(zminus))+(zcoord-zminus)*tex2D(texRef_f11C,xcoord,ycoord+YLRDIM*(zplus)); f13= (zplus-zcoord)*tex2D(texRef_f13C,xcoord,ycoord+YLRDIM*(zminus))+(zcoord-zminus)*tex2D(texRef_f13C,xcoord,ycoord+YLRDIM*(zplus)); f14= (zplus-zcoord)*tex2D(texRef_f14C,xcoord,ycoord+YLRDIM*(zminus))+(zcoord-zminus)*tex2D(texRef_f14C,xcoord,ycoord+YLRDIM*(zplus)); f16= (zplus-zcoord)*tex2D(texRef_f16C,xcoord,ycoord+YLRDIM*(zminus))+(zcoord-zminus)*tex2D(texRef_f16C,xcoord,ycoord+YLRDIM*(zplus)); f18= (zplus-zcoord)*tex2D(texRef_f18C,xcoord,ycoord+YLRDIM*(zminus))+(zcoord-zminus)*tex2D(texRef_f18C,xcoord,ycoord+YLRDIM*(zplus)); f1 = (zplus-zcoord)*tex2D(texRef_f1C ,xcoord,ycoord+YLRDIM*(zminus))+(zcoord-zminus)*tex2D(texRef_f1C ,xcoord,ycoord+YLRDIM*(zplus)); f3 = (zplus-zcoord)*tex2D(texRef_f3C ,xcoord,ycoord+YLRDIM*(zminus))+(zcoord-zminus)*tex2D(texRef_f3C ,xcoord,ycoord+YLRDIM*(zplus)); f5 = (zplus-zcoord)*tex2D(texRef_f5C ,xcoord,ycoord+YLRDIM*(zminus))+(zcoord-zminus)*tex2D(texRef_f5C ,xcoord,ycoord+YLRDIM*(zplus)); f6 = (zplus-zcoord)*tex2D(texRef_f6C ,xcoord,ycoord+YLRDIM*(zminus))+(zcoord-zminus)*tex2D(texRef_f6C ,xcoord,ycoord+YLRDIM*(zplus)); f7 = (zplus-zcoord)*tex2D(texRef_f7C ,xcoord,ycoord+YLRDIM*(zminus))+(zcoord-zminus)*tex2D(texRef_f7C ,xcoord,ycoord+YLRDIM*(zplus)); f8 = (zplus-zcoord)*tex2D(texRef_f8C ,xcoord,ycoord+YLRDIM*(zminus))+(zcoord-zminus)*tex2D(texRef_f8C ,xcoord,ycoord+YLRDIM*(zplus)); f15= (zplus-zcoord)*tex2D(texRef_f15C,xcoord,ycoord+YLRDIM*(zminus))+(zcoord-zminus)*tex2D(texRef_f15C,xcoord,ycoord+YLRDIM*(zplus)); f17= (zplus-zcoord)*tex2D(texRef_f17C,xcoord,ycoord+YLRDIM*(zminus))+(zcoord-zminus)*tex2D(texRef_f17C,xcoord,ycoord+YLRDIM*(zplus)); f10= (zplus-zcoord)*tex2D(texRef_f10C,xcoord,ycoord+YLRDIM*(zminus))+(zcoord-zminus)*tex2D(texRef_f10C,xcoord,ycoord+YLRDIM*(zplus)); f12= (zplus-zcoord)*tex2D(texRef_f12C,xcoord,ycoord+YLRDIM*(zminus))+(zcoord-zminus)*tex2D(texRef_f12C,xcoord,ycoord+YLRDIM*(zplus)); if(MODEL == "MRT") mrt_scale_fc_LES(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,omega,omega2); //mrt_scale_cf(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,SF); // else // bgk_scale_cf(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,SF); fout[f_mem(0 ,x,y,z,pitch)] = f0 ; fout[f_mem(1 ,x,y,z,pitch)] = f1 ; fout[f_mem(2 ,x,y,z,pitch)] = f2 ; fout[f_mem(3 ,x,y,z,pitch)] = f3 ; fout[f_mem(4 ,x,y,z,pitch)] = f4 ; fout[f_mem(5 ,x,y,z,pitch)] = f5 ; fout[f_mem(6 ,x,y,z,pitch)] = f6 ; fout[f_mem(7 ,x,y,z,pitch)] = f7 ; fout[f_mem(8 ,x,y,z,pitch)] = f8 ; fout[f_mem(9 ,x,y,z,pitch)] = f9 ; fout[f_mem(10,x,y,z,pitch)] = f10; fout[f_mem(11,x,y,z,pitch)] = f11; fout[f_mem(12,x,y,z,pitch)] = f12; fout[f_mem(13,x,y,z,pitch)] = f13; fout[f_mem(14,x,y,z,pitch)] = f14; fout[f_mem(15,x,y,z,pitch)] = f15; fout[f_mem(16,x,y,z,pitch)] = f16; fout[f_mem(17,x,y,z,pitch)] = f17; fout[f_mem(18,x,y,z,pitch)] = f18; } } __global__ void LR_d_ABCD_force(float* fin, float* fout, float omega, size_t pitch, float *FX, float *FY, float *FZ, int t, float *uAv, float *vAv, float *ufluc, float *vfluc)//pitch in elements { int x = threadIdx.x+blockIdx.x*blockDim.x;//coord in linear mem int y = threadIdx.y+blockIdx.y*blockDim.y; int z = threadIdx.z+blockIdx.z*blockDim.z; float xcoord = LRX0+x*LRFACTOR; float ycoord = LRY0+y*LRFACTOR; float zcoord = LRZ0+z*LRFACTOR; int j = 
x+y*pitch+z*YLRDIM*pitch;//index on padded mem (pitch in elements) float f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18; int im = ImageFcn(xcoord,ycoord,zcoord); float u_Av, v_Av, u_fluc, v_fluc; __shared__ float sumX[BLOCKSIZEX], sumY[BLOCKSIZEX], sumZ[BLOCKSIZEX]; __shared__ int check[1]; check[0] = 0; syncthreads(); // if(x < 1 || x > XLRDIM-2 || y < 1 || y > YLRDIM-2 || z < 1 || z > ZLRDIM-2) // { // //dont do anything // sumX[threadIdx.x]=0.f; // sumY[threadIdx.x]=0.f; // sumZ[threadIdx.x]=0.f; // } // else{ f0 = fin[j]; f1 = fin[f_memLR(1 ,x-1,y ,z ,pitch)]; f3 = fin[f_memLR(3 ,x+1,y ,z ,pitch)]; f2 = fin[f_memLR(2 ,x ,y-1,z ,pitch)]; f5 = fin[f_memLR(5 ,x-1,y-1,z ,pitch)]; f6 = fin[f_memLR(6 ,x+1,y-1,z ,pitch)]; f4 = fin[f_memLR(4 ,x ,y+1,z ,pitch)]; f7 = fin[f_memLR(7 ,x+1,y+1,z ,pitch)]; f8 = fin[f_memLR(8 ,x-1,y+1,z ,pitch)]; if(z != 0){ f9 = fin[f_memLR(9 ,x ,y ,z-1,pitch)]; f10= fin[f_memLR(10,x-1,y ,z-1,pitch)]; f11= fin[f_memLR(11,x ,y-1,z-1,pitch)]; f12= fin[f_memLR(12,x+1,y ,z-1,pitch)]; f13= fin[f_memLR(13,x ,y+1,z-1,pitch)]; } else{ f9 = fin[f_memLR(9 ,x ,y ,ZLRDIM-1,pitch)]; f10= fin[f_memLR(10,dmax_p(x-1,XLRDIM),y ,ZLRDIM-1,pitch)]; f11= fin[f_memLR(11,x ,dmax_p(y-1,YLRDIM),ZLRDIM-1,pitch)]; f12= fin[f_memLR(12,dmin_p(x+1,XLRDIM),y ,ZLRDIM-1,pitch)]; f13= fin[f_memLR(13,x ,dmin_p(y+1,YLRDIM),ZLRDIM-1,pitch)]; } if(z != ZLRDIM-1){ f14= fin[f_memLR(14,x ,y ,z+1,pitch)]; f15= fin[f_memLR(15,x-1,y ,z+1,pitch)]; f16= fin[f_memLR(16,x ,y-1,z+1,pitch)]; f17= fin[f_memLR(17,x+1,y ,z+1,pitch)]; f18= fin[f_memLR(18,x ,y+1,z+1,pitch)]; } else{ f14= fin[f_memLR(14,x ,y ,0 ,pitch)]; f15= fin[f_memLR(15,dmax_p(x-1,XLRDIM),y ,0 ,pitch)]; f16= fin[f_memLR(16,x ,dmax_p(y-1,YLRDIM),0 ,pitch)]; f17= fin[f_memLR(17,dmin_p(x+1,XLRDIM),y ,0 ,pitch)]; f18= fin[f_memLR(18,x ,dmin_p(y+1,YLRDIM),0 ,pitch)]; } if(im == 1 || im ==10){//BB if(im == 10){ check[0] = 1; //sumX[threadIdx.x]=2.f*f1-2.f*f3+2.f*f5+2.f*f8-2.f*f6-2.f*f7+2.f*f10-2.f*f12+2.f*f15-2.f*f17; //sumY[threadIdx.x]=2.f*f2-2.f*f4+2.f*f5-2.f*f8+2.f*f6-2.f*f7+2.f*f11-2.f*f13+2.f*f16-2.f*f18; //sumZ[threadIdx.x]=2.f*f9+2.f*f10+2.f*f11+2.f*f12+2.f*f13-2.f*f14-2.f*f15-2.f*f16-2.f*f17-2.f*f18; sumX[threadIdx.x]=2.f*f1-2.f*f3+2.f*f5+2.f*f8-2.f*f6;//-2.f*f7+2.f*f10-2.f*f12+2.f*f15-2.f*f17; sumX[threadIdx.x]+=-2.f*f7+2.f*f10-2.f*f12+2.f*f15-2.f*f17; sumY[threadIdx.x]=2.f*f2-2.f*f4+2.f*f5-2.f*f8+2.f*f6;//-2.f*f7+2.f*f11-2.f*f13+2.f*f16-2.f*f18; sumY[threadIdx.x]+=-2.f*f7+2.f*f11-2.f*f13+2.f*f16-2.f*f18; sumZ[threadIdx.x]=2.f*f9+2.f*f10+2.f*f11+2.f*f12+2.f*f13;//-2.f*f14-2.f*f15-2.f*f16-2.f*f17-2.f*f18; sumZ[threadIdx.x]+=-2.f*f14-2.f*f15-2.f*f16-2.f*f17-2.f*f18; } else{ sumX[threadIdx.x]=0.f; sumY[threadIdx.x]=0.f; sumZ[threadIdx.x]=0.f; } fout[f_memLR(1 ,x,y,z,pitch)] = f3 ; fout[f_memLR(2 ,x,y,z,pitch)] = f4 ; fout[f_memLR(3 ,x,y,z,pitch)] = f1 ; fout[f_memLR(4 ,x,y,z,pitch)] = f2 ; fout[f_memLR(5 ,x,y,z,pitch)] = f7 ; fout[f_memLR(6 ,x,y,z,pitch)] = f8 ; fout[f_memLR(7 ,x,y,z,pitch)] = f5 ; fout[f_memLR(8 ,x,y,z,pitch)] = f6 ; fout[f_memLR(9 ,x,y,z,pitch)] = f14; fout[f_memLR(10,x,y,z,pitch)] = f17; fout[f_memLR(11,x,y,z,pitch)] = f18; fout[f_memLR(12,x,y,z,pitch)] = f15; fout[f_memLR(13,x,y,z,pitch)] = f16; fout[f_memLR(14,x,y,z,pitch)] = f9 ; fout[f_memLR(15,x,y,z,pitch)] = f12; fout[f_memLR(16,x,y,z,pitch)] = f13; fout[f_memLR(17,x,y,z,pitch)] = f10; fout[f_memLR(18,x,y,z,pitch)] = f11; } else{ sumX[threadIdx.x]=0.f; sumY[threadIdx.x]=0.f; sumZ[threadIdx.x]=0.f; 
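// Note (added comment): fluid nodes take this branch. boundaries() applies the condition selected by im,
// then the node is relaxed with either the MRT or BGK collision operator (per the MODEL setting), optionally
// accumulating running velocity averages/fluctuations, before the post-collision distributions are written
// back to fout. The sumX/sumY/sumZ shared arrays are zeroed here so the block-wide force reduction
// performed after the final syncthreads() only picks up contributions from the im == 10 boundary nodes.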
boundaries(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,y,z,im); if(MODEL == "MRT") mrt_collide(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,omega); else if(MODEL == "BGK") bgk_collide(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,omega); if(VELAV == "YES"){ if(t>=START_VELAV && t<START_VELFLUC){ u_Av = uAv[j]; v_Av = vAv[j]; vel_avLR(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,u_Av,v_Av,t); uAv[j] = u_Av; vAv[j] = v_Av; } else if(t>=START_VELFLUC){ u_Av = uAv[j]; v_Av = vAv[j]; u_fluc = ufluc[j]; v_fluc = vfluc[j]; vel_flucLR(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,u_Av,v_Av,u_fluc,v_fluc,t); ufluc[j] = u_fluc; vfluc[j] = v_fluc; } } fout[f_memLR(0 ,x,y,z,pitch)] = f0 ; fout[f_memLR(1 ,x,y,z,pitch)] = f1 ; fout[f_memLR(2 ,x,y,z,pitch)] = f2 ; fout[f_memLR(3 ,x,y,z,pitch)] = f3 ; fout[f_memLR(4 ,x,y,z,pitch)] = f4 ; fout[f_memLR(5 ,x,y,z,pitch)] = f5 ; fout[f_memLR(6 ,x,y,z,pitch)] = f6 ; fout[f_memLR(7 ,x,y,z,pitch)] = f7 ; fout[f_memLR(8 ,x,y,z,pitch)] = f8 ; fout[f_memLR(9 ,x,y,z,pitch)] = f9 ; fout[f_memLR(10,x,y,z,pitch)] = f10; fout[f_memLR(11,x,y,z,pitch)] = f11; fout[f_memLR(12,x,y,z,pitch)] = f12; fout[f_memLR(13,x,y,z,pitch)] = f13; fout[f_memLR(14,x,y,z,pitch)] = f14; fout[f_memLR(15,x,y,z,pitch)] = f15; fout[f_memLR(16,x,y,z,pitch)] = f16; fout[f_memLR(17,x,y,z,pitch)] = f17; fout[f_memLR(18,x,y,z,pitch)] = f18; } // }//end else (not at edge of LR) syncthreads(); if(check[0] == 1 && t>=STARTF){ //reduction for force int nTotalThreads = blockDim.x; while(nTotalThreads > 1){ int halfPoint = (nTotalThreads >> 1); if(threadIdx.x < halfPoint){ sumX[threadIdx.x] += sumX[threadIdx.x+halfPoint]; sumY[threadIdx.x] += sumY[threadIdx.x+halfPoint]; sumZ[threadIdx.x] += sumZ[threadIdx.x+halfPoint]; } syncthreads(); nTotalThreads = halfPoint; } if(threadIdx.x == 0){ atomicAdd(&FX[t],sumX[0]); atomicAdd(&FY[t],sumY[0]); atomicAdd(&FZ[t],sumZ[0]); } } } __global__ void LR_d_BACD_force(float* fin, float* fout, float omega, size_t pitch, float *FX, float *FY, float *FZ, int t,float *uAv, float *vAv, float *ufluc, float *vfluc) { int x = threadIdx.x+blockIdx.x*blockDim.x;//coord in linear mem int y = threadIdx.y+blockIdx.y*blockDim.y; int z = threadIdx.z+blockIdx.z*blockDim.z; float xcoord = LRX0+x*LRFACTOR; float ycoord = LRY0+y*LRFACTOR; float zcoord = LRZ0+z*LRFACTOR; int j = x+y*pitch+z*YLRDIM*pitch;//index on padded mem (pitch in elements) float f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18; int im = ImageFcn(xcoord,ycoord,zcoord); __shared__ float sumX[BLOCKSIZEX], sumY[BLOCKSIZEX], sumZ[BLOCKSIZEX]; __shared__ int check[1]; check[0] = 0; syncthreads(); // if(x < 1 || x > XLRDIM-2 || y < 1 || y > YLRDIM-2 || z < 1 || z > ZLRDIM-2) // { // //dont do anything // sumX[threadIdx.x]=0.f; // sumY[threadIdx.x]=0.f; // sumZ[threadIdx.x]=0.f; // } // else{ f0 = fin[j]; f1 = fin[f_memLR(1 ,x-1,y ,z ,pitch)]; f3 = fin[f_memLR(3 ,x+1,y ,z ,pitch)]; f2 = fin[f_memLR(2 ,x ,y-1,z ,pitch)]; f5 = fin[f_memLR(5 ,x-1,y-1,z ,pitch)]; f6 = fin[f_memLR(6 ,x+1,y-1,z ,pitch)]; f4 = fin[f_memLR(4 ,x ,y+1,z ,pitch)]; f7 = fin[f_memLR(7 ,x+1,y+1,z ,pitch)]; f8 = fin[f_memLR(8 ,x-1,y+1,z ,pitch)]; if(z != 0){ f9 = fin[f_memLR(9 ,x ,y ,z-1,pitch)]; f10= fin[f_memLR(10,x-1,y ,z-1,pitch)]; f11= fin[f_memLR(11,x ,y-1,z-1,pitch)]; f12= fin[f_memLR(12,x+1,y ,z-1,pitch)]; f13= fin[f_memLR(13,x ,y+1,z-1,pitch)]; } else{ f9 = fin[f_memLR(9 ,x ,y ,ZLRDIM-1,pitch)]; f10= 
fin[f_memLR(10,dmax_p(x-1,XLRDIM),y ,ZLRDIM-1,pitch)]; f11= fin[f_memLR(11,x ,dmax_p(y-1,YLRDIM),ZLRDIM-1,pitch)]; f12= fin[f_memLR(12,dmin_p(x+1,XLRDIM),y ,ZLRDIM-1,pitch)]; f13= fin[f_memLR(13,x ,dmin_p(y+1,YLRDIM),ZLRDIM-1,pitch)]; } if(z != ZLRDIM-1){ f14= fin[f_memLR(14,x ,y ,z+1,pitch)]; f15= fin[f_memLR(15,x-1,y ,z+1,pitch)]; f16= fin[f_memLR(16,x ,y-1,z+1,pitch)]; f17= fin[f_memLR(17,x+1,y ,z+1,pitch)]; f18= fin[f_memLR(18,x ,y+1,z+1,pitch)]; } else{ f14= fin[f_memLR(14,x ,y ,0 ,pitch)]; f15= fin[f_memLR(15,dmax_p(x-1,XLRDIM),y ,0 ,pitch)]; f16= fin[f_memLR(16,x ,dmax_p(y-1,YLRDIM),0 ,pitch)]; f17= fin[f_memLR(17,dmin_p(x+1,XLRDIM),y ,0 ,pitch)]; f18= fin[f_memLR(18,x ,dmin_p(y+1,YLRDIM),0 ,pitch)]; } if(im == 1 || im ==10){//BB if(im == 10){ check[0] = 1; //sumX[threadIdx.x]=2.f*f1-2.f*f3+2.f*f5+2.f*f8-2.f*f6-2.f*f7+2.f*f10-2.f*f12+2.f*f15-2.f*f17; //sumY[threadIdx.x]=2.f*f2-2.f*f4+2.f*f5-2.f*f8+2.f*f6-2.f*f7+2.f*f11-2.f*f13+2.f*f16-2.f*f18; //sumZ[threadIdx.x]=2.f*f9+2.f*f10+2.f*f11+2.f*f12+2.f*f13-2.f*f14-2.f*f15-2.f*f16-2.f*f17-2.f*f18; sumX[threadIdx.x]=2.f*f1-2.f*f3+2.f*f5+2.f*f8-2.f*f6;//-2.f*f7+2.f*f10-2.f*f12+2.f*f15-2.f*f17; sumX[threadIdx.x]+=-2.f*f7+2.f*f10-2.f*f12+2.f*f15-2.f*f17; sumY[threadIdx.x]=2.f*f2-2.f*f4+2.f*f5-2.f*f8+2.f*f6;//-2.f*f7+2.f*f11-2.f*f13+2.f*f16-2.f*f18; sumY[threadIdx.x]+=-2.f*f7+2.f*f11-2.f*f13+2.f*f16-2.f*f18; sumZ[threadIdx.x]=2.f*f9+2.f*f10+2.f*f11+2.f*f12+2.f*f13;//-2.f*f14-2.f*f15-2.f*f16-2.f*f17-2.f*f18; sumZ[threadIdx.x]+=-2.f*f14-2.f*f15-2.f*f16-2.f*f17-2.f*f18; } else{ sumX[threadIdx.x]=0.f; sumY[threadIdx.x]=0.f; sumZ[threadIdx.x]=0.f; } fout[f_memLR(1 ,x,y,z,pitch)] = f3 ; fout[f_memLR(2 ,x,y,z,pitch)] = f4 ; fout[f_memLR(3 ,x,y,z,pitch)] = f1 ; fout[f_memLR(4 ,x,y,z,pitch)] = f2 ; fout[f_memLR(5 ,x,y,z,pitch)] = f7 ; fout[f_memLR(6 ,x,y,z,pitch)] = f8 ; fout[f_memLR(7 ,x,y,z,pitch)] = f5 ; fout[f_memLR(8 ,x,y,z,pitch)] = f6 ; fout[f_memLR(9 ,x,y,z,pitch)] = f14; fout[f_memLR(10,x,y,z,pitch)] = f17; fout[f_memLR(11,x,y,z,pitch)] = f18; fout[f_memLR(12,x,y,z,pitch)] = f15; fout[f_memLR(13,x,y,z,pitch)] = f16; fout[f_memLR(14,x,y,z,pitch)] = f9 ; fout[f_memLR(15,x,y,z,pitch)] = f12; fout[f_memLR(16,x,y,z,pitch)] = f13; fout[f_memLR(17,x,y,z,pitch)] = f10; fout[f_memLR(18,x,y,z,pitch)] = f11; } else{ sumX[threadIdx.x]=0.f; sumY[threadIdx.x]=0.f; sumZ[threadIdx.x]=0.f; boundaries(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,y,z,im); if(MODEL == "MRT") mrt_collide(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,omega); else if(MODEL == "BGK") bgk_collide(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,omega); if(VELAV == "YES"){ float u_Av, v_Av, u_fluc, v_fluc; if(t>=START_VELAV && t<START_VELFLUC){ u_Av = uAv[j]; v_Av = vAv[j]; vel_avLR(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,u_Av,v_Av,t); uAv[j] = u_Av; vAv[j] = v_Av; } else if(t>=START_VELFLUC){ u_Av = uAv[j]; v_Av = vAv[j]; u_fluc = ufluc[j]; v_fluc = vfluc[j]; vel_flucLR(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,u_Av,v_Av,u_fluc,v_fluc,t); ufluc[j] = u_fluc; vfluc[j] = v_fluc; } } fout[f_memLR(0 ,x,y,z,pitch)] = f0 ; fout[f_memLR(1 ,x,y,z,pitch)] = f1 ; fout[f_memLR(2 ,x,y,z,pitch)] = f2 ; fout[f_memLR(3 ,x,y,z,pitch)] = f3 ; fout[f_memLR(4 ,x,y,z,pitch)] = f4 ; fout[f_memLR(5 ,x,y,z,pitch)] = f5 ; fout[f_memLR(6 ,x,y,z,pitch)] = f6 ; fout[f_memLR(7 ,x,y,z,pitch)] = f7 ; fout[f_memLR(8 ,x,y,z,pitch)] = f8 ; fout[f_memLR(9 ,x,y,z,pitch)] = f9 ; fout[f_memLR(10,x,y,z,pitch)] = f10; 
fout[f_memLR(11,x,y,z,pitch)] = f11; fout[f_memLR(12,x,y,z,pitch)] = f12; fout[f_memLR(13,x,y,z,pitch)] = f13; fout[f_memLR(14,x,y,z,pitch)] = f14; fout[f_memLR(15,x,y,z,pitch)] = f15; fout[f_memLR(16,x,y,z,pitch)] = f16; fout[f_memLR(17,x,y,z,pitch)] = f17; fout[f_memLR(18,x,y,z,pitch)] = f18; } // }//end else (not at edge of LR) syncthreads(); if(check[0] == 1 && t>=STARTF){ //reduction for force int nTotalThreads = blockDim.x; while(nTotalThreads > 1){ int halfPoint = (nTotalThreads >> 1); if(threadIdx.x < halfPoint){ sumX[threadIdx.x] += sumX[threadIdx.x+halfPoint]; sumY[threadIdx.x] += sumY[threadIdx.x+halfPoint]; sumZ[threadIdx.x] += sumZ[threadIdx.x+halfPoint]; } syncthreads(); nTotalThreads = halfPoint; } if(threadIdx.x == 0){ atomicAdd(&FX[t],sumX[0]); atomicAdd(&FY[t],sumY[0]); atomicAdd(&FZ[t],sumZ[0]); } } } __global__ void LR_d_ABCD2(float* fin, float* fout, float omega, size_t pitch, int n, int t,//pitch in elements float *uAv, float *vAv, float *ufluc, float *vfluc)//pitch in elements { int x = threadIdx.x+blockIdx.x*blockDim.x;//coord in linear mem int y = threadIdx.y+blockIdx.y*blockDim.y; int z = threadIdx.z+blockIdx.z*blockDim.z; float xcoord = LRX0+x*LRFACTOR; float ycoord = LRY0+y*LRFACTOR; float zcoord = LRZ0+z*LRFACTOR; int j = x+y*pitch+z*YLRDIM*pitch;//index on padded mem (pitch in elements) float f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18; int im = ImageFcn(xcoord,ycoord,zcoord); float u_Av, v_Av, u_fluc, v_fluc; // if(x < n || x > XLRDIM-1-n || y < n || y > YLRDIM-1-n || z < n || z > ZLRDIM-1-n) // { // //dont do anything // } // else{ f0 = fin[j]; f1 = fin[f_memLR(1 ,x-1,y ,z ,pitch)]; f3 = fin[f_memLR(3 ,x+1,y ,z ,pitch)]; f2 = fin[f_memLR(2 ,x ,y-1,z ,pitch)]; f5 = fin[f_memLR(5 ,x-1,y-1,z ,pitch)]; f6 = fin[f_memLR(6 ,x+1,y-1,z ,pitch)]; f4 = fin[f_memLR(4 ,x ,y+1,z ,pitch)]; f7 = fin[f_memLR(7 ,x+1,y+1,z ,pitch)]; f8 = fin[f_memLR(8 ,x-1,y+1,z ,pitch)]; if(z != 0){ f9 = fin[f_memLR(9 ,x ,y ,z-1,pitch)]; f10= fin[f_memLR(10,x-1,y ,z-1,pitch)]; f11= fin[f_memLR(11,x ,y-1,z-1,pitch)]; f12= fin[f_memLR(12,x+1,y ,z-1,pitch)]; f13= fin[f_memLR(13,x ,y+1,z-1,pitch)]; } else{ f9 = fin[f_memLR(9 ,x ,y ,ZLRDIM-1,pitch)]; f10= fin[f_memLR(10,dmax_p(x-1,XLRDIM),y ,ZLRDIM-1,pitch)]; f11= fin[f_memLR(11,x ,dmax_p(y-1,YLRDIM),ZLRDIM-1,pitch)]; f12= fin[f_memLR(12,dmin_p(x+1,XLRDIM),y ,ZLRDIM-1,pitch)]; f13= fin[f_memLR(13,x ,dmin_p(y+1,YLRDIM),ZLRDIM-1,pitch)]; } if(z != ZLRDIM-1){ f14= fin[f_memLR(14,x ,y ,z+1,pitch)]; f15= fin[f_memLR(15,x-1,y ,z+1,pitch)]; f16= fin[f_memLR(16,x ,y-1,z+1,pitch)]; f17= fin[f_memLR(17,x+1,y ,z+1,pitch)]; f18= fin[f_memLR(18,x ,y+1,z+1,pitch)]; } else{ f14= fin[f_memLR(14,x ,y ,0 ,pitch)]; f15= fin[f_memLR(15,dmax_p(x-1,XLRDIM),y ,0 ,pitch)]; f16= fin[f_memLR(16,x ,dmax_p(y-1,YLRDIM),0 ,pitch)]; f17= fin[f_memLR(17,dmin_p(x+1,XLRDIM),y ,0 ,pitch)]; f18= fin[f_memLR(18,x ,dmin_p(y+1,YLRDIM),0 ,pitch)]; } if(im == 1 || im ==10){//BB fout[f_memLR(1 ,x,y,z,pitch)] = f3 ; fout[f_memLR(2 ,x,y,z,pitch)] = f4 ; fout[f_memLR(3 ,x,y,z,pitch)] = f1 ; fout[f_memLR(4 ,x,y,z,pitch)] = f2 ; fout[f_memLR(5 ,x,y,z,pitch)] = f7 ; fout[f_memLR(6 ,x,y,z,pitch)] = f8 ; fout[f_memLR(7 ,x,y,z,pitch)] = f5 ; fout[f_memLR(8 ,x,y,z,pitch)] = f6 ; fout[f_memLR(9 ,x,y,z,pitch)] = f14; fout[f_memLR(10,x,y,z,pitch)] = f17; fout[f_memLR(11,x,y,z,pitch)] = f18; fout[f_memLR(12,x,y,z,pitch)] = f15; fout[f_memLR(13,x,y,z,pitch)] = f16; fout[f_memLR(14,x,y,z,pitch)] = f9 ; fout[f_memLR(15,x,y,z,pitch)] = f12; fout[f_memLR(16,x,y,z,pitch)] = f13; 
fout[f_memLR(17,x,y,z,pitch)] = f10; fout[f_memLR(18,x,y,z,pitch)] = f11; } else{ if(MODEL == "MRT") mrt_collide(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,omega); else if(MODEL == "BGK") bgk_collide(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,omega); if(VELAV == "YES"){ if(t>=START_VELAV && t<START_VELFLUC){ u_Av = uAv[j]; v_Av = vAv[j]; vel_avLR(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,u_Av,v_Av,t+LRFACTOR*n); uAv[j] = u_Av; vAv[j] = v_Av; } else if(t>=START_VELFLUC){ u_Av = uAv[j]; v_Av = vAv[j]; u_fluc = ufluc[j]; v_fluc = vfluc[j]; vel_flucLR(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,u_Av,v_Av,u_fluc,v_fluc,t+LRFACTOR*n); ufluc[j] = u_fluc; vfluc[j] = v_fluc; } } fout[f_memLR(0 ,x,y,z,pitch)] = f0 ; fout[f_memLR(1 ,x,y,z,pitch)] = f1 ; fout[f_memLR(2 ,x,y,z,pitch)] = f2 ; fout[f_memLR(3 ,x,y,z,pitch)] = f3 ; fout[f_memLR(4 ,x,y,z,pitch)] = f4 ; fout[f_memLR(5 ,x,y,z,pitch)] = f5 ; fout[f_memLR(6 ,x,y,z,pitch)] = f6 ; fout[f_memLR(7 ,x,y,z,pitch)] = f7 ; fout[f_memLR(8 ,x,y,z,pitch)] = f8 ; fout[f_memLR(9 ,x,y,z,pitch)] = f9 ; fout[f_memLR(10,x,y,z,pitch)] = f10; fout[f_memLR(11,x,y,z,pitch)] = f11; fout[f_memLR(12,x,y,z,pitch)] = f12; fout[f_memLR(13,x,y,z,pitch)] = f13; fout[f_memLR(14,x,y,z,pitch)] = f14; fout[f_memLR(15,x,y,z,pitch)] = f15; fout[f_memLR(16,x,y,z,pitch)] = f16; fout[f_memLR(17,x,y,z,pitch)] = f17; fout[f_memLR(18,x,y,z,pitch)] = f18; } // }//end else (not at edge of LR) } __global__ void LR_d_ABDC2(float* fin, float* fout, float omega, size_t pitch, float SF, int n, int t,float *uAv, float *vAv, float *ufluc, float *vfluc) { int x = threadIdx.x+blockIdx.x*blockDim.x;//coord in linear mem int y = threadIdx.y+blockIdx.y*blockDim.y; int z = threadIdx.z+blockIdx.z*blockDim.z; float xcoord = LRX0+x*LRFACTOR;//+0.5f is because textures are stored in a voxel centered fashion. we need to change this to vertex centered. 
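// Note: LRX0/LRY0/LRZ0 locate the refined block's origin in coarse-grid units and, from its use
// here and in the time offsets below, LRFACTOR is the fine-to-coarse grid-spacing ratio; so
// xcoord/ycoord/zcoord place this fine node on the coarse grid. In this kernel they are only
// used as input to ImageFcn to obtain the geometry flag im.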
float ycoord = LRY0+y*LRFACTOR; float zcoord = LRZ0+z*LRFACTOR;//dont need to +0.5f because z is not using texture interpolation // int zminus = int(zcoord); // int zplus = zminus+1; int j = x+y*pitch+z*YLRDIM*pitch;//index on padded mem (pitch in elements) float f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18; int im = ImageFcn(xcoord,ycoord,zcoord); float u_Av, v_Av, u_fluc, v_fluc; // if(x < n || x > XLRDIM-1-n || y < n || y > YLRDIM-1-n || z < n || z > ZLRDIM-1-n) // { // //no interp // } // else{ f0 = fin[j]; f1 = fin[f_memLR(1 ,x-1,y ,z ,pitch)]; f3 = fin[f_memLR(3 ,x+1,y ,z ,pitch)]; f2 = fin[f_memLR(2 ,x ,y-1,z ,pitch)]; f5 = fin[f_memLR(5 ,x-1,y-1,z ,pitch)]; f6 = fin[f_memLR(6 ,x+1,y-1,z ,pitch)]; f4 = fin[f_memLR(4 ,x ,y+1,z ,pitch)]; f7 = fin[f_memLR(7 ,x+1,y+1,z ,pitch)]; f8 = fin[f_memLR(8 ,x-1,y+1,z ,pitch)]; if(z != 0){ f9 = fin[f_memLR(9 ,x ,y ,z-1,pitch)]; f10= fin[f_memLR(10,x-1,y ,z-1,pitch)]; f11= fin[f_memLR(11,x ,y-1,z-1,pitch)]; f12= fin[f_memLR(12,x+1,y ,z-1,pitch)]; f13= fin[f_memLR(13,x ,y+1,z-1,pitch)]; } else{ f9 = fin[f_memLR(9 ,x ,y ,ZLRDIM-1,pitch)]; f10= fin[f_memLR(10,dmax_p(x-1,XLRDIM),y ,ZLRDIM-1,pitch)]; f11= fin[f_memLR(11,x ,dmax_p(y-1,YLRDIM),ZLRDIM-1,pitch)]; f12= fin[f_memLR(12,dmin_p(x+1,XLRDIM),y ,ZLRDIM-1,pitch)]; f13= fin[f_memLR(13,x ,dmin_p(y+1,YLRDIM),ZLRDIM-1,pitch)]; } if(z != ZLRDIM-1){ f14= fin[f_memLR(14,x ,y ,z+1,pitch)]; f15= fin[f_memLR(15,x-1,y ,z+1,pitch)]; f16= fin[f_memLR(16,x ,y-1,z+1,pitch)]; f17= fin[f_memLR(17,x+1,y ,z+1,pitch)]; f18= fin[f_memLR(18,x ,y+1,z+1,pitch)]; } else{ f14= fin[f_memLR(14,x ,y ,0 ,pitch)]; f15= fin[f_memLR(15,dmax_p(x-1,XLRDIM),y ,0 ,pitch)]; f16= fin[f_memLR(16,x ,dmax_p(y-1,YLRDIM),0 ,pitch)]; f17= fin[f_memLR(17,dmin_p(x+1,XLRDIM),y ,0 ,pitch)]; f18= fin[f_memLR(18,x ,dmin_p(y+1,YLRDIM),0 ,pitch)]; } //else f18 = 0.1f; if(im == 1 || im ==10){//BB fout[f_memLR(1 ,x,y,z,pitch)] = f3 ; fout[f_memLR(2 ,x,y,z,pitch)] = f4 ; fout[f_memLR(3 ,x,y,z,pitch)] = f1 ; fout[f_memLR(4 ,x,y,z,pitch)] = f2 ; fout[f_memLR(5 ,x,y,z,pitch)] = f7 ; fout[f_memLR(6 ,x,y,z,pitch)] = f8 ; fout[f_memLR(7 ,x,y,z,pitch)] = f5 ; fout[f_memLR(8 ,x,y,z,pitch)] = f6 ; fout[f_memLR(9 ,x,y,z,pitch)] = f14; fout[f_memLR(10,x,y,z,pitch)] = f17; fout[f_memLR(11,x,y,z,pitch)] = f18; fout[f_memLR(12,x,y,z,pitch)] = f15; fout[f_memLR(13,x,y,z,pitch)] = f16; fout[f_memLR(14,x,y,z,pitch)] = f9 ; fout[f_memLR(15,x,y,z,pitch)] = f12; fout[f_memLR(16,x,y,z,pitch)] = f13; fout[f_memLR(17,x,y,z,pitch)] = f10; fout[f_memLR(18,x,y,z,pitch)] = f11; } else{ if(MODEL == "MRT") mrt_collide(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,omega); else if(MODEL == "BGK") bgk_collide(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,omega); if(VELAV == "YES"){ if(t>=START_VELAV && t<START_VELFLUC){ u_Av = uAv[j]; v_Av = vAv[j]; vel_avLR(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,u_Av,v_Av,t+LRFACTOR*n); uAv[j] = u_Av; vAv[j] = v_Av; } else if(t>=START_VELFLUC){ u_Av = uAv[j]; v_Av = vAv[j]; u_fluc = ufluc[j]; v_fluc = vfluc[j]; vel_flucLR(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,u_Av,v_Av,u_fluc,v_fluc,t+LRFACTOR*n); ufluc[j] = u_fluc; vfluc[j] = v_fluc; } } fout[f_memLR(0 ,x,y,z,pitch)] = f0 ; fout[f_memLR(1 ,x,y,z,pitch)] = f1 ; fout[f_memLR(2 ,x,y,z,pitch)] = f2 ; fout[f_memLR(3 ,x,y,z,pitch)] = f3 ; fout[f_memLR(4 ,x,y,z,pitch)] = f4 ; fout[f_memLR(5 ,x,y,z,pitch)] = f5 ; fout[f_memLR(6 ,x,y,z,pitch)] = f6 ; fout[f_memLR(7 ,x,y,z,pitch)] = f7 ; 
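// Pull scheme: the incoming distributions were already gathered from the neighbouring nodes at
// the top of the kernel, so after collision the updated values are written back to this node's
// own slot in fout (no push to neighbours is needed).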
fout[f_memLR(8 ,x,y,z,pitch)] = f8 ; fout[f_memLR(9 ,x,y,z,pitch)] = f9 ; fout[f_memLR(10,x,y,z,pitch)] = f10; fout[f_memLR(11,x,y,z,pitch)] = f11; fout[f_memLR(12,x,y,z,pitch)] = f12; fout[f_memLR(13,x,y,z,pitch)] = f13; fout[f_memLR(14,x,y,z,pitch)] = f14; fout[f_memLR(15,x,y,z,pitch)] = f15; fout[f_memLR(16,x,y,z,pitch)] = f16; fout[f_memLR(17,x,y,z,pitch)] = f17; fout[f_memLR(18,x,y,z,pitch)] = f18; } // }//end else (not at edge of LR) } __global__ void LR_d_ABDC_Interp(float* fin, float* fout, float omega, size_t pitch, float SF, int t, float *uAv, float *vAv, float *ufluc, float *vfluc)//pitch in elements { int x = threadIdx.x+blockIdx.x*blockDim.x;//coord in linear mem int y = threadIdx.y+blockIdx.y*blockDim.y; int z = threadIdx.z+blockIdx.z*blockDim.z; float xcoord = LRX0+x*LRFACTOR;//+0.5f is because textures are stored in a voxel centered fashion. we need to change this to vertex centered. float ycoord = LRY0+y*LRFACTOR; float zcoord = LRZ0+z*LRFACTOR;//dont need to +0.5f because z is not using texture interpolation int zminus = int(zcoord); int zplus = zminus+1; int j = x+y*pitch+z*YLRDIM*pitch;//index on padded mem (pitch in elements) float f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18; int im = ImageFcn(xcoord,ycoord,zcoord); float u_Av, v_Av, u_fluc, v_fluc; if(((x < LRLEVEL || x > XLRDIM-1-LRLEVEL || y < LRLEVEL || y > YLRDIM-1-LRLEVEL || z < LRLEVEL || z > ZLRDIM-1-LRLEVEL) && ZPERIODIC == "NO") ||((x < LRLEVEL || x > XLRDIM-1-LRLEVEL || y < LRLEVEL || y > YLRDIM-1-LRLEVEL) && ZPERIODIC == "YES")) { if(ZPERIODIC == "YES"){ if(zcoord < 0){ //if zcoord=-0.25f, 1+zcoord=0.75f f0 = (1.f+zcoord)*tex2D(texRef_f0B ,xcoord+0.5f,ycoord+0.5f+YDIM*(zminus))+(-zcoord)*tex2D(texRef_f0B ,xcoord+0.5f,ycoord+0.5f+YDIM*(ZDIM-1)); f2 = (1.f+zcoord)*tex2D(texRef_f2B ,xcoord+0.5f,ycoord+0.5f+YDIM*(zminus))+(-zcoord)*tex2D(texRef_f2B ,xcoord+0.5f,ycoord+0.5f+YDIM*(ZDIM-1)); f4 = (1.f+zcoord)*tex2D(texRef_f4B ,xcoord+0.5f,ycoord+0.5f+YDIM*(zminus))+(-zcoord)*tex2D(texRef_f4B ,xcoord+0.5f,ycoord+0.5f+YDIM*(ZDIM-1)); f9 = (1.f+zcoord)*tex2D(texRef_f9B ,xcoord+0.5f,ycoord+0.5f+YDIM*(zminus))+(-zcoord)*tex2D(texRef_f9B ,xcoord+0.5f,ycoord+0.5f+YDIM*(ZDIM-1)); f11= (1.f+zcoord)*tex2D(texRef_f11B,xcoord+0.5f,ycoord+0.5f+YDIM*(zminus))+(-zcoord)*tex2D(texRef_f11B,xcoord+0.5f,ycoord+0.5f+YDIM*(ZDIM-1)); f13= (1.f+zcoord)*tex2D(texRef_f13B,xcoord+0.5f,ycoord+0.5f+YDIM*(zminus))+(-zcoord)*tex2D(texRef_f13B,xcoord+0.5f,ycoord+0.5f+YDIM*(ZDIM-1)); f14= (1.f+zcoord)*tex2D(texRef_f14B,xcoord+0.5f,ycoord+0.5f+YDIM*(zminus))+(-zcoord)*tex2D(texRef_f14B,xcoord+0.5f,ycoord+0.5f+YDIM*(ZDIM-1)); f16= (1.f+zcoord)*tex2D(texRef_f16B,xcoord+0.5f,ycoord+0.5f+YDIM*(zminus))+(-zcoord)*tex2D(texRef_f16B,xcoord+0.5f,ycoord+0.5f+YDIM*(ZDIM-1)); f18= (1.f+zcoord)*tex2D(texRef_f18B,xcoord+0.5f,ycoord+0.5f+YDIM*(zminus))+(-zcoord)*tex2D(texRef_f18B,xcoord+0.5f,ycoord+0.5f+YDIM*(ZDIM-1)); f1 = (1.f+zcoord)*tex2D(texRef_f1B ,xcoord+0.5f,ycoord+0.5f+YDIM*(zminus))+(-zcoord)*tex2D(texRef_f1B ,xcoord+0.5f,ycoord+0.5f+YDIM*(ZDIM-1)); f3 = (1.f+zcoord)*tex2D(texRef_f3B ,xcoord+0.5f,ycoord+0.5f+YDIM*(zminus))+(-zcoord)*tex2D(texRef_f3B ,xcoord+0.5f,ycoord+0.5f+YDIM*(ZDIM-1)); f5 = (1.f+zcoord)*tex2D(texRef_f5B ,xcoord+0.5f,ycoord+0.5f+YDIM*(zminus))+(-zcoord)*tex2D(texRef_f5B ,xcoord+0.5f,ycoord+0.5f+YDIM*(ZDIM-1)); f6 = (1.f+zcoord)*tex2D(texRef_f6B ,xcoord+0.5f,ycoord+0.5f+YDIM*(zminus))+(-zcoord)*tex2D(texRef_f6B ,xcoord+0.5f,ycoord+0.5f+YDIM*(ZDIM-1)); f7 = (1.f+zcoord)*tex2D(texRef_f7B 
,xcoord+0.5f,ycoord+0.5f+YDIM*(zminus))+(-zcoord)*tex2D(texRef_f7B ,xcoord+0.5f,ycoord+0.5f+YDIM*(ZDIM-1)); f8 = (1.f+zcoord)*tex2D(texRef_f8B ,xcoord+0.5f,ycoord+0.5f+YDIM*(zminus))+(-zcoord)*tex2D(texRef_f8B ,xcoord+0.5f,ycoord+0.5f+YDIM*(ZDIM-1)); f15= (1.f+zcoord)*tex2D(texRef_f15B,xcoord+0.5f,ycoord+0.5f+YDIM*(zminus))+(-zcoord)*tex2D(texRef_f15B,xcoord+0.5f,ycoord+0.5f+YDIM*(ZDIM-1)); f17= (1.f+zcoord)*tex2D(texRef_f17B,xcoord+0.5f,ycoord+0.5f+YDIM*(zminus))+(-zcoord)*tex2D(texRef_f17B,xcoord+0.5f,ycoord+0.5f+YDIM*(ZDIM-1)); f10= (1.f+zcoord)*tex2D(texRef_f10B,xcoord+0.5f,ycoord+0.5f+YDIM*(zminus))+(-zcoord)*tex2D(texRef_f10B,xcoord+0.5f,ycoord+0.5f+YDIM*(ZDIM-1)); f12= (1.f+zcoord)*tex2D(texRef_f12B,xcoord+0.5f,ycoord+0.5f+YDIM*(zminus))+(-zcoord)*tex2D(texRef_f12B,xcoord+0.5f,ycoord+0.5f+YDIM*(ZDIM-1)); } else if(zcoord > ZDIM-1){ f0 = (zplus-zcoord)*tex2D(texRef_f0B ,xcoord+0.5f,ycoord+0.5f+YDIM*(zminus))+(zcoord-zminus)*tex2D(texRef_f0B ,xcoord+0.5f,ycoord+0.5f+YDIM*(0)); f2 = (zplus-zcoord)*tex2D(texRef_f2B ,xcoord+0.5f,ycoord+0.5f+YDIM*(zminus))+(zcoord-zminus)*tex2D(texRef_f2B ,xcoord+0.5f,ycoord+0.5f+YDIM*(0)); f4 = (zplus-zcoord)*tex2D(texRef_f4B ,xcoord+0.5f,ycoord+0.5f+YDIM*(zminus))+(zcoord-zminus)*tex2D(texRef_f4B ,xcoord+0.5f,ycoord+0.5f+YDIM*(0)); f9 = (zplus-zcoord)*tex2D(texRef_f9B ,xcoord+0.5f,ycoord+0.5f+YDIM*(zminus))+(zcoord-zminus)*tex2D(texRef_f9B ,xcoord+0.5f,ycoord+0.5f+YDIM*(0)); f11= (zplus-zcoord)*tex2D(texRef_f11B,xcoord+0.5f,ycoord+0.5f+YDIM*(zminus))+(zcoord-zminus)*tex2D(texRef_f11B,xcoord+0.5f,ycoord+0.5f+YDIM*(0)); f13= (zplus-zcoord)*tex2D(texRef_f13B,xcoord+0.5f,ycoord+0.5f+YDIM*(zminus))+(zcoord-zminus)*tex2D(texRef_f13B,xcoord+0.5f,ycoord+0.5f+YDIM*(0)); f14= (zplus-zcoord)*tex2D(texRef_f14B,xcoord+0.5f,ycoord+0.5f+YDIM*(zminus))+(zcoord-zminus)*tex2D(texRef_f14B,xcoord+0.5f,ycoord+0.5f+YDIM*(0)); f16= (zplus-zcoord)*tex2D(texRef_f16B,xcoord+0.5f,ycoord+0.5f+YDIM*(zminus))+(zcoord-zminus)*tex2D(texRef_f16B,xcoord+0.5f,ycoord+0.5f+YDIM*(0)); f18= (zplus-zcoord)*tex2D(texRef_f18B,xcoord+0.5f,ycoord+0.5f+YDIM*(zminus))+(zcoord-zminus)*tex2D(texRef_f18B,xcoord+0.5f,ycoord+0.5f+YDIM*(0)); f1 = (zplus-zcoord)*tex2D(texRef_f1B ,xcoord+0.5f,ycoord+0.5f+YDIM*(zminus))+(zcoord-zminus)*tex2D(texRef_f1B ,xcoord+0.5f,ycoord+0.5f+YDIM*(0)); f3 = (zplus-zcoord)*tex2D(texRef_f3B ,xcoord+0.5f,ycoord+0.5f+YDIM*(zminus))+(zcoord-zminus)*tex2D(texRef_f3B ,xcoord+0.5f,ycoord+0.5f+YDIM*(0)); f5 = (zplus-zcoord)*tex2D(texRef_f5B ,xcoord+0.5f,ycoord+0.5f+YDIM*(zminus))+(zcoord-zminus)*tex2D(texRef_f5B ,xcoord+0.5f,ycoord+0.5f+YDIM*(0)); f6 = (zplus-zcoord)*tex2D(texRef_f6B ,xcoord+0.5f,ycoord+0.5f+YDIM*(zminus))+(zcoord-zminus)*tex2D(texRef_f6B ,xcoord+0.5f,ycoord+0.5f+YDIM*(0)); f7 = (zplus-zcoord)*tex2D(texRef_f7B ,xcoord+0.5f,ycoord+0.5f+YDIM*(zminus))+(zcoord-zminus)*tex2D(texRef_f7B ,xcoord+0.5f,ycoord+0.5f+YDIM*(0)); f8 = (zplus-zcoord)*tex2D(texRef_f8B ,xcoord+0.5f,ycoord+0.5f+YDIM*(zminus))+(zcoord-zminus)*tex2D(texRef_f8B ,xcoord+0.5f,ycoord+0.5f+YDIM*(0)); f15= (zplus-zcoord)*tex2D(texRef_f15B,xcoord+0.5f,ycoord+0.5f+YDIM*(zminus))+(zcoord-zminus)*tex2D(texRef_f15B,xcoord+0.5f,ycoord+0.5f+YDIM*(0)); f17= (zplus-zcoord)*tex2D(texRef_f17B,xcoord+0.5f,ycoord+0.5f+YDIM*(zminus))+(zcoord-zminus)*tex2D(texRef_f17B,xcoord+0.5f,ycoord+0.5f+YDIM*(0)); f10= (zplus-zcoord)*tex2D(texRef_f10B,xcoord+0.5f,ycoord+0.5f+YDIM*(zminus))+(zcoord-zminus)*tex2D(texRef_f10B,xcoord+0.5f,ycoord+0.5f+YDIM*(0)); f12= 
(zplus-zcoord)*tex2D(texRef_f12B,xcoord+0.5f,ycoord+0.5f+YDIM*(zminus))+(zcoord-zminus)*tex2D(texRef_f12B,xcoord+0.5f,ycoord+0.5f+YDIM*(0)); } else{ //interpolate for next time step. from B //YDIM and not YLRDIM f0 = (zplus-zcoord)*tex2D(texRef_f0B ,xcoord+0.5f,ycoord+0.5f+YDIM*(zminus))+(zcoord-zminus)*tex2D(texRef_f0B ,xcoord+0.5f,ycoord+0.5f+YDIM*(zplus)); f2 = (zplus-zcoord)*tex2D(texRef_f2B ,xcoord+0.5f,ycoord+0.5f+YDIM*(zminus))+(zcoord-zminus)*tex2D(texRef_f2B ,xcoord+0.5f,ycoord+0.5f+YDIM*(zplus)); f4 = (zplus-zcoord)*tex2D(texRef_f4B ,xcoord+0.5f,ycoord+0.5f+YDIM*(zminus))+(zcoord-zminus)*tex2D(texRef_f4B ,xcoord+0.5f,ycoord+0.5f+YDIM*(zplus)); f9 = (zplus-zcoord)*tex2D(texRef_f9B ,xcoord+0.5f,ycoord+0.5f+YDIM*(zminus))+(zcoord-zminus)*tex2D(texRef_f9B ,xcoord+0.5f,ycoord+0.5f+YDIM*(zplus)); f11= (zplus-zcoord)*tex2D(texRef_f11B,xcoord+0.5f,ycoord+0.5f+YDIM*(zminus))+(zcoord-zminus)*tex2D(texRef_f11B,xcoord+0.5f,ycoord+0.5f+YDIM*(zplus)); f13= (zplus-zcoord)*tex2D(texRef_f13B,xcoord+0.5f,ycoord+0.5f+YDIM*(zminus))+(zcoord-zminus)*tex2D(texRef_f13B,xcoord+0.5f,ycoord+0.5f+YDIM*(zplus)); f14= (zplus-zcoord)*tex2D(texRef_f14B,xcoord+0.5f,ycoord+0.5f+YDIM*(zminus))+(zcoord-zminus)*tex2D(texRef_f14B,xcoord+0.5f,ycoord+0.5f+YDIM*(zplus)); f16= (zplus-zcoord)*tex2D(texRef_f16B,xcoord+0.5f,ycoord+0.5f+YDIM*(zminus))+(zcoord-zminus)*tex2D(texRef_f16B,xcoord+0.5f,ycoord+0.5f+YDIM*(zplus)); f18= (zplus-zcoord)*tex2D(texRef_f18B,xcoord+0.5f,ycoord+0.5f+YDIM*(zminus))+(zcoord-zminus)*tex2D(texRef_f18B,xcoord+0.5f,ycoord+0.5f+YDIM*(zplus)); f1 = (zplus-zcoord)*tex2D(texRef_f1B ,xcoord+0.5f,ycoord+0.5f+YDIM*(zminus))+(zcoord-zminus)*tex2D(texRef_f1B ,xcoord+0.5f,ycoord+0.5f+YDIM*(zplus)); f3 = (zplus-zcoord)*tex2D(texRef_f3B ,xcoord+0.5f,ycoord+0.5f+YDIM*(zminus))+(zcoord-zminus)*tex2D(texRef_f3B ,xcoord+0.5f,ycoord+0.5f+YDIM*(zplus)); f5 = (zplus-zcoord)*tex2D(texRef_f5B ,xcoord+0.5f,ycoord+0.5f+YDIM*(zminus))+(zcoord-zminus)*tex2D(texRef_f5B ,xcoord+0.5f,ycoord+0.5f+YDIM*(zplus)); f6 = (zplus-zcoord)*tex2D(texRef_f6B ,xcoord+0.5f,ycoord+0.5f+YDIM*(zminus))+(zcoord-zminus)*tex2D(texRef_f6B ,xcoord+0.5f,ycoord+0.5f+YDIM*(zplus)); f7 = (zplus-zcoord)*tex2D(texRef_f7B ,xcoord+0.5f,ycoord+0.5f+YDIM*(zminus))+(zcoord-zminus)*tex2D(texRef_f7B ,xcoord+0.5f,ycoord+0.5f+YDIM*(zplus)); f8 = (zplus-zcoord)*tex2D(texRef_f8B ,xcoord+0.5f,ycoord+0.5f+YDIM*(zminus))+(zcoord-zminus)*tex2D(texRef_f8B ,xcoord+0.5f,ycoord+0.5f+YDIM*(zplus)); f15= (zplus-zcoord)*tex2D(texRef_f15B,xcoord+0.5f,ycoord+0.5f+YDIM*(zminus))+(zcoord-zminus)*tex2D(texRef_f15B,xcoord+0.5f,ycoord+0.5f+YDIM*(zplus)); f17= (zplus-zcoord)*tex2D(texRef_f17B,xcoord+0.5f,ycoord+0.5f+YDIM*(zminus))+(zcoord-zminus)*tex2D(texRef_f17B,xcoord+0.5f,ycoord+0.5f+YDIM*(zplus)); f10= (zplus-zcoord)*tex2D(texRef_f10B,xcoord+0.5f,ycoord+0.5f+YDIM*(zminus))+(zcoord-zminus)*tex2D(texRef_f10B,xcoord+0.5f,ycoord+0.5f+YDIM*(zplus)); f12= (zplus-zcoord)*tex2D(texRef_f12B,xcoord+0.5f,ycoord+0.5f+YDIM*(zminus))+(zcoord-zminus)*tex2D(texRef_f12B,xcoord+0.5f,ycoord+0.5f+YDIM*(zplus)); } } else{ //interpolate for next time step. 
from B //YDIM and not YLRDIM f0 = (zplus-zcoord)*tex2D(texRef_f0B ,xcoord+0.5f,ycoord+0.5f+YDIM*(zminus))+(zcoord-zminus)*tex2D(texRef_f0B ,xcoord+0.5f,ycoord+0.5f+YDIM*(zplus)); f2 = (zplus-zcoord)*tex2D(texRef_f2B ,xcoord+0.5f,ycoord+0.5f+YDIM*(zminus))+(zcoord-zminus)*tex2D(texRef_f2B ,xcoord+0.5f,ycoord+0.5f+YDIM*(zplus)); f4 = (zplus-zcoord)*tex2D(texRef_f4B ,xcoord+0.5f,ycoord+0.5f+YDIM*(zminus))+(zcoord-zminus)*tex2D(texRef_f4B ,xcoord+0.5f,ycoord+0.5f+YDIM*(zplus)); f9 = (zplus-zcoord)*tex2D(texRef_f9B ,xcoord+0.5f,ycoord+0.5f+YDIM*(zminus))+(zcoord-zminus)*tex2D(texRef_f9B ,xcoord+0.5f,ycoord+0.5f+YDIM*(zplus)); f11= (zplus-zcoord)*tex2D(texRef_f11B,xcoord+0.5f,ycoord+0.5f+YDIM*(zminus))+(zcoord-zminus)*tex2D(texRef_f11B,xcoord+0.5f,ycoord+0.5f+YDIM*(zplus)); f13= (zplus-zcoord)*tex2D(texRef_f13B,xcoord+0.5f,ycoord+0.5f+YDIM*(zminus))+(zcoord-zminus)*tex2D(texRef_f13B,xcoord+0.5f,ycoord+0.5f+YDIM*(zplus)); f14= (zplus-zcoord)*tex2D(texRef_f14B,xcoord+0.5f,ycoord+0.5f+YDIM*(zminus))+(zcoord-zminus)*tex2D(texRef_f14B,xcoord+0.5f,ycoord+0.5f+YDIM*(zplus)); f16= (zplus-zcoord)*tex2D(texRef_f16B,xcoord+0.5f,ycoord+0.5f+YDIM*(zminus))+(zcoord-zminus)*tex2D(texRef_f16B,xcoord+0.5f,ycoord+0.5f+YDIM*(zplus)); f18= (zplus-zcoord)*tex2D(texRef_f18B,xcoord+0.5f,ycoord+0.5f+YDIM*(zminus))+(zcoord-zminus)*tex2D(texRef_f18B,xcoord+0.5f,ycoord+0.5f+YDIM*(zplus)); f1 = (zplus-zcoord)*tex2D(texRef_f1B ,xcoord+0.5f,ycoord+0.5f+YDIM*(zminus))+(zcoord-zminus)*tex2D(texRef_f1B ,xcoord+0.5f,ycoord+0.5f+YDIM*(zplus)); f3 = (zplus-zcoord)*tex2D(texRef_f3B ,xcoord+0.5f,ycoord+0.5f+YDIM*(zminus))+(zcoord-zminus)*tex2D(texRef_f3B ,xcoord+0.5f,ycoord+0.5f+YDIM*(zplus)); f5 = (zplus-zcoord)*tex2D(texRef_f5B ,xcoord+0.5f,ycoord+0.5f+YDIM*(zminus))+(zcoord-zminus)*tex2D(texRef_f5B ,xcoord+0.5f,ycoord+0.5f+YDIM*(zplus)); f6 = (zplus-zcoord)*tex2D(texRef_f6B ,xcoord+0.5f,ycoord+0.5f+YDIM*(zminus))+(zcoord-zminus)*tex2D(texRef_f6B ,xcoord+0.5f,ycoord+0.5f+YDIM*(zplus)); f7 = (zplus-zcoord)*tex2D(texRef_f7B ,xcoord+0.5f,ycoord+0.5f+YDIM*(zminus))+(zcoord-zminus)*tex2D(texRef_f7B ,xcoord+0.5f,ycoord+0.5f+YDIM*(zplus)); f8 = (zplus-zcoord)*tex2D(texRef_f8B ,xcoord+0.5f,ycoord+0.5f+YDIM*(zminus))+(zcoord-zminus)*tex2D(texRef_f8B ,xcoord+0.5f,ycoord+0.5f+YDIM*(zplus)); f15= (zplus-zcoord)*tex2D(texRef_f15B,xcoord+0.5f,ycoord+0.5f+YDIM*(zminus))+(zcoord-zminus)*tex2D(texRef_f15B,xcoord+0.5f,ycoord+0.5f+YDIM*(zplus)); f17= (zplus-zcoord)*tex2D(texRef_f17B,xcoord+0.5f,ycoord+0.5f+YDIM*(zminus))+(zcoord-zminus)*tex2D(texRef_f17B,xcoord+0.5f,ycoord+0.5f+YDIM*(zplus)); f10= (zplus-zcoord)*tex2D(texRef_f10B,xcoord+0.5f,ycoord+0.5f+YDIM*(zminus))+(zcoord-zminus)*tex2D(texRef_f10B,xcoord+0.5f,ycoord+0.5f+YDIM*(zplus)); f12= (zplus-zcoord)*tex2D(texRef_f12B,xcoord+0.5f,ycoord+0.5f+YDIM*(zminus))+(zcoord-zminus)*tex2D(texRef_f12B,xcoord+0.5f,ycoord+0.5f+YDIM*(zplus)); } if(MODEL == "MRT") mrt_scale_cf(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,SF); else bgk_scale_cf(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,SF); fout[f_memLR(0 ,x,y,z,pitch)] = f0 ; fout[f_memLR(1 ,x,y,z,pitch)] = f1 ; fout[f_memLR(2 ,x,y,z,pitch)] = f2 ; fout[f_memLR(3 ,x,y,z,pitch)] = f3 ; fout[f_memLR(4 ,x,y,z,pitch)] = f4 ; fout[f_memLR(5 ,x,y,z,pitch)] = f5 ; fout[f_memLR(6 ,x,y,z,pitch)] = f6 ; fout[f_memLR(7 ,x,y,z,pitch)] = f7 ; fout[f_memLR(8 ,x,y,z,pitch)] = f8 ; fout[f_memLR(9 ,x,y,z,pitch)] = f9 ; fout[f_memLR(10,x,y,z,pitch)] = f10; fout[f_memLR(11,x,y,z,pitch)] = f11; fout[f_memLR(12,x,y,z,pitch)] = f12; 
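// Coarse-to-fine transfer on the interface ring of the refined grid: each distribution was
// linearly interpolated in z between two coarse slices,
//   f ~ (zplus-zcoord)*f(zminus) + (zcoord-zminus)*f(zplus),
// with the in-plane part presumably supplied by the texture fetch (hence the +0.5f offsets),
// then rescaled for the finer grid by mrt_scale_cf/bgk_scale_cf with factor SF before the
// writes in this block.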
fout[f_memLR(13,x,y,z,pitch)] = f13; fout[f_memLR(14,x,y,z,pitch)] = f14; fout[f_memLR(15,x,y,z,pitch)] = f15; fout[f_memLR(16,x,y,z,pitch)] = f16; fout[f_memLR(17,x,y,z,pitch)] = f17; fout[f_memLR(18,x,y,z,pitch)] = f18; } else{ f0 = fin[j]; f1 = fin[f_memLR(1 ,x-1,y ,z ,pitch)]; f3 = fin[f_memLR(3 ,x+1,y ,z ,pitch)]; f2 = fin[f_memLR(2 ,x ,y-1,z ,pitch)]; f5 = fin[f_memLR(5 ,x-1,y-1,z ,pitch)]; f6 = fin[f_memLR(6 ,x+1,y-1,z ,pitch)]; f4 = fin[f_memLR(4 ,x ,y+1,z ,pitch)]; f7 = fin[f_memLR(7 ,x+1,y+1,z ,pitch)]; f8 = fin[f_memLR(8 ,x-1,y+1,z ,pitch)]; if(z != 0){ f9 = fin[f_memLR(9 ,x ,y ,z-1,pitch)]; f10= fin[f_memLR(10,x-1,y ,z-1,pitch)]; f11= fin[f_memLR(11,x ,y-1,z-1,pitch)]; f12= fin[f_memLR(12,x+1,y ,z-1,pitch)]; f13= fin[f_memLR(13,x ,y+1,z-1,pitch)]; } else{ f9 = fin[f_memLR(9 ,x ,y ,ZLRDIM-1,pitch)]; f10= fin[f_memLR(10,dmax_p(x-1,XLRDIM),y ,ZLRDIM-1,pitch)]; f11= fin[f_memLR(11,x ,dmax_p(y-1,YLRDIM),ZLRDIM-1,pitch)]; f12= fin[f_memLR(12,dmin_p(x+1,XLRDIM),y ,ZLRDIM-1,pitch)]; f13= fin[f_memLR(13,x ,dmin_p(y+1,YLRDIM),ZLRDIM-1,pitch)]; } if(z != ZLRDIM-1){ f14= fin[f_memLR(14,x ,y ,z+1,pitch)]; f15= fin[f_memLR(15,x-1,y ,z+1,pitch)]; f16= fin[f_memLR(16,x ,y-1,z+1,pitch)]; f17= fin[f_memLR(17,x+1,y ,z+1,pitch)]; f18= fin[f_memLR(18,x ,y+1,z+1,pitch)]; } else{ f14= fin[f_memLR(14,x ,y ,0 ,pitch)]; f15= fin[f_memLR(15,dmax_p(x-1,XLRDIM),y ,0 ,pitch)]; f16= fin[f_memLR(16,x ,dmax_p(y-1,YLRDIM),0 ,pitch)]; f17= fin[f_memLR(17,dmin_p(x+1,XLRDIM),y ,0 ,pitch)]; f18= fin[f_memLR(18,x ,dmin_p(y+1,YLRDIM),0 ,pitch)]; } //else f18 = 0.1f; if(im == 1 || im ==10){//BB fout[f_memLR(1 ,x,y,z,pitch)] = f3 ; fout[f_memLR(2 ,x,y,z,pitch)] = f4 ; fout[f_memLR(3 ,x,y,z,pitch)] = f1 ; fout[f_memLR(4 ,x,y,z,pitch)] = f2 ; fout[f_memLR(5 ,x,y,z,pitch)] = f7 ; fout[f_memLR(6 ,x,y,z,pitch)] = f8 ; fout[f_memLR(7 ,x,y,z,pitch)] = f5 ; fout[f_memLR(8 ,x,y,z,pitch)] = f6 ; fout[f_memLR(9 ,x,y,z,pitch)] = f14; fout[f_memLR(10,x,y,z,pitch)] = f17; fout[f_memLR(11,x,y,z,pitch)] = f18; fout[f_memLR(12,x,y,z,pitch)] = f15; fout[f_memLR(13,x,y,z,pitch)] = f16; fout[f_memLR(14,x,y,z,pitch)] = f9 ; fout[f_memLR(15,x,y,z,pitch)] = f12; fout[f_memLR(16,x,y,z,pitch)] = f13; fout[f_memLR(17,x,y,z,pitch)] = f10; fout[f_memLR(18,x,y,z,pitch)] = f11; } else{ if(MODEL == "MRT") mrt_collide(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,omega); else if(MODEL == "BGK") bgk_collide(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,omega); if(VELAV == "YES"){ if(t>=START_VELAV && t<START_VELFLUC){ u_Av = uAv[j]; v_Av = vAv[j]; vel_avLR(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,u_Av,v_Av,t+1-LRFACTOR); uAv[j] = u_Av; vAv[j] = v_Av; } else if(t>=START_VELFLUC){ u_Av = uAv[j]; v_Av = vAv[j]; u_fluc = ufluc[j]; v_fluc = vfluc[j]; vel_flucLR(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,u_Av,v_Av,u_fluc,v_fluc,t+1-LRFACTOR); ufluc[j] = u_fluc; vfluc[j] = v_fluc; } } fout[f_memLR(0 ,x,y,z,pitch)] = f0 ; fout[f_memLR(1 ,x,y,z,pitch)] = f1 ; fout[f_memLR(2 ,x,y,z,pitch)] = f2 ; fout[f_memLR(3 ,x,y,z,pitch)] = f3 ; fout[f_memLR(4 ,x,y,z,pitch)] = f4 ; fout[f_memLR(5 ,x,y,z,pitch)] = f5 ; fout[f_memLR(6 ,x,y,z,pitch)] = f6 ; fout[f_memLR(7 ,x,y,z,pitch)] = f7 ; fout[f_memLR(8 ,x,y,z,pitch)] = f8 ; fout[f_memLR(9 ,x,y,z,pitch)] = f9 ; fout[f_memLR(10,x,y,z,pitch)] = f10; fout[f_memLR(11,x,y,z,pitch)] = f11; fout[f_memLR(12,x,y,z,pitch)] = f12; fout[f_memLR(13,x,y,z,pitch)] = f13; fout[f_memLR(14,x,y,z,pitch)] = f14; fout[f_memLR(15,x,y,z,pitch)] = f15; 
fout[f_memLR(16,x,y,z,pitch)] = f16; fout[f_memLR(17,x,y,z,pitch)] = f17; fout[f_memLR(18,x,y,z,pitch)] = f18; } }//end else (not at edge of LR) } __global__ void LR_d_BADC2(float* fin, float* fout, float omega, size_t pitch, float SF, int n, int t,float *uAv, float *vAv, float *ufluc, float *vfluc) { int x = threadIdx.x+blockIdx.x*blockDim.x;//coord in linear mem int y = threadIdx.y+blockIdx.y*blockDim.y; int z = threadIdx.z+blockIdx.z*blockDim.z; float xcoord = LRX0+x*LRFACTOR;//+0.5f is because textures are stored in a voxel centered fashion. we need to change this to vertex centered. float ycoord = LRY0+y*LRFACTOR; float zcoord = LRZ0+z*LRFACTOR;//dont need to +0.5f because z is not using texture interpolation // int zminus = int(zcoord); // int zplus = zminus+1; int j = x+y*pitch+z*YLRDIM*pitch;//index on padded mem (pitch in elements) float f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18; int im = ImageFcn(xcoord,ycoord,zcoord); // if(x < n || x > XLRDIM-1-n || y < n || y > YLRDIM-1-n || z < n || z > ZLRDIM-1-n) // { // //no interp // } // else{ f0 = fin[j]; f1 = fin[f_memLR(1 ,x-1,y ,z ,pitch)]; f3 = fin[f_memLR(3 ,x+1,y ,z ,pitch)]; f2 = fin[f_memLR(2 ,x ,y-1,z ,pitch)]; f5 = fin[f_memLR(5 ,x-1,y-1,z ,pitch)]; f6 = fin[f_memLR(6 ,x+1,y-1,z ,pitch)]; f4 = fin[f_memLR(4 ,x ,y+1,z ,pitch)]; f7 = fin[f_memLR(7 ,x+1,y+1,z ,pitch)]; f8 = fin[f_memLR(8 ,x-1,y+1,z ,pitch)]; if(z != 0){ f9 = fin[f_memLR(9 ,x ,y ,z-1,pitch)]; f10= fin[f_memLR(10,x-1,y ,z-1,pitch)]; f11= fin[f_memLR(11,x ,y-1,z-1,pitch)]; f12= fin[f_memLR(12,x+1,y ,z-1,pitch)]; f13= fin[f_memLR(13,x ,y+1,z-1,pitch)]; } else{ f9 = fin[f_memLR(9 ,x ,y ,ZLRDIM-1,pitch)]; f10= fin[f_memLR(10,dmax_p(x-1,XLRDIM),y ,ZLRDIM-1,pitch)]; f11= fin[f_memLR(11,x ,dmax_p(y-1,YLRDIM),ZLRDIM-1,pitch)]; f12= fin[f_memLR(12,dmin_p(x+1,XLRDIM),y ,ZLRDIM-1,pitch)]; f13= fin[f_memLR(13,x ,dmin_p(y+1,YLRDIM),ZLRDIM-1,pitch)]; } if(z != ZLRDIM-1){ f14= fin[f_memLR(14,x ,y ,z+1,pitch)]; f15= fin[f_memLR(15,x-1,y ,z+1,pitch)]; f16= fin[f_memLR(16,x ,y-1,z+1,pitch)]; f17= fin[f_memLR(17,x+1,y ,z+1,pitch)]; f18= fin[f_memLR(18,x ,y+1,z+1,pitch)]; } else{ f14= fin[f_memLR(14,x ,y ,0 ,pitch)]; f15= fin[f_memLR(15,dmax_p(x-1,XLRDIM),y ,0 ,pitch)]; f16= fin[f_memLR(16,x ,dmax_p(y-1,YLRDIM),0 ,pitch)]; f17= fin[f_memLR(17,dmin_p(x+1,XLRDIM),y ,0 ,pitch)]; f18= fin[f_memLR(18,x ,dmin_p(y+1,YLRDIM),0 ,pitch)]; } if(im == 1 || im ==10){//BB fout[f_memLR(1 ,x,y,z,pitch)] = f3 ; fout[f_memLR(2 ,x,y,z,pitch)] = f4 ; fout[f_memLR(3 ,x,y,z,pitch)] = f1 ; fout[f_memLR(4 ,x,y,z,pitch)] = f2 ; fout[f_memLR(5 ,x,y,z,pitch)] = f7 ; fout[f_memLR(6 ,x,y,z,pitch)] = f8 ; fout[f_memLR(7 ,x,y,z,pitch)] = f5 ; fout[f_memLR(8 ,x,y,z,pitch)] = f6 ; fout[f_memLR(9 ,x,y,z,pitch)] = f14; fout[f_memLR(10,x,y,z,pitch)] = f17; fout[f_memLR(11,x,y,z,pitch)] = f18; fout[f_memLR(12,x,y,z,pitch)] = f15; fout[f_memLR(13,x,y,z,pitch)] = f16; fout[f_memLR(14,x,y,z,pitch)] = f9 ; fout[f_memLR(15,x,y,z,pitch)] = f12; fout[f_memLR(16,x,y,z,pitch)] = f13; fout[f_memLR(17,x,y,z,pitch)] = f10; fout[f_memLR(18,x,y,z,pitch)] = f11; } else{ if(MODEL == "MRT") mrt_collide(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,omega); else if(MODEL == "BGK") bgk_collide(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,omega); if(VELAV == "YES"){ float u_Av, v_Av, u_fluc, v_fluc; if(t>=START_VELAV && t<START_VELFLUC){ u_Av = uAv[j]; v_Av = vAv[j]; vel_avLR(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,u_Av,v_Av,t+LRFACTOR*n); uAv[j] = 
u_Av; vAv[j] = v_Av; } else if(t>=START_VELFLUC){ u_Av = uAv[j]; v_Av = vAv[j]; u_fluc = ufluc[j]; v_fluc = vfluc[j]; vel_flucLR(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,u_Av,v_Av,u_fluc,v_fluc,t+LRFACTOR*n); ufluc[j] = u_fluc; vfluc[j] = v_fluc; } } fout[f_memLR(0 ,x,y,z,pitch)] = f0 ; fout[f_memLR(1 ,x,y,z,pitch)] = f1 ; fout[f_memLR(2 ,x,y,z,pitch)] = f2 ; fout[f_memLR(3 ,x,y,z,pitch)] = f3 ; fout[f_memLR(4 ,x,y,z,pitch)] = f4 ; fout[f_memLR(5 ,x,y,z,pitch)] = f5 ; fout[f_memLR(6 ,x,y,z,pitch)] = f6 ; fout[f_memLR(7 ,x,y,z,pitch)] = f7 ; fout[f_memLR(8 ,x,y,z,pitch)] = f8 ; fout[f_memLR(9 ,x,y,z,pitch)] = f9 ; fout[f_memLR(10,x,y,z,pitch)] = f10; fout[f_memLR(11,x,y,z,pitch)] = f11; fout[f_memLR(12,x,y,z,pitch)] = f12; fout[f_memLR(13,x,y,z,pitch)] = f13; fout[f_memLR(14,x,y,z,pitch)] = f14; fout[f_memLR(15,x,y,z,pitch)] = f15; fout[f_memLR(16,x,y,z,pitch)] = f16; fout[f_memLR(17,x,y,z,pitch)] = f17; fout[f_memLR(18,x,y,z,pitch)] = f18; } // }//end else (not at edge of LR) } __global__ void LR_d_BADC_Interp(float* fin, float* fout, float omega, size_t pitch, float SF, int t,float *uAv, float *vAv, float *ufluc, float *vfluc) { int x = threadIdx.x+blockIdx.x*blockDim.x;//coord in linear mem int y = threadIdx.y+blockIdx.y*blockDim.y; int z = threadIdx.z+blockIdx.z*blockDim.z; float xcoord = LRX0+x*LRFACTOR;//+0.5f is because textures are stored in a voxel centered fashion. we need to change this to vertex centered. float ycoord = LRY0+y*LRFACTOR; float zcoord = LRZ0+z*LRFACTOR;//dont need to +0.5f because z is not using texture interpolation int zminus = int(zcoord); int zplus = zminus+1; int j = x+y*pitch+z*YLRDIM*pitch;//index on padded mem (pitch in elements) float f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18; int im = ImageFcn(xcoord,ycoord,zcoord); if(((x < LRLEVEL || x > XLRDIM-1-LRLEVEL || y < LRLEVEL || y > YLRDIM-1-LRLEVEL || z < LRLEVEL || z > ZLRDIM-1-LRLEVEL) && ZPERIODIC == "NO") ||((x < LRLEVEL || x > XLRDIM-1-LRLEVEL || y < LRLEVEL || y > YLRDIM-1-LRLEVEL) && ZPERIODIC == "YES")) { if(ZPERIODIC == "YES"){ if(zcoord < 0){ //if zcoord=-0.25f, 1+zcoord=0.75f f0 = (1.f+zcoord)*tex2D(texRef_f0A ,xcoord+0.5f,ycoord+0.5f+YDIM*(zminus))+(-zcoord)*tex2D(texRef_f0A ,xcoord+0.5f,ycoord+0.5f+YDIM*(ZDIM-1)); f2 = (1.f+zcoord)*tex2D(texRef_f2A ,xcoord+0.5f,ycoord+0.5f+YDIM*(zminus))+(-zcoord)*tex2D(texRef_f2A ,xcoord+0.5f,ycoord+0.5f+YDIM*(ZDIM-1)); f4 = (1.f+zcoord)*tex2D(texRef_f4A ,xcoord+0.5f,ycoord+0.5f+YDIM*(zminus))+(-zcoord)*tex2D(texRef_f4A ,xcoord+0.5f,ycoord+0.5f+YDIM*(ZDIM-1)); f9 = (1.f+zcoord)*tex2D(texRef_f9A ,xcoord+0.5f,ycoord+0.5f+YDIM*(zminus))+(-zcoord)*tex2D(texRef_f9A ,xcoord+0.5f,ycoord+0.5f+YDIM*(ZDIM-1)); f11= (1.f+zcoord)*tex2D(texRef_f11A,xcoord+0.5f,ycoord+0.5f+YDIM*(zminus))+(-zcoord)*tex2D(texRef_f11A,xcoord+0.5f,ycoord+0.5f+YDIM*(ZDIM-1)); f13= (1.f+zcoord)*tex2D(texRef_f13A,xcoord+0.5f,ycoord+0.5f+YDIM*(zminus))+(-zcoord)*tex2D(texRef_f13A,xcoord+0.5f,ycoord+0.5f+YDIM*(ZDIM-1)); f14= (1.f+zcoord)*tex2D(texRef_f14A,xcoord+0.5f,ycoord+0.5f+YDIM*(zminus))+(-zcoord)*tex2D(texRef_f14A,xcoord+0.5f,ycoord+0.5f+YDIM*(ZDIM-1)); f16= (1.f+zcoord)*tex2D(texRef_f16A,xcoord+0.5f,ycoord+0.5f+YDIM*(zminus))+(-zcoord)*tex2D(texRef_f16A,xcoord+0.5f,ycoord+0.5f+YDIM*(ZDIM-1)); f18= (1.f+zcoord)*tex2D(texRef_f18A,xcoord+0.5f,ycoord+0.5f+YDIM*(zminus))+(-zcoord)*tex2D(texRef_f18A,xcoord+0.5f,ycoord+0.5f+YDIM*(ZDIM-1)); f1 = (1.f+zcoord)*tex2D(texRef_f1A ,xcoord+0.5f,ycoord+0.5f+YDIM*(zminus))+(-zcoord)*tex2D(texRef_f1A 
,xcoord+0.5f,ycoord+0.5f+YDIM*(ZDIM-1)); f3 = (1.f+zcoord)*tex2D(texRef_f3A ,xcoord+0.5f,ycoord+0.5f+YDIM*(zminus))+(-zcoord)*tex2D(texRef_f3A ,xcoord+0.5f,ycoord+0.5f+YDIM*(ZDIM-1)); f5 = (1.f+zcoord)*tex2D(texRef_f5A ,xcoord+0.5f,ycoord+0.5f+YDIM*(zminus))+(-zcoord)*tex2D(texRef_f5A ,xcoord+0.5f,ycoord+0.5f+YDIM*(ZDIM-1)); f6 = (1.f+zcoord)*tex2D(texRef_f6A ,xcoord+0.5f,ycoord+0.5f+YDIM*(zminus))+(-zcoord)*tex2D(texRef_f6A ,xcoord+0.5f,ycoord+0.5f+YDIM*(ZDIM-1)); f7 = (1.f+zcoord)*tex2D(texRef_f7A ,xcoord+0.5f,ycoord+0.5f+YDIM*(zminus))+(-zcoord)*tex2D(texRef_f7A ,xcoord+0.5f,ycoord+0.5f+YDIM*(ZDIM-1)); f8 = (1.f+zcoord)*tex2D(texRef_f8A ,xcoord+0.5f,ycoord+0.5f+YDIM*(zminus))+(-zcoord)*tex2D(texRef_f8A ,xcoord+0.5f,ycoord+0.5f+YDIM*(ZDIM-1)); f15= (1.f+zcoord)*tex2D(texRef_f15A,xcoord+0.5f,ycoord+0.5f+YDIM*(zminus))+(-zcoord)*tex2D(texRef_f15A,xcoord+0.5f,ycoord+0.5f+YDIM*(ZDIM-1)); f17= (1.f+zcoord)*tex2D(texRef_f17A,xcoord+0.5f,ycoord+0.5f+YDIM*(zminus))+(-zcoord)*tex2D(texRef_f17A,xcoord+0.5f,ycoord+0.5f+YDIM*(ZDIM-1)); f10= (1.f+zcoord)*tex2D(texRef_f10A,xcoord+0.5f,ycoord+0.5f+YDIM*(zminus))+(-zcoord)*tex2D(texRef_f10A,xcoord+0.5f,ycoord+0.5f+YDIM*(ZDIM-1)); f12= (1.f+zcoord)*tex2D(texRef_f12A,xcoord+0.5f,ycoord+0.5f+YDIM*(zminus))+(-zcoord)*tex2D(texRef_f12A,xcoord+0.5f,ycoord+0.5f+YDIM*(ZDIM-1)); } else if(zcoord > ZDIM-1){ f0 = (zplus-zcoord)*tex2D(texRef_f0A ,xcoord+0.5f,ycoord+0.5f+YDIM*(zminus))+(zcoord-zminus)*tex2D(texRef_f0A ,xcoord+0.5f,ycoord+0.5f+YDIM*(0)); f2 = (zplus-zcoord)*tex2D(texRef_f2A ,xcoord+0.5f,ycoord+0.5f+YDIM*(zminus))+(zcoord-zminus)*tex2D(texRef_f2A ,xcoord+0.5f,ycoord+0.5f+YDIM*(0)); f4 = (zplus-zcoord)*tex2D(texRef_f4A ,xcoord+0.5f,ycoord+0.5f+YDIM*(zminus))+(zcoord-zminus)*tex2D(texRef_f4A ,xcoord+0.5f,ycoord+0.5f+YDIM*(0)); f9 = (zplus-zcoord)*tex2D(texRef_f9A ,xcoord+0.5f,ycoord+0.5f+YDIM*(zminus))+(zcoord-zminus)*tex2D(texRef_f9A ,xcoord+0.5f,ycoord+0.5f+YDIM*(0)); f11= (zplus-zcoord)*tex2D(texRef_f11A,xcoord+0.5f,ycoord+0.5f+YDIM*(zminus))+(zcoord-zminus)*tex2D(texRef_f11A,xcoord+0.5f,ycoord+0.5f+YDIM*(0)); f13= (zplus-zcoord)*tex2D(texRef_f13A,xcoord+0.5f,ycoord+0.5f+YDIM*(zminus))+(zcoord-zminus)*tex2D(texRef_f13A,xcoord+0.5f,ycoord+0.5f+YDIM*(0)); f14= (zplus-zcoord)*tex2D(texRef_f14A,xcoord+0.5f,ycoord+0.5f+YDIM*(zminus))+(zcoord-zminus)*tex2D(texRef_f14A,xcoord+0.5f,ycoord+0.5f+YDIM*(0)); f16= (zplus-zcoord)*tex2D(texRef_f16A,xcoord+0.5f,ycoord+0.5f+YDIM*(zminus))+(zcoord-zminus)*tex2D(texRef_f16A,xcoord+0.5f,ycoord+0.5f+YDIM*(0)); f18= (zplus-zcoord)*tex2D(texRef_f18A,xcoord+0.5f,ycoord+0.5f+YDIM*(zminus))+(zcoord-zminus)*tex2D(texRef_f18A,xcoord+0.5f,ycoord+0.5f+YDIM*(0)); f1 = (zplus-zcoord)*tex2D(texRef_f1A ,xcoord+0.5f,ycoord+0.5f+YDIM*(zminus))+(zcoord-zminus)*tex2D(texRef_f1A ,xcoord+0.5f,ycoord+0.5f+YDIM*(0)); f3 = (zplus-zcoord)*tex2D(texRef_f3A ,xcoord+0.5f,ycoord+0.5f+YDIM*(zminus))+(zcoord-zminus)*tex2D(texRef_f3A ,xcoord+0.5f,ycoord+0.5f+YDIM*(0)); f5 = (zplus-zcoord)*tex2D(texRef_f5A ,xcoord+0.5f,ycoord+0.5f+YDIM*(zminus))+(zcoord-zminus)*tex2D(texRef_f5A ,xcoord+0.5f,ycoord+0.5f+YDIM*(0)); f6 = (zplus-zcoord)*tex2D(texRef_f6A ,xcoord+0.5f,ycoord+0.5f+YDIM*(zminus))+(zcoord-zminus)*tex2D(texRef_f6A ,xcoord+0.5f,ycoord+0.5f+YDIM*(0)); f7 = (zplus-zcoord)*tex2D(texRef_f7A ,xcoord+0.5f,ycoord+0.5f+YDIM*(zminus))+(zcoord-zminus)*tex2D(texRef_f7A ,xcoord+0.5f,ycoord+0.5f+YDIM*(0)); f8 = (zplus-zcoord)*tex2D(texRef_f8A ,xcoord+0.5f,ycoord+0.5f+YDIM*(zminus))+(zcoord-zminus)*tex2D(texRef_f8A ,xcoord+0.5f,ycoord+0.5f+YDIM*(0)); f15= 
(zplus-zcoord)*tex2D(texRef_f15A,xcoord+0.5f,ycoord+0.5f+YDIM*(zminus))+(zcoord-zminus)*tex2D(texRef_f15A,xcoord+0.5f,ycoord+0.5f+YDIM*(0)); f17= (zplus-zcoord)*tex2D(texRef_f17A,xcoord+0.5f,ycoord+0.5f+YDIM*(zminus))+(zcoord-zminus)*tex2D(texRef_f17A,xcoord+0.5f,ycoord+0.5f+YDIM*(0)); f10= (zplus-zcoord)*tex2D(texRef_f10A,xcoord+0.5f,ycoord+0.5f+YDIM*(zminus))+(zcoord-zminus)*tex2D(texRef_f10A,xcoord+0.5f,ycoord+0.5f+YDIM*(0)); f12= (zplus-zcoord)*tex2D(texRef_f12A,xcoord+0.5f,ycoord+0.5f+YDIM*(zminus))+(zcoord-zminus)*tex2D(texRef_f12A,xcoord+0.5f,ycoord+0.5f+YDIM*(0)); } else{ //interpolate for next time step. from A //YDIM and not YLRDIM f0 = (zplus-zcoord)*tex2D(texRef_f0A ,xcoord+0.5f,ycoord+0.5f+YDIM*(zminus))+(zcoord-zminus)*tex2D(texRef_f0A ,xcoord+0.5f,ycoord+0.5f+YDIM*(zplus)); f2 = (zplus-zcoord)*tex2D(texRef_f2A ,xcoord+0.5f,ycoord+0.5f+YDIM*(zminus))+(zcoord-zminus)*tex2D(texRef_f2A ,xcoord+0.5f,ycoord+0.5f+YDIM*(zplus)); f4 = (zplus-zcoord)*tex2D(texRef_f4A ,xcoord+0.5f,ycoord+0.5f+YDIM*(zminus))+(zcoord-zminus)*tex2D(texRef_f4A ,xcoord+0.5f,ycoord+0.5f+YDIM*(zplus)); f9 = (zplus-zcoord)*tex2D(texRef_f9A ,xcoord+0.5f,ycoord+0.5f+YDIM*(zminus))+(zcoord-zminus)*tex2D(texRef_f9A ,xcoord+0.5f,ycoord+0.5f+YDIM*(zplus)); f11= (zplus-zcoord)*tex2D(texRef_f11A,xcoord+0.5f,ycoord+0.5f+YDIM*(zminus))+(zcoord-zminus)*tex2D(texRef_f11A,xcoord+0.5f,ycoord+0.5f+YDIM*(zplus)); f13= (zplus-zcoord)*tex2D(texRef_f13A,xcoord+0.5f,ycoord+0.5f+YDIM*(zminus))+(zcoord-zminus)*tex2D(texRef_f13A,xcoord+0.5f,ycoord+0.5f+YDIM*(zplus)); f14= (zplus-zcoord)*tex2D(texRef_f14A,xcoord+0.5f,ycoord+0.5f+YDIM*(zminus))+(zcoord-zminus)*tex2D(texRef_f14A,xcoord+0.5f,ycoord+0.5f+YDIM*(zplus)); f16= (zplus-zcoord)*tex2D(texRef_f16A,xcoord+0.5f,ycoord+0.5f+YDIM*(zminus))+(zcoord-zminus)*tex2D(texRef_f16A,xcoord+0.5f,ycoord+0.5f+YDIM*(zplus)); f18= (zplus-zcoord)*tex2D(texRef_f18A,xcoord+0.5f,ycoord+0.5f+YDIM*(zminus))+(zcoord-zminus)*tex2D(texRef_f18A,xcoord+0.5f,ycoord+0.5f+YDIM*(zplus)); f1 = (zplus-zcoord)*tex2D(texRef_f1A ,xcoord+0.5f,ycoord+0.5f+YDIM*(zminus))+(zcoord-zminus)*tex2D(texRef_f1A ,xcoord+0.5f,ycoord+0.5f+YDIM*(zplus)); f3 = (zplus-zcoord)*tex2D(texRef_f3A ,xcoord+0.5f,ycoord+0.5f+YDIM*(zminus))+(zcoord-zminus)*tex2D(texRef_f3A ,xcoord+0.5f,ycoord+0.5f+YDIM*(zplus)); f5 = (zplus-zcoord)*tex2D(texRef_f5A ,xcoord+0.5f,ycoord+0.5f+YDIM*(zminus))+(zcoord-zminus)*tex2D(texRef_f5A ,xcoord+0.5f,ycoord+0.5f+YDIM*(zplus)); f6 = (zplus-zcoord)*tex2D(texRef_f6A ,xcoord+0.5f,ycoord+0.5f+YDIM*(zminus))+(zcoord-zminus)*tex2D(texRef_f6A ,xcoord+0.5f,ycoord+0.5f+YDIM*(zplus)); f7 = (zplus-zcoord)*tex2D(texRef_f7A ,xcoord+0.5f,ycoord+0.5f+YDIM*(zminus))+(zcoord-zminus)*tex2D(texRef_f7A ,xcoord+0.5f,ycoord+0.5f+YDIM*(zplus)); f8 = (zplus-zcoord)*tex2D(texRef_f8A ,xcoord+0.5f,ycoord+0.5f+YDIM*(zminus))+(zcoord-zminus)*tex2D(texRef_f8A ,xcoord+0.5f,ycoord+0.5f+YDIM*(zplus)); f15= (zplus-zcoord)*tex2D(texRef_f15A,xcoord+0.5f,ycoord+0.5f+YDIM*(zminus))+(zcoord-zminus)*tex2D(texRef_f15A,xcoord+0.5f,ycoord+0.5f+YDIM*(zplus)); f17= (zplus-zcoord)*tex2D(texRef_f17A,xcoord+0.5f,ycoord+0.5f+YDIM*(zminus))+(zcoord-zminus)*tex2D(texRef_f17A,xcoord+0.5f,ycoord+0.5f+YDIM*(zplus)); f10= (zplus-zcoord)*tex2D(texRef_f10A,xcoord+0.5f,ycoord+0.5f+YDIM*(zminus))+(zcoord-zminus)*tex2D(texRef_f10A,xcoord+0.5f,ycoord+0.5f+YDIM*(zplus)); f12= (zplus-zcoord)*tex2D(texRef_f12A,xcoord+0.5f,ycoord+0.5f+YDIM*(zminus))+(zcoord-zminus)*tex2D(texRef_f12A,xcoord+0.5f,ycoord+0.5f+YDIM*(zplus)); } } else{ //interpolate for next time step. 
from A //YDIM and not YLRDIM f0 = (zplus-zcoord)*tex2D(texRef_f0A ,xcoord+0.5f,ycoord+0.5f+YDIM*(zminus))+(zcoord-zminus)*tex2D(texRef_f0A ,xcoord+0.5f,ycoord+0.5f+YDIM*(zplus)); f2 = (zplus-zcoord)*tex2D(texRef_f2A ,xcoord+0.5f,ycoord+0.5f+YDIM*(zminus))+(zcoord-zminus)*tex2D(texRef_f2A ,xcoord+0.5f,ycoord+0.5f+YDIM*(zplus)); f4 = (zplus-zcoord)*tex2D(texRef_f4A ,xcoord+0.5f,ycoord+0.5f+YDIM*(zminus))+(zcoord-zminus)*tex2D(texRef_f4A ,xcoord+0.5f,ycoord+0.5f+YDIM*(zplus)); f9 = (zplus-zcoord)*tex2D(texRef_f9A ,xcoord+0.5f,ycoord+0.5f+YDIM*(zminus))+(zcoord-zminus)*tex2D(texRef_f9A ,xcoord+0.5f,ycoord+0.5f+YDIM*(zplus)); f11= (zplus-zcoord)*tex2D(texRef_f11A,xcoord+0.5f,ycoord+0.5f+YDIM*(zminus))+(zcoord-zminus)*tex2D(texRef_f11A,xcoord+0.5f,ycoord+0.5f+YDIM*(zplus)); f13= (zplus-zcoord)*tex2D(texRef_f13A,xcoord+0.5f,ycoord+0.5f+YDIM*(zminus))+(zcoord-zminus)*tex2D(texRef_f13A,xcoord+0.5f,ycoord+0.5f+YDIM*(zplus)); f14= (zplus-zcoord)*tex2D(texRef_f14A,xcoord+0.5f,ycoord+0.5f+YDIM*(zminus))+(zcoord-zminus)*tex2D(texRef_f14A,xcoord+0.5f,ycoord+0.5f+YDIM*(zplus)); f16= (zplus-zcoord)*tex2D(texRef_f16A,xcoord+0.5f,ycoord+0.5f+YDIM*(zminus))+(zcoord-zminus)*tex2D(texRef_f16A,xcoord+0.5f,ycoord+0.5f+YDIM*(zplus)); f18= (zplus-zcoord)*tex2D(texRef_f18A,xcoord+0.5f,ycoord+0.5f+YDIM*(zminus))+(zcoord-zminus)*tex2D(texRef_f18A,xcoord+0.5f,ycoord+0.5f+YDIM*(zplus)); f1 = (zplus-zcoord)*tex2D(texRef_f1A ,xcoord+0.5f,ycoord+0.5f+YDIM*(zminus))+(zcoord-zminus)*tex2D(texRef_f1A ,xcoord+0.5f,ycoord+0.5f+YDIM*(zplus)); f3 = (zplus-zcoord)*tex2D(texRef_f3A ,xcoord+0.5f,ycoord+0.5f+YDIM*(zminus))+(zcoord-zminus)*tex2D(texRef_f3A ,xcoord+0.5f,ycoord+0.5f+YDIM*(zplus)); f5 = (zplus-zcoord)*tex2D(texRef_f5A ,xcoord+0.5f,ycoord+0.5f+YDIM*(zminus))+(zcoord-zminus)*tex2D(texRef_f5A ,xcoord+0.5f,ycoord+0.5f+YDIM*(zplus)); f6 = (zplus-zcoord)*tex2D(texRef_f6A ,xcoord+0.5f,ycoord+0.5f+YDIM*(zminus))+(zcoord-zminus)*tex2D(texRef_f6A ,xcoord+0.5f,ycoord+0.5f+YDIM*(zplus)); f7 = (zplus-zcoord)*tex2D(texRef_f7A ,xcoord+0.5f,ycoord+0.5f+YDIM*(zminus))+(zcoord-zminus)*tex2D(texRef_f7A ,xcoord+0.5f,ycoord+0.5f+YDIM*(zplus)); f8 = (zplus-zcoord)*tex2D(texRef_f8A ,xcoord+0.5f,ycoord+0.5f+YDIM*(zminus))+(zcoord-zminus)*tex2D(texRef_f8A ,xcoord+0.5f,ycoord+0.5f+YDIM*(zplus)); f15= (zplus-zcoord)*tex2D(texRef_f15A,xcoord+0.5f,ycoord+0.5f+YDIM*(zminus))+(zcoord-zminus)*tex2D(texRef_f15A,xcoord+0.5f,ycoord+0.5f+YDIM*(zplus)); f17= (zplus-zcoord)*tex2D(texRef_f17A,xcoord+0.5f,ycoord+0.5f+YDIM*(zminus))+(zcoord-zminus)*tex2D(texRef_f17A,xcoord+0.5f,ycoord+0.5f+YDIM*(zplus)); f10= (zplus-zcoord)*tex2D(texRef_f10A,xcoord+0.5f,ycoord+0.5f+YDIM*(zminus))+(zcoord-zminus)*tex2D(texRef_f10A,xcoord+0.5f,ycoord+0.5f+YDIM*(zplus)); f12= (zplus-zcoord)*tex2D(texRef_f12A,xcoord+0.5f,ycoord+0.5f+YDIM*(zminus))+(zcoord-zminus)*tex2D(texRef_f12A,xcoord+0.5f,ycoord+0.5f+YDIM*(zplus)); } if(MODEL == "MRT") mrt_scale_cf(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,SF); else bgk_scale_cf(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,SF); fout[f_memLR(0 ,x,y,z,pitch)] = f0 ; fout[f_memLR(1 ,x,y,z,pitch)] = f1 ; fout[f_memLR(2 ,x,y,z,pitch)] = f2 ; fout[f_memLR(3 ,x,y,z,pitch)] = f3 ; fout[f_memLR(4 ,x,y,z,pitch)] = f4 ; fout[f_memLR(5 ,x,y,z,pitch)] = f5 ; fout[f_memLR(6 ,x,y,z,pitch)] = f6 ; fout[f_memLR(7 ,x,y,z,pitch)] = f7 ; fout[f_memLR(8 ,x,y,z,pitch)] = f8 ; fout[f_memLR(9 ,x,y,z,pitch)] = f9 ; fout[f_memLR(10,x,y,z,pitch)] = f10; fout[f_memLR(11,x,y,z,pitch)] = f11; fout[f_memLR(12,x,y,z,pitch)] = f12; 
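// Same interface transfer as in LR_d_ABDC_Interp above, but reading the texRef_f*A copies of
// the coarse-grid distributions; A and B presumably hold the two halves of the coarse grid's
// ping-pong (alternating time-step) buffers.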
fout[f_memLR(13,x,y,z,pitch)] = f13; fout[f_memLR(14,x,y,z,pitch)] = f14; fout[f_memLR(15,x,y,z,pitch)] = f15; fout[f_memLR(16,x,y,z,pitch)] = f16; fout[f_memLR(17,x,y,z,pitch)] = f17; fout[f_memLR(18,x,y,z,pitch)] = f18; } else{ f0 = fin[j]; f1 = fin[f_memLR(1 ,x-1,y ,z ,pitch)]; f3 = fin[f_memLR(3 ,x+1,y ,z ,pitch)]; f2 = fin[f_memLR(2 ,x ,y-1,z ,pitch)]; f5 = fin[f_memLR(5 ,x-1,y-1,z ,pitch)]; f6 = fin[f_memLR(6 ,x+1,y-1,z ,pitch)]; f4 = fin[f_memLR(4 ,x ,y+1,z ,pitch)]; f7 = fin[f_memLR(7 ,x+1,y+1,z ,pitch)]; f8 = fin[f_memLR(8 ,x-1,y+1,z ,pitch)]; if(z != 0){ f9 = fin[f_memLR(9 ,x ,y ,z-1,pitch)]; f10= fin[f_memLR(10,x-1,y ,z-1,pitch)]; f11= fin[f_memLR(11,x ,y-1,z-1,pitch)]; f12= fin[f_memLR(12,x+1,y ,z-1,pitch)]; f13= fin[f_memLR(13,x ,y+1,z-1,pitch)]; } else{ f9 = fin[f_memLR(9 ,x ,y ,ZLRDIM-1,pitch)]; f10= fin[f_memLR(10,dmax_p(x-1,XLRDIM),y ,ZLRDIM-1,pitch)]; f11= fin[f_memLR(11,x ,dmax_p(y-1,YLRDIM),ZLRDIM-1,pitch)]; f12= fin[f_memLR(12,dmin_p(x+1,XLRDIM),y ,ZLRDIM-1,pitch)]; f13= fin[f_memLR(13,x ,dmin_p(y+1,YLRDIM),ZLRDIM-1,pitch)]; } if(z != ZLRDIM-1){ f14= fin[f_memLR(14,x ,y ,z+1,pitch)]; f15= fin[f_memLR(15,x-1,y ,z+1,pitch)]; f16= fin[f_memLR(16,x ,y-1,z+1,pitch)]; f17= fin[f_memLR(17,x+1,y ,z+1,pitch)]; f18= fin[f_memLR(18,x ,y+1,z+1,pitch)]; } else{ f14= fin[f_memLR(14,x ,y ,0 ,pitch)]; f15= fin[f_memLR(15,dmax_p(x-1,XLRDIM),y ,0 ,pitch)]; f16= fin[f_memLR(16,x ,dmax_p(y-1,YLRDIM),0 ,pitch)]; f17= fin[f_memLR(17,dmin_p(x+1,XLRDIM),y ,0 ,pitch)]; f18= fin[f_memLR(18,x ,dmin_p(y+1,YLRDIM),0 ,pitch)]; } if(im == 1 || im ==10){//BB fout[f_memLR(1 ,x,y,z,pitch)] = f3 ; fout[f_memLR(2 ,x,y,z,pitch)] = f4 ; fout[f_memLR(3 ,x,y,z,pitch)] = f1 ; fout[f_memLR(4 ,x,y,z,pitch)] = f2 ; fout[f_memLR(5 ,x,y,z,pitch)] = f7 ; fout[f_memLR(6 ,x,y,z,pitch)] = f8 ; fout[f_memLR(7 ,x,y,z,pitch)] = f5 ; fout[f_memLR(8 ,x,y,z,pitch)] = f6 ; fout[f_memLR(9 ,x,y,z,pitch)] = f14; fout[f_memLR(10,x,y,z,pitch)] = f17; fout[f_memLR(11,x,y,z,pitch)] = f18; fout[f_memLR(12,x,y,z,pitch)] = f15; fout[f_memLR(13,x,y,z,pitch)] = f16; fout[f_memLR(14,x,y,z,pitch)] = f9 ; fout[f_memLR(15,x,y,z,pitch)] = f12; fout[f_memLR(16,x,y,z,pitch)] = f13; fout[f_memLR(17,x,y,z,pitch)] = f10; fout[f_memLR(18,x,y,z,pitch)] = f11; } else{ if(MODEL == "MRT") mrt_collide(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,omega); else if(MODEL == "BGK") bgk_collide(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,omega); if(VELAV == "YES"){ float u_Av, v_Av, u_fluc, v_fluc; if(t>=START_VELAV && t<START_VELFLUC){ u_Av = uAv[j]; v_Av = vAv[j]; vel_avLR(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,u_Av,v_Av,t+1-LRFACTOR); uAv[j] = u_Av; vAv[j] = v_Av; } else if(t>=START_VELFLUC){ u_Av = uAv[j]; v_Av = vAv[j]; u_fluc = ufluc[j]; v_fluc = vfluc[j]; vel_flucLR(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,u_Av,v_Av,u_fluc,v_fluc,t+1-LRFACTOR); ufluc[j] = u_fluc; vfluc[j] = v_fluc; } } fout[f_memLR(0 ,x,y,z,pitch)] = f0 ; fout[f_memLR(1 ,x,y,z,pitch)] = f1 ; fout[f_memLR(2 ,x,y,z,pitch)] = f2 ; fout[f_memLR(3 ,x,y,z,pitch)] = f3 ; fout[f_memLR(4 ,x,y,z,pitch)] = f4 ; fout[f_memLR(5 ,x,y,z,pitch)] = f5 ; fout[f_memLR(6 ,x,y,z,pitch)] = f6 ; fout[f_memLR(7 ,x,y,z,pitch)] = f7 ; fout[f_memLR(8 ,x,y,z,pitch)] = f8 ; fout[f_memLR(9 ,x,y,z,pitch)] = f9 ; fout[f_memLR(10,x,y,z,pitch)] = f10; fout[f_memLR(11,x,y,z,pitch)] = f11; fout[f_memLR(12,x,y,z,pitch)] = f12; fout[f_memLR(13,x,y,z,pitch)] = f13; fout[f_memLR(14,x,y,z,pitch)] = f14; 
fout[f_memLR(15,x,y,z,pitch)] = f15; fout[f_memLR(16,x,y,z,pitch)] = f16; fout[f_memLR(17,x,y,z,pitch)] = f17; fout[f_memLR(18,x,y,z,pitch)] = f18; } }//end else (not at edge of LR) } __global__ void LR_d_ABCD(float* fin, float* fout, float omega, size_t pitch,//pitch in elements int t,float *uAv, float *vAv, float *ufluc, float *vfluc) { int x = threadIdx.x+blockIdx.x*blockDim.x;//coord in linear mem int y = threadIdx.y+blockIdx.y*blockDim.y; int z = threadIdx.z+blockIdx.z*blockDim.z; float xcoord = LRX0+x*LRFACTOR; float ycoord = LRY0+y*LRFACTOR; float zcoord = LRZ0+z*LRFACTOR; int j = x+y*pitch+z*YLRDIM*pitch;//index on padded mem (pitch in elements) float f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18; int im = ImageFcn(xcoord,ycoord,zcoord); float u_Av, v_Av, u_fluc, v_fluc; // if(x < 2 || x > LRX0+XLRDIM-3 || y < 2 || y > LRY0+YLRDIM-3 || z < 2 || z > LRZ0+ZLRDIM-3) // im = -1;//not valid for extraction // if(x < 1 || x > LRX0+XLRDIM-2 || y < 1 || y > LRY0+YLRDIM-2 || z < 1 || z > LRZ0+ZLRDIM-2) // { // im = -2;//not valid for second TS // } // if(x < 1 || x > XLRDIM-2 || y < 1 || y > YLRDIM-2 || z < 1 || z > ZLRDIM-2) // { // //dont do anything // } // else{ f0 = fin[j]; f1 = fin[f_memLR(1 ,x-1,y ,z ,pitch)]; f3 = fin[f_memLR(3 ,x+1,y ,z ,pitch)]; f2 = fin[f_memLR(2 ,x ,y-1,z ,pitch)]; f5 = fin[f_memLR(5 ,x-1,y-1,z ,pitch)]; f6 = fin[f_memLR(6 ,x+1,y-1,z ,pitch)]; f4 = fin[f_memLR(4 ,x ,y+1,z ,pitch)]; f7 = fin[f_memLR(7 ,x+1,y+1,z ,pitch)]; f8 = fin[f_memLR(8 ,x-1,y+1,z ,pitch)]; if(z != 0){ f9 = fin[f_memLR(9 ,x ,y ,z-1,pitch)]; f10= fin[f_memLR(10,x-1,y ,z-1,pitch)]; f11= fin[f_memLR(11,x ,y-1,z-1,pitch)]; f12= fin[f_memLR(12,x+1,y ,z-1,pitch)]; f13= fin[f_memLR(13,x ,y+1,z-1,pitch)]; } else{ f9 = fin[f_memLR(9 ,x ,y ,ZLRDIM-1,pitch)]; f10= fin[f_memLR(10,dmax_p(x-1,XLRDIM),y ,ZLRDIM-1,pitch)]; f11= fin[f_memLR(11,x ,dmax_p(y-1,YLRDIM),ZLRDIM-1,pitch)]; f12= fin[f_memLR(12,dmin_p(x+1,XLRDIM),y ,ZLRDIM-1,pitch)]; f13= fin[f_memLR(13,x ,dmin_p(y+1,YLRDIM),ZLRDIM-1,pitch)]; } if(z != ZLRDIM-1){ f14= fin[f_memLR(14,x ,y ,z+1,pitch)]; f15= fin[f_memLR(15,x-1,y ,z+1,pitch)]; f16= fin[f_memLR(16,x ,y-1,z+1,pitch)]; f17= fin[f_memLR(17,x+1,y ,z+1,pitch)]; f18= fin[f_memLR(18,x ,y+1,z+1,pitch)]; } else{ f14= fin[f_memLR(14,x ,y ,0 ,pitch)]; f15= fin[f_memLR(15,dmax_p(x-1,XLRDIM),y ,0 ,pitch)]; f16= fin[f_memLR(16,x ,dmax_p(y-1,YLRDIM),0 ,pitch)]; f17= fin[f_memLR(17,dmin_p(x+1,XLRDIM),y ,0 ,pitch)]; f18= fin[f_memLR(18,x ,dmin_p(y+1,YLRDIM),0 ,pitch)]; } if(im == 1 || im ==10){//BB fout[f_memLR(1 ,x,y,z,pitch)] = f3 ; fout[f_memLR(2 ,x,y,z,pitch)] = f4 ; fout[f_memLR(3 ,x,y,z,pitch)] = f1 ; fout[f_memLR(4 ,x,y,z,pitch)] = f2 ; fout[f_memLR(5 ,x,y,z,pitch)] = f7 ; fout[f_memLR(6 ,x,y,z,pitch)] = f8 ; fout[f_memLR(7 ,x,y,z,pitch)] = f5 ; fout[f_memLR(8 ,x,y,z,pitch)] = f6 ; fout[f_memLR(9 ,x,y,z,pitch)] = f14; fout[f_memLR(10,x,y,z,pitch)] = f17; fout[f_memLR(11,x,y,z,pitch)] = f18; fout[f_memLR(12,x,y,z,pitch)] = f15; fout[f_memLR(13,x,y,z,pitch)] = f16; fout[f_memLR(14,x,y,z,pitch)] = f9 ; fout[f_memLR(15,x,y,z,pitch)] = f12; fout[f_memLR(16,x,y,z,pitch)] = f13; fout[f_memLR(17,x,y,z,pitch)] = f10; fout[f_memLR(18,x,y,z,pitch)] = f11; } else{ if(MODEL == "MRT") mrt_collide(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,omega); else if(MODEL == "BGK") bgk_collide(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,omega); if(VELAV == "YES"){ if(t>=START_VELAV && t<START_VELFLUC){ u_Av = uAv[j]; v_Av = vAv[j]; 
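// Running velocity average for this fine node: vel_avLR below is passed the fractional sample
// time t+LRFACTOR rather than t, since the refined grid advances in sub-steps of the coarse
// time step.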
vel_avLR(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,u_Av,v_Av,t+LRFACTOR); uAv[j] = u_Av; vAv[j] = v_Av; } else if(t>=START_VELFLUC){ u_Av = uAv[j]; v_Av = vAv[j]; u_fluc = ufluc[j]; v_fluc = vfluc[j]; vel_flucLR(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,u_Av,v_Av,u_fluc,v_fluc,t+LRFACTOR); ufluc[j] = u_fluc; vfluc[j] = v_fluc; } } fout[f_memLR(0 ,x,y,z,pitch)] = f0 ; fout[f_memLR(1 ,x,y,z,pitch)] = f1 ; fout[f_memLR(2 ,x,y,z,pitch)] = f2 ; fout[f_memLR(3 ,x,y,z,pitch)] = f3 ; fout[f_memLR(4 ,x,y,z,pitch)] = f4 ; fout[f_memLR(5 ,x,y,z,pitch)] = f5 ; fout[f_memLR(6 ,x,y,z,pitch)] = f6 ; fout[f_memLR(7 ,x,y,z,pitch)] = f7 ; fout[f_memLR(8 ,x,y,z,pitch)] = f8 ; fout[f_memLR(9 ,x,y,z,pitch)] = f9 ; fout[f_memLR(10,x,y,z,pitch)] = f10; fout[f_memLR(11,x,y,z,pitch)] = f11; fout[f_memLR(12,x,y,z,pitch)] = f12; fout[f_memLR(13,x,y,z,pitch)] = f13; fout[f_memLR(14,x,y,z,pitch)] = f14; fout[f_memLR(15,x,y,z,pitch)] = f15; fout[f_memLR(16,x,y,z,pitch)] = f16; fout[f_memLR(17,x,y,z,pitch)] = f17; fout[f_memLR(18,x,y,z,pitch)] = f18; } // }//end else (not at edge of LR) } __global__ void LR_d_BACD(float* fin, float* fout, float omega, size_t pitch, int t,float *uAv, float *vAv, float *ufluc, float *vfluc) { int x = threadIdx.x+blockIdx.x*blockDim.x;//coord in linear mem int y = threadIdx.y+blockIdx.y*blockDim.y; int z = threadIdx.z+blockIdx.z*blockDim.z; float xcoord = LRX0+x*LRFACTOR; float ycoord = LRY0+y*LRFACTOR; float zcoord = LRZ0+z*LRFACTOR; int j = x+y*pitch+z*YLRDIM*pitch;//index on padded mem (pitch in elements) float f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18; int im = ImageFcn(xcoord,ycoord,zcoord); // if(x < 2 || x > LRX0+XLRDIM-3 || y < 2 || y > LRY0+YLRDIM-3 || z < 2 || z > LRZ0+ZLRDIM-3) // im = -1;//not valid for extraction // if(x < 1 || x > LRX0+XLRDIM-2 || y < 1 || y > LRY0+YLRDIM-2 || z < 1 || z > LRZ0+ZLRDIM-2) // { // im = -2;//not valid for second TS // } // if(x < 1 || x > XLRDIM-2 || y < 1 || y > YLRDIM-2 || z < 1 || z > ZLRDIM-2) // { // //dont do anything // } // else{ f0 = fin[j]; f1 = fin[f_memLR(1 ,x-1,y ,z ,pitch)]; f3 = fin[f_memLR(3 ,x+1,y ,z ,pitch)]; f2 = fin[f_memLR(2 ,x ,y-1,z ,pitch)]; f5 = fin[f_memLR(5 ,x-1,y-1,z ,pitch)]; f6 = fin[f_memLR(6 ,x+1,y-1,z ,pitch)]; f4 = fin[f_memLR(4 ,x ,y+1,z ,pitch)]; f7 = fin[f_memLR(7 ,x+1,y+1,z ,pitch)]; f8 = fin[f_memLR(8 ,x-1,y+1,z ,pitch)]; if(z != 0){ f9 = fin[f_memLR(9 ,x ,y ,z-1,pitch)]; f10= fin[f_memLR(10,x-1,y ,z-1,pitch)]; f11= fin[f_memLR(11,x ,y-1,z-1,pitch)]; f12= fin[f_memLR(12,x+1,y ,z-1,pitch)]; f13= fin[f_memLR(13,x ,y+1,z-1,pitch)]; } else{ f9 = fin[f_memLR(9 ,x ,y ,ZLRDIM-1,pitch)]; f10= fin[f_memLR(10,dmax_p(x-1,XLRDIM),y ,ZLRDIM-1,pitch)]; f11= fin[f_memLR(11,x ,dmax_p(y-1,YLRDIM),ZLRDIM-1,pitch)]; f12= fin[f_memLR(12,dmin_p(x+1,XLRDIM),y ,ZLRDIM-1,pitch)]; f13= fin[f_memLR(13,x ,dmin_p(y+1,YLRDIM),ZLRDIM-1,pitch)]; } if(z != ZLRDIM-1){ f14= fin[f_memLR(14,x ,y ,z+1,pitch)]; f15= fin[f_memLR(15,x-1,y ,z+1,pitch)]; f16= fin[f_memLR(16,x ,y-1,z+1,pitch)]; f17= fin[f_memLR(17,x+1,y ,z+1,pitch)]; f18= fin[f_memLR(18,x ,y+1,z+1,pitch)]; } else{ f14= fin[f_memLR(14,x ,y ,0 ,pitch)]; f15= fin[f_memLR(15,dmax_p(x-1,XLRDIM),y ,0 ,pitch)]; f16= fin[f_memLR(16,x ,dmax_p(y-1,YLRDIM),0 ,pitch)]; f17= fin[f_memLR(17,dmin_p(x+1,XLRDIM),y ,0 ,pitch)]; f18= fin[f_memLR(18,x ,dmin_p(y+1,YLRDIM),0 ,pitch)]; } if(im == 1 || im ==10){//BB fout[f_memLR(1 ,x,y,z,pitch)] = f3 ; fout[f_memLR(2 ,x,y,z,pitch)] = f4 ; fout[f_memLR(3 ,x,y,z,pitch)] = f1 ; 
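// Bounce-back at solid/forcing nodes (im==1 or im==10): each incoming distribution is written
// back along the opposite lattice direction (1<->3, 2<->4, 5<->7, 6<->8, 9<->14, 10<->17,
// 11<->18, 12<->15, 13<->16), the usual no-slip bounce-back rule.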
fout[f_memLR(4 ,x,y,z,pitch)] = f2 ; fout[f_memLR(5 ,x,y,z,pitch)] = f7 ; fout[f_memLR(6 ,x,y,z,pitch)] = f8 ; fout[f_memLR(7 ,x,y,z,pitch)] = f5 ; fout[f_memLR(8 ,x,y,z,pitch)] = f6 ; fout[f_memLR(9 ,x,y,z,pitch)] = f14; fout[f_memLR(10,x,y,z,pitch)] = f17; fout[f_memLR(11,x,y,z,pitch)] = f18; fout[f_memLR(12,x,y,z,pitch)] = f15; fout[f_memLR(13,x,y,z,pitch)] = f16; fout[f_memLR(14,x,y,z,pitch)] = f9 ; fout[f_memLR(15,x,y,z,pitch)] = f12; fout[f_memLR(16,x,y,z,pitch)] = f13; fout[f_memLR(17,x,y,z,pitch)] = f10; fout[f_memLR(18,x,y,z,pitch)] = f11; } else{ if(MODEL == "MRT") mrt_collide(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,omega); else if(MODEL == "BGK") bgk_collide(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,omega); if(VELAV == "YES"){ float u_Av, v_Av, u_fluc, v_fluc; if(t>=START_VELAV && t<START_VELFLUC){ u_Av = uAv[j]; v_Av = vAv[j]; vel_avLR(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,u_Av,v_Av,t+LRFACTOR); uAv[j] = u_Av; vAv[j] = v_Av; } else if(t>=START_VELFLUC){ u_Av = uAv[j]; v_Av = vAv[j]; u_fluc = ufluc[j]; v_fluc = vfluc[j]; vel_flucLR(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,u_Av,v_Av,u_fluc,v_fluc,t+LRFACTOR); ufluc[j] = u_fluc; vfluc[j] = v_fluc; } } fout[f_memLR(0 ,x,y,z,pitch)] = f0 ; fout[f_memLR(1 ,x,y,z,pitch)] = f1 ; fout[f_memLR(2 ,x,y,z,pitch)] = f2 ; fout[f_memLR(3 ,x,y,z,pitch)] = f3 ; fout[f_memLR(4 ,x,y,z,pitch)] = f4 ; fout[f_memLR(5 ,x,y,z,pitch)] = f5 ; fout[f_memLR(6 ,x,y,z,pitch)] = f6 ; fout[f_memLR(7 ,x,y,z,pitch)] = f7 ; fout[f_memLR(8 ,x,y,z,pitch)] = f8 ; fout[f_memLR(9 ,x,y,z,pitch)] = f9 ; fout[f_memLR(10,x,y,z,pitch)] = f10; fout[f_memLR(11,x,y,z,pitch)] = f11; fout[f_memLR(12,x,y,z,pitch)] = f12; fout[f_memLR(13,x,y,z,pitch)] = f13; fout[f_memLR(14,x,y,z,pitch)] = f14; fout[f_memLR(15,x,y,z,pitch)] = f15; fout[f_memLR(16,x,y,z,pitch)] = f16; fout[f_memLR(17,x,y,z,pitch)] = f17; fout[f_memLR(18,x,y,z,pitch)] = f18; } // }//end else (not at edge of LR) } __global__ void mrt_d_single_force(float* fin, float* fout, float omega, size_t pitch, float *FX, float *FY, float *FZ, int t,float *uAv, float *vAv, float *ufluc, float *vfluc) { int x = threadIdx.x+blockIdx.x*blockDim.x;//coord in linear mem int y = threadIdx.y+blockIdx.y*blockDim.y;//; int z = threadIdx.z+blockIdx.z*blockDim.z; int j = x+y*pitch+z*YDIM*pitch;//index on padded mem (pitch in elements) float f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18; __shared__ float sumX[BLOCKSIZEX], sumY[BLOCKSIZEX], sumZ[BLOCKSIZEX]; __shared__ int check[1]; check[0] = 0; syncthreads(); float u_Av, v_Av, u_fluc, v_fluc; // if((REFINEMENT == "YES" && x > LRX0+1 && x < LRX0+(XLRDIM-1)*LRFACTOR-1 && // y > LRY0+1 && y < LRY0+(YLRDIM-1)*LRFACTOR-1 && // z > LRZ0+1 && z < LRZ0+(ZLRDIM-1)*LRFACTOR-1 || // (x>XDIM-1))) // { // } // else{ int im = ImageFcn(x,y,z); f0 = fin[j]; f1 = fin[f_mem(1 ,x-1,y ,z ,pitch)]; f3 = fin[f_mem(3 ,x+1,y ,z ,pitch)]; f2 = fin[f_mem(2 ,x ,y-1,z ,pitch)]; f5 = fin[f_mem(5 ,x-1,y-1,z ,pitch)]; f6 = fin[f_mem(6 ,x+1,y-1,z ,pitch)]; f4 = fin[f_mem(4 ,x ,y+1,z ,pitch)]; f7 = fin[f_mem(7 ,x+1,y+1,z ,pitch)]; f8 = fin[f_mem(8 ,x-1,y+1,z ,pitch)]; if(z != 0){ f9 = fin[f_mem(9 ,x ,y ,z-1,pitch)]; f10= fin[f_mem(10,x-1,y ,z-1,pitch)]; f11= fin[f_mem(11,x ,y-1,z-1,pitch)]; f12= fin[f_mem(12,x+1,y ,z-1,pitch)]; f13= fin[f_mem(13,x ,y+1,z-1,pitch)]; } else{ f9 = fin[f_mem(9 ,x ,y ,ZDIM-1,pitch)]; f10= fin[f_mem(10,dmax_p(x-1,XDIM),y ,ZDIM-1,pitch)]; f11= fin[f_mem(11,x 
,dmax_p(y-1,YDIM),ZDIM-1,pitch)]; f12= fin[f_mem(12,dmin_p(x+1,XDIM),y ,ZDIM-1,pitch)]; f13= fin[f_mem(13,x ,dmin_p(y+1,YDIM),ZDIM-1,pitch)]; } if(z != ZDIM-1){ f14= fin[f_mem(14,x ,y ,z+1,pitch)]; f15= fin[f_mem(15,x-1,y ,z+1,pitch)]; f16= fin[f_mem(16,x ,y-1,z+1,pitch)]; f17= fin[f_mem(17,x+1,y ,z+1,pitch)]; f18= fin[f_mem(18,x ,y+1,z+1,pitch)]; } else{ f14= fin[f_mem(14,x ,y ,0 ,pitch)]; f15= fin[f_mem(15,dmax_p(x-1,XDIM),y ,0 ,pitch)]; f16= fin[f_mem(16,x ,dmax_p(y-1,YDIM),0 ,pitch)]; f17= fin[f_mem(17,dmin_p(x+1,XDIM),y ,0 ,pitch)]; f18= fin[f_mem(18,x ,dmin_p(y+1,YDIM),0 ,pitch)]; } if(im == 1 || im == 10){//BB if(im == 10){ check[0] = 1; sumX[threadIdx.x]=2.f*f1-2.f*f3+2.f*f5+2.f*f8-2.f*f6-2.f*f7+2.f*f10-2.f*f12+2.f*f15-2.f*f17; sumY[threadIdx.x]=2.f*f2-2.f*f4+2.f*f5-2.f*f8+2.f*f6-2.f*f7+2.f*f11-2.f*f13+2.f*f16-2.f*f18; sumZ[threadIdx.x]=2.f*f9+2.f*f10+2.f*f11+2.f*f12+2.f*f13-2.f*f14-2.f*f15-2.f*f16-2.f*f17-2.f*f18; } else{ sumX[threadIdx.x]=0.f; sumY[threadIdx.x]=0.f; sumZ[threadIdx.x]=0.f; } fout[j+pitch*YDIM*ZDIM*1 ] = f3 ; fout[j+pitch*YDIM*ZDIM*2 ] = f4 ; fout[j+pitch*YDIM*ZDIM*3 ] = f1 ; fout[j+pitch*YDIM*ZDIM*4 ] = f2 ; fout[j+pitch*YDIM*ZDIM*5 ] = f7 ; fout[j+pitch*YDIM*ZDIM*6 ] = f8 ; fout[j+pitch*YDIM*ZDIM*7 ] = f5 ; fout[j+pitch*YDIM*ZDIM*8 ] = f6 ; fout[j+pitch*YDIM*ZDIM*9 ] = f14; fout[j+pitch*YDIM*ZDIM*10] = f17; fout[j+pitch*YDIM*ZDIM*11] = f18; fout[j+pitch*YDIM*ZDIM*12] = f15; fout[j+pitch*YDIM*ZDIM*13] = f16; fout[j+pitch*YDIM*ZDIM*14] = f9 ; fout[j+pitch*YDIM*ZDIM*15] = f12; fout[j+pitch*YDIM*ZDIM*16] = f13; fout[j+pitch*YDIM*ZDIM*17] = f10; fout[j+pitch*YDIM*ZDIM*18] = f11; } else{ sumX[threadIdx.x]=0.f; sumY[threadIdx.x]=0.f; sumZ[threadIdx.x]=0.f; boundaries_force(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,y,z,im); if(im == 100)//north outlet { f0 = fin[f_mem(0 ,x,y-1,z,pitch)]; f1 = fin[f_mem(1 ,x,y-1,z,pitch)]; f3 = fin[f_mem(3 ,x,y-1,z,pitch)]; f2 = fin[f_mem(2 ,x,y-1,z,pitch)]; f5 = fin[f_mem(5 ,x,y-1,z,pitch)]; f6 = fin[f_mem(6 ,x,y-1,z,pitch)]; f4 = fin[f_mem(4 ,x,y-1,z,pitch)]; f7 = fin[f_mem(7 ,x,y-1,z,pitch)]; f8 = fin[f_mem(8 ,x,y-1,z,pitch)]; f9 = fin[f_mem(9 ,x,y-1,z,pitch)]; f10= fin[f_mem(10,x,y-1,z,pitch)]; f11= fin[f_mem(11,x,y-1,z,pitch)]; f12= fin[f_mem(12,x,y-1,z,pitch)]; f13= fin[f_mem(13,x,y-1,z,pitch)]; f14= fin[f_mem(14,x,y-1,z,pitch)]; f15= fin[f_mem(15,x,y-1,z,pitch)]; f16= fin[f_mem(16,x,y-1,z,pitch)]; f17= fin[f_mem(17,x,y-1,z,pitch)]; f18= fin[f_mem(18,x,y-1,z,pitch)]; float rho,u,v,w; rho = 1.0f; u = f1-f3+f5-f6-f7+f8+f10-f12+f15-f17; v = f2-f4+f5+f6-f7-f8+f11-f13+f16-f18; w = f9+f10+f11+f12+f13-f14-f15-f16-f17-f18; float m1,m2,m4,m6,m8,m9,m10,m11,m12,m13,m14,m15,m16,m17,m18; m1 = -30.f*f0+-11.f*f1+-11.f*f2+-11.f*f3+-11.f*f4+ 8.f*f5+ 8.f*f6+ 8.f*f7+ 8.f*f8+-11.f*f9+ 8.f*f10+ 8.f*f11+ 8.f*f12+ 8.f*f13+-11.f*f14+ 8.f*f15+ 8.f*f16+ 8.f*f17+ 8.f*f18; m2 = 12.f*f0+ -4.f*f1+ -4.f*f2+ -4.f*f3+ -4.f*f4+ f5+ f6+ f7+ f8+ -4.f*f9+ f10+ f11+ f12+ f13+ -4.f*f14+ f15+ f16+ f17+ f18; m4 = -4.f*f1 + 4.f*f3 + f5+ - f6+ - f7+ f8 + f10 + - f12 + f15 + - f17 ; m6 = -4.f*f2 + 4.f*f4+ f5+ f6+ - f7+ - f8 + f11 + - f13 + f16 + - f18; m8 = + -4.f*f9+ f10+ f11+ f12+ f13+ 4.f*f14+ - f15+ - f16+ - f17+ - f18; m9 = 2.f*f1+ - f2+ 2.f*f3+ - f4+ f5+ f6+ f7+ f8+ - f9+ f10+ -2.f*f11+ f12+ -2.f*f13+ - f14+ f15+ -2.f*f16+ f17+ -2.f*f18; m10 = -4.f*f1+ 2.f*f2+ -4.f*f3+ 2.f*f4+ f5+ f6+ f7+ f8+ 2.f*f9+ f10+ -2.f*f11+ f12+ -2.f*f13+ 2.f*f14+ f15+ -2.f*f16+ f17+ -2.f*f18; m11 = f2 + f4+ f5+ f6+ f7+ f8+ - f9+ - f10 + - f12 + - f14+ - f15 + - f17 ; m12 
= -2.f*f2 -2.f*f4+ f5+ f6+ f7+ f8+ 2.f*f9+ - f10 + - f12 + 2.f*f14+ - f15 + - f17 ; m13 = f5+ - f6+ f7+ - f8 ; m14 = f11 + - f13 + - f16 + f18; m15 = f10 + - f12 + - f15 + f17 ; m16 = f5+ - f6+ - f7+ f8 - f10 + f12 + - f15 + f17 ; m17 = - f5+ - f6+ f7+ f8 + f11 + - f13 + f16 + - f18; m18 = f10+ - f11+ f12+ - f13 + - f15+ f16+ - f17+ f18; f0 =(0.052631579f*rho +- 0.012531328f*(m1)+ 0.047619048f*(m2)); f1 =(0.052631579f*rho+ 0.1f*u +-0.0045948204f*(m1)+-0.015873016f*(m2)+ -0.1f*(m4) + 0.055555556f*((m9)-m10)); f2 =(0.052631579f*rho + 0.1f*v +-0.0045948204f*(m1)+-0.015873016f*(m2) + -0.1f*(m6) +-0.027777778f*((m9)-m10)+ 0.083333333f*((m11)-m12)); f3 =(0.052631579f*rho+ -0.1f*u +-0.0045948204f*(m1)+-0.015873016f*(m2)+ 0.1f*(m4) + 0.055555556f*((m9)-m10)); f4 =(0.052631579f*rho + -0.1f*v +-0.0045948204f*(m1)+-0.015873016f*(m2) + 0.1f*(m6) +-0.027777778f*((m9)-m10)+ 0.083333333f*((m11)-m12)); f5 =(0.052631579f*rho+ 0.1f*u+ 0.1f*v + 0.0033416876f*(m1)+ 0.003968254f*(m2)+ 0.025f*(m4+m6) +0.013888889f*(m10)+0.041666667f*(m12)+0.125f*( m16-m17)+ (0.027777778f*(m9) +0.083333333f*(m11)+( 0.25f*(m13)))); f6 =(0.052631579f*rho+ -0.1f*u+ 0.1f*v + 0.0033416876f*(m1)+ 0.003968254f*(m2)+-0.025f*(m4-m6) +0.013888889f*(m10)+0.041666667f*(m12)+0.125f*(-m16-m17)+ (0.027777778f*(m9) +0.083333333f*(m11)+(-0.25f*(m13)))); f7 =(0.052631579f*rho+ -0.1f*u+ -0.1f*v + 0.0033416876f*(m1)+ 0.003968254f*(m2)+-0.025f*(m4+m6) +0.013888889f*(m10)+0.041666667f*(m12)+0.125f*(-m16+m17)+ (0.027777778f*(m9) +0.083333333f*(m11)+( 0.25f*(m13)))); f8 =(0.052631579f*rho+ 0.1f*u+ -0.1f*v + 0.0033416876f*(m1)+ 0.003968254f*(m2)+ 0.025f*(m4-m6) +0.013888889f*(m10)+0.041666667f*(m12)+0.125f*( m16+m17)+ (0.027777778f*(m9) +0.083333333f*(m11)+(-0.25f*(m13)))); f9 =(0.052631579f*rho + 0.1f*w+-0.0045948204f*(m1)+-0.015873016f*(m2) + -0.1f*(m8)+-0.027777778f*((m9)-m10)+-0.083333333f*((m11)-m12)); f10=(0.052631579f*rho+ 0.1f*u + 0.1f*w+ 0.0033416876f*(m1)+ 0.003968254f*(m2)+ 0.025f*(m4+m8) +0.013888889f*(m10)-0.041666667f*(m12)+0.125f*(-m16+m18)+ (0.027777778f*(m9) -0.083333333f*(m11)+( 0.25f*(m15)))); f11=(0.052631579f*rho + 0.1f*v+ 0.1f*w+ 0.0033416876f*(m1)+ 0.003968254f*(m2) + 0.025f*(m6+m8)+0.125f*( m17-m18)-0.027777778f*(m10)+(-0.055555556f*(m9) +( 0.25f*(m14)))); f12=(0.052631579f*rho+ -0.1f*u + 0.1f*w+ 0.0033416876f*(m1)+ 0.003968254f*(m2)+-0.025f*(m4-m8) +0.013888889f*(m10)-0.041666667f*(m12)+0.125f*( m16+m18)+ (0.027777778f*(m9) -0.083333333f*(m11)+(-0.25f*(m15)))); f13=(0.052631579f*rho + -0.1f*v+ 0.1f*w+ 0.0033416876f*(m1)+ 0.003968254f*(m2) + -0.025f*(m6-m8)+0.125f*(-m17-m18)-0.027777778f*(m10)+(-0.055555556f*(m9) +(-0.25f*(m14)))); f14=(0.052631579f*rho + -0.1f*w+-0.0045948204f*(m1)+-0.015873016f*(m2) + 0.1f*(m8)+-0.027777778f*((m9)-m10)+-0.083333333f*((m11)-m12)); f15=(0.052631579f*rho+ 0.1f*u + -0.1f*w+ 0.0033416876f*(m1)+ 0.003968254f*(m2)+ 0.025f*(m4-m8) +0.013888889f*(m10)-0.041666667f*(m12)+0.125f*(-m16-m18)+ (0.027777778f*(m9) -0.083333333f*(m11)+(-0.25f*(m15)))); f16=(0.052631579f*rho + 0.1f*v+ -0.1f*w+ 0.0033416876f*(m1)+ 0.003968254f*(m2) + 0.025f*(m6-m8)+0.125f*( m17+m18)-0.027777778f*(m10)+(-0.055555556f*(m9) +(-0.25f*(m14)))); f17=(0.052631579f*rho+ -0.1f*u + -0.1f*w+ 0.0033416876f*(m1)+ 0.003968254f*(m2)+-0.025f*(m4+m8) +0.013888889f*(m10)-0.041666667f*(m12)+0.125f*( m16-m18)+ (0.027777778f*(m9) -0.083333333f*(m11)+( 0.25f*(m15)))); f18=(0.052631579f*rho + -0.1f*v+ -0.1f*w+ 0.0033416876f*(m1)+ 0.003968254f*(m2) + -0.025f*(m6+m8)+0.125f*(-m17+m18)-0.027777778f*(m10)+(-0.055555556f*(m9) +( 0.25f*(m14)))); } if(im == 
200)//south inlet { f0 = fin[f_mem(0 ,x,y+1,z,pitch)]; f1 = fin[f_mem(1 ,x,y+1,z,pitch)]; f3 = fin[f_mem(3 ,x,y+1,z,pitch)]; f2 = fin[f_mem(2 ,x,y+1,z,pitch)]; f5 = fin[f_mem(5 ,x,y+1,z,pitch)]; f6 = fin[f_mem(6 ,x,y+1,z,pitch)]; f4 = fin[f_mem(4 ,x,y+1,z,pitch)]; f7 = fin[f_mem(7 ,x,y+1,z,pitch)]; f8 = fin[f_mem(8 ,x,y+1,z,pitch)]; f9 = fin[f_mem(9 ,x,y+1,z,pitch)]; f10= fin[f_mem(10,x,y+1,z,pitch)]; f11= fin[f_mem(11,x,y+1,z,pitch)]; f12= fin[f_mem(12,x,y+1,z,pitch)]; f13= fin[f_mem(13,x,y+1,z,pitch)]; f14= fin[f_mem(14,x,y+1,z,pitch)]; f15= fin[f_mem(15,x,y+1,z,pitch)]; f16= fin[f_mem(16,x,y+1,z,pitch)]; f17= fin[f_mem(17,x,y+1,z,pitch)]; f18= fin[f_mem(18,x,y+1,z,pitch)]; float rho,u,v,w; rho = f0+f1+f2+f3+f4+f5+f6+f7+f8+f9+f10+f11+f12+f13+f14+f15+f16+f17+f18; u = 0.f;//f1-f3+f5-f6-f7+f8+f10-f12+f15-f17; v = UMAX;//f2-f4+f5+f6-f7-f8+f11-f13+f16-f18; w = 0.f;//f9+f10+f11+f12+f13-f14-f15-f16-f17-f18; float m1,m2,m4,m6,m8,m9,m10,m11,m12,m13,m14,m15,m16,m17,m18; m1 = -30.f*f0+-11.f*f1+-11.f*f2+-11.f*f3+-11.f*f4+ 8.f*f5+ 8.f*f6+ 8.f*f7+ 8.f*f8+-11.f*f9+ 8.f*f10+ 8.f*f11+ 8.f*f12+ 8.f*f13+-11.f*f14+ 8.f*f15+ 8.f*f16+ 8.f*f17+ 8.f*f18; m2 = 12.f*f0+ -4.f*f1+ -4.f*f2+ -4.f*f3+ -4.f*f4+ f5+ f6+ f7+ f8+ -4.f*f9+ f10+ f11+ f12+ f13+ -4.f*f14+ f15+ f16+ f17+ f18; m4 = -4.f*f1 + 4.f*f3 + f5+ - f6+ - f7+ f8 + f10 + - f12 + f15 + - f17 ; m6 = -4.f*f2 + 4.f*f4+ f5+ f6+ - f7+ - f8 + f11 + - f13 + f16 + - f18; m8 = + -4.f*f9+ f10+ f11+ f12+ f13+ 4.f*f14+ - f15+ - f16+ - f17+ - f18; m9 = 2.f*f1+ - f2+ 2.f*f3+ - f4+ f5+ f6+ f7+ f8+ - f9+ f10+ -2.f*f11+ f12+ -2.f*f13+ - f14+ f15+ -2.f*f16+ f17+ -2.f*f18; m10 = -4.f*f1+ 2.f*f2+ -4.f*f3+ 2.f*f4+ f5+ f6+ f7+ f8+ 2.f*f9+ f10+ -2.f*f11+ f12+ -2.f*f13+ 2.f*f14+ f15+ -2.f*f16+ f17+ -2.f*f18; m11 = f2 + f4+ f5+ f6+ f7+ f8+ - f9+ - f10 + - f12 + - f14+ - f15 + - f17 ; m12 = -2.f*f2 -2.f*f4+ f5+ f6+ f7+ f8+ 2.f*f9+ - f10 + - f12 + 2.f*f14+ - f15 + - f17 ; m13 = f5+ - f6+ f7+ - f8 ; m14 = f11 + - f13 + - f16 + f18; m15 = f10 + - f12 + - f15 + f17 ; m16 = f5+ - f6+ - f7+ f8 - f10 + f12 + - f15 + f17 ; m17 = - f5+ - f6+ f7+ f8 + f11 + - f13 + f16 + - f18; m18 = f10+ - f11+ f12+ - f13 + - f15+ f16+ - f17+ f18; f0 =(0.052631579f*rho +- 0.012531328f*(m1)+ 0.047619048f*(m2)); f1 =(0.052631579f*rho+ 0.1f*u +-0.0045948204f*(m1)+-0.015873016f*(m2)+ -0.1f*(m4) + 0.055555556f*((m9)-m10)); f2 =(0.052631579f*rho + 0.1f*v +-0.0045948204f*(m1)+-0.015873016f*(m2) + -0.1f*(m6) +-0.027777778f*((m9)-m10)+ 0.083333333f*((m11)-m12)); f3 =(0.052631579f*rho+ -0.1f*u +-0.0045948204f*(m1)+-0.015873016f*(m2)+ 0.1f*(m4) + 0.055555556f*((m9)-m10)); f4 =(0.052631579f*rho + -0.1f*v +-0.0045948204f*(m1)+-0.015873016f*(m2) + 0.1f*(m6) +-0.027777778f*((m9)-m10)+ 0.083333333f*((m11)-m12)); f5 =(0.052631579f*rho+ 0.1f*u+ 0.1f*v + 0.0033416876f*(m1)+ 0.003968254f*(m2)+ 0.025f*(m4+m6) +0.013888889f*(m10)+0.041666667f*(m12)+0.125f*( m16-m17)+ (0.027777778f*(m9) +0.083333333f*(m11)+( 0.25f*(m13)))); f6 =(0.052631579f*rho+ -0.1f*u+ 0.1f*v + 0.0033416876f*(m1)+ 0.003968254f*(m2)+-0.025f*(m4-m6) +0.013888889f*(m10)+0.041666667f*(m12)+0.125f*(-m16-m17)+ (0.027777778f*(m9) +0.083333333f*(m11)+(-0.25f*(m13)))); f7 =(0.052631579f*rho+ -0.1f*u+ -0.1f*v + 0.0033416876f*(m1)+ 0.003968254f*(m2)+-0.025f*(m4+m6) +0.013888889f*(m10)+0.041666667f*(m12)+0.125f*(-m16+m17)+ (0.027777778f*(m9) +0.083333333f*(m11)+( 0.25f*(m13)))); f8 =(0.052631579f*rho+ 0.1f*u+ -0.1f*v + 0.0033416876f*(m1)+ 0.003968254f*(m2)+ 0.025f*(m4-m6) +0.013888889f*(m10)+0.041666667f*(m12)+0.125f*( m16+m17)+ (0.027777778f*(m9) 
+0.083333333f*(m11)+(-0.25f*(m13)))); f9 =(0.052631579f*rho + 0.1f*w+-0.0045948204f*(m1)+-0.015873016f*(m2) + -0.1f*(m8)+-0.027777778f*((m9)-m10)+-0.083333333f*((m11)-m12)); f10=(0.052631579f*rho+ 0.1f*u + 0.1f*w+ 0.0033416876f*(m1)+ 0.003968254f*(m2)+ 0.025f*(m4+m8) +0.013888889f*(m10)-0.041666667f*(m12)+0.125f*(-m16+m18)+ (0.027777778f*(m9) -0.083333333f*(m11)+( 0.25f*(m15)))); f11=(0.052631579f*rho + 0.1f*v+ 0.1f*w+ 0.0033416876f*(m1)+ 0.003968254f*(m2) + 0.025f*(m6+m8)+0.125f*( m17-m18)-0.027777778f*(m10)+(-0.055555556f*(m9) +( 0.25f*(m14)))); f12=(0.052631579f*rho+ -0.1f*u + 0.1f*w+ 0.0033416876f*(m1)+ 0.003968254f*(m2)+-0.025f*(m4-m8) +0.013888889f*(m10)-0.041666667f*(m12)+0.125f*( m16+m18)+ (0.027777778f*(m9) -0.083333333f*(m11)+(-0.25f*(m15)))); f13=(0.052631579f*rho + -0.1f*v+ 0.1f*w+ 0.0033416876f*(m1)+ 0.003968254f*(m2) + -0.025f*(m6-m8)+0.125f*(-m17-m18)-0.027777778f*(m10)+(-0.055555556f*(m9) +(-0.25f*(m14)))); f14=(0.052631579f*rho + -0.1f*w+-0.0045948204f*(m1)+-0.015873016f*(m2) + 0.1f*(m8)+-0.027777778f*((m9)-m10)+-0.083333333f*((m11)-m12)); f15=(0.052631579f*rho+ 0.1f*u + -0.1f*w+ 0.0033416876f*(m1)+ 0.003968254f*(m2)+ 0.025f*(m4-m8) +0.013888889f*(m10)-0.041666667f*(m12)+0.125f*(-m16-m18)+ (0.027777778f*(m9) -0.083333333f*(m11)+(-0.25f*(m15)))); f16=(0.052631579f*rho + 0.1f*v+ -0.1f*w+ 0.0033416876f*(m1)+ 0.003968254f*(m2) + 0.025f*(m6-m8)+0.125f*( m17+m18)-0.027777778f*(m10)+(-0.055555556f*(m9) +(-0.25f*(m14)))); f17=(0.052631579f*rho+ -0.1f*u + -0.1f*w+ 0.0033416876f*(m1)+ 0.003968254f*(m2)+-0.025f*(m4+m8) +0.013888889f*(m10)-0.041666667f*(m12)+0.125f*( m16-m18)+ (0.027777778f*(m9) -0.083333333f*(m11)+( 0.25f*(m15)))); f18=(0.052631579f*rho + -0.1f*v+ -0.1f*w+ 0.0033416876f*(m1)+ 0.003968254f*(m2) + -0.025f*(m6+m8)+0.125f*(-m17+m18)-0.027777778f*(m10)+(-0.055555556f*(m9) +( 0.25f*(m14)))); } float Cs = 0.01f; //if(XDIM-x < 64.f) ////Cs = 0.01f+(x-64.f)/64.f*(x-64.f)/64.f*0.1f; //Cs = 0.01f*pow(2.f,((x-448.f)/16.f)); if(MODEL == "MRT") mrt_collide_LES(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,omega,CS); else if(MODEL == "BGK") bgk_collide(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,omega); if(VELAV == "YES"){ if(t>=START_VELAV && t<START_VELFLUC){ u_Av = uAv[j]; v_Av = vAv[j]; vel_av(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,u_Av,v_Av,t); uAv[j] = u_Av; vAv[j] = v_Av; } else if(t>=START_VELFLUC){ u_Av = uAv[j]; v_Av = vAv[j]; u_fluc = ufluc[j]; v_fluc = vfluc[j]; vel_fluc(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,u_Av,v_Av,u_fluc,v_fluc,t); ufluc[j] = u_fluc; vfluc[j] = v_fluc; } } fout[f_mem(0 ,x,y,z,pitch)] = f0 ; fout[f_mem(1 ,x,y,z,pitch)] = f1 ; fout[f_mem(2 ,x,y,z,pitch)] = f2 ; fout[f_mem(3 ,x,y,z,pitch)] = f3 ; fout[f_mem(4 ,x,y,z,pitch)] = f4 ; fout[f_mem(5 ,x,y,z,pitch)] = f5 ; fout[f_mem(6 ,x,y,z,pitch)] = f6 ; fout[f_mem(7 ,x,y,z,pitch)] = f7 ; fout[f_mem(8 ,x,y,z,pitch)] = f8 ; fout[f_mem(9 ,x,y,z,pitch)] = f9 ; fout[f_mem(10,x,y,z,pitch)] = f10; fout[f_mem(11,x,y,z,pitch)] = f11; fout[f_mem(12,x,y,z,pitch)] = f12; fout[f_mem(13,x,y,z,pitch)] = f13; fout[f_mem(14,x,y,z,pitch)] = f14; fout[f_mem(15,x,y,z,pitch)] = f15; fout[f_mem(16,x,y,z,pitch)] = f16; fout[f_mem(17,x,y,z,pitch)] = f17; fout[f_mem(18,x,y,z,pitch)] = f18; } syncthreads(); if(check[0] == 1 && t>=STARTF && REFINEMENT == "NO"){ //reduction for force int nTotalThreads = blockDim.x; while(nTotalThreads > 1){ int halfPoint = (nTotalThreads >> 1); if(threadIdx.x < halfPoint){ sumX[threadIdx.x] += 
sumX[threadIdx.x+halfPoint]; sumY[threadIdx.x] += sumY[threadIdx.x+halfPoint]; sumZ[threadIdx.x] += sumZ[threadIdx.x+halfPoint]; } syncthreads(); nTotalThreads = halfPoint; } if(threadIdx.x == 0){ atomicAdd(&FX[t],sumX[0]); atomicAdd(&FY[t],sumY[0]); atomicAdd(&FZ[t],sumZ[0]); } } // } } __global__ void mrt_d_single(float* fA, float* fB, float omega, size_t pitch)//pitch in elements { int x = threadIdx.x+blockIdx.x*blockDim.x;//coord in linear mem int y = threadIdx.y+blockIdx.y*blockDim.y; int z = threadIdx.z+blockIdx.z*blockDim.z; int j = x+y*pitch+z*YDIM*pitch;//index on padded mem (pitch in elements) int im = ImageFcn(x,y,z); float f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18; // if(REFINEMENT == "YES" && x > LRX0+1 && x < LRX0+(XLRDIM-1)*LRFACTOR-1 // && y > LRY0+1 && y < LRY0+(YLRDIM-1)*LRFACTOR-1 && z > LRZ0+1 && z < LRZ0+(ZLRDIM-1)*LRFACTOR-1 || // (x>XDIM-1)){ // } // else{ f0 = fA[j]; f1 = fA[f_mem(1 ,x-1,y ,z ,pitch)]; f3 = fA[f_mem(3 ,x+1,y ,z ,pitch)]; f2 = fA[f_mem(2 ,x ,y-1,z ,pitch)]; f5 = fA[f_mem(5 ,x-1,y-1,z ,pitch)]; f6 = fA[f_mem(6 ,x+1,y-1,z ,pitch)]; f4 = fA[f_mem(4 ,x ,y+1,z ,pitch)]; f7 = fA[f_mem(7 ,x+1,y+1,z ,pitch)]; f8 = fA[f_mem(8 ,x-1,y+1,z ,pitch)]; f9 = fA[f_mem(9 ,x ,y ,z-1,pitch)]; f10= fA[f_mem(10,x-1,y ,z-1,pitch)]; f11= fA[f_mem(11,x ,y-1,z-1,pitch)]; f12= fA[f_mem(12,x+1,y ,z-1,pitch)]; f13= fA[f_mem(13,x ,y+1,z-1,pitch)]; f14= fA[f_mem(14,x ,y ,z+1,pitch)]; f15= fA[f_mem(15,x-1,y ,z+1,pitch)]; f16= fA[f_mem(16,x ,y-1,z+1,pitch)]; f17= fA[f_mem(17,x+1,y ,z+1,pitch)]; //f18= fA[f_mem(18,x ,y+1,dmin(z+1,ZDIM),pitch)]; if(z != ZDIM-1) f18= fA[f_mem(18,x ,y+1,z+1,pitch)]; if(im == 1 || im ==10){//BB fB[f_mem(1 ,x,y,z,pitch)] = f3 ; fB[f_mem(2 ,x,y,z,pitch)] = f4 ; fB[f_mem(3 ,x,y,z,pitch)] = f1 ; fB[f_mem(4 ,x,y,z,pitch)] = f2 ; fB[f_mem(5 ,x,y,z,pitch)] = f7 ; fB[f_mem(6 ,x,y,z,pitch)] = f8 ; fB[f_mem(7 ,x,y,z,pitch)] = f5 ; fB[f_mem(8 ,x,y,z,pitch)] = f6 ; fB[f_mem(9 ,x,y,z,pitch)] = f14; fB[f_mem(10,x,y,z,pitch)] = f17; fB[f_mem(11,x,y,z,pitch)] = f18; fB[f_mem(12,x,y,z,pitch)] = f15; fB[f_mem(13,x,y,z,pitch)] = f16; fB[f_mem(14,x,y,z,pitch)] = f9 ; fB[f_mem(15,x,y,z,pitch)] = f12; fB[f_mem(16,x,y,z,pitch)] = f13; fB[f_mem(17,x,y,z,pitch)] = f10; fB[f_mem(18,x,y,z,pitch)] = f11; } else{ boundaries(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,y,z,im); if(MODEL == "MRT") mrt_collide(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,omega); else if(MODEL == "BGK") bgk_collide(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,omega); fB[f_mem(0 ,x,y,z,pitch)] = f0 ; fB[f_mem(1 ,x,y,z,pitch)] = f1 ; fB[f_mem(2 ,x,y,z,pitch)] = f2 ; fB[f_mem(3 ,x,y,z,pitch)] = f3 ; fB[f_mem(4 ,x,y,z,pitch)] = f4 ; fB[f_mem(5 ,x,y,z,pitch)] = f5 ; fB[f_mem(6 ,x,y,z,pitch)] = f6 ; fB[f_mem(7 ,x,y,z,pitch)] = f7 ; fB[f_mem(8 ,x,y,z,pitch)] = f8 ; fB[f_mem(9 ,x,y,z,pitch)] = f9 ; fB[f_mem(10,x,y,z,pitch)] = f10; fB[f_mem(11,x,y,z,pitch)] = f11; fB[f_mem(12,x,y,z,pitch)] = f12; fB[f_mem(13,x,y,z,pitch)] = f13; fB[f_mem(14,x,y,z,pitch)] = f14; fB[f_mem(15,x,y,z,pitch)] = f15; fB[f_mem(16,x,y,z,pitch)] = f16; fB[f_mem(17,x,y,z,pitch)] = f17; fB[f_mem(18,x,y,z,pitch)] = f18; } // } } __device__ __inline__ float ld_gb1_cg(const float *addr){ float return_value; asm("ld.global.cg.f32 %0, [%1];" : "=f"(return_value) : "l"(addr)); return return_value; } __global__ void initialize_single(float *f, size_t pitch)//pitch in elements { int x = threadIdx.x+blockIdx.x*blockDim.x;//coord in linear mem int y = 
threadIdx.y+blockIdx.y*blockDim.y; int z = threadIdx.z+blockIdx.z*blockDim.z; int j = x+y*pitch+z*YDIM*pitch;//index on padded mem (pitch in elements) int im = ImageFcn(x,y,z); float u,v,w,rho,usqr; rho = 1.f; u = 0.0f; v = UMAX; w = 0.0f; if(im == 10 || im == 1){ u = 0.0f; v = 0.0f; w = 0.0f; } //if(x == 3 ) u = 0.1f; usqr = u*u+v*v+w*w; if(MODEL == "BGK"){ f[j+0 *pitch*YDIM*ZDIM]= 1.0f/3.0f*(rho-1.5f*usqr); f[j+1 *pitch*YDIM*ZDIM]= 1.0f/18.0f*(rho+3.0f*u+4.5f*u*u-1.5f*usqr); f[j+2 *pitch*YDIM*ZDIM]= 1.0f/18.0f*(rho+3.0f*v+4.5f*v*v-1.5f*usqr); f[j+3 *pitch*YDIM*ZDIM]= 1.0f/18.0f*(rho-3.0f*u+4.5f*u*u-1.5f*usqr); f[j+4 *pitch*YDIM*ZDIM]= 1.0f/18.0f*(rho-3.0f*v+4.5f*v*v-1.5f*usqr); f[j+5 *pitch*YDIM*ZDIM]= 1.0f/36.0f*(rho+3.0f*(u+v)+4.5f*(u+v)*(u+v)-1.5f*usqr); f[j+6 *pitch*YDIM*ZDIM]= 1.0f/36.0f*(rho+3.0f*(-u+v)+4.5f*(-u+v)*(-u+v)-1.5f*usqr); f[j+7 *pitch*YDIM*ZDIM]= 1.0f/36.0f*(rho+3.0f*(-u-v)+4.5f*(-u-v)*(-u-v)-1.5f*usqr); f[j+8 *pitch*YDIM*ZDIM]= 1.0f/36.0f*(rho+3.0f*(u-v)+4.5f*(u-v)*(u-v)-1.5f*usqr); f[j+9 *pitch*YDIM*ZDIM]= 1.0f/18.0f*(rho+3.0f*w+4.5f*w*w-1.5f*usqr); f[j+10*pitch*YDIM*ZDIM]= 1.0f/36.0f*(rho+3.0f*(u+w)+4.5f*(u+w)*(u+w)-1.5f*usqr); f[j+11*pitch*YDIM*ZDIM]= 1.0f/36.0f*(rho+3.0f*(v+w)+4.5f*(v+w)*(v+w)-1.5f*usqr); f[j+12*pitch*YDIM*ZDIM]= 1.0f/36.0f*(rho+3.0f*(-u+w)+4.5f*(-u+w)*(-u+w)-1.5f*usqr); f[j+13*pitch*YDIM*ZDIM]= 1.0f/36.0f*(rho+3.0f*(-v+w)+4.5f*(-v+w)*(-v+w)-1.5f*usqr); f[j+14*pitch*YDIM*ZDIM]= 1.0f/18.0f*(rho-3.0f*w+4.5f*w*w-1.5f*usqr); f[j+15*pitch*YDIM*ZDIM]= 1.0f/36.0f*(rho+3.0f*(u-w)+4.5f*(u-w)*(u-w)-1.5f*usqr); f[j+16*pitch*YDIM*ZDIM]= 1.0f/36.0f*(rho+3.0f*(v-w)+4.5f*(v-w)*(v-w)-1.5f*usqr); f[j+17*pitch*YDIM*ZDIM]= 1.0f/36.0f*(rho+3.0f*(-u-w)+4.5f*(-u-w)*(-u-w)-1.5f*usqr); f[j+18*pitch*YDIM*ZDIM]= 1.0f/36.0f*(rho+3.0f*(-v-w)+4.5f*(-v-w)*(-v-w)-1.5f*usqr); } else{ float f0 = 0.1904761791f*rho+-0.597127747f*usqr ; float f1 = 0.1031746045f*rho+ 0.032375918f*usqr+ 0.1666666667f*u ; float f2 = 0.1031746045f*rho+ 0.032375918f*usqr+ 0.1666666667f*v ; float f3 = 0.1031746045f*rho+ 0.032375918f*usqr+ -0.1666666667f*u ; float f4 = 0.1031746045f*rho+ 0.032375918f*usqr+ -0.1666666667f*v ; float f5 = 0.0158730149f*rho+ 0.033572690f*usqr+ 0.083333333f*( u+v) ; float f6 = 0.0158730149f*rho+ 0.033572690f*usqr+ -0.083333333f*( u-v) ; float f7 = 0.0158730149f*rho+ 0.033572690f*usqr+ -0.083333333f*( u+v) ; float f8 = 0.0158730149f*rho+ 0.033572690f*usqr+ 0.083333333f*( u-v) ; float f9 = 0.1031746045f*rho+ 0.032375918f*usqr+ 0.1666666667f*w; float f10= 0.0158730149f*rho+ 0.033572690f*usqr+ 0.083333333f*( u+w) ; float f11= 0.0158730149f*rho+ 0.033572690f*usqr+ 0.083333333f*( v+w); float f12= 0.0158730149f*rho+ 0.033572690f*usqr+ -0.083333333f*( u-w) ; float f13= 0.0158730149f*rho+ 0.033572690f*usqr+ -0.083333333f*( v-w); float f14= 0.1031746045f*rho+ 0.032375918f*usqr+ -0.1666666667f*w; float f15= 0.0158730149f*rho+ 0.033572690f*usqr+ 0.083333333f*(u-w) ; float f16= 0.0158730149f*rho+ 0.033572690f*usqr+ 0.083333333f*(v-w); float f17= 0.0158730149f*rho+ 0.033572690f*usqr+ -0.083333333f*(u+w) ; float f18= 0.0158730149f*rho+ 0.033572690f*usqr+ -0.083333333f*(v+w); f1 += 0.055555556f*(2.f*u*u-(v*v+w*w)); f2 += -0.027777778f*(2.f*u*u-(v*v+w*w))+ 0.083333333f*(v*v-w*w); f3 += 0.055555556f*(2.f*u*u-(v*v+w*w)); f4 += -0.027777778f*(2.f*u*u-(v*v+w*w))+ 0.083333333f*(v*v-w*w); f5 += 0.027777778f*(2.f*u*u-(v*v+w*w))+ 0.083333333f*(v*v-w*w)+ 0.25f*u*v ; f6 += 0.027777778f*(2.f*u*u-(v*v+w*w))+ 0.083333333f*(v*v-w*w)+ -0.25f*u*v ; f7 += 0.027777778f*(2.f*u*u-(v*v+w*w))+ 0.083333333f*(v*v-w*w)+ 
0.25f*u*v ; f8 += 0.027777778f*(2.f*u*u-(v*v+w*w))+ 0.083333333f*(v*v-w*w)+ -0.25f*u*v ; f9 += -0.027777778f*(2.f*u*u-(v*v+w*w))+ -0.083333333f*(v*v-w*w) ; f10+= 0.027777778f*(2.f*u*u-(v*v+w*w))+ -0.083333333f*(v*v-w*w) + 0.25f*u*w; f11+= -0.055555556f*(2.f*u*u-(v*v+w*w)) + 0.25f*v*w ; f12+= 0.027777778f*(2.f*u*u-(v*v+w*w))+ -0.083333333f*(v*v-w*w) + -0.25f*u*w; f13+= -0.055555556f*(2.f*u*u-(v*v+w*w)) -0.25f*v*w ; f14+= -0.027777778f*(2.f*u*u-(v*v+w*w))+ -0.083333333f*(v*v-w*w) ; f15+= 0.027777778f*(2.f*u*u-(v*v+w*w))+ -0.083333333f*(v*v-w*w) + -0.25f*u*w; f16+= -0.055555556f*(2.f*u*u-(v*v+w*w)) + -0.25f*v*w ; f17+= 0.027777778f*(2.f*u*u-(v*v+w*w))+ -0.083333333f*(v*v-w*w) + 0.25f*u*w; f18+= -0.055555556f*(2.f*u*u-(v*v+w*w)) + 0.25f*v*w ; f[j+0 *pitch*YDIM*ZDIM]=f0 ; f[j+1 *pitch*YDIM*ZDIM]=f1 ; f[j+2 *pitch*YDIM*ZDIM]=f2 ; f[j+3 *pitch*YDIM*ZDIM]=f3 ; f[j+4 *pitch*YDIM*ZDIM]=f4 ; f[j+5 *pitch*YDIM*ZDIM]=f5 ; f[j+6 *pitch*YDIM*ZDIM]=f6 ; f[j+7 *pitch*YDIM*ZDIM]=f7 ; f[j+8 *pitch*YDIM*ZDIM]=f8 ; f[j+9 *pitch*YDIM*ZDIM]=f9 ; f[j+10*pitch*YDIM*ZDIM]=f10; f[j+11*pitch*YDIM*ZDIM]=f11; f[j+12*pitch*YDIM*ZDIM]=f12; f[j+13*pitch*YDIM*ZDIM]=f13; f[j+14*pitch*YDIM*ZDIM]=f14; f[j+15*pitch*YDIM*ZDIM]=f15; f[j+16*pitch*YDIM*ZDIM]=f16; f[j+17*pitch*YDIM*ZDIM]=f17; f[j+18*pitch*YDIM*ZDIM]=f18; } if(x == XDIM-1){ for(int i = XDIM; i<pitch; i++){ j = i+y*pitch+z*YDIM*pitch;//index on padded mem (pitch in elements) f[j+0 *pitch*YDIM*ZDIM]=0.f; f[j+1 *pitch*YDIM*ZDIM]=0.f; f[j+2 *pitch*YDIM*ZDIM]=0.f; f[j+3 *pitch*YDIM*ZDIM]=0.f; f[j+4 *pitch*YDIM*ZDIM]=0.f; f[j+5 *pitch*YDIM*ZDIM]=0.f; f[j+6 *pitch*YDIM*ZDIM]=0.f; f[j+7 *pitch*YDIM*ZDIM]=0.f; f[j+8 *pitch*YDIM*ZDIM]=0.f; f[j+9 *pitch*YDIM*ZDIM]=0.f; f[j+10*pitch*YDIM*ZDIM]=0.f; f[j+11*pitch*YDIM*ZDIM]=0.f; f[j+12*pitch*YDIM*ZDIM]=0.f; f[j+13*pitch*YDIM*ZDIM]=0.f; f[j+14*pitch*YDIM*ZDIM]=0.f; f[j+15*pitch*YDIM*ZDIM]=0.f; f[j+16*pitch*YDIM*ZDIM]=0.f; f[j+17*pitch*YDIM*ZDIM]=0.f; f[j+18*pitch*YDIM*ZDIM]=0.f; } } } __global__ void initialize_LR(float *f, size_t pitch)//pitch in elements { int x = threadIdx.x+blockIdx.x*blockDim.x;//coord in linear mem int y = threadIdx.y+blockIdx.y*blockDim.y; int z = threadIdx.z+blockIdx.z*blockDim.z; int j = x+y*pitch+z*YLRDIM*pitch;//index on padded mem (pitch in elements) float xcoord = LRX0+x*LRFACTOR; float ycoord = LRY0+y*LRFACTOR; float zcoord = LRZ0+z*LRFACTOR; int im = ImageFcn(xcoord,ycoord,zcoord); float u,v,w,rho,usqr; rho = 1.f; u = 0.0f; v = UMAX;//0.0f; w = 0.0f; if(im == 10 || im == 1){ u = 0.0f; v = 0.0f; w = 0.0f; } //if(x == 3 ) u = 0.1f; usqr = u*u+v*v+w*w; if(MODEL == "BGK"){ f[j+0 *pitch*YLRDIM*ZLRDIM]= 1.0f/3.0f*(rho-1.5f*usqr); f[j+1 *pitch*YLRDIM*ZLRDIM]= 1.0f/18.0f*(rho+3.0f*u+4.5f*u*u-1.5f*usqr); f[j+2 *pitch*YLRDIM*ZLRDIM]= 1.0f/18.0f*(rho+3.0f*v+4.5f*v*v-1.5f*usqr); f[j+3 *pitch*YLRDIM*ZLRDIM]= 1.0f/18.0f*(rho-3.0f*u+4.5f*u*u-1.5f*usqr); f[j+4 *pitch*YLRDIM*ZLRDIM]= 1.0f/18.0f*(rho-3.0f*v+4.5f*v*v-1.5f*usqr); f[j+5 *pitch*YLRDIM*ZLRDIM]= 1.0f/36.0f*(rho+3.0f*(u+v)+4.5f*(u+v)*(u+v)-1.5f*usqr); f[j+6 *pitch*YLRDIM*ZLRDIM]= 1.0f/36.0f*(rho+3.0f*(-u+v)+4.5f*(-u+v)*(-u+v)-1.5f*usqr); f[j+7 *pitch*YLRDIM*ZLRDIM]= 1.0f/36.0f*(rho+3.0f*(-u-v)+4.5f*(-u-v)*(-u-v)-1.5f*usqr); f[j+8 *pitch*YLRDIM*ZLRDIM]= 1.0f/36.0f*(rho+3.0f*(u-v)+4.5f*(u-v)*(u-v)-1.5f*usqr); f[j+9 *pitch*YLRDIM*ZLRDIM]= 1.0f/18.0f*(rho+3.0f*w+4.5f*w*w-1.5f*usqr); f[j+10*pitch*YLRDIM*ZLRDIM]= 1.0f/36.0f*(rho+3.0f*(u+w)+4.5f*(u+w)*(u+w)-1.5f*usqr); f[j+11*pitch*YLRDIM*ZLRDIM]= 1.0f/36.0f*(rho+3.0f*(v+w)+4.5f*(v+w)*(v+w)-1.5f*usqr); 
f[j+12*pitch*YLRDIM*ZLRDIM]= 1.0f/36.0f*(rho+3.0f*(-u+w)+4.5f*(-u+w)*(-u+w)-1.5f*usqr); f[j+13*pitch*YLRDIM*ZLRDIM]= 1.0f/36.0f*(rho+3.0f*(-v+w)+4.5f*(-v+w)*(-v+w)-1.5f*usqr); f[j+14*pitch*YLRDIM*ZLRDIM]= 1.0f/18.0f*(rho-3.0f*w+4.5f*w*w-1.5f*usqr); f[j+15*pitch*YLRDIM*ZLRDIM]= 1.0f/36.0f*(rho+3.0f*(u-w)+4.5f*(u-w)*(u-w)-1.5f*usqr); f[j+16*pitch*YLRDIM*ZLRDIM]= 1.0f/36.0f*(rho+3.0f*(v-w)+4.5f*(v-w)*(v-w)-1.5f*usqr); f[j+17*pitch*YLRDIM*ZLRDIM]= 1.0f/36.0f*(rho+3.0f*(-u-w)+4.5f*(-u-w)*(-u-w)-1.5f*usqr); f[j+18*pitch*YLRDIM*ZLRDIM]= 1.0f/36.0f*(rho+3.0f*(-v-w)+4.5f*(-v-w)*(-v-w)-1.5f*usqr); } else{ float f0 = 0.1904761791f*rho+-0.597127747f*usqr ; float f1 = 0.1031746045f*rho+ 0.032375918f*usqr+ 0.1666666667f*u ; float f2 = 0.1031746045f*rho+ 0.032375918f*usqr+ 0.1666666667f*v ; float f3 = 0.1031746045f*rho+ 0.032375918f*usqr+ -0.1666666667f*u ; float f4 = 0.1031746045f*rho+ 0.032375918f*usqr+ -0.1666666667f*v ; float f5 = 0.0158730149f*rho+ 0.033572690f*usqr+ 0.083333333f*( u+v) ; float f6 = 0.0158730149f*rho+ 0.033572690f*usqr+ -0.083333333f*( u-v) ; float f7 = 0.0158730149f*rho+ 0.033572690f*usqr+ -0.083333333f*( u+v) ; float f8 = 0.0158730149f*rho+ 0.033572690f*usqr+ 0.083333333f*( u-v) ; float f9 = 0.1031746045f*rho+ 0.032375918f*usqr+ 0.1666666667f*w; float f10= 0.0158730149f*rho+ 0.033572690f*usqr+ 0.083333333f*( u+w) ; float f11= 0.0158730149f*rho+ 0.033572690f*usqr+ 0.083333333f*( v+w); float f12= 0.0158730149f*rho+ 0.033572690f*usqr+ -0.083333333f*( u-w) ; float f13= 0.0158730149f*rho+ 0.033572690f*usqr+ -0.083333333f*( v-w); float f14= 0.1031746045f*rho+ 0.032375918f*usqr+ -0.1666666667f*w; float f15= 0.0158730149f*rho+ 0.033572690f*usqr+ 0.083333333f*(u-w) ; float f16= 0.0158730149f*rho+ 0.033572690f*usqr+ 0.083333333f*(v-w); float f17= 0.0158730149f*rho+ 0.033572690f*usqr+ -0.083333333f*(u+w) ; float f18= 0.0158730149f*rho+ 0.033572690f*usqr+ -0.083333333f*(v+w); f1 += 0.055555556f*(2.f*u*u-(v*v+w*w)); f2 += -0.027777778f*(2.f*u*u-(v*v+w*w))+ 0.083333333f*(v*v-w*w); f3 += 0.055555556f*(2.f*u*u-(v*v+w*w)); f4 += -0.027777778f*(2.f*u*u-(v*v+w*w))+ 0.083333333f*(v*v-w*w); f5 += 0.027777778f*(2.f*u*u-(v*v+w*w))+ 0.083333333f*(v*v-w*w)+ 0.25f*u*v ; f6 += 0.027777778f*(2.f*u*u-(v*v+w*w))+ 0.083333333f*(v*v-w*w)+ -0.25f*u*v ; f7 += 0.027777778f*(2.f*u*u-(v*v+w*w))+ 0.083333333f*(v*v-w*w)+ 0.25f*u*v ; f8 += 0.027777778f*(2.f*u*u-(v*v+w*w))+ 0.083333333f*(v*v-w*w)+ -0.25f*u*v ; f9 += -0.027777778f*(2.f*u*u-(v*v+w*w))+ -0.083333333f*(v*v-w*w) ; f10+= 0.027777778f*(2.f*u*u-(v*v+w*w))+ -0.083333333f*(v*v-w*w) + 0.25f*u*w; f11+= -0.055555556f*(2.f*u*u-(v*v+w*w)) + 0.25f*v*w ; f12+= 0.027777778f*(2.f*u*u-(v*v+w*w))+ -0.083333333f*(v*v-w*w) + -0.25f*u*w; f13+= -0.055555556f*(2.f*u*u-(v*v+w*w)) -0.25f*v*w ; f14+= -0.027777778f*(2.f*u*u-(v*v+w*w))+ -0.083333333f*(v*v-w*w) ; f15+= 0.027777778f*(2.f*u*u-(v*v+w*w))+ -0.083333333f*(v*v-w*w) + -0.25f*u*w; f16+= -0.055555556f*(2.f*u*u-(v*v+w*w)) + -0.25f*v*w ; f17+= 0.027777778f*(2.f*u*u-(v*v+w*w))+ -0.083333333f*(v*v-w*w) + 0.25f*u*w; f18+= -0.055555556f*(2.f*u*u-(v*v+w*w)) + 0.25f*v*w ; f[j+0 *pitch*YLRDIM*ZLRDIM]=f0 ; f[j+1 *pitch*YLRDIM*ZLRDIM]=f1 ; f[j+2 *pitch*YLRDIM*ZLRDIM]=f2 ; f[j+3 *pitch*YLRDIM*ZLRDIM]=f3 ; f[j+4 *pitch*YLRDIM*ZLRDIM]=f4 ; f[j+5 *pitch*YLRDIM*ZLRDIM]=f5 ; f[j+6 *pitch*YLRDIM*ZLRDIM]=f6 ; f[j+7 *pitch*YLRDIM*ZLRDIM]=f7 ; f[j+8 *pitch*YLRDIM*ZLRDIM]=f8 ; f[j+9 *pitch*YLRDIM*ZLRDIM]=f9 ; f[j+10*pitch*YLRDIM*ZLRDIM]=f10; f[j+11*pitch*YLRDIM*ZLRDIM]=f11; f[j+12*pitch*YLRDIM*ZLRDIM]=f12; f[j+13*pitch*YLRDIM*ZLRDIM]=f13; 
f[j+14*pitch*YLRDIM*ZLRDIM]=f14;
f[j+15*pitch*YLRDIM*ZLRDIM]=f15;
f[j+16*pitch*YLRDIM*ZLRDIM]=f16;
f[j+17*pitch*YLRDIM*ZLRDIM]=f17;
f[j+18*pitch*YLRDIM*ZLRDIM]=f18;
}
}
__global__ void initialize(float* f0, float* f1, float* f2, float* f3, float* f4, float* f5, float* f6, float* f7, float* f8, float* f9, float* f10, float* f11, float* f12, float* f13, float* f14, float* f15, float* f16, float* f17, float* f18, size_t pitch)//pitch in elements
//__global__ void initialize(void** f0in, void** f1in,
// int w, int h, int pitch)//pitch in elements
{
int x = threadIdx.x+blockIdx.x*blockDim.x;//coord in linear mem
int y = threadIdx.y+blockIdx.y*blockDim.y;
int z = threadIdx.z+blockIdx.z*blockDim.z;
// int i = x+y*XDIM+z*XDIM*YDIM;//index on linear mem
int j = x+y*pitch+z*YDIM*pitch;//index on padded mem (pitch in elements)
// f1out[j] = tex2D(texRef_f2A,x,y+h*z);
float u,v,w,rho,feq,usqr;
rho = 1.0f;
u = 0.0f;
v = 0.0f;
w = 0.0f;
//if(x == 3 ) u = 0.1f;
usqr = u*u+v*v+w*w;
feq = 1.0f/3.0f*(rho-1.5f*usqr);
f0[j] = feq;
feq = 1.0f/18.0f*(rho+3.0f*u+4.5f*u*u-1.5f*usqr);
f1[j] = feq;
feq = 1.0f/18.0f*(rho+3.0f*v+4.5f*v*v-1.5f*usqr);
f2[j] = feq;
feq = 1.0f/18.0f*(rho-3.0f*u+4.5f*u*u-1.5f*usqr);
f3[j] = feq;
feq = 1.0f/18.0f*(rho-3.0f*v+4.5f*v*v-1.5f*usqr);
f4[j] = feq;
feq = 1.0f/36.0f*(rho+3.0f*(u+v)+4.5f*(u+v)*(u+v)-1.5f*usqr);
f5[j] = feq;
feq = 1.0f/36.0f*(rho+3.0f*(-u+v)+4.5f*(-u+v)*(-u+v)-1.5f*usqr);
f6[j] = feq;
feq = 1.0f/36.0f*(rho+3.0f*(-u-v)+4.5f*(-u-v)*(-u-v)-1.5f*usqr);
f7[j] = feq;
feq = 1.0f/36.0f*(rho+3.0f*(u-v)+4.5f*(u-v)*(u-v)-1.5f*usqr);
f8[j] = feq;
feq = 1.0f/18.0f*(rho+3.0f*w+4.5f*w*w-1.5f*usqr);
f9[j] = feq;
feq = 1.0f/36.0f*(rho+3.0f*(u+w)+4.5f*(u+w)*(u+w)-1.5f*usqr);
f10[j] = feq;
feq = 1.0f/36.0f*(rho+3.0f*(v+w)+4.5f*(v+w)*(v+w)-1.5f*usqr);
f11[j] = feq;
feq = 1.0f/36.0f*(rho+3.0f*(-u+w)+4.5f*(-u+w)*(-u+w)-1.5f*usqr);
f12[j] = feq;
feq = 1.0f/36.0f*(rho+3.0f*(-v+w)+4.5f*(-v+w)*(-v+w)-1.5f*usqr);
f13[j] = feq;
feq = 1.0f/18.0f*(rho-3.0f*w+4.5f*w*w-1.5f*usqr);
f14[j] = feq;
feq = 1.0f/36.0f*(rho+3.0f*(u-w)+4.5f*(u-w)*(u-w)-1.5f*usqr);
f15[j] = feq;
feq = 1.0f/36.0f*(rho+3.0f*(v-w)+4.5f*(v-w)*(v-w)-1.5f*usqr);
f16[j] = feq;
feq = 1.0f/36.0f*(rho+3.0f*(-u-w)+4.5f*(-u-w)*(-u-w)-1.5f*usqr);
f17[j] = feq;
feq = 1.0f/36.0f*(rho+3.0f*(-v-w)+4.5f*(-v-w)*(-v-w)-1.5f*usqr);
f18[j] = feq;
}
int main(int argc, char *argv[])
{
//int *image_d, *image_h;
ofstream output;
ofstream output2;
string FileName = CASENAME;
//output.open ("LBM1_out.dat");
output.open ((FileName+".dat").c_str());
output2.open ((FileName+".force").c_str());
size_t memsize, memsize2;
size_t pitch = 0;
size_t pitch2 = 0;
int i, n, nBlocks, nBlocks2, n2;
float omega, CharLength, omega2;
if(abs(LRFACTOR-1.f/LRLEVEL)>0.001f){
cout<<"LRLEVEL and LRFACTOR don't match! 
Exiting..."<<endl; return 0; } CharLength = OBSTR1*2.f; omega = 1.0f/(3.0f*(UMAX*CharLength/RE)+0.5f); omega2 = 2.0f/(1.0f+2.0f*(2.0f/omega-1.0f)); if(LRFACTOR == 0.25f){ omega2 = 2.0f/(1.0f+2.0f*(2.0f/omega2-1.0f)); } float SF_cf = omega*(1.0f-omega2)/((1.0f-omega)*omega2/LRFACTOR); float SF_fc = 1.f/SF_cf; cout<<"omega : "<<omega<<endl; cout<<"omega2: "<<omega2<<endl; cout<<"blocksize: "<<BLOCKSIZEX<<"x"<<BLOCKSIZEY<<"x"<<BLOCKSIZEZ<<endl; cout<<"grid: "<<XDIM<<"x"<<YDIM<<"x"<<ZDIM<<endl; cout<<"LRblocksize: "<<BLOCKSIZELRX<<"x"<<BLOCKSIZELRY<<"x"<<BLOCKSIZELRZ<<endl; cout<<"LRgrid: "<<XLRDIM<<"x"<<YLRDIM<<"x"<<ZLRDIM<<endl; cout<<"TMAX: "<<TMAX<<endl; cout<<"Method: "<<METHOD<<endl; cout<<"Model: "<<MODEL<<endl; nBlocks = ((XDIM+BLOCKSIZEX-1)/BLOCKSIZEX)*(YDIM/BLOCKSIZEY+YDIM%BLOCKSIZEY) *(ZDIM/BLOCKSIZEZ+ZDIM%BLOCKSIZEZ); nBlocks2 = (XLRDIM/BLOCKSIZELRX+XLRDIM%BLOCKSIZELRX)*(YLRDIM/BLOCKSIZELRY+YLRDIM%BLOCKSIZELRY) *(ZLRDIM/BLOCKSIZELRZ+ZLRDIM%BLOCKSIZELRZ); int B = BLOCKSIZEX*BLOCKSIZEY*BLOCKSIZEZ; int B2 = BLOCKSIZELRX*BLOCKSIZELRY*BLOCKSIZELRZ; n = nBlocks*B; n2 = nBlocks2*B2; cout<<"nBlocks:"<<nBlocks<<endl; dim3 threads(BLOCKSIZEX, BLOCKSIZEY, BLOCKSIZEZ); dim3 grid(((XDIM+BLOCKSIZEX-1)/BLOCKSIZEX),YDIM/BLOCKSIZEY,ZDIM/BLOCKSIZEZ); dim3 threads2(BLOCKSIZELRX, BLOCKSIZELRY, BLOCKSIZELRZ); dim3 grid2(XLRDIM/BLOCKSIZELRX,YLRDIM/BLOCKSIZELRY,ZLRDIM/BLOCKSIZELRZ); memsize = n*sizeof(float); //memsize_int = n*sizeof(int); memsize2 = n2*sizeof(float); //cudaExtent extent = make_cudaExtent(XDIM*sizeof(float),YDIM,ZDIM); //image_h = (int *)malloc(memsize_int); float *fA_h,*fA_d,*fB_d,*fC_h,*fC_d,*fD_d; float *FX_h,*FY_h,*FZ_h,*FX_d,*FY_d,*FZ_d; float *uAv_h,*vAv_h,*wAv_h,*uAv_d,*vAv_d,*wAv_d; float *uAvLR_h,*vAvLR_h,*wAvLR_h,*uAvLR_d,*vAvLR_d,*wAvLR_d; float *ufluc_h,*vfluc_h,*wfluc_h,*ufluc_d,*vfluc_d,*wfluc_d; float *uflucLR_h,*vflucLR_h,*wflucLR_h,*uflucLR_d,*vflucLR_d,*wflucLR_d; fA_h = (float *)malloc(memsize*19); fC_h = (float *)malloc(memsize2*19); FX_h = (float *)malloc(TMAX*sizeof(float)); FY_h = (float *)malloc(TMAX*sizeof(float)); FZ_h = (float *)malloc(TMAX*sizeof(float)); uAv_h = (float *)malloc(XDIM*YDIM*ZDIM*sizeof(float)); vAv_h = (float *)malloc(XDIM*YDIM*ZDIM*sizeof(float)); wAv_h = (float *)malloc(XDIM*YDIM*ZDIM*sizeof(float)); uAvLR_h = (float *)malloc(XLRDIM*YLRDIM*ZLRDIM*sizeof(float)); vAvLR_h = (float *)malloc(XLRDIM*YLRDIM*ZLRDIM*sizeof(float)); wAvLR_h = (float *)malloc(XLRDIM*YLRDIM*ZLRDIM*sizeof(float)); ufluc_h = (float *)malloc(XDIM*YDIM*ZDIM*sizeof(float)); vfluc_h = (float *)malloc(XDIM*YDIM*ZDIM*sizeof(float)); wfluc_h = (float *)malloc(XDIM*YDIM*ZDIM*sizeof(float)); uflucLR_h = (float *)malloc(XLRDIM*YLRDIM*ZLRDIM*sizeof(float)); vflucLR_h = (float *)malloc(XLRDIM*YLRDIM*ZLRDIM*sizeof(float)); wflucLR_h = (float *)malloc(XLRDIM*YLRDIM*ZLRDIM*sizeof(float)); cudaMallocPitch((void **) &fA_d, &pitch, XDIM*sizeof(float), YDIM*ZDIM*19); cudaMallocPitch((void **) &fB_d, &pitch, XDIM*sizeof(float), YDIM*ZDIM*19); cudaMallocPitch((void **) &uAv_d,&pitch, XDIM*sizeof(float), YDIM*ZDIM); cudaMallocPitch((void **) &vAv_d,&pitch, XDIM*sizeof(float), YDIM*ZDIM); cudaMallocPitch((void **) &wAv_d,&pitch, XDIM*sizeof(float), YDIM*ZDIM); cudaMallocPitch((void **) &ufluc_d,&pitch, XDIM*sizeof(float), YDIM*ZDIM); cudaMallocPitch((void **) &vfluc_d,&pitch, XDIM*sizeof(float), YDIM*ZDIM); cudaMallocPitch((void **) &wfluc_d,&pitch, XDIM*sizeof(float), YDIM*ZDIM); if(REFINEMENT == "YES"){ cudaMallocPitch((void **) &fC_d, &pitch2, XLRDIM*sizeof(float), YLRDIM*ZLRDIM*19); 
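// fC_d/fD_d form the ping-pong pair of pitched distribution buffers for the locally
// refined (LR) grid, mirroring fA_d/fB_d on the coarse grid: each allocation holds the
// 19 velocity slices as XLRDIM x (YLRDIM*ZLRDIM) floats, row-padded to pitch2
// (returned in bytes and converted to pitch_elements2 below).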
cudaMallocPitch((void **) &fD_d, &pitch2, XLRDIM*sizeof(float), YLRDIM*ZLRDIM*19); if(VELAV == "YES"){ cudaMallocPitch((void **) &uAvLR_d,&pitch2, XLRDIM*sizeof(float), YLRDIM*ZLRDIM); cudaMallocPitch((void **) &vAvLR_d,&pitch2, XLRDIM*sizeof(float), YLRDIM*ZLRDIM); cudaMallocPitch((void **) &wAvLR_d,&pitch2, XLRDIM*sizeof(float), YLRDIM*ZLRDIM); cudaMallocPitch((void **) &uflucLR_d,&pitch2, XLRDIM*sizeof(float), YLRDIM*ZLRDIM); cudaMallocPitch((void **) &vflucLR_d,&pitch2, XLRDIM*sizeof(float), YLRDIM*ZLRDIM); cudaMallocPitch((void **) &wflucLR_d,&pitch2, XLRDIM*sizeof(float), YLRDIM*ZLRDIM); } } cudaMalloc((void **) &FX_d, TMAX*sizeof(float)); cudaMalloc((void **) &FY_d, TMAX*sizeof(float)); cudaMalloc((void **) &FZ_d, TMAX*sizeof(float)); cout<<pitch<<", "<<pitch2<<endl; size_t pitch_elements = pitch/sizeof(float); size_t pitch_elements2 = pitch2/sizeof(float); cudaChannelFormatDesc desc = cudaCreateChannelDesc<float>(); for (i = 0; i < n*19; i++) { fA_h[i] = i; } for (i = 0; i < n2*19; i++) { fC_h[i] = 0; } for (i = 0; i < TMAX; i++){ FX_h[i] = 0.f; FY_h[i] = 0.f; FZ_h[i] = 0.f; } for (i = 0; i < n; i++) { uAv_h[i] = 0; vAv_h[i] = 0; wAv_h[i] = 0; ufluc_h[i] = 0; vfluc_h[i] = 0; wfluc_h[i] = 0; } for (i = 0; i < n2; i++) { uAvLR_h[i] = 0; vAvLR_h[i] = 0; wAvLR_h[i] = 0; uflucLR_h[i] = 0; vflucLR_h[i] = 0; wflucLR_h[i] = 0; } cudaMemcpy(FX_d, FX_h, TMAX*sizeof(float), cudaMemcpyHostToDevice); cudaMemcpy(FY_d, FY_h, TMAX*sizeof(float), cudaMemcpyHostToDevice); cudaMemcpy(FZ_d, FZ_h, TMAX*sizeof(float), cudaMemcpyHostToDevice); if(VELAV == "YES"){ cudaMemcpy2D(uAv_d, pitch, uAv_h, XDIM*sizeof(float), XDIM*sizeof(float), YDIM*ZDIM, cudaMemcpyHostToDevice); cudaMemcpy2D(vAv_d, pitch, vAv_h, XDIM*sizeof(float), XDIM*sizeof(float), YDIM*ZDIM, cudaMemcpyHostToDevice); cudaMemcpy2D(wAv_d, pitch, wAv_h, XDIM*sizeof(float), XDIM*sizeof(float), YDIM*ZDIM, cudaMemcpyHostToDevice); cudaMemcpy2D(ufluc_d, pitch, uAv_h, XDIM*sizeof(float), XDIM*sizeof(float), YDIM*ZDIM, cudaMemcpyHostToDevice); cudaMemcpy2D(vfluc_d, pitch, vAv_h, XDIM*sizeof(float), XDIM*sizeof(float), YDIM*ZDIM, cudaMemcpyHostToDevice); cudaMemcpy2D(wfluc_d, pitch, wAv_h, XDIM*sizeof(float), XDIM*sizeof(float), YDIM*ZDIM, cudaMemcpyHostToDevice); if(REFINEMENT == "YES"){ cudaMemcpy2D(uAvLR_d, pitch2, uAvLR_h, XLRDIM*sizeof(float), XLRDIM*sizeof(float), YLRDIM*ZLRDIM, cudaMemcpyHostToDevice); cudaMemcpy2D(vAvLR_d, pitch2, vAvLR_h, XLRDIM*sizeof(float), XLRDIM*sizeof(float), YLRDIM*ZLRDIM, cudaMemcpyHostToDevice); cudaMemcpy2D(wAvLR_d, pitch2, wAvLR_h, XLRDIM*sizeof(float), XLRDIM*sizeof(float), YLRDIM*ZLRDIM, cudaMemcpyHostToDevice); cudaMemcpy2D(uflucLR_d,pitch2, uAvLR_h, XLRDIM*sizeof(float), XLRDIM*sizeof(float), YLRDIM*ZLRDIM, cudaMemcpyHostToDevice); cudaMemcpy2D(vflucLR_d,pitch2, vAvLR_h, XLRDIM*sizeof(float), XLRDIM*sizeof(float), YLRDIM*ZLRDIM, cudaMemcpyHostToDevice); cudaMemcpy2D(wflucLR_d,pitch2, wAvLR_h, XLRDIM*sizeof(float), XLRDIM*sizeof(float), YLRDIM*ZLRDIM, cudaMemcpyHostToDevice); } } // for (i = 0; i < n; i++) // { // int x = i%XDIM; // int y = (i/XDIM)%YDIM; // int z = (i/XDIM)/YDIM; //// image_h[i] = 0; //// if(x < 1) image_h[i] = 1;//DirichletWest //// if(x > XDIM-2) image_h[i] = 1;//BB //// if(y < 1) image_h[i] = 1;//BB //// if(y > YDIM-2) image_h[i] = 1;//BB //// if(z < 1) image_h[i] = 1;//DirichletWest //// if(z > ZDIM-2) image_h[i] = 1;//BB // } //cudaMemcpy(image_d, image_h, memsize_int, cudaMemcpyHostToDevice); if(true)//texture settings { texRef_f0B.normalized = false; texRef_f1B.normalized = false; 
texRef_f2B.normalized = false; texRef_f3B.normalized = false; texRef_f4B.normalized = false; texRef_f5B.normalized = false; texRef_f6B.normalized = false; texRef_f7B.normalized = false; texRef_f8B.normalized = false; texRef_f9B.normalized = false; texRef_f10B.normalized = false; texRef_f11B.normalized = false; texRef_f12B.normalized = false; texRef_f13B.normalized = false; texRef_f14B.normalized = false; texRef_f15B.normalized = false; texRef_f16B.normalized = false; texRef_f17B.normalized = false; texRef_f18B.normalized = false; texRef_f0B.filterMode = cudaFilterModeLinear; texRef_f1B.filterMode = cudaFilterModeLinear; texRef_f2B.filterMode = cudaFilterModeLinear; texRef_f3B.filterMode = cudaFilterModeLinear; texRef_f4B.filterMode = cudaFilterModeLinear; texRef_f5B.filterMode = cudaFilterModeLinear; texRef_f6B.filterMode = cudaFilterModeLinear; texRef_f7B.filterMode = cudaFilterModeLinear; texRef_f8B.filterMode = cudaFilterModeLinear; texRef_f9B.filterMode = cudaFilterModeLinear; texRef_f10B.filterMode = cudaFilterModeLinear; texRef_f11B.filterMode = cudaFilterModeLinear; texRef_f12B.filterMode = cudaFilterModeLinear; texRef_f13B.filterMode = cudaFilterModeLinear; texRef_f14B.filterMode = cudaFilterModeLinear; texRef_f15B.filterMode = cudaFilterModeLinear; texRef_f16B.filterMode = cudaFilterModeLinear; texRef_f17B.filterMode = cudaFilterModeLinear; texRef_f18B.filterMode = cudaFilterModeLinear; texRef_f0A.normalized = false; texRef_f1A.normalized = false; texRef_f2A.normalized = false; texRef_f3A.normalized = false; texRef_f4A.normalized = false; texRef_f5A.normalized = false; texRef_f6A.normalized = false; texRef_f7A.normalized = false; texRef_f8A.normalized = false; texRef_f9A.normalized = false; texRef_f10A.normalized = false; texRef_f11A.normalized = false; texRef_f12A.normalized = false; texRef_f13A.normalized = false; texRef_f14A.normalized = false; texRef_f15A.normalized = false; texRef_f16A.normalized = false; texRef_f17A.normalized = false; texRef_f18A.normalized = false; texRef_f0A.filterMode = cudaFilterModeLinear; texRef_f1A.filterMode = cudaFilterModeLinear; texRef_f2A.filterMode = cudaFilterModeLinear; texRef_f3A.filterMode = cudaFilterModeLinear; texRef_f4A.filterMode = cudaFilterModeLinear; texRef_f5A.filterMode = cudaFilterModeLinear; texRef_f6A.filterMode = cudaFilterModeLinear; texRef_f7A.filterMode = cudaFilterModeLinear; texRef_f8A.filterMode = cudaFilterModeLinear; texRef_f9A.filterMode = cudaFilterModeLinear; texRef_f10A.filterMode = cudaFilterModeLinear; texRef_f11A.filterMode = cudaFilterModeLinear; texRef_f12A.filterMode = cudaFilterModeLinear; texRef_f13A.filterMode = cudaFilterModeLinear; texRef_f14A.filterMode = cudaFilterModeLinear; texRef_f15A.filterMode = cudaFilterModeLinear; texRef_f16A.filterMode = cudaFilterModeLinear; texRef_f17A.filterMode = cudaFilterModeLinear; texRef_f18A.filterMode = cudaFilterModeLinear; // if(REFINEMENT == "YES"){ texRef_f0C.normalized = false; texRef_f1C.normalized = false; texRef_f2C.normalized = false; texRef_f3C.normalized = false; texRef_f4C.normalized = false; texRef_f5C.normalized = false; texRef_f6C.normalized = false; texRef_f7C.normalized = false; texRef_f8C.normalized = false; texRef_f9C.normalized = false; texRef_f10C.normalized = false; texRef_f11C.normalized = false; texRef_f12C.normalized = false; texRef_f13C.normalized = false; texRef_f14C.normalized = false; texRef_f15C.normalized = false; texRef_f16C.normalized = false; texRef_f17C.normalized = false; texRef_f18C.normalized = false; texRef_f0C.filterMode = 
cudaFilterModeLinear; texRef_f1C.filterMode = cudaFilterModeLinear; texRef_f2C.filterMode = cudaFilterModeLinear; texRef_f3C.filterMode = cudaFilterModeLinear; texRef_f4C.filterMode = cudaFilterModeLinear; texRef_f5C.filterMode = cudaFilterModeLinear; texRef_f6C.filterMode = cudaFilterModeLinear; texRef_f7C.filterMode = cudaFilterModeLinear; texRef_f8C.filterMode = cudaFilterModeLinear; texRef_f9C.filterMode = cudaFilterModeLinear; texRef_f10C.filterMode = cudaFilterModeLinear; texRef_f11C.filterMode = cudaFilterModeLinear; texRef_f12C.filterMode = cudaFilterModeLinear; texRef_f13C.filterMode = cudaFilterModeLinear; texRef_f14C.filterMode = cudaFilterModeLinear; texRef_f15C.filterMode = cudaFilterModeLinear; texRef_f16C.filterMode = cudaFilterModeLinear; texRef_f17C.filterMode = cudaFilterModeLinear; texRef_f18C.filterMode = cudaFilterModeLinear; texRef_f0D.normalized = false; texRef_f1D.normalized = false; texRef_f2D.normalized = false; texRef_f3D.normalized = false; texRef_f4D.normalized = false; texRef_f5D.normalized = false; texRef_f6D.normalized = false; texRef_f7D.normalized = false; texRef_f8D.normalized = false; texRef_f9D.normalized = false; texRef_f10D.normalized = false; texRef_f11D.normalized = false; texRef_f12D.normalized = false; texRef_f13D.normalized = false; texRef_f14D.normalized = false; texRef_f15D.normalized = false; texRef_f16D.normalized = false; texRef_f17D.normalized = false; texRef_f18D.normalized = false; texRef_f0D.filterMode = cudaFilterModeLinear; texRef_f1D.filterMode = cudaFilterModeLinear; texRef_f2D.filterMode = cudaFilterModeLinear; texRef_f3D.filterMode = cudaFilterModeLinear; texRef_f4D.filterMode = cudaFilterModeLinear; texRef_f5D.filterMode = cudaFilterModeLinear; texRef_f6D.filterMode = cudaFilterModeLinear; texRef_f7D.filterMode = cudaFilterModeLinear; texRef_f8D.filterMode = cudaFilterModeLinear; texRef_f9D.filterMode = cudaFilterModeLinear; texRef_f10D.filterMode = cudaFilterModeLinear; texRef_f11D.filterMode = cudaFilterModeLinear; texRef_f12D.filterMode = cudaFilterModeLinear; texRef_f13D.filterMode = cudaFilterModeLinear; texRef_f14D.filterMode = cudaFilterModeLinear; texRef_f15D.filterMode = cudaFilterModeLinear; texRef_f16D.filterMode = cudaFilterModeLinear; texRef_f17D.filterMode = cudaFilterModeLinear; texRef_f18D.filterMode = cudaFilterModeLinear; // } for(int i = 0; i<2; i++){ texRef_f0A.addressMode[i] = cudaAddressModeClamp; texRef_f1A.addressMode[i] = cudaAddressModeClamp; texRef_f2A.addressMode[i] = cudaAddressModeClamp; texRef_f3A.addressMode[i] = cudaAddressModeClamp; texRef_f4A.addressMode[i] = cudaAddressModeClamp; texRef_f5A.addressMode[i] = cudaAddressModeClamp; texRef_f6A.addressMode[i] = cudaAddressModeClamp; texRef_f7A.addressMode[i] = cudaAddressModeClamp; texRef_f8A.addressMode[i] = cudaAddressModeClamp; texRef_f9A.addressMode[i] = cudaAddressModeClamp; texRef_f10A.addressMode[i] = cudaAddressModeClamp; texRef_f11A.addressMode[i] = cudaAddressModeClamp; texRef_f12A.addressMode[i] = cudaAddressModeClamp; texRef_f13A.addressMode[i] = cudaAddressModeClamp; texRef_f14A.addressMode[i] = cudaAddressModeClamp; texRef_f15A.addressMode[i] = cudaAddressModeClamp; texRef_f16A.addressMode[i] = cudaAddressModeClamp; texRef_f17A.addressMode[i] = cudaAddressModeClamp; texRef_f18A.addressMode[i] = cudaAddressModeClamp; texRef_f0B.addressMode[i] = cudaAddressModeClamp; texRef_f1B.addressMode[i] = cudaAddressModeClamp; texRef_f2B.addressMode[i] = cudaAddressModeClamp; texRef_f3B.addressMode[i] = cudaAddressModeClamp; 
texRef_f4B.addressMode[i] = cudaAddressModeClamp; texRef_f5B.addressMode[i] = cudaAddressModeClamp; texRef_f6B.addressMode[i] = cudaAddressModeClamp; texRef_f7B.addressMode[i] = cudaAddressModeClamp; texRef_f8B.addressMode[i] = cudaAddressModeClamp; texRef_f9B.addressMode[i] = cudaAddressModeClamp; texRef_f10B.addressMode[i] = cudaAddressModeClamp; texRef_f11B.addressMode[i] = cudaAddressModeClamp; texRef_f12B.addressMode[i] = cudaAddressModeClamp; texRef_f13B.addressMode[i] = cudaAddressModeClamp; texRef_f14B.addressMode[i] = cudaAddressModeClamp; texRef_f15B.addressMode[i] = cudaAddressModeClamp; texRef_f16B.addressMode[i] = cudaAddressModeClamp; texRef_f17B.addressMode[i] = cudaAddressModeClamp; texRef_f18B.addressMode[i] = cudaAddressModeClamp; } } cudaMemcpy2D(fA_d,pitch ,fA_h,XDIM*sizeof(float),XDIM*sizeof(float),YDIM*ZDIM*19,cudaMemcpyHostToDevice); cudaMemcpy2D(fB_d,pitch ,fA_h,XDIM*sizeof(float),XDIM*sizeof(float),YDIM*ZDIM*19,cudaMemcpyHostToDevice); if(REFINEMENT == "YES"){ cudaMemcpy2D(fC_d,pitch2,fC_h,XLRDIM*sizeof(float),XLRDIM*sizeof(float),YLRDIM*ZLRDIM*19,cudaMemcpyHostToDevice); cudaMemcpy2D(fD_d,pitch2,fC_h,XLRDIM*sizeof(float),XLRDIM*sizeof(float),YLRDIM*ZLRDIM*19,cudaMemcpyHostToDevice); } // for (i = 0; i < n*19; i++) // { // fA_h[i] = 0; // fC_h[i] = 1; // } if(true)//bind texture { cudaBindTexture2D(0,&texRef_f0A, fA_d ,&desc,XDIM,YDIM*ZDIM,pitch); cudaBindTexture2D(0,&texRef_f1A, fA_d+pitch_elements*YDIM*ZDIM ,&desc,XDIM,YDIM*ZDIM,pitch); cudaBindTexture2D(0,&texRef_f2A, fA_d+pitch_elements*YDIM*ZDIM*2 ,&desc,XDIM,YDIM*ZDIM,pitch); cudaBindTexture2D(0,&texRef_f3A, fA_d+pitch_elements*YDIM*ZDIM*3 ,&desc,XDIM,YDIM*ZDIM,pitch); cudaBindTexture2D(0,&texRef_f4A, fA_d+pitch_elements*YDIM*ZDIM*4 ,&desc,XDIM,YDIM*ZDIM,pitch); cudaBindTexture2D(0,&texRef_f5A, fA_d+pitch_elements*YDIM*ZDIM*5 ,&desc,XDIM,YDIM*ZDIM,pitch); cudaBindTexture2D(0,&texRef_f6A, fA_d+pitch_elements*YDIM*ZDIM*6 ,&desc,XDIM,YDIM*ZDIM,pitch); cudaBindTexture2D(0,&texRef_f7A, fA_d+pitch_elements*YDIM*ZDIM*7 ,&desc,XDIM,YDIM*ZDIM,pitch); cudaBindTexture2D(0,&texRef_f8A, fA_d+pitch_elements*YDIM*ZDIM*8 ,&desc,XDIM,YDIM*ZDIM,pitch); cudaBindTexture2D(0,&texRef_f9A, fA_d+pitch_elements*YDIM*ZDIM*9 ,&desc,XDIM,YDIM*ZDIM,pitch); cudaBindTexture2D(0,&texRef_f10A,fA_d+pitch_elements*YDIM*ZDIM*10,&desc,XDIM,YDIM*ZDIM,pitch); cudaBindTexture2D(0,&texRef_f11A,fA_d+pitch_elements*YDIM*ZDIM*11,&desc,XDIM,YDIM*ZDIM,pitch); cudaBindTexture2D(0,&texRef_f12A,fA_d+pitch_elements*YDIM*ZDIM*12,&desc,XDIM,YDIM*ZDIM,pitch); cudaBindTexture2D(0,&texRef_f13A,fA_d+pitch_elements*YDIM*ZDIM*13,&desc,XDIM,YDIM*ZDIM,pitch); cudaBindTexture2D(0,&texRef_f14A,fA_d+pitch_elements*YDIM*ZDIM*14,&desc,XDIM,YDIM*ZDIM,pitch); cudaBindTexture2D(0,&texRef_f15A,fA_d+pitch_elements*YDIM*ZDIM*15,&desc,XDIM,YDIM*ZDIM,pitch); cudaBindTexture2D(0,&texRef_f16A,fA_d+pitch_elements*YDIM*ZDIM*16,&desc,XDIM,YDIM*ZDIM,pitch); cudaBindTexture2D(0,&texRef_f17A,fA_d+pitch_elements*YDIM*ZDIM*17,&desc,XDIM,YDIM*ZDIM,pitch); cudaBindTexture2D(0,&texRef_f18A,fA_d+pitch_elements*YDIM*ZDIM*18,&desc,XDIM,YDIM*ZDIM,pitch); cudaBindTexture2D(0,&texRef_f0B, fB_d ,&desc,XDIM,YDIM*ZDIM,pitch); cudaBindTexture2D(0,&texRef_f1B, fB_d+pitch_elements*YDIM*ZDIM ,&desc,XDIM,YDIM*ZDIM,pitch); cudaBindTexture2D(0,&texRef_f2B, fB_d+pitch_elements*YDIM*ZDIM*2 ,&desc,XDIM,YDIM*ZDIM,pitch); cudaBindTexture2D(0,&texRef_f3B, fB_d+pitch_elements*YDIM*ZDIM*3 ,&desc,XDIM,YDIM*ZDIM,pitch); cudaBindTexture2D(0,&texRef_f4B, fB_d+pitch_elements*YDIM*ZDIM*4 ,&desc,XDIM,YDIM*ZDIM,pitch); 
cudaBindTexture2D(0,&texRef_f5B, fB_d+pitch_elements*YDIM*ZDIM*5 ,&desc,XDIM,YDIM*ZDIM,pitch); cudaBindTexture2D(0,&texRef_f6B, fB_d+pitch_elements*YDIM*ZDIM*6 ,&desc,XDIM,YDIM*ZDIM,pitch); cudaBindTexture2D(0,&texRef_f7B, fB_d+pitch_elements*YDIM*ZDIM*7 ,&desc,XDIM,YDIM*ZDIM,pitch); cudaBindTexture2D(0,&texRef_f8B, fB_d+pitch_elements*YDIM*ZDIM*8 ,&desc,XDIM,YDIM*ZDIM,pitch); cudaBindTexture2D(0,&texRef_f9B, fB_d+pitch_elements*YDIM*ZDIM*9 ,&desc,XDIM,YDIM*ZDIM,pitch); cudaBindTexture2D(0,&texRef_f10B,fB_d+pitch_elements*YDIM*ZDIM*10,&desc,XDIM,YDIM*ZDIM,pitch); cudaBindTexture2D(0,&texRef_f11B,fB_d+pitch_elements*YDIM*ZDIM*11,&desc,XDIM,YDIM*ZDIM,pitch); cudaBindTexture2D(0,&texRef_f12B,fB_d+pitch_elements*YDIM*ZDIM*12,&desc,XDIM,YDIM*ZDIM,pitch); cudaBindTexture2D(0,&texRef_f13B,fB_d+pitch_elements*YDIM*ZDIM*13,&desc,XDIM,YDIM*ZDIM,pitch); cudaBindTexture2D(0,&texRef_f14B,fB_d+pitch_elements*YDIM*ZDIM*14,&desc,XDIM,YDIM*ZDIM,pitch); cudaBindTexture2D(0,&texRef_f15B,fB_d+pitch_elements*YDIM*ZDIM*15,&desc,XDIM,YDIM*ZDIM,pitch); cudaBindTexture2D(0,&texRef_f16B,fB_d+pitch_elements*YDIM*ZDIM*16,&desc,XDIM,YDIM*ZDIM,pitch); cudaBindTexture2D(0,&texRef_f17B,fB_d+pitch_elements*YDIM*ZDIM*17,&desc,XDIM,YDIM*ZDIM,pitch); cudaBindTexture2D(0,&texRef_f18B,fB_d+pitch_elements*YDIM*ZDIM*18,&desc,XDIM,YDIM*ZDIM,pitch); // if(REFINEMENT == "YES"){ cudaBindTexture2D(0,&texRef_f0C, fC_d ,&desc,XLRDIM,YLRDIM*ZLRDIM,pitch2); cudaBindTexture2D(0,&texRef_f1C, fC_d+pitch_elements2*YLRDIM*ZLRDIM ,&desc,XLRDIM,YLRDIM*ZLRDIM,pitch2); cudaBindTexture2D(0,&texRef_f2C, fC_d+pitch_elements2*YLRDIM*ZLRDIM*2 ,&desc,XLRDIM,YLRDIM*ZLRDIM,pitch2); cudaBindTexture2D(0,&texRef_f3C, fC_d+pitch_elements2*YLRDIM*ZLRDIM*3 ,&desc,XLRDIM,YLRDIM*ZLRDIM,pitch2); cudaBindTexture2D(0,&texRef_f4C, fC_d+pitch_elements2*YLRDIM*ZLRDIM*4 ,&desc,XLRDIM,YLRDIM*ZLRDIM,pitch2); cudaBindTexture2D(0,&texRef_f5C, fC_d+pitch_elements2*YLRDIM*ZLRDIM*5 ,&desc,XLRDIM,YLRDIM*ZLRDIM,pitch2); cudaBindTexture2D(0,&texRef_f6C, fC_d+pitch_elements2*YLRDIM*ZLRDIM*6 ,&desc,XLRDIM,YLRDIM*ZLRDIM,pitch2); cudaBindTexture2D(0,&texRef_f7C, fC_d+pitch_elements2*YLRDIM*ZLRDIM*7 ,&desc,XLRDIM,YLRDIM*ZLRDIM,pitch2); cudaBindTexture2D(0,&texRef_f8C, fC_d+pitch_elements2*YLRDIM*ZLRDIM*8 ,&desc,XLRDIM,YLRDIM*ZLRDIM,pitch2); cudaBindTexture2D(0,&texRef_f9C, fC_d+pitch_elements2*YLRDIM*ZLRDIM*9 ,&desc,XLRDIM,YLRDIM*ZLRDIM,pitch2); cudaBindTexture2D(0,&texRef_f10C,fC_d+pitch_elements2*YLRDIM*ZLRDIM*10,&desc,XLRDIM,YLRDIM*ZLRDIM,pitch2); cudaBindTexture2D(0,&texRef_f11C,fC_d+pitch_elements2*YLRDIM*ZLRDIM*11,&desc,XLRDIM,YLRDIM*ZLRDIM,pitch2); cudaBindTexture2D(0,&texRef_f12C,fC_d+pitch_elements2*YLRDIM*ZLRDIM*12,&desc,XLRDIM,YLRDIM*ZLRDIM,pitch2); cudaBindTexture2D(0,&texRef_f13C,fC_d+pitch_elements2*YLRDIM*ZLRDIM*13,&desc,XLRDIM,YLRDIM*ZLRDIM,pitch2); cudaBindTexture2D(0,&texRef_f14C,fC_d+pitch_elements2*YLRDIM*ZLRDIM*14,&desc,XLRDIM,YLRDIM*ZLRDIM,pitch2); cudaBindTexture2D(0,&texRef_f15C,fC_d+pitch_elements2*YLRDIM*ZLRDIM*15,&desc,XLRDIM,YLRDIM*ZLRDIM,pitch2); cudaBindTexture2D(0,&texRef_f16C,fC_d+pitch_elements2*YLRDIM*ZLRDIM*16,&desc,XLRDIM,YLRDIM*ZLRDIM,pitch2); cudaBindTexture2D(0,&texRef_f17C,fC_d+pitch_elements2*YLRDIM*ZLRDIM*17,&desc,XLRDIM,YLRDIM*ZLRDIM,pitch2); cudaBindTexture2D(0,&texRef_f18C,fC_d+pitch_elements2*YLRDIM*ZLRDIM*18,&desc,XLRDIM,YLRDIM*ZLRDIM,pitch2); cudaBindTexture2D(0,&texRef_f0D, fD_d ,&desc,XLRDIM,YLRDIM*ZLRDIM,pitch2); cudaBindTexture2D(0,&texRef_f1D, fD_d+pitch_elements2*YLRDIM*ZLRDIM ,&desc,XLRDIM,YLRDIM*ZLRDIM,pitch2); 
cudaBindTexture2D(0,&texRef_f2D, fD_d+pitch_elements2*YLRDIM*ZLRDIM*2 ,&desc,XLRDIM,YLRDIM*ZLRDIM,pitch2); cudaBindTexture2D(0,&texRef_f3D, fD_d+pitch_elements2*YLRDIM*ZLRDIM*3 ,&desc,XLRDIM,YLRDIM*ZLRDIM,pitch2); cudaBindTexture2D(0,&texRef_f4D, fD_d+pitch_elements2*YLRDIM*ZLRDIM*4 ,&desc,XLRDIM,YLRDIM*ZLRDIM,pitch2); cudaBindTexture2D(0,&texRef_f5D, fD_d+pitch_elements2*YLRDIM*ZLRDIM*5 ,&desc,XLRDIM,YLRDIM*ZLRDIM,pitch2); cudaBindTexture2D(0,&texRef_f6D, fD_d+pitch_elements2*YLRDIM*ZLRDIM*6 ,&desc,XLRDIM,YLRDIM*ZLRDIM,pitch2); cudaBindTexture2D(0,&texRef_f7D, fD_d+pitch_elements2*YLRDIM*ZLRDIM*7 ,&desc,XLRDIM,YLRDIM*ZLRDIM,pitch2); cudaBindTexture2D(0,&texRef_f8D, fD_d+pitch_elements2*YLRDIM*ZLRDIM*8 ,&desc,XLRDIM,YLRDIM*ZLRDIM,pitch2); cudaBindTexture2D(0,&texRef_f9D, fD_d+pitch_elements2*YLRDIM*ZLRDIM*9 ,&desc,XLRDIM,YLRDIM*ZLRDIM,pitch2); cudaBindTexture2D(0,&texRef_f10D,fD_d+pitch_elements2*YLRDIM*ZLRDIM*10,&desc,XLRDIM,YLRDIM*ZLRDIM,pitch2); cudaBindTexture2D(0,&texRef_f11D,fD_d+pitch_elements2*YLRDIM*ZLRDIM*11,&desc,XLRDIM,YLRDIM*ZLRDIM,pitch2); cudaBindTexture2D(0,&texRef_f12D,fD_d+pitch_elements2*YLRDIM*ZLRDIM*12,&desc,XLRDIM,YLRDIM*ZLRDIM,pitch2); cudaBindTexture2D(0,&texRef_f13D,fD_d+pitch_elements2*YLRDIM*ZLRDIM*13,&desc,XLRDIM,YLRDIM*ZLRDIM,pitch2); cudaBindTexture2D(0,&texRef_f14D,fD_d+pitch_elements2*YLRDIM*ZLRDIM*14,&desc,XLRDIM,YLRDIM*ZLRDIM,pitch2); cudaBindTexture2D(0,&texRef_f15D,fD_d+pitch_elements2*YLRDIM*ZLRDIM*15,&desc,XLRDIM,YLRDIM*ZLRDIM,pitch2); cudaBindTexture2D(0,&texRef_f16D,fD_d+pitch_elements2*YLRDIM*ZLRDIM*16,&desc,XLRDIM,YLRDIM*ZLRDIM,pitch2); cudaBindTexture2D(0,&texRef_f17D,fD_d+pitch_elements2*YLRDIM*ZLRDIM*17,&desc,XLRDIM,YLRDIM*ZLRDIM,pitch2); cudaBindTexture2D(0,&texRef_f18D,fD_d+pitch_elements2*YLRDIM*ZLRDIM*18,&desc,XLRDIM,YLRDIM*ZLRDIM,pitch2); // } } initialize_single<<<grid, threads>>>(fA_d,pitch_elements); initialize_single<<<grid, threads>>>(fB_d,pitch_elements); if(REFINEMENT == "YES"){ initialize_LR<<<grid2, threads2>>>(fC_d,pitch_elements2); initialize_LR<<<grid2, threads2>>>(fD_d,pitch_elements2); } cudaFuncSetCacheConfig(mrt_d_single,cudaFuncCachePreferL1); cudaFuncSetCacheConfig(LR_d_ABCD_force,cudaFuncCachePreferL1); cudaFuncSetCacheConfig(LR_d_ABCD,cudaFuncCachePreferL1); cudaFuncSetCacheConfig(LR_d_ABCD2,cudaFuncCachePreferL1); cudaFuncSetCacheConfig(LR_d_ABDC2,cudaFuncCachePreferL1); cudaFuncSetCacheConfig(LR_d_ABDC_Interp,cudaFuncCachePreferL1); cudaFuncSetCacheConfig(LR_d_BACD_force,cudaFuncCachePreferL1); cudaFuncSetCacheConfig(LR_d_BACD,cudaFuncCachePreferL1); cudaFuncSetCacheConfig(LR_d_BADC2,cudaFuncCachePreferL1); cudaFuncSetCacheConfig(LR_d_BADC_Interp,cudaFuncCachePreferL1); cudaFuncSetCacheConfig(ExtractFromC_d,cudaFuncCachePreferL1); cudaFuncSetCacheConfig(simple_copy,cudaFuncCachePreferL1); struct timeval tdr0,tdr1; double restime; cudaDeviceSynchronize(); gettimeofday (&tdr0,NULL); for(int t = 0; t<TMAX; t=t+2){ if(METHOD == "SINGLE"){ if(t >= STARTF) mrt_d_single_force<<<grid, threads>>>(fA_d,fB_d,omega,pitch_elements,FX_d,FY_d,FZ_d,t,uAv_d,vAv_d,ufluc_d,vfluc_d); else mrt_d_single<<<grid, threads>>>(fA_d,fB_d,omega,pitch_elements); if(REFINEMENT == "YES"){ if(LRFACTOR == 0.5f) { if(t >= STARTF) LR_d_ABCD_force<<<grid2, threads2>>>(fC_d,fD_d,omega2,pitch_elements2,FX_d,FY_d,FZ_d,t,uAvLR_d,vAvLR_d,uflucLR_d,vflucLR_d); else LR_d_ABCD<<<grid2, threads2>>>(fC_d,fD_d,omega2,pitch_elements2,t,uAvLR_d,vAvLR_d,uflucLR_d,vflucLR_d); //LR_d_ABDC<<<grid2, threads2>>>(fD_d,fC_d,omega2,pitch_elements2,SF_cf); 
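// Interface coupling for the refined region: LR_d_ABDC_Interp is passed SF_cf
// (= omega*(1.f-omega2)/((1.f-omega)*omega2/LRFACTOR), computed above), the usual
// grid-refinement rescaling of the non-equilibrium part of the distributions when the
// relaxation rate changes between the coarse grid (omega) and the fine grid (omega2,
// spacing LRFACTOR). The reverse, fine-to-coarse transfer is handled by ExtractFromC_d
// after the coarse step; the active call passes omega and omega2 rather than the
// precomputed SF_fc.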
LR_d_ABDC_Interp<<<grid2, threads2>>>(fD_d,fC_d,omega2,pitch_elements2,SF_cf,t,uAvLR_d,vAvLR_d,uflucLR_d,vflucLR_d); } else if(LRFACTOR == 0.25f) { if(t >= STARTF) LR_d_ABCD_force<<<grid2, threads2>>>(fC_d,fD_d,omega2,pitch_elements2,FX_d,FY_d,FZ_d,t,uAvLR_d,vAvLR_d,uflucLR_d,vflucLR_d); else LR_d_ABCD<<<grid2, threads2>>>(fC_d,fD_d,omega2,pitch_elements2,t,uAvLR_d,vAvLR_d,uflucLR_d,vflucLR_d); LR_d_ABDC2<<<grid2, threads2>>>(fD_d,fC_d,omega2,pitch_elements2,SF_cf,2,t,uAvLR_d,vAvLR_d,uflucLR_d,vflucLR_d); LR_d_ABCD2<<<grid2, threads2>>>(fC_d,fD_d,omega2,pitch_elements2,3,t,uAvLR_d,vAvLR_d,uflucLR_d,vflucLR_d); LR_d_ABDC_Interp<<<grid2, threads2>>>(fD_d,fC_d,omega2,pitch_elements2,SF_cf,t,uAvLR_d,vAvLR_d,uflucLR_d,vflucLR_d); } //ExtractFromC_d<<<grid, threads>>>(fB_d,pitch_elements,SF_fc); ExtractFromC_d<<<grid, threads>>>(fB_d,pitch_elements,omega,omega2); } if(t >= STARTF) mrt_d_single_force<<<grid, threads>>>(fB_d,fA_d,omega,pitch_elements,FX_d,FY_d,FZ_d,t+1,uAv_d,vAv_d,ufluc_d,vfluc_d); else mrt_d_single<<<grid, threads>>>(fB_d,fA_d,omega,pitch_elements); if(REFINEMENT == "YES"){ if(LRFACTOR == 0.5f) { if(t >= STARTF) LR_d_BACD_force<<<grid2, threads2>>>(fC_d,fD_d,omega2,pitch_elements2,FX_d,FY_d,FZ_d,t+1,uAvLR_d,vAvLR_d,uflucLR_d,vflucLR_d); else LR_d_BACD<<<grid2, threads2>>>(fC_d,fD_d,omega2,pitch_elements2,t+1,uAvLR_d,vAvLR_d,uflucLR_d,vflucLR_d); LR_d_BADC_Interp<<<grid2, threads2>>>(fD_d,fC_d,omega2,pitch_elements2,SF_cf,t+1,uAvLR_d,vAvLR_d,uflucLR_d,vflucLR_d); } else if(LRFACTOR == 0.25f) { if(t >= STARTF) LR_d_BACD_force<<<grid2, threads2>>>(fC_d,fD_d,omega2,pitch_elements2,FX_d,FY_d,FZ_d,t+1,uAvLR_d,vAvLR_d,uflucLR_d,vflucLR_d); else LR_d_BACD<<<grid2, threads2>>>(fC_d,fD_d,omega2,pitch_elements2,t+1,uAvLR_d,vAvLR_d,uflucLR_d,vflucLR_d); LR_d_BADC2<<<grid2, threads2>>>(fD_d,fC_d,omega2,pitch_elements2,SF_cf,2,t+1,uAvLR_d,vAvLR_d,uflucLR_d,vflucLR_d); LR_d_ABCD2<<<grid2, threads2>>>(fC_d,fD_d,omega2,pitch_elements2,3,t+1,uAvLR_d,vAvLR_d,uflucLR_d,vflucLR_d); LR_d_BADC_Interp<<<grid2, threads2>>>(fD_d,fC_d,omega2,pitch_elements2,SF_cf,t+1,uAvLR_d,vAvLR_d,uflucLR_d,vflucLR_d); } //ExtractFromC_d<<<grid, threads>>>(fA_d,pitch_elements,SF_fc); ExtractFromC_d<<<grid, threads>>>(fA_d,pitch_elements,omega,omega2); } } // else if(METHOD == "CACHE"){ // mrt_d_cache<<<grid, threads>>>(fA_d,fB_d,omega,pitch_elements); // mrt_d_cache<<<grid, threads>>>(fB_d,fA_d,omega,pitch_elements); // } // // else if(METHOD == "HYB"){ // if(t >= STARTF && REFINEMENT == "NO") // mrt_d_hybAB_force<<<grid, threads>>>(fA_d,fB_d,omega,pitch_elements,FX_d,FY_d,FZ_d,t); // else // mrt_d_hybAB<<<grid, threads>>>(fA_d,fB_d,omega,pitch_elements); // // if(REFINEMENT == "YES"){ // if(LRFACTOR == 0.5f) // { // if(t >= STARTF) // LR_d_hybABCD_force<<<grid2, threads2>>>(fC_d,fD_d,omega2,pitch_elements2,FX_d,FY_d,FZ_d,t); // else // LR_d_hybABCD<<<grid2, threads2>>>(fC_d,fD_d,omega2,pitch_elements2); // // LR_d_hybABDC_Interp<<<grid2, threads2>>>(fD_d,fC_d,omega2,pitch_elements2,SF_cf); // } // else if(LRFACTOR == 0.25f) // { // if(t >= STARTF) // LR_d_hybABCD_force<<<grid2, threads2>>>(fC_d,fD_d,omega2,pitch_elements2,FX_d,FY_d,FZ_d,t); // else // LR_d_hybABCD<<<grid2, threads2>>>(fC_d,fD_d,omega2,pitch_elements2); // // LR_d_hybABDC2<<<grid2, threads2>>>(fD_d,fC_d,omega2,pitch_elements2,SF_cf,2); // // LR_d_hybABCD2<<<grid2, threads2>>>(fC_d,fD_d,omega2,pitch_elements2,3); // LR_d_hybABDC_Interp<<<grid2, threads2>>>(fD_d,fC_d,omega2,pitch_elements2,SF_cf); // } // // ExtractFromC_d<<<grid, 
threads>>>(fB_d,pitch_elements,SF_fc); // } // // if(t >= STARTF && REFINEMENT == "NO") // mrt_d_hybBA_force<<<grid, threads>>>(fB_d,fA_d,omega,pitch_elements,FX_d,FY_d,FZ_d,t+1); // else // mrt_d_hybBA<<<grid, threads>>>(fB_d,fA_d,omega,pitch_elements); // // if(REFINEMENT == "YES"){ // if(LRFACTOR == 0.5f) // { // if(t >= STARTF) // LR_d_hybABCD_force<<<grid2, threads2>>>(fC_d,fD_d,omega2,pitch_elements2,FX_d,FY_d,FZ_d,t+1); // else // LR_d_hybABCD<<<grid2, threads2>>>(fC_d,fD_d,omega2,pitch_elements2); // // LR_d_hybBADC_Interp<<<grid2, threads2>>>(fD_d,fC_d,omega2,pitch_elements2,SF_cf); // } // else if(LRFACTOR == 0.25f) // { // if(t >= STARTF) // LR_d_hybABCD_force<<<grid2, threads2>>>(fC_d,fD_d,omega2,pitch_elements2,FX_d,FY_d,FZ_d,t+1); // else // LR_d_hybABCD<<<grid2, threads2>>>(fC_d,fD_d,omega2,pitch_elements2); // // LR_d_hybBADC2<<<grid2, threads2>>>(fD_d,fC_d,omega2,pitch_elements2,SF_cf,2); // // LR_d_hybABCD2<<<grid2, threads2>>>(fC_d,fD_d,omega2,pitch_elements2,3); // LR_d_hybBADC_Interp<<<grid2, threads2>>>(fD_d,fC_d,omega2,pitch_elements2,SF_cf); // } // // ExtractFromC_d<<<grid, threads>>>(fA_d,pitch_elements,SF_fc); // } // } // else if(METHOD == "TEXT"){ // mrt_d_textAB<<<grid, threads>>>(fA_d,fB_d,omega,pitch_elements); // mrt_d_textBA<<<grid, threads>>>(fB_d,fA_d,omega,pitch_elements); // } // // else if(METHOD == "SHARED"){ // mrt_d_shared<<<grid, threads>>>(fA_d,fB_d,omega,pitch_elements); // mrt_d_shared<<<grid, threads>>>(fB_d,fA_d,omega,pitch_elements); // } // simple_copy<<<grid, threads>>>(fA_d,fB_d,pitch_elements); // simple_copy<<<grid, threads>>>(fB_d,fA_d,pitch_elements); // // simple_text<<<grid, threads>>>(fA_d,fB_d,pitch_elements); // simple_text<<<grid, threads>>>(fB_d,fA_d,pitch_elements); if(t%1000 == 0 && t>0) cout<<"finished "<<t<<" timesteps\n"; } cudaDeviceSynchronize(); gettimeofday (&tdr1,NULL); timeval_subtract (&restime, &tdr1, &tdr0); int Nodes; if(REFINEMENT == "YES"){ Nodes = (XDIM*YDIM*ZDIM+XLRDIM*YLRDIM*ZLRDIM*LRLEVEL); } else{ Nodes = XDIM*YDIM*ZDIM; } cout<<"Time taken for main kernel: "<<restime<<" (" <<double(Nodes*double(TMAX/1000000.f))/restime<<"MLUPS)"; if(REFINEMENT == "YES"){ int effNodes = (XDIM*YDIM*ZDIM+XLRDIM*YLRDIM*ZLRDIM*LRLEVEL -(XLRDIM/LRLEVEL)*(YLRDIM/LRLEVEL)*(YLRDIM/LRLEVEL)); cout<<" (eff: "<<double(effNodes*double(TMAX/1000000.f))/restime<<"MLUPS)"; } cout<<endl; cout<<XDIM<<","<<YDIM<<","<<ZDIM<<","<<TMAX<<","<<restime<<endl; if(true){ cudaUnbindTexture(texRef_f0A); cudaUnbindTexture(texRef_f1A); cudaUnbindTexture(texRef_f2A); cudaUnbindTexture(texRef_f3A); cudaUnbindTexture(texRef_f4A); cudaUnbindTexture(texRef_f5A); cudaUnbindTexture(texRef_f6A); cudaUnbindTexture(texRef_f7A); cudaUnbindTexture(texRef_f8A); cudaUnbindTexture(texRef_f9A); cudaUnbindTexture(texRef_f10A); cudaUnbindTexture(texRef_f11A); cudaUnbindTexture(texRef_f12A); cudaUnbindTexture(texRef_f13A); cudaUnbindTexture(texRef_f14A); cudaUnbindTexture(texRef_f15A); cudaUnbindTexture(texRef_f16A); cudaUnbindTexture(texRef_f17A); cudaUnbindTexture(texRef_f18A); cudaUnbindTexture(texRef_f0B); cudaUnbindTexture(texRef_f1B); cudaUnbindTexture(texRef_f2B); cudaUnbindTexture(texRef_f3B); cudaUnbindTexture(texRef_f4B); cudaUnbindTexture(texRef_f5B); cudaUnbindTexture(texRef_f6B); cudaUnbindTexture(texRef_f7B); cudaUnbindTexture(texRef_f8B); cudaUnbindTexture(texRef_f9B); cudaUnbindTexture(texRef_f10B); cudaUnbindTexture(texRef_f11B); cudaUnbindTexture(texRef_f12B); cudaUnbindTexture(texRef_f13B); cudaUnbindTexture(texRef_f14B); 
cudaUnbindTexture(texRef_f15B); cudaUnbindTexture(texRef_f16B); cudaUnbindTexture(texRef_f17B); cudaUnbindTexture(texRef_f18B); cudaUnbindTexture(texRef_f0C); cudaUnbindTexture(texRef_f1C); cudaUnbindTexture(texRef_f2C); cudaUnbindTexture(texRef_f3C); cudaUnbindTexture(texRef_f4C); cudaUnbindTexture(texRef_f5C); cudaUnbindTexture(texRef_f6C); cudaUnbindTexture(texRef_f7C); cudaUnbindTexture(texRef_f8C); cudaUnbindTexture(texRef_f9C); cudaUnbindTexture(texRef_f10C); cudaUnbindTexture(texRef_f11C); cudaUnbindTexture(texRef_f12C); cudaUnbindTexture(texRef_f13C); cudaUnbindTexture(texRef_f14C); cudaUnbindTexture(texRef_f15C); cudaUnbindTexture(texRef_f16C); cudaUnbindTexture(texRef_f17C); cudaUnbindTexture(texRef_f18C); cudaUnbindTexture(texRef_f0D); cudaUnbindTexture(texRef_f1D); cudaUnbindTexture(texRef_f2D); cudaUnbindTexture(texRef_f3D); cudaUnbindTexture(texRef_f4D); cudaUnbindTexture(texRef_f5D); cudaUnbindTexture(texRef_f6D); cudaUnbindTexture(texRef_f7D); cudaUnbindTexture(texRef_f8D); cudaUnbindTexture(texRef_f9D); cudaUnbindTexture(texRef_f10D); cudaUnbindTexture(texRef_f11D); cudaUnbindTexture(texRef_f12D); cudaUnbindTexture(texRef_f13D); cudaUnbindTexture(texRef_f14D); cudaUnbindTexture(texRef_f15D); cudaUnbindTexture(texRef_f16D); cudaUnbindTexture(texRef_f17D); cudaUnbindTexture(texRef_f18D); } cudaMemcpy2D(fA_h,XDIM*sizeof(float),fA_d,pitch,XDIM*sizeof(float),YDIM*ZDIM*19,cudaMemcpyDeviceToHost); if(REFINEMENT == "YES"){ cudaMemcpy2D(fC_h,XLRDIM*sizeof(float),fC_d,pitch2,XLRDIM*sizeof(float),YLRDIM*ZLRDIM*19,cudaMemcpyDeviceToHost); } if(VELAV == "YES"){ cudaMemcpy2D(uAv_h,XDIM*sizeof(float),uAv_d,pitch,XDIM*sizeof(float),YDIM*ZDIM,cudaMemcpyDeviceToHost); cudaMemcpy2D(vAv_h,XDIM*sizeof(float),vAv_d,pitch,XDIM*sizeof(float),YDIM*ZDIM,cudaMemcpyDeviceToHost); cudaMemcpy2D(wAv_h,XDIM*sizeof(float),wAv_d,pitch,XDIM*sizeof(float),YDIM*ZDIM,cudaMemcpyDeviceToHost); cudaMemcpy2D(ufluc_h,XDIM*sizeof(float),ufluc_d,pitch,XDIM*sizeof(float),YDIM*ZDIM,cudaMemcpyDeviceToHost); cudaMemcpy2D(vfluc_h,XDIM*sizeof(float),vfluc_d,pitch,XDIM*sizeof(float),YDIM*ZDIM,cudaMemcpyDeviceToHost); cudaMemcpy2D(wfluc_h,XDIM*sizeof(float),wfluc_d,pitch,XDIM*sizeof(float),YDIM*ZDIM,cudaMemcpyDeviceToHost); if(REFINEMENT == "YES"){ cudaMemcpy2D(uAvLR_h,XLRDIM*sizeof(float),uAvLR_d,pitch2,XLRDIM*sizeof(float),YLRDIM*ZLRDIM,cudaMemcpyDeviceToHost); cudaMemcpy2D(vAvLR_h,XLRDIM*sizeof(float),vAvLR_d,pitch2,XLRDIM*sizeof(float),YLRDIM*ZLRDIM,cudaMemcpyDeviceToHost); cudaMemcpy2D(wAvLR_h,XLRDIM*sizeof(float),wAvLR_d,pitch2,XLRDIM*sizeof(float),YLRDIM*ZLRDIM,cudaMemcpyDeviceToHost); cudaMemcpy2D(uflucLR_h,XLRDIM*sizeof(float),uflucLR_d,pitch2,XLRDIM*sizeof(float),YLRDIM*ZLRDIM,cudaMemcpyDeviceToHost); cudaMemcpy2D(vflucLR_h,XLRDIM*sizeof(float),vflucLR_d,pitch2,XLRDIM*sizeof(float),YLRDIM*ZLRDIM,cudaMemcpyDeviceToHost); cudaMemcpy2D(wflucLR_h,XLRDIM*sizeof(float),wflucLR_d,pitch2,XLRDIM*sizeof(float),YLRDIM*ZLRDIM,cudaMemcpyDeviceToHost); } } cudaMemcpy(FX_h, FX_d, TMAX*sizeof(float), cudaMemcpyDeviceToHost); cudaMemcpy(FY_h, FY_d, TMAX*sizeof(float), cudaMemcpyDeviceToHost); cudaMemcpy(FZ_h, FZ_d, TMAX*sizeof(float), cudaMemcpyDeviceToHost); //cudaMemcpy(image_h, image_d, memsize_int, cudaMemcpyDeviceToHost); output<<"VARIABLES = \"X\",\"Y\",\"Z\",\"u\",\"v\",\"w\",\"rho\",\"uAv\",\"vAv\",\"ufluc\",\"vfluc\"\n"; output<<"ZONE F=POINT, I="<<XDIM<<", J="<<YDIM<<", K="<<ZDIM<<"\n"; int row = 0; int col = 0; int dep = 0; i = 0; float rho, u, v, w;//, usqr; //int j; int check = 0; float 
f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18; for(dep = 0; dep<ZDIM; dep++){ for(row = 0; row<YDIM; row++){ for(col = 0; col<XDIM; col++){ i = dep*XDIM*YDIM+row*XDIM+col; f0 = fA_h[i+XDIM*YDIM*ZDIM*0 ]; f1 = fA_h[i+XDIM*YDIM*ZDIM*1 ]; f2 = fA_h[i+XDIM*YDIM*ZDIM*2 ]; f3 = fA_h[i+XDIM*YDIM*ZDIM*3 ]; f4 = fA_h[i+XDIM*YDIM*ZDIM*4 ]; f5 = fA_h[i+XDIM*YDIM*ZDIM*5 ]; f6 = fA_h[i+XDIM*YDIM*ZDIM*6 ]; f7 = fA_h[i+XDIM*YDIM*ZDIM*7 ]; f8 = fA_h[i+XDIM*YDIM*ZDIM*8 ]; f9 = fA_h[i+XDIM*YDIM*ZDIM*9 ]; f10= fA_h[i+XDIM*YDIM*ZDIM*10]; f11= fA_h[i+XDIM*YDIM*ZDIM*11]; f12= fA_h[i+XDIM*YDIM*ZDIM*12]; f13= fA_h[i+XDIM*YDIM*ZDIM*13]; f14= fA_h[i+XDIM*YDIM*ZDIM*14]; f15= fA_h[i+XDIM*YDIM*ZDIM*15]; f16= fA_h[i+XDIM*YDIM*ZDIM*16]; f17= fA_h[i+XDIM*YDIM*ZDIM*17]; f18= fA_h[i+XDIM*YDIM*ZDIM*18]; rho = f0+f1+f2+f3+f4+f5+f6+f7+f8+f9; rho +=f10+f11+f12+f13+f14+f15+f16+f17+f18; u = f1-f3+f5-f6-f7+f8+f10-f12+f15-f17; v = f2-f4+f5+f6-f7-f8+f11-f13+f16-f18; w = f9+f10+f11+f12+f13-f14-f15-f16-f17-f18; float usqr = u*u+v*v+w*w; // float m1 = 19.f*(-f0+ f5+f6+f7+f8+f10+f11+f12+f13+f15+f16+f17+f18 -(u*u+v*v+w*w)); // float m9 = 2.f*f1+ - f2+ 2.f*f3+ - f4+ f5+ f6+ f7+ f8+- f9+ f10+ -2.f*f11+ f12+-2.f*f13+- f14+ f15+ -2.f*f16+ f17+-2.f*f18 -(2.f*u*u-(v*v+w*w)); // float m11 = f2 + f4+ f5+ f6+ f7+ f8+- f9+-f10 +-f12 +- f14+-f15 +-f17 -(v*v-w*w); // float m13 = f5+-f6+ f7+-f8 -u*v; // float m14 = f11 +- f13 + - f16 + f18 -v*w; // float m15 = f10 + - f12 +-f15 + f17 -u*w; // float PI11 = -0.026315789f*m1-0.5f *omega*m9; // float PI22 = -0.026315789f*m1+0.25f*omega*(m9-3.0f*m11); // float PI33 = -0.026315789f*m1+0.25f*omega*(m9+3.0f*m11); // // float PI12 = -1.5f*omega*m13; // float PI23 = -1.5f*omega*m14; // float PI13 = -1.5f*omega*m15; // //float nu0 = ((1.0f/omega)-0.5f)/3.0f; // float Smag = sqrt(2.f*(PI11*PI11+PI22*PI22+PI33*PI33+2.f*PI12*PI12+2.f*PI23*PI23+2.f*PI13*PI13)); float feq0 = 0.1904761791f*rho+-0.597127747f*usqr ; float feq1 = 0.1031746045f*rho+ 0.032375918f*usqr+ 0.1666666667f*u ; float feq2 = 0.1031746045f*rho+ 0.032375918f*usqr+ 0.1666666667f*v ; float feq3 = 0.1031746045f*rho+ 0.032375918f*usqr+ -0.1666666667f*u ; float feq4 = 0.1031746045f*rho+ 0.032375918f*usqr+ -0.1666666667f*v ; float feq5 = 0.0158730149f*rho+ 0.033572690f*usqr+ 0.083333333f*( u+v) ; float feq6 = 0.0158730149f*rho+ 0.033572690f*usqr+ -0.083333333f*( u-v) ; float feq7 = 0.0158730149f*rho+ 0.033572690f*usqr+ -0.083333333f*( u+v) ; float feq8 = 0.0158730149f*rho+ 0.033572690f*usqr+ 0.083333333f*( u-v) ; float feq9 = 0.1031746045f*rho+ 0.032375918f*usqr+ 0.1666666667f*w; float feq10= 0.0158730149f*rho+ 0.033572690f*usqr+ 0.083333333f*( u+w) ; float feq11= 0.0158730149f*rho+ 0.033572690f*usqr+ 0.083333333f*( v+w); float feq12= 0.0158730149f*rho+ 0.033572690f*usqr+ -0.083333333f*( u-w) ; float feq13= 0.0158730149f*rho+ 0.033572690f*usqr+ -0.083333333f*( v-w); float feq14= 0.1031746045f*rho+ 0.032375918f*usqr+ -0.1666666667f*w; float feq15= 0.0158730149f*rho+ 0.033572690f*usqr+ 0.083333333f*(u-w) ; float feq16= 0.0158730149f*rho+ 0.033572690f*usqr+ 0.083333333f*(v-w); float feq17= 0.0158730149f*rho+ 0.033572690f*usqr+ -0.083333333f*(u+w) ; float feq18= 0.0158730149f*rho+ 0.033572690f*usqr+ -0.083333333f*(v+w); feq1 += 0.055555556f*(2.f*u*u-(v*v+w*w)); feq2 += -0.027777778f*(2.f*u*u-(v*v+w*w))+ 0.083333333f*(v*v-w*w); feq3 += 0.055555556f*(2.f*u*u-(v*v+w*w)); feq4 += -0.027777778f*(2.f*u*u-(v*v+w*w))+ 0.083333333f*(v*v-w*w); feq5 += 0.027777778f*(2.f*u*u-(v*v+w*w))+ 0.083333333f*(v*v-w*w)+ 0.25f*u*v ; feq6 += 
0.027777778f*(2.f*u*u-(v*v+w*w))+ 0.083333333f*(v*v-w*w)+ -0.25f*u*v ; feq7 += 0.027777778f*(2.f*u*u-(v*v+w*w))+ 0.083333333f*(v*v-w*w)+ 0.25f*u*v ; feq8 += 0.027777778f*(2.f*u*u-(v*v+w*w))+ 0.083333333f*(v*v-w*w)+ -0.25f*u*v ; feq9 += -0.027777778f*(2.f*u*u-(v*v+w*w))+ -0.083333333f*(v*v-w*w) ; feq10+= 0.027777778f*(2.f*u*u-(v*v+w*w))+ -0.083333333f*(v*v-w*w) + 0.25f*u*w; feq11+= -0.055555556f*(2.f*u*u-(v*v+w*w)) + 0.25f*v*w ; feq12+= 0.027777778f*(2.f*u*u-(v*v+w*w))+ -0.083333333f*(v*v-w*w) + -0.25f*u*w; feq13+= -0.055555556f*(2.f*u*u-(v*v+w*w)) -0.25f*v*w ; feq14+= -0.027777778f*(2.f*u*u-(v*v+w*w))+ -0.083333333f*(v*v-w*w) ; feq15+= 0.027777778f*(2.f*u*u-(v*v+w*w))+ -0.083333333f*(v*v-w*w) + -0.25f*u*w; feq16+= -0.055555556f*(2.f*u*u-(v*v+w*w)) + -0.25f*v*w ; feq17+= 0.027777778f*(2.f*u*u-(v*v+w*w))+ -0.083333333f*(v*v-w*w) + 0.25f*u*w; feq18+= -0.055555556f*(2.f*u*u-(v*v+w*w)) + 0.25f*v*w ; float PI11 = (f1-feq1)+(f3-feq3)+(f5-feq5)+(f6-feq6)+(f7-feq7)+(f8-feq8)+(f10-feq10)+(f12-feq12)+(f15-feq15)+(f17-feq17); float PI22 = (f2-feq2)+(f4-feq4)+(f5-feq5)+(f6-feq6)+(f7-feq7)+(f8-feq8)+(f11-feq11)+(f13-feq13)+(f16-feq16)+(f18-feq18); float PI33 = (f9-feq9)+(f14-feq14)+(f10-feq10)+(f12-feq12)+(f15-feq15)+(f17-feq17)+(f11-feq11)+(f13-feq13)+(f16-feq16)+(f18-feq18); float PI12 = (f5-feq5)+(f7-feq7)-(f6-feq6)-(f8-feq8); float PI13 = (f10-feq10)+(f17-feq17)-(f12-feq12)-(f15-feq15); float PI23 = (f11-feq11)+(f18-feq18)-(f13-feq13)-(f16-feq16); float Q = sqrt(PI11*PI11+PI22*PI22+PI33*PI33+2.f*PI12*PI12+2.f*PI23*PI23+2.f*PI13*PI13); float Smag = Q*3.f*omega/(sqrt(2.f)); output<<col<<", "<<row<<", "<<dep<<", "<<u<<","<<v<<","<<w<<","<<Smag<<"," <<uAv_h[i]<<","<<vAv_h[i]<<", "<<ufluc_h[i]<<","<<vfluc_h[i]<<endl; if(rho>0.f && rho<2.f){ } else{ check = 1; } } } } if(check == 1) cout<<"error!"<<endl; if(REFINEMENT == "YES"){ output<<endl;//<<"VARIABLES = \"X\",\"Y\",\"Z\",\"u\",\"v\",\"w\",\"rho\",\"uAv\"\n"; output<<"VARIABLES = \"X\",\"Y\",\"Z\",\"u\",\"v\",\"w\",\"rho\",\"uAv\",\"vAv\",\"ufluc\",\"vfluc\"\n"; output<<"ZONE F=POINT, I="<<XLRDIM-0<<", J="<<YLRDIM-0<<", K="<<ZLRDIM-0<<"\n"; for(dep = 0; dep<ZLRDIM-0; dep++){ for(row = 0; row<YLRDIM-0; row++){ for(col = 0; col<XLRDIM-0; col++){ i = dep*XLRDIM*YLRDIM+row*XLRDIM+col; f0 = fC_h[i+XLRDIM*YLRDIM*ZLRDIM*0 ]; f1 = fC_h[i+XLRDIM*YLRDIM*ZLRDIM*1 ]; f2 = fC_h[i+XLRDIM*YLRDIM*ZLRDIM*2 ]; f3 = fC_h[i+XLRDIM*YLRDIM*ZLRDIM*3 ]; f4 = fC_h[i+XLRDIM*YLRDIM*ZLRDIM*4 ]; f5 = fC_h[i+XLRDIM*YLRDIM*ZLRDIM*5 ]; f6 = fC_h[i+XLRDIM*YLRDIM*ZLRDIM*6 ]; f7 = fC_h[i+XLRDIM*YLRDIM*ZLRDIM*7 ]; f8 = fC_h[i+XLRDIM*YLRDIM*ZLRDIM*8 ]; f9 = fC_h[i+XLRDIM*YLRDIM*ZLRDIM*9 ]; f10= fC_h[i+XLRDIM*YLRDIM*ZLRDIM*10]; f11= fC_h[i+XLRDIM*YLRDIM*ZLRDIM*11]; f12= fC_h[i+XLRDIM*YLRDIM*ZLRDIM*12]; f13= fC_h[i+XLRDIM*YLRDIM*ZLRDIM*13]; f14= fC_h[i+XLRDIM*YLRDIM*ZLRDIM*14]; f15= fC_h[i+XLRDIM*YLRDIM*ZLRDIM*15]; f16= fC_h[i+XLRDIM*YLRDIM*ZLRDIM*16]; f17= fC_h[i+XLRDIM*YLRDIM*ZLRDIM*17]; f18= fC_h[i+XLRDIM*YLRDIM*ZLRDIM*18]; rho = f0+f1+f2+f3+f4+f5+f6+f7+f8+f9; rho +=f10+f11+f12+f13+f14+f15+f16+f17+f18; u = f1-f3+f5-f6-f7+f8+f10-f12+f15-f17; v = f2-f4+f5+f6-f7-f8+f11-f13+f16-f18; w = f9+f10+f11+f12+f13-f14-f15-f16-f17-f18; float usqr = u*u+v*v+w*w; // float m1 = 19.f*(-f0+ f5+f6+f7+f8+f10+f11+f12+f13+f15+f16+f17+f18 -(u*u+v*v+w*w)); // float m9 = 2.f*f1+ - f2+ 2.f*f3+ - f4+ f5+ f6+ f7+ f8+- f9+ f10+ -2.f*f11+ f12+-2.f*f13+- f14+ f15+ -2.f*f16+ f17+-2.f*f18 -(2.f*u*u-(v*v+w*w)); // float m11 = f2 + f4+ f5+ f6+ f7+ f8+- f9+-f10 +-f12 +- f14+-f15 +-f17 -(v*v-w*w); // float m13 = f5+-f6+ 
f7+-f8 -u*v; // float m14 = f11 +- f13 + - f16 + f18 -v*w; // float m15 = f10 + - f12 +-f15 + f17 -u*w; // // float PI11 = LRLEVEL*-0.026315789f*m1-0.5f *omega*m9; // float PI22 = LRLEVEL*-0.026315789f*m1+0.25f*omega*(m9-3.0f*m11); // float PI33 = LRLEVEL*-0.026315789f*m1+0.25f*omega*(m9+3.0f*m11); // float PI12 = LRLEVEL*-1.5f*omega*m13; // float PI23 = LRLEVEL*-1.5f*omega*m14; // float PI13 = LRLEVEL*-1.5f*omega*m15; // //float nu0 = ((1.0f/omega)-0.5f)/3.0f; // float Smag = sqrt(2.f*(PI11*PI11+PI22*PI22+PI33*PI33+2.f*PI12*PI12+2.f*PI23*PI23+2.f*PI13*PI13)); float feq0 = 0.1904761791f*rho+-0.597127747f*usqr ; float feq1 = 0.1031746045f*rho+ 0.032375918f*usqr+ 0.1666666667f*u ; float feq2 = 0.1031746045f*rho+ 0.032375918f*usqr+ 0.1666666667f*v ; float feq3 = 0.1031746045f*rho+ 0.032375918f*usqr+ -0.1666666667f*u ; float feq4 = 0.1031746045f*rho+ 0.032375918f*usqr+ -0.1666666667f*v ; float feq5 = 0.0158730149f*rho+ 0.033572690f*usqr+ 0.083333333f*( u+v) ; float feq6 = 0.0158730149f*rho+ 0.033572690f*usqr+ -0.083333333f*( u-v) ; float feq7 = 0.0158730149f*rho+ 0.033572690f*usqr+ -0.083333333f*( u+v) ; float feq8 = 0.0158730149f*rho+ 0.033572690f*usqr+ 0.083333333f*( u-v) ; float feq9 = 0.1031746045f*rho+ 0.032375918f*usqr+ 0.1666666667f*w; float feq10= 0.0158730149f*rho+ 0.033572690f*usqr+ 0.083333333f*( u+w) ; float feq11= 0.0158730149f*rho+ 0.033572690f*usqr+ 0.083333333f*( v+w); float feq12= 0.0158730149f*rho+ 0.033572690f*usqr+ -0.083333333f*( u-w) ; float feq13= 0.0158730149f*rho+ 0.033572690f*usqr+ -0.083333333f*( v-w); float feq14= 0.1031746045f*rho+ 0.032375918f*usqr+ -0.1666666667f*w; float feq15= 0.0158730149f*rho+ 0.033572690f*usqr+ 0.083333333f*(u-w) ; float feq16= 0.0158730149f*rho+ 0.033572690f*usqr+ 0.083333333f*(v-w); float feq17= 0.0158730149f*rho+ 0.033572690f*usqr+ -0.083333333f*(u+w) ; float feq18= 0.0158730149f*rho+ 0.033572690f*usqr+ -0.083333333f*(v+w); feq1 += 0.055555556f*(2.f*u*u-(v*v+w*w)); feq2 += -0.027777778f*(2.f*u*u-(v*v+w*w))+ 0.083333333f*(v*v-w*w); feq3 += 0.055555556f*(2.f*u*u-(v*v+w*w)); feq4 += -0.027777778f*(2.f*u*u-(v*v+w*w))+ 0.083333333f*(v*v-w*w); feq5 += 0.027777778f*(2.f*u*u-(v*v+w*w))+ 0.083333333f*(v*v-w*w)+ 0.25f*u*v ; feq6 += 0.027777778f*(2.f*u*u-(v*v+w*w))+ 0.083333333f*(v*v-w*w)+ -0.25f*u*v ; feq7 += 0.027777778f*(2.f*u*u-(v*v+w*w))+ 0.083333333f*(v*v-w*w)+ 0.25f*u*v ; feq8 += 0.027777778f*(2.f*u*u-(v*v+w*w))+ 0.083333333f*(v*v-w*w)+ -0.25f*u*v ; feq9 += -0.027777778f*(2.f*u*u-(v*v+w*w))+ -0.083333333f*(v*v-w*w) ; feq10+= 0.027777778f*(2.f*u*u-(v*v+w*w))+ -0.083333333f*(v*v-w*w) + 0.25f*u*w; feq11+= -0.055555556f*(2.f*u*u-(v*v+w*w)) + 0.25f*v*w ; feq12+= 0.027777778f*(2.f*u*u-(v*v+w*w))+ -0.083333333f*(v*v-w*w) + -0.25f*u*w; feq13+= -0.055555556f*(2.f*u*u-(v*v+w*w)) -0.25f*v*w ; feq14+= -0.027777778f*(2.f*u*u-(v*v+w*w))+ -0.083333333f*(v*v-w*w) ; feq15+= 0.027777778f*(2.f*u*u-(v*v+w*w))+ -0.083333333f*(v*v-w*w) + -0.25f*u*w; feq16+= -0.055555556f*(2.f*u*u-(v*v+w*w)) + -0.25f*v*w ; feq17+= 0.027777778f*(2.f*u*u-(v*v+w*w))+ -0.083333333f*(v*v-w*w) + 0.25f*u*w; feq18+= -0.055555556f*(2.f*u*u-(v*v+w*w)) + 0.25f*v*w ; float PI11 = (f1-feq1)+(f3-feq3)+(f5-feq5)+(f6-feq6)+(f7-feq7)+(f8-feq8)+(f10-feq10)+(f12-feq12)+(f15-feq15)+(f17-feq17); float PI22 = (f2-feq2)+(f4-feq4)+(f5-feq5)+(f6-feq6)+(f7-feq7)+(f8-feq8)+(f11-feq11)+(f13-feq13)+(f16-feq16)+(f18-feq18); float PI33 = (f9-feq9)+(f14-feq14)+(f10-feq10)+(f12-feq12)+(f15-feq15)+(f17-feq17)+(f11-feq11)+(f13-feq13)+(f16-feq16)+(f18-feq18); float PI12 = (f5-feq5)+(f7-feq7)-(f6-feq6)-(f8-feq8); 
float PI13 = (f10-feq10)+(f17-feq17)-(f12-feq12)-(f15-feq15); float PI23 = (f11-feq11)+(f18-feq18)-(f13-feq13)-(f16-feq16); float Q = sqrt(PI11*PI11+PI22*PI22+PI33*PI33+2.f*PI12*PI12+2.f*PI23*PI23+2.f*PI13*PI13); float Smag = Q*LRLEVEL*3.f*omega2/(sqrt(2.f)); output<<LRX0+col*LRFACTOR<<", "<<LRY0+row*LRFACTOR<<", " <<LRZ0+dep*LRFACTOR<<", "<<u<<","<<v<<","<<w<<","<<Smag<<"," <<uAvLR_h[i]<<","<<vAvLR_h[i]<<", "<<uflucLR_h[i]<<","<<vflucLR_h[i]<<endl; //output<<LRX0+col*LRFACTOR<<", "<<LRY0+row*LRFACTOR<<", "<<LRZ0+dep*LRFACTOR<<", "<<u<<","<<v<<","<<w<<","<<rho<<endl; } } } } output.close(); //for(int t = STARTF-1; t<TMAX; t++){ for(int t = 0; t<TMAX; t++){ output2<<t<<", "<<FX_h[t]/(0.5f*UMAX*UMAX*2.f*OBSTR1*ZDIM)<<", " <<FY_h[t]/(0.5f*UMAX*UMAX*2.f*OBSTR1*ZDIM)<<", " <<FZ_h[t]/(0.5f*UMAX*UMAX*2.f*OBSTR1*ZDIM)<<endl; // output2<<t<<", "<<FX_h[t]/(0.5f*UMAX*UMAX*OBSTR1*OBSTR1*3.14158f)<<", " // <<FY_h[t]/(0.5f*UMAX*UMAX*OBSTR1*OBSTR1*3.14158f)<<", " // <<FZ_h[t]/(0.5f*UMAX*UMAX*OBSTR1*OBSTR1*3.14158f)<<endl; } output2.close(); //cudaFree(image_d); cudaFree(fA_d); cudaFree(fB_d); cudaFree(fC_d); cudaFree(fD_d); return(0); }
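For reference, the throughput printed after the main time loop is million lattice updates per second, written out here from the expression in the listing (t_wall is the elapsed wall-clock time in seconds from gettimeofday, and the node counts use the grid dimensions defined earlier in the file):

\[
\mathrm{MLUPS} = \frac{N_\mathrm{nodes} \cdot T_\mathrm{MAX}}{10^{6} \cdot t_\mathrm{wall}},
\qquad
N_\mathrm{nodes} =
\begin{cases}
\mathrm{XDIM}\cdot\mathrm{YDIM}\cdot\mathrm{ZDIM} + \mathrm{XLRDIM}\cdot\mathrm{YLRDIM}\cdot\mathrm{ZLRDIM}\cdot\mathrm{LRLEVEL}, & \mathrm{REFINEMENT=YES},\\
\mathrm{XDIM}\cdot\mathrm{YDIM}\cdot\mathrm{ZDIM}, & \text{otherwise.}
\end{cases}
\]

The "eff" figure uses the same formula with the coarse cells overlapped by the refined patch subtracted from the node count.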
2bea91c42889f8fd611a2a356d8f74228d8f84a2.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "caffe/layers/wing_loss_layer.hpp"

namespace caffe {

template <typename Dtype>
__global__ void WingLossForward(const int n, const Dtype* abs_d, Dtype* log_d,
    const float w, const float epsilon, const float c) {
  CUDA_KERNEL_LOOP(index, n) {
    Dtype abs_val = abs_d[index];
    if (abs_val < w) {
      log_d[index] = w * log_d[index];
    } else {
      log_d[index] = abs_val - c;
    }
  }
}

template <typename Dtype>
void WingLossLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
    const vector<Blob<Dtype>*>& top) {
  const Dtype* bottom_data = bottom[0]->gpu_data();
  const Dtype* label_data = bottom[1]->gpu_data();
  Dtype* sub_x_data = diff_.mutable_gpu_data();
  Dtype* abs_x_data = abs_x.mutable_gpu_data();
  Dtype* log_abs_data = log_abs.mutable_gpu_data();
  int count = bottom[0]->count();
  caffe_set(count, Dtype(0), sub_x_data);
  caffe_set(count, Dtype(0), abs_x_data);
  caffe_set(count, Dtype(1.0), log_abs_data);
  caffe_sub(count, bottom_data, label_data, sub_x_data);
  caffe_abs(count, sub_x_data, abs_x_data);
  const Dtype scale = Dtype(1.0 / epsilon);
  caffe_axpy(count, scale, abs_x_data, log_abs_data);
  caffe_log(count, log_abs_data, log_abs_data);
  caffe_scal(count, w, log_abs_data);
  Dtype loss = 0.f;
  hipLaunchKernelGGL(( WingLossForward<Dtype>), dim3(CAFFE_GET_BLOCKS(count)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
      count, abs_x.mutable_gpu_data(), log_abs.mutable_gpu_data(), w, epsilon, _c);
  CUDA_POST_KERNEL_CHECK;
  caffe_gpu_dot(count, log_abs.gpu_data(), one_dot.gpu_data(), &loss);
  top[0]->mutable_cpu_data()[0] = loss / bottom[0]->num();
}

template <typename Dtype>
__global__ void WingLossBackward(const int n, const Dtype* in, Dtype* out,
    const float w, const float epsilon) {
  CUDA_KERNEL_LOOP(index, n) {
    Dtype val = in[index];
    Dtype abs_val = abs(val);
    Dtype sign = (Dtype(0) < val) - (val < Dtype(0));
    if (abs_val < w) {
      out[index] = sign * w / (epsilon + abs_val);
    } else {
      out[index] = sign;
    }
  }
}

template <typename Dtype>
void WingLossLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
    const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) {
  const Dtype* sub_x_data = diff_.gpu_data();
  const Dtype* abs_x_data = abs_x.gpu_data();
  Dtype* bottom_diff = bottom[0]->mutable_gpu_diff();
  int count = bottom[0]->count();
  hipLaunchKernelGGL(( WingLossBackward<Dtype>), dim3(CAFFE_GET_BLOCKS(count)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
      count, sub_x_data, bottom_diff, w, epsilon);
  CUDA_POST_KERNEL_CHECK;
  if (propagate_down[0]) {
    Dtype loss_weight = top[0]->gpu_diff()[0];
    caffe_scal(count, loss_weight / bottom[0]->num(), bottom_diff);
  }
}

INSTANTIATE_LAYER_GPU_FUNCS(WingLossLayer);

}  // namespace caffe
2bea91c42889f8fd611a2a356d8f74228d8f84a2.cu
#include "caffe/layers/wing_loss_layer.hpp" namespace caffe { template <typename Dtype> __global__ void WingLossForward(const int n, const Dtype* abs_d, Dtype* log_d, const float w, const float epsilon, const float c) { CUDA_KERNEL_LOOP(index, n) { Dtype abs_val = abs_d[index]; if (abs_val < w) { log_d[index] = w * log_d[index]; } else { log_d[index] = abs_val - c; } } } template <typename Dtype> void WingLossLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) { const Dtype* bottom_data = bottom[0]->gpu_data(); const Dtype* label_data = bottom[1]->gpu_data(); Dtype* sub_x_data = diff_.mutable_gpu_data(); Dtype* abs_x_data = abs_x.mutable_gpu_data(); Dtype* log_abs_data = log_abs.mutable_gpu_data(); int count = bottom[0]->count(); caffe_set(count, Dtype(0), sub_x_data); caffe_set(count, Dtype(0), abs_x_data); caffe_set(count, Dtype(1.0), log_abs_data); caffe_sub(count, bottom_data, label_data, sub_x_data); caffe_abs(count, sub_x_data, abs_x_data); const Dtype scale = Dtype(1.0 / epsilon); caffe_axpy(count, scale, abs_x_data, log_abs_data); caffe_log(count, log_abs_data, log_abs_data); caffe_scal(count, w, log_abs_data); Dtype loss = 0.f; WingLossForward<Dtype><<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>( count, abs_x.mutable_gpu_data(), log_abs.mutable_gpu_data(), w, epsilon, _c); CUDA_POST_KERNEL_CHECK; caffe_gpu_dot(count, log_abs.gpu_data(), one_dot.gpu_data(), &loss); top[0]->mutable_cpu_data()[0] = loss / bottom[0]->num(); } template <typename Dtype> __global__ void WingLossBackward(const int n, const Dtype* in, Dtype* out, const float w, const float epsilon) { CUDA_KERNEL_LOOP(index, n) { Dtype val = in[index]; Dtype abs_val = abs(val); Dtype sign = (Dtype(0) < val) - (val < Dtype(0)); if (abs_val < w) { out[index] = sign * w / (epsilon + abs_val) ; } else { out[index] = sign; } } } template <typename Dtype> void WingLossLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top, const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) { const Dtype* sub_x_data = diff_.gpu_data(); const Dtype* abs_x_data = abs_x.gpu_data(); Dtype* bottom_diff = bottom[0]->mutable_gpu_diff(); int count = bottom[0]->count(); WingLossBackward<Dtype><<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>( count, sub_x_data, bottom_diff, w, epsilon); CUDA_POST_KERNEL_CHECK; if (propagate_down[0]) { Dtype loss_weight = top[0]->gpu_diff()[0]; caffe_scal(count, loss_weight / bottom[0]->num(), bottom_diff); } } INSTANTIATE_LAYER_GPU_FUNCS(WingLossLayer); } // namespace caffe
e1c2d270558614f15295bf4231d1de1dfba690b9.hip
// !!! This is a file automatically generated by hipify!!! /////////////////////// #include "hip/hip_runtime.h" #include "device_launch_parameters.h" #include <stdio.h> #include <cstdlib> #include <ctime> #include <iostream> using namespace std; #if __DEVICE_EMULATION__ bool InitCUDA(void) { return true; } #else bool InitCUDA(void) { int count = 0; int i = 0; hipGetDeviceCount(&count); if (count == 0) { fprintf(stderr, "There is no device.\n"); return false; } for (i = 0; i < count; i++) { hipDeviceProp_t prop; if (hipGetDeviceProperties(&prop, i) == hipSuccess) { if (prop.major >= 1) { break; } } } if (i == count) { fprintf(stderr, "There is no device supporting CUDA.\n"); return false; } hipSetDevice(i); printf("CUDA initialized.\n"); hipDeviceProp_t prop; hipGetDeviceProperties(&prop, i); printf("Device : \" %s \" \n\n", prop.name); return true; } #endif #define aW 855+800 #define aH 5110 #define bW 1013+800 #define blocknum 128//32 #define threadnum 512//256 typedef struct { int width; int height; int *element; }Matrix; Matrix InitMatrix(int w, int h) { Matrix t; t.element = (int *)malloc(w * h * sizeof(int)); for (int i = 0; i < w*h; i++) t.element[i] = rand() % 10; t.width = w; t.height = h; return t; } Matrix MM(Matrix a, Matrix b) { Matrix t; t.element = (int *)malloc(a.height * b.width * sizeof(int)); t.width = b.width; t.height = a.height; int x; int y; for (int i = 0; i < t.width * t.height; i++) { x = i / t.width * a.width; y = i - i / t.width * t.width; t.element[i] = 0; for (int k = 0; k < a.width; k++) { t.element[i] += a.element[x + k] * b.element[y + b.width * k]; } } return t; } __global__ static void MatrixMul(int *ma, int *mb, int *mc, int *mp) { int aw = mp[0]; int bw = mp[2]; int cw = mp[4]; int ch = mp[5]; const int bid = blockIdx.x; const int tid = threadIdx.x; int i, x, y; for (i = bid * threadnum + tid; i < cw * ch; i += threadnum * blocknum) { x = i / cw * aw; y = i - i / cw * cw; mc[i] = 0; for (int k = 0; k < aw; k++) { mc[i] += ma[x + k] * mb[y + k * bw]; } } } int main(int argc, char* argv[]) { cout << "Matrix Mult Begin : " << endl; if (!InitCUDA()) { return 0; } // //int matrixa[N][N] , matrixb[N][N] , matrixc[N][N] , gpuresult[N][N] , matrixd[N][N] ; Matrix matrixa = InitMatrix(aW, aH); Matrix matrixb = InitMatrix(bW, aW); Matrix matrixc; Matrix gpuresult = InitMatrix(bW, aH); int matrixprop[6]; //CPU int start = clock(); matrixc = MM(matrixa, matrixb); int finish = clock(); double time = ((double)finish - (double)start) / CLOCKS_PER_SEC; cout << "CPU Time is " << time << endl; start = clock(); matrixprop[0] = matrixa.width; matrixprop[1] = matrixa.height; matrixprop[2] = matrixb.width; matrixprop[3] = matrixb.height; matrixprop[4] = matrixc.width; matrixprop[5] = matrixc.height; // int *ma, *mb, *mc, *mp; hipMalloc((void**)&ma, sizeof(int) * matrixa.width * matrixa.height); hipMalloc((void**)&mb, sizeof(int) * matrixb.width * matrixb.height); hipMalloc((void**)&mc, sizeof(int) * matrixc.width * matrixc.height); hipMalloc((void**)&mp, sizeof(int) * 6); // hipMemcpy(ma, matrixa.element, sizeof(int) * matrixa.width * matrixa.height, hipMemcpyHostToDevice); hipMemcpy(mb, matrixb.element, sizeof(int) * matrixb.width * matrixb.height, hipMemcpyHostToDevice); hipMemcpy(mp, matrixprop, sizeof(int) * 6, hipMemcpyHostToDevice); //CUDA MatrixMul << < blocknum, threadnum>> >(ma, mb, mc, mp); hipDeviceSynchronize(); // hipMemcpy(gpuresult.element, mc, sizeof(int) * gpuresult.width * gpuresult.height, hipMemcpyDeviceToHost); finish = clock(); time = ((double)finish - 
(double)start) / CLOCKS_PER_SEC; cout << "GPU Time is " << time << endl; for (int i = 0; i < gpuresult.width * gpuresult.height; i++) { //printf("%d -- %d\n",matrixc.element[ i ],gpuresult.element[ i ]); if (matrixc.element[i] != gpuresult.element[i]) { printf("ERROR"); } } hipFree(ma); hipFree(mb); hipFree(mc); hipFree(mp); return 0; }
e1c2d270558614f15295bf4231d1de1dfba690b9.cu
/////////////////////// #include "cuda_runtime.h" #include "device_launch_parameters.h" #include <stdio.h> #include <cstdlib> #include <ctime> #include <iostream> using namespace std; #if __DEVICE_EMULATION__ bool InitCUDA(void) { return true; } #else bool InitCUDA(void) { int count = 0; int i = 0; cudaGetDeviceCount(&count); if (count == 0) { fprintf(stderr, "There is no device.\n"); return false; } for (i = 0; i < count; i++) { cudaDeviceProp prop; if (cudaGetDeviceProperties(&prop, i) == cudaSuccess) { if (prop.major >= 1) { break; } } } if (i == count) { fprintf(stderr, "There is no device supporting CUDA.\n"); return false; } cudaSetDevice(i); printf("CUDA initialized.\n"); cudaDeviceProp prop; cudaGetDeviceProperties(&prop, i); printf("Device : \" %s \" \n\n", prop.name); return true; } #endif #define aW 855+800 #define aH 5110 #define bW 1013+800 #define blocknum 128//32 #define threadnum 512//256 typedef struct { int width; int height; int *element; }Matrix; Matrix InitMatrix(int w, int h) { Matrix t; t.element = (int *)malloc(w * h * sizeof(int)); for (int i = 0; i < w*h; i++) t.element[i] = rand() % 10; t.width = w; t.height = h; return t; } Matrix MM(Matrix a, Matrix b) { Matrix t; t.element = (int *)malloc(a.height * b.width * sizeof(int)); t.width = b.width; t.height = a.height; int x; int y; for (int i = 0; i < t.width * t.height; i++) { x = i / t.width * a.width; y = i - i / t.width * t.width; t.element[i] = 0; for (int k = 0; k < a.width; k++) { t.element[i] += a.element[x + k] * b.element[y + b.width * k]; } } return t; } __global__ static void MatrixMul(int *ma, int *mb, int *mc, int *mp) { int aw = mp[0]; int bw = mp[2]; int cw = mp[4]; int ch = mp[5]; const int bid = blockIdx.x; const int tid = threadIdx.x; int i, x, y; for (i = bid * threadnum + tid; i < cw * ch; i += threadnum * blocknum) { x = i / cw * aw; y = i - i / cw * cw; mc[i] = 0; for (int k = 0; k < aw; k++) { mc[i] += ma[x + k] * mb[y + k * bw]; } } } int main(int argc, char* argv[]) { cout << "Matrix Mult Begin : " << endl; if (!InitCUDA()) { return 0; } //������� //int matrixa[N][N] , matrixb[N][N] , matrixc[N][N] , gpuresult[N][N] , matrixd[N][N] ; Matrix matrixa = InitMatrix(aW, aH); Matrix matrixb = InitMatrix(bW, aW); Matrix matrixc; Matrix gpuresult = InitMatrix(bW, aH); int matrixprop[6]; //CPU������� int start = clock(); matrixc = MM(matrixa, matrixb); int finish = clock(); double time = ((double)finish - (double)start) / CLOCKS_PER_SEC; cout << "CPU Time is " << time << endl; start = clock(); matrixprop[0] = matrixa.width; matrixprop[1] = matrixa.height; matrixprop[2] = matrixb.width; matrixprop[3] = matrixb.height; matrixprop[4] = matrixc.width; matrixprop[5] = matrixc.height; //�����Դ� int *ma, *mb, *mc, *mp; cudaMalloc((void**)&ma, sizeof(int) * matrixa.width * matrixa.height); cudaMalloc((void**)&mb, sizeof(int) * matrixb.width * matrixb.height); cudaMalloc((void**)&mc, sizeof(int) * matrixc.width * matrixc.height); cudaMalloc((void**)&mp, sizeof(int) * 6); //����ݸ��Ƶ��Դ��� cudaMemcpy(ma, matrixa.element, sizeof(int) * matrixa.width * matrixa.height, cudaMemcpyHostToDevice); cudaMemcpy(mb, matrixb.element, sizeof(int) * matrixb.width * matrixb.height, cudaMemcpyHostToDevice); cudaMemcpy(mp, matrixprop, sizeof(int) * 6, cudaMemcpyHostToDevice); //����CUDA���� MatrixMul << < blocknum, threadnum>> >(ma, mb, mc, mp); cudaThreadSynchronize(); //����ݴ��Դ��и��Ƴ��� cudaMemcpy(gpuresult.element, mc, sizeof(int) * gpuresult.width * gpuresult.height, cudaMemcpyDeviceToHost); finish = clock(); time = 
((double)finish - (double)start) / CLOCKS_PER_SEC; cout << "GPU Time is " << time << endl; for (int i = 0; i < gpuresult.width * gpuresult.height; i++) { //printf("%d -- %d\n",matrixc.element[ i ],gpuresult.element[ i ]); if (matrixc.element[i] != gpuresult.element[i]) { printf("ERROR"); } } cudaFree(ma); cudaFree(mb); cudaFree(mc); cudaFree(mp); return 0; }
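Both versions of MatrixMul above stride over the flattened output with a fixed step of blocknum*threadnum and recover the row and column from the flat index. A minimal self-contained sketch of the same indexing, using the launch configuration (gridDim/blockDim) rather than the hard-coded constants; the kernel name and parameters here are illustrative, not taken from either file:

// Grid-stride loop over C (ch rows x cw columns), same arithmetic as MatrixMul:
// row = i / cw, col = i - row * cw, C[i] = sum_k A[row*aw + k] * B[col + k*bw].
__global__ void matmulFlat(const int* A, const int* B, int* C,
                           int aw, int bw, int cw, int ch)
{
    for (int i = blockIdx.x * blockDim.x + threadIdx.x;
         i < cw * ch;
         i += gridDim.x * blockDim.x)
    {
        const int row = i / cw;
        const int col = i - row * cw;   // equivalent to i % cw
        int acc = 0;
        for (int k = 0; k < aw; ++k)
            acc += A[row * aw + k] * B[col + k * bw];
        C[i] = acc;
    }
}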
c1893d51c04f30df8edb9dc4a93815927cb907ac.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <wb.h> #define TILE_WIDTH 16 #define wbCheck(stmt) \ do { \ hipError_t err = stmt; \ if (err != hipSuccess) { \ wbLog(ERROR, "Failed to run stmt ", #stmt); \ wbLog(ERROR, "Got CUDA error ... ", hipGetErrorString(err)); \ return -1; \ } \ } while (0) // Compute C = A * B __global__ void matrixMultiply(float *A, float *B, float *C, int numARows, int numAColumns, int numBRows, int numBColumns, int numCRows, int numCColumns) { //@@ Insert code to implement matrix multiplication here int Row = blockIdx.y * blockDim.y + threadIdx.y; int Col = blockIdx.x * blockDim.x + threadIdx.x; //Checking if inside the valid range of output matrix if ((Row < numCRows) && (Col < numCColumns)) { float Cvalue = 0.0; //Setting sum starting value //Looping through row and column, multiplying the values and adding it to the total sum for (int i = 0; i < numAColumns; ++i) { Cvalue += A[Row * numAColumns + i] * B[Col + i * numCColumns]; } //Writing sum value to the output C[Row * numCColumns + Col] = Cvalue; } } int main(int argc, char **argv) { wbArg_t args; float *hostA; // The A matrix float *hostB; // The B matrix float *hostC; // The output C matrix float *deviceA; float *deviceB; float *deviceC; int numARows; // number of rows in the matrix A int numAColumns; // number of columns in the matrix A int numBRows; // number of rows in the matrix B int numBColumns; // number of columns in the matrix B int numCRows; // number of rows in the matrix C (you have to set this) int numCColumns; // number of columns in the matrix C (you have to set this) args = wbArg_read(argc, argv); wbTime_start(Generic, "Importing data and creating memory on host"); hostA = (float *)wbImport(wbArg_getInputFile(args, 0), &numARows, &numAColumns); hostB = (float *)wbImport(wbArg_getInputFile(args, 1), &numBRows, &numBColumns); //@@ Set numCRows and numCColumns numCRows = numARows; numCColumns = numBColumns; //@@ Allocate the hostC matrix hostC = (float *)malloc(numCRows * numCColumns * sizeof(float)); wbTime_stop(Generic, "Importing data and creating memory on host"); wbLog(TRACE, "The dimensions of A are ", numARows, " x ", numAColumns); wbLog(TRACE, "The dimensions of B are ", numBRows, " x ", numBColumns); wbTime_start(GPU, "Allocating GPU memory."); //@@ Allocate GPU memory here hipError_t err; //For fetching cuda errors err = hipMalloc((void **)&deviceC, numCRows * numCColumns * sizeof(float)); if (err != hipSuccess) { printf("%s at line %d\n", hipGetErrorString(err), __LINE__); exit(EXIT_FAILURE); } err = hipMalloc((void **)&deviceA, numARows * numAColumns * sizeof(float)); if (err != hipSuccess) { printf("%s at line %d\n", hipGetErrorString(err), __LINE__); exit(EXIT_FAILURE); } err = hipMalloc((void **)&deviceB, numBRows * numBColumns * sizeof(float)); if (err != hipSuccess) { printf("%s at line %d\n", hipGetErrorString(err), __LINE__); exit(EXIT_FAILURE); } wbTime_stop(GPU, "Allocating GPU memory."); wbTime_start(GPU, "Copying input memory to the GPU."); //@@ Copy memory to the GPU here err = hipMemcpy(deviceA, hostA, numARows * numAColumns * sizeof(float), hipMemcpyHostToDevice); if (err != hipSuccess) { printf("%s at line %d\n", hipGetErrorString(err), __LINE__); exit(EXIT_FAILURE); } err = hipMemcpy(deviceB, hostB, numBRows * numBColumns * sizeof(float), hipMemcpyHostToDevice); if (err != hipSuccess) { printf("%s at line %d\n", hipGetErrorString(err), __LINE__); exit(EXIT_FAILURE); } wbTime_stop(GPU, "Copying input memory to the 
GPU."); //@@ Initialize the grid and block dimensions here dim3 DimGrid((numCColumns - 1) / TILE_WIDTH + 1, (numCRows - 1) / TILE_WIDTH + 1, 1); dim3 DimBlock(TILE_WIDTH, TILE_WIDTH, 1); wbTime_start(Compute, "Performing CUDA computation"); //@@ Launch the GPU Kernel here matrixMultiply << <DimGrid, DimBlock >> >(deviceA, deviceB, deviceC, numARows, numAColumns, numBRows, numBColumns, numCRows, numCColumns); hipDeviceSynchronize(); wbTime_stop(Compute, "Performing CUDA computation"); wbTime_start(Copy, "Copying output memory to the CPU"); //@@ Copy the GPU memory back to the CPU here hipMemcpy(hostC, deviceC, numCRows * numCColumns * sizeof(float), hipMemcpyDeviceToHost); if (err != hipSuccess) { printf("%s at line %d\n", hipGetErrorString(err), __LINE__); exit(EXIT_FAILURE); } wbTime_stop(Copy, "Copying output memory to the CPU"); wbTime_start(GPU, "Freeing GPU Memory"); //@@ Free the GPU memory here hipFree(deviceA); hipFree(deviceB); hipFree(deviceC); wbTime_stop(GPU, "Freeing GPU Memory"); wbSolution(args, hostC, numCRows, numCColumns); free(hostA); free(hostB); free(hostC); return 0; }
c1893d51c04f30df8edb9dc4a93815927cb907ac.cu
#include <wb.h> #define TILE_WIDTH 16 #define wbCheck(stmt) \ do { \ cudaError_t err = stmt; \ if (err != cudaSuccess) { \ wbLog(ERROR, "Failed to run stmt ", #stmt); \ wbLog(ERROR, "Got CUDA error ... ", cudaGetErrorString(err)); \ return -1; \ } \ } while (0) // Compute C = A * B __global__ void matrixMultiply(float *A, float *B, float *C, int numARows, int numAColumns, int numBRows, int numBColumns, int numCRows, int numCColumns) { //@@ Insert code to implement matrix multiplication here int Row = blockIdx.y * blockDim.y + threadIdx.y; int Col = blockIdx.x * blockDim.x + threadIdx.x; //Checking if inside the valid range of output matrix if ((Row < numCRows) && (Col < numCColumns)) { float Cvalue = 0.0; //Setting sum starting value //Looping through row and column, multiplying the values and adding it to the total sum for (int i = 0; i < numAColumns; ++i) { Cvalue += A[Row * numAColumns + i] * B[Col + i * numCColumns]; } //Writing sum value to the output C[Row * numCColumns + Col] = Cvalue; } } int main(int argc, char **argv) { wbArg_t args; float *hostA; // The A matrix float *hostB; // The B matrix float *hostC; // The output C matrix float *deviceA; float *deviceB; float *deviceC; int numARows; // number of rows in the matrix A int numAColumns; // number of columns in the matrix A int numBRows; // number of rows in the matrix B int numBColumns; // number of columns in the matrix B int numCRows; // number of rows in the matrix C (you have to set this) int numCColumns; // number of columns in the matrix C (you have to set this) args = wbArg_read(argc, argv); wbTime_start(Generic, "Importing data and creating memory on host"); hostA = (float *)wbImport(wbArg_getInputFile(args, 0), &numARows, &numAColumns); hostB = (float *)wbImport(wbArg_getInputFile(args, 1), &numBRows, &numBColumns); //@@ Set numCRows and numCColumns numCRows = numARows; numCColumns = numBColumns; //@@ Allocate the hostC matrix hostC = (float *)malloc(numCRows * numCColumns * sizeof(float)); wbTime_stop(Generic, "Importing data and creating memory on host"); wbLog(TRACE, "The dimensions of A are ", numARows, " x ", numAColumns); wbLog(TRACE, "The dimensions of B are ", numBRows, " x ", numBColumns); wbTime_start(GPU, "Allocating GPU memory."); //@@ Allocate GPU memory here cudaError_t err; //For fetching cuda errors err = cudaMalloc((void **)&deviceC, numCRows * numCColumns * sizeof(float)); if (err != cudaSuccess) { printf("%s at line %d\n", cudaGetErrorString(err), __LINE__); exit(EXIT_FAILURE); } err = cudaMalloc((void **)&deviceA, numARows * numAColumns * sizeof(float)); if (err != cudaSuccess) { printf("%s at line %d\n", cudaGetErrorString(err), __LINE__); exit(EXIT_FAILURE); } err = cudaMalloc((void **)&deviceB, numBRows * numBColumns * sizeof(float)); if (err != cudaSuccess) { printf("%s at line %d\n", cudaGetErrorString(err), __LINE__); exit(EXIT_FAILURE); } wbTime_stop(GPU, "Allocating GPU memory."); wbTime_start(GPU, "Copying input memory to the GPU."); //@@ Copy memory to the GPU here err = cudaMemcpy(deviceA, hostA, numARows * numAColumns * sizeof(float), cudaMemcpyHostToDevice); if (err != cudaSuccess) { printf("%s at line %d\n", cudaGetErrorString(err), __LINE__); exit(EXIT_FAILURE); } err = cudaMemcpy(deviceB, hostB, numBRows * numBColumns * sizeof(float), cudaMemcpyHostToDevice); if (err != cudaSuccess) { printf("%s at line %d\n", cudaGetErrorString(err), __LINE__); exit(EXIT_FAILURE); } wbTime_stop(GPU, "Copying input memory to the GPU."); //@@ Initialize the grid and block dimensions here dim3 
DimGrid((numCColumns - 1) / TILE_WIDTH + 1, (numCRows - 1) / TILE_WIDTH + 1, 1); dim3 DimBlock(TILE_WIDTH, TILE_WIDTH, 1); wbTime_start(Compute, "Performing CUDA computation"); //@@ Launch the GPU Kernel here matrixMultiply << <DimGrid, DimBlock >> >(deviceA, deviceB, deviceC, numARows, numAColumns, numBRows, numBColumns, numCRows, numCColumns); cudaDeviceSynchronize(); wbTime_stop(Compute, "Performing CUDA computation"); wbTime_start(Copy, "Copying output memory to the CPU"); //@@ Copy the GPU memory back to the CPU here cudaMemcpy(hostC, deviceC, numCRows * numCColumns * sizeof(float), cudaMemcpyDeviceToHost); if (err != cudaSuccess) { printf("%s at line %d\n", cudaGetErrorString(err), __LINE__); exit(EXIT_FAILURE); } wbTime_stop(Copy, "Copying output memory to the CPU"); wbTime_start(GPU, "Freeing GPU Memory"); //@@ Free the GPU memory here cudaFree(deviceA); cudaFree(deviceB); cudaFree(deviceC); wbTime_stop(GPU, "Freeing GPU Memory"); wbSolution(args, hostC, numCRows, numCColumns); free(hostA); free(hostB); free(hostC); return 0; }
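The grid above is sized with the round-up idiom (n - 1) / TILE_WIDTH + 1 so that every output element falls inside some tile. A small equivalent helper, shown only to make the intent explicit; the helper name is not part of the file:

// Ceiling division: number of TILE_WIDTH-wide tiles needed to cover n elements.
// For n >= 1 this equals the (n - 1) / TILE_WIDTH + 1 form used in the listing.
__host__ __device__ inline int ceilDiv(int n, int tile) { return (n + tile - 1) / tile; }

// e.g. dim3 DimGrid(ceilDiv(numCColumns, TILE_WIDTH), ceilDiv(numCRows, TILE_WIDTH), 1);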
be2916292d03053f2d9304640c3ee358be6346b4.hip
// !!! This is a file automatically generated by hipify!!!
#include<stdio.h>
#include<stdlib.h>
#include <cuda_device_runtime_api.h>
#include <hip/hip_runtime_api.h>
#include <device_launch_parameters.h>

// Mean of the n entries of x.
__host__ float barra(float* x, int n)
{
    int i;
    float xb = 0.0;
    for (i = 0; i < n; i++) {
        xb += x[i];
    }
    xb = xb / n;
    return xb;
}

// Per-element (x_i - xbar)^2 terms of Sxx.
__global__ void Saxx_device(float* x, float* c, float xb, int n)
{
    int i = threadIdx.x;
    if (i < n)
        c[i] = (x[i] - xb) * (x[i] - xb);
}

// Per-element (x_i - xbar)(y_i - ybar) terms of Sxy.
__global__ void Saxy_device(float* x, float* y, float* d, float xb, float yb, int n)
{
    int i = threadIdx.x;
    if (i < n)
        d[i] = (x[i] - xb) * (y[i] - yb);
}

int main()
{
    // host variables
    float* x;
    float* y;
    float h_xb;
    float h_yb;
    float* h_saxy;
    float* h_saxx;
    float sxx;
    float sxy;
    int size;
    // device variables
    float* c;
    float* d_saxx;
    float* d_saxy;
    float* d;
    float* d_x;
    float* d_y;
    // number of elements
    printf("ingrese\n");
    scanf("%d", &size);
    // allocate the host arrays
    x = (float*)malloc(sizeof(float) * size);
    y = (float*)malloc(sizeof(float) * size);
    // fill the vectors
    for (int i = 0; i < size; i++) {
        x[i] = rand() + (float)i * 0.01;
        y[i] = rand() - (float)i * 0.01;
    }
    // call barra to compute the means
    h_xb = barra(x, size);
    h_yb = barra(y, size);
    // allocate device memory
    hipMalloc((void**)&c, sizeof(float) * size);
    hipMalloc((void**)&d, sizeof(float) * size);
    hipMalloc((void**)&d_x, sizeof(float) * size);
    hipMalloc((void**)&d_y, sizeof(float) * size);
    // copy the data to the device
    hipMemcpy(d_x, x, sizeof(float) * size, hipMemcpyHostToDevice);
    hipMemcpy(d_y, y, sizeof(float) * size, hipMemcpyHostToDevice);
    // launch the kernels
    hipLaunchKernelGGL(( Saxx_device), dim3(1), dim3(size), 0, 0, d_x, c, h_xb, size);
    hipLaunchKernelGGL(( Saxy_device), dim3(1), dim3(size), 0, 0, d_x, d_y, d, h_xb, h_yb, size);
    // allocate on the host
    h_saxx = (float*)malloc(sizeof(float) * size);
    h_saxy = (float*)malloc(sizeof(float) * size);
    // copy the results back from the device
    hipMemcpy(h_saxx, c, sizeof(float) * size, hipMemcpyDeviceToHost);
    hipMemcpy(h_saxy, d, sizeof(float) * size, hipMemcpyDeviceToHost);
    // call barra to average the per-element terms
    sxx = barra(h_saxx, size);
    sxy = barra(h_saxy, size);
    // regression coefficients
    float beta1G, beta0G;
    beta1G = sxy / sxx;                // coefficient beta-1 hat (slope)
    beta0G = h_xb * (-beta1G) + h_yb;  // coefficient beta-0 hat (intercept)
    printf(" y = %f + %f x", beta0G, beta1G);
    return 0;
}
be2916292d03053f2d9304640c3ee358be6346b4.cu
#include<stdio.h>
#include<stdlib.h>
#include <cuda_device_runtime_api.h>
#include <cuda_runtime_api.h>
#include <device_launch_parameters.h>

// Mean of the n entries of x.
__host__ float barra(float* x, int n)
{
    int i;
    float xb = 0.0;
    for (i = 0; i < n; i++) {
        xb += x[i];
    }
    xb = xb / n;
    return xb;
}

// Per-element (x_i - xbar)^2 terms of Sxx.
__global__ void Saxx_device(float* x, float* c, float xb, int n)
{
    int i = threadIdx.x;
    if (i < n)
        c[i] = (x[i] - xb) * (x[i] - xb);
}

// Per-element (x_i - xbar)(y_i - ybar) terms of Sxy.
__global__ void Saxy_device(float* x, float* y, float* d, float xb, float yb, int n)
{
    int i = threadIdx.x;
    if (i < n)
        d[i] = (x[i] - xb) * (y[i] - yb);
}

int main()
{
    // host variables
    float* x;
    float* y;
    float h_xb;
    float h_yb;
    float* h_saxy;
    float* h_saxx;
    float sxx;
    float sxy;
    int size;
    // device variables
    float* c;
    float* d_saxx;
    float* d_saxy;
    float* d;
    float* d_x;
    float* d_y;
    // number of elements
    printf("ingrese\n");
    scanf("%d", &size);
    // allocate the host arrays
    x = (float*)malloc(sizeof(float) * size);
    y = (float*)malloc(sizeof(float) * size);
    // fill the vectors
    for (int i = 0; i < size; i++) {
        x[i] = rand() + (float)i * 0.01;
        y[i] = rand() - (float)i * 0.01;
    }
    // call barra to compute the means
    h_xb = barra(x, size);
    h_yb = barra(y, size);
    // allocate device memory
    cudaMalloc((void**)&c, sizeof(float) * size);
    cudaMalloc((void**)&d, sizeof(float) * size);
    cudaMalloc((void**)&d_x, sizeof(float) * size);
    cudaMalloc((void**)&d_y, sizeof(float) * size);
    // copy the data to the device
    cudaMemcpy(d_x, x, sizeof(float) * size, cudaMemcpyHostToDevice);
    cudaMemcpy(d_y, y, sizeof(float) * size, cudaMemcpyHostToDevice);
    // launch the kernels
    Saxx_device <<< 1, size >>> (d_x, c, h_xb, size);
    Saxy_device <<< 1, size >>> (d_x, d_y, d, h_xb, h_yb, size);
    // allocate on the host
    h_saxx = (float*)malloc(sizeof(float) * size);
    h_saxy = (float*)malloc(sizeof(float) * size);
    // copy the results back from the device
    cudaMemcpy(h_saxx, c, sizeof(float) * size, cudaMemcpyDeviceToHost);
    cudaMemcpy(h_saxy, d, sizeof(float) * size, cudaMemcpyDeviceToHost);
    // call barra to average the per-element terms
    sxx = barra(h_saxx, size);
    sxy = barra(h_saxy, size);
    // regression coefficients
    float beta1G, beta0G;
    beta1G = sxy / sxx;                // coefficient beta-1 hat (slope)
    beta0G = h_xb * (-beta1G) + h_yb;  // coefficient beta-0 hat (intercept)
    printf(" y = %f + %f x", beta0G, beta1G);
    return 0;
}
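In both versions above, barra averages its input, so sxx and sxy are S_xx/n and S_xy/n rather than the raw sums; the common 1/n factor cancels in the slope, and the line printed at the end is the ordinary least-squares fit:

\[
\hat\beta_1 = \frac{S_{xy}}{S_{xx}}
            = \frac{\sum_i (x_i - \bar x)(y_i - \bar y)}{\sum_i (x_i - \bar x)^2},
\qquad
\hat\beta_0 = \bar y - \hat\beta_1 \bar x,
\]

which matches beta1G = sxy / sxx and beta0G = h_xb * (-beta1G) + h_yb in the code.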
9403c569ca7a3180e0052acc29295f748b75b565.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"

__global__ void histDupeKernel(const float* data1, const float* data2, const float* confidence1, const float* confidence2,
        int* ids1, int* ids2, int* results_id1, int* results_id2, float* results_similarity, int* result_count,
        const int N1, const int N2, const int max_results) {

    const unsigned int thread = threadIdx.x;              // Thread index within block
    const unsigned int block = blockIdx.x;                // Block index
    const unsigned int block_size = blockDim.x;           // Size of each block
    const unsigned int block_start = block_size * block;  // Index of the start of the block
    const unsigned int index = block_start + thread;      // Index of this thread

    //__shared__ float conf[64]; // Shared array of confidence values for all histograms owned by this block
    //conf[thread] = confidence1[index]; // Coalesced read of confidence values
    float conf = confidence1[index];
    int id = ids1[index];

    __shared__ float hists[128 * 64]; // Shared array of all histograms owned by this block
    for (unsigned int i = 0; i < 64; i++) {
        hists[i * 128 + thread] = data1[(block_start + i) * 128 + thread];           // Coalesced read of first half of histogram
        hists[i * 128 + thread + 64] = data1[(block_start + i) * 128 + 64 + thread]; // Coalesced read of second half of histogram
    }

    __shared__ float other[128]; // Histogram to compare all owned histograms against parallely

    for (unsigned int i = 0; i < N2 && *result_count < max_results; i++) {
        other[thread] = data2[i * 128 + thread];           // Coalesced read of first half of other histogram
        other[thread + 64] = data2[i * 128 + thread + 64]; // Second half
        __syncthreads(); // Ensure all values read

        if (index < N1) {
            float d = 0;
            for (unsigned int k = 0; k < 128; k++) { // Compute sum of distances between thread-owned histogram and shared histogram
                d += fabsf(hists[thread * 128 + k] - other[k]);
            }
            d = 1 - (d / 8); // Massage the difference into a nice % similarity number, between 0 and 1

            int other_id = ids2[i];
            if (other_id != id && d > fmaxf(conf, confidence2[i])) { // Don't compare against self, only compare using highest confidence
                int result_index = atomicAdd(result_count, 1); // Increment result count by one atomically (returns value before increment)
                if (result_index < max_results) {
                    // Store resulting pair
                    results_similarity[result_index] = d;
                    results_id1[result_index] = id;
                    results_id2[result_index] = other_id;
                }
            }
        }
        __syncthreads(); // Ensure all threads have finished before looping and reading new shared histogram
    }
}
9403c569ca7a3180e0052acc29295f748b75b565.cu
#include "includes.h" __global__ void histDupeKernel(const float* data1, const float* data2, const float* confidence1, const float* confidence2, int* ids1, int* ids2, int* results_id1, int* results_id2, float* results_similarity, int* result_count, const int N1, const int N2, const int max_results) { const unsigned int thread = threadIdx.x; // Thread index within block const unsigned int block = blockIdx.x; // Block index const unsigned int block_size = blockDim.x; // Size of each block const unsigned int block_start = block_size * block; // Index of the start of the block const unsigned int index = block_start + thread; // Index of this thread //__shared__ float conf[64]; // Shared array of confidence values for all histograms owned by this block //conf[thread] = confidence1[index]; // Coalesced read of confidence values float conf = confidence1[index]; int id = ids1[index]; __shared__ float hists[128 * 64]; // Shared array of all histograms owned by this block for (unsigned int i = 0; i < 64; i++) { hists[i * 128 + thread] = data1[(block_start + i) * 128 + thread]; // Coalesced read of first half of histogram hists[i * 128 + thread + 64] = data1[(block_start + i) * 128 + 64 + thread]; // Coalesced read of second half of histogram } __shared__ float other[128]; // Histogram to compare all owned histograms against parallely for (unsigned int i = 0; i < N2 && *result_count < max_results; i++) { other[thread] = data2[i * 128 + thread]; // Coalesced read of first half of other histogram other[thread + 64] = data2[i * 128 + thread + 64]; // Second half __syncthreads(); // Ensure all values read if (index < N1) { float d = 0; for (unsigned int k = 0; k < 128; k++) { // Compute sum of distances between thread-owned histogram and shared histogram d += fabsf(hists[thread * 128 + k] - other[k]); } d = 1 - (d / 8); // Massage the difference into a nice % similarity number, between 0 and 1 int other_id = ids2[i]; if (other_id != id && d > fmaxf(conf, confidence2[i])) { // Don't compare against self, only compare using highest confidence int result_index = atomicAdd(result_count, 1); // Increment result count by one atomically (returns value before increment) if (result_index < max_results) { // Store resulting pair results_similarity[result_index] = d; results_id1[result_index] = id; results_id2[result_index] = other_id; } } } __syncthreads(); // Ensure all threads have finished before looping and reading new shared histogram } }
41f32b4f32ea66cdd4c31d491bbfb76661a9218b.hip
// !!! This is a file automatically generated by hipify!!! /* Defines the matrix operations for sequential dense with CUDA */ #include <petscpkg_version.h> #define PETSC_SKIP_IMMINTRIN_H_CUDAWORKAROUND 1 #include <../src/mat/impls/dense/seq/dense.h> /*I "petscmat.h" I*/ #include <petsc/private/cudavecimpl.h> /* cublas definitions are here */ #if defined(PETSC_USE_COMPLEX) #if defined(PETSC_USE_REAL_SINGLE) #define hipsolverDnXpotrf(a,b,c,d,e,f,g,h) hipsolverDnCpotrf((a),(b),(c),(hipComplex*)(d),(e),(hipComplex*)(f),(g),(h)) #define hipsolverDnXpotrf_bufferSize(a,b,c,d,e,f) hipsolverDnCpotrf_bufferSize((a),(b),(c),(hipComplex*)(d),(e),(f)) #define hipsolverDnXpotrs(a,b,c,d,e,f,g,h,i) hipsolverDnCpotrs((a),(b),(c),(d),(hipComplex*)(e),(f),(hipComplex*)(g),(h),(i)) #define cusolverDnXpotri(a,b,c,d,e,f,g,h) cusolverDnCpotri((a),(b),(c),(hipComplex*)(d),(e),(hipComplex*)(f),(g),(h)) #define cusolverDnXpotri_bufferSize(a,b,c,d,e,f) cusolverDnCpotri_bufferSize((a),(b),(c),(hipComplex*)(d),(e),(f)) #define cusolverDnXsytrf(a,b,c,d,e,f,g,h,i) hipsolverDnCsytrf((a),(b),(c),(hipComplex*)(d),(e),(f),(hipComplex*)(g),(h),(i)) #define cusolverDnXsytrf_bufferSize(a,b,c,d,e) hipsolverDnCsytrf_bufferSize((a),(b),(hipComplex*)(c),(d),(e)) #define cusolverDnXgetrf(a,b,c,d,e,f,g,h) hipsolverDnCgetrf((a),(b),(c),(hipComplex*)(d),(e),(hipComplex*)(f),(g),(h)) #define cusolverDnXgetrf_bufferSize(a,b,c,d,e,f) hipsolverDnCgetrf_bufferSize((a),(b),(c),(hipComplex*)(d),(e),(f)) #define cusolverDnXgetrs(a,b,c,d,e,f,g,h,i,j) hipsolverDnCgetrs((a),(b),(c),(d),(hipComplex*)(e),(f),(g),(hipComplex*)(h),(i),(j)) #define hipsolverDnXgeqrf_bufferSize(a,b,c,d,e,f) hipsolverDnCgeqrf_bufferSize((a),(b),(c),(hipComplex*)(d),(e),(f)) #define hipsolverDnXgeqrf(a,b,c,d,e,f,g,h,i) hipsolverDnCgeqrf((a),(b),(c),(hipComplex*)(d),(e),(hipComplex*)(f),(hipComplex*)(g),(h),(i)) #define cusolverDnXormqr_bufferSize(a,b,c,d,e,f,g,h,i,j,k,l) hipsolverDnCunmqr_bufferSize((a),(b),(c),(d),(e),(f),(hipComplex*)(g),(h),(hipComplex*)(i),(hipComplex*)(j),(k),(l)) #define cusolverDnXormqr(a,b,c,d,e,f,g,h,i,j,k,l,m,n) hipsolverDnCunmqr((a),(b),(c),(d),(e),(f),(hipComplex*)(g),(h),(hipComplex*)(i),(hipComplex*)(j),(k),(hipComplex*)(l),(m),(n)) #define cublasXtrsm(a,b,c,d,e,f,g,h,i,j,k,l) hipblasCtrsm((a),(b),(c),(d),(e),(f),(g),(hipComplex*)(h),(hipComplex*)(i),(j),(hipComplex*)(k),(l)) #else /* complex double */ #define hipsolverDnXpotrf(a,b,c,d,e,f,g,h) hipsolverDnZpotrf((a),(b),(c),(hipDoubleComplex*)(d),(e),(hipDoubleComplex*)(f),(g),(h)) #define hipsolverDnXpotrf_bufferSize(a,b,c,d,e,f) hipsolverDnZpotrf_bufferSize((a),(b),(c),(hipDoubleComplex*)(d),(e),(f)) #define hipsolverDnXpotrs(a,b,c,d,e,f,g,h,i) hipsolverDnZpotrs((a),(b),(c),(d),(hipDoubleComplex*)(e),(f),(hipDoubleComplex*)(g),(h),(i)) #define cusolverDnXpotri(a,b,c,d,e,f,g,h) cusolverDnZpotri((a),(b),(c),(hipDoubleComplex*)(d),(e),(hipDoubleComplex*)(f),(g),(h)) #define cusolverDnXpotri_bufferSize(a,b,c,d,e,f) cusolverDnZpotri_bufferSize((a),(b),(c),(hipDoubleComplex*)(d),(e),(f)) #define cusolverDnXsytrf(a,b,c,d,e,f,g,h,i) hipsolverDnZsytrf((a),(b),(c),(hipDoubleComplex*)(d),(e),(f),(hipDoubleComplex*)(g),(h),(i)) #define cusolverDnXsytrf_bufferSize(a,b,c,d,e) hipsolverDnZsytrf_bufferSize((a),(b),(hipDoubleComplex*)(c),(d),(e)) #define cusolverDnXgetrf(a,b,c,d,e,f,g,h) hipsolverDnZgetrf((a),(b),(c),(hipDoubleComplex*)(d),(e),(hipDoubleComplex*)(f),(g),(h)) #define cusolverDnXgetrf_bufferSize(a,b,c,d,e,f) hipsolverDnZgetrf_bufferSize((a),(b),(c),(hipDoubleComplex*)(d),(e),(f)) #define 
cusolverDnXgetrs(a,b,c,d,e,f,g,h,i,j) hipsolverDnZgetrs((a),(b),(c),(d),(hipDoubleComplex*)(e),(f),(g),(hipDoubleComplex*)(h),(i),(j)) #define hipsolverDnXgeqrf_bufferSize(a,b,c,d,e,f) hipsolverDnZgeqrf_bufferSize((a),(b),(c),(hipDoubleComplex*)(d),(e),(f)) #define hipsolverDnXgeqrf(a,b,c,d,e,f,g,h,i) hipsolverDnZgeqrf((a),(b),(c),(hipDoubleComplex*)(d),(e),(hipDoubleComplex*)(f),(hipDoubleComplex*)(g),(h),(i)) #define cusolverDnXormqr_bufferSize(a,b,c,d,e,f,g,h,i,j,k,l) hipsolverDnZunmqr_bufferSize((a),(b),(c),(d),(e),(f),(hipDoubleComplex*)(g),(h),(hipDoubleComplex*)(i),(hipDoubleComplex*)(j),(k),(l)) #define cusolverDnXormqr(a,b,c,d,e,f,g,h,i,j,k,l,m,n) hipsolverDnZunmqr((a),(b),(c),(d),(e),(f),(hipDoubleComplex*)(g),(h),(hipDoubleComplex*)(i),(hipDoubleComplex*)(j),(k),(hipDoubleComplex*)(l),(m),(n)) #define cublasXtrsm(a,b,c,d,e,f,g,h,i,j,k,l) hipblasZtrsm((a),(b),(c),(d),(e),(f),(g),(hipDoubleComplex*)(h),(hipDoubleComplex*)(i),(j),(hipDoubleComplex*)(k),(l)) #endif #else /* real single */ #if defined(PETSC_USE_REAL_SINGLE) #define hipsolverDnXpotrf(a,b,c,d,e,f,g,h) hipsolverDnSpotrf((a),(b),(c),(d),(e),(f),(g),(h)) #define hipsolverDnXpotrf_bufferSize(a,b,c,d,e,f) hipsolverDnSpotrf_bufferSize((a),(b),(c),(d),(e),(f)) #define hipsolverDnXpotrs(a,b,c,d,e,f,g,h,i) hipsolverDnSpotrs((a),(b),(c),(d),(e),(f),(g),(h),(i)) #define cusolverDnXpotri(a,b,c,d,e,f,g,h) cusolverDnSpotri((a),(b),(c),(d),(e),(f),(g),(h)) #define cusolverDnXpotri_bufferSize(a,b,c,d,e,f) cusolverDnSpotri_bufferSize((a),(b),(c),(d),(e),(f)) #define cusolverDnXsytrf(a,b,c,d,e,f,g,h,i) hipsolverDnSsytrf((a),(b),(c),(d),(e),(f),(g),(h),(i)) #define cusolverDnXsytrf_bufferSize(a,b,c,d,e) hipsolverDnSsytrf_bufferSize((a),(b),(c),(d),(e)) #define cusolverDnXgetrf(a,b,c,d,e,f,g,h) hipsolverDnSgetrf((a),(b),(c),(d),(e),(f),(g),(h)) #define cusolverDnXgetrf_bufferSize(a,b,c,d,e,f) hipsolverDnSgetrf_bufferSize((a),(b),(c),(d),(e),(f)) #define cusolverDnXgetrs(a,b,c,d,e,f,g,h,i,j) hipsolverDnSgetrs((a),(b),(c),(d),(e),(f),(g),(h),(i),(j)) #define hipsolverDnXgeqrf_bufferSize(a,b,c,d,e,f) hipsolverDnSgeqrf_bufferSize((a),(b),(c),(float*)(d),(e),(f)) #define hipsolverDnXgeqrf(a,b,c,d,e,f,g,h,i) hipsolverDnSgeqrf((a),(b),(c),(float*)(d),(e),(float*)(f),(float*)(g),(h),(i)) #define cusolverDnXormqr_bufferSize(a,b,c,d,e,f,g,h,i,j,k,l) hipsolverDnSormqr_bufferSize((a),(b),(c),(d),(e),(f),(float*)(g),(h),(float*)(i),(float*)(j),(k),(l)) #define cusolverDnXormqr(a,b,c,d,e,f,g,h,i,j,k,l,m,n) hipsolverDnSormqr((a),(b),(c),(d),(e),(f),(float*)(g),(h),(float*)(i),(float*)(j),(k),(float*)(l),(m),(n)) #define cublasXtrsm(a,b,c,d,e,f,g,h,i,j,k,l) hipblasStrsm((a),(b),(c),(d),(e),(f),(g),(float*)(h),(float*)(i),(j),(float*)(k),(l)) #else /* real double */ #define hipsolverDnXpotrf(a,b,c,d,e,f,g,h) hipsolverDnDpotrf((a),(b),(c),(d),(e),(f),(g),(h)) #define hipsolverDnXpotrf_bufferSize(a,b,c,d,e,f) hipsolverDnDpotrf_bufferSize((a),(b),(c),(d),(e),(f)) #define hipsolverDnXpotrs(a,b,c,d,e,f,g,h,i) hipsolverDnDpotrs((a),(b),(c),(d),(e),(f),(g),(h),(i)) #define cusolverDnXpotri(a,b,c,d,e,f,g,h) cusolverDnDpotri((a),(b),(c),(d),(e),(f),(g),(h)) #define cusolverDnXpotri_bufferSize(a,b,c,d,e,f) cusolverDnDpotri_bufferSize((a),(b),(c),(d),(e),(f)) #define cusolverDnXsytrf(a,b,c,d,e,f,g,h,i) hipsolverDnDsytrf((a),(b),(c),(d),(e),(f),(g),(h),(i)) #define cusolverDnXsytrf_bufferSize(a,b,c,d,e) hipsolverDnDsytrf_bufferSize((a),(b),(c),(d),(e)) #define cusolverDnXgetrf(a,b,c,d,e,f,g,h) hipsolverDnDgetrf((a),(b),(c),(d),(e),(f),(g),(h)) #define 
cusolverDnXgetrf_bufferSize(a,b,c,d,e,f) hipsolverDnDgetrf_bufferSize((a),(b),(c),(d),(e),(f)) #define cusolverDnXgetrs(a,b,c,d,e,f,g,h,i,j) hipsolverDnDgetrs((a),(b),(c),(d),(e),(f),(g),(h),(i),(j)) #define hipsolverDnXgeqrf_bufferSize(a,b,c,d,e,f) hipsolverDnDgeqrf_bufferSize((a),(b),(c),(double*)(d),(e),(f)) #define hipsolverDnXgeqrf(a,b,c,d,e,f,g,h,i) hipsolverDnDgeqrf((a),(b),(c),(double*)(d),(e),(double*)(f),(double*)(g),(h),(i)) #define cusolverDnXormqr_bufferSize(a,b,c,d,e,f,g,h,i,j,k,l) hipsolverDnDormqr_bufferSize((a),(b),(c),(d),(e),(f),(double*)(g),(h),(double*)(i),(double*)(j),(k),(l)) #define cusolverDnXormqr(a,b,c,d,e,f,g,h,i,j,k,l,m,n) hipsolverDnDormqr((a),(b),(c),(d),(e),(f),(double*)(g),(h),(double*)(i),(double*)(j),(k),(double*)(l),(m),(n)) #define cublasXtrsm(a,b,c,d,e,f,g,h,i,j,k,l) hipblasDtrsm((a),(b),(c),(d),(e),(f),(g),(double*)(h),(double*)(i),(j),(double*)(k),(l)) #endif #endif typedef struct { PetscScalar *d_v; /* pointer to the matrix on the GPU */ PetscBool user_alloc; PetscScalar *unplacedarray; /* if one called MatCUDADensePlaceArray(), this is where it stashed the original */ PetscBool unplaced_user_alloc; /* factorization support */ PetscCuBLASInt *d_fact_ipiv; /* device pivots */ PetscScalar *d_fact_tau; /* device QR tau vector */ PetscScalar *d_fact_work; /* device workspace */ PetscCuBLASInt fact_lwork; PetscCuBLASInt *d_fact_info; /* device info */ /* workspace */ Vec workvec; } Mat_SeqDenseCUDA; PetscErrorCode MatSeqDenseCUDASetPreallocation(Mat A, PetscScalar *d_data) { Mat_SeqDense *cA = (Mat_SeqDense*)A->data; Mat_SeqDenseCUDA *dA = (Mat_SeqDenseCUDA*)A->spptr; PetscErrorCode ierr; PetscBool iscuda; hipError_t cerr; PetscFunctionBegin; ierr = PetscObjectTypeCompare((PetscObject)A,MATSEQDENSECUDA,&iscuda);CHKERRQ(ierr); if (!iscuda) PetscFunctionReturn(0); /* it may happen CPU preallocation has not been performed */ ierr = PetscLayoutSetUp(A->rmap);CHKERRQ(ierr); ierr = PetscLayoutSetUp(A->cmap);CHKERRQ(ierr); if (cA->lda <= 0) cA->lda = A->rmap->n; if (!dA->user_alloc) { cerr = hipFree(dA->d_v);CHKERRCUDA(cerr); } if (!d_data) { /* petsc-allocated storage */ size_t sz; ierr = PetscIntMultError(cA->lda,A->cmap->n,NULL);CHKERRQ(ierr); sz = cA->lda*A->cmap->n*sizeof(PetscScalar); cerr = hipMalloc((void**)&dA->d_v,sz);CHKERRCUDA(cerr); cerr = hipMemset(dA->d_v,0,sz);CHKERRCUDA(cerr); dA->user_alloc = PETSC_FALSE; } else { /* user-allocated storage */ dA->d_v = d_data; dA->user_alloc = PETSC_TRUE; } A->offloadmask = PETSC_OFFLOAD_GPU; A->preallocated = PETSC_TRUE; A->assembled = PETSC_TRUE; PetscFunctionReturn(0); } PetscErrorCode MatSeqDenseCUDACopyFromGPU(Mat A) { Mat_SeqDense *cA = (Mat_SeqDense*)A->data; Mat_SeqDenseCUDA *dA = (Mat_SeqDenseCUDA*)A->spptr; PetscErrorCode ierr; hipError_t cerr; PetscFunctionBegin; PetscCheckTypeName(A,MATSEQDENSECUDA); ierr = PetscInfo3(A,"%s matrix %d x %d\n",A->offloadmask == PETSC_OFFLOAD_GPU ? "Copy" : "Reusing",A->rmap->n,A->cmap->n);CHKERRQ(ierr); if (A->offloadmask == PETSC_OFFLOAD_GPU) { if (!cA->v) { /* MatCreateSeqDenseCUDA may not allocate CPU memory. 
Allocate if needed */ ierr = MatSeqDenseSetPreallocation(A,NULL);CHKERRQ(ierr); } ierr = PetscLogEventBegin(MAT_DenseCopyFromGPU,A,0,0,0);CHKERRQ(ierr); if (cA->lda > A->rmap->n) { PetscInt n = A->cmap->n,m = A->rmap->n; cerr = hipMemcpy2D(cA->v,cA->lda*sizeof(PetscScalar),dA->d_v,cA->lda*sizeof(PetscScalar),m*sizeof(PetscScalar),n,hipMemcpyDeviceToHost);CHKERRCUDA(cerr); } else { cerr = hipMemcpy(cA->v,dA->d_v,cA->lda*sizeof(PetscScalar)*A->cmap->n,hipMemcpyDeviceToHost);CHKERRCUDA(cerr); } ierr = PetscLogGpuToCpu(cA->lda*sizeof(PetscScalar)*A->cmap->n);CHKERRQ(ierr); ierr = PetscLogEventEnd(MAT_DenseCopyFromGPU,A,0,0,0);CHKERRQ(ierr); A->offloadmask = PETSC_OFFLOAD_BOTH; } PetscFunctionReturn(0); } PetscErrorCode MatSeqDenseCUDACopyToGPU(Mat A) { Mat_SeqDense *cA = (Mat_SeqDense*)A->data; Mat_SeqDenseCUDA *dA = (Mat_SeqDenseCUDA*)A->spptr; PetscBool copy; PetscErrorCode ierr; hipError_t cerr; PetscFunctionBegin; PetscCheckTypeName(A,MATSEQDENSECUDA); if (A->boundtocpu) PetscFunctionReturn(0); copy = (PetscBool)(A->offloadmask == PETSC_OFFLOAD_CPU || A->offloadmask == PETSC_OFFLOAD_UNALLOCATED); ierr = PetscInfo3(A,"%s matrix %d x %d\n",copy ? "Copy" : "Reusing",A->rmap->n,A->cmap->n);CHKERRQ(ierr); if (copy) { if (!dA->d_v) { /* Allocate GPU memory if not present */ ierr = MatSeqDenseCUDASetPreallocation(A,NULL);CHKERRQ(ierr); } ierr = PetscLogEventBegin(MAT_DenseCopyToGPU,A,0,0,0);CHKERRQ(ierr); if (cA->lda > A->rmap->n) { PetscInt n = A->cmap->n,m = A->rmap->n; cerr = hipMemcpy2D(dA->d_v,cA->lda*sizeof(PetscScalar),cA->v,cA->lda*sizeof(PetscScalar),m*sizeof(PetscScalar),n,hipMemcpyHostToDevice);CHKERRCUDA(cerr); } else { cerr = hipMemcpy(dA->d_v,cA->v,cA->lda*sizeof(PetscScalar)*A->cmap->n,hipMemcpyHostToDevice);CHKERRCUDA(cerr); } ierr = PetscLogCpuToGpu(cA->lda*sizeof(PetscScalar)*A->cmap->n);CHKERRQ(ierr); ierr = PetscLogEventEnd(MAT_DenseCopyToGPU,A,0,0,0);CHKERRQ(ierr); A->offloadmask = PETSC_OFFLOAD_BOTH; } PetscFunctionReturn(0); } static PetscErrorCode MatCopy_SeqDenseCUDA(Mat A,Mat B,MatStructure str) { Mat_SeqDense *a = (Mat_SeqDense*)A->data,*b = (Mat_SeqDense*)B->data; PetscErrorCode ierr; const PetscScalar *va; PetscScalar *vb; PetscInt lda1=a->lda,lda2=b->lda,m=A->rmap->n,n=A->cmap->n; hipError_t cerr; PetscFunctionBegin; /* If the two matrices don't have the same copy implementation, they aren't compatible for fast copy. 
*/ if (A->ops->copy != B->ops->copy) { ierr = MatCopy_Basic(A,B,str);CHKERRQ(ierr); PetscFunctionReturn(0); } if (m != B->rmap->n || n != B->cmap->n) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_SIZ,"size(B) != size(A)"); ierr = MatDenseCUDAGetArrayRead(A,&va);CHKERRQ(ierr); ierr = MatDenseCUDAGetArrayWrite(B,&vb);CHKERRQ(ierr); ierr = PetscLogGpuTimeBegin();CHKERRQ(ierr); if (lda1>m || lda2>m) { cerr = hipMemcpy2D(vb,lda2*sizeof(PetscScalar),va,lda1*sizeof(PetscScalar),m*sizeof(PetscScalar),n,hipMemcpyDeviceToDevice);CHKERRCUDA(cerr); } else { cerr = hipMemcpy(vb,va,m*(n*sizeof(PetscScalar)),hipMemcpyDeviceToDevice);CHKERRCUDA(cerr); } ierr = PetscLogGpuTimeEnd();CHKERRQ(ierr); ierr = MatDenseCUDARestoreArrayWrite(B,&vb);CHKERRQ(ierr); ierr = MatDenseCUDARestoreArrayRead(A,&va);CHKERRQ(ierr); PetscFunctionReturn(0); } static PetscErrorCode MatZeroEntries_SeqDenseCUDA(Mat A) { Mat_SeqDense *a = (Mat_SeqDense*)A->data; PetscErrorCode ierr; PetscScalar *va; PetscInt lda=a->lda,m = A->rmap->n,n = A->cmap->n; hipError_t cerr; PetscFunctionBegin; ierr = MatDenseCUDAGetArrayWrite(A,&va);CHKERRQ(ierr); ierr = PetscLogGpuTimeBegin();CHKERRQ(ierr); if (lda>m) { cerr = hipMemset2D(va,lda*sizeof(PetscScalar),0,m*sizeof(PetscScalar),n);CHKERRCUDA(cerr); } else { cerr = hipMemset(va,0,m*(n*sizeof(PetscScalar)));CHKERRCUDA(cerr); } cerr = WaitForCUDA();CHKERRCUDA(cerr); ierr = PetscLogGpuTimeEnd();CHKERRQ(ierr); ierr = MatDenseCUDARestoreArrayWrite(A,&va);CHKERRQ(ierr); PetscFunctionReturn(0); } static PetscErrorCode MatDenseCUDAPlaceArray_SeqDenseCUDA(Mat A, const PetscScalar *a) { Mat_SeqDense *aa = (Mat_SeqDense*)A->data; Mat_SeqDenseCUDA *dA = (Mat_SeqDenseCUDA*)A->spptr; PetscErrorCode ierr; PetscFunctionBegin; if (aa->vecinuse) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ORDER,"Need to call MatDenseRestoreColumnVec() first"); if (aa->matinuse) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ORDER,"Need to call MatDenseRestoreSubMatrix() first"); if (dA->unplacedarray) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ORDER,"MatDenseCUDAResetArray() must be called first"); if (aa->v) { ierr = MatSeqDenseCUDACopyToGPU(A);CHKERRQ(ierr); } dA->unplacedarray = dA->d_v; dA->unplaced_user_alloc = dA->user_alloc; dA->d_v = (PetscScalar*)a; dA->user_alloc = PETSC_TRUE; PetscFunctionReturn(0); } static PetscErrorCode MatDenseCUDAResetArray_SeqDenseCUDA(Mat A) { Mat_SeqDense *a = (Mat_SeqDense*)A->data; Mat_SeqDenseCUDA *dA = (Mat_SeqDenseCUDA*)A->spptr; PetscErrorCode ierr; PetscFunctionBegin; if (a->vecinuse) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ORDER,"Need to call MatDenseRestoreColumnVec() first"); if (a->matinuse) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ORDER,"Need to call MatDenseRestoreSubMatrix() first"); if (a->v) { ierr = MatSeqDenseCUDACopyToGPU(A);CHKERRQ(ierr); } dA->d_v = dA->unplacedarray; dA->user_alloc = dA->unplaced_user_alloc; dA->unplacedarray = NULL; PetscFunctionReturn(0); } static PetscErrorCode MatDenseCUDAReplaceArray_SeqDenseCUDA(Mat A, const PetscScalar *a) { Mat_SeqDense *aa = (Mat_SeqDense*)A->data; Mat_SeqDenseCUDA *dA = (Mat_SeqDenseCUDA*)A->spptr; hipError_t cerr; PetscFunctionBegin; if (aa->vecinuse) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ORDER,"Need to call MatDenseRestoreColumnVec() first"); if (aa->matinuse) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ORDER,"Need to call MatDenseRestoreSubMatrix() first"); if (dA->unplacedarray) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ORDER,"MatDenseCUDAResetArray() must be called first"); if (!dA->user_alloc) { cerr = hipFree(dA->d_v);CHKERRCUDA(cerr); } dA->d_v = (PetscScalar*)a; dA->user_alloc = PETSC_FALSE; 
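  /* Hedged usage sketch (illustrative only, not part of this file): how a caller might wrap its own
     device buffer in a MATSEQDENSECUDA matrix with MatDenseCUDAPlaceArray() and later restore the
     original storage with MatDenseCUDAResetArray(). The names d_user and n are hypothetical. */
#if 0
  PetscScalar *d_user;                                            /* hypothetical user-owned device allocation */
  cerr = hipMalloc((void**)&d_user,n*n*sizeof(PetscScalar));CHKERRCUDA(cerr);
  ierr = MatDenseCUDAPlaceArray(A,d_user);CHKERRQ(ierr);          /* A now points at d_user */
  /* ... use A: MatMult(), factorizations, ... */
  ierr = MatDenseCUDAResetArray(A);CHKERRQ(ierr);                 /* A goes back to its original device storage */
  cerr = hipFree(d_user);CHKERRCUDA(cerr);
#endif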
PetscFunctionReturn(0); } static PetscErrorCode MatDenseCUDAGetArrayWrite_SeqDenseCUDA(Mat A, PetscScalar **a) { Mat_SeqDenseCUDA *dA = (Mat_SeqDenseCUDA*)A->spptr; PetscErrorCode ierr; PetscFunctionBegin; if (!dA->d_v) { ierr = MatSeqDenseCUDASetPreallocation(A,NULL);CHKERRQ(ierr); } *a = dA->d_v; PetscFunctionReturn(0); } static PetscErrorCode MatDenseCUDARestoreArrayWrite_SeqDenseCUDA(Mat A, PetscScalar **a) { PetscFunctionBegin; *a = NULL; PetscFunctionReturn(0); } static PetscErrorCode MatDenseCUDAGetArrayRead_SeqDenseCUDA(Mat A, const PetscScalar **a) { Mat_SeqDenseCUDA *dA = (Mat_SeqDenseCUDA*)A->spptr; PetscErrorCode ierr; PetscFunctionBegin; ierr = MatSeqDenseCUDACopyToGPU(A);CHKERRQ(ierr); *a = dA->d_v; PetscFunctionReturn(0); } static PetscErrorCode MatDenseCUDARestoreArrayRead_SeqDenseCUDA(Mat A, const PetscScalar **a) { PetscFunctionBegin; *a = NULL; PetscFunctionReturn(0); } static PetscErrorCode MatDenseCUDAGetArray_SeqDenseCUDA(Mat A, PetscScalar **a) { Mat_SeqDenseCUDA *dA = (Mat_SeqDenseCUDA*)A->spptr; PetscErrorCode ierr; PetscFunctionBegin; ierr = MatSeqDenseCUDACopyToGPU(A);CHKERRQ(ierr); *a = dA->d_v; PetscFunctionReturn(0); } static PetscErrorCode MatDenseCUDARestoreArray_SeqDenseCUDA(Mat A, PetscScalar **a) { PetscFunctionBegin; *a = NULL; PetscFunctionReturn(0); } PETSC_EXTERN PetscErrorCode MatSeqDenseCUDAInvertFactors_Private(Mat A) { #if PETSC_PKG_CUDA_VERSION_GE(10,1,0) Mat_SeqDense *a = (Mat_SeqDense*)A->data; Mat_SeqDenseCUDA *dA = (Mat_SeqDenseCUDA*)A->spptr; PetscScalar *da; PetscErrorCode ierr; hipError_t ccer; cusolverStatus_t cerr; hipsolverDnHandle_t handle; PetscCuBLASInt n,lda; #if defined(PETSC_USE_DEBUG) PetscCuBLASInt info; #endif PetscFunctionBegin; if (!A->rmap->n || !A->cmap->n) PetscFunctionReturn(0); ierr = PetscCUSOLVERDnGetHandle(&handle);CHKERRQ(ierr); ierr = PetscCuBLASIntCast(A->cmap->n,&n);CHKERRQ(ierr); ierr = PetscCuBLASIntCast(a->lda,&lda);CHKERRQ(ierr); if (A->factortype == MAT_FACTOR_LU) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_LIB,"cusolverDngetri not implemented"); else if (A->factortype == MAT_FACTOR_CHOLESKY) { if (!dA->d_fact_ipiv) { /* spd */ PetscCuBLASInt il; ierr = MatDenseCUDAGetArray(A,&da);CHKERRQ(ierr); cerr = cusolverDnXpotri_bufferSize(handle,HIPBLAS_FILL_MODE_LOWER,n,da,lda,&il);CHKERRCUSOLVER(cerr); if (il > dA->fact_lwork) { dA->fact_lwork = il; ccer = hipFree(dA->d_fact_work);CHKERRCUDA(ccer); ccer = hipMalloc((void**)&dA->d_fact_work,dA->fact_lwork*sizeof(*dA->d_fact_work));CHKERRCUDA(ccer); } ierr = PetscLogGpuTimeBegin();CHKERRQ(ierr); cerr = cusolverDnXpotri(handle,HIPBLAS_FILL_MODE_LOWER,n,da,lda,dA->d_fact_work,dA->fact_lwork,dA->d_fact_info);CHKERRCUSOLVER(cerr); ierr = PetscLogGpuTimeEnd();CHKERRQ(ierr); ierr = MatDenseCUDARestoreArray(A,&da);CHKERRQ(ierr); /* TODO (write cuda kernel) */ ierr = MatSeqDenseSymmetrize_Private(A,PETSC_TRUE);CHKERRQ(ierr); } else SETERRQ(PETSC_COMM_SELF,PETSC_ERR_LIB,"cusolverDnsytri not implemented"); } #if defined(PETSC_USE_DEBUG) ccer = hipMemcpy(&info, dA->d_fact_info, sizeof(PetscCuBLASInt), hipMemcpyDeviceToHost);CHKERRCUDA(ccer); if (info > 0) SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_MAT_CH_ZRPVT,"Bad factorization: leading minor of order %d is zero",info); else if (info < 0) SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_PLIB,"Wrong argument to cuSolver %d",-info); #endif ierr = PetscLogGpuFlops(1.0*n*n*n/3.0);CHKERRQ(ierr); A->ops->solve = NULL; A->ops->solvetranspose = NULL; A->ops->matsolve = NULL; A->factortype = MAT_FACTOR_NONE; ierr = PetscFree(A->solvertype);CHKERRQ(ierr); 
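  /* Hedged sketch of the accessor semantics implemented above (illustrative only, not compiled):
     MatDenseCUDAGetArrayRead() guarantees valid values on the device and may trigger a host-to-device
     copy, while MatDenseCUDAGetArrayWrite() only hands back storage, since the caller promises to
     overwrite it. Variable names below are hypothetical. */
#if 0
  const PetscScalar *d_ro;
  PetscScalar       *d_wo;
  ierr = MatDenseCUDAGetArrayRead(A,&d_ro);CHKERRQ(ierr);         /* valid values, may copy to the GPU */
  ierr = MatDenseCUDARestoreArrayRead(A,&d_ro);CHKERRQ(ierr);
  ierr = MatDenseCUDAGetArrayWrite(B,&d_wo);CHKERRQ(ierr);        /* contents undefined, no copy performed */
  /* fill d_wo with a kernel or hipMemcpy(), then */
  ierr = MatDenseCUDARestoreArrayWrite(B,&d_wo);CHKERRQ(ierr);
#endif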
PetscFunctionReturn(0); #else SETERRQ(PETSC_COMM_SELF,PETSC_ERR_SUP,"Upgrade to CUDA version 10.1.0 or higher"); #endif } static PetscErrorCode MatSolve_SeqDenseCUDA_Internal(Mat A, Vec xx, Vec yy, PetscBool transpose, PetscErrorCode (*matsolve)(Mat,PetscScalar*,PetscCuBLASInt,PetscCuBLASInt,PetscCuBLASInt,PetscCuBLASInt,PetscBool)) { Mat_SeqDenseCUDA *dA = (Mat_SeqDenseCUDA*)A->spptr; PetscScalar *y; PetscCuBLASInt m=0, k=0; PetscBool xiscuda, yiscuda, aiscuda; hipError_t cerr; PetscErrorCode ierr; PetscFunctionBegin; if (A->factortype == MAT_FACTOR_NONE) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONGSTATE,"Matrix must be factored to solve"); ierr = PetscCuBLASIntCast(A->rmap->n,&m);CHKERRQ(ierr); ierr = PetscCuBLASIntCast(A->cmap->n,&k);CHKERRQ(ierr); ierr = PetscObjectTypeCompare((PetscObject)xx,VECSEQCUDA,&xiscuda);CHKERRQ(ierr); ierr = PetscObjectTypeCompare((PetscObject)yy,VECSEQCUDA,&yiscuda);CHKERRQ(ierr); { const PetscScalar *x; PetscBool xishost = PETSC_TRUE; /* The logic here is to try to minimize the amount of memory copying: if we call VecCUDAGetArrayRead(X,&x) every time xiscuda and the data is not offloaded to the GPU yet, then the data is copied to the GPU. But we are only trying to get the data in order to copy it into the y array. So the array x will be wherever the data already is so that only one memcpy is performed */ if (xiscuda && xx->offloadmask & PETSC_OFFLOAD_GPU) { ierr = VecCUDAGetArrayRead(xx, &x);CHKERRQ(ierr); xishost = PETSC_FALSE; } else { ierr = VecGetArrayRead(xx, &x);CHKERRQ(ierr); } if (k < m || !yiscuda) { if (!dA->workvec) { ierr = VecCreateSeqCUDA(PetscObjectComm((PetscObject)A), m, &(dA->workvec));CHKERRQ(ierr); } ierr = VecCUDAGetArrayWrite(dA->workvec, &y);CHKERRQ(ierr); } else { ierr = VecCUDAGetArrayWrite(yy,&y);CHKERRQ(ierr); } cerr = hipMemcpy(y,x,m*sizeof(PetscScalar),xishost ? hipMemcpyHostToDevice : hipMemcpyDeviceToDevice);CHKERRCUDA(cerr); } ierr = PetscObjectTypeCompare((PetscObject)A,MATSEQDENSECUDA,&aiscuda);CHKERRQ(ierr); if (!aiscuda) { ierr = MatConvert(A,MATSEQDENSECUDA,MAT_INPLACE_MATRIX,&A);CHKERRQ(ierr); } ierr = (*matsolve) (A, y, m, m, 1, k, transpose);CHKERRQ(ierr); if (!aiscuda) { ierr = MatConvert(A,MATSEQDENSE,MAT_INPLACE_MATRIX,&A);CHKERRQ(ierr); } if (k < m || !yiscuda) { PetscScalar *yv; /* The logic here is that the data is not yet in either yy's GPU array or its CPU array. There is nothing in the interface to say where the user would like it to end up. So we choose the GPU, because it is the faster option */ if (yiscuda) { ierr = VecCUDAGetArrayWrite(yy,&yv);CHKERRQ(ierr); } else { ierr = VecGetArray(yy,&yv);CHKERRQ(ierr); } cerr = hipMemcpy(yv,y,k*sizeof(PetscScalar),yiscuda ? 
hipMemcpyDeviceToDevice: hipMemcpyDeviceToHost);CHKERRCUDA(cerr); if (yiscuda) { ierr = VecCUDARestoreArrayWrite(yy,&yv);CHKERRQ(ierr); } else { ierr = VecRestoreArray(yy,&yv);CHKERRQ(ierr); } ierr = VecCUDARestoreArrayWrite(dA->workvec, &y);CHKERRQ(ierr); } else { ierr = VecCUDARestoreArrayWrite(yy,&y);CHKERRQ(ierr); } PetscFunctionReturn(0); } static PetscErrorCode MatMatSolve_SeqDenseCUDA_Internal(Mat A, Mat B, Mat X, PetscBool transpose, PetscErrorCode (*matsolve)(Mat,PetscScalar*,PetscCuBLASInt,PetscCuBLASInt,PetscCuBLASInt,PetscCuBLASInt,PetscBool)) { PetscScalar *y; PetscInt n, _ldb, _ldx; PetscBool biscuda, xiscuda, aiscuda; PetscCuBLASInt nrhs=0,m=0,k=0,ldb=0,ldx=0,ldy=0; hipError_t cerr; PetscErrorCode ierr; PetscFunctionBegin; if (A->factortype == MAT_FACTOR_NONE) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONGSTATE,"Matrix must be factored to solve"); ierr = PetscCuBLASIntCast(A->rmap->n,&m);CHKERRQ(ierr); ierr = PetscCuBLASIntCast(A->cmap->n,&k);CHKERRQ(ierr); ierr = MatGetSize(B,NULL,&n);CHKERRQ(ierr); ierr = PetscCuBLASIntCast(n,&nrhs);CHKERRQ(ierr); ierr = MatDenseGetLDA(B,&_ldb);CHKERRQ(ierr); ierr = PetscCuBLASIntCast(_ldb, &ldb);CHKERRQ(ierr); ierr = MatDenseGetLDA(X,&_ldx);CHKERRQ(ierr); ierr = PetscCuBLASIntCast(_ldx, &ldx);CHKERRQ(ierr); ierr = PetscObjectTypeCompare((PetscObject)B,MATSEQDENSECUDA,&biscuda);CHKERRQ(ierr); ierr = PetscObjectTypeCompare((PetscObject)X,MATSEQDENSECUDA,&xiscuda);CHKERRQ(ierr); { /* The logic here is to try to minimize the amount of memory copying: if we call MatDenseCUDAGetArrayRead(B,&b) every time biscuda and the data is not offloaded to the GPU yet, then the data is copied to the GPU. But we are only trying to get the data in order to copy it into the y array. So the array b will be wherever the data already is so that only one memcpy is performed */ const PetscScalar *b; /* some copying from B will be involved */ PetscBool bishost = PETSC_TRUE; if (biscuda && B->offloadmask & PETSC_OFFLOAD_GPU) { ierr = MatDenseCUDAGetArrayRead(B,&b);CHKERRQ(ierr); bishost = PETSC_FALSE; } else { ierr = MatDenseGetArrayRead(B,&b);CHKERRQ(ierr); } if (ldx < m || !xiscuda) { /* X's array cannot serve as the array (too small or not on device), B's * array cannot serve as the array (const), so allocate a new array */ ldy = m; cerr = hipMalloc((void**)&y,nrhs*m*sizeof(PetscScalar));CHKERRCUDA(cerr); } else { /* X's array should serve as the array */ ldy = ldx; ierr = MatDenseCUDAGetArrayWrite(X,&y);CHKERRQ(ierr); } cerr = hipMemcpy2D(y,ldy*sizeof(PetscScalar),b,ldb*sizeof(PetscScalar),m*sizeof(PetscScalar),nrhs,bishost ? hipMemcpyHostToDevice: hipMemcpyDeviceToDevice);CHKERRCUDA(cerr); if (bishost) { ierr = MatDenseRestoreArrayRead(B,&b);CHKERRQ(ierr); } else { ierr = MatDenseCUDARestoreArrayRead(B,&b);CHKERRQ(ierr); } } ierr = PetscObjectTypeCompare((PetscObject)A,MATSEQDENSECUDA,&aiscuda);CHKERRQ(ierr); if (!aiscuda) { ierr = MatConvert(A,MATSEQDENSECUDA,MAT_INPLACE_MATRIX,&A);CHKERRQ(ierr); } ierr = (*matsolve) (A, y, ldy, m, nrhs, k, transpose);CHKERRQ(ierr); if (!aiscuda) { ierr = MatConvert(A,MATSEQDENSECUDA,MAT_INPLACE_MATRIX,&A);CHKERRQ(ierr); } if (ldx < m || !xiscuda) { PetscScalar *x; /* The logic here is that the data is not yet in either X's GPU array or its CPU array. There is nothing in the interface to say where the user would like it to end up. 
So we choose the GPU, because it is the faster option */ if (xiscuda) { ierr = MatDenseCUDAGetArrayWrite(X,&x);CHKERRQ(ierr); } else { ierr = MatDenseGetArray(X,&x);CHKERRQ(ierr); } cerr = hipMemcpy2D(x,ldx*sizeof(PetscScalar),y,ldy*sizeof(PetscScalar),k*sizeof(PetscScalar),nrhs,xiscuda ? hipMemcpyDeviceToDevice: hipMemcpyDeviceToHost);CHKERRCUDA(cerr); if (xiscuda) { ierr = MatDenseCUDARestoreArrayWrite(X,&x);CHKERRQ(ierr); } else { ierr = MatDenseRestoreArray(X,&x);CHKERRQ(ierr); } cerr = hipFree(y);CHKERRCUDA(cerr); } else { ierr = MatDenseCUDARestoreArrayWrite(X,&y);CHKERRQ(ierr); } PetscFunctionReturn(0); } static PetscErrorCode MatSolve_SeqDenseCUDA_Internal_LU(Mat A, PetscScalar *x, PetscCuBLASInt ldx, PetscCuBLASInt m, PetscCuBLASInt nrhs, PetscCuBLASInt k, PetscBool T) { Mat_SeqDense *mat = (Mat_SeqDense*)A->data; Mat_SeqDenseCUDA *dA = (Mat_SeqDenseCUDA*)A->spptr; const PetscScalar *da; PetscCuBLASInt lda; hipsolverDnHandle_t handle; hipError_t ccer; cusolverStatus_t cerr; int info; PetscErrorCode ierr; PetscFunctionBegin; ierr = PetscCuBLASIntCast(mat->lda,&lda);CHKERRQ(ierr); ierr = MatDenseCUDAGetArrayRead(A,&da);CHKERRQ(ierr); ierr = PetscCUSOLVERDnGetHandle(&handle);CHKERRQ(ierr); ierr = PetscLogGpuTimeBegin();CHKERRQ(ierr); ierr = PetscInfo2(A,"LU solve %d x %d on backend\n",m,k);CHKERRQ(ierr); cerr = cusolverDnXgetrs(handle,T ? HIPBLAS_OP_T : HIPBLAS_OP_N,m,nrhs,da,lda,dA->d_fact_ipiv,x,ldx,dA->d_fact_info);CHKERRCUSOLVER(cerr); ierr = PetscLogGpuTimeEnd();CHKERRQ(ierr); ierr = MatDenseCUDARestoreArrayRead(A,&da);CHKERRQ(ierr); if (PetscDefined(USE_DEBUG)) { ccer = hipMemcpy(&info, dA->d_fact_info, sizeof(PetscCuBLASInt), hipMemcpyDeviceToHost);CHKERRCUDA(ccer); if (info > 0) SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_MAT_CH_ZRPVT,"Bad factorization: zero pivot in row %d",info-1); else if (info < 0) SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_PLIB,"Wrong argument to cuSolver %d",-info); } ierr = PetscLogGpuFlops(nrhs*(2.0*m*m - m));CHKERRQ(ierr); PetscFunctionReturn(0); } static PetscErrorCode MatSolve_SeqDenseCUDA_Internal_Cholesky(Mat A, PetscScalar *x, PetscCuBLASInt ldx, PetscCuBLASInt m, PetscCuBLASInt nrhs, PetscCuBLASInt k, PetscBool T) { Mat_SeqDense *mat = (Mat_SeqDense*)A->data; Mat_SeqDenseCUDA *dA = (Mat_SeqDenseCUDA*)A->spptr; const PetscScalar *da; PetscCuBLASInt lda; hipsolverDnHandle_t handle; hipError_t ccer; cusolverStatus_t cerr; int info; PetscErrorCode ierr; PetscFunctionBegin; ierr = PetscCuBLASIntCast(mat->lda,&lda);CHKERRQ(ierr); ierr = MatDenseCUDAGetArrayRead(A,&da);CHKERRQ(ierr); ierr = PetscCUSOLVERDnGetHandle(&handle);CHKERRQ(ierr); ierr = PetscLogGpuTimeBegin();CHKERRQ(ierr); ierr = PetscInfo2(A,"Cholesky solve %d x %d on backend\n",m,k);CHKERRQ(ierr); if (!dA->d_fact_ipiv) { /* spd */ /* ========= Program hit hipErrorNotReady (error 34) due to "device not ready" on CUDA API call to hipEventQuery. 
*/ cerr = hipsolverDnXpotrs(handle,HIPBLAS_FILL_MODE_LOWER,m,nrhs,da,lda,x,ldx,dA->d_fact_info);CHKERRCUSOLVER(cerr); } else SETERRQ(PETSC_COMM_SELF,PETSC_ERR_LIB,"cusolverDnsytrs not implemented"); ierr = PetscLogGpuTimeEnd();CHKERRQ(ierr); ierr = MatDenseCUDARestoreArrayRead(A,&da);CHKERRQ(ierr); if (PetscDefined(USE_DEBUG)) { ccer = hipMemcpy(&info, dA->d_fact_info, sizeof(PetscCuBLASInt), hipMemcpyDeviceToHost);CHKERRCUDA(ccer); if (info > 0) SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_MAT_CH_ZRPVT,"Bad factorization: zero pivot in row %d",info-1); else if (info < 0) SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_PLIB,"Wrong argument to cuSolver %d",-info); } ierr = PetscLogGpuFlops(nrhs*(2.0*m*m - m));CHKERRQ(ierr); PetscFunctionReturn(0); } static PetscErrorCode MatSolve_SeqDenseCUDA_Internal_QR(Mat A, PetscScalar *x, PetscCuBLASInt ldx, PetscCuBLASInt m, PetscCuBLASInt nrhs, PetscCuBLASInt k, PetscBool T) { Mat_SeqDense *mat = (Mat_SeqDense*)A->data; Mat_SeqDenseCUDA *dA = (Mat_SeqDenseCUDA*)A->spptr; const PetscScalar *da; PetscCuBLASInt lda, rank; hipsolverDnHandle_t handle; hipblasHandle_t bhandle; hipError_t ccer; cusolverStatus_t csrr; hipblasStatus_t cbrr; int info; hipblasOperation_t trans; PetscScalar one = 1.; PetscErrorCode ierr; PetscFunctionBegin; ierr = PetscCuBLASIntCast(mat->lda,&lda);CHKERRQ(ierr); ierr = PetscCuBLASIntCast(mat->rank,&rank);CHKERRQ(ierr); ierr = MatDenseCUDAGetArrayRead(A,&da);CHKERRQ(ierr); ierr = PetscCUSOLVERDnGetHandle(&handle);CHKERRQ(ierr); ierr = PetscCUBLASGetHandle(&bhandle);CHKERRQ(ierr); ierr = PetscLogGpuTimeBegin();CHKERRQ(ierr); ierr = PetscInfo2(A,"QR solve %d x %d on backend\n",m,k);CHKERRQ(ierr); if (!T) { if (PetscDefined(USE_COMPLEX)) { trans = HIPBLAS_OP_C; } else { trans = HIPBLAS_OP_T; } csrr = cusolverDnXormqr(handle, HIPBLAS_SIDE_LEFT, trans, m, nrhs, rank, da, lda, dA->d_fact_tau, x, ldx, dA->d_fact_work, dA->fact_lwork, dA->d_fact_info);CHKERRCUSOLVER(csrr); if (PetscDefined(USE_DEBUG)) { ccer = hipMemcpy(&info, dA->d_fact_info, sizeof(PetscCuBLASInt), hipMemcpyDeviceToHost);CHKERRCUDA(ccer); if (info != 0) SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_PLIB,"Wrong argument to cuSolver %d",-info); } cbrr = cublasXtrsm(bhandle, HIPBLAS_SIDE_LEFT, HIPBLAS_FILL_MODE_UPPER, HIPBLAS_OP_N, HIPBLAS_DIAG_NON_UNIT, rank, nrhs, &one, da, lda, x, ldx);CHKERRCUBLAS(cbrr); } else { cbrr = cublasXtrsm(bhandle, HIPBLAS_SIDE_LEFT, HIPBLAS_FILL_MODE_UPPER, HIPBLAS_OP_T, HIPBLAS_DIAG_NON_UNIT, rank, nrhs, &one, da, lda, x, ldx);CHKERRCUBLAS(cbrr); csrr = cusolverDnXormqr(handle, HIPBLAS_SIDE_LEFT, HIPBLAS_OP_N, m, nrhs, rank, da, lda, dA->d_fact_tau, x, ldx, dA->d_fact_work, dA->fact_lwork, dA->d_fact_info);CHKERRCUSOLVER(csrr); if (PetscDefined(USE_DEBUG)) { ccer = hipMemcpy(&info, dA->d_fact_info, sizeof(PetscCuBLASInt), hipMemcpyDeviceToHost);CHKERRCUDA(ccer); if (info != 0) SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_PLIB,"Wrong argument to cuSolver %d",-info); } } ierr = PetscLogGpuTimeEnd();CHKERRQ(ierr); ierr = MatDenseCUDARestoreArrayRead(A,&da);CHKERRQ(ierr); ierr = PetscLogFlops(nrhs*(4.0*m*mat->rank - PetscSqr(mat->rank)));CHKERRQ(ierr); PetscFunctionReturn(0); } static PetscErrorCode MatSolve_SeqDenseCUDA_LU(Mat A,Vec xx,Vec yy) { PetscErrorCode ierr; PetscFunctionBegin; ierr = MatSolve_SeqDenseCUDA_Internal(A, xx, yy, PETSC_FALSE, MatSolve_SeqDenseCUDA_Internal_LU);CHKERRQ(ierr); PetscFunctionReturn(0); } static PetscErrorCode MatSolveTranspose_SeqDenseCUDA_LU(Mat A,Vec xx,Vec yy) { PetscErrorCode ierr; PetscFunctionBegin; ierr = MatSolve_SeqDenseCUDA_Internal(A, xx, yy, 
PETSC_TRUE, MatSolve_SeqDenseCUDA_Internal_LU);CHKERRQ(ierr); PetscFunctionReturn(0); } static PetscErrorCode MatSolve_SeqDenseCUDA_Cholesky(Mat A,Vec xx,Vec yy) { PetscErrorCode ierr; PetscFunctionBegin; ierr = MatSolve_SeqDenseCUDA_Internal(A, xx, yy, PETSC_FALSE, MatSolve_SeqDenseCUDA_Internal_Cholesky);CHKERRQ(ierr); PetscFunctionReturn(0); } static PetscErrorCode MatSolveTranspose_SeqDenseCUDA_Cholesky(Mat A,Vec xx,Vec yy) { PetscErrorCode ierr; PetscFunctionBegin; ierr = MatSolve_SeqDenseCUDA_Internal(A, xx, yy, PETSC_TRUE, MatSolve_SeqDenseCUDA_Internal_Cholesky);CHKERRQ(ierr); PetscFunctionReturn(0); } static PetscErrorCode MatSolve_SeqDenseCUDA_QR(Mat A,Vec xx,Vec yy) { PetscErrorCode ierr; PetscFunctionBegin; ierr = MatSolve_SeqDenseCUDA_Internal(A, xx, yy, PETSC_FALSE, MatSolve_SeqDenseCUDA_Internal_QR);CHKERRQ(ierr); PetscFunctionReturn(0); } static PetscErrorCode MatSolveTranspose_SeqDenseCUDA_QR(Mat A,Vec xx,Vec yy) { PetscErrorCode ierr; PetscFunctionBegin; ierr = MatSolve_SeqDenseCUDA_Internal(A, xx, yy, PETSC_TRUE, MatSolve_SeqDenseCUDA_Internal_QR);CHKERRQ(ierr); PetscFunctionReturn(0); } static PetscErrorCode MatMatSolve_SeqDenseCUDA_LU(Mat A,Mat B,Mat X) { PetscErrorCode ierr; PetscFunctionBegin; ierr = MatMatSolve_SeqDenseCUDA_Internal(A, B, X, PETSC_FALSE, MatSolve_SeqDenseCUDA_Internal_LU);CHKERRQ(ierr); PetscFunctionReturn(0); } static PetscErrorCode MatMatSolveTranspose_SeqDenseCUDA_LU(Mat A,Mat B,Mat X) { PetscErrorCode ierr; PetscFunctionBegin; ierr = MatMatSolve_SeqDenseCUDA_Internal(A, B, X, PETSC_TRUE, MatSolve_SeqDenseCUDA_Internal_LU);CHKERRQ(ierr); PetscFunctionReturn(0); } static PetscErrorCode MatMatSolve_SeqDenseCUDA_Cholesky(Mat A,Mat B,Mat X) { PetscErrorCode ierr; PetscFunctionBegin; ierr = MatMatSolve_SeqDenseCUDA_Internal(A, B, X, PETSC_FALSE, MatSolve_SeqDenseCUDA_Internal_Cholesky);CHKERRQ(ierr); PetscFunctionReturn(0); } static PetscErrorCode MatMatSolveTranspose_SeqDenseCUDA_Cholesky(Mat A,Mat B,Mat X) { PetscErrorCode ierr; PetscFunctionBegin; ierr = MatMatSolve_SeqDenseCUDA_Internal(A, B, X, PETSC_TRUE, MatSolve_SeqDenseCUDA_Internal_Cholesky);CHKERRQ(ierr); PetscFunctionReturn(0); } static PetscErrorCode MatMatSolve_SeqDenseCUDA_QR(Mat A,Mat B,Mat X) { PetscErrorCode ierr; PetscFunctionBegin; ierr = MatMatSolve_SeqDenseCUDA_Internal(A, B, X, PETSC_FALSE, MatSolve_SeqDenseCUDA_Internal_QR);CHKERRQ(ierr); PetscFunctionReturn(0); } static PetscErrorCode MatMatSolveTranspose_SeqDenseCUDA_QR(Mat A,Mat B,Mat X) { PetscErrorCode ierr; PetscFunctionBegin; ierr = MatMatSolve_SeqDenseCUDA_Internal(A, B, X, PETSC_TRUE, MatSolve_SeqDenseCUDA_Internal_QR);CHKERRQ(ierr); PetscFunctionReturn(0); } static PetscErrorCode MatLUFactor_SeqDenseCUDA(Mat A,IS rperm,IS cperm,const MatFactorInfo *factinfo) { Mat_SeqDense *a = (Mat_SeqDense*)A->data; Mat_SeqDenseCUDA *dA = (Mat_SeqDenseCUDA*)A->spptr; PetscScalar *da; PetscCuBLASInt m,n,lda; #if defined(PETSC_USE_DEBUG) int info; #endif cusolverStatus_t cerr; hipsolverDnHandle_t handle; hipError_t ccer; PetscErrorCode ierr; PetscFunctionBegin; if (!A->rmap->n || !A->cmap->n) PetscFunctionReturn(0); ierr = PetscCUSOLVERDnGetHandle(&handle);CHKERRQ(ierr); ierr = MatDenseCUDAGetArray(A,&da);CHKERRQ(ierr); ierr = PetscCuBLASIntCast(A->cmap->n,&n);CHKERRQ(ierr); ierr = PetscCuBLASIntCast(A->rmap->n,&m);CHKERRQ(ierr); ierr = PetscCuBLASIntCast(a->lda,&lda);CHKERRQ(ierr); ierr = PetscInfo2(A,"LU factor %d x %d on backend\n",m,n);CHKERRQ(ierr); if (!dA->d_fact_ipiv) { ccer = 
hipMalloc((void**)&dA->d_fact_ipiv,n*sizeof(*dA->d_fact_ipiv));CHKERRCUDA(ccer); } if (!dA->fact_lwork) { cerr = cusolverDnXgetrf_bufferSize(handle,m,n,da,lda,&dA->fact_lwork);CHKERRCUSOLVER(cerr); ccer = hipMalloc((void**)&dA->d_fact_work,dA->fact_lwork*sizeof(*dA->d_fact_work));CHKERRCUDA(ccer); } if (!dA->d_fact_info) { ccer = hipMalloc((void**)&dA->d_fact_info,sizeof(*dA->d_fact_info));CHKERRCUDA(ccer); } ierr = PetscLogGpuTimeBegin();CHKERRQ(ierr); cerr = cusolverDnXgetrf(handle,m,n,da,lda,dA->d_fact_work,dA->d_fact_ipiv,dA->d_fact_info);CHKERRCUSOLVER(cerr); ierr = PetscLogGpuTimeEnd();CHKERRQ(ierr); ierr = MatDenseCUDARestoreArray(A,&da);CHKERRQ(ierr); #if defined(PETSC_USE_DEBUG) ccer = hipMemcpy(&info, dA->d_fact_info, sizeof(PetscCuBLASInt), hipMemcpyDeviceToHost);CHKERRCUDA(ccer); if (info > 0) SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_MAT_LU_ZRPVT,"Bad factorization: zero pivot in row %d",info-1); else if (info < 0) SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_PLIB,"Wrong argument to cuSolver %d",-info); #endif A->factortype = MAT_FACTOR_LU; ierr = PetscLogGpuFlops(2.0*n*n*m/3.0);CHKERRQ(ierr); A->ops->solve = MatSolve_SeqDenseCUDA_LU; A->ops->solvetranspose = MatSolveTranspose_SeqDenseCUDA_LU; A->ops->matsolve = MatMatSolve_SeqDenseCUDA_LU; A->ops->matsolvetranspose = MatMatSolveTranspose_SeqDenseCUDA_LU; ierr = PetscFree(A->solvertype);CHKERRQ(ierr); ierr = PetscStrallocpy(MATSOLVERCUDA,&A->solvertype);CHKERRQ(ierr); PetscFunctionReturn(0); } static PetscErrorCode MatCholeskyFactor_SeqDenseCUDA(Mat A,IS perm,const MatFactorInfo *factinfo) { Mat_SeqDense *a = (Mat_SeqDense*)A->data; Mat_SeqDenseCUDA *dA = (Mat_SeqDenseCUDA*)A->spptr; PetscScalar *da; PetscCuBLASInt n,lda; #if defined(PETSC_USE_DEBUG) int info; #endif cusolverStatus_t cerr; hipsolverDnHandle_t handle; hipError_t ccer; PetscErrorCode ierr; PetscFunctionBegin; if (!A->rmap->n || !A->cmap->n) PetscFunctionReturn(0); ierr = PetscCUSOLVERDnGetHandle(&handle);CHKERRQ(ierr); ierr = PetscCuBLASIntCast(A->rmap->n,&n);CHKERRQ(ierr); ierr = PetscInfo2(A,"Cholesky factor %d x %d on backend\n",n,n);CHKERRQ(ierr); if (A->spd) { ierr = MatDenseCUDAGetArray(A,&da);CHKERRQ(ierr); ierr = PetscCuBLASIntCast(a->lda,&lda);CHKERRQ(ierr); if (!dA->fact_lwork) { cerr = hipsolverDnXpotrf_bufferSize(handle,HIPBLAS_FILL_MODE_LOWER,n,da,lda,&dA->fact_lwork);CHKERRCUSOLVER(cerr); ccer = hipMalloc((void**)&dA->d_fact_work,dA->fact_lwork*sizeof(*dA->d_fact_work));CHKERRCUDA(ccer); } if (!dA->d_fact_info) { ccer = hipMalloc((void**)&dA->d_fact_info,sizeof(*dA->d_fact_info));CHKERRCUDA(ccer); } ierr = PetscLogGpuTimeBegin();CHKERRQ(ierr); cerr = hipsolverDnXpotrf(handle,HIPBLAS_FILL_MODE_LOWER,n,da,lda,dA->d_fact_work,dA->fact_lwork,dA->d_fact_info);CHKERRCUSOLVER(cerr); ierr = PetscLogGpuTimeEnd();CHKERRQ(ierr); ierr = MatDenseCUDARestoreArray(A,&da);CHKERRQ(ierr); #if defined(PETSC_USE_DEBUG) ccer = hipMemcpy(&info, dA->d_fact_info, sizeof(PetscCuBLASInt), hipMemcpyDeviceToHost);CHKERRCUDA(ccer); if (info > 0) SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_MAT_CH_ZRPVT,"Bad factorization: zero pivot in row %d",info-1); else if (info < 0) SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_PLIB,"Wrong argument to cuSolver %d",-info); #endif A->factortype = MAT_FACTOR_CHOLESKY; ierr = PetscLogGpuFlops(1.0*n*n*n/3.0);CHKERRQ(ierr); } else SETERRQ(PETSC_COMM_SELF,PETSC_ERR_SUP,"cusolverDnsytrs unavailable. 
Use MAT_FACTOR_LU"); #if 0 /* at the time of writing this interface (cuda 10.0), cusolverDn does not implement *sytrs and *hetr* routines The code below should work, and it can be activated when *sytrs routines will be available */ if (!dA->d_fact_ipiv) { ccer = hipMalloc((void**)&dA->d_fact_ipiv,n*sizeof(*dA->d_fact_ipiv));CHKERRCUDA(ccer); } if (!dA->fact_lwork) { cerr = cusolverDnXsytrf_bufferSize(handle,n,da,lda,&dA->fact_lwork);CHKERRCUSOLVER(cerr); ccer = hipMalloc((void**)&dA->d_fact_work,dA->fact_lwork*sizeof(*dA->d_fact_work));CHKERRCUDA(ccer); } if (!dA->d_fact_info) { ccer = hipMalloc((void**)&dA->d_fact_info,sizeof(*dA->d_fact_info));CHKERRCUDA(ccer); } ierr = PetscLogGpuTimeBegin();CHKERRQ(ierr); cerr = cusolverDnXsytrf(handle,HIPBLAS_FILL_MODE_LOWER,n,da,lda,dA->d_fact_ipiv,dA->d_fact_work,dA->fact_lwork,dA->d_fact_info);CHKERRCUSOLVER(cerr); ierr = PetscLogGpuTimeEnd();CHKERRQ(ierr); #endif A->ops->solve = MatSolve_SeqDenseCUDA_Cholesky; A->ops->solvetranspose = MatSolveTranspose_SeqDenseCUDA_Cholesky; A->ops->matsolve = MatMatSolve_SeqDenseCUDA_Cholesky; A->ops->matsolvetranspose = MatMatSolveTranspose_SeqDenseCUDA_Cholesky; ierr = PetscFree(A->solvertype);CHKERRQ(ierr); ierr = PetscStrallocpy(MATSOLVERCUDA,&A->solvertype);CHKERRQ(ierr); PetscFunctionReturn(0); } static PetscErrorCode MatQRFactor_SeqDenseCUDA(Mat A,IS col,const MatFactorInfo *factinfo) { Mat_SeqDense *a = (Mat_SeqDense*)A->data; Mat_SeqDenseCUDA *dA = (Mat_SeqDenseCUDA*)A->spptr; PetscScalar *da; PetscCuBLASInt m,min,max,n,lda; #if defined(PETSC_USE_DEBUG) int info; #endif cusolverStatus_t cerr; hipsolverDnHandle_t handle; hipError_t ccer; PetscErrorCode ierr; PetscFunctionBegin; if (!A->rmap->n || !A->cmap->n) PetscFunctionReturn(0); ierr = PetscCUSOLVERDnGetHandle(&handle);CHKERRQ(ierr); ierr = MatDenseCUDAGetArray(A,&da);CHKERRQ(ierr); ierr = PetscCuBLASIntCast(A->cmap->n,&n);CHKERRQ(ierr); ierr = PetscCuBLASIntCast(A->rmap->n,&m);CHKERRQ(ierr); ierr = PetscCuBLASIntCast(a->lda,&lda);CHKERRQ(ierr); ierr = PetscInfo2(A,"QR factor %d x %d on backend\n",m,n);CHKERRQ(ierr); max = PetscMax(m,n); min = PetscMin(m,n); if (!dA->d_fact_tau) { ccer = hipMalloc((void**)&dA->d_fact_tau,min*sizeof(*dA->d_fact_tau));CHKERRCUDA(ccer); } if (!dA->d_fact_ipiv) { ccer = hipMalloc((void**)&dA->d_fact_ipiv,n*sizeof(*dA->d_fact_ipiv));CHKERRCUDA(ccer); } if (!dA->fact_lwork) { cerr = hipsolverDnXgeqrf_bufferSize(handle,m,n,da,lda,&dA->fact_lwork);CHKERRCUSOLVER(cerr); ccer = hipMalloc((void**)&dA->d_fact_work,dA->fact_lwork*sizeof(*dA->d_fact_work));CHKERRCUDA(ccer); } if (!dA->d_fact_info) { ccer = hipMalloc((void**)&dA->d_fact_info,sizeof(*dA->d_fact_info));CHKERRCUDA(ccer); } if (!dA->workvec) { ierr = VecCreateSeqCUDA(PetscObjectComm((PetscObject)A), m, &(dA->workvec));CHKERRQ(ierr); } ierr = PetscLogGpuTimeBegin();CHKERRQ(ierr); cerr = hipsolverDnXgeqrf(handle,m,n,da,lda,dA->d_fact_tau,dA->d_fact_work,dA->fact_lwork,dA->d_fact_info);CHKERRCUSOLVER(cerr); ierr = PetscLogGpuTimeEnd();CHKERRQ(ierr); ierr = MatDenseCUDARestoreArray(A,&da);CHKERRQ(ierr); #if defined(PETSC_USE_DEBUG) ccer = hipMemcpy(&info, dA->d_fact_info, sizeof(PetscCuBLASInt), hipMemcpyDeviceToHost);CHKERRCUDA(ccer); if (info < 0) SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_PLIB,"Wrong argument to cuSolver %d",-info); #endif A->factortype = MAT_FACTOR_QR; a->rank = min; ierr = PetscLogGpuFlops(2.0*min*min*(max-min/3.0));CHKERRQ(ierr); A->ops->solve = MatSolve_SeqDenseCUDA_QR; A->ops->solvetranspose = MatSolveTranspose_SeqDenseCUDA_QR; A->ops->matsolve = 
MatMatSolve_SeqDenseCUDA_QR; A->ops->matsolvetranspose = MatMatSolveTranspose_SeqDenseCUDA_QR; ierr = PetscFree(A->solvertype);CHKERRQ(ierr); ierr = PetscStrallocpy(MATSOLVERCUDA,&A->solvertype);CHKERRQ(ierr); PetscFunctionReturn(0); } /* GEMM kernel: C = op(A)*op(B), tA, tB flag transposition */ PETSC_INTERN PetscErrorCode MatMatMultNumeric_SeqDenseCUDA_SeqDenseCUDA_Private(Mat A,Mat B,Mat C,PetscBool tA,PetscBool tB) { const PetscScalar *da,*db; PetscScalar *dc; PetscScalar one=1.0,zero=0.0; PetscCuBLASInt m,n,k; PetscInt alda,blda,clda; PetscErrorCode ierr; hipblasHandle_t cublasv2handle; PetscBool Aiscuda,Biscuda; hipblasStatus_t berr; PetscFunctionBegin; /* we may end up with SEQDENSE as one of the arguments */ ierr = PetscObjectTypeCompare((PetscObject)A,MATSEQDENSECUDA,&Aiscuda);CHKERRQ(ierr); ierr = PetscObjectTypeCompare((PetscObject)B,MATSEQDENSECUDA,&Biscuda);CHKERRQ(ierr); if (!Aiscuda) { ierr = MatConvert(A,MATSEQDENSECUDA,MAT_INPLACE_MATRIX,&A);CHKERRQ(ierr); } if (!Biscuda) { ierr = MatConvert(B,MATSEQDENSECUDA,MAT_INPLACE_MATRIX,&B);CHKERRQ(ierr); } ierr = PetscCuBLASIntCast(C->rmap->n,&m);CHKERRQ(ierr); ierr = PetscCuBLASIntCast(C->cmap->n,&n);CHKERRQ(ierr); if (tA) { ierr = PetscCuBLASIntCast(A->rmap->n,&k);CHKERRQ(ierr); } else { ierr = PetscCuBLASIntCast(A->cmap->n,&k);CHKERRQ(ierr); } if (!m || !n || !k) PetscFunctionReturn(0); ierr = PetscInfo3(C,"Matrix-Matrix product %d x %d x %d on backend\n",m,k,n);CHKERRQ(ierr); ierr = MatDenseCUDAGetArrayRead(A,&da);CHKERRQ(ierr); ierr = MatDenseCUDAGetArrayRead(B,&db);CHKERRQ(ierr); ierr = MatDenseCUDAGetArrayWrite(C,&dc);CHKERRQ(ierr); ierr = MatDenseGetLDA(A,&alda);CHKERRQ(ierr); ierr = MatDenseGetLDA(B,&blda);CHKERRQ(ierr); ierr = MatDenseGetLDA(C,&clda);CHKERRQ(ierr); ierr = PetscCUBLASGetHandle(&cublasv2handle);CHKERRQ(ierr); ierr = PetscLogGpuTimeBegin();CHKERRQ(ierr); berr = cublasXgemm(cublasv2handle,tA ? HIPBLAS_OP_T : HIPBLAS_OP_N,tB ? 
HIPBLAS_OP_T : HIPBLAS_OP_N, m,n,k,&one,da,alda,db,blda,&zero,dc,clda);CHKERRCUBLAS(berr); ierr = PetscLogGpuTimeEnd();CHKERRQ(ierr); ierr = PetscLogGpuFlops(1.0*m*n*k + 1.0*m*n*(k-1));CHKERRQ(ierr); ierr = MatDenseCUDARestoreArrayRead(A,&da);CHKERRQ(ierr); ierr = MatDenseCUDARestoreArrayRead(B,&db);CHKERRQ(ierr); ierr = MatDenseCUDARestoreArrayWrite(C,&dc);CHKERRQ(ierr); if (!Aiscuda) { ierr = MatConvert(A,MATSEQDENSE,MAT_INPLACE_MATRIX,&A);CHKERRQ(ierr); } if (!Biscuda) { ierr = MatConvert(B,MATSEQDENSE,MAT_INPLACE_MATRIX,&B);CHKERRQ(ierr); } PetscFunctionReturn(0); } PetscErrorCode MatTransposeMatMultNumeric_SeqDenseCUDA_SeqDenseCUDA(Mat A,Mat B,Mat C) { PetscErrorCode ierr; PetscFunctionBegin; ierr = MatMatMultNumeric_SeqDenseCUDA_SeqDenseCUDA_Private(A,B,C,PETSC_TRUE,PETSC_FALSE);CHKERRQ(ierr); PetscFunctionReturn(0); } PetscErrorCode MatMatMultNumeric_SeqDenseCUDA_SeqDenseCUDA(Mat A,Mat B,Mat C) { PetscErrorCode ierr; PetscFunctionBegin; ierr = MatMatMultNumeric_SeqDenseCUDA_SeqDenseCUDA_Private(A,B,C,PETSC_FALSE,PETSC_FALSE);CHKERRQ(ierr); PetscFunctionReturn(0); } PetscErrorCode MatMatTransposeMultNumeric_SeqDenseCUDA_SeqDenseCUDA(Mat A,Mat B,Mat C) { PetscErrorCode ierr; PetscFunctionBegin; ierr = MatMatMultNumeric_SeqDenseCUDA_SeqDenseCUDA_Private(A,B,C,PETSC_FALSE,PETSC_TRUE);CHKERRQ(ierr); PetscFunctionReturn(0); } PetscErrorCode MatProductSetFromOptions_SeqDenseCUDA(Mat C) { PetscErrorCode ierr; PetscFunctionBegin; ierr = MatProductSetFromOptions_SeqDense(C);CHKERRQ(ierr); PetscFunctionReturn(0); } /* zz = op(A)*xx + yy if yy == NULL, only MatMult */ static PetscErrorCode MatMultAdd_SeqDenseCUDA_Private(Mat A,Vec xx,Vec yy,Vec zz,PetscBool trans) { Mat_SeqDense *mat = (Mat_SeqDense*)A->data; const PetscScalar *xarray,*da; PetscScalar *zarray; PetscScalar one=1.0,zero=0.0; PetscCuBLASInt m, n, lda; hipblasHandle_t cublasv2handle; hipblasStatus_t berr; PetscErrorCode ierr; PetscFunctionBegin; if (yy && yy != zz) { /* mult add */ ierr = VecCopy_SeqCUDA(yy,zz);CHKERRQ(ierr); } if (!A->rmap->n || !A->cmap->n) { if (!yy) { /* mult only */ ierr = VecSet_SeqCUDA(zz,0.0);CHKERRQ(ierr); } PetscFunctionReturn(0); } ierr = PetscInfo2(A,"Matrix-vector product %d x %d on backend\n",A->rmap->n,A->cmap->n);CHKERRQ(ierr); ierr = PetscCuBLASIntCast(A->rmap->n,&m);CHKERRQ(ierr); ierr = PetscCuBLASIntCast(A->cmap->n,&n);CHKERRQ(ierr); ierr = PetscCuBLASIntCast(mat->lda,&lda);CHKERRQ(ierr); ierr = PetscCUBLASGetHandle(&cublasv2handle);CHKERRQ(ierr); ierr = MatDenseCUDAGetArrayRead(A,&da);CHKERRQ(ierr); ierr = VecCUDAGetArrayRead(xx,&xarray);CHKERRQ(ierr); ierr = VecCUDAGetArray(zz,&zarray);CHKERRQ(ierr); ierr = PetscLogGpuTimeBegin();CHKERRQ(ierr); berr = cublasXgemv(cublasv2handle,trans ? HIPBLAS_OP_T : HIPBLAS_OP_N, m,n,&one,da,lda,xarray,1,(yy ? &one : &zero),zarray,1);CHKERRCUBLAS(berr); ierr = PetscLogGpuTimeEnd();CHKERRQ(ierr); ierr = PetscLogGpuFlops(2.0*A->rmap->n*A->cmap->n - (yy ? 
0 : A->rmap->n));CHKERRQ(ierr); ierr = VecCUDARestoreArrayRead(xx,&xarray);CHKERRQ(ierr); ierr = VecCUDARestoreArray(zz,&zarray);CHKERRQ(ierr); ierr = MatDenseCUDARestoreArrayRead(A,&da);CHKERRQ(ierr); PetscFunctionReturn(0); } PetscErrorCode MatMultAdd_SeqDenseCUDA(Mat A,Vec xx,Vec yy,Vec zz) { PetscErrorCode ierr; PetscFunctionBegin; ierr = MatMultAdd_SeqDenseCUDA_Private(A,xx,yy,zz,PETSC_FALSE);CHKERRQ(ierr); PetscFunctionReturn(0); } PetscErrorCode MatMultTransposeAdd_SeqDenseCUDA(Mat A,Vec xx,Vec yy,Vec zz) { PetscErrorCode ierr; PetscFunctionBegin; ierr = MatMultAdd_SeqDenseCUDA_Private(A,xx,yy,zz,PETSC_TRUE);CHKERRQ(ierr); PetscFunctionReturn(0); } PetscErrorCode MatMult_SeqDenseCUDA(Mat A,Vec xx,Vec yy) { PetscErrorCode ierr; PetscFunctionBegin; ierr = MatMultAdd_SeqDenseCUDA_Private(A,xx,NULL,yy,PETSC_FALSE);CHKERRQ(ierr); PetscFunctionReturn(0); } PetscErrorCode MatMultTranspose_SeqDenseCUDA(Mat A,Vec xx,Vec yy) { PetscErrorCode ierr; PetscFunctionBegin; ierr = MatMultAdd_SeqDenseCUDA_Private(A,xx,NULL,yy,PETSC_TRUE);CHKERRQ(ierr); PetscFunctionReturn(0); } static PetscErrorCode MatDenseGetArrayRead_SeqDenseCUDA(Mat A,const PetscScalar **array) { Mat_SeqDense *mat = (Mat_SeqDense*)A->data; PetscErrorCode ierr; PetscFunctionBegin; ierr = MatSeqDenseCUDACopyFromGPU(A);CHKERRQ(ierr); *array = mat->v; PetscFunctionReturn(0); } static PetscErrorCode MatDenseGetArrayWrite_SeqDenseCUDA(Mat A,PetscScalar **array) { Mat_SeqDense *mat = (Mat_SeqDense*)A->data; PetscErrorCode ierr; PetscFunctionBegin; if (!mat->v) { /* MatCreateSeqDenseCUDA may not allocate CPU memory. Allocate if needed */ ierr = MatSeqDenseSetPreallocation(A,NULL);CHKERRQ(ierr); } *array = mat->v; A->offloadmask = PETSC_OFFLOAD_CPU; PetscFunctionReturn(0); } static PetscErrorCode MatDenseGetArray_SeqDenseCUDA(Mat A,PetscScalar **array) { Mat_SeqDense *mat = (Mat_SeqDense*)A->data; PetscErrorCode ierr; PetscFunctionBegin; ierr = MatSeqDenseCUDACopyFromGPU(A);CHKERRQ(ierr); *array = mat->v; A->offloadmask = PETSC_OFFLOAD_CPU; PetscFunctionReturn(0); } PetscErrorCode MatScale_SeqDenseCUDA(Mat Y,PetscScalar alpha) { Mat_SeqDense *y = (Mat_SeqDense*)Y->data; PetscScalar *dy; PetscCuBLASInt j,N,m,lday,one = 1; hipblasHandle_t cublasv2handle; hipblasStatus_t berr; PetscErrorCode ierr; PetscFunctionBegin; ierr = PetscCUBLASGetHandle(&cublasv2handle);CHKERRQ(ierr); ierr = MatDenseCUDAGetArray(Y,&dy);CHKERRQ(ierr); ierr = PetscCuBLASIntCast(Y->rmap->n*Y->cmap->n,&N);CHKERRQ(ierr); ierr = PetscCuBLASIntCast(Y->rmap->n,&m);CHKERRQ(ierr); ierr = PetscCuBLASIntCast(y->lda,&lday);CHKERRQ(ierr); ierr = PetscInfo2(Y,"Performing Scale %d x %d on backend\n",Y->rmap->n,Y->cmap->n);CHKERRQ(ierr); ierr = PetscLogGpuTimeBegin();CHKERRQ(ierr); if (lday>m) { for (j=0; j<Y->cmap->n; j++) { berr = cublasXscal(cublasv2handle,m,&alpha,dy+lday*j,one);CHKERRCUBLAS(berr); } } else { berr = cublasXscal(cublasv2handle,N,&alpha,dy,one);CHKERRCUBLAS(berr); } ierr = PetscLogGpuTimeEnd();CHKERRQ(ierr); ierr = PetscLogGpuFlops(N);CHKERRQ(ierr); ierr = MatDenseCUDARestoreArray(Y,&dy);CHKERRQ(ierr); PetscFunctionReturn(0); } PetscErrorCode MatAXPY_SeqDenseCUDA(Mat Y,PetscScalar alpha,Mat X,MatStructure str) { Mat_SeqDense *x = (Mat_SeqDense*)X->data; Mat_SeqDense *y = (Mat_SeqDense*)Y->data; const PetscScalar *dx; PetscScalar *dy; PetscCuBLASInt j,N,m,ldax,lday,one = 1; hipblasHandle_t cublasv2handle; hipblasStatus_t berr; PetscErrorCode ierr; PetscFunctionBegin; if (!X->rmap->n || !X->cmap->n) PetscFunctionReturn(0); ierr = 
PetscCUBLASGetHandle(&cublasv2handle);CHKERRQ(ierr); ierr = MatDenseCUDAGetArrayRead(X,&dx);CHKERRQ(ierr); if (alpha != 0.0) { ierr = MatDenseCUDAGetArray(Y,&dy);CHKERRQ(ierr); } else { ierr = MatDenseCUDAGetArrayWrite(Y,&dy);CHKERRQ(ierr); } ierr = PetscCuBLASIntCast(X->rmap->n*X->cmap->n,&N);CHKERRQ(ierr); ierr = PetscCuBLASIntCast(X->rmap->n,&m);CHKERRQ(ierr); ierr = PetscCuBLASIntCast(x->lda,&ldax);CHKERRQ(ierr); ierr = PetscCuBLASIntCast(y->lda,&lday);CHKERRQ(ierr); ierr = PetscInfo2(Y,"Performing AXPY %d x %d on backend\n",Y->rmap->n,Y->cmap->n);CHKERRQ(ierr); ierr = PetscLogGpuTimeBegin();CHKERRQ(ierr); if (ldax>m || lday>m) { for (j=0; j<X->cmap->n; j++) { berr = cublasXaxpy(cublasv2handle,m,&alpha,dx+j*ldax,one,dy+j*lday,one);CHKERRCUBLAS(berr); } } else { berr = cublasXaxpy(cublasv2handle,N,&alpha,dx,one,dy,one);CHKERRCUBLAS(berr); } ierr = PetscLogGpuTimeEnd();CHKERRQ(ierr); ierr = PetscLogGpuFlops(PetscMax(2.*N-1,0));CHKERRQ(ierr); ierr = MatDenseCUDARestoreArrayRead(X,&dx);CHKERRQ(ierr); if (alpha != 0.0) { ierr = MatDenseCUDARestoreArray(Y,&dy);CHKERRQ(ierr); } else { ierr = MatDenseCUDARestoreArrayWrite(Y,&dy);CHKERRQ(ierr); } PetscFunctionReturn(0); } static PetscErrorCode MatReset_SeqDenseCUDA(Mat A) { Mat_SeqDenseCUDA *dA = (Mat_SeqDenseCUDA*)A->spptr; hipError_t cerr; PetscErrorCode ierr; PetscFunctionBegin; if (dA) { if (dA->unplacedarray) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ORDER,"MatDenseCUDAResetArray() must be called first"); if (!dA->user_alloc) { cerr = hipFree(dA->d_v);CHKERRCUDA(cerr); } cerr = hipFree(dA->d_fact_tau);CHKERRCUDA(cerr); cerr = hipFree(dA->d_fact_ipiv);CHKERRCUDA(cerr); cerr = hipFree(dA->d_fact_info);CHKERRCUDA(cerr); cerr = hipFree(dA->d_fact_work);CHKERRCUDA(cerr); ierr = VecDestroy(&dA->workvec);CHKERRQ(ierr); } ierr = PetscFree(A->spptr);CHKERRQ(ierr); PetscFunctionReturn(0); } PetscErrorCode MatDestroy_SeqDenseCUDA(Mat A) { Mat_SeqDense *a = (Mat_SeqDense*)A->data; PetscErrorCode ierr; PetscFunctionBegin; /* prevent to copy back data if we own the data pointer */ if (!a->user_alloc) { A->offloadmask = PETSC_OFFLOAD_CPU; } ierr = MatConvert_SeqDenseCUDA_SeqDense(A,MATSEQDENSE,MAT_INPLACE_MATRIX,&A);CHKERRQ(ierr); ierr = MatDestroy_SeqDense(A);CHKERRQ(ierr); PetscFunctionReturn(0); } PetscErrorCode MatDuplicate_SeqDenseCUDA(Mat A,MatDuplicateOption cpvalues,Mat *B) { PetscErrorCode ierr; MatDuplicateOption hcpvalues = (cpvalues == MAT_COPY_VALUES && A->offloadmask != PETSC_OFFLOAD_CPU) ? 
MAT_DO_NOT_COPY_VALUES : cpvalues; PetscFunctionBegin; ierr = MatCreate(PetscObjectComm((PetscObject)A),B);CHKERRQ(ierr); ierr = MatSetSizes(*B,A->rmap->n,A->cmap->n,A->rmap->n,A->cmap->n);CHKERRQ(ierr); ierr = MatSetType(*B,((PetscObject)A)->type_name);CHKERRQ(ierr); ierr = MatDuplicateNoCreate_SeqDense(*B,A,hcpvalues);CHKERRQ(ierr); if (cpvalues == MAT_COPY_VALUES && hcpvalues != MAT_COPY_VALUES) { ierr = MatCopy_SeqDenseCUDA(A,*B,SAME_NONZERO_PATTERN);CHKERRQ(ierr); } PetscFunctionReturn(0); } static PetscErrorCode MatGetColumnVector_SeqDenseCUDA(Mat A,Vec v,PetscInt col) { Mat_SeqDense *a = (Mat_SeqDense*)A->data; Mat_SeqDenseCUDA *dA = (Mat_SeqDenseCUDA*)A->spptr; PetscErrorCode ierr; PetscScalar *x; PetscBool viscuda; hipError_t cerr; PetscFunctionBegin; ierr = PetscObjectTypeCompareAny((PetscObject)v,&viscuda,VECSEQCUDA,VECMPICUDA,VECCUDA,"");CHKERRQ(ierr); if (viscuda && !v->boundtocpu) { /* update device data */ ierr = VecCUDAGetArrayWrite(v,&x);CHKERRQ(ierr); if (A->offloadmask & PETSC_OFFLOAD_GPU) { cerr = hipMemcpy(x,dA->d_v + col*a->lda,A->rmap->n*sizeof(PetscScalar),hipMemcpyHostToHost);CHKERRCUDA(cerr); } else { cerr = hipMemcpy(x,a->v + col*a->lda,A->rmap->n*sizeof(PetscScalar),hipMemcpyHostToDevice);CHKERRCUDA(cerr); } ierr = VecCUDARestoreArrayWrite(v,&x);CHKERRQ(ierr); } else { /* update host data */ ierr = VecGetArrayWrite(v,&x);CHKERRQ(ierr); if (A->offloadmask == PETSC_OFFLOAD_UNALLOCATED || A->offloadmask & PETSC_OFFLOAD_CPU) { ierr = PetscArraycpy(x,a->v+col*a->lda,A->rmap->n);CHKERRQ(ierr); } else if (A->offloadmask & PETSC_OFFLOAD_GPU) { cerr = hipMemcpy(x,dA->d_v + col*a->lda,A->rmap->n*sizeof(PetscScalar),hipMemcpyDeviceToHost);CHKERRCUDA(cerr); } ierr = VecRestoreArrayWrite(v,&x);CHKERRQ(ierr); } PetscFunctionReturn(0); } PETSC_INTERN PetscErrorCode MatGetFactor_seqdense_cuda(Mat A,MatFactorType ftype,Mat *fact) { PetscErrorCode ierr; PetscFunctionBegin; ierr = MatCreate(PetscObjectComm((PetscObject)A),fact);CHKERRQ(ierr); ierr = MatSetSizes(*fact,A->rmap->n,A->cmap->n,A->rmap->n,A->cmap->n);CHKERRQ(ierr); ierr = MatSetType(*fact,MATSEQDENSECUDA);CHKERRQ(ierr); if (ftype == MAT_FACTOR_LU || ftype == MAT_FACTOR_ILU) { (*fact)->ops->lufactorsymbolic = MatLUFactorSymbolic_SeqDense; (*fact)->ops->ilufactorsymbolic = MatLUFactorSymbolic_SeqDense; } else if (ftype == MAT_FACTOR_CHOLESKY || ftype == MAT_FACTOR_ICC) { (*fact)->ops->choleskyfactorsymbolic = MatCholeskyFactorSymbolic_SeqDense; } else if (ftype == MAT_FACTOR_QR) { ierr = PetscObjectComposeFunction((PetscObject)(*fact),"MatQRFactor_C",MatQRFactor_SeqDense);CHKERRQ(ierr); ierr = PetscObjectComposeFunction((PetscObject)(*fact),"MatQRFactorSymbolic_C",MatQRFactorSymbolic_SeqDense);CHKERRQ(ierr); } (*fact)->factortype = ftype; ierr = PetscFree((*fact)->solvertype);CHKERRQ(ierr); ierr = PetscStrallocpy(MATSOLVERCUDA,&(*fact)->solvertype);CHKERRQ(ierr); ierr = PetscStrallocpy(MATORDERINGEXTERNAL,(char**)&(*fact)->preferredordering[MAT_FACTOR_LU]);CHKERRQ(ierr); ierr = PetscStrallocpy(MATORDERINGEXTERNAL,(char**)&(*fact)->preferredordering[MAT_FACTOR_ILU]);CHKERRQ(ierr); ierr = PetscStrallocpy(MATORDERINGEXTERNAL,(char**)&(*fact)->preferredordering[MAT_FACTOR_CHOLESKY]);CHKERRQ(ierr); ierr = PetscStrallocpy(MATORDERINGEXTERNAL,(char**)&(*fact)->preferredordering[MAT_FACTOR_ICC]);CHKERRQ(ierr); PetscFunctionReturn(0); } static PetscErrorCode MatDenseGetColumnVec_SeqDenseCUDA(Mat A,PetscInt col,Vec *v) { Mat_SeqDense *a = (Mat_SeqDense*)A->data; PetscErrorCode ierr; PetscFunctionBegin; if (a->vecinuse) 
SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ORDER,"Need to call MatDenseRestoreColumnVec() first"); if (a->matinuse) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ORDER,"Need to call MatDenseRestoreSubMatrix() first"); ierr = MatDenseCUDAGetArray(A,(PetscScalar**)&a->ptrinuse);CHKERRQ(ierr); if (!a->cvec) { /* we pass the data of A, to prevent allocating needless GPU memory the first time VecCUDAPlaceArray is called */ ierr = VecCreateSeqCUDAWithArray(PetscObjectComm((PetscObject)A),A->rmap->bs,A->rmap->n,a->ptrinuse,&a->cvec);CHKERRQ(ierr); ierr = PetscLogObjectParent((PetscObject)A,(PetscObject)a->cvec);CHKERRQ(ierr); } a->vecinuse = col + 1; ierr = VecCUDAPlaceArray(a->cvec,a->ptrinuse + (size_t)col * (size_t)a->lda);CHKERRQ(ierr); *v = a->cvec; PetscFunctionReturn(0); } static PetscErrorCode MatDenseRestoreColumnVec_SeqDenseCUDA(Mat A,PetscInt col,Vec *v) { Mat_SeqDense *a = (Mat_SeqDense*)A->data; PetscErrorCode ierr; PetscFunctionBegin; if (!a->vecinuse) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ORDER,"Need to call MatDenseGetColumnVec() first"); if (!a->cvec) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_PLIB,"Missing internal column vector"); a->vecinuse = 0; ierr = VecCUDAResetArray(a->cvec);CHKERRQ(ierr); ierr = MatDenseCUDARestoreArray(A,(PetscScalar**)&a->ptrinuse);CHKERRQ(ierr); *v = NULL; PetscFunctionReturn(0); } static PetscErrorCode MatDenseGetColumnVecRead_SeqDenseCUDA(Mat A,PetscInt col,Vec *v) { Mat_SeqDense *a = (Mat_SeqDense*)A->data; PetscErrorCode ierr; PetscFunctionBegin; if (a->vecinuse) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ORDER,"Need to call MatDenseRestoreColumnVec() first"); if (a->matinuse) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ORDER,"Need to call MatDenseRestoreSubMatrix() first"); ierr = MatDenseCUDAGetArrayRead(A,&a->ptrinuse);CHKERRQ(ierr); if (!a->cvec) { /* we pass the data of A, to prevent allocating needless GPU memory the first time VecCUDAPlaceArray is called */ ierr = VecCreateSeqCUDAWithArray(PetscObjectComm((PetscObject)A),A->rmap->bs,A->rmap->n,a->ptrinuse,&a->cvec);CHKERRQ(ierr); ierr = PetscLogObjectParent((PetscObject)A,(PetscObject)a->cvec);CHKERRQ(ierr); } a->vecinuse = col + 1; ierr = VecCUDAPlaceArray(a->cvec,a->ptrinuse + (size_t)col * (size_t)a->lda);CHKERRQ(ierr); ierr = VecLockReadPush(a->cvec);CHKERRQ(ierr); *v = a->cvec; PetscFunctionReturn(0); } static PetscErrorCode MatDenseRestoreColumnVecRead_SeqDenseCUDA(Mat A,PetscInt col,Vec *v) { Mat_SeqDense *a = (Mat_SeqDense*)A->data; PetscErrorCode ierr; PetscFunctionBegin; if (!a->vecinuse) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ORDER,"Need to call MatDenseGetColumnVec() first"); if (!a->cvec) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_PLIB,"Missing internal column vector"); a->vecinuse = 0; ierr = VecLockReadPop(a->cvec);CHKERRQ(ierr); ierr = VecCUDAResetArray(a->cvec);CHKERRQ(ierr); ierr = MatDenseCUDARestoreArrayRead(A,&a->ptrinuse);CHKERRQ(ierr); *v = NULL; PetscFunctionReturn(0); } static PetscErrorCode MatDenseGetColumnVecWrite_SeqDenseCUDA(Mat A,PetscInt col,Vec *v) { Mat_SeqDense *a = (Mat_SeqDense*)A->data; PetscErrorCode ierr; PetscFunctionBegin; if (a->vecinuse) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ORDER,"Need to call MatDenseRestoreColumnVec() first"); if (a->matinuse) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ORDER,"Need to call MatDenseRestoreSubMatrix() first"); ierr = MatDenseCUDAGetArrayWrite(A,(PetscScalar**)&a->ptrinuse);CHKERRQ(ierr); if (!a->cvec) { /* we pass the data of A, to prevent allocating needless GPU memory the first time VecCUDAPlaceArray is called */ ierr = 
VecCreateSeqCUDAWithArray(PetscObjectComm((PetscObject)A),A->rmap->bs,A->rmap->n,a->ptrinuse,&a->cvec);CHKERRQ(ierr); ierr = PetscLogObjectParent((PetscObject)A,(PetscObject)a->cvec);CHKERRQ(ierr); } a->vecinuse = col + 1; ierr = VecCUDAPlaceArray(a->cvec,a->ptrinuse + (size_t)col * (size_t)a->lda);CHKERRQ(ierr); *v = a->cvec; PetscFunctionReturn(0); } static PetscErrorCode MatDenseRestoreColumnVecWrite_SeqDenseCUDA(Mat A,PetscInt col,Vec *v) { Mat_SeqDense *a = (Mat_SeqDense*)A->data; PetscErrorCode ierr; PetscFunctionBegin; if (!a->vecinuse) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ORDER,"Need to call MatDenseGetColumnVec() first"); if (!a->cvec) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_PLIB,"Missing internal column vector"); a->vecinuse = 0; ierr = VecCUDAResetArray(a->cvec);CHKERRQ(ierr); ierr = MatDenseCUDARestoreArrayWrite(A,(PetscScalar**)&a->ptrinuse);CHKERRQ(ierr); *v = NULL; PetscFunctionReturn(0); } static PetscErrorCode MatDenseGetSubMatrix_SeqDenseCUDA(Mat A,PetscInt cbegin,PetscInt cend,Mat *v) { Mat_SeqDense *a = (Mat_SeqDense*)A->data; Mat_SeqDenseCUDA *dA = (Mat_SeqDenseCUDA*)A->spptr; PetscErrorCode ierr; PetscFunctionBegin; if (a->vecinuse) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ORDER,"Need to call MatDenseRestoreColumnVec() first"); if (a->matinuse) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ORDER,"Need to call MatDenseRestoreSubMatrix() first"); if (a->cmat && cend-cbegin != a->cmat->cmap->N) { ierr = MatDestroy(&a->cmat);CHKERRQ(ierr); } ierr = MatSeqDenseCUDACopyToGPU(A);CHKERRQ(ierr); if (!a->cmat) { ierr = MatCreateDenseCUDA(PetscObjectComm((PetscObject)A),A->rmap->n,PETSC_DECIDE,A->rmap->N,cend-cbegin,dA->d_v + (size_t)cbegin * (size_t)a->lda,&a->cmat);CHKERRQ(ierr); ierr = PetscLogObjectParent((PetscObject)A,(PetscObject)a->cmat);CHKERRQ(ierr); } else { ierr = MatDenseCUDAPlaceArray(a->cmat,dA->d_v + (size_t)cbegin * (size_t)a->lda);CHKERRQ(ierr); } ierr = MatDenseSetLDA(a->cmat,a->lda);CHKERRQ(ierr); if (a->v) { ierr = MatDensePlaceArray(a->cmat,a->v + (size_t)cbegin * (size_t)a->lda);CHKERRQ(ierr); } a->cmat->offloadmask = A->offloadmask; a->matinuse = cbegin + 1; *v = a->cmat; PetscFunctionReturn(0); } static PetscErrorCode MatDenseRestoreSubMatrix_SeqDenseCUDA(Mat A,Mat *v) { Mat_SeqDense *a = (Mat_SeqDense*)A->data; PetscErrorCode ierr; PetscFunctionBegin; if (!a->matinuse) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ORDER,"Need to call MatDenseGetSubMatrix() first"); if (!a->cmat) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_PLIB,"Missing internal column matrix"); if (*v != a->cmat) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONG,"Not the matrix obtained from MatDenseGetSubMatrix()"); a->matinuse = 0; A->offloadmask = PETSC_OFFLOAD_GPU; ierr = MatDenseCUDAResetArray(a->cmat);CHKERRQ(ierr); ierr = MatDenseResetArray(a->cmat);CHKERRQ(ierr); *v = NULL; PetscFunctionReturn(0); } static PetscErrorCode MatDenseSetLDA_SeqDenseCUDA(Mat A,PetscInt lda) { Mat_SeqDense *cA = (Mat_SeqDense*)A->data; Mat_SeqDenseCUDA *dA = (Mat_SeqDenseCUDA*)A->spptr; PetscBool data; PetscFunctionBegin; data = (PetscBool)((A->rmap->n > 0 && A->cmap->n > 0) ? (dA->d_v ? 
PETSC_TRUE : PETSC_FALSE) : PETSC_FALSE); if (!dA->user_alloc && data && cA->lda!=lda) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ORDER,"LDA cannot be changed after allocation of internal storage"); if (lda < A->rmap->n) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_SIZ,"LDA %D must be at least matrix dimension %D",lda,A->rmap->n); cA->lda = lda; PetscFunctionReturn(0); } static PetscErrorCode MatBindToCPU_SeqDenseCUDA(Mat A,PetscBool flg) { Mat_SeqDense *a = (Mat_SeqDense*)A->data; PetscErrorCode ierr; PetscFunctionBegin; if (a->vecinuse) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ORDER,"Need to call MatDenseRestoreColumnVec() first"); if (a->matinuse) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ORDER,"Need to call MatDenseRestoreSubMatrix() first"); A->boundtocpu = flg; if (!flg) { PetscBool iscuda; ierr = PetscObjectTypeCompare((PetscObject)a->cvec,VECSEQCUDA,&iscuda);CHKERRQ(ierr); if (!iscuda) { ierr = VecDestroy(&a->cvec);CHKERRQ(ierr); } ierr = PetscObjectTypeCompare((PetscObject)a->cmat,MATSEQDENSECUDA,&iscuda);CHKERRQ(ierr); if (!iscuda) { ierr = MatDestroy(&a->cmat);CHKERRQ(ierr); } ierr = PetscObjectComposeFunction((PetscObject)A,"MatDenseGetArray_C",MatDenseGetArray_SeqDenseCUDA);CHKERRQ(ierr); ierr = PetscObjectComposeFunction((PetscObject)A,"MatDenseGetArrayRead_C",MatDenseGetArrayRead_SeqDenseCUDA);CHKERRQ(ierr); ierr = PetscObjectComposeFunction((PetscObject)A,"MatDenseGetArrayWrite_C",MatDenseGetArrayWrite_SeqDenseCUDA);CHKERRQ(ierr); ierr = PetscObjectComposeFunction((PetscObject)A,"MatDenseGetColumnVec_C",MatDenseGetColumnVec_SeqDenseCUDA);CHKERRQ(ierr); ierr = PetscObjectComposeFunction((PetscObject)A,"MatDenseRestoreColumnVec_C",MatDenseRestoreColumnVec_SeqDenseCUDA);CHKERRQ(ierr); ierr = PetscObjectComposeFunction((PetscObject)A,"MatDenseGetColumnVecRead_C",MatDenseGetColumnVecRead_SeqDenseCUDA);CHKERRQ(ierr); ierr = PetscObjectComposeFunction((PetscObject)A,"MatDenseRestoreColumnVecRead_C",MatDenseRestoreColumnVecRead_SeqDenseCUDA);CHKERRQ(ierr); ierr = PetscObjectComposeFunction((PetscObject)A,"MatDenseGetColumnVecWrite_C",MatDenseGetColumnVecWrite_SeqDenseCUDA);CHKERRQ(ierr); ierr = PetscObjectComposeFunction((PetscObject)A,"MatDenseRestoreColumnVecWrite_C",MatDenseRestoreColumnVecWrite_SeqDenseCUDA);CHKERRQ(ierr); ierr = PetscObjectComposeFunction((PetscObject)A,"MatDenseGetSubMatrix_C",MatDenseGetSubMatrix_SeqDenseCUDA);CHKERRQ(ierr); ierr = PetscObjectComposeFunction((PetscObject)A,"MatDenseRestoreSubMatrix_C",MatDenseRestoreSubMatrix_SeqDenseCUDA);CHKERRQ(ierr); ierr = PetscObjectComposeFunction((PetscObject)A,"MatDenseSetLDA_C",MatDenseSetLDA_SeqDenseCUDA);CHKERRQ(ierr); ierr = PetscObjectComposeFunction((PetscObject)A,"MatQRFactor_C",MatQRFactor_SeqDenseCUDA);CHKERRQ(ierr); A->ops->duplicate = MatDuplicate_SeqDenseCUDA; A->ops->mult = MatMult_SeqDenseCUDA; A->ops->multadd = MatMultAdd_SeqDenseCUDA; A->ops->multtranspose = MatMultTranspose_SeqDenseCUDA; A->ops->multtransposeadd = MatMultTransposeAdd_SeqDenseCUDA; A->ops->matmultnumeric = MatMatMultNumeric_SeqDenseCUDA_SeqDenseCUDA; A->ops->mattransposemultnumeric = MatMatTransposeMultNumeric_SeqDenseCUDA_SeqDenseCUDA; A->ops->transposematmultnumeric = MatTransposeMatMultNumeric_SeqDenseCUDA_SeqDenseCUDA; A->ops->axpy = MatAXPY_SeqDenseCUDA; A->ops->choleskyfactor = MatCholeskyFactor_SeqDenseCUDA; A->ops->lufactor = MatLUFactor_SeqDenseCUDA; A->ops->productsetfromoptions = MatProductSetFromOptions_SeqDenseCUDA; A->ops->getcolumnvector = MatGetColumnVector_SeqDenseCUDA; A->ops->scale = MatScale_SeqDenseCUDA; A->ops->copy = MatCopy_SeqDenseCUDA; 
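  /* Hedged usage sketch (illustrative only, not compiled): toggling between the host operations and
     the CUDA operations installed by this routine with MatBindToCPU(). */
#if 0
  ierr = MatBindToCPU(A,PETSC_TRUE);CHKERRQ(ierr);   /* subsequent MatMult() etc. run on the host */
  ierr = MatBindToCPU(A,PETSC_FALSE);CHKERRQ(ierr);  /* re-install the MATSEQDENSECUDA operations */
#endif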
A->ops->zeroentries = MatZeroEntries_SeqDenseCUDA; } else { /* make sure we have an up-to-date copy on the CPU */ ierr = MatSeqDenseCUDACopyFromGPU(A);CHKERRQ(ierr); ierr = PetscObjectComposeFunction((PetscObject)A,"MatDenseGetArray_C",MatDenseGetArray_SeqDense);CHKERRQ(ierr); ierr = PetscObjectComposeFunction((PetscObject)A,"MatDenseGetArrayRead_C",MatDenseGetArray_SeqDense);CHKERRQ(ierr); ierr = PetscObjectComposeFunction((PetscObject)A,"MatDenseGetArrayWrite_C",MatDenseGetArray_SeqDense);CHKERRQ(ierr); ierr = PetscObjectComposeFunction((PetscObject)A,"MatDenseGetColumnVec_C",MatDenseGetColumnVec_SeqDense);CHKERRQ(ierr); ierr = PetscObjectComposeFunction((PetscObject)A,"MatDenseRestoreColumnVec_C",MatDenseRestoreColumnVec_SeqDense);CHKERRQ(ierr); ierr = PetscObjectComposeFunction((PetscObject)A,"MatDenseGetColumnVecRead_C",MatDenseGetColumnVecRead_SeqDense);CHKERRQ(ierr); ierr = PetscObjectComposeFunction((PetscObject)A,"MatDenseRestoreColumnVecRead_C",MatDenseRestoreColumnVecRead_SeqDense);CHKERRQ(ierr); ierr = PetscObjectComposeFunction((PetscObject)A,"MatDenseGetColumnVecWrite_C",MatDenseGetColumnVecWrite_SeqDense);CHKERRQ(ierr); ierr = PetscObjectComposeFunction((PetscObject)A,"MatDenseRestoreColumnVecWrite_C",MatDenseRestoreColumnVecWrite_SeqDense);CHKERRQ(ierr); ierr = PetscObjectComposeFunction((PetscObject)A,"MatDenseGetSubMatrix_C",MatDenseGetSubMatrix_SeqDense);CHKERRQ(ierr); ierr = PetscObjectComposeFunction((PetscObject)A,"MatDenseRestoreSubMatrix_C",MatDenseRestoreSubMatrix_SeqDense);CHKERRQ(ierr); ierr = PetscObjectComposeFunction((PetscObject)A,"MatDenseSetLDA_C",MatDenseSetLDA_SeqDense);CHKERRQ(ierr); ierr = PetscObjectComposeFunction((PetscObject)A,"MatQRFactor_C",MatQRFactor_SeqDense);CHKERRQ(ierr); A->ops->duplicate = MatDuplicate_SeqDense; A->ops->mult = MatMult_SeqDense; A->ops->multadd = MatMultAdd_SeqDense; A->ops->multtranspose = MatMultTranspose_SeqDense; A->ops->multtransposeadd = MatMultTransposeAdd_SeqDense; A->ops->productsetfromoptions = MatProductSetFromOptions_SeqDense; A->ops->matmultnumeric = MatMatMultNumeric_SeqDense_SeqDense; A->ops->mattransposemultnumeric = MatMatTransposeMultNumeric_SeqDense_SeqDense; A->ops->transposematmultnumeric = MatTransposeMatMultNumeric_SeqDense_SeqDense; A->ops->axpy = MatAXPY_SeqDense; A->ops->choleskyfactor = MatCholeskyFactor_SeqDense; A->ops->lufactor = MatLUFactor_SeqDense; A->ops->productsetfromoptions = MatProductSetFromOptions_SeqDense; A->ops->getcolumnvector = MatGetColumnVector_SeqDense; A->ops->scale = MatScale_SeqDense; A->ops->copy = MatCopy_SeqDense; A->ops->zeroentries = MatZeroEntries_SeqDense; } if (a->cmat) { ierr = MatBindToCPU(a->cmat,flg);CHKERRQ(ierr); } PetscFunctionReturn(0); } PetscErrorCode MatConvert_SeqDenseCUDA_SeqDense(Mat M,MatType type,MatReuse reuse,Mat *newmat) { Mat B; PetscErrorCode ierr; PetscFunctionBegin; if (reuse == MAT_REUSE_MATRIX || reuse == MAT_INITIAL_MATRIX) { /* TODO these cases should be optimized */ ierr = MatConvert_Basic(M,type,reuse,newmat);CHKERRQ(ierr); PetscFunctionReturn(0); } B = *newmat; ierr = MatBindToCPU_SeqDenseCUDA(B,PETSC_TRUE);CHKERRQ(ierr); ierr = MatReset_SeqDenseCUDA(B);CHKERRQ(ierr); ierr = PetscFree(B->defaultvectype);CHKERRQ(ierr); ierr = PetscStrallocpy(VECSTANDARD,&B->defaultvectype);CHKERRQ(ierr); ierr = PetscObjectChangeTypeName((PetscObject)B,MATSEQDENSE);CHKERRQ(ierr); ierr = PetscObjectComposeFunction((PetscObject)B,"MatConvert_seqdensecuda_seqdense_C",NULL);CHKERRQ(ierr); ierr = 
PetscObjectComposeFunction((PetscObject)B,"MatDenseCUDAGetArray_C",NULL);CHKERRQ(ierr); ierr = PetscObjectComposeFunction((PetscObject)B,"MatDenseCUDAGetArrayRead_C",NULL);CHKERRQ(ierr); ierr = PetscObjectComposeFunction((PetscObject)B,"MatDenseCUDAGetArrayWrite_C",NULL);CHKERRQ(ierr); ierr = PetscObjectComposeFunction((PetscObject)B,"MatDenseCUDARestoreArray_C",NULL);CHKERRQ(ierr); ierr = PetscObjectComposeFunction((PetscObject)B,"MatDenseCUDARestoreArrayRead_C",NULL);CHKERRQ(ierr); ierr = PetscObjectComposeFunction((PetscObject)B,"MatDenseCUDARestoreArrayWrite_C",NULL);CHKERRQ(ierr); ierr = PetscObjectComposeFunction((PetscObject)B,"MatDenseCUDAPlaceArray_C",NULL);CHKERRQ(ierr); ierr = PetscObjectComposeFunction((PetscObject)B,"MatDenseCUDAResetArray_C",NULL);CHKERRQ(ierr); ierr = PetscObjectComposeFunction((PetscObject)B,"MatDenseCUDAReplaceArray_C",NULL);CHKERRQ(ierr); ierr = PetscObjectComposeFunction((PetscObject)B,"MatProductSetFromOptions_seqaij_seqdensecuda_C",NULL);CHKERRQ(ierr); B->ops->bindtocpu = NULL; B->ops->destroy = MatDestroy_SeqDense; B->offloadmask = PETSC_OFFLOAD_CPU; PetscFunctionReturn(0); } PetscErrorCode MatConvert_SeqDense_SeqDenseCUDA(Mat M,MatType type,MatReuse reuse,Mat *newmat) { Mat_SeqDenseCUDA *dB; Mat B; PetscErrorCode ierr; PetscFunctionBegin; ierr = PetscDeviceInitialize(PETSC_DEVICE_CUDA);CHKERRQ(ierr); if (reuse == MAT_REUSE_MATRIX || reuse == MAT_INITIAL_MATRIX) { /* TODO these cases should be optimized */ ierr = MatConvert_Basic(M,type,reuse,newmat);CHKERRQ(ierr); PetscFunctionReturn(0); } B = *newmat; ierr = PetscFree(B->defaultvectype);CHKERRQ(ierr); ierr = PetscStrallocpy(VECCUDA,&B->defaultvectype);CHKERRQ(ierr); ierr = PetscObjectChangeTypeName((PetscObject)B,MATSEQDENSECUDA);CHKERRQ(ierr); ierr = PetscObjectComposeFunction((PetscObject)B,"MatConvert_seqdensecuda_seqdense_C", MatConvert_SeqDenseCUDA_SeqDense);CHKERRQ(ierr); ierr = PetscObjectComposeFunction((PetscObject)B,"MatDenseCUDAGetArray_C", MatDenseCUDAGetArray_SeqDenseCUDA);CHKERRQ(ierr); ierr = PetscObjectComposeFunction((PetscObject)B,"MatDenseCUDAGetArrayRead_C", MatDenseCUDAGetArrayRead_SeqDenseCUDA);CHKERRQ(ierr); ierr = PetscObjectComposeFunction((PetscObject)B,"MatDenseCUDAGetArrayWrite_C", MatDenseCUDAGetArrayWrite_SeqDenseCUDA);CHKERRQ(ierr); ierr = PetscObjectComposeFunction((PetscObject)B,"MatDenseCUDARestoreArray_C", MatDenseCUDARestoreArray_SeqDenseCUDA);CHKERRQ(ierr); ierr = PetscObjectComposeFunction((PetscObject)B,"MatDenseCUDARestoreArrayRead_C", MatDenseCUDARestoreArrayRead_SeqDenseCUDA);CHKERRQ(ierr); ierr = PetscObjectComposeFunction((PetscObject)B,"MatDenseCUDARestoreArrayWrite_C", MatDenseCUDARestoreArrayWrite_SeqDenseCUDA);CHKERRQ(ierr); ierr = PetscObjectComposeFunction((PetscObject)B,"MatDenseCUDAPlaceArray_C", MatDenseCUDAPlaceArray_SeqDenseCUDA);CHKERRQ(ierr); ierr = PetscObjectComposeFunction((PetscObject)B,"MatDenseCUDAResetArray_C", MatDenseCUDAResetArray_SeqDenseCUDA);CHKERRQ(ierr); ierr = PetscObjectComposeFunction((PetscObject)B,"MatDenseCUDAReplaceArray_C", MatDenseCUDAReplaceArray_SeqDenseCUDA);CHKERRQ(ierr); ierr = PetscObjectComposeFunction((PetscObject)B,"MatProductSetFromOptions_seqaij_seqdensecuda_C",MatProductSetFromOptions_SeqAIJ_SeqDense);CHKERRQ(ierr); ierr = PetscNewLog(B,&dB);CHKERRQ(ierr); B->spptr = dB; B->offloadmask = PETSC_OFFLOAD_UNALLOCATED; ierr = MatBindToCPU_SeqDenseCUDA(B,PETSC_FALSE);CHKERRQ(ierr); B->ops->bindtocpu = MatBindToCPU_SeqDenseCUDA; B->ops->destroy = MatDestroy_SeqDenseCUDA; PetscFunctionReturn(0); } /*@C 
   MatCreateSeqDenseCUDA - Creates a sequential matrix in dense format using CUDA.

   Collective

   Input Parameters:
+  comm - MPI communicator
.  m - number of rows
.  n - number of columns
-  data - optional location of GPU matrix data. Set data=NULL for PETSc to control matrix memory allocation.

   Output Parameter:
.  A - the matrix

   Notes:

   Level: intermediate

.seealso: MatCreate(), MatCreateSeqDense()
@*/
PetscErrorCode MatCreateSeqDenseCUDA(MPI_Comm comm,PetscInt m,PetscInt n,PetscScalar *data,Mat *A)
{
  PetscErrorCode ierr;
  PetscMPIInt    size;

  PetscFunctionBegin;
  ierr = MPI_Comm_size(comm,&size);CHKERRMPI(ierr);
  if (size > 1) SETERRQ1(comm,PETSC_ERR_ARG_WRONG,"Invalid communicator size %d",size);
  ierr = MatCreate(comm,A);CHKERRQ(ierr);
  ierr = MatSetSizes(*A,m,n,m,n);CHKERRQ(ierr);
  ierr = MatSetType(*A,MATSEQDENSECUDA);CHKERRQ(ierr);
  ierr = MatSeqDenseCUDASetPreallocation(*A,data);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}

/*MC
   MATSEQDENSECUDA - MATSEQDENSECUDA = "seqdensecuda" - A matrix type to be used for sequential dense matrices on GPUs.

   Options Database Keys:
. -mat_type seqdensecuda - sets the matrix type to "seqdensecuda" during a call to MatSetFromOptions()

   Level: beginner
M*/
PETSC_EXTERN PetscErrorCode MatCreate_SeqDenseCUDA(Mat B)
{
  PetscErrorCode ierr;

  PetscFunctionBegin;
  ierr = PetscDeviceInitialize(PETSC_DEVICE_CUDA);CHKERRQ(ierr);
  ierr = MatCreate_SeqDense(B);CHKERRQ(ierr);
  ierr = MatConvert_SeqDense_SeqDenseCUDA(B,MATSEQDENSECUDA,MAT_INPLACE_MATRIX,&B);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}
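/*
   Usage sketch (illustrative only, not part of the PETSc sources): the hypothetical standalone
   program below shows one way a MATSEQDENSECUDA matrix created with MatCreateSeqDenseCUDA()
   might be filled on the host and applied to CUDA vectors, assuming a PETSc build configured
   with CUDA.  Everything it calls (PetscInitialize(), MatSetValue(), MatAssemblyBegin()/
   MatAssemblyEnd(), VecCreateSeqCUDA(), VecSet(), MatMult()) is standard PETSc API assumed to
   be available; the sizes and values are made up.  It is wrapped in #if 0 so it does not
   affect this file.
*/
#if 0
#include <petscmat.h>

int main(int argc,char **argv)
{
  Mat            A;
  Vec            x,y;
  PetscInt       i,j,m = 4,n = 4;
  PetscErrorCode ierr;

  ierr = PetscInitialize(&argc,&argv,NULL,NULL);if (ierr) return ierr;
  /* PETSc allocates the GPU storage since data = NULL */
  ierr = MatCreateSeqDenseCUDA(PETSC_COMM_SELF,m,n,NULL,&A);CHKERRQ(ierr);
  /* fill on the host; the values are copied to the GPU the first time they are needed there */
  for (i=0; i<m; i++) {
    for (j=0; j<n; j++) {
      ierr = MatSetValue(A,i,j,(PetscScalar)(i == j ? 2.0 : 1.0),INSERT_VALUES);CHKERRQ(ierr);
    }
  }
  ierr = MatAssemblyBegin(A,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
  ierr = MatAssemblyEnd(A,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
  ierr = VecCreateSeqCUDA(PETSC_COMM_SELF,n,&x);CHKERRQ(ierr);
  ierr = VecCreateSeqCUDA(PETSC_COMM_SELF,m,&y);CHKERRQ(ierr);
  ierr = VecSet(x,1.0);CHKERRQ(ierr);
  /* dispatches to MatMult_SeqDenseCUDA, so the product is computed on the GPU */
  ierr = MatMult(A,x,y);CHKERRQ(ierr);
  ierr = VecDestroy(&x);CHKERRQ(ierr);
  ierr = VecDestroy(&y);CHKERRQ(ierr);
  ierr = MatDestroy(&A);CHKERRQ(ierr);
  ierr = PetscFinalize();
  return ierr;
}
#endif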
41f32b4f32ea66cdd4c31d491bbfb76661a9218b.cu
/* Defines the matrix operations for sequential dense with CUDA */ #include <petscpkg_version.h> #define PETSC_SKIP_IMMINTRIN_H_CUDAWORKAROUND 1 #include <../src/mat/impls/dense/seq/dense.h> /*I "petscmat.h" I*/ #include <petsc/private/cudavecimpl.h> /* cublas definitions are here */ #if defined(PETSC_USE_COMPLEX) #if defined(PETSC_USE_REAL_SINGLE) #define cusolverDnXpotrf(a,b,c,d,e,f,g,h) cusolverDnCpotrf((a),(b),(c),(cuComplex*)(d),(e),(cuComplex*)(f),(g),(h)) #define cusolverDnXpotrf_bufferSize(a,b,c,d,e,f) cusolverDnCpotrf_bufferSize((a),(b),(c),(cuComplex*)(d),(e),(f)) #define cusolverDnXpotrs(a,b,c,d,e,f,g,h,i) cusolverDnCpotrs((a),(b),(c),(d),(cuComplex*)(e),(f),(cuComplex*)(g),(h),(i)) #define cusolverDnXpotri(a,b,c,d,e,f,g,h) cusolverDnCpotri((a),(b),(c),(cuComplex*)(d),(e),(cuComplex*)(f),(g),(h)) #define cusolverDnXpotri_bufferSize(a,b,c,d,e,f) cusolverDnCpotri_bufferSize((a),(b),(c),(cuComplex*)(d),(e),(f)) #define cusolverDnXsytrf(a,b,c,d,e,f,g,h,i) cusolverDnCsytrf((a),(b),(c),(cuComplex*)(d),(e),(f),(cuComplex*)(g),(h),(i)) #define cusolverDnXsytrf_bufferSize(a,b,c,d,e) cusolverDnCsytrf_bufferSize((a),(b),(cuComplex*)(c),(d),(e)) #define cusolverDnXgetrf(a,b,c,d,e,f,g,h) cusolverDnCgetrf((a),(b),(c),(cuComplex*)(d),(e),(cuComplex*)(f),(g),(h)) #define cusolverDnXgetrf_bufferSize(a,b,c,d,e,f) cusolverDnCgetrf_bufferSize((a),(b),(c),(cuComplex*)(d),(e),(f)) #define cusolverDnXgetrs(a,b,c,d,e,f,g,h,i,j) cusolverDnCgetrs((a),(b),(c),(d),(cuComplex*)(e),(f),(g),(cuComplex*)(h),(i),(j)) #define cusolverDnXgeqrf_bufferSize(a,b,c,d,e,f) cusolverDnCgeqrf_bufferSize((a),(b),(c),(cuComplex*)(d),(e),(f)) #define cusolverDnXgeqrf(a,b,c,d,e,f,g,h,i) cusolverDnCgeqrf((a),(b),(c),(cuComplex*)(d),(e),(cuComplex*)(f),(cuComplex*)(g),(h),(i)) #define cusolverDnXormqr_bufferSize(a,b,c,d,e,f,g,h,i,j,k,l) cusolverDnCunmqr_bufferSize((a),(b),(c),(d),(e),(f),(cuComplex*)(g),(h),(cuComplex*)(i),(cuComplex*)(j),(k),(l)) #define cusolverDnXormqr(a,b,c,d,e,f,g,h,i,j,k,l,m,n) cusolverDnCunmqr((a),(b),(c),(d),(e),(f),(cuComplex*)(g),(h),(cuComplex*)(i),(cuComplex*)(j),(k),(cuComplex*)(l),(m),(n)) #define cublasXtrsm(a,b,c,d,e,f,g,h,i,j,k,l) cublasCtrsm((a),(b),(c),(d),(e),(f),(g),(cuComplex*)(h),(cuComplex*)(i),(j),(cuComplex*)(k),(l)) #else /* complex double */ #define cusolverDnXpotrf(a,b,c,d,e,f,g,h) cusolverDnZpotrf((a),(b),(c),(cuDoubleComplex*)(d),(e),(cuDoubleComplex*)(f),(g),(h)) #define cusolverDnXpotrf_bufferSize(a,b,c,d,e,f) cusolverDnZpotrf_bufferSize((a),(b),(c),(cuDoubleComplex*)(d),(e),(f)) #define cusolverDnXpotrs(a,b,c,d,e,f,g,h,i) cusolverDnZpotrs((a),(b),(c),(d),(cuDoubleComplex*)(e),(f),(cuDoubleComplex*)(g),(h),(i)) #define cusolverDnXpotri(a,b,c,d,e,f,g,h) cusolverDnZpotri((a),(b),(c),(cuDoubleComplex*)(d),(e),(cuDoubleComplex*)(f),(g),(h)) #define cusolverDnXpotri_bufferSize(a,b,c,d,e,f) cusolverDnZpotri_bufferSize((a),(b),(c),(cuDoubleComplex*)(d),(e),(f)) #define cusolverDnXsytrf(a,b,c,d,e,f,g,h,i) cusolverDnZsytrf((a),(b),(c),(cuDoubleComplex*)(d),(e),(f),(cuDoubleComplex*)(g),(h),(i)) #define cusolverDnXsytrf_bufferSize(a,b,c,d,e) cusolverDnZsytrf_bufferSize((a),(b),(cuDoubleComplex*)(c),(d),(e)) #define cusolverDnXgetrf(a,b,c,d,e,f,g,h) cusolverDnZgetrf((a),(b),(c),(cuDoubleComplex*)(d),(e),(cuDoubleComplex*)(f),(g),(h)) #define cusolverDnXgetrf_bufferSize(a,b,c,d,e,f) cusolverDnZgetrf_bufferSize((a),(b),(c),(cuDoubleComplex*)(d),(e),(f)) #define cusolverDnXgetrs(a,b,c,d,e,f,g,h,i,j) cusolverDnZgetrs((a),(b),(c),(d),(cuDoubleComplex*)(e),(f),(g),(cuDoubleComplex*)(h),(i),(j)) 
#define cusolverDnXgeqrf_bufferSize(a,b,c,d,e,f) cusolverDnZgeqrf_bufferSize((a),(b),(c),(cuDoubleComplex*)(d),(e),(f)) #define cusolverDnXgeqrf(a,b,c,d,e,f,g,h,i) cusolverDnZgeqrf((a),(b),(c),(cuDoubleComplex*)(d),(e),(cuDoubleComplex*)(f),(cuDoubleComplex*)(g),(h),(i)) #define cusolverDnXormqr_bufferSize(a,b,c,d,e,f,g,h,i,j,k,l) cusolverDnZunmqr_bufferSize((a),(b),(c),(d),(e),(f),(cuDoubleComplex*)(g),(h),(cuDoubleComplex*)(i),(cuDoubleComplex*)(j),(k),(l)) #define cusolverDnXormqr(a,b,c,d,e,f,g,h,i,j,k,l,m,n) cusolverDnZunmqr((a),(b),(c),(d),(e),(f),(cuDoubleComplex*)(g),(h),(cuDoubleComplex*)(i),(cuDoubleComplex*)(j),(k),(cuDoubleComplex*)(l),(m),(n)) #define cublasXtrsm(a,b,c,d,e,f,g,h,i,j,k,l) cublasZtrsm((a),(b),(c),(d),(e),(f),(g),(cuDoubleComplex*)(h),(cuDoubleComplex*)(i),(j),(cuDoubleComplex*)(k),(l)) #endif #else /* real single */ #if defined(PETSC_USE_REAL_SINGLE) #define cusolverDnXpotrf(a,b,c,d,e,f,g,h) cusolverDnSpotrf((a),(b),(c),(d),(e),(f),(g),(h)) #define cusolverDnXpotrf_bufferSize(a,b,c,d,e,f) cusolverDnSpotrf_bufferSize((a),(b),(c),(d),(e),(f)) #define cusolverDnXpotrs(a,b,c,d,e,f,g,h,i) cusolverDnSpotrs((a),(b),(c),(d),(e),(f),(g),(h),(i)) #define cusolverDnXpotri(a,b,c,d,e,f,g,h) cusolverDnSpotri((a),(b),(c),(d),(e),(f),(g),(h)) #define cusolverDnXpotri_bufferSize(a,b,c,d,e,f) cusolverDnSpotri_bufferSize((a),(b),(c),(d),(e),(f)) #define cusolverDnXsytrf(a,b,c,d,e,f,g,h,i) cusolverDnSsytrf((a),(b),(c),(d),(e),(f),(g),(h),(i)) #define cusolverDnXsytrf_bufferSize(a,b,c,d,e) cusolverDnSsytrf_bufferSize((a),(b),(c),(d),(e)) #define cusolverDnXgetrf(a,b,c,d,e,f,g,h) cusolverDnSgetrf((a),(b),(c),(d),(e),(f),(g),(h)) #define cusolverDnXgetrf_bufferSize(a,b,c,d,e,f) cusolverDnSgetrf_bufferSize((a),(b),(c),(d),(e),(f)) #define cusolverDnXgetrs(a,b,c,d,e,f,g,h,i,j) cusolverDnSgetrs((a),(b),(c),(d),(e),(f),(g),(h),(i),(j)) #define cusolverDnXgeqrf_bufferSize(a,b,c,d,e,f) cusolverDnSgeqrf_bufferSize((a),(b),(c),(float*)(d),(e),(f)) #define cusolverDnXgeqrf(a,b,c,d,e,f,g,h,i) cusolverDnSgeqrf((a),(b),(c),(float*)(d),(e),(float*)(f),(float*)(g),(h),(i)) #define cusolverDnXormqr_bufferSize(a,b,c,d,e,f,g,h,i,j,k,l) cusolverDnSormqr_bufferSize((a),(b),(c),(d),(e),(f),(float*)(g),(h),(float*)(i),(float*)(j),(k),(l)) #define cusolverDnXormqr(a,b,c,d,e,f,g,h,i,j,k,l,m,n) cusolverDnSormqr((a),(b),(c),(d),(e),(f),(float*)(g),(h),(float*)(i),(float*)(j),(k),(float*)(l),(m),(n)) #define cublasXtrsm(a,b,c,d,e,f,g,h,i,j,k,l) cublasStrsm((a),(b),(c),(d),(e),(f),(g),(float*)(h),(float*)(i),(j),(float*)(k),(l)) #else /* real double */ #define cusolverDnXpotrf(a,b,c,d,e,f,g,h) cusolverDnDpotrf((a),(b),(c),(d),(e),(f),(g),(h)) #define cusolverDnXpotrf_bufferSize(a,b,c,d,e,f) cusolverDnDpotrf_bufferSize((a),(b),(c),(d),(e),(f)) #define cusolverDnXpotrs(a,b,c,d,e,f,g,h,i) cusolverDnDpotrs((a),(b),(c),(d),(e),(f),(g),(h),(i)) #define cusolverDnXpotri(a,b,c,d,e,f,g,h) cusolverDnDpotri((a),(b),(c),(d),(e),(f),(g),(h)) #define cusolverDnXpotri_bufferSize(a,b,c,d,e,f) cusolverDnDpotri_bufferSize((a),(b),(c),(d),(e),(f)) #define cusolverDnXsytrf(a,b,c,d,e,f,g,h,i) cusolverDnDsytrf((a),(b),(c),(d),(e),(f),(g),(h),(i)) #define cusolverDnXsytrf_bufferSize(a,b,c,d,e) cusolverDnDsytrf_bufferSize((a),(b),(c),(d),(e)) #define cusolverDnXgetrf(a,b,c,d,e,f,g,h) cusolverDnDgetrf((a),(b),(c),(d),(e),(f),(g),(h)) #define cusolverDnXgetrf_bufferSize(a,b,c,d,e,f) cusolverDnDgetrf_bufferSize((a),(b),(c),(d),(e),(f)) #define cusolverDnXgetrs(a,b,c,d,e,f,g,h,i,j) cusolverDnDgetrs((a),(b),(c),(d),(e),(f),(g),(h),(i),(j)) 
#define cusolverDnXgeqrf_bufferSize(a,b,c,d,e,f) cusolverDnDgeqrf_bufferSize((a),(b),(c),(double*)(d),(e),(f)) #define cusolverDnXgeqrf(a,b,c,d,e,f,g,h,i) cusolverDnDgeqrf((a),(b),(c),(double*)(d),(e),(double*)(f),(double*)(g),(h),(i)) #define cusolverDnXormqr_bufferSize(a,b,c,d,e,f,g,h,i,j,k,l) cusolverDnDormqr_bufferSize((a),(b),(c),(d),(e),(f),(double*)(g),(h),(double*)(i),(double*)(j),(k),(l)) #define cusolverDnXormqr(a,b,c,d,e,f,g,h,i,j,k,l,m,n) cusolverDnDormqr((a),(b),(c),(d),(e),(f),(double*)(g),(h),(double*)(i),(double*)(j),(k),(double*)(l),(m),(n)) #define cublasXtrsm(a,b,c,d,e,f,g,h,i,j,k,l) cublasDtrsm((a),(b),(c),(d),(e),(f),(g),(double*)(h),(double*)(i),(j),(double*)(k),(l)) #endif #endif typedef struct { PetscScalar *d_v; /* pointer to the matrix on the GPU */ PetscBool user_alloc; PetscScalar *unplacedarray; /* if one called MatCUDADensePlaceArray(), this is where it stashed the original */ PetscBool unplaced_user_alloc; /* factorization support */ PetscCuBLASInt *d_fact_ipiv; /* device pivots */ PetscScalar *d_fact_tau; /* device QR tau vector */ PetscScalar *d_fact_work; /* device workspace */ PetscCuBLASInt fact_lwork; PetscCuBLASInt *d_fact_info; /* device info */ /* workspace */ Vec workvec; } Mat_SeqDenseCUDA; PetscErrorCode MatSeqDenseCUDASetPreallocation(Mat A, PetscScalar *d_data) { Mat_SeqDense *cA = (Mat_SeqDense*)A->data; Mat_SeqDenseCUDA *dA = (Mat_SeqDenseCUDA*)A->spptr; PetscErrorCode ierr; PetscBool iscuda; cudaError_t cerr; PetscFunctionBegin; ierr = PetscObjectTypeCompare((PetscObject)A,MATSEQDENSECUDA,&iscuda);CHKERRQ(ierr); if (!iscuda) PetscFunctionReturn(0); /* it may happen CPU preallocation has not been performed */ ierr = PetscLayoutSetUp(A->rmap);CHKERRQ(ierr); ierr = PetscLayoutSetUp(A->cmap);CHKERRQ(ierr); if (cA->lda <= 0) cA->lda = A->rmap->n; if (!dA->user_alloc) { cerr = cudaFree(dA->d_v);CHKERRCUDA(cerr); } if (!d_data) { /* petsc-allocated storage */ size_t sz; ierr = PetscIntMultError(cA->lda,A->cmap->n,NULL);CHKERRQ(ierr); sz = cA->lda*A->cmap->n*sizeof(PetscScalar); cerr = cudaMalloc((void**)&dA->d_v,sz);CHKERRCUDA(cerr); cerr = cudaMemset(dA->d_v,0,sz);CHKERRCUDA(cerr); dA->user_alloc = PETSC_FALSE; } else { /* user-allocated storage */ dA->d_v = d_data; dA->user_alloc = PETSC_TRUE; } A->offloadmask = PETSC_OFFLOAD_GPU; A->preallocated = PETSC_TRUE; A->assembled = PETSC_TRUE; PetscFunctionReturn(0); } PetscErrorCode MatSeqDenseCUDACopyFromGPU(Mat A) { Mat_SeqDense *cA = (Mat_SeqDense*)A->data; Mat_SeqDenseCUDA *dA = (Mat_SeqDenseCUDA*)A->spptr; PetscErrorCode ierr; cudaError_t cerr; PetscFunctionBegin; PetscCheckTypeName(A,MATSEQDENSECUDA); ierr = PetscInfo3(A,"%s matrix %d x %d\n",A->offloadmask == PETSC_OFFLOAD_GPU ? "Copy" : "Reusing",A->rmap->n,A->cmap->n);CHKERRQ(ierr); if (A->offloadmask == PETSC_OFFLOAD_GPU) { if (!cA->v) { /* MatCreateSeqDenseCUDA may not allocate CPU memory. 
Allocate if needed */ ierr = MatSeqDenseSetPreallocation(A,NULL);CHKERRQ(ierr); } ierr = PetscLogEventBegin(MAT_DenseCopyFromGPU,A,0,0,0);CHKERRQ(ierr); if (cA->lda > A->rmap->n) { PetscInt n = A->cmap->n,m = A->rmap->n; cerr = cudaMemcpy2D(cA->v,cA->lda*sizeof(PetscScalar),dA->d_v,cA->lda*sizeof(PetscScalar),m*sizeof(PetscScalar),n,cudaMemcpyDeviceToHost);CHKERRCUDA(cerr); } else { cerr = cudaMemcpy(cA->v,dA->d_v,cA->lda*sizeof(PetscScalar)*A->cmap->n,cudaMemcpyDeviceToHost);CHKERRCUDA(cerr); } ierr = PetscLogGpuToCpu(cA->lda*sizeof(PetscScalar)*A->cmap->n);CHKERRQ(ierr); ierr = PetscLogEventEnd(MAT_DenseCopyFromGPU,A,0,0,0);CHKERRQ(ierr); A->offloadmask = PETSC_OFFLOAD_BOTH; } PetscFunctionReturn(0); } PetscErrorCode MatSeqDenseCUDACopyToGPU(Mat A) { Mat_SeqDense *cA = (Mat_SeqDense*)A->data; Mat_SeqDenseCUDA *dA = (Mat_SeqDenseCUDA*)A->spptr; PetscBool copy; PetscErrorCode ierr; cudaError_t cerr; PetscFunctionBegin; PetscCheckTypeName(A,MATSEQDENSECUDA); if (A->boundtocpu) PetscFunctionReturn(0); copy = (PetscBool)(A->offloadmask == PETSC_OFFLOAD_CPU || A->offloadmask == PETSC_OFFLOAD_UNALLOCATED); ierr = PetscInfo3(A,"%s matrix %d x %d\n",copy ? "Copy" : "Reusing",A->rmap->n,A->cmap->n);CHKERRQ(ierr); if (copy) { if (!dA->d_v) { /* Allocate GPU memory if not present */ ierr = MatSeqDenseCUDASetPreallocation(A,NULL);CHKERRQ(ierr); } ierr = PetscLogEventBegin(MAT_DenseCopyToGPU,A,0,0,0);CHKERRQ(ierr); if (cA->lda > A->rmap->n) { PetscInt n = A->cmap->n,m = A->rmap->n; cerr = cudaMemcpy2D(dA->d_v,cA->lda*sizeof(PetscScalar),cA->v,cA->lda*sizeof(PetscScalar),m*sizeof(PetscScalar),n,cudaMemcpyHostToDevice);CHKERRCUDA(cerr); } else { cerr = cudaMemcpy(dA->d_v,cA->v,cA->lda*sizeof(PetscScalar)*A->cmap->n,cudaMemcpyHostToDevice);CHKERRCUDA(cerr); } ierr = PetscLogCpuToGpu(cA->lda*sizeof(PetscScalar)*A->cmap->n);CHKERRQ(ierr); ierr = PetscLogEventEnd(MAT_DenseCopyToGPU,A,0,0,0);CHKERRQ(ierr); A->offloadmask = PETSC_OFFLOAD_BOTH; } PetscFunctionReturn(0); } static PetscErrorCode MatCopy_SeqDenseCUDA(Mat A,Mat B,MatStructure str) { Mat_SeqDense *a = (Mat_SeqDense*)A->data,*b = (Mat_SeqDense*)B->data; PetscErrorCode ierr; const PetscScalar *va; PetscScalar *vb; PetscInt lda1=a->lda,lda2=b->lda,m=A->rmap->n,n=A->cmap->n; cudaError_t cerr; PetscFunctionBegin; /* If the two matrices don't have the same copy implementation, they aren't compatible for fast copy. 
*/ if (A->ops->copy != B->ops->copy) { ierr = MatCopy_Basic(A,B,str);CHKERRQ(ierr); PetscFunctionReturn(0); } if (m != B->rmap->n || n != B->cmap->n) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_SIZ,"size(B) != size(A)"); ierr = MatDenseCUDAGetArrayRead(A,&va);CHKERRQ(ierr); ierr = MatDenseCUDAGetArrayWrite(B,&vb);CHKERRQ(ierr); ierr = PetscLogGpuTimeBegin();CHKERRQ(ierr); if (lda1>m || lda2>m) { cerr = cudaMemcpy2D(vb,lda2*sizeof(PetscScalar),va,lda1*sizeof(PetscScalar),m*sizeof(PetscScalar),n,cudaMemcpyDeviceToDevice);CHKERRCUDA(cerr); } else { cerr = cudaMemcpy(vb,va,m*(n*sizeof(PetscScalar)),cudaMemcpyDeviceToDevice);CHKERRCUDA(cerr); } ierr = PetscLogGpuTimeEnd();CHKERRQ(ierr); ierr = MatDenseCUDARestoreArrayWrite(B,&vb);CHKERRQ(ierr); ierr = MatDenseCUDARestoreArrayRead(A,&va);CHKERRQ(ierr); PetscFunctionReturn(0); } static PetscErrorCode MatZeroEntries_SeqDenseCUDA(Mat A) { Mat_SeqDense *a = (Mat_SeqDense*)A->data; PetscErrorCode ierr; PetscScalar *va; PetscInt lda=a->lda,m = A->rmap->n,n = A->cmap->n; cudaError_t cerr; PetscFunctionBegin; ierr = MatDenseCUDAGetArrayWrite(A,&va);CHKERRQ(ierr); ierr = PetscLogGpuTimeBegin();CHKERRQ(ierr); if (lda>m) { cerr = cudaMemset2D(va,lda*sizeof(PetscScalar),0,m*sizeof(PetscScalar),n);CHKERRCUDA(cerr); } else { cerr = cudaMemset(va,0,m*(n*sizeof(PetscScalar)));CHKERRCUDA(cerr); } cerr = WaitForCUDA();CHKERRCUDA(cerr); ierr = PetscLogGpuTimeEnd();CHKERRQ(ierr); ierr = MatDenseCUDARestoreArrayWrite(A,&va);CHKERRQ(ierr); PetscFunctionReturn(0); } static PetscErrorCode MatDenseCUDAPlaceArray_SeqDenseCUDA(Mat A, const PetscScalar *a) { Mat_SeqDense *aa = (Mat_SeqDense*)A->data; Mat_SeqDenseCUDA *dA = (Mat_SeqDenseCUDA*)A->spptr; PetscErrorCode ierr; PetscFunctionBegin; if (aa->vecinuse) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ORDER,"Need to call MatDenseRestoreColumnVec() first"); if (aa->matinuse) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ORDER,"Need to call MatDenseRestoreSubMatrix() first"); if (dA->unplacedarray) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ORDER,"MatDenseCUDAResetArray() must be called first"); if (aa->v) { ierr = MatSeqDenseCUDACopyToGPU(A);CHKERRQ(ierr); } dA->unplacedarray = dA->d_v; dA->unplaced_user_alloc = dA->user_alloc; dA->d_v = (PetscScalar*)a; dA->user_alloc = PETSC_TRUE; PetscFunctionReturn(0); } static PetscErrorCode MatDenseCUDAResetArray_SeqDenseCUDA(Mat A) { Mat_SeqDense *a = (Mat_SeqDense*)A->data; Mat_SeqDenseCUDA *dA = (Mat_SeqDenseCUDA*)A->spptr; PetscErrorCode ierr; PetscFunctionBegin; if (a->vecinuse) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ORDER,"Need to call MatDenseRestoreColumnVec() first"); if (a->matinuse) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ORDER,"Need to call MatDenseRestoreSubMatrix() first"); if (a->v) { ierr = MatSeqDenseCUDACopyToGPU(A);CHKERRQ(ierr); } dA->d_v = dA->unplacedarray; dA->user_alloc = dA->unplaced_user_alloc; dA->unplacedarray = NULL; PetscFunctionReturn(0); } static PetscErrorCode MatDenseCUDAReplaceArray_SeqDenseCUDA(Mat A, const PetscScalar *a) { Mat_SeqDense *aa = (Mat_SeqDense*)A->data; Mat_SeqDenseCUDA *dA = (Mat_SeqDenseCUDA*)A->spptr; cudaError_t cerr; PetscFunctionBegin; if (aa->vecinuse) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ORDER,"Need to call MatDenseRestoreColumnVec() first"); if (aa->matinuse) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ORDER,"Need to call MatDenseRestoreSubMatrix() first"); if (dA->unplacedarray) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ORDER,"MatDenseCUDAResetArray() must be called first"); if (!dA->user_alloc) { cerr = cudaFree(dA->d_v);CHKERRCUDA(cerr); } dA->d_v = (PetscScalar*)a; dA->user_alloc = 
PETSC_FALSE; PetscFunctionReturn(0); } static PetscErrorCode MatDenseCUDAGetArrayWrite_SeqDenseCUDA(Mat A, PetscScalar **a) { Mat_SeqDenseCUDA *dA = (Mat_SeqDenseCUDA*)A->spptr; PetscErrorCode ierr; PetscFunctionBegin; if (!dA->d_v) { ierr = MatSeqDenseCUDASetPreallocation(A,NULL);CHKERRQ(ierr); } *a = dA->d_v; PetscFunctionReturn(0); } static PetscErrorCode MatDenseCUDARestoreArrayWrite_SeqDenseCUDA(Mat A, PetscScalar **a) { PetscFunctionBegin; *a = NULL; PetscFunctionReturn(0); } static PetscErrorCode MatDenseCUDAGetArrayRead_SeqDenseCUDA(Mat A, const PetscScalar **a) { Mat_SeqDenseCUDA *dA = (Mat_SeqDenseCUDA*)A->spptr; PetscErrorCode ierr; PetscFunctionBegin; ierr = MatSeqDenseCUDACopyToGPU(A);CHKERRQ(ierr); *a = dA->d_v; PetscFunctionReturn(0); } static PetscErrorCode MatDenseCUDARestoreArrayRead_SeqDenseCUDA(Mat A, const PetscScalar **a) { PetscFunctionBegin; *a = NULL; PetscFunctionReturn(0); } static PetscErrorCode MatDenseCUDAGetArray_SeqDenseCUDA(Mat A, PetscScalar **a) { Mat_SeqDenseCUDA *dA = (Mat_SeqDenseCUDA*)A->spptr; PetscErrorCode ierr; PetscFunctionBegin; ierr = MatSeqDenseCUDACopyToGPU(A);CHKERRQ(ierr); *a = dA->d_v; PetscFunctionReturn(0); } static PetscErrorCode MatDenseCUDARestoreArray_SeqDenseCUDA(Mat A, PetscScalar **a) { PetscFunctionBegin; *a = NULL; PetscFunctionReturn(0); } PETSC_EXTERN PetscErrorCode MatSeqDenseCUDAInvertFactors_Private(Mat A) { #if PETSC_PKG_CUDA_VERSION_GE(10,1,0) Mat_SeqDense *a = (Mat_SeqDense*)A->data; Mat_SeqDenseCUDA *dA = (Mat_SeqDenseCUDA*)A->spptr; PetscScalar *da; PetscErrorCode ierr; cudaError_t ccer; cusolverStatus_t cerr; cusolverDnHandle_t handle; PetscCuBLASInt n,lda; #if defined(PETSC_USE_DEBUG) PetscCuBLASInt info; #endif PetscFunctionBegin; if (!A->rmap->n || !A->cmap->n) PetscFunctionReturn(0); ierr = PetscCUSOLVERDnGetHandle(&handle);CHKERRQ(ierr); ierr = PetscCuBLASIntCast(A->cmap->n,&n);CHKERRQ(ierr); ierr = PetscCuBLASIntCast(a->lda,&lda);CHKERRQ(ierr); if (A->factortype == MAT_FACTOR_LU) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_LIB,"cusolverDngetri not implemented"); else if (A->factortype == MAT_FACTOR_CHOLESKY) { if (!dA->d_fact_ipiv) { /* spd */ PetscCuBLASInt il; ierr = MatDenseCUDAGetArray(A,&da);CHKERRQ(ierr); cerr = cusolverDnXpotri_bufferSize(handle,CUBLAS_FILL_MODE_LOWER,n,da,lda,&il);CHKERRCUSOLVER(cerr); if (il > dA->fact_lwork) { dA->fact_lwork = il; ccer = cudaFree(dA->d_fact_work);CHKERRCUDA(ccer); ccer = cudaMalloc((void**)&dA->d_fact_work,dA->fact_lwork*sizeof(*dA->d_fact_work));CHKERRCUDA(ccer); } ierr = PetscLogGpuTimeBegin();CHKERRQ(ierr); cerr = cusolverDnXpotri(handle,CUBLAS_FILL_MODE_LOWER,n,da,lda,dA->d_fact_work,dA->fact_lwork,dA->d_fact_info);CHKERRCUSOLVER(cerr); ierr = PetscLogGpuTimeEnd();CHKERRQ(ierr); ierr = MatDenseCUDARestoreArray(A,&da);CHKERRQ(ierr); /* TODO (write cuda kernel) */ ierr = MatSeqDenseSymmetrize_Private(A,PETSC_TRUE);CHKERRQ(ierr); } else SETERRQ(PETSC_COMM_SELF,PETSC_ERR_LIB,"cusolverDnsytri not implemented"); } #if defined(PETSC_USE_DEBUG) ccer = cudaMemcpy(&info, dA->d_fact_info, sizeof(PetscCuBLASInt), cudaMemcpyDeviceToHost);CHKERRCUDA(ccer); if (info > 0) SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_MAT_CH_ZRPVT,"Bad factorization: leading minor of order %d is zero",info); else if (info < 0) SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_PLIB,"Wrong argument to cuSolver %d",-info); #endif ierr = PetscLogGpuFlops(1.0*n*n*n/3.0);CHKERRQ(ierr); A->ops->solve = NULL; A->ops->solvetranspose = NULL; A->ops->matsolve = NULL; A->factortype = MAT_FACTOR_NONE; ierr = 
PetscFree(A->solvertype);CHKERRQ(ierr); PetscFunctionReturn(0); #else SETERRQ(PETSC_COMM_SELF,PETSC_ERR_SUP,"Upgrade to CUDA version 10.1.0 or higher"); #endif } static PetscErrorCode MatSolve_SeqDenseCUDA_Internal(Mat A, Vec xx, Vec yy, PetscBool transpose, PetscErrorCode (*matsolve)(Mat,PetscScalar*,PetscCuBLASInt,PetscCuBLASInt,PetscCuBLASInt,PetscCuBLASInt,PetscBool)) { Mat_SeqDenseCUDA *dA = (Mat_SeqDenseCUDA*)A->spptr; PetscScalar *y; PetscCuBLASInt m=0, k=0; PetscBool xiscuda, yiscuda, aiscuda; cudaError_t cerr; PetscErrorCode ierr; PetscFunctionBegin; if (A->factortype == MAT_FACTOR_NONE) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONGSTATE,"Matrix must be factored to solve"); ierr = PetscCuBLASIntCast(A->rmap->n,&m);CHKERRQ(ierr); ierr = PetscCuBLASIntCast(A->cmap->n,&k);CHKERRQ(ierr); ierr = PetscObjectTypeCompare((PetscObject)xx,VECSEQCUDA,&xiscuda);CHKERRQ(ierr); ierr = PetscObjectTypeCompare((PetscObject)yy,VECSEQCUDA,&yiscuda);CHKERRQ(ierr); { const PetscScalar *x; PetscBool xishost = PETSC_TRUE; /* The logic here is to try to minimize the amount of memory copying: if we call VecCUDAGetArrayRead(X,&x) every time xiscuda and the data is not offloaded to the GPU yet, then the data is copied to the GPU. But we are only trying to get the data in order to copy it into the y array. So the array x will be wherever the data already is so that only one memcpy is performed */ if (xiscuda && xx->offloadmask & PETSC_OFFLOAD_GPU) { ierr = VecCUDAGetArrayRead(xx, &x);CHKERRQ(ierr); xishost = PETSC_FALSE; } else { ierr = VecGetArrayRead(xx, &x);CHKERRQ(ierr); } if (k < m || !yiscuda) { if (!dA->workvec) { ierr = VecCreateSeqCUDA(PetscObjectComm((PetscObject)A), m, &(dA->workvec));CHKERRQ(ierr); } ierr = VecCUDAGetArrayWrite(dA->workvec, &y);CHKERRQ(ierr); } else { ierr = VecCUDAGetArrayWrite(yy,&y);CHKERRQ(ierr); } cerr = cudaMemcpy(y,x,m*sizeof(PetscScalar),xishost ? cudaMemcpyHostToDevice : cudaMemcpyDeviceToDevice);CHKERRCUDA(cerr); } ierr = PetscObjectTypeCompare((PetscObject)A,MATSEQDENSECUDA,&aiscuda);CHKERRQ(ierr); if (!aiscuda) { ierr = MatConvert(A,MATSEQDENSECUDA,MAT_INPLACE_MATRIX,&A);CHKERRQ(ierr); } ierr = (*matsolve) (A, y, m, m, 1, k, transpose);CHKERRQ(ierr); if (!aiscuda) { ierr = MatConvert(A,MATSEQDENSE,MAT_INPLACE_MATRIX,&A);CHKERRQ(ierr); } if (k < m || !yiscuda) { PetscScalar *yv; /* The logic here is that the data is not yet in either yy's GPU array or its CPU array. There is nothing in the interface to say where the user would like it to end up. So we choose the GPU, because it is the faster option */ if (yiscuda) { ierr = VecCUDAGetArrayWrite(yy,&yv);CHKERRQ(ierr); } else { ierr = VecGetArray(yy,&yv);CHKERRQ(ierr); } cerr = cudaMemcpy(yv,y,k*sizeof(PetscScalar),yiscuda ? 
cudaMemcpyDeviceToDevice: cudaMemcpyDeviceToHost);CHKERRCUDA(cerr); if (yiscuda) { ierr = VecCUDARestoreArrayWrite(yy,&yv);CHKERRQ(ierr); } else { ierr = VecRestoreArray(yy,&yv);CHKERRQ(ierr); } ierr = VecCUDARestoreArrayWrite(dA->workvec, &y);CHKERRQ(ierr); } else { ierr = VecCUDARestoreArrayWrite(yy,&y);CHKERRQ(ierr); } PetscFunctionReturn(0); } static PetscErrorCode MatMatSolve_SeqDenseCUDA_Internal(Mat A, Mat B, Mat X, PetscBool transpose, PetscErrorCode (*matsolve)(Mat,PetscScalar*,PetscCuBLASInt,PetscCuBLASInt,PetscCuBLASInt,PetscCuBLASInt,PetscBool)) { PetscScalar *y; PetscInt n, _ldb, _ldx; PetscBool biscuda, xiscuda, aiscuda; PetscCuBLASInt nrhs=0,m=0,k=0,ldb=0,ldx=0,ldy=0; cudaError_t cerr; PetscErrorCode ierr; PetscFunctionBegin; if (A->factortype == MAT_FACTOR_NONE) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONGSTATE,"Matrix must be factored to solve"); ierr = PetscCuBLASIntCast(A->rmap->n,&m);CHKERRQ(ierr); ierr = PetscCuBLASIntCast(A->cmap->n,&k);CHKERRQ(ierr); ierr = MatGetSize(B,NULL,&n);CHKERRQ(ierr); ierr = PetscCuBLASIntCast(n,&nrhs);CHKERRQ(ierr); ierr = MatDenseGetLDA(B,&_ldb);CHKERRQ(ierr); ierr = PetscCuBLASIntCast(_ldb, &ldb);CHKERRQ(ierr); ierr = MatDenseGetLDA(X,&_ldx);CHKERRQ(ierr); ierr = PetscCuBLASIntCast(_ldx, &ldx);CHKERRQ(ierr); ierr = PetscObjectTypeCompare((PetscObject)B,MATSEQDENSECUDA,&biscuda);CHKERRQ(ierr); ierr = PetscObjectTypeCompare((PetscObject)X,MATSEQDENSECUDA,&xiscuda);CHKERRQ(ierr); { /* The logic here is to try to minimize the amount of memory copying: if we call MatDenseCUDAGetArrayRead(B,&b) every time biscuda and the data is not offloaded to the GPU yet, then the data is copied to the GPU. But we are only trying to get the data in order to copy it into the y array. So the array b will be wherever the data already is so that only one memcpy is performed */ const PetscScalar *b; /* some copying from B will be involved */ PetscBool bishost = PETSC_TRUE; if (biscuda && B->offloadmask & PETSC_OFFLOAD_GPU) { ierr = MatDenseCUDAGetArrayRead(B,&b);CHKERRQ(ierr); bishost = PETSC_FALSE; } else { ierr = MatDenseGetArrayRead(B,&b);CHKERRQ(ierr); } if (ldx < m || !xiscuda) { /* X's array cannot serve as the array (too small or not on device), B's * array cannot serve as the array (const), so allocate a new array */ ldy = m; cerr = cudaMalloc((void**)&y,nrhs*m*sizeof(PetscScalar));CHKERRCUDA(cerr); } else { /* X's array should serve as the array */ ldy = ldx; ierr = MatDenseCUDAGetArrayWrite(X,&y);CHKERRQ(ierr); } cerr = cudaMemcpy2D(y,ldy*sizeof(PetscScalar),b,ldb*sizeof(PetscScalar),m*sizeof(PetscScalar),nrhs,bishost ? cudaMemcpyHostToDevice: cudaMemcpyDeviceToDevice);CHKERRCUDA(cerr); if (bishost) { ierr = MatDenseRestoreArrayRead(B,&b);CHKERRQ(ierr); } else { ierr = MatDenseCUDARestoreArrayRead(B,&b);CHKERRQ(ierr); } } ierr = PetscObjectTypeCompare((PetscObject)A,MATSEQDENSECUDA,&aiscuda);CHKERRQ(ierr); if (!aiscuda) { ierr = MatConvert(A,MATSEQDENSECUDA,MAT_INPLACE_MATRIX,&A);CHKERRQ(ierr); } ierr = (*matsolve) (A, y, ldy, m, nrhs, k, transpose);CHKERRQ(ierr); if (!aiscuda) { ierr = MatConvert(A,MATSEQDENSECUDA,MAT_INPLACE_MATRIX,&A);CHKERRQ(ierr); } if (ldx < m || !xiscuda) { PetscScalar *x; /* The logic here is that the data is not yet in either X's GPU array or its CPU array. There is nothing in the interface to say where the user would like it to end up. 
So we choose the GPU, because it is the faster option */ if (xiscuda) { ierr = MatDenseCUDAGetArrayWrite(X,&x);CHKERRQ(ierr); } else { ierr = MatDenseGetArray(X,&x);CHKERRQ(ierr); } cerr = cudaMemcpy2D(x,ldx*sizeof(PetscScalar),y,ldy*sizeof(PetscScalar),k*sizeof(PetscScalar),nrhs,xiscuda ? cudaMemcpyDeviceToDevice: cudaMemcpyDeviceToHost);CHKERRCUDA(cerr); if (xiscuda) { ierr = MatDenseCUDARestoreArrayWrite(X,&x);CHKERRQ(ierr); } else { ierr = MatDenseRestoreArray(X,&x);CHKERRQ(ierr); } cerr = cudaFree(y);CHKERRCUDA(cerr); } else { ierr = MatDenseCUDARestoreArrayWrite(X,&y);CHKERRQ(ierr); } PetscFunctionReturn(0); } static PetscErrorCode MatSolve_SeqDenseCUDA_Internal_LU(Mat A, PetscScalar *x, PetscCuBLASInt ldx, PetscCuBLASInt m, PetscCuBLASInt nrhs, PetscCuBLASInt k, PetscBool T) { Mat_SeqDense *mat = (Mat_SeqDense*)A->data; Mat_SeqDenseCUDA *dA = (Mat_SeqDenseCUDA*)A->spptr; const PetscScalar *da; PetscCuBLASInt lda; cusolverDnHandle_t handle; cudaError_t ccer; cusolverStatus_t cerr; int info; PetscErrorCode ierr; PetscFunctionBegin; ierr = PetscCuBLASIntCast(mat->lda,&lda);CHKERRQ(ierr); ierr = MatDenseCUDAGetArrayRead(A,&da);CHKERRQ(ierr); ierr = PetscCUSOLVERDnGetHandle(&handle);CHKERRQ(ierr); ierr = PetscLogGpuTimeBegin();CHKERRQ(ierr); ierr = PetscInfo2(A,"LU solve %d x %d on backend\n",m,k);CHKERRQ(ierr); cerr = cusolverDnXgetrs(handle,T ? CUBLAS_OP_T : CUBLAS_OP_N,m,nrhs,da,lda,dA->d_fact_ipiv,x,ldx,dA->d_fact_info);CHKERRCUSOLVER(cerr); ierr = PetscLogGpuTimeEnd();CHKERRQ(ierr); ierr = MatDenseCUDARestoreArrayRead(A,&da);CHKERRQ(ierr); if (PetscDefined(USE_DEBUG)) { ccer = cudaMemcpy(&info, dA->d_fact_info, sizeof(PetscCuBLASInt), cudaMemcpyDeviceToHost);CHKERRCUDA(ccer); if (info > 0) SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_MAT_CH_ZRPVT,"Bad factorization: zero pivot in row %d",info-1); else if (info < 0) SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_PLIB,"Wrong argument to cuSolver %d",-info); } ierr = PetscLogGpuFlops(nrhs*(2.0*m*m - m));CHKERRQ(ierr); PetscFunctionReturn(0); } static PetscErrorCode MatSolve_SeqDenseCUDA_Internal_Cholesky(Mat A, PetscScalar *x, PetscCuBLASInt ldx, PetscCuBLASInt m, PetscCuBLASInt nrhs, PetscCuBLASInt k, PetscBool T) { Mat_SeqDense *mat = (Mat_SeqDense*)A->data; Mat_SeqDenseCUDA *dA = (Mat_SeqDenseCUDA*)A->spptr; const PetscScalar *da; PetscCuBLASInt lda; cusolverDnHandle_t handle; cudaError_t ccer; cusolverStatus_t cerr; int info; PetscErrorCode ierr; PetscFunctionBegin; ierr = PetscCuBLASIntCast(mat->lda,&lda);CHKERRQ(ierr); ierr = MatDenseCUDAGetArrayRead(A,&da);CHKERRQ(ierr); ierr = PetscCUSOLVERDnGetHandle(&handle);CHKERRQ(ierr); ierr = PetscLogGpuTimeBegin();CHKERRQ(ierr); ierr = PetscInfo2(A,"Cholesky solve %d x %d on backend\n",m,k);CHKERRQ(ierr); if (!dA->d_fact_ipiv) { /* spd */ /* ========= Program hit cudaErrorNotReady (error 34) due to "device not ready" on CUDA API call to cudaEventQuery. 
*/ cerr = cusolverDnXpotrs(handle,CUBLAS_FILL_MODE_LOWER,m,nrhs,da,lda,x,ldx,dA->d_fact_info);CHKERRCUSOLVER(cerr); } else SETERRQ(PETSC_COMM_SELF,PETSC_ERR_LIB,"cusolverDnsytrs not implemented"); ierr = PetscLogGpuTimeEnd();CHKERRQ(ierr); ierr = MatDenseCUDARestoreArrayRead(A,&da);CHKERRQ(ierr); if (PetscDefined(USE_DEBUG)) { ccer = cudaMemcpy(&info, dA->d_fact_info, sizeof(PetscCuBLASInt), cudaMemcpyDeviceToHost);CHKERRCUDA(ccer); if (info > 0) SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_MAT_CH_ZRPVT,"Bad factorization: zero pivot in row %d",info-1); else if (info < 0) SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_PLIB,"Wrong argument to cuSolver %d",-info); } ierr = PetscLogGpuFlops(nrhs*(2.0*m*m - m));CHKERRQ(ierr); PetscFunctionReturn(0); } static PetscErrorCode MatSolve_SeqDenseCUDA_Internal_QR(Mat A, PetscScalar *x, PetscCuBLASInt ldx, PetscCuBLASInt m, PetscCuBLASInt nrhs, PetscCuBLASInt k, PetscBool T) { Mat_SeqDense *mat = (Mat_SeqDense*)A->data; Mat_SeqDenseCUDA *dA = (Mat_SeqDenseCUDA*)A->spptr; const PetscScalar *da; PetscCuBLASInt lda, rank; cusolverDnHandle_t handle; cublasHandle_t bhandle; cudaError_t ccer; cusolverStatus_t csrr; cublasStatus_t cbrr; int info; cublasOperation_t trans; PetscScalar one = 1.; PetscErrorCode ierr; PetscFunctionBegin; ierr = PetscCuBLASIntCast(mat->lda,&lda);CHKERRQ(ierr); ierr = PetscCuBLASIntCast(mat->rank,&rank);CHKERRQ(ierr); ierr = MatDenseCUDAGetArrayRead(A,&da);CHKERRQ(ierr); ierr = PetscCUSOLVERDnGetHandle(&handle);CHKERRQ(ierr); ierr = PetscCUBLASGetHandle(&bhandle);CHKERRQ(ierr); ierr = PetscLogGpuTimeBegin();CHKERRQ(ierr); ierr = PetscInfo2(A,"QR solve %d x %d on backend\n",m,k);CHKERRQ(ierr); if (!T) { if (PetscDefined(USE_COMPLEX)) { trans = CUBLAS_OP_C; } else { trans = CUBLAS_OP_T; } csrr = cusolverDnXormqr(handle, CUBLAS_SIDE_LEFT, trans, m, nrhs, rank, da, lda, dA->d_fact_tau, x, ldx, dA->d_fact_work, dA->fact_lwork, dA->d_fact_info);CHKERRCUSOLVER(csrr); if (PetscDefined(USE_DEBUG)) { ccer = cudaMemcpy(&info, dA->d_fact_info, sizeof(PetscCuBLASInt), cudaMemcpyDeviceToHost);CHKERRCUDA(ccer); if (info != 0) SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_PLIB,"Wrong argument to cuSolver %d",-info); } cbrr = cublasXtrsm(bhandle, CUBLAS_SIDE_LEFT, CUBLAS_FILL_MODE_UPPER, CUBLAS_OP_N, CUBLAS_DIAG_NON_UNIT, rank, nrhs, &one, da, lda, x, ldx);CHKERRCUBLAS(cbrr); } else { cbrr = cublasXtrsm(bhandle, CUBLAS_SIDE_LEFT, CUBLAS_FILL_MODE_UPPER, CUBLAS_OP_T, CUBLAS_DIAG_NON_UNIT, rank, nrhs, &one, da, lda, x, ldx);CHKERRCUBLAS(cbrr); csrr = cusolverDnXormqr(handle, CUBLAS_SIDE_LEFT, CUBLAS_OP_N, m, nrhs, rank, da, lda, dA->d_fact_tau, x, ldx, dA->d_fact_work, dA->fact_lwork, dA->d_fact_info);CHKERRCUSOLVER(csrr); if (PetscDefined(USE_DEBUG)) { ccer = cudaMemcpy(&info, dA->d_fact_info, sizeof(PetscCuBLASInt), cudaMemcpyDeviceToHost);CHKERRCUDA(ccer); if (info != 0) SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_PLIB,"Wrong argument to cuSolver %d",-info); } } ierr = PetscLogGpuTimeEnd();CHKERRQ(ierr); ierr = MatDenseCUDARestoreArrayRead(A,&da);CHKERRQ(ierr); ierr = PetscLogFlops(nrhs*(4.0*m*mat->rank - PetscSqr(mat->rank)));CHKERRQ(ierr); PetscFunctionReturn(0); } static PetscErrorCode MatSolve_SeqDenseCUDA_LU(Mat A,Vec xx,Vec yy) { PetscErrorCode ierr; PetscFunctionBegin; ierr = MatSolve_SeqDenseCUDA_Internal(A, xx, yy, PETSC_FALSE, MatSolve_SeqDenseCUDA_Internal_LU);CHKERRQ(ierr); PetscFunctionReturn(0); } static PetscErrorCode MatSolveTranspose_SeqDenseCUDA_LU(Mat A,Vec xx,Vec yy) { PetscErrorCode ierr; PetscFunctionBegin; ierr = MatSolve_SeqDenseCUDA_Internal(A, xx, yy, PETSC_TRUE, 
MatSolve_SeqDenseCUDA_Internal_LU);CHKERRQ(ierr); PetscFunctionReturn(0); } static PetscErrorCode MatSolve_SeqDenseCUDA_Cholesky(Mat A,Vec xx,Vec yy) { PetscErrorCode ierr; PetscFunctionBegin; ierr = MatSolve_SeqDenseCUDA_Internal(A, xx, yy, PETSC_FALSE, MatSolve_SeqDenseCUDA_Internal_Cholesky);CHKERRQ(ierr); PetscFunctionReturn(0); } static PetscErrorCode MatSolveTranspose_SeqDenseCUDA_Cholesky(Mat A,Vec xx,Vec yy) { PetscErrorCode ierr; PetscFunctionBegin; ierr = MatSolve_SeqDenseCUDA_Internal(A, xx, yy, PETSC_TRUE, MatSolve_SeqDenseCUDA_Internal_Cholesky);CHKERRQ(ierr); PetscFunctionReturn(0); } static PetscErrorCode MatSolve_SeqDenseCUDA_QR(Mat A,Vec xx,Vec yy) { PetscErrorCode ierr; PetscFunctionBegin; ierr = MatSolve_SeqDenseCUDA_Internal(A, xx, yy, PETSC_FALSE, MatSolve_SeqDenseCUDA_Internal_QR);CHKERRQ(ierr); PetscFunctionReturn(0); } static PetscErrorCode MatSolveTranspose_SeqDenseCUDA_QR(Mat A,Vec xx,Vec yy) { PetscErrorCode ierr; PetscFunctionBegin; ierr = MatSolve_SeqDenseCUDA_Internal(A, xx, yy, PETSC_TRUE, MatSolve_SeqDenseCUDA_Internal_QR);CHKERRQ(ierr); PetscFunctionReturn(0); } static PetscErrorCode MatMatSolve_SeqDenseCUDA_LU(Mat A,Mat B,Mat X) { PetscErrorCode ierr; PetscFunctionBegin; ierr = MatMatSolve_SeqDenseCUDA_Internal(A, B, X, PETSC_FALSE, MatSolve_SeqDenseCUDA_Internal_LU);CHKERRQ(ierr); PetscFunctionReturn(0); } static PetscErrorCode MatMatSolveTranspose_SeqDenseCUDA_LU(Mat A,Mat B,Mat X) { PetscErrorCode ierr; PetscFunctionBegin; ierr = MatMatSolve_SeqDenseCUDA_Internal(A, B, X, PETSC_TRUE, MatSolve_SeqDenseCUDA_Internal_LU);CHKERRQ(ierr); PetscFunctionReturn(0); } static PetscErrorCode MatMatSolve_SeqDenseCUDA_Cholesky(Mat A,Mat B,Mat X) { PetscErrorCode ierr; PetscFunctionBegin; ierr = MatMatSolve_SeqDenseCUDA_Internal(A, B, X, PETSC_FALSE, MatSolve_SeqDenseCUDA_Internal_Cholesky);CHKERRQ(ierr); PetscFunctionReturn(0); } static PetscErrorCode MatMatSolveTranspose_SeqDenseCUDA_Cholesky(Mat A,Mat B,Mat X) { PetscErrorCode ierr; PetscFunctionBegin; ierr = MatMatSolve_SeqDenseCUDA_Internal(A, B, X, PETSC_TRUE, MatSolve_SeqDenseCUDA_Internal_Cholesky);CHKERRQ(ierr); PetscFunctionReturn(0); } static PetscErrorCode MatMatSolve_SeqDenseCUDA_QR(Mat A,Mat B,Mat X) { PetscErrorCode ierr; PetscFunctionBegin; ierr = MatMatSolve_SeqDenseCUDA_Internal(A, B, X, PETSC_FALSE, MatSolve_SeqDenseCUDA_Internal_QR);CHKERRQ(ierr); PetscFunctionReturn(0); } static PetscErrorCode MatMatSolveTranspose_SeqDenseCUDA_QR(Mat A,Mat B,Mat X) { PetscErrorCode ierr; PetscFunctionBegin; ierr = MatMatSolve_SeqDenseCUDA_Internal(A, B, X, PETSC_TRUE, MatSolve_SeqDenseCUDA_Internal_QR);CHKERRQ(ierr); PetscFunctionReturn(0); } static PetscErrorCode MatLUFactor_SeqDenseCUDA(Mat A,IS rperm,IS cperm,const MatFactorInfo *factinfo) { Mat_SeqDense *a = (Mat_SeqDense*)A->data; Mat_SeqDenseCUDA *dA = (Mat_SeqDenseCUDA*)A->spptr; PetscScalar *da; PetscCuBLASInt m,n,lda; #if defined(PETSC_USE_DEBUG) int info; #endif cusolverStatus_t cerr; cusolverDnHandle_t handle; cudaError_t ccer; PetscErrorCode ierr; PetscFunctionBegin; if (!A->rmap->n || !A->cmap->n) PetscFunctionReturn(0); ierr = PetscCUSOLVERDnGetHandle(&handle);CHKERRQ(ierr); ierr = MatDenseCUDAGetArray(A,&da);CHKERRQ(ierr); ierr = PetscCuBLASIntCast(A->cmap->n,&n);CHKERRQ(ierr); ierr = PetscCuBLASIntCast(A->rmap->n,&m);CHKERRQ(ierr); ierr = PetscCuBLASIntCast(a->lda,&lda);CHKERRQ(ierr); ierr = PetscInfo2(A,"LU factor %d x %d on backend\n",m,n);CHKERRQ(ierr); if (!dA->d_fact_ipiv) { ccer = 
cudaMalloc((void**)&dA->d_fact_ipiv,n*sizeof(*dA->d_fact_ipiv));CHKERRCUDA(ccer); } if (!dA->fact_lwork) { cerr = cusolverDnXgetrf_bufferSize(handle,m,n,da,lda,&dA->fact_lwork);CHKERRCUSOLVER(cerr); ccer = cudaMalloc((void**)&dA->d_fact_work,dA->fact_lwork*sizeof(*dA->d_fact_work));CHKERRCUDA(ccer); } if (!dA->d_fact_info) { ccer = cudaMalloc((void**)&dA->d_fact_info,sizeof(*dA->d_fact_info));CHKERRCUDA(ccer); } ierr = PetscLogGpuTimeBegin();CHKERRQ(ierr); cerr = cusolverDnXgetrf(handle,m,n,da,lda,dA->d_fact_work,dA->d_fact_ipiv,dA->d_fact_info);CHKERRCUSOLVER(cerr); ierr = PetscLogGpuTimeEnd();CHKERRQ(ierr); ierr = MatDenseCUDARestoreArray(A,&da);CHKERRQ(ierr); #if defined(PETSC_USE_DEBUG) ccer = cudaMemcpy(&info, dA->d_fact_info, sizeof(PetscCuBLASInt), cudaMemcpyDeviceToHost);CHKERRCUDA(ccer); if (info > 0) SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_MAT_LU_ZRPVT,"Bad factorization: zero pivot in row %d",info-1); else if (info < 0) SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_PLIB,"Wrong argument to cuSolver %d",-info); #endif A->factortype = MAT_FACTOR_LU; ierr = PetscLogGpuFlops(2.0*n*n*m/3.0);CHKERRQ(ierr); A->ops->solve = MatSolve_SeqDenseCUDA_LU; A->ops->solvetranspose = MatSolveTranspose_SeqDenseCUDA_LU; A->ops->matsolve = MatMatSolve_SeqDenseCUDA_LU; A->ops->matsolvetranspose = MatMatSolveTranspose_SeqDenseCUDA_LU; ierr = PetscFree(A->solvertype);CHKERRQ(ierr); ierr = PetscStrallocpy(MATSOLVERCUDA,&A->solvertype);CHKERRQ(ierr); PetscFunctionReturn(0); } static PetscErrorCode MatCholeskyFactor_SeqDenseCUDA(Mat A,IS perm,const MatFactorInfo *factinfo) { Mat_SeqDense *a = (Mat_SeqDense*)A->data; Mat_SeqDenseCUDA *dA = (Mat_SeqDenseCUDA*)A->spptr; PetscScalar *da; PetscCuBLASInt n,lda; #if defined(PETSC_USE_DEBUG) int info; #endif cusolverStatus_t cerr; cusolverDnHandle_t handle; cudaError_t ccer; PetscErrorCode ierr; PetscFunctionBegin; if (!A->rmap->n || !A->cmap->n) PetscFunctionReturn(0); ierr = PetscCUSOLVERDnGetHandle(&handle);CHKERRQ(ierr); ierr = PetscCuBLASIntCast(A->rmap->n,&n);CHKERRQ(ierr); ierr = PetscInfo2(A,"Cholesky factor %d x %d on backend\n",n,n);CHKERRQ(ierr); if (A->spd) { ierr = MatDenseCUDAGetArray(A,&da);CHKERRQ(ierr); ierr = PetscCuBLASIntCast(a->lda,&lda);CHKERRQ(ierr); if (!dA->fact_lwork) { cerr = cusolverDnXpotrf_bufferSize(handle,CUBLAS_FILL_MODE_LOWER,n,da,lda,&dA->fact_lwork);CHKERRCUSOLVER(cerr); ccer = cudaMalloc((void**)&dA->d_fact_work,dA->fact_lwork*sizeof(*dA->d_fact_work));CHKERRCUDA(ccer); } if (!dA->d_fact_info) { ccer = cudaMalloc((void**)&dA->d_fact_info,sizeof(*dA->d_fact_info));CHKERRCUDA(ccer); } ierr = PetscLogGpuTimeBegin();CHKERRQ(ierr); cerr = cusolverDnXpotrf(handle,CUBLAS_FILL_MODE_LOWER,n,da,lda,dA->d_fact_work,dA->fact_lwork,dA->d_fact_info);CHKERRCUSOLVER(cerr); ierr = PetscLogGpuTimeEnd();CHKERRQ(ierr); ierr = MatDenseCUDARestoreArray(A,&da);CHKERRQ(ierr); #if defined(PETSC_USE_DEBUG) ccer = cudaMemcpy(&info, dA->d_fact_info, sizeof(PetscCuBLASInt), cudaMemcpyDeviceToHost);CHKERRCUDA(ccer); if (info > 0) SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_MAT_CH_ZRPVT,"Bad factorization: zero pivot in row %d",info-1); else if (info < 0) SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_PLIB,"Wrong argument to cuSolver %d",-info); #endif A->factortype = MAT_FACTOR_CHOLESKY; ierr = PetscLogGpuFlops(1.0*n*n*n/3.0);CHKERRQ(ierr); } else SETERRQ(PETSC_COMM_SELF,PETSC_ERR_SUP,"cusolverDnsytrs unavailable. 
Use MAT_FACTOR_LU"); #if 0 /* at the time of writing this interface (cuda 10.0), cusolverDn does not implement *sytrs and *hetr* routines The code below should work, and it can be activated when *sytrs routines will be available */ if (!dA->d_fact_ipiv) { ccer = cudaMalloc((void**)&dA->d_fact_ipiv,n*sizeof(*dA->d_fact_ipiv));CHKERRCUDA(ccer); } if (!dA->fact_lwork) { cerr = cusolverDnXsytrf_bufferSize(handle,n,da,lda,&dA->fact_lwork);CHKERRCUSOLVER(cerr); ccer = cudaMalloc((void**)&dA->d_fact_work,dA->fact_lwork*sizeof(*dA->d_fact_work));CHKERRCUDA(ccer); } if (!dA->d_fact_info) { ccer = cudaMalloc((void**)&dA->d_fact_info,sizeof(*dA->d_fact_info));CHKERRCUDA(ccer); } ierr = PetscLogGpuTimeBegin();CHKERRQ(ierr); cerr = cusolverDnXsytrf(handle,CUBLAS_FILL_MODE_LOWER,n,da,lda,dA->d_fact_ipiv,dA->d_fact_work,dA->fact_lwork,dA->d_fact_info);CHKERRCUSOLVER(cerr); ierr = PetscLogGpuTimeEnd();CHKERRQ(ierr); #endif A->ops->solve = MatSolve_SeqDenseCUDA_Cholesky; A->ops->solvetranspose = MatSolveTranspose_SeqDenseCUDA_Cholesky; A->ops->matsolve = MatMatSolve_SeqDenseCUDA_Cholesky; A->ops->matsolvetranspose = MatMatSolveTranspose_SeqDenseCUDA_Cholesky; ierr = PetscFree(A->solvertype);CHKERRQ(ierr); ierr = PetscStrallocpy(MATSOLVERCUDA,&A->solvertype);CHKERRQ(ierr); PetscFunctionReturn(0); } static PetscErrorCode MatQRFactor_SeqDenseCUDA(Mat A,IS col,const MatFactorInfo *factinfo) { Mat_SeqDense *a = (Mat_SeqDense*)A->data; Mat_SeqDenseCUDA *dA = (Mat_SeqDenseCUDA*)A->spptr; PetscScalar *da; PetscCuBLASInt m,min,max,n,lda; #if defined(PETSC_USE_DEBUG) int info; #endif cusolverStatus_t cerr; cusolverDnHandle_t handle; cudaError_t ccer; PetscErrorCode ierr; PetscFunctionBegin; if (!A->rmap->n || !A->cmap->n) PetscFunctionReturn(0); ierr = PetscCUSOLVERDnGetHandle(&handle);CHKERRQ(ierr); ierr = MatDenseCUDAGetArray(A,&da);CHKERRQ(ierr); ierr = PetscCuBLASIntCast(A->cmap->n,&n);CHKERRQ(ierr); ierr = PetscCuBLASIntCast(A->rmap->n,&m);CHKERRQ(ierr); ierr = PetscCuBLASIntCast(a->lda,&lda);CHKERRQ(ierr); ierr = PetscInfo2(A,"QR factor %d x %d on backend\n",m,n);CHKERRQ(ierr); max = PetscMax(m,n); min = PetscMin(m,n); if (!dA->d_fact_tau) { ccer = cudaMalloc((void**)&dA->d_fact_tau,min*sizeof(*dA->d_fact_tau));CHKERRCUDA(ccer); } if (!dA->d_fact_ipiv) { ccer = cudaMalloc((void**)&dA->d_fact_ipiv,n*sizeof(*dA->d_fact_ipiv));CHKERRCUDA(ccer); } if (!dA->fact_lwork) { cerr = cusolverDnXgeqrf_bufferSize(handle,m,n,da,lda,&dA->fact_lwork);CHKERRCUSOLVER(cerr); ccer = cudaMalloc((void**)&dA->d_fact_work,dA->fact_lwork*sizeof(*dA->d_fact_work));CHKERRCUDA(ccer); } if (!dA->d_fact_info) { ccer = cudaMalloc((void**)&dA->d_fact_info,sizeof(*dA->d_fact_info));CHKERRCUDA(ccer); } if (!dA->workvec) { ierr = VecCreateSeqCUDA(PetscObjectComm((PetscObject)A), m, &(dA->workvec));CHKERRQ(ierr); } ierr = PetscLogGpuTimeBegin();CHKERRQ(ierr); cerr = cusolverDnXgeqrf(handle,m,n,da,lda,dA->d_fact_tau,dA->d_fact_work,dA->fact_lwork,dA->d_fact_info);CHKERRCUSOLVER(cerr); ierr = PetscLogGpuTimeEnd();CHKERRQ(ierr); ierr = MatDenseCUDARestoreArray(A,&da);CHKERRQ(ierr); #if defined(PETSC_USE_DEBUG) ccer = cudaMemcpy(&info, dA->d_fact_info, sizeof(PetscCuBLASInt), cudaMemcpyDeviceToHost);CHKERRCUDA(ccer); if (info < 0) SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_PLIB,"Wrong argument to cuSolver %d",-info); #endif A->factortype = MAT_FACTOR_QR; a->rank = min; ierr = PetscLogGpuFlops(2.0*min*min*(max-min/3.0));CHKERRQ(ierr); A->ops->solve = MatSolve_SeqDenseCUDA_QR; A->ops->solvetranspose = MatSolveTranspose_SeqDenseCUDA_QR; A->ops->matsolve = 
MatMatSolve_SeqDenseCUDA_QR; A->ops->matsolvetranspose = MatMatSolveTranspose_SeqDenseCUDA_QR; ierr = PetscFree(A->solvertype);CHKERRQ(ierr); ierr = PetscStrallocpy(MATSOLVERCUDA,&A->solvertype);CHKERRQ(ierr); PetscFunctionReturn(0); } /* GEMM kernel: C = op(A)*op(B), tA, tB flag transposition */ PETSC_INTERN PetscErrorCode MatMatMultNumeric_SeqDenseCUDA_SeqDenseCUDA_Private(Mat A,Mat B,Mat C,PetscBool tA,PetscBool tB) { const PetscScalar *da,*db; PetscScalar *dc; PetscScalar one=1.0,zero=0.0; PetscCuBLASInt m,n,k; PetscInt alda,blda,clda; PetscErrorCode ierr; cublasHandle_t cublasv2handle; PetscBool Aiscuda,Biscuda; cublasStatus_t berr; PetscFunctionBegin; /* we may end up with SEQDENSE as one of the arguments */ ierr = PetscObjectTypeCompare((PetscObject)A,MATSEQDENSECUDA,&Aiscuda);CHKERRQ(ierr); ierr = PetscObjectTypeCompare((PetscObject)B,MATSEQDENSECUDA,&Biscuda);CHKERRQ(ierr); if (!Aiscuda) { ierr = MatConvert(A,MATSEQDENSECUDA,MAT_INPLACE_MATRIX,&A);CHKERRQ(ierr); } if (!Biscuda) { ierr = MatConvert(B,MATSEQDENSECUDA,MAT_INPLACE_MATRIX,&B);CHKERRQ(ierr); } ierr = PetscCuBLASIntCast(C->rmap->n,&m);CHKERRQ(ierr); ierr = PetscCuBLASIntCast(C->cmap->n,&n);CHKERRQ(ierr); if (tA) { ierr = PetscCuBLASIntCast(A->rmap->n,&k);CHKERRQ(ierr); } else { ierr = PetscCuBLASIntCast(A->cmap->n,&k);CHKERRQ(ierr); } if (!m || !n || !k) PetscFunctionReturn(0); ierr = PetscInfo3(C,"Matrix-Matrix product %d x %d x %d on backend\n",m,k,n);CHKERRQ(ierr); ierr = MatDenseCUDAGetArrayRead(A,&da);CHKERRQ(ierr); ierr = MatDenseCUDAGetArrayRead(B,&db);CHKERRQ(ierr); ierr = MatDenseCUDAGetArrayWrite(C,&dc);CHKERRQ(ierr); ierr = MatDenseGetLDA(A,&alda);CHKERRQ(ierr); ierr = MatDenseGetLDA(B,&blda);CHKERRQ(ierr); ierr = MatDenseGetLDA(C,&clda);CHKERRQ(ierr); ierr = PetscCUBLASGetHandle(&cublasv2handle);CHKERRQ(ierr); ierr = PetscLogGpuTimeBegin();CHKERRQ(ierr); berr = cublasXgemm(cublasv2handle,tA ? CUBLAS_OP_T : CUBLAS_OP_N,tB ? 
CUBLAS_OP_T : CUBLAS_OP_N, m,n,k,&one,da,alda,db,blda,&zero,dc,clda);CHKERRCUBLAS(berr); ierr = PetscLogGpuTimeEnd();CHKERRQ(ierr); ierr = PetscLogGpuFlops(1.0*m*n*k + 1.0*m*n*(k-1));CHKERRQ(ierr); ierr = MatDenseCUDARestoreArrayRead(A,&da);CHKERRQ(ierr); ierr = MatDenseCUDARestoreArrayRead(B,&db);CHKERRQ(ierr); ierr = MatDenseCUDARestoreArrayWrite(C,&dc);CHKERRQ(ierr); if (!Aiscuda) { ierr = MatConvert(A,MATSEQDENSE,MAT_INPLACE_MATRIX,&A);CHKERRQ(ierr); } if (!Biscuda) { ierr = MatConvert(B,MATSEQDENSE,MAT_INPLACE_MATRIX,&B);CHKERRQ(ierr); } PetscFunctionReturn(0); } PetscErrorCode MatTransposeMatMultNumeric_SeqDenseCUDA_SeqDenseCUDA(Mat A,Mat B,Mat C) { PetscErrorCode ierr; PetscFunctionBegin; ierr = MatMatMultNumeric_SeqDenseCUDA_SeqDenseCUDA_Private(A,B,C,PETSC_TRUE,PETSC_FALSE);CHKERRQ(ierr); PetscFunctionReturn(0); } PetscErrorCode MatMatMultNumeric_SeqDenseCUDA_SeqDenseCUDA(Mat A,Mat B,Mat C) { PetscErrorCode ierr; PetscFunctionBegin; ierr = MatMatMultNumeric_SeqDenseCUDA_SeqDenseCUDA_Private(A,B,C,PETSC_FALSE,PETSC_FALSE);CHKERRQ(ierr); PetscFunctionReturn(0); } PetscErrorCode MatMatTransposeMultNumeric_SeqDenseCUDA_SeqDenseCUDA(Mat A,Mat B,Mat C) { PetscErrorCode ierr; PetscFunctionBegin; ierr = MatMatMultNumeric_SeqDenseCUDA_SeqDenseCUDA_Private(A,B,C,PETSC_FALSE,PETSC_TRUE);CHKERRQ(ierr); PetscFunctionReturn(0); } PetscErrorCode MatProductSetFromOptions_SeqDenseCUDA(Mat C) { PetscErrorCode ierr; PetscFunctionBegin; ierr = MatProductSetFromOptions_SeqDense(C);CHKERRQ(ierr); PetscFunctionReturn(0); } /* zz = op(A)*xx + yy if yy == NULL, only MatMult */ static PetscErrorCode MatMultAdd_SeqDenseCUDA_Private(Mat A,Vec xx,Vec yy,Vec zz,PetscBool trans) { Mat_SeqDense *mat = (Mat_SeqDense*)A->data; const PetscScalar *xarray,*da; PetscScalar *zarray; PetscScalar one=1.0,zero=0.0; PetscCuBLASInt m, n, lda; cublasHandle_t cublasv2handle; cublasStatus_t berr; PetscErrorCode ierr; PetscFunctionBegin; if (yy && yy != zz) { /* mult add */ ierr = VecCopy_SeqCUDA(yy,zz);CHKERRQ(ierr); } if (!A->rmap->n || !A->cmap->n) { if (!yy) { /* mult only */ ierr = VecSet_SeqCUDA(zz,0.0);CHKERRQ(ierr); } PetscFunctionReturn(0); } ierr = PetscInfo2(A,"Matrix-vector product %d x %d on backend\n",A->rmap->n,A->cmap->n);CHKERRQ(ierr); ierr = PetscCuBLASIntCast(A->rmap->n,&m);CHKERRQ(ierr); ierr = PetscCuBLASIntCast(A->cmap->n,&n);CHKERRQ(ierr); ierr = PetscCuBLASIntCast(mat->lda,&lda);CHKERRQ(ierr); ierr = PetscCUBLASGetHandle(&cublasv2handle);CHKERRQ(ierr); ierr = MatDenseCUDAGetArrayRead(A,&da);CHKERRQ(ierr); ierr = VecCUDAGetArrayRead(xx,&xarray);CHKERRQ(ierr); ierr = VecCUDAGetArray(zz,&zarray);CHKERRQ(ierr); ierr = PetscLogGpuTimeBegin();CHKERRQ(ierr); berr = cublasXgemv(cublasv2handle,trans ? CUBLAS_OP_T : CUBLAS_OP_N, m,n,&one,da,lda,xarray,1,(yy ? &one : &zero),zarray,1);CHKERRCUBLAS(berr); ierr = PetscLogGpuTimeEnd();CHKERRQ(ierr); ierr = PetscLogGpuFlops(2.0*A->rmap->n*A->cmap->n - (yy ? 
0 : A->rmap->n));CHKERRQ(ierr); ierr = VecCUDARestoreArrayRead(xx,&xarray);CHKERRQ(ierr); ierr = VecCUDARestoreArray(zz,&zarray);CHKERRQ(ierr); ierr = MatDenseCUDARestoreArrayRead(A,&da);CHKERRQ(ierr); PetscFunctionReturn(0); } PetscErrorCode MatMultAdd_SeqDenseCUDA(Mat A,Vec xx,Vec yy,Vec zz) { PetscErrorCode ierr; PetscFunctionBegin; ierr = MatMultAdd_SeqDenseCUDA_Private(A,xx,yy,zz,PETSC_FALSE);CHKERRQ(ierr); PetscFunctionReturn(0); } PetscErrorCode MatMultTransposeAdd_SeqDenseCUDA(Mat A,Vec xx,Vec yy,Vec zz) { PetscErrorCode ierr; PetscFunctionBegin; ierr = MatMultAdd_SeqDenseCUDA_Private(A,xx,yy,zz,PETSC_TRUE);CHKERRQ(ierr); PetscFunctionReturn(0); } PetscErrorCode MatMult_SeqDenseCUDA(Mat A,Vec xx,Vec yy) { PetscErrorCode ierr; PetscFunctionBegin; ierr = MatMultAdd_SeqDenseCUDA_Private(A,xx,NULL,yy,PETSC_FALSE);CHKERRQ(ierr); PetscFunctionReturn(0); } PetscErrorCode MatMultTranspose_SeqDenseCUDA(Mat A,Vec xx,Vec yy) { PetscErrorCode ierr; PetscFunctionBegin; ierr = MatMultAdd_SeqDenseCUDA_Private(A,xx,NULL,yy,PETSC_TRUE);CHKERRQ(ierr); PetscFunctionReturn(0); } static PetscErrorCode MatDenseGetArrayRead_SeqDenseCUDA(Mat A,const PetscScalar **array) { Mat_SeqDense *mat = (Mat_SeqDense*)A->data; PetscErrorCode ierr; PetscFunctionBegin; ierr = MatSeqDenseCUDACopyFromGPU(A);CHKERRQ(ierr); *array = mat->v; PetscFunctionReturn(0); } static PetscErrorCode MatDenseGetArrayWrite_SeqDenseCUDA(Mat A,PetscScalar **array) { Mat_SeqDense *mat = (Mat_SeqDense*)A->data; PetscErrorCode ierr; PetscFunctionBegin; if (!mat->v) { /* MatCreateSeqDenseCUDA may not allocate CPU memory. Allocate if needed */ ierr = MatSeqDenseSetPreallocation(A,NULL);CHKERRQ(ierr); } *array = mat->v; A->offloadmask = PETSC_OFFLOAD_CPU; PetscFunctionReturn(0); } static PetscErrorCode MatDenseGetArray_SeqDenseCUDA(Mat A,PetscScalar **array) { Mat_SeqDense *mat = (Mat_SeqDense*)A->data; PetscErrorCode ierr; PetscFunctionBegin; ierr = MatSeqDenseCUDACopyFromGPU(A);CHKERRQ(ierr); *array = mat->v; A->offloadmask = PETSC_OFFLOAD_CPU; PetscFunctionReturn(0); } PetscErrorCode MatScale_SeqDenseCUDA(Mat Y,PetscScalar alpha) { Mat_SeqDense *y = (Mat_SeqDense*)Y->data; PetscScalar *dy; PetscCuBLASInt j,N,m,lday,one = 1; cublasHandle_t cublasv2handle; cublasStatus_t berr; PetscErrorCode ierr; PetscFunctionBegin; ierr = PetscCUBLASGetHandle(&cublasv2handle);CHKERRQ(ierr); ierr = MatDenseCUDAGetArray(Y,&dy);CHKERRQ(ierr); ierr = PetscCuBLASIntCast(Y->rmap->n*Y->cmap->n,&N);CHKERRQ(ierr); ierr = PetscCuBLASIntCast(Y->rmap->n,&m);CHKERRQ(ierr); ierr = PetscCuBLASIntCast(y->lda,&lday);CHKERRQ(ierr); ierr = PetscInfo2(Y,"Performing Scale %d x %d on backend\n",Y->rmap->n,Y->cmap->n);CHKERRQ(ierr); ierr = PetscLogGpuTimeBegin();CHKERRQ(ierr); if (lday>m) { for (j=0; j<Y->cmap->n; j++) { berr = cublasXscal(cublasv2handle,m,&alpha,dy+lday*j,one);CHKERRCUBLAS(berr); } } else { berr = cublasXscal(cublasv2handle,N,&alpha,dy,one);CHKERRCUBLAS(berr); } ierr = PetscLogGpuTimeEnd();CHKERRQ(ierr); ierr = PetscLogGpuFlops(N);CHKERRQ(ierr); ierr = MatDenseCUDARestoreArray(Y,&dy);CHKERRQ(ierr); PetscFunctionReturn(0); } PetscErrorCode MatAXPY_SeqDenseCUDA(Mat Y,PetscScalar alpha,Mat X,MatStructure str) { Mat_SeqDense *x = (Mat_SeqDense*)X->data; Mat_SeqDense *y = (Mat_SeqDense*)Y->data; const PetscScalar *dx; PetscScalar *dy; PetscCuBLASInt j,N,m,ldax,lday,one = 1; cublasHandle_t cublasv2handle; cublasStatus_t berr; PetscErrorCode ierr; PetscFunctionBegin; if (!X->rmap->n || !X->cmap->n) PetscFunctionReturn(0); ierr = 
PetscCUBLASGetHandle(&cublasv2handle);CHKERRQ(ierr); ierr = MatDenseCUDAGetArrayRead(X,&dx);CHKERRQ(ierr); if (alpha != 0.0) { ierr = MatDenseCUDAGetArray(Y,&dy);CHKERRQ(ierr); } else { ierr = MatDenseCUDAGetArrayWrite(Y,&dy);CHKERRQ(ierr); } ierr = PetscCuBLASIntCast(X->rmap->n*X->cmap->n,&N);CHKERRQ(ierr); ierr = PetscCuBLASIntCast(X->rmap->n,&m);CHKERRQ(ierr); ierr = PetscCuBLASIntCast(x->lda,&ldax);CHKERRQ(ierr); ierr = PetscCuBLASIntCast(y->lda,&lday);CHKERRQ(ierr); ierr = PetscInfo2(Y,"Performing AXPY %d x %d on backend\n",Y->rmap->n,Y->cmap->n);CHKERRQ(ierr); ierr = PetscLogGpuTimeBegin();CHKERRQ(ierr); if (ldax>m || lday>m) { for (j=0; j<X->cmap->n; j++) { berr = cublasXaxpy(cublasv2handle,m,&alpha,dx+j*ldax,one,dy+j*lday,one);CHKERRCUBLAS(berr); } } else { berr = cublasXaxpy(cublasv2handle,N,&alpha,dx,one,dy,one);CHKERRCUBLAS(berr); } ierr = PetscLogGpuTimeEnd();CHKERRQ(ierr); ierr = PetscLogGpuFlops(PetscMax(2.*N-1,0));CHKERRQ(ierr); ierr = MatDenseCUDARestoreArrayRead(X,&dx);CHKERRQ(ierr); if (alpha != 0.0) { ierr = MatDenseCUDARestoreArray(Y,&dy);CHKERRQ(ierr); } else { ierr = MatDenseCUDARestoreArrayWrite(Y,&dy);CHKERRQ(ierr); } PetscFunctionReturn(0); } static PetscErrorCode MatReset_SeqDenseCUDA(Mat A) { Mat_SeqDenseCUDA *dA = (Mat_SeqDenseCUDA*)A->spptr; cudaError_t cerr; PetscErrorCode ierr; PetscFunctionBegin; if (dA) { if (dA->unplacedarray) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ORDER,"MatDenseCUDAResetArray() must be called first"); if (!dA->user_alloc) { cerr = cudaFree(dA->d_v);CHKERRCUDA(cerr); } cerr = cudaFree(dA->d_fact_tau);CHKERRCUDA(cerr); cerr = cudaFree(dA->d_fact_ipiv);CHKERRCUDA(cerr); cerr = cudaFree(dA->d_fact_info);CHKERRCUDA(cerr); cerr = cudaFree(dA->d_fact_work);CHKERRCUDA(cerr); ierr = VecDestroy(&dA->workvec);CHKERRQ(ierr); } ierr = PetscFree(A->spptr);CHKERRQ(ierr); PetscFunctionReturn(0); } PetscErrorCode MatDestroy_SeqDenseCUDA(Mat A) { Mat_SeqDense *a = (Mat_SeqDense*)A->data; PetscErrorCode ierr; PetscFunctionBegin; /* prevent to copy back data if we own the data pointer */ if (!a->user_alloc) { A->offloadmask = PETSC_OFFLOAD_CPU; } ierr = MatConvert_SeqDenseCUDA_SeqDense(A,MATSEQDENSE,MAT_INPLACE_MATRIX,&A);CHKERRQ(ierr); ierr = MatDestroy_SeqDense(A);CHKERRQ(ierr); PetscFunctionReturn(0); } PetscErrorCode MatDuplicate_SeqDenseCUDA(Mat A,MatDuplicateOption cpvalues,Mat *B) { PetscErrorCode ierr; MatDuplicateOption hcpvalues = (cpvalues == MAT_COPY_VALUES && A->offloadmask != PETSC_OFFLOAD_CPU) ? 
MAT_DO_NOT_COPY_VALUES : cpvalues; PetscFunctionBegin; ierr = MatCreate(PetscObjectComm((PetscObject)A),B);CHKERRQ(ierr); ierr = MatSetSizes(*B,A->rmap->n,A->cmap->n,A->rmap->n,A->cmap->n);CHKERRQ(ierr); ierr = MatSetType(*B,((PetscObject)A)->type_name);CHKERRQ(ierr); ierr = MatDuplicateNoCreate_SeqDense(*B,A,hcpvalues);CHKERRQ(ierr); if (cpvalues == MAT_COPY_VALUES && hcpvalues != MAT_COPY_VALUES) { ierr = MatCopy_SeqDenseCUDA(A,*B,SAME_NONZERO_PATTERN);CHKERRQ(ierr); } PetscFunctionReturn(0); } static PetscErrorCode MatGetColumnVector_SeqDenseCUDA(Mat A,Vec v,PetscInt col) { Mat_SeqDense *a = (Mat_SeqDense*)A->data; Mat_SeqDenseCUDA *dA = (Mat_SeqDenseCUDA*)A->spptr; PetscErrorCode ierr; PetscScalar *x; PetscBool viscuda; cudaError_t cerr; PetscFunctionBegin; ierr = PetscObjectTypeCompareAny((PetscObject)v,&viscuda,VECSEQCUDA,VECMPICUDA,VECCUDA,"");CHKERRQ(ierr); if (viscuda && !v->boundtocpu) { /* update device data */ ierr = VecCUDAGetArrayWrite(v,&x);CHKERRQ(ierr); if (A->offloadmask & PETSC_OFFLOAD_GPU) { cerr = cudaMemcpy(x,dA->d_v + col*a->lda,A->rmap->n*sizeof(PetscScalar),cudaMemcpyHostToHost);CHKERRCUDA(cerr); } else { cerr = cudaMemcpy(x,a->v + col*a->lda,A->rmap->n*sizeof(PetscScalar),cudaMemcpyHostToDevice);CHKERRCUDA(cerr); } ierr = VecCUDARestoreArrayWrite(v,&x);CHKERRQ(ierr); } else { /* update host data */ ierr = VecGetArrayWrite(v,&x);CHKERRQ(ierr); if (A->offloadmask == PETSC_OFFLOAD_UNALLOCATED || A->offloadmask & PETSC_OFFLOAD_CPU) { ierr = PetscArraycpy(x,a->v+col*a->lda,A->rmap->n);CHKERRQ(ierr); } else if (A->offloadmask & PETSC_OFFLOAD_GPU) { cerr = cudaMemcpy(x,dA->d_v + col*a->lda,A->rmap->n*sizeof(PetscScalar),cudaMemcpyDeviceToHost);CHKERRCUDA(cerr); } ierr = VecRestoreArrayWrite(v,&x);CHKERRQ(ierr); } PetscFunctionReturn(0); } PETSC_INTERN PetscErrorCode MatGetFactor_seqdense_cuda(Mat A,MatFactorType ftype,Mat *fact) { PetscErrorCode ierr; PetscFunctionBegin; ierr = MatCreate(PetscObjectComm((PetscObject)A),fact);CHKERRQ(ierr); ierr = MatSetSizes(*fact,A->rmap->n,A->cmap->n,A->rmap->n,A->cmap->n);CHKERRQ(ierr); ierr = MatSetType(*fact,MATSEQDENSECUDA);CHKERRQ(ierr); if (ftype == MAT_FACTOR_LU || ftype == MAT_FACTOR_ILU) { (*fact)->ops->lufactorsymbolic = MatLUFactorSymbolic_SeqDense; (*fact)->ops->ilufactorsymbolic = MatLUFactorSymbolic_SeqDense; } else if (ftype == MAT_FACTOR_CHOLESKY || ftype == MAT_FACTOR_ICC) { (*fact)->ops->choleskyfactorsymbolic = MatCholeskyFactorSymbolic_SeqDense; } else if (ftype == MAT_FACTOR_QR) { ierr = PetscObjectComposeFunction((PetscObject)(*fact),"MatQRFactor_C",MatQRFactor_SeqDense);CHKERRQ(ierr); ierr = PetscObjectComposeFunction((PetscObject)(*fact),"MatQRFactorSymbolic_C",MatQRFactorSymbolic_SeqDense);CHKERRQ(ierr); } (*fact)->factortype = ftype; ierr = PetscFree((*fact)->solvertype);CHKERRQ(ierr); ierr = PetscStrallocpy(MATSOLVERCUDA,&(*fact)->solvertype);CHKERRQ(ierr); ierr = PetscStrallocpy(MATORDERINGEXTERNAL,(char**)&(*fact)->preferredordering[MAT_FACTOR_LU]);CHKERRQ(ierr); ierr = PetscStrallocpy(MATORDERINGEXTERNAL,(char**)&(*fact)->preferredordering[MAT_FACTOR_ILU]);CHKERRQ(ierr); ierr = PetscStrallocpy(MATORDERINGEXTERNAL,(char**)&(*fact)->preferredordering[MAT_FACTOR_CHOLESKY]);CHKERRQ(ierr); ierr = PetscStrallocpy(MATORDERINGEXTERNAL,(char**)&(*fact)->preferredordering[MAT_FACTOR_ICC]);CHKERRQ(ierr); PetscFunctionReturn(0); } static PetscErrorCode MatDenseGetColumnVec_SeqDenseCUDA(Mat A,PetscInt col,Vec *v) { Mat_SeqDense *a = (Mat_SeqDense*)A->data; PetscErrorCode ierr; PetscFunctionBegin; if (a->vecinuse) 
SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ORDER,"Need to call MatDenseRestoreColumnVec() first"); if (a->matinuse) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ORDER,"Need to call MatDenseRestoreSubMatrix() first"); ierr = MatDenseCUDAGetArray(A,(PetscScalar**)&a->ptrinuse);CHKERRQ(ierr); if (!a->cvec) { /* we pass the data of A, to prevent allocating needless GPU memory the first time VecCUDAPlaceArray is called */ ierr = VecCreateSeqCUDAWithArray(PetscObjectComm((PetscObject)A),A->rmap->bs,A->rmap->n,a->ptrinuse,&a->cvec);CHKERRQ(ierr); ierr = PetscLogObjectParent((PetscObject)A,(PetscObject)a->cvec);CHKERRQ(ierr); } a->vecinuse = col + 1; ierr = VecCUDAPlaceArray(a->cvec,a->ptrinuse + (size_t)col * (size_t)a->lda);CHKERRQ(ierr); *v = a->cvec; PetscFunctionReturn(0); } static PetscErrorCode MatDenseRestoreColumnVec_SeqDenseCUDA(Mat A,PetscInt col,Vec *v) { Mat_SeqDense *a = (Mat_SeqDense*)A->data; PetscErrorCode ierr; PetscFunctionBegin; if (!a->vecinuse) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ORDER,"Need to call MatDenseGetColumnVec() first"); if (!a->cvec) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_PLIB,"Missing internal column vector"); a->vecinuse = 0; ierr = VecCUDAResetArray(a->cvec);CHKERRQ(ierr); ierr = MatDenseCUDARestoreArray(A,(PetscScalar**)&a->ptrinuse);CHKERRQ(ierr); *v = NULL; PetscFunctionReturn(0); } static PetscErrorCode MatDenseGetColumnVecRead_SeqDenseCUDA(Mat A,PetscInt col,Vec *v) { Mat_SeqDense *a = (Mat_SeqDense*)A->data; PetscErrorCode ierr; PetscFunctionBegin; if (a->vecinuse) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ORDER,"Need to call MatDenseRestoreColumnVec() first"); if (a->matinuse) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ORDER,"Need to call MatDenseRestoreSubMatrix() first"); ierr = MatDenseCUDAGetArrayRead(A,&a->ptrinuse);CHKERRQ(ierr); if (!a->cvec) { /* we pass the data of A, to prevent allocating needless GPU memory the first time VecCUDAPlaceArray is called */ ierr = VecCreateSeqCUDAWithArray(PetscObjectComm((PetscObject)A),A->rmap->bs,A->rmap->n,a->ptrinuse,&a->cvec);CHKERRQ(ierr); ierr = PetscLogObjectParent((PetscObject)A,(PetscObject)a->cvec);CHKERRQ(ierr); } a->vecinuse = col + 1; ierr = VecCUDAPlaceArray(a->cvec,a->ptrinuse + (size_t)col * (size_t)a->lda);CHKERRQ(ierr); ierr = VecLockReadPush(a->cvec);CHKERRQ(ierr); *v = a->cvec; PetscFunctionReturn(0); } static PetscErrorCode MatDenseRestoreColumnVecRead_SeqDenseCUDA(Mat A,PetscInt col,Vec *v) { Mat_SeqDense *a = (Mat_SeqDense*)A->data; PetscErrorCode ierr; PetscFunctionBegin; if (!a->vecinuse) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ORDER,"Need to call MatDenseGetColumnVec() first"); if (!a->cvec) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_PLIB,"Missing internal column vector"); a->vecinuse = 0; ierr = VecLockReadPop(a->cvec);CHKERRQ(ierr); ierr = VecCUDAResetArray(a->cvec);CHKERRQ(ierr); ierr = MatDenseCUDARestoreArrayRead(A,&a->ptrinuse);CHKERRQ(ierr); *v = NULL; PetscFunctionReturn(0); } static PetscErrorCode MatDenseGetColumnVecWrite_SeqDenseCUDA(Mat A,PetscInt col,Vec *v) { Mat_SeqDense *a = (Mat_SeqDense*)A->data; PetscErrorCode ierr; PetscFunctionBegin; if (a->vecinuse) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ORDER,"Need to call MatDenseRestoreColumnVec() first"); if (a->matinuse) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ORDER,"Need to call MatDenseRestoreSubMatrix() first"); ierr = MatDenseCUDAGetArrayWrite(A,(PetscScalar**)&a->ptrinuse);CHKERRQ(ierr); if (!a->cvec) { /* we pass the data of A, to prevent allocating needless GPU memory the first time VecCUDAPlaceArray is called */ ierr = 
VecCreateSeqCUDAWithArray(PetscObjectComm((PetscObject)A),A->rmap->bs,A->rmap->n,a->ptrinuse,&a->cvec);CHKERRQ(ierr); ierr = PetscLogObjectParent((PetscObject)A,(PetscObject)a->cvec);CHKERRQ(ierr); } a->vecinuse = col + 1; ierr = VecCUDAPlaceArray(a->cvec,a->ptrinuse + (size_t)col * (size_t)a->lda);CHKERRQ(ierr); *v = a->cvec; PetscFunctionReturn(0); } static PetscErrorCode MatDenseRestoreColumnVecWrite_SeqDenseCUDA(Mat A,PetscInt col,Vec *v) { Mat_SeqDense *a = (Mat_SeqDense*)A->data; PetscErrorCode ierr; PetscFunctionBegin; if (!a->vecinuse) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ORDER,"Need to call MatDenseGetColumnVec() first"); if (!a->cvec) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_PLIB,"Missing internal column vector"); a->vecinuse = 0; ierr = VecCUDAResetArray(a->cvec);CHKERRQ(ierr); ierr = MatDenseCUDARestoreArrayWrite(A,(PetscScalar**)&a->ptrinuse);CHKERRQ(ierr); *v = NULL; PetscFunctionReturn(0); } static PetscErrorCode MatDenseGetSubMatrix_SeqDenseCUDA(Mat A,PetscInt cbegin,PetscInt cend,Mat *v) { Mat_SeqDense *a = (Mat_SeqDense*)A->data; Mat_SeqDenseCUDA *dA = (Mat_SeqDenseCUDA*)A->spptr; PetscErrorCode ierr; PetscFunctionBegin; if (a->vecinuse) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ORDER,"Need to call MatDenseRestoreColumnVec() first"); if (a->matinuse) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ORDER,"Need to call MatDenseRestoreSubMatrix() first"); if (a->cmat && cend-cbegin != a->cmat->cmap->N) { ierr = MatDestroy(&a->cmat);CHKERRQ(ierr); } ierr = MatSeqDenseCUDACopyToGPU(A);CHKERRQ(ierr); if (!a->cmat) { ierr = MatCreateDenseCUDA(PetscObjectComm((PetscObject)A),A->rmap->n,PETSC_DECIDE,A->rmap->N,cend-cbegin,dA->d_v + (size_t)cbegin * (size_t)a->lda,&a->cmat);CHKERRQ(ierr); ierr = PetscLogObjectParent((PetscObject)A,(PetscObject)a->cmat);CHKERRQ(ierr); } else { ierr = MatDenseCUDAPlaceArray(a->cmat,dA->d_v + (size_t)cbegin * (size_t)a->lda);CHKERRQ(ierr); } ierr = MatDenseSetLDA(a->cmat,a->lda);CHKERRQ(ierr); if (a->v) { ierr = MatDensePlaceArray(a->cmat,a->v + (size_t)cbegin * (size_t)a->lda);CHKERRQ(ierr); } a->cmat->offloadmask = A->offloadmask; a->matinuse = cbegin + 1; *v = a->cmat; PetscFunctionReturn(0); } static PetscErrorCode MatDenseRestoreSubMatrix_SeqDenseCUDA(Mat A,Mat *v) { Mat_SeqDense *a = (Mat_SeqDense*)A->data; PetscErrorCode ierr; PetscFunctionBegin; if (!a->matinuse) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ORDER,"Need to call MatDenseGetSubMatrix() first"); if (!a->cmat) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_PLIB,"Missing internal column matrix"); if (*v != a->cmat) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONG,"Not the matrix obtained from MatDenseGetSubMatrix()"); a->matinuse = 0; A->offloadmask = PETSC_OFFLOAD_GPU; ierr = MatDenseCUDAResetArray(a->cmat);CHKERRQ(ierr); ierr = MatDenseResetArray(a->cmat);CHKERRQ(ierr); *v = NULL; PetscFunctionReturn(0); } static PetscErrorCode MatDenseSetLDA_SeqDenseCUDA(Mat A,PetscInt lda) { Mat_SeqDense *cA = (Mat_SeqDense*)A->data; Mat_SeqDenseCUDA *dA = (Mat_SeqDenseCUDA*)A->spptr; PetscBool data; PetscFunctionBegin; data = (PetscBool)((A->rmap->n > 0 && A->cmap->n > 0) ? (dA->d_v ? 
PETSC_TRUE : PETSC_FALSE) : PETSC_FALSE); if (!dA->user_alloc && data && cA->lda!=lda) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ORDER,"LDA cannot be changed after allocation of internal storage"); if (lda < A->rmap->n) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_SIZ,"LDA %D must be at least matrix dimension %D",lda,A->rmap->n); cA->lda = lda; PetscFunctionReturn(0); } static PetscErrorCode MatBindToCPU_SeqDenseCUDA(Mat A,PetscBool flg) { Mat_SeqDense *a = (Mat_SeqDense*)A->data; PetscErrorCode ierr; PetscFunctionBegin; if (a->vecinuse) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ORDER,"Need to call MatDenseRestoreColumnVec() first"); if (a->matinuse) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ORDER,"Need to call MatDenseRestoreSubMatrix() first"); A->boundtocpu = flg; if (!flg) { PetscBool iscuda; ierr = PetscObjectTypeCompare((PetscObject)a->cvec,VECSEQCUDA,&iscuda);CHKERRQ(ierr); if (!iscuda) { ierr = VecDestroy(&a->cvec);CHKERRQ(ierr); } ierr = PetscObjectTypeCompare((PetscObject)a->cmat,MATSEQDENSECUDA,&iscuda);CHKERRQ(ierr); if (!iscuda) { ierr = MatDestroy(&a->cmat);CHKERRQ(ierr); } ierr = PetscObjectComposeFunction((PetscObject)A,"MatDenseGetArray_C",MatDenseGetArray_SeqDenseCUDA);CHKERRQ(ierr); ierr = PetscObjectComposeFunction((PetscObject)A,"MatDenseGetArrayRead_C",MatDenseGetArrayRead_SeqDenseCUDA);CHKERRQ(ierr); ierr = PetscObjectComposeFunction((PetscObject)A,"MatDenseGetArrayWrite_C",MatDenseGetArrayWrite_SeqDenseCUDA);CHKERRQ(ierr); ierr = PetscObjectComposeFunction((PetscObject)A,"MatDenseGetColumnVec_C",MatDenseGetColumnVec_SeqDenseCUDA);CHKERRQ(ierr); ierr = PetscObjectComposeFunction((PetscObject)A,"MatDenseRestoreColumnVec_C",MatDenseRestoreColumnVec_SeqDenseCUDA);CHKERRQ(ierr); ierr = PetscObjectComposeFunction((PetscObject)A,"MatDenseGetColumnVecRead_C",MatDenseGetColumnVecRead_SeqDenseCUDA);CHKERRQ(ierr); ierr = PetscObjectComposeFunction((PetscObject)A,"MatDenseRestoreColumnVecRead_C",MatDenseRestoreColumnVecRead_SeqDenseCUDA);CHKERRQ(ierr); ierr = PetscObjectComposeFunction((PetscObject)A,"MatDenseGetColumnVecWrite_C",MatDenseGetColumnVecWrite_SeqDenseCUDA);CHKERRQ(ierr); ierr = PetscObjectComposeFunction((PetscObject)A,"MatDenseRestoreColumnVecWrite_C",MatDenseRestoreColumnVecWrite_SeqDenseCUDA);CHKERRQ(ierr); ierr = PetscObjectComposeFunction((PetscObject)A,"MatDenseGetSubMatrix_C",MatDenseGetSubMatrix_SeqDenseCUDA);CHKERRQ(ierr); ierr = PetscObjectComposeFunction((PetscObject)A,"MatDenseRestoreSubMatrix_C",MatDenseRestoreSubMatrix_SeqDenseCUDA);CHKERRQ(ierr); ierr = PetscObjectComposeFunction((PetscObject)A,"MatDenseSetLDA_C",MatDenseSetLDA_SeqDenseCUDA);CHKERRQ(ierr); ierr = PetscObjectComposeFunction((PetscObject)A,"MatQRFactor_C",MatQRFactor_SeqDenseCUDA);CHKERRQ(ierr); A->ops->duplicate = MatDuplicate_SeqDenseCUDA; A->ops->mult = MatMult_SeqDenseCUDA; A->ops->multadd = MatMultAdd_SeqDenseCUDA; A->ops->multtranspose = MatMultTranspose_SeqDenseCUDA; A->ops->multtransposeadd = MatMultTransposeAdd_SeqDenseCUDA; A->ops->matmultnumeric = MatMatMultNumeric_SeqDenseCUDA_SeqDenseCUDA; A->ops->mattransposemultnumeric = MatMatTransposeMultNumeric_SeqDenseCUDA_SeqDenseCUDA; A->ops->transposematmultnumeric = MatTransposeMatMultNumeric_SeqDenseCUDA_SeqDenseCUDA; A->ops->axpy = MatAXPY_SeqDenseCUDA; A->ops->choleskyfactor = MatCholeskyFactor_SeqDenseCUDA; A->ops->lufactor = MatLUFactor_SeqDenseCUDA; A->ops->productsetfromoptions = MatProductSetFromOptions_SeqDenseCUDA; A->ops->getcolumnvector = MatGetColumnVector_SeqDenseCUDA; A->ops->scale = MatScale_SeqDenseCUDA; A->ops->copy = MatCopy_SeqDenseCUDA; 
A->ops->zeroentries = MatZeroEntries_SeqDenseCUDA; } else { /* make sure we have an up-to-date copy on the CPU */ ierr = MatSeqDenseCUDACopyFromGPU(A);CHKERRQ(ierr); ierr = PetscObjectComposeFunction((PetscObject)A,"MatDenseGetArray_C",MatDenseGetArray_SeqDense);CHKERRQ(ierr); ierr = PetscObjectComposeFunction((PetscObject)A,"MatDenseGetArrayRead_C",MatDenseGetArray_SeqDense);CHKERRQ(ierr); ierr = PetscObjectComposeFunction((PetscObject)A,"MatDenseGetArrayWrite_C",MatDenseGetArray_SeqDense);CHKERRQ(ierr); ierr = PetscObjectComposeFunction((PetscObject)A,"MatDenseGetColumnVec_C",MatDenseGetColumnVec_SeqDense);CHKERRQ(ierr); ierr = PetscObjectComposeFunction((PetscObject)A,"MatDenseRestoreColumnVec_C",MatDenseRestoreColumnVec_SeqDense);CHKERRQ(ierr); ierr = PetscObjectComposeFunction((PetscObject)A,"MatDenseGetColumnVecRead_C",MatDenseGetColumnVecRead_SeqDense);CHKERRQ(ierr); ierr = PetscObjectComposeFunction((PetscObject)A,"MatDenseRestoreColumnVecRead_C",MatDenseRestoreColumnVecRead_SeqDense);CHKERRQ(ierr); ierr = PetscObjectComposeFunction((PetscObject)A,"MatDenseGetColumnVecWrite_C",MatDenseGetColumnVecWrite_SeqDense);CHKERRQ(ierr); ierr = PetscObjectComposeFunction((PetscObject)A,"MatDenseRestoreColumnVecWrite_C",MatDenseRestoreColumnVecWrite_SeqDense);CHKERRQ(ierr); ierr = PetscObjectComposeFunction((PetscObject)A,"MatDenseGetSubMatrix_C",MatDenseGetSubMatrix_SeqDense);CHKERRQ(ierr); ierr = PetscObjectComposeFunction((PetscObject)A,"MatDenseRestoreSubMatrix_C",MatDenseRestoreSubMatrix_SeqDense);CHKERRQ(ierr); ierr = PetscObjectComposeFunction((PetscObject)A,"MatDenseSetLDA_C",MatDenseSetLDA_SeqDense);CHKERRQ(ierr); ierr = PetscObjectComposeFunction((PetscObject)A,"MatQRFactor_C",MatQRFactor_SeqDense);CHKERRQ(ierr); A->ops->duplicate = MatDuplicate_SeqDense; A->ops->mult = MatMult_SeqDense; A->ops->multadd = MatMultAdd_SeqDense; A->ops->multtranspose = MatMultTranspose_SeqDense; A->ops->multtransposeadd = MatMultTransposeAdd_SeqDense; A->ops->productsetfromoptions = MatProductSetFromOptions_SeqDense; A->ops->matmultnumeric = MatMatMultNumeric_SeqDense_SeqDense; A->ops->mattransposemultnumeric = MatMatTransposeMultNumeric_SeqDense_SeqDense; A->ops->transposematmultnumeric = MatTransposeMatMultNumeric_SeqDense_SeqDense; A->ops->axpy = MatAXPY_SeqDense; A->ops->choleskyfactor = MatCholeskyFactor_SeqDense; A->ops->lufactor = MatLUFactor_SeqDense; A->ops->productsetfromoptions = MatProductSetFromOptions_SeqDense; A->ops->getcolumnvector = MatGetColumnVector_SeqDense; A->ops->scale = MatScale_SeqDense; A->ops->copy = MatCopy_SeqDense; A->ops->zeroentries = MatZeroEntries_SeqDense; } if (a->cmat) { ierr = MatBindToCPU(a->cmat,flg);CHKERRQ(ierr); } PetscFunctionReturn(0); } PetscErrorCode MatConvert_SeqDenseCUDA_SeqDense(Mat M,MatType type,MatReuse reuse,Mat *newmat) { Mat B; PetscErrorCode ierr; PetscFunctionBegin; if (reuse == MAT_REUSE_MATRIX || reuse == MAT_INITIAL_MATRIX) { /* TODO these cases should be optimized */ ierr = MatConvert_Basic(M,type,reuse,newmat);CHKERRQ(ierr); PetscFunctionReturn(0); } B = *newmat; ierr = MatBindToCPU_SeqDenseCUDA(B,PETSC_TRUE);CHKERRQ(ierr); ierr = MatReset_SeqDenseCUDA(B);CHKERRQ(ierr); ierr = PetscFree(B->defaultvectype);CHKERRQ(ierr); ierr = PetscStrallocpy(VECSTANDARD,&B->defaultvectype);CHKERRQ(ierr); ierr = PetscObjectChangeTypeName((PetscObject)B,MATSEQDENSE);CHKERRQ(ierr); ierr = PetscObjectComposeFunction((PetscObject)B,"MatConvert_seqdensecuda_seqdense_C",NULL);CHKERRQ(ierr); ierr = 
PetscObjectComposeFunction((PetscObject)B,"MatDenseCUDAGetArray_C",NULL);CHKERRQ(ierr); ierr = PetscObjectComposeFunction((PetscObject)B,"MatDenseCUDAGetArrayRead_C",NULL);CHKERRQ(ierr); ierr = PetscObjectComposeFunction((PetscObject)B,"MatDenseCUDAGetArrayWrite_C",NULL);CHKERRQ(ierr); ierr = PetscObjectComposeFunction((PetscObject)B,"MatDenseCUDARestoreArray_C",NULL);CHKERRQ(ierr); ierr = PetscObjectComposeFunction((PetscObject)B,"MatDenseCUDARestoreArrayRead_C",NULL);CHKERRQ(ierr); ierr = PetscObjectComposeFunction((PetscObject)B,"MatDenseCUDARestoreArrayWrite_C",NULL);CHKERRQ(ierr); ierr = PetscObjectComposeFunction((PetscObject)B,"MatDenseCUDAPlaceArray_C",NULL);CHKERRQ(ierr); ierr = PetscObjectComposeFunction((PetscObject)B,"MatDenseCUDAResetArray_C",NULL);CHKERRQ(ierr); ierr = PetscObjectComposeFunction((PetscObject)B,"MatDenseCUDAReplaceArray_C",NULL);CHKERRQ(ierr); ierr = PetscObjectComposeFunction((PetscObject)B,"MatProductSetFromOptions_seqaij_seqdensecuda_C",NULL);CHKERRQ(ierr); B->ops->bindtocpu = NULL; B->ops->destroy = MatDestroy_SeqDense; B->offloadmask = PETSC_OFFLOAD_CPU; PetscFunctionReturn(0); } PetscErrorCode MatConvert_SeqDense_SeqDenseCUDA(Mat M,MatType type,MatReuse reuse,Mat *newmat) { Mat_SeqDenseCUDA *dB; Mat B; PetscErrorCode ierr; PetscFunctionBegin; ierr = PetscDeviceInitialize(PETSC_DEVICE_CUDA);CHKERRQ(ierr); if (reuse == MAT_REUSE_MATRIX || reuse == MAT_INITIAL_MATRIX) { /* TODO these cases should be optimized */ ierr = MatConvert_Basic(M,type,reuse,newmat);CHKERRQ(ierr); PetscFunctionReturn(0); } B = *newmat; ierr = PetscFree(B->defaultvectype);CHKERRQ(ierr); ierr = PetscStrallocpy(VECCUDA,&B->defaultvectype);CHKERRQ(ierr); ierr = PetscObjectChangeTypeName((PetscObject)B,MATSEQDENSECUDA);CHKERRQ(ierr); ierr = PetscObjectComposeFunction((PetscObject)B,"MatConvert_seqdensecuda_seqdense_C", MatConvert_SeqDenseCUDA_SeqDense);CHKERRQ(ierr); ierr = PetscObjectComposeFunction((PetscObject)B,"MatDenseCUDAGetArray_C", MatDenseCUDAGetArray_SeqDenseCUDA);CHKERRQ(ierr); ierr = PetscObjectComposeFunction((PetscObject)B,"MatDenseCUDAGetArrayRead_C", MatDenseCUDAGetArrayRead_SeqDenseCUDA);CHKERRQ(ierr); ierr = PetscObjectComposeFunction((PetscObject)B,"MatDenseCUDAGetArrayWrite_C", MatDenseCUDAGetArrayWrite_SeqDenseCUDA);CHKERRQ(ierr); ierr = PetscObjectComposeFunction((PetscObject)B,"MatDenseCUDARestoreArray_C", MatDenseCUDARestoreArray_SeqDenseCUDA);CHKERRQ(ierr); ierr = PetscObjectComposeFunction((PetscObject)B,"MatDenseCUDARestoreArrayRead_C", MatDenseCUDARestoreArrayRead_SeqDenseCUDA);CHKERRQ(ierr); ierr = PetscObjectComposeFunction((PetscObject)B,"MatDenseCUDARestoreArrayWrite_C", MatDenseCUDARestoreArrayWrite_SeqDenseCUDA);CHKERRQ(ierr); ierr = PetscObjectComposeFunction((PetscObject)B,"MatDenseCUDAPlaceArray_C", MatDenseCUDAPlaceArray_SeqDenseCUDA);CHKERRQ(ierr); ierr = PetscObjectComposeFunction((PetscObject)B,"MatDenseCUDAResetArray_C", MatDenseCUDAResetArray_SeqDenseCUDA);CHKERRQ(ierr); ierr = PetscObjectComposeFunction((PetscObject)B,"MatDenseCUDAReplaceArray_C", MatDenseCUDAReplaceArray_SeqDenseCUDA);CHKERRQ(ierr); ierr = PetscObjectComposeFunction((PetscObject)B,"MatProductSetFromOptions_seqaij_seqdensecuda_C",MatProductSetFromOptions_SeqAIJ_SeqDense);CHKERRQ(ierr); ierr = PetscNewLog(B,&dB);CHKERRQ(ierr); B->spptr = dB; B->offloadmask = PETSC_OFFLOAD_UNALLOCATED; ierr = MatBindToCPU_SeqDenseCUDA(B,PETSC_FALSE);CHKERRQ(ierr); B->ops->bindtocpu = MatBindToCPU_SeqDenseCUDA; B->ops->destroy = MatDestroy_SeqDenseCUDA; PetscFunctionReturn(0); } /*@C 
MatCreateSeqDenseCUDA - Creates a sequential matrix in dense format using CUDA. Collective Input Parameters: + comm - MPI communicator . m - number of rows . n - number of columns - data - optional location of GPU matrix data. Set data=NULL for PETSc to control matrix memory allocation. Output Parameter: . A - the matrix Notes: Level: intermediate .seealso: MatCreate(), MatCreateSeqDense() @*/ PetscErrorCode MatCreateSeqDenseCUDA(MPI_Comm comm,PetscInt m,PetscInt n,PetscScalar *data,Mat *A) { PetscErrorCode ierr; PetscMPIInt size; PetscFunctionBegin; ierr = MPI_Comm_size(comm,&size);CHKERRMPI(ierr); if (size > 1) SETERRQ1(comm,PETSC_ERR_ARG_WRONG,"Invalid communicator size %d",size); ierr = MatCreate(comm,A);CHKERRQ(ierr); ierr = MatSetSizes(*A,m,n,m,n);CHKERRQ(ierr); ierr = MatSetType(*A,MATSEQDENSECUDA);CHKERRQ(ierr); ierr = MatSeqDenseCUDASetPreallocation(*A,data);CHKERRQ(ierr); PetscFunctionReturn(0); } /*MC MATSEQDENSECUDA - MATSEQDENSECUDA = "seqdensecuda" - A matrix type to be used for sequential dense matrices on GPUs. Options Database Keys: . -mat_type seqdensecuda - sets the matrix type to "seqdensecuda" during a call to MatSetFromOptions() Level: beginner M*/ PETSC_EXTERN PetscErrorCode MatCreate_SeqDenseCUDA(Mat B) { PetscErrorCode ierr; PetscFunctionBegin; ierr = PetscDeviceInitialize(PETSC_DEVICE_CUDA);CHKERRQ(ierr); ierr = MatCreate_SeqDense(B);CHKERRQ(ierr); ierr = MatConvert_SeqDense_SeqDenseCUDA(B,MATSEQDENSECUDA,MAT_INPLACE_MATRIX,&B);CHKERRQ(ierr); PetscFunctionReturn(0); }
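The routines above are wired in through MatBindToCPU_SeqDenseCUDA and the MATSEQDENSECUDA constructor, so they are reached through the ordinary PETSc matrix interface. The driver below is a minimal sketch, not part of the file pair, assuming a CUDA-enabled PETSc build; it uses only public calls that appear in or are implied by the code above (MatCreateSeqDenseCUDA, VecCreateSeqCUDA, MatMult).

/* hypothetical driver: build a small MATSEQDENSECUDA matrix and apply it */
#include <petscmat.h>

int main(int argc, char **argv)
{
  Mat            A;
  Vec            x, y;
  PetscInt       i, n = 4;
  PetscErrorCode ierr;

  ierr = PetscInitialize(&argc, &argv, NULL, NULL); if (ierr) return ierr;
  ierr = MatCreateSeqDenseCUDA(PETSC_COMM_SELF, n, n, NULL, &A);CHKERRQ(ierr); /* data=NULL: PETSc allocates the device array */
  for (i = 0; i < n; i++) {
    ierr = MatSetValue(A, i, i, 2.0, INSERT_VALUES);CHKERRQ(ierr);
  }
  ierr = MatAssemblyBegin(A, MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
  ierr = MatAssemblyEnd(A, MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
  ierr = VecCreateSeqCUDA(PETSC_COMM_SELF, n, &x);CHKERRQ(ierr);
  ierr = VecDuplicate(x, &y);CHKERRQ(ierr);
  ierr = VecSet(x, 1.0);CHKERRQ(ierr);
  ierr = MatMult(A, x, y);CHKERRQ(ierr); /* dispatches to MatMult_SeqDenseCUDA (cublasXgemv) above */
  ierr = VecDestroy(&x);CHKERRQ(ierr);
  ierr = VecDestroy(&y);CHKERRQ(ierr);
  ierr = MatDestroy(&A);CHKERRQ(ierr);
  ierr = PetscFinalize();
  return ierr;
}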
df140db69673a03c319d22205614ac2c4a2c6c15.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "Force.h" #include "ParticleSystem.h" __global__ void constantForceApply(Particle *particle, int particleSize, const Vec3f *constantForce) { int particleIdx = (threadIdx.x + blockIdx.x * blockDim.x) + (threadIdx.y + blockIdx.y * blockDim.y) * blockDim.x * gridDim.x; if (particleIdx >= particleSize) { return; } particle[particleIdx].addForce(constantForce); } void ConstantForce::applyForce(ParticleSystem *pSystem) { dim3 blocksPerGrid(16, 16); dim3 threadsPerBlock(16, 16); hipLaunchKernelGGL(( constantForceApply), dim3(blocksPerGrid), dim3(threadsPerBlock), 0, 0, pSystem->m_particleList, pSystem->m_particleSize, m_constant); } __global__ void dampingForceApply(Particle *particle, int particleSize, const float *dampingConstant) { int particleIdx = (threadIdx.x + blockIdx.x * blockDim.x) + (threadIdx.y + blockIdx.y * blockDim.y) * blockDim.x * gridDim.x; if (particleIdx >= particleSize) { return; } const Vec3f &particleVelocity = particle[particleIdx].m_velocity; Vec3f dampingForce = -*dampingConstant * particleVelocity; particle[particleIdx].addForce(&dampingForce); } void DampingForce::applyForce(ParticleSystem *pSystem) { dim3 blocksPerGrid(16, 16); dim3 threadsPerBlock(16, 16); hipLaunchKernelGGL(( dampingForceApply), dim3(blocksPerGrid), dim3(threadsPerBlock), 0, 0, pSystem->m_particleList, pSystem->m_particleSize, m_constant); } __global__ void springForceApply(Particle *particle, int particleSize, const float *springConstant, const Vec3f *restLocation) { int particleIdx = (threadIdx.x + blockIdx.x * blockDim.x) + (threadIdx.y + blockIdx.y * blockDim.y) * blockDim.x * gridDim.x; if (particleIdx >= particleSize) { return; } const Vec3f &particleLocation = particle[particleIdx].m_location; Vec3f springForce = *springConstant * (*restLocation - particleLocation); particle[particleIdx].addForce(&springForce); } void SpringForce::applyForce(ParticleSystem *pSystem) { dim3 blocksPerGrid(16, 16); dim3 threadsPerBlock(16, 16); hipLaunchKernelGGL(( springForceApply), dim3(blocksPerGrid), dim3(threadsPerBlock), 0, 0, pSystem->m_particleList, pSystem->m_particleSize, m_constant, m_restLocation); } __global__ void gravityForceApply(Particle *particle, int particleSize, const Vec3f *gravityConstant) { int particleIdx = (threadIdx.x + blockIdx.x * blockDim.x) + (threadIdx.y + blockIdx.y * blockDim.y) * blockDim.x * gridDim.x; if (particleIdx >= particleSize) { return; } float particleMass = particle[particleIdx].m_mass; Vec3f gravityForce = particleMass * -*gravityConstant; particle[particleIdx].addForce(&gravityForce); } void GravityForce::applyForce(ParticleSystem *pSystem) { dim3 blocksPerGrid(16, 16); dim3 threadsPerBlock(16, 16); hipLaunchKernelGGL(( gravityForceApply), dim3(blocksPerGrid), dim3(threadsPerBlock), 0, 0, pSystem->m_particleList, pSystem->m_particleSize, m_constant); } __global__ void springTwoParticleForceApply(Particle *particle, int pairSize, const int *particleIdxList_1, const int *particleIdxList_2, const float *restLengthList, float *springConstant) { int pairIdx = (threadIdx.x + blockIdx.x * blockDim.x) + (threadIdx.y + blockIdx.y * blockDim.y) * blockDim.x * gridDim.x; if (pairIdx >= pairSize) { return; } Particle &particle_1 = particle[particleIdxList_1[pairIdx]]; Particle &particle_2 = particle[particleIdxList_2[pairIdx]]; Vec3f &location_1 = particle_1.m_location; Vec3f &location_2 = particle_2.m_location; float restLength = restLengthList[pairIdx]; Vec3f locationVector = 
location_1 - location_2; Vec3f spring_force = *springConstant * (restLength - locationVector.length()) * locationVector/locationVector.length(); particle_1.addForce(&spring_force); spring_force = -spring_force; particle_2.addForce(&spring_force); } void SpringTwoParticleForce::applyForce(ParticleSystem *pSystem) { dim3 blocksPerGrid(16, 16); dim3 threadsPerBlock(16, 16); hipLaunchKernelGGL(( springTwoParticleForceApply), dim3(blocksPerGrid), dim3(threadsPerBlock), 0, 0, pSystem->m_particleList, m_pairSize, m_particleIdxList_1, m_particleIdxList_2, m_restLengthList, m_constant); }
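For readers of the pair-spring kernel above: with d = x1 - x2 and rest length L0, springTwoParticleForceApply applies plain Hooke's-law forces F1 = k * (L0 - |d|) * d/|d| and F2 = -F1, so the force vanishes at |d| = L0, pulls the particles together when the spring is stretched, and pushes them apart when it is compressed. (This note is a reading aid, not part of the file.)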
df140db69673a03c319d22205614ac2c4a2c6c15.cu
#include "Force.h" #include "ParticleSystem.h" __global__ void constantForceApply(Particle *particle, int particleSize, const Vec3f *constantForce) { int particleIdx = (threadIdx.x + blockIdx.x * blockDim.x) + (threadIdx.y + blockIdx.y * blockDim.y) * blockDim.x * gridDim.x; if (particleIdx >= particleSize) { return; } particle[particleIdx].addForce(constantForce); } void ConstantForce::applyForce(ParticleSystem *pSystem) { dim3 blocksPerGrid(16, 16); dim3 threadsPerBlock(16, 16); constantForceApply<<< blocksPerGrid, threadsPerBlock>>>(pSystem->m_particleList, pSystem->m_particleSize, m_constant); } __global__ void dampingForceApply(Particle *particle, int particleSize, const float *dampingConstant) { int particleIdx = (threadIdx.x + blockIdx.x * blockDim.x) + (threadIdx.y + blockIdx.y * blockDim.y) * blockDim.x * gridDim.x; if (particleIdx >= particleSize) { return; } const Vec3f &particleVelocity = particle[particleIdx].m_velocity; Vec3f dampingForce = -*dampingConstant * particleVelocity; particle[particleIdx].addForce(&dampingForce); } void DampingForce::applyForce(ParticleSystem *pSystem) { dim3 blocksPerGrid(16, 16); dim3 threadsPerBlock(16, 16); dampingForceApply<<< blocksPerGrid, threadsPerBlock>>>(pSystem->m_particleList, pSystem->m_particleSize, m_constant); } __global__ void springForceApply(Particle *particle, int particleSize, const float *springConstant, const Vec3f *restLocation) { int particleIdx = (threadIdx.x + blockIdx.x * blockDim.x) + (threadIdx.y + blockIdx.y * blockDim.y) * blockDim.x * gridDim.x; if (particleIdx >= particleSize) { return; } const Vec3f &particleLocation = particle[particleIdx].m_location; Vec3f springForce = *springConstant * (*restLocation - particleLocation); particle[particleIdx].addForce(&springForce); } void SpringForce::applyForce(ParticleSystem *pSystem) { dim3 blocksPerGrid(16, 16); dim3 threadsPerBlock(16, 16); springForceApply<<< blocksPerGrid, threadsPerBlock>>>(pSystem->m_particleList, pSystem->m_particleSize, m_constant, m_restLocation); } __global__ void gravityForceApply(Particle *particle, int particleSize, const Vec3f *gravityConstant) { int particleIdx = (threadIdx.x + blockIdx.x * blockDim.x) + (threadIdx.y + blockIdx.y * blockDim.y) * blockDim.x * gridDim.x; if (particleIdx >= particleSize) { return; } float particleMass = particle[particleIdx].m_mass; Vec3f gravityForce = particleMass * -*gravityConstant; particle[particleIdx].addForce(&gravityForce); } void GravityForce::applyForce(ParticleSystem *pSystem) { dim3 blocksPerGrid(16, 16); dim3 threadsPerBlock(16, 16); gravityForceApply<<< blocksPerGrid, threadsPerBlock>>>(pSystem->m_particleList, pSystem->m_particleSize, m_constant); } __global__ void springTwoParticleForceApply(Particle *particle, int pairSize, const int *particleIdxList_1, const int *particleIdxList_2, const float *restLengthList, float *springConstant) { int pairIdx = (threadIdx.x + blockIdx.x * blockDim.x) + (threadIdx.y + blockIdx.y * blockDim.y) * blockDim.x * gridDim.x; if (pairIdx >= pairSize) { return; } Particle &particle_1 = particle[particleIdxList_1[pairIdx]]; Particle &particle_2 = particle[particleIdxList_2[pairIdx]]; Vec3f &location_1 = particle_1.m_location; Vec3f &location_2 = particle_2.m_location; float restLength = restLengthList[pairIdx]; Vec3f locationVector = location_1 - location_2; Vec3f spring_force = *springConstant * (restLength - locationVector.length()) * locationVector/locationVector.length(); particle_1.addForce(&spring_force); spring_force = -spring_force; 
particle_2.addForce(&spring_force); } void SpringTwoParticleForce::applyForce(ParticleSystem *pSystem) { dim3 blocksPerGrid(16, 16); dim3 threadsPerBlock(16, 16); springTwoParticleForceApply<<< blocksPerGrid, threadsPerBlock>>>(pSystem->m_particleList, m_pairSize, m_particleIdxList_1, m_particleIdxList_2, m_restLengthList, m_constant); }
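All of the launchers above use a fixed dim3(16, 16) grid and dim3(16, 16) block and flatten the 2-D thread coordinates into one particle index. The snippet below is a standalone sketch of that same pattern with a hypothetical float3 force buffer (it does not use Force.h or ParticleSystem.h), mainly to make the coverage of the fixed launch shape explicit.

// hypothetical standalone kernel: same flat-index computation as constantForceApply et al.
#include <cuda_runtime.h>

__global__ void addConstantForce(float3 *force, int n, float3 f)
{
  int i = (threadIdx.x + blockIdx.x * blockDim.x) +
          (threadIdx.y + blockIdx.y * blockDim.y) * blockDim.x * gridDim.x;
  if (i >= n) { return; }
  force[i].x += f.x;
  force[i].y += f.y;
  force[i].z += f.z;
}

int main()
{
  const int n = 1 << 12;
  float3 *d_force;
  cudaMalloc(&d_force, n * sizeof(float3));
  cudaMemset(d_force, 0, n * sizeof(float3));
  // 16x16 blocks of 16x16 threads = 65536 threads, the maximum particle count this launch shape covers
  dim3 blocksPerGrid(16, 16), threadsPerBlock(16, 16);
  addConstantForce<<<blocksPerGrid, threadsPerBlock>>>(d_force, n, make_float3(0.f, -9.8f, 0.f));
  cudaDeviceSynchronize();
  cudaFree(d_force);
  return 0;
}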
21fa5668e2f88fa09b05e67ef480997095511eb9.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #define TORCH_ASSERT_ONLY_METHOD_OPERATORS #include <ATen/core/Tensor.h> #include <ATen/AccumulateType.h> #include <ATen/Dispatch.h> #include <ATen/TensorUtils.h> #include <ATen/Utils.h> #include <ATen/hip/Atomic.cuh> #include <ATen/hip/HIPContext.h> #include <c10/util/Exception.h> #ifndef AT_PER_OPERATOR_HEADERS #include <ATen/Functions.h> #include <ATen/NativeFunctions.h> #else #include <ATen/ops/adaptive_avg_pool3d_backward_native.h> #include <ATen/ops/adaptive_avg_pool3d_native.h> #include <ATen/ops/empty.h> #include <ATen/ops/zeros_like.h> #endif #include <ATen/native/AdaptivePooling.h> #include <algorithm> #include <cfloat> #include <cmath> namespace at::native { namespace { __device__ inline int64_t start_index(int64_t a, int64_t b, int64_t c) { return (a / b) * c + ((a % b) * c) / b; } __device__ inline int64_t end_index(int64_t a, int64_t b, int64_t c) { return 1 + ((a + 1) * c - 1) / b; } // 5d tensor B x D x T x H x W // All kernels view batch dim B and dim D as collapsed. /* * Description: * this function adaptively average pools an input 5D tensor along dimensions * 2, 3, and 4 5D input, 5D output * * gridDim.y blocks work together on a single 2D output plane specified by * (blockIdx.x + offsetZ). */ template <typename scalar_t, typename accscalar_t> __global__ void adaptiveaveragepool( const scalar_t *input, scalar_t *output, int isizeT, int isizeH, int isizeW, int osizeT, int osizeH, int osizeW, int64_t istrideD, int64_t istrideT, int64_t istrideH, int64_t istrideW, int64_t offsetZ) { // iterates on output pixels int ot, oh, ow; // compute offsets based on thread/block ID int ostartH = blockIdx.y * blockDim.y + threadIdx.y; int oendH = osizeH; int ostepH = gridDim.y * blockDim.y; int ostartW = threadIdx.x; int oendW = osizeW; int ostepW = blockDim.x; // select output plane int64_t o_plane = blockIdx.x + offsetZ; ot = o_plane % osizeT; // output frame/time int d = o_plane / osizeT; // slice/feature // input frame/time range is fixed. int istartT = start_index(ot, osizeT, isizeT); int iendT = end_index(ot, osizeT, isizeT); int kT = iendT - istartT; // input offset by slice/feature and earliest relevant frame/time const scalar_t *input_dt = input + d*istrideD + istartT*istrideT; // output offset by slice/feature and frame/time scalar_t *output_dt = output + o_plane*osizeH*osizeW; // For all output pixels... 
for (oh = ostartH; oh < oendH; oh += ostepH) { int istartH = start_index(oh, osizeH, isizeH); int iendH = end_index(oh, osizeH, isizeH); int kH = iendH - istartH; for (ow = ostartW; ow < oendW; ow += ostepW) { int istartW = start_index(ow, osizeW, isizeW); int iendW = end_index(ow, osizeW, isizeW); int kW = iendW - istartW; // Compute the average pooling from corresponding input pixels const scalar_t *ptr_input = input_dt + istartH*istrideH + istartW*istrideW; scalar_t *ptr_output = output_dt + oh*osizeW + ow; accscalar_t sum = static_cast<accscalar_t>(0); int it, ih, iw; for (it = 0; it < kT; ++it) { for (ih = 0; ih < kH; ++ih) { for (iw = 0; iw < kW; ++iw) { scalar_t val = ptr_input[ih*istrideH + iw*istrideW]; sum += static_cast<accscalar_t>(val); } } ptr_input += istrideT; // next input frame } // Update output const accscalar_t divide_factor = static_cast<accscalar_t>(kT * kH * kW); *ptr_output = static_cast<scalar_t>(sum / divide_factor); } } } template <typename scalar_t, typename accscalar_t> void adaptiveaveragepool_loop( const scalar_t *input_data, scalar_t *output_data, int64_t totalZ, int isizeT, int isizeH, int isizeW, int osizeT, int osizeH, int osizeW, int64_t istrideD, int64_t istrideT, int64_t istrideH, int64_t istrideW) { int64_t offsetZ = 0; dim3 threads(32, 8); // each H*W plane is processed by blocksH thread blocks int blocksH = ::max((int)(16L / totalZ), 1); while (totalZ > 0) { dim3 blocks(totalZ > 65535 ? 65535 : totalZ, blocksH); hipLaunchKernelGGL(( adaptiveaveragepool<scalar_t, accscalar_t>) , dim3(blocks), dim3(threads), 0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(), input_data, output_data, isizeT, isizeH, isizeW, osizeT, osizeH, osizeW, istrideD, istrideT, istrideH, istrideW, offsetZ); C10_HIP_KERNEL_LAUNCH_CHECK(); totalZ -= 65535; offsetZ += 65535; } } /* * Description: * This function computes the gradInput from gradOutput. * * gridDim.y blocks work together on a single 2D output plane specified by * (blockIdx.x + offsetZ). */ template <typename scalar_t, typename accscalar_t> __global__ void adaptiveaveragegradinput( scalar_t *gradInput, const scalar_t *gradOutput, int isizeT, int isizeH, int isizeW, int osizeT, int osizeH, int osizeW, int64_t offsetZ) { // iterators on input pixels int it, ih, iw; // compute offsets based on thread/block ID int istartH = blockIdx.y * blockDim.y + threadIdx.y; int iendH = isizeH; int istepH = gridDim.y * blockDim.y; int istartW = threadIdx.x; int iendW = isizeW; int istepW = blockDim.x; // select input plane int64_t i_plane = blockIdx.x + offsetZ; it = i_plane % isizeT; // output frame/time int d = i_plane / isizeT; // slice/feature // output frame/time range is fixed. int ostartT = start_index(it, isizeT, osizeT); int oendT = end_index(it, isizeT, osizeT); // gradInput offset by slice/feature and frame/time. scalar_t *gradInput_dt = gradInput + i_plane*isizeH*isizeW; // gradOutput offset by slice/feature and earliest relevant frame/time const scalar_t *gradOutput_dt = gradOutput + (d*osizeT + ostartT)*osizeH*osizeW; // For all input pixels... 
for (ih = istartH; ih < iendH; ih += istepH) { int ostartH = start_index(ih, isizeH, osizeH); int oendH = end_index(ih, isizeH, osizeH); for (iw = istartW; iw < iendW; iw += istepW) { int ostartW = start_index(iw, isizeW, osizeW); int oendW = end_index(iw, isizeW, osizeW); // Compute the gradients from corresponding output pixels scalar_t *ptr_gradInput = gradInput_dt + ih*isizeW + iw; const scalar_t *ptr_gradOutput = gradOutput_dt; // for all relevant output pixels int ot, oh, ow; for (ot = ostartT; ot < oendT; ++ot) { int kT = end_index(ot, osizeT, isizeT) - start_index(ot, osizeT, isizeT); for (oh = ostartH; oh < oendH; ++oh) { int kH = end_index(oh, osizeH, isizeH) - start_index(oh, osizeH, isizeH); for (ow = ostartW; ow < oendW; ++ow) { int kW = end_index(ow, osizeW, isizeW) - start_index(ow, osizeW, isizeW); const accscalar_t divide_factor = kW * kH * kT; accscalar_t grad_delta = static_cast<accscalar_t>(ptr_gradOutput[oh*osizeW + ow] / divide_factor); *ptr_gradInput += static_cast<scalar_t>(grad_delta); } } ptr_gradOutput += osizeH*osizeW; // next output frame } } } } template <typename scalar_t, typename accscalar_t> void adaptiveaveragegradinput_loop( scalar_t *gradInput_data, const scalar_t *gradOutput_data, int64_t totalZ, int isizeT, int isizeH, int isizeW, int osizeT, int osizeH, int osizeW) { int64_t offsetZ = 0; dim3 threads(32, 8); // each H*W plane is processed by blocksH thread blocks int blocksH = ::max((int)(16L / totalZ), 1); while (totalZ > 0) { dim3 blocks(totalZ > 65535 ? 65535 : totalZ, blocksH); hipLaunchKernelGGL(( adaptiveaveragegradinput<scalar_t, accscalar_t>) , dim3(blocks), dim3(threads), 0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(), gradInput_data, gradOutput_data, isizeT, isizeH, isizeW, osizeT, osizeH, osizeW, offsetZ); C10_HIP_KERNEL_LAUNCH_CHECK(); totalZ -= 65535; offsetZ += 65535; } } /* * Description: * This function computes the gradInput from gradOutput. * * gridDim.y blocks work together on a single 2D output plane specified by * (blockIdx.x + offsetZ). * * (uses atomic add) * */ template <typename scalar_t> __global__ void atomicadaptiveaveragegradinput( scalar_t *gradInput, const scalar_t *gradOutput, int isizeT, int isizeH, int isizeW, int osizeT, int osizeH, int osizeW, int64_t offsetZ) { // iterators on output pixels int ot, oh, ow; // compute offsets based on thread/block ID int ostartH = blockIdx.y * blockDim.y + threadIdx.y; int oendH = osizeH; int ostepH = gridDim.y * blockDim.y; int ostartW = threadIdx.x; int oendW = osizeW; int ostepW = blockDim.x; // select output plane int64_t o_plane = blockIdx.x + offsetZ; ot = o_plane % osizeT; // output frame/time int d = o_plane / osizeT; // output slice/feature // input frame/time range is fixed. int istartT = start_index(ot, osizeT, isizeT); int iendT = end_index(ot, osizeT, isizeT); int kT = iendT - istartT; // gradInput offset by slice/feature and earliest relevant frame/time scalar_t *gradInput_nt = gradInput + (d*isizeT + istartT)*isizeH*isizeW; // gradOutput offset by slice/feature and frame/time const scalar_t *gradOutput_nt = gradOutput + o_plane*osizeH*osizeW; // For all output pixels... 
for (oh = ostartH; oh < oendH; oh += ostepH) { int istartH = start_index(oh, osizeH, isizeH); int iendH = end_index(oh, osizeH, isizeH); int kH = iendH - istartH; for (ow = ostartW; ow < oendW; ow += ostepW) { int istartW = start_index(ow, osizeW, isizeW); int iendW = end_index(ow, osizeW, isizeW); int kW = iendW - istartW; // Compute the gradients from corresponding input pixels scalar_t *ptr_gradInput = gradInput_nt + istartH*isizeW + istartW; const scalar_t *ptr_gradOutput = gradOutput_nt + oh*osizeW + ow; scalar_t grad_delta = *ptr_gradOutput / kT / kH / kW; int it, ih, iw; for (it = 0; it < kT; ++it) { for (ih = 0; ih < kH; ++ih) { for (iw = 0; iw < kW; ++iw) { gpuAtomicAddNoReturn(&(ptr_gradInput[ih*isizeW + iw]), grad_delta); } } ptr_gradInput += isizeH*isizeW; // next input frame } } } } template <typename scalar_t> void atomicadaptiveaveragegradinput_loop( scalar_t* gradInput_data, const scalar_t* gradOutput_data, int64_t totalZ, int isizeT, int isizeH, int isizeW, int osizeT, int osizeH, int osizeW) { int64_t offsetZ = 0; dim3 threads(32, 8); int blocksH = ::max((int)(16L / totalZ), 1); while (totalZ > 0) { dim3 blocks(totalZ > 65535 ? 65535 : totalZ, blocksH); hipLaunchKernelGGL(( atomicadaptiveaveragegradinput), dim3(blocks), dim3(threads), 0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(), gradInput_data, gradOutput_data, isizeT, isizeH, isizeW, osizeT, osizeH, osizeW, offsetZ); C10_HIP_KERNEL_LAUNCH_CHECK(); totalZ -= 65535; offsetZ += 65535; } } // 5D tensor B x D x T x H x w void adaptive_avg_pool3d_out_cuda_template( Tensor& output, const Tensor& input_, IntArrayRef& output_size) { TensorArg output_arg{output, "output", 1}; TensorArg input_arg{input_, "input_", 2}; checkAllSameGPU("adaptive_avg_pool3d_cuda", {output_arg, input_arg}); for (int64_t i = 1; i < input_.ndimension(); i++) { TORCH_CHECK( input_.size(i) > 0, "adaptive_avg_pool3d_cuda(): Expected input to have non-zero size for non-batch dimensions, " "but input has sizes ", input_.sizes(), " with dimension ", i, " being empty"); } TORCH_CHECK( (input_.ndimension() == 4 || input_.ndimension() == 5), "adaptive_avg_pool3d_cuda(): Expected 4D or 5D tensor, but got ", input_.sizes()); // the jit sometimes passes output_size.size() == 1 TORCH_CHECK( output_size.size() == 1 || output_size.size() == 3, "adaptive_avg_pool3d: internal error: output_size.size() must be 1 or 3"); int64_t osizeT = output_size[0]; int64_t osizeH = output_size[1]; int64_t osizeW = output_size[2]; int64_t sizeD, isizeT, isizeH, isizeW; int64_t istrideD, istrideT, istrideH, istrideW; int64_t totalZ; const Tensor& input = input_.ndimension() == 4 ? 
input_ : input_.contiguous(); if (input.ndimension() == 4) { sizeD = input.size(0); isizeT = input.size(1); isizeH = input.size(2); isizeW = input.size(3); istrideD = input.stride(0); istrideT = input.stride(1); istrideH = input.stride(2); istrideW = input.stride(3); output.resize_({sizeD, osizeT, osizeH, osizeW}); totalZ = sizeD * osizeT; } else { int64_t sizeB = input.size(0); sizeD = input.size(1); isizeT = input.size(2); isizeH = input.size(3); isizeW = input.size(4); istrideD = input.stride(1); istrideT = input.stride(2); istrideH = input.stride(3); istrideW = input.stride(4); output.resize_({sizeB, sizeD, osizeT, osizeH, osizeW}); totalZ = sizeB * sizeD * osizeT; } if (output.numel() == 0) { return; } AT_DISPATCH_FLOATING_TYPES_AND2(kHalf, kBFloat16, input.scalar_type(), "adaptive_avg_pool3d_cuda", [&] { using accscalar_t = at::acc_type<scalar_t, true>; const scalar_t* input_data = input.const_data_ptr<scalar_t>(); scalar_t* output_data = output.mutable_data_ptr<scalar_t>(); adaptiveaveragepool_loop<scalar_t, accscalar_t>( input_data, output_data, totalZ, isizeT, isizeH, isizeW, osizeT, osizeH, osizeW, istrideD, istrideT, istrideH, istrideW); }); } void adaptive_avg_pool3d_backward_out_cuda_template( Tensor& gradInput, const Tensor& gradOutput_, const Tensor& input) { TensorArg grad_input_arg{gradInput, "gradInput", 1}; TensorArg grad_output_arg{gradOutput_, "gradOutput_", 2}; TensorArg input_arg{input, "input", 3}; adaptive_pool_empty_output_check(gradOutput_, "adaptive_avg_pool3d_backward"); checkAllSameGPU( "adaptive_avg_pool3d_out_cuda", {grad_input_arg, grad_output_arg, input_arg}); const Tensor gradOutput = gradOutput_.contiguous(); gradInput.resize_as_(input); if (gradInput.numel() == 0) { return; } gradInput.zero_(); int64_t sizeD, isizeT, isizeH, isizeW; int64_t osizeT, osizeH, osizeW; int64_t totalZ; if (input.ndimension() == 4) { sizeD = input.size(0); isizeT = input.size(1); isizeH = input.size(2); isizeW = input.size(3); osizeT = gradOutput.size(1); osizeH = gradOutput.size(2); osizeW = gradOutput.size(3); } else { sizeD = input.size(1); isizeT = input.size(2); isizeH = input.size(3); isizeW = input.size(4); osizeT = gradOutput.size(2); osizeH = gradOutput.size(3); osizeW = gradOutput.size(4); } bool atomic = (isizeW%osizeW != 0) || (isizeH%osizeH != 0) || (isizeT%osizeT != 0); if (input.ndimension() == 4) { totalZ = atomic ? sizeD * osizeT : sizeD * isizeT; } else { int sizeB = input.size(0); totalZ = atomic ? 
sizeB * sizeD * osizeT : sizeB * sizeD * isizeT; } if (atomic) { AT_DISPATCH_FLOATING_TYPES_AND2(kHalf, kBFloat16, input.scalar_type(), "adaptive_avg_pool3d_backward_cuda", [&] { scalar_t* gradInput_data = gradInput.mutable_data_ptr<scalar_t>(); const scalar_t* gradOutput_data = gradOutput.const_data_ptr<scalar_t>(); atomicadaptiveaveragegradinput_loop( gradInput_data, gradOutput_data, totalZ, isizeT, isizeH, isizeW, osizeT, osizeH, osizeW); }); } else { AT_DISPATCH_FLOATING_TYPES_AND2(kHalf, kBFloat16, input.scalar_type(), "adaptive_avg_pool3d_backward_cuda", [&] { using accscalar_t = at::acc_type<scalar_t, true>; scalar_t* gradInput_data = gradInput.mutable_data_ptr<scalar_t>(); const scalar_t* gradOutput_data = gradOutput.const_data_ptr<scalar_t>(); adaptiveaveragegradinput_loop<scalar_t, accscalar_t>( gradInput_data, gradOutput_data, totalZ, isizeT, isizeH, isizeW, osizeT, osizeH, osizeW); }); } } } // namespace Tensor& adaptive_avg_pool3d_out_cuda(const Tensor& input, IntArrayRef output_size, Tensor& output) { adaptive_avg_pool3d_out_cuda_template(output, input, output_size); return output; } Tensor adaptive_avg_pool3d_cuda( const Tensor& input, IntArrayRef output_size) { auto output = at::empty({0}, input.options()); adaptive_avg_pool3d_out_cuda_template(output, input, output_size); return output; } Tensor& adaptive_avg_pool3d_backward_out_cuda(const Tensor& gradOutput_, const Tensor& input, Tensor& gradInput) { // See Note [Writing Nondeterministic Operations] // Nondeterministic because of atomicAdd usage globalContext().alertNotDeterministic("adaptive_avg_pool3d_backward_out_cuda"); adaptive_avg_pool3d_backward_out_cuda_template(gradInput, gradOutput_, input); return gradInput; } Tensor adaptive_avg_pool3d_backward_cuda( const Tensor& gradOutput_, const Tensor& input) { // See Note [Writing Nondeterministic Operations] // Nondeterministic because of atomicAdd usage globalContext().alertNotDeterministic("adaptive_avg_pool3d_backward_cuda"); auto gradInput = at::zeros_like(input, LEGACY_CONTIGUOUS_MEMORY_FORMAT); adaptive_avg_pool3d_backward_out_cuda_template(gradInput, gradOutput_, input); return gradInput; } } // namespace at::native
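A minimal libtorch sketch of how the kernels in this file are reached; it assumes a CUDA-enabled libtorch build, the shapes are arbitrary examples, and the driver is not part of the file pair.

// hypothetical caller: forward runs adaptiveaveragepool; backward takes the
// non-atomic gradient path because 8, 16, 16 divide evenly by 4, 4, 4
#include <torch/torch.h>

int main() {
  if (!torch::cuda::is_available()) return 0;
  auto x = torch::randn({2, 3, 8, 16, 16},
                        torch::dtype(torch::kFloat).device(torch::kCUDA).requires_grad(true));
  auto y = torch::adaptive_avg_pool3d(x, {4, 4, 4});
  y.sum().backward();
  return 0;
}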
21fa5668e2f88fa09b05e67ef480997095511eb9.cu
#define TORCH_ASSERT_ONLY_METHOD_OPERATORS #include <ATen/core/Tensor.h> #include <ATen/AccumulateType.h> #include <ATen/Dispatch.h> #include <ATen/TensorUtils.h> #include <ATen/Utils.h> #include <ATen/cuda/Atomic.cuh> #include <ATen/cuda/CUDAContext.h> #include <c10/util/Exception.h> #ifndef AT_PER_OPERATOR_HEADERS #include <ATen/Functions.h> #include <ATen/NativeFunctions.h> #else #include <ATen/ops/adaptive_avg_pool3d_backward_native.h> #include <ATen/ops/adaptive_avg_pool3d_native.h> #include <ATen/ops/empty.h> #include <ATen/ops/zeros_like.h> #endif #include <ATen/native/AdaptivePooling.h> #include <algorithm> #include <cfloat> #include <cmath> namespace at::native { namespace { __device__ inline int64_t start_index(int64_t a, int64_t b, int64_t c) { return (a / b) * c + ((a % b) * c) / b; } __device__ inline int64_t end_index(int64_t a, int64_t b, int64_t c) { return 1 + ((a + 1) * c - 1) / b; } // 5d tensor B x D x T x H x W // All kernels view batch dim B and dim D as collapsed. /* * Description: * this function adaptively average pools an input 5D tensor along dimensions * 2, 3, and 4 5D input, 5D output * * gridDim.y blocks work together on a single 2D output plane specified by * (blockIdx.x + offsetZ). */ template <typename scalar_t, typename accscalar_t> __global__ void adaptiveaveragepool( const scalar_t *input, scalar_t *output, int isizeT, int isizeH, int isizeW, int osizeT, int osizeH, int osizeW, int64_t istrideD, int64_t istrideT, int64_t istrideH, int64_t istrideW, int64_t offsetZ) { // iterates on output pixels int ot, oh, ow; // compute offsets based on thread/block ID int ostartH = blockIdx.y * blockDim.y + threadIdx.y; int oendH = osizeH; int ostepH = gridDim.y * blockDim.y; int ostartW = threadIdx.x; int oendW = osizeW; int ostepW = blockDim.x; // select output plane int64_t o_plane = blockIdx.x + offsetZ; ot = o_plane % osizeT; // output frame/time int d = o_plane / osizeT; // slice/feature // input frame/time range is fixed. int istartT = start_index(ot, osizeT, isizeT); int iendT = end_index(ot, osizeT, isizeT); int kT = iendT - istartT; // input offset by slice/feature and earliest relevant frame/time const scalar_t *input_dt = input + d*istrideD + istartT*istrideT; // output offset by slice/feature and frame/time scalar_t *output_dt = output + o_plane*osizeH*osizeW; // For all output pixels... 
for (oh = ostartH; oh < oendH; oh += ostepH) { int istartH = start_index(oh, osizeH, isizeH); int iendH = end_index(oh, osizeH, isizeH); int kH = iendH - istartH; for (ow = ostartW; ow < oendW; ow += ostepW) { int istartW = start_index(ow, osizeW, isizeW); int iendW = end_index(ow, osizeW, isizeW); int kW = iendW - istartW; // Compute the average pooling from corresponding input pixels const scalar_t *ptr_input = input_dt + istartH*istrideH + istartW*istrideW; scalar_t *ptr_output = output_dt + oh*osizeW + ow; accscalar_t sum = static_cast<accscalar_t>(0); int it, ih, iw; for (it = 0; it < kT; ++it) { for (ih = 0; ih < kH; ++ih) { for (iw = 0; iw < kW; ++iw) { scalar_t val = ptr_input[ih*istrideH + iw*istrideW]; sum += static_cast<accscalar_t>(val); } } ptr_input += istrideT; // next input frame } // Update output const accscalar_t divide_factor = static_cast<accscalar_t>(kT * kH * kW); *ptr_output = static_cast<scalar_t>(sum / divide_factor); } } } template <typename scalar_t, typename accscalar_t> void adaptiveaveragepool_loop( const scalar_t *input_data, scalar_t *output_data, int64_t totalZ, int isizeT, int isizeH, int isizeW, int osizeT, int osizeH, int osizeW, int64_t istrideD, int64_t istrideT, int64_t istrideH, int64_t istrideW) { int64_t offsetZ = 0; dim3 threads(32, 8); // each H*W plane is processed by blocksH thread blocks int blocksH = std::max((int)(16L / totalZ), 1); while (totalZ > 0) { dim3 blocks(totalZ > 65535 ? 65535 : totalZ, blocksH); adaptiveaveragepool<scalar_t, accscalar_t> <<<blocks, threads, 0, at::cuda::getCurrentCUDAStream()>>>( input_data, output_data, isizeT, isizeH, isizeW, osizeT, osizeH, osizeW, istrideD, istrideT, istrideH, istrideW, offsetZ); C10_CUDA_KERNEL_LAUNCH_CHECK(); totalZ -= 65535; offsetZ += 65535; } } /* * Description: * This function computes the gradInput from gradOutput. * * gridDim.y blocks work together on a single 2D output plane specified by * (blockIdx.x + offsetZ). */ template <typename scalar_t, typename accscalar_t> __global__ void adaptiveaveragegradinput( scalar_t *gradInput, const scalar_t *gradOutput, int isizeT, int isizeH, int isizeW, int osizeT, int osizeH, int osizeW, int64_t offsetZ) { // iterators on input pixels int it, ih, iw; // compute offsets based on thread/block ID int istartH = blockIdx.y * blockDim.y + threadIdx.y; int iendH = isizeH; int istepH = gridDim.y * blockDim.y; int istartW = threadIdx.x; int iendW = isizeW; int istepW = blockDim.x; // select input plane int64_t i_plane = blockIdx.x + offsetZ; it = i_plane % isizeT; // output frame/time int d = i_plane / isizeT; // slice/feature // output frame/time range is fixed. int ostartT = start_index(it, isizeT, osizeT); int oendT = end_index(it, isizeT, osizeT); // gradInput offset by slice/feature and frame/time. scalar_t *gradInput_dt = gradInput + i_plane*isizeH*isizeW; // gradOutput offset by slice/feature and earliest relevant frame/time const scalar_t *gradOutput_dt = gradOutput + (d*osizeT + ostartT)*osizeH*osizeW; // For all input pixels... 
for (ih = istartH; ih < iendH; ih += istepH) { int ostartH = start_index(ih, isizeH, osizeH); int oendH = end_index(ih, isizeH, osizeH); for (iw = istartW; iw < iendW; iw += istepW) { int ostartW = start_index(iw, isizeW, osizeW); int oendW = end_index(iw, isizeW, osizeW); // Compute the gradients from corresponding output pixels scalar_t *ptr_gradInput = gradInput_dt + ih*isizeW + iw; const scalar_t *ptr_gradOutput = gradOutput_dt; // for all relevant output pixels int ot, oh, ow; for (ot = ostartT; ot < oendT; ++ot) { int kT = end_index(ot, osizeT, isizeT) - start_index(ot, osizeT, isizeT); for (oh = ostartH; oh < oendH; ++oh) { int kH = end_index(oh, osizeH, isizeH) - start_index(oh, osizeH, isizeH); for (ow = ostartW; ow < oendW; ++ow) { int kW = end_index(ow, osizeW, isizeW) - start_index(ow, osizeW, isizeW); const accscalar_t divide_factor = kW * kH * kT; accscalar_t grad_delta = static_cast<accscalar_t>(ptr_gradOutput[oh*osizeW + ow] / divide_factor); *ptr_gradInput += static_cast<scalar_t>(grad_delta); } } ptr_gradOutput += osizeH*osizeW; // next output frame } } } } template <typename scalar_t, typename accscalar_t> void adaptiveaveragegradinput_loop( scalar_t *gradInput_data, const scalar_t *gradOutput_data, int64_t totalZ, int isizeT, int isizeH, int isizeW, int osizeT, int osizeH, int osizeW) { int64_t offsetZ = 0; dim3 threads(32, 8); // each H*W plane is processed by blocksH thread blocks int blocksH = std::max((int)(16L / totalZ), 1); while (totalZ > 0) { dim3 blocks(totalZ > 65535 ? 65535 : totalZ, blocksH); adaptiveaveragegradinput<scalar_t, accscalar_t> <<<blocks, threads, 0, at::cuda::getCurrentCUDAStream()>>>( gradInput_data, gradOutput_data, isizeT, isizeH, isizeW, osizeT, osizeH, osizeW, offsetZ); C10_CUDA_KERNEL_LAUNCH_CHECK(); totalZ -= 65535; offsetZ += 65535; } } /* * Description: * This function computes the gradInput from gradOutput. * * gridDim.y blocks work together on a single 2D output plane specified by * (blockIdx.x + offsetZ). * * (uses atomic add) * */ template <typename scalar_t> __global__ void atomicadaptiveaveragegradinput( scalar_t *gradInput, const scalar_t *gradOutput, int isizeT, int isizeH, int isizeW, int osizeT, int osizeH, int osizeW, int64_t offsetZ) { // iterators on output pixels int ot, oh, ow; // compute offsets based on thread/block ID int ostartH = blockIdx.y * blockDim.y + threadIdx.y; int oendH = osizeH; int ostepH = gridDim.y * blockDim.y; int ostartW = threadIdx.x; int oendW = osizeW; int ostepW = blockDim.x; // select output plane int64_t o_plane = blockIdx.x + offsetZ; ot = o_plane % osizeT; // output frame/time int d = o_plane / osizeT; // output slice/feature // input frame/time range is fixed. int istartT = start_index(ot, osizeT, isizeT); int iendT = end_index(ot, osizeT, isizeT); int kT = iendT - istartT; // gradInput offset by slice/feature and earliest relevant frame/time scalar_t *gradInput_nt = gradInput + (d*isizeT + istartT)*isizeH*isizeW; // gradOutput offset by slice/feature and frame/time const scalar_t *gradOutput_nt = gradOutput + o_plane*osizeH*osizeW; // For all output pixels... 
for (oh = ostartH; oh < oendH; oh += ostepH) { int istartH = start_index(oh, osizeH, isizeH); int iendH = end_index(oh, osizeH, isizeH); int kH = iendH - istartH; for (ow = ostartW; ow < oendW; ow += ostepW) { int istartW = start_index(ow, osizeW, isizeW); int iendW = end_index(ow, osizeW, isizeW); int kW = iendW - istartW; // Compute the gradients from corresponding input pixels scalar_t *ptr_gradInput = gradInput_nt + istartH*isizeW + istartW; const scalar_t *ptr_gradOutput = gradOutput_nt + oh*osizeW + ow; scalar_t grad_delta = *ptr_gradOutput / kT / kH / kW; int it, ih, iw; for (it = 0; it < kT; ++it) { for (ih = 0; ih < kH; ++ih) { for (iw = 0; iw < kW; ++iw) { gpuAtomicAddNoReturn(&(ptr_gradInput[ih*isizeW + iw]), grad_delta); } } ptr_gradInput += isizeH*isizeW; // next input frame } } } } template <typename scalar_t> void atomicadaptiveaveragegradinput_loop( scalar_t* gradInput_data, const scalar_t* gradOutput_data, int64_t totalZ, int isizeT, int isizeH, int isizeW, int osizeT, int osizeH, int osizeW) { int64_t offsetZ = 0; dim3 threads(32, 8); int blocksH = std::max((int)(16L / totalZ), 1); while (totalZ > 0) { dim3 blocks(totalZ > 65535 ? 65535 : totalZ, blocksH); atomicadaptiveaveragegradinput<<<blocks, threads, 0, at::cuda::getCurrentCUDAStream()>>>( gradInput_data, gradOutput_data, isizeT, isizeH, isizeW, osizeT, osizeH, osizeW, offsetZ); C10_CUDA_KERNEL_LAUNCH_CHECK(); totalZ -= 65535; offsetZ += 65535; } } // 5D tensor B x D x T x H x w void adaptive_avg_pool3d_out_cuda_template( Tensor& output, const Tensor& input_, IntArrayRef& output_size) { TensorArg output_arg{output, "output", 1}; TensorArg input_arg{input_, "input_", 2}; checkAllSameGPU("adaptive_avg_pool3d_cuda", {output_arg, input_arg}); for (int64_t i = 1; i < input_.ndimension(); i++) { TORCH_CHECK( input_.size(i) > 0, "adaptive_avg_pool3d_cuda(): Expected input to have non-zero size for non-batch dimensions, " "but input has sizes ", input_.sizes(), " with dimension ", i, " being empty"); } TORCH_CHECK( (input_.ndimension() == 4 || input_.ndimension() == 5), "adaptive_avg_pool3d_cuda(): Expected 4D or 5D tensor, but got ", input_.sizes()); // the jit sometimes passes output_size.size() == 1 TORCH_CHECK( output_size.size() == 1 || output_size.size() == 3, "adaptive_avg_pool3d: internal error: output_size.size() must be 1 or 3"); int64_t osizeT = output_size[0]; int64_t osizeH = output_size[1]; int64_t osizeW = output_size[2]; int64_t sizeD, isizeT, isizeH, isizeW; int64_t istrideD, istrideT, istrideH, istrideW; int64_t totalZ; const Tensor& input = input_.ndimension() == 4 ? 
input_ : input_.contiguous(); if (input.ndimension() == 4) { sizeD = input.size(0); isizeT = input.size(1); isizeH = input.size(2); isizeW = input.size(3); istrideD = input.stride(0); istrideT = input.stride(1); istrideH = input.stride(2); istrideW = input.stride(3); output.resize_({sizeD, osizeT, osizeH, osizeW}); totalZ = sizeD * osizeT; } else { int64_t sizeB = input.size(0); sizeD = input.size(1); isizeT = input.size(2); isizeH = input.size(3); isizeW = input.size(4); istrideD = input.stride(1); istrideT = input.stride(2); istrideH = input.stride(3); istrideW = input.stride(4); output.resize_({sizeB, sizeD, osizeT, osizeH, osizeW}); totalZ = sizeB * sizeD * osizeT; } if (output.numel() == 0) { return; } AT_DISPATCH_FLOATING_TYPES_AND2(kHalf, kBFloat16, input.scalar_type(), "adaptive_avg_pool3d_cuda", [&] { using accscalar_t = at::acc_type<scalar_t, true>; const scalar_t* input_data = input.const_data_ptr<scalar_t>(); scalar_t* output_data = output.mutable_data_ptr<scalar_t>(); adaptiveaveragepool_loop<scalar_t, accscalar_t>( input_data, output_data, totalZ, isizeT, isizeH, isizeW, osizeT, osizeH, osizeW, istrideD, istrideT, istrideH, istrideW); }); } void adaptive_avg_pool3d_backward_out_cuda_template( Tensor& gradInput, const Tensor& gradOutput_, const Tensor& input) { TensorArg grad_input_arg{gradInput, "gradInput", 1}; TensorArg grad_output_arg{gradOutput_, "gradOutput_", 2}; TensorArg input_arg{input, "input", 3}; adaptive_pool_empty_output_check(gradOutput_, "adaptive_avg_pool3d_backward"); checkAllSameGPU( "adaptive_avg_pool3d_out_cuda", {grad_input_arg, grad_output_arg, input_arg}); const Tensor gradOutput = gradOutput_.contiguous(); gradInput.resize_as_(input); if (gradInput.numel() == 0) { return; } gradInput.zero_(); int64_t sizeD, isizeT, isizeH, isizeW; int64_t osizeT, osizeH, osizeW; int64_t totalZ; if (input.ndimension() == 4) { sizeD = input.size(0); isizeT = input.size(1); isizeH = input.size(2); isizeW = input.size(3); osizeT = gradOutput.size(1); osizeH = gradOutput.size(2); osizeW = gradOutput.size(3); } else { sizeD = input.size(1); isizeT = input.size(2); isizeH = input.size(3); isizeW = input.size(4); osizeT = gradOutput.size(2); osizeH = gradOutput.size(3); osizeW = gradOutput.size(4); } bool atomic = (isizeW%osizeW != 0) || (isizeH%osizeH != 0) || (isizeT%osizeT != 0); if (input.ndimension() == 4) { totalZ = atomic ? sizeD * osizeT : sizeD * isizeT; } else { int sizeB = input.size(0); totalZ = atomic ? 
sizeB * sizeD * osizeT : sizeB * sizeD * isizeT; } if (atomic) { AT_DISPATCH_FLOATING_TYPES_AND2(kHalf, kBFloat16, input.scalar_type(), "adaptive_avg_pool3d_backward_cuda", [&] { scalar_t* gradInput_data = gradInput.mutable_data_ptr<scalar_t>(); const scalar_t* gradOutput_data = gradOutput.const_data_ptr<scalar_t>(); atomicadaptiveaveragegradinput_loop( gradInput_data, gradOutput_data, totalZ, isizeT, isizeH, isizeW, osizeT, osizeH, osizeW); }); } else { AT_DISPATCH_FLOATING_TYPES_AND2(kHalf, kBFloat16, input.scalar_type(), "adaptive_avg_pool3d_backward_cuda", [&] { using accscalar_t = at::acc_type<scalar_t, true>; scalar_t* gradInput_data = gradInput.mutable_data_ptr<scalar_t>(); const scalar_t* gradOutput_data = gradOutput.const_data_ptr<scalar_t>(); adaptiveaveragegradinput_loop<scalar_t, accscalar_t>( gradInput_data, gradOutput_data, totalZ, isizeT, isizeH, isizeW, osizeT, osizeH, osizeW); }); } } } // namespace Tensor& adaptive_avg_pool3d_out_cuda(const Tensor& input, IntArrayRef output_size, Tensor& output) { adaptive_avg_pool3d_out_cuda_template(output, input, output_size); return output; } Tensor adaptive_avg_pool3d_cuda( const Tensor& input, IntArrayRef output_size) { auto output = at::empty({0}, input.options()); adaptive_avg_pool3d_out_cuda_template(output, input, output_size); return output; } Tensor& adaptive_avg_pool3d_backward_out_cuda(const Tensor& gradOutput_, const Tensor& input, Tensor& gradInput) { // See Note [Writing Nondeterministic Operations] // Nondeterministic because of atomicAdd usage globalContext().alertNotDeterministic("adaptive_avg_pool3d_backward_out_cuda"); adaptive_avg_pool3d_backward_out_cuda_template(gradInput, gradOutput_, input); return gradInput; } Tensor adaptive_avg_pool3d_backward_cuda( const Tensor& gradOutput_, const Tensor& input) { // See Note [Writing Nondeterministic Operations] // Nondeterministic because of atomicAdd usage globalContext().alertNotDeterministic("adaptive_avg_pool3d_backward_cuda"); auto gradInput = at::zeros_like(input, LEGACY_CONTIGUOUS_MEMORY_FORMAT); adaptive_avg_pool3d_backward_out_cuda_template(gradInput, gradOutput_, input); return gradInput; } } // namespace at::native
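The *_loop launchers in the file above share one host-side pattern: the number of 2D planes (totalZ) can exceed the 65535 cap the code assumes for gridDim.x, so kernels are launched in chunks and each launch passes an offsetZ that the kernel adds to blockIdx.x. A minimal sketch of that pattern follows; touch_plane is a placeholder kernel and the 256-thread block size is an illustrative choice, only the chunking and offset rebasing mirror the file.

// Sketch of the chunked launch used by adaptiveaveragepool_loop and friends.
// Only the 65535 chunk size and the offsetZ rebasing are taken from the file above.
#include <cuda_runtime.h>

__global__ void touch_plane(float* data, int planeSize, int64_t offsetZ) {
  const int64_t plane = blockIdx.x + offsetZ;   // same rebasing as the pooling kernels
  for (int i = threadIdx.x; i < planeSize; i += blockDim.x) {
    data[plane * planeSize + i] = static_cast<float>(plane);
  }
}

void launch_in_chunks(float* data, int64_t totalZ, int planeSize, cudaStream_t stream) {
  int64_t offsetZ = 0;
  while (totalZ > 0) {
    // gridDim.x is capped at 65535 per launch, exactly as in the *_loop helpers.
    dim3 blocks(static_cast<unsigned int>(totalZ > 65535 ? 65535 : totalZ));
    touch_plane<<<blocks, 256, 0, stream>>>(data, planeSize, offsetZ);
    totalZ -= 65535;
    offsetZ += 65535;
  }
}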
8fe39898fe1ff537b0bcff78dc556cb698d67d1c.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <wb.h> #define wbCheck(stmt) \ do { \ hipError_t err = stmt; \ if (err != hipSuccess) { \ wbLog(ERROR, "Failed to run stmt ", #stmt); \ wbLog(ERROR, "Got CUDA error ... ", hipGetErrorString(err)); \ return -1; \ } \ } while (0) // Compute C = A * B __global__ void matrixMultiply(float *A, float *B, float *C, int numARows, int numAColumns, int numBRows, int numBColumns, int numCRows, int numCColumns) { //@@ Insert code to implement matrix multiplication here int Row = blockIdx.y * blockDim.y + threadIdx.y; int Col = blockIdx.x * blockDim.x + threadIdx.x; int width = numAColumns; float Pvalue = 0; if((Row < numCRows) && (Col < numCColumns)){ //float Pvalue = 0; for(int k =0; k < width; ++k){ Pvalue += A[Row * width + k] * B[k * numBColumns + Col]; } C[Row * numCColumns + Col] = Pvalue; } } int main(int argc, char **argv) { wbArg_t args; float *hostA; // The A matrix float *hostB; // The B matrix float *hostC; // The output C matrix float *deviceA; float *deviceB; float *deviceC; int numARows; // number of rows in the matrix A int numAColumns; // number of columns in the matrix A int numBRows; // number of rows in the matrix B int numBColumns; // number of columns in the matrix B int numCRows; // number of rows in the matrix C (you have to set this) int numCColumns; // number of columns in the matrix C (you have to set // this) int blocks = 64; args = wbArg_read(argc, argv); wbTime_start(Generic, "Importing data and creating memory on host"); hostA = (float *)wbImport(wbArg_getInputFile(args, 0), &numARows, &numAColumns); hostB = (float *)wbImport(wbArg_getInputFile(args, 1), &numBRows, &numBColumns); //@@ Set numCRows and numCColumns numCRows = numARows; numCColumns = numBColumns; //@@ Allocate the hostC matrix hostC = (float *) malloc(numCRows * numCColumns * sizeof(float)); wbTime_stop(Generic, "Importing data and creating memory on host"); wbLog(TRACE, "The dimensions of A are ", numARows, " x ", numAColumns); wbLog(TRACE, "The dimensions of B are ", numBRows, " x ", numBColumns); wbTime_start(GPU, "Allocating GPU memory."); //@@ Allocate GPU memory here hipMalloc((void **) &deviceA, numARows * numAColumns * sizeof(float)); hipMalloc((void **) &deviceB, numBRows * numBColumns * sizeof(float)); hipMalloc((void **) &deviceC, numCRows * numCColumns * sizeof(float)); wbTime_stop(GPU, "Allocating GPU memory."); wbTime_start(GPU, "Copying input memory to the GPU."); //@@ Copy memory to the GPU here hipMemcpy(deviceA, hostA, numARows * numAColumns * sizeof(float), hipMemcpyHostToDevice); hipMemcpy(deviceB, hostB, numBRows * numBColumns * sizeof(float), hipMemcpyHostToDevice); //hipMemcpy(deviceC, hostC, numCRows * numCColumns * sizeof(float), hipMemcpyHostToDevice); wbTime_stop(GPU, "Copying input memory to the GPU."); //@@ Initialize the grid and block dimensions here dim3 dimGrid(blocks, blocks); dim3 dimBlock((numCColumns+blocks-1)/blocks, (numCRows+blocks-1)/blocks); //dim3 dimBlock((numCRows+ 63) /64, (numCColumns + 63)/64); wbTime_start(Compute, "Performing CUDA computation"); //@@ Launch the GPU Kernel here hipLaunchKernelGGL(( matrixMultiply), dim3(dimGrid), dim3(dimBlock), 0, 0, deviceA, deviceB, deviceC, numARows, numAColumns, numBRows, numBColumns, numCRows, numCColumns); hipDeviceSynchronize(); wbTime_stop(Compute, "Performing CUDA computation"); wbTime_start(Copy, "Copying output memory to the CPU"); //@@ Copy the GPU memory back to the CPU here hipMemcpy(hostC, deviceC, numCRows * 
numCColumns * sizeof(float), hipMemcpyDeviceToHost); wbTime_stop(Copy, "Copying output memory to the CPU"); wbTime_start(GPU, "Freeing GPU Memory"); //@@ Free the GPU memory here hipFree(deviceA); hipFree(deviceB); hipFree(deviceC); wbTime_stop(GPU, "Freeing GPU Memory"); wbSolution(args, hostC, numCRows, numCColumns); free(hostA); free(hostB); free(hostC); return 0; }
8fe39898fe1ff537b0bcff78dc556cb698d67d1c.cu
#include <wb.h> #define wbCheck(stmt) \ do { \ cudaError_t err = stmt; \ if (err != cudaSuccess) { \ wbLog(ERROR, "Failed to run stmt ", #stmt); \ wbLog(ERROR, "Got CUDA error ... ", cudaGetErrorString(err)); \ return -1; \ } \ } while (0) // Compute C = A * B __global__ void matrixMultiply(float *A, float *B, float *C, int numARows, int numAColumns, int numBRows, int numBColumns, int numCRows, int numCColumns) { //@@ Insert code to implement matrix multiplication here int Row = blockIdx.y * blockDim.y + threadIdx.y; int Col = blockIdx.x * blockDim.x + threadIdx.x; int width = numAColumns; float Pvalue = 0; if((Row < numCRows) && (Col < numCColumns)){ //float Pvalue = 0; for(int k =0; k < width; ++k){ Pvalue += A[Row * width + k] * B[k * numBColumns + Col]; } C[Row * numCColumns + Col] = Pvalue; } } int main(int argc, char **argv) { wbArg_t args; float *hostA; // The A matrix float *hostB; // The B matrix float *hostC; // The output C matrix float *deviceA; float *deviceB; float *deviceC; int numARows; // number of rows in the matrix A int numAColumns; // number of columns in the matrix A int numBRows; // number of rows in the matrix B int numBColumns; // number of columns in the matrix B int numCRows; // number of rows in the matrix C (you have to set this) int numCColumns; // number of columns in the matrix C (you have to set // this) int blocks = 64; args = wbArg_read(argc, argv); wbTime_start(Generic, "Importing data and creating memory on host"); hostA = (float *)wbImport(wbArg_getInputFile(args, 0), &numARows, &numAColumns); hostB = (float *)wbImport(wbArg_getInputFile(args, 1), &numBRows, &numBColumns); //@@ Set numCRows and numCColumns numCRows = numARows; numCColumns = numBColumns; //@@ Allocate the hostC matrix hostC = (float *) malloc(numCRows * numCColumns * sizeof(float)); wbTime_stop(Generic, "Importing data and creating memory on host"); wbLog(TRACE, "The dimensions of A are ", numARows, " x ", numAColumns); wbLog(TRACE, "The dimensions of B are ", numBRows, " x ", numBColumns); wbTime_start(GPU, "Allocating GPU memory."); //@@ Allocate GPU memory here cudaMalloc((void **) &deviceA, numARows * numAColumns * sizeof(float)); cudaMalloc((void **) &deviceB, numBRows * numBColumns * sizeof(float)); cudaMalloc((void **) &deviceC, numCRows * numCColumns * sizeof(float)); wbTime_stop(GPU, "Allocating GPU memory."); wbTime_start(GPU, "Copying input memory to the GPU."); //@@ Copy memory to the GPU here cudaMemcpy(deviceA, hostA, numARows * numAColumns * sizeof(float), cudaMemcpyHostToDevice); cudaMemcpy(deviceB, hostB, numBRows * numBColumns * sizeof(float), cudaMemcpyHostToDevice); //cudaMemcpy(deviceC, hostC, numCRows * numCColumns * sizeof(float), cudaMemcpyHostToDevice); wbTime_stop(GPU, "Copying input memory to the GPU."); //@@ Initialize the grid and block dimensions here dim3 dimGrid(blocks, blocks); dim3 dimBlock((numCColumns+blocks-1)/blocks, (numCRows+blocks-1)/blocks); //dim3 dimBlock((numCRows+ 63) /64, (numCColumns + 63)/64); wbTime_start(Compute, "Performing CUDA computation"); //@@ Launch the GPU Kernel here matrixMultiply<<<dimGrid, dimBlock>>>(deviceA, deviceB, deviceC, numARows, numAColumns, numBRows, numBColumns, numCRows, numCColumns); cudaDeviceSynchronize(); wbTime_stop(Compute, "Performing CUDA computation"); wbTime_start(Copy, "Copying output memory to the CPU"); //@@ Copy the GPU memory back to the CPU here cudaMemcpy(hostC, deviceC, numCRows * numCColumns * sizeof(float), cudaMemcpyDeviceToHost); wbTime_stop(Copy, "Copying output memory to the CPU"); 
wbTime_start(GPU, "Freeing GPU Memory"); //@@ Free the GPU memory here cudaFree(deviceA); cudaFree(deviceB); cudaFree(deviceC); wbTime_stop(GPU, "Freeing GPU Memory"); wbSolution(args, hostC, numCRows, numCColumns); free(hostA); free(hostB); free(hostC); return 0; }
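In both copies of this file the roles of dimGrid and dimBlock appear swapped relative to the usual convention: the grid is fixed at 64x64 while the block dimensions are the ceil-divided matrix extents, so small matrices run with one thread per block and large matrices fail to launch once the block's thread count passes the 1024-threads-per-block hardware limit. A sketch of the conventional configuration for the same matrixMultiply kernel, meant as a drop-in for the dimGrid/dimBlock/launch lines in main(); the 16x16 tile size is an assumption, not something from the original.

// Conventional launch configuration for the matrixMultiply kernel defined above.
// TILE = 16 (256 threads per block) is an assumed tile size.
const int TILE = 16;
dim3 dimBlock(TILE, TILE);                          // fixed block: x indexes columns, y indexes rows
dim3 dimGrid((numCColumns + TILE - 1) / TILE,       // ceil-divide so every output column is covered
             (numCRows    + TILE - 1) / TILE);      // ceil-divide so every output row is covered
matrixMultiply<<<dimGrid, dimBlock>>>(deviceA, deviceB, deviceC,
                                      numARows, numAColumns,
                                      numBRows, numBColumns,
                                      numCRows, numCColumns);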
f09a2efc951cb96f6f2276f020058867bb5be75f.hip
// !!! This is a file automatically generated by hipify!!! #include <hiprand/hiprand.h> #include <hip/hip_runtime.h> #include <rocblas.h> #include <iostream> #include <chrono> #include <rocm_smi/rocm_smi.h> #include <cstdio> #include <cstdlib> #include <string> static int deviceID = 0; FILE* test_result; std::string file_name; int gpu_blas_mmul(const float *A, const float *B, float *C, const int m, const int k, const int n) { rsmi_status_t result; unsigned int temp = 0; unsigned int clock_freq = 0; result = nvmlInit(); if (RSMI_STATUS_SUCCESS != result) { printf("Failed to initialize NVML: %s\n", nvmlErrorString(result)); printf("Press ENTER to continue...\n"); getchar(); return 1; } uint32_t device; result = nvmlDeviceGetHandleByIndex(deviceID, &device); if (RSMI_STATUS_SUCCESS != result) { printf("Failed to get handle for device %i: %s\n", deviceID, nvmlErrorString(result)); result = nvmlShutdown(); if (RSMI_STATUS_SUCCESS != result) printf("Failed to shutdown NVML: %s\n", nvmlErrorString(result)); printf("Press ENTER to continue...\n"); getchar(); return 1; } int lda=m,ldb=k,ldc=m; const float alf = 1; const float bet = 0; const float *alpha = &alf; const float *beta = &bet; const double G_operations = 2*double(m)/1000*double(k)/1000*double(n)/1000; hipblasHandle_t handle; hipblasCreate(&handle); hipEvent_t start, stop; hipEventCreate(&start); hipEventCreate(&stop); test_result = fopen(file_name.c_str(), "a"); fprintf(test_result,"Temperature\t TFLOPS\t CLOCKS\t \n"); fclose(test_result); while(temp < 95) { hipEventRecord(start); hipblasSgemm(handle, HIPBLAS_OP_N, HIPBLAS_OP_N, m, n, k, alpha, A, lda, B, ldb, beta, C, ldc); hipEventRecord(stop); hipEventSynchronize(stop); float milliseconds = 0; hipEventElapsedTime(&milliseconds, start, stop); result = nvmlDeviceGetTemperature(device, NVML_TEMPERATURE_GPU, &temp); if (RSMI_STATUS_SUCCESS != result) { printf("Failed to get temperature of device %i: %s\n", 0, nvmlErrorString(result)); } result = nvmlDeviceGetClockInfo(device, NVML_CLOCK_SM , &clock_freq); if (RSMI_STATUS_SUCCESS != result) { printf("Failed to get clock frequency of device %i: %s\n", 0, nvmlErrorString(result)); } std::cout << "Temperature," << temp << ","; std::cout << "TFLOPS," << G_operations/milliseconds << ","; std::cout << "CLOCKS," << clock_freq << "\n"; test_result = fopen(file_name.c_str(),"w"); fprintf(test_result,"%d\t %f\t %d\t \n", temp, G_operations/milliseconds, clock_freq); fclose(test_result); } hipblasDestroy(handle); result = nvmlShutdown(); if (RSMI_STATUS_SUCCESS != result) printf("Failed to shutdown NVML: %s\n", nvmlErrorString(result)); return 0; } void GPU_fill_rand(float *A, int nr_rows_A, int nr_cols_A) { hiprandGenerator_t prng; hiprandCreateGenerator(&prng, HIPRAND_RNG_PSEUDO_DEFAULT); hiprandSetPseudoRandomGeneratorSeed(prng, (unsigned long long) clock()); hiprandGenerateUniform(prng, A, nr_rows_A * nr_cols_A); } int main(const int argc, const char *argv[]) { if(strcmp(argv[1], "-device") == 0) { deviceID = (int)atoi(argv[2]); } printf("Using device %d\n",deviceID); file_name ="../temperature_" + std::to_string(deviceID) + ".csv"; hipSetDevice(deviceID); int nr_rows_A, nr_cols_A, nr_rows_B, nr_cols_B, nr_rows_C, nr_cols_C; // int m = 28*28, n = 8*128, k = 96; int m = 16000, n = 16000, k = 16000; nr_rows_A = m; nr_rows_C = m; nr_cols_A = n; nr_rows_B = n; nr_cols_B = k; nr_cols_C = k; float *h_A = (float *)malloc(nr_rows_A * nr_cols_A * sizeof(float)); float *h_B = (float *)malloc(nr_rows_B * nr_cols_B * sizeof(float)); float *h_C = (float 
*)malloc(nr_rows_C * nr_cols_C * sizeof(float)); float *d_A, *d_B, *d_C; hipMalloc(&d_A,nr_rows_A * nr_cols_A * sizeof(float)); hipMalloc(&d_B,nr_rows_B * nr_cols_B * sizeof(float)); hipMalloc(&d_C,nr_rows_C * nr_cols_C * sizeof(float)); GPU_fill_rand(d_A, nr_rows_A, nr_cols_A); GPU_fill_rand(d_B, nr_rows_B, nr_cols_B); hipMemcpy(h_A,d_A,nr_rows_A * nr_cols_A * sizeof(float),hipMemcpyDeviceToHost); hipMemcpy(h_B,d_B,nr_rows_B * nr_cols_B * sizeof(float),hipMemcpyDeviceToHost); gpu_blas_mmul(d_A, d_B, d_C, nr_rows_A, nr_cols_A, nr_cols_B); // hipMemcpy(h_C,d_C,nr_rows_C * nr_cols_C * sizeof(float),hipMemcpyDeviceToHost); hipFree(d_A); hipFree(d_B); hipFree(d_C); free(h_A); free(h_B); free(h_C); return 0; }
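A worked check of the throughput arithmetic in gpu_blas_mmul: an m x k by k x n SGEMM performs 2*m*k*n floating-point operations, so with m = n = k = 16000 the per-call work is G_operations = 2*16*16*16 = 8192 Gflop. Dividing Gflop by the elapsed time in milliseconds yields Tflop/s directly (1 Gflop/ms = 1e9 flop / 1e-3 s = 1 Tflop/s), so the value printed under the TFLOPS label is dimensionally consistent; for example, a 500 ms hipblasSgemm/cublasSgemm call would report 8192/500, roughly 16.4 TFLOPS.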
f09a2efc951cb96f6f2276f020058867bb5be75f.cu
#include <curand.h> #include <cuda_runtime.h> #include <cublas_v2.h> #include <iostream> #include <chrono> #include <nvml.h> #include <cstdio> #include <cstdlib> #include <string> static int deviceID = 0; FILE* test_result; std::string file_name; int gpu_blas_mmul(const float *A, const float *B, float *C, const int m, const int k, const int n) { nvmlReturn_t result; unsigned int temp = 0; unsigned int clock_freq = 0; result = nvmlInit(); if (NVML_SUCCESS != result) { printf("Failed to initialize NVML: %s\n", nvmlErrorString(result)); printf("Press ENTER to continue...\n"); getchar(); return 1; } nvmlDevice_t device; result = nvmlDeviceGetHandleByIndex(deviceID, &device); if (NVML_SUCCESS != result) { printf("Failed to get handle for device %i: %s\n", deviceID, nvmlErrorString(result)); result = nvmlShutdown(); if (NVML_SUCCESS != result) printf("Failed to shutdown NVML: %s\n", nvmlErrorString(result)); printf("Press ENTER to continue...\n"); getchar(); return 1; } int lda=m,ldb=k,ldc=m; const float alf = 1; const float bet = 0; const float *alpha = &alf; const float *beta = &bet; const double G_operations = 2*double(m)/1000*double(k)/1000*double(n)/1000; cublasHandle_t handle; cublasCreate(&handle); cudaEvent_t start, stop; cudaEventCreate(&start); cudaEventCreate(&stop); test_result = fopen(file_name.c_str(), "a"); fprintf(test_result,"Temperature\t TFLOPS\t CLOCKS\t \n"); fclose(test_result); while(temp < 95) { cudaEventRecord(start); cublasSgemm(handle, CUBLAS_OP_N, CUBLAS_OP_N, m, n, k, alpha, A, lda, B, ldb, beta, C, ldc); cudaEventRecord(stop); cudaEventSynchronize(stop); float milliseconds = 0; cudaEventElapsedTime(&milliseconds, start, stop); result = nvmlDeviceGetTemperature(device, NVML_TEMPERATURE_GPU, &temp); if (NVML_SUCCESS != result) { printf("Failed to get temperature of device %i: %s\n", 0, nvmlErrorString(result)); } result = nvmlDeviceGetClockInfo(device, NVML_CLOCK_SM , &clock_freq); if (NVML_SUCCESS != result) { printf("Failed to get clock frequency of device %i: %s\n", 0, nvmlErrorString(result)); } std::cout << "Temperature," << temp << ","; std::cout << "TFLOPS," << G_operations/milliseconds << ","; std::cout << "CLOCKS," << clock_freq << "\n"; test_result = fopen(file_name.c_str(),"w"); fprintf(test_result,"%d\t %f\t %d\t \n", temp, G_operations/milliseconds, clock_freq); fclose(test_result); } cublasDestroy(handle); result = nvmlShutdown(); if (NVML_SUCCESS != result) printf("Failed to shutdown NVML: %s\n", nvmlErrorString(result)); return 0; } void GPU_fill_rand(float *A, int nr_rows_A, int nr_cols_A) { curandGenerator_t prng; curandCreateGenerator(&prng, CURAND_RNG_PSEUDO_DEFAULT); curandSetPseudoRandomGeneratorSeed(prng, (unsigned long long) clock()); curandGenerateUniform(prng, A, nr_rows_A * nr_cols_A); } int main(const int argc, const char *argv[]) { if(strcmp(argv[1], "-device") == 0) { deviceID = (int)atoi(argv[2]); } printf("Using device %d\n",deviceID); file_name ="../temperature_" + std::to_string(deviceID) + ".csv"; cudaSetDevice(deviceID); int nr_rows_A, nr_cols_A, nr_rows_B, nr_cols_B, nr_rows_C, nr_cols_C; // int m = 28*28, n = 8*128, k = 96; int m = 16000, n = 16000, k = 16000; nr_rows_A = m; nr_rows_C = m; nr_cols_A = n; nr_rows_B = n; nr_cols_B = k; nr_cols_C = k; float *h_A = (float *)malloc(nr_rows_A * nr_cols_A * sizeof(float)); float *h_B = (float *)malloc(nr_rows_B * nr_cols_B * sizeof(float)); float *h_C = (float *)malloc(nr_rows_C * nr_cols_C * sizeof(float)); float *d_A, *d_B, *d_C; cudaMalloc(&d_A,nr_rows_A * nr_cols_A * sizeof(float)); 
cudaMalloc(&d_B,nr_rows_B * nr_cols_B * sizeof(float)); cudaMalloc(&d_C,nr_rows_C * nr_cols_C * sizeof(float)); GPU_fill_rand(d_A, nr_rows_A, nr_cols_A); GPU_fill_rand(d_B, nr_rows_B, nr_cols_B); cudaMemcpy(h_A,d_A,nr_rows_A * nr_cols_A * sizeof(float),cudaMemcpyDeviceToHost); cudaMemcpy(h_B,d_B,nr_rows_B * nr_cols_B * sizeof(float),cudaMemcpyDeviceToHost); gpu_blas_mmul(d_A, d_B, d_C, nr_rows_A, nr_cols_A, nr_cols_B); // cudaMemcpy(h_C,d_C,nr_rows_C * nr_cols_C * sizeof(float),cudaMemcpyDeviceToHost); cudaFree(d_A); cudaFree(d_B); cudaFree(d_C); free(h_A); free(h_B); free(h_C); return 0; }
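The cuBLAS calls in this version are unchecked: cublasCreate and cublasSgemm both return a cublasStatus_t that is discarded. A small guard in the same printf-reporting style the file already uses for NVML; the macro name is illustrative, not part of the original.

// Illustrative status guard for the unchecked cuBLAS calls above.
#define CUBLAS_CHK(call)                                                  \
  do {                                                                    \
    cublasStatus_t status_ = (call);                                      \
    if (status_ != CUBLAS_STATUS_SUCCESS) {                               \
      printf("%s failed with cublasStatus_t %d\n", #call, (int)status_);  \
    }                                                                     \
  } while (0)

// Usage inside gpu_blas_mmul:
//   CUBLAS_CHK(cublasCreate(&handle));
//   CUBLAS_CHK(cublasSgemm(handle, CUBLAS_OP_N, CUBLAS_OP_N, m, n, k,
//                          alpha, A, lda, B, ldb, beta, C, ldc));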
d9c39f34110fad61f093a8f47209c340e5d257c5.hip
// !!! This is a file automatically generated by hipify!!!
#define TORCH_ASSERT_NO_OPERATORS
#include <ATen/native/UnaryOps.h>

#include <limits>

#include <ATen/AccumulateType.h>
#include <ATen/Dispatch.h>
#include <ATen/native/DispatchStub.h>
#include <ATen/native/Math.h>
#include <ATen/native/TensorIterator.h>
#include <ATen/native/hip/JitLoops.cuh>
#include <ATen/native/hip/Loops.cuh>
#include <ATen/native/hip/Math.cuh>
#include <ATen/native/hip/jit_utils.h>
#include <ATen/NumericUtils.h>
#include <c10/core/Scalar.h>
#include <c10/hip/HIPMathCompat.h>
#include <c10/util/complex.h>

namespace at::native {
namespace {

const char bessel_y1_name[] = "bessel_y1_forward";

void bessel_y1_kernel_cuda(TensorIteratorBase& iterator) {
#if AT_USE_JITERATOR()
  AT_DISPATCH_FLOATING_TYPES(iterator.common_dtype(), "bessel_y1_cuda", [&]() {
    jitted_gpu_kernel<bessel_y1_name, scalar_t, scalar_t, 1>(iterator, bessel_y1_string);
  });
#else
  AT_DISPATCH_FLOATING_TYPES(iterator.common_dtype(), "bessel_y1_cuda", [&]() {
    gpu_kernel(iterator, []GPU_LAMBDA(scalar_t a) -> scalar_t {
      return bessel_y1_forward(a);
    });
  });
#endif // AT_USE_JITERATOR()
}

}

REGISTER_DISPATCH(special_bessel_y1_stub, &bessel_y1_kernel_cuda);

} // namespace at::native
d9c39f34110fad61f093a8f47209c340e5d257c5.cu
#define TORCH_ASSERT_NO_OPERATORS
#include <ATen/native/UnaryOps.h>

#include <limits>

#include <ATen/AccumulateType.h>
#include <ATen/Dispatch.h>
#include <ATen/native/DispatchStub.h>
#include <ATen/native/Math.h>
#include <ATen/native/TensorIterator.h>
#include <ATen/native/cuda/JitLoops.cuh>
#include <ATen/native/cuda/Loops.cuh>
#include <ATen/native/cuda/Math.cuh>
#include <ATen/native/cuda/jit_utils.h>
#include <ATen/NumericUtils.h>
#include <c10/core/Scalar.h>
#include <c10/cuda/CUDAMathCompat.h>
#include <c10/util/complex.h>

namespace at::native {
namespace {

const char bessel_y1_name[] = "bessel_y1_forward";

void bessel_y1_kernel_cuda(TensorIteratorBase& iterator) {
#if AT_USE_JITERATOR()
  AT_DISPATCH_FLOATING_TYPES(iterator.common_dtype(), "bessel_y1_cuda", [&]() {
    jitted_gpu_kernel<bessel_y1_name, scalar_t, scalar_t, 1>(iterator, bessel_y1_string);
  });
#else
  AT_DISPATCH_FLOATING_TYPES(iterator.common_dtype(), "bessel_y1_cuda", [&]() {
    gpu_kernel(iterator, []GPU_LAMBDA(scalar_t a) -> scalar_t {
      return bessel_y1_forward(a);
    });
  });
#endif // AT_USE_JITERATOR()
}

}

REGISTER_DISPATCH(special_bessel_y1_stub, &bessel_y1_kernel_cuda);

} // namespace at::native
b6ff4edd7567a5495fd1857d18ef09d3ea6fca4b.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <utility> #include <stdlib.h> #include <stdio.h> #include <string.h> #include <math.h> #define CUDA_CHK(NAME, ARGS) { \ hipError_t cuda_err_code = NAME ARGS; \ if (cuda_err_code != hipSuccess) { \ printf("%s failed with %s\n", #NAME, hipGetErrorString(cuda_err_code)); \ abort(); \ } \ } #ifndef max #define max( a, b ) ( ((a) > (b)) ? (a) : (b) ) #endif #ifndef min #define min( a, b ) ( ((a) < (b)) ? (a) : (b) ) #endif void Check_Kernel(const char *message){ hipError_t error = hipGetLastError(); if (error != hipSuccess){ fprintf(stderr,"Error: %s:%s\n",message, hipGetErrorString(error)); } } #define FPT float #define a(i,j) a[(i)*MAT1+(j)] #define MAT1 4 #define MAT2 MAT1*MAT1 #define TINY 1.0e-40 #define FAILVALUE -1.0e40 #define CHANNELGAP 19.7242 #define NOISE 4.313e-14 #define MBPT 15 #define GO 1 #define NOGO 0 hipChannelFormatDesc XTG_desc; texture<float,2,hipReadModeElementType> XTG; __device__ void d_pivot_decomp(FPT *a, int *p, int *q){ int i,j,k; int n=MAT1; int pi,pj,tmp; FPT max; FPT ftmp; for (k=0;k<n;k++){ pi=-1,pj=-1,max=FAILVALUE; //find pivot in submatrix a(k:n,k:n) for (i=k;i<n;i++) { for (j=k;j<n;j++) { if (fabs(a(i,j))>max){ max = fabs(a(i,j)); pi=i; pj=j; } } } //Swap Row tmp=p[k]; p[k]=p[pi]; p[pi]=tmp; for (j=0;j<n;j++){ ftmp=a(k,j); a(k,j)=a(pi,j); a(pi,j)=ftmp; } //Swap Col tmp=q[k]; q[k]=q[pj]; q[pj]=tmp; for (i=0;i<n;i++){ ftmp=a(i,k); a(i,k)=a(i,pj); a(i,pj)=ftmp; } //END PIVOT //check pivot size and decompose if ((fabs(a(k,k))>TINY)){//should always be true with pivoting for (i=k+1;i<n;i++){ //Column normalisation ftmp=a(i,k)/=a(k,k); for (j=k+1;j<n;j++){ //a(ik)*a(kj) subtracted from lower right submatrix elements a(i,j)-=(ftmp*a(k,j)); } } } //END DECOMPOSE } } __device__ void d_solve(FPT *a, FPT *x, int *p, int *q){ //forward substitution; see Golub, Van Loan 96 //And see http://www.cs.rutgers.edu/~richter/cs510/completePivoting.pdf int i,j,pi; FPT ftmp; FPT xtmp[MAT1]; //Swap rows (x=Px) for (i=0; i<MAT1; i++){ pi=p[i]; xtmp[i]=x[pi]; //value that should be here } //Lx=x //partially taken from Sourcebook on Parallel Computing p577 for (i=0;i<MAT1;i++){ ftmp=xtmp[i]; for (j=0;j<i;j++) ftmp-=a(i,j)*xtmp[j]; xtmp[i]=ftmp; //Unit lower triangular so second division unnecessary } //backward substitution //solves Uy=z xtmp[MAT1-1]/=a(MAT1-1,MAT1-1); for (i=MAT1-2;i>=0;i--){ ftmp=xtmp[i]; for (j=i+1;j<MAT1;j++){ ftmp-=a(i,j)*xtmp[j]; } xtmp[i]=(ftmp)/a(i,i);//non unit upper triangular so this division is necessary } //Last bit //solves x=Qy for (i=0;i<MAT1;i++){ pi=q[i]; x[i]=xtmp[pi]; } } __global__ void solve(FPT *A, FPT *B){ //Each thread solves the A[id]x[id]=b[id] problem int id= blockDim.x*blockIdx.x + threadIdx.x; int p_pivot[MAT1],q_pivot[MAT1]; if ((GO==1)){ for (int i=0;i<MAT1;i++) { p_pivot[i]=q_pivot[i]=i; } d_pivot_decomp(&A[id*MAT2],&p_pivot[0],&q_pivot[0]); d_solve(&A[id*MAT2],&B[id*MAT1],&p_pivot[0],&q_pivot[0]); } } //=============================================================================== // CALC_PSD ACCESSORY FUNCTIONS for set channel (woo-hoo!) //=============================================================================== //Generate the A and B for all possible bitloads (in this offset) //requires grid(MBPT^N,1,1) block(N,1,1) //where MBPT^(N-1)>65535, use offset to continue //thread.y's collaboratively populate A and B for their id //This probably hammers memory... 
__global__ void lk_prepare_permutations(FPT *A, FPT *B, int offset){ //Don't need k as its sorted at the host stage for the creation of xtg int j=threadIdx.x; int myid=blockIdx.x; int bitbangval=myid+offset; int bitload[MAT1], i; //rebase myid to base (MBPT) //Unfortunately theres no way around every thread working out its own bitload :( for (i=0; i<MAT1; i++){ bitload[i]=bitbangval%MBPT; bitbangval/=MBPT; } if (bitbangval==0){ for (i=0; i<MAT1; i++){ //Generate a row of A for this permutation and victim y A[myid*MAT2+j*MAT1+i]=-(CHANNELGAP*((1<<bitload[j])-1)*tex2D(XTG,i,j))/tex2D(XTG,j,j); } } //Generate an item of B //B[myid*MAT1+j]=(NOISE*CHANNELGAP*((1<<bitload[j])-1))/d_XTG[j*MAT1+j]; B[myid*MAT1+j]=(NOISE*CHANNELGAP*((1<<bitload[j])-1))/tex2D(XTG,j,j); //Repair an item of A //__syncthreads(); //Seems to help with memory coalescing A[blockIdx.x*MAT2+j*MAT1+j]=1; } //Solve all A and B psds together. //requires grid(MBPT^N/threadmax,1,1) block(threadmax,1,1) __global__ void solve_permutations(FPT *A, FPT *B, int offset){ int id=blockIdx.x*blockDim.x+threadIdx.x; int bitbangval=id+offset; int p_pivot[MAT1],q_pivot[MAT1]; int i; //simulate bitload generation for in-place id check, and pivots at the same time for (i=0; i<MAT1; i++){ bitbangval/=MBPT; p_pivot[i]=q_pivot[i]=i; } //Stopper for invalid id's (where bitcombinations is less than maximum blocksize} if (bitbangval==0){ //do the magic d_pivot_decomp(&A[id*MAT2],&p_pivot[0],&q_pivot[0]); d_solve(&A[id*MAT2],&B[id*MAT1],&p_pivot[0],&q_pivot[0]); } } //Finally Calculate the LK_Max_permutations __global__ void lk_max_permutations(FPT *P, FPT *LK, FPT *lambdas, FPT *w){ int id=blockIdx.x*blockDim.x+threadIdx.x; int bitbangval=id; FPT lk=0; int bitload[MAT1], i, broken=0; //At this point, B is populated with the P results. 
for (i=0;i<MAT1;i++){ bitload[i]=bitbangval%MBPT; bitbangval/=MBPT; } if (bitbangval==0){//check for out of range id's for (i=0;i<MAT1;i++){ //Need to check for negative B's if (P[id*MAT1+i]<0) broken++; lk+=(bitload[i]*w[i])-(lambdas[i]*P[id*MAT1+i]); } //If anything is broken return a failing value (around -inf) if (broken==0) LK[id]=lk; else LK[id]=FAILVALUE; } } int main(){ //What are you actually trying to do: // generate 2 input matrixes, (NxN,Nx1) and 1 output (1xN) // do this over outerloop length for threadiding printf("Hello there\n"); hipSetDevice(0); printf("Set Device\n"); const unsigned int matrixcount=4; const unsigned int ncombos=pow(MBPT,MAT1); const unsigned int outerloop=ncombos; const unsigned int matsize=MAT2*outerloop; const unsigned int vecsize=MAT1*outerloop; //float a[]={1,3,-2,3,5,6,2,4,3}; //const float exampleA[]={7,3,-11,-6,7,10,-11,2,-2}; //const float exampleA[]={4,3,6,3}; //const float b[]={5,7,8}; //const float exampleB[]={4,5}; //const float x[]={5e-10,7e-20,8e-8,7e-20,8e-8,5e-10,8e-20,5e-8,7e-10}; //xtg analogue const float x[]={//this one breaks 7.15337843e-09, 9.98540799e-18, 8.27619149e-13, 9.98540799e-18, 1.79722338e-07, 7.45386129e-06, 1.79722338e-07, 5.17430336e-10, 8.27619149e-13, 9.98540799e-18, 7.15337843e-09, 9.98540799e-18, 1.79722338e-07, 5.17430336e-10, 1.79722338e-07, 7.45386129e-06}; const float x2[]={//this one should be fine 7.66152695e-09, 1.08253155e-17, 8.72877254e-13, 1.08253155e-17, 1.79434933e-07, 7.76722237e-06, 1.79434933e-07, 5.30951476e-10, 8.72877254e-13, 1.08253155e-17, 7.66152695e-09, 1.08253155e-17, 1.79434933e-07, 5.30951476e-10, 1.79434933e-07, 7.76722237e-06}; const float x178[]={ 5.15676578e-11, 2.60163643e-20, 1.55231521e-14, 2.60163643e-20, 1.74280845e-07, 3.86365544e-07, 1.74280845e-07, 6.97834034e-11, 1.55231521e-14, 2.60163643e-20, 5.15676578e-11, 2.60163643e-20, 1.74280845e-07, 6.97834034e-11, 1.74280845e-07, 3.86365544e-07}; const float x179[]={//The 'new' broken one 4.87663895e-11, 2.42887090e-20, 1.48197082e-14, 2.42887090e-20, 1.73987318e-07, 3.73631407e-07, 1.73987318e-07, 6.81261232e-11, 1.48197082e-14, 2.42887090e-20, 4.87663895e-11, 2.42887090e-20, 1.73987318e-07, 6.81261232e-11, 1.73987318e-07, 3.73631407e-07}; //memory allocations int h_offset=0; FPT* h_A = (FPT*)malloc(sizeof(FPT)*matsize);//empty till after FPT* h_b = (FPT*)malloc(sizeof(FPT)*vecsize);//empty till after FPT* h_l = (FPT*)malloc(sizeof(FPT)*vecsize); FPT* h_x = (FPT*)malloc(sizeof(float)*MAT2); FPT* d_A; FPT* d_b; //FPT* d_x; hipArray* d_x; FPT* d_l; CUDA_CHK(hipMalloc, (&d_A, sizeof(FPT)*matsize)); CUDA_CHK(hipMalloc, (&d_b, sizeof(FPT)*vecsize)); CUDA_CHK(hipMalloc, (&d_l, sizeof(FPT)*vecsize)); XTG_desc=hipCreateChannelDesc(32, 0, 0, 0, hipChannelFormatKindFloat); //CUDA_CHK(hipMalloc, (&d_x, sizeof(float)*MAT2)); CUDA_CHK(hipMallocArray,( &d_x, &XTG_desc, sizeof(float)*MAT1, sizeof(float)*MAT1)); //don't need these on device till after permutation generation FPT* h_lk = (FPT*)malloc(sizeof(FPT)*outerloop);//empty till after FPT* h_w = (FPT*)malloc(sizeof(FPT)*vecsize); FPT* d_lk; FPT* d_w; printf("Mallocd\n"); //fill matrix and vector with stuff for (unsigned int i = 0;i<outerloop;i++){ //printf("\n%d\n",i); for (unsigned int j = 0; j < MAT1; j++){ h_l[(i*MAT1)+j]=1.0; h_w[(i*MAT1)+j]=1.0; if (i<MAT1) h_x[(i*MAT1)+j]=x179[i*MAT1+j]; //printf("\n%d:",j); //for (unsigned int k=0; k < MAT1; k++){ //printf("%d,",k); //h_A[(i*MAT2)+(j*MAT1)+k]=a(j,k); //} } } printf("Generated\n"); //copy values to device // CUDA_CHK(hipMemcpy, (d_A, h_A, 
sizeof(FPT)*matsize, hipMemcpyHostToDevice)); CUDA_CHK(hipMemcpy, (d_l, h_l, sizeof(FPT)*vecsize, hipMemcpyHostToDevice)); //CUDA_CHK(hipMemcpy, (d_x, h_x, sizeof(float)*MAT2, hipMemcpyHostToDevice)); CUDA_CHK(hipMemcpyToArray,( d_x, 0, 0, h_x, sizeof(float)*MAT2, hipMemcpyHostToDevice)); // set texture parameters XTG.addressMode[0] = hipAddressModeWrap; XTG.addressMode[1] = hipAddressModeWrap; XTG.filterMode = hipFilterModeLinear; XTG.normalized = false; CUDA_CHK(hipBindTextureToArray,(XTG,d_x,XTG_desc)); printf("Copied\n");/* for (unsigned int i=0; i<outerloop; i++){ printf("\n%d:x:A|l",i); //printf("%.3lf|",h_x[i*MAT1]); for (unsigned int j=0; j<MAT1; j++){ printf("\n%g:",h_x[i*MAT1+j]); for (unsigned int k=0;k<MAT1; k++){ printf("%g,",h_A[(i*MAT2)+(j*MAT1)+k]); } printf("|%g",h_l[i*MAT1+j]); } } puts("\n"); */ //parameters //dim3 blocksPerGrid((outerloop + threadsPerBlock.x -1)/threadsPerBlock.x,1,1); dim3 blocksPerGrid(ncombos,1,1); dim3 threadsPerBlock(MAT1,1,1); printf("TPB:%d,BPG:%d\n",threadsPerBlock.x,blocksPerGrid.x); //Execute hipEvent_t evt_start, evt_stop; CUDA_CHK(hipEventCreate, (&evt_start)); CUDA_CHK(hipEventCreate, (&evt_stop)); CUDA_CHK(hipEventRecord, (evt_start,0)); hipLaunchKernelGGL(( lk_prepare_permutations), dim3(blocksPerGrid),dim3(threadsPerBlock), 0, 0, d_A,d_b,h_offset); //hipDeviceSynchronize(); hipDeviceSynchronize(); Check_Kernel("Generate"); CUDA_CHK(hipMemcpy, (h_A,d_A, sizeof(FPT)*matsize, hipMemcpyDeviceToHost)); CUDA_CHK(hipMemcpy, (h_b,d_b, sizeof(FPT)*vecsize, hipMemcpyDeviceToHost)); //hipDeviceSynchronize(); hipDeviceSynchronize(); for (unsigned int i=10000; i<10100; i++){ printf("\n%d:A|b\n",i); //printf("%.3lf|",h_x[i*MAT1]); for (unsigned int j=0; j<MAT1; j++){ for (unsigned int k=0;k<MAT1; k++){ printf("%g,",h_A[(i*MAT2)+(j*MAT1)+k]); } printf("|%g\n",h_b[i*MAT1+j]); } } puts("\n"); printf("Ran Generate\n"); //CUDA_CHK(hipFree, (d_x)); CUDA_CHK(hipMalloc, (&d_lk, sizeof(FPT)*outerloop)); CUDA_CHK(hipMalloc, (&d_w, sizeof(FPT)*vecsize)); CUDA_CHK(hipMemcpy, (d_w, h_w, sizeof(FPT)*vecsize, hipMemcpyHostToDevice)); dim3 threadsPerBlock_lksolve(256,1,1); dim3 blocksPerGrid_lksolve(ncombos/256,1,1); hipLaunchKernelGGL(( solve_permutations), dim3(blocksPerGrid_lksolve),dim3(threadsPerBlock_lksolve), 0, 0, d_A,d_b,h_offset); Check_Kernel("Solve"); printf("Ran Solve\n"); hipLaunchKernelGGL(( lk_max_permutations), dim3(blocksPerGrid_lksolve),dim3(threadsPerBlock_lksolve), 0, 0, d_b,d_lk, d_l, d_w); Check_Kernel("Max"); CUDA_CHK(hipMemcpy, (h_lk, d_lk, sizeof(FPT)*outerloop, hipMemcpyDeviceToHost)); CUDA_CHK(hipEventRecord, (evt_stop, 0)); CUDA_CHK(hipEventSynchronize, (evt_stop)); float total_time; CUDA_CHK(hipEventElapsedTime, (&total_time, evt_start, evt_stop)); float one_time = total_time * 1e-3; FPT lk_max=FAILVALUE; int lk; for (unsigned int i=0; i<outerloop; i++){ //printf("%d:A:LK:%g\n",i,h_lk[i]); if (h_lk[i]>lk_max){ lk=i; lk_max=h_lk[i]; printf("New LKMax:%d,(%g)\n",i,h_lk[i]); } //printf("%.3lf|",h_x[i*MAT1]); for (unsigned int j=0; j<MAT1; j++){ for (unsigned int k=0;k<MAT1; k++){ printf("%g,",h_A[(i*MAT2)+(j*MAT1)+k]); } puts("\n"); } } printf("time: %g s\nlkmax:%g@%d\n", one_time,h_lk[lk],lk); hipEventDestroy(evt_start); hipEventDestroy(evt_stop); free(h_A); free(h_b); free(h_x); free(h_w); free(h_l); free(h_lk); CUDA_CHK(hipFree, (d_A)); CUDA_CHK(hipFree, (d_b)); CUDA_CHK(hipFree, (d_l)); CUDA_CHK(hipFree, (d_lk)); CUDA_CHK(hipFree, (d_w)); }
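Both lk_prepare_permutations and solve_permutations in the file above decode a permutation id into a per-line bitload by repeated modulo/division in base MBPT, and treat any nonzero remainder after MAT1 digits as an out-of-range id to skip. A small host-side mirror of that decoding, handy for checking which bitload a given block/thread id corresponds to; the example id is arbitrary.

// Host-side mirror of the base-MBPT bitload decoding used by the kernels above.
#include <cstdio>

#define MAT1 4
#define MBPT 15

static bool decode_bitload(int id, int bitload[MAT1]) {
  for (int i = 0; i < MAT1; i++) {
    bitload[i] = id % MBPT;   // digit (bits loaded) for line i
    id /= MBPT;
  }
  return id == 0;             // false => id >= MBPT^MAT1; the kernels do nothing for these
}

int main() {
  int bitload[MAT1];
  const int id = 10050;       // arbitrary permutation id, 0 <= id < 15^4 = 50625
  if (decode_bitload(id, bitload)) {
    printf("id %d -> bitload {%d, %d, %d, %d}\n",
           id, bitload[0], bitload[1], bitload[2], bitload[3]);
  }
  return 0;
}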
b6ff4edd7567a5495fd1857d18ef09d3ea6fca4b.cu
#include <utility> #include <stdlib.h> #include <stdio.h> #include <string.h> #include <math.h> #define CUDA_CHK(NAME, ARGS) { \ cudaError_t cuda_err_code = NAME ARGS; \ if (cuda_err_code != cudaSuccess) { \ printf("%s failed with %s\n", #NAME, cudaGetErrorString(cuda_err_code)); \ abort(); \ } \ } #ifndef max #define max( a, b ) ( ((a) > (b)) ? (a) : (b) ) #endif #ifndef min #define min( a, b ) ( ((a) < (b)) ? (a) : (b) ) #endif void Check_Kernel(const char *message){ cudaError_t error = cudaGetLastError(); if (error != cudaSuccess){ fprintf(stderr,"Error: %s:%s\n",message, cudaGetErrorString(error)); } } #define FPT float #define a(i,j) a[(i)*MAT1+(j)] #define MAT1 4 #define MAT2 MAT1*MAT1 #define TINY 1.0e-40 #define FAILVALUE -1.0e40 #define CHANNELGAP 19.7242 #define NOISE 4.313e-14 #define MBPT 15 #define GO 1 #define NOGO 0 cudaChannelFormatDesc XTG_desc; texture<float,2,cudaReadModeElementType> XTG; __device__ void d_pivot_decomp(FPT *a, int *p, int *q){ int i,j,k; int n=MAT1; int pi,pj,tmp; FPT max; FPT ftmp; for (k=0;k<n;k++){ pi=-1,pj=-1,max=FAILVALUE; //find pivot in submatrix a(k:n,k:n) for (i=k;i<n;i++) { for (j=k;j<n;j++) { if (fabs(a(i,j))>max){ max = fabs(a(i,j)); pi=i; pj=j; } } } //Swap Row tmp=p[k]; p[k]=p[pi]; p[pi]=tmp; for (j=0;j<n;j++){ ftmp=a(k,j); a(k,j)=a(pi,j); a(pi,j)=ftmp; } //Swap Col tmp=q[k]; q[k]=q[pj]; q[pj]=tmp; for (i=0;i<n;i++){ ftmp=a(i,k); a(i,k)=a(i,pj); a(i,pj)=ftmp; } //END PIVOT //check pivot size and decompose if ((fabs(a(k,k))>TINY)){//should always be true with pivoting for (i=k+1;i<n;i++){ //Column normalisation ftmp=a(i,k)/=a(k,k); for (j=k+1;j<n;j++){ //a(ik)*a(kj) subtracted from lower right submatrix elements a(i,j)-=(ftmp*a(k,j)); } } } //END DECOMPOSE } } __device__ void d_solve(FPT *a, FPT *x, int *p, int *q){ //forward substitution; see Golub, Van Loan 96 //And see http://www.cs.rutgers.edu/~richter/cs510/completePivoting.pdf int i,j,pi; FPT ftmp; FPT xtmp[MAT1]; //Swap rows (x=Px) for (i=0; i<MAT1; i++){ pi=p[i]; xtmp[i]=x[pi]; //value that should be here } //Lx=x //partially taken from Sourcebook on Parallel Computing p577 for (i=0;i<MAT1;i++){ ftmp=xtmp[i]; for (j=0;j<i;j++) ftmp-=a(i,j)*xtmp[j]; xtmp[i]=ftmp; //Unit lower triangular so second division unnecessary } //backward substitution //solves Uy=z xtmp[MAT1-1]/=a(MAT1-1,MAT1-1); for (i=MAT1-2;i>=0;i--){ ftmp=xtmp[i]; for (j=i+1;j<MAT1;j++){ ftmp-=a(i,j)*xtmp[j]; } xtmp[i]=(ftmp)/a(i,i);//non unit upper triangular so this division is necessary } //Last bit //solves x=Qy for (i=0;i<MAT1;i++){ pi=q[i]; x[i]=xtmp[pi]; } } __global__ void solve(FPT *A, FPT *B){ //Each thread solves the A[id]x[id]=b[id] problem int id= blockDim.x*blockIdx.x + threadIdx.x; int p_pivot[MAT1],q_pivot[MAT1]; if ((GO==1)){ for (int i=0;i<MAT1;i++) { p_pivot[i]=q_pivot[i]=i; } d_pivot_decomp(&A[id*MAT2],&p_pivot[0],&q_pivot[0]); d_solve(&A[id*MAT2],&B[id*MAT1],&p_pivot[0],&q_pivot[0]); } } //=============================================================================== // CALC_PSD ACCESSORY FUNCTIONS for set channel (woo-hoo!) //=============================================================================== //Generate the A and B for all possible bitloads (in this offset) //requires grid(MBPT^N,1,1) block(N,1,1) //where MBPT^(N-1)>65535, use offset to continue //thread.y's collaboratively populate A and B for their id //This probably hammers memory... 
__global__ void lk_prepare_permutations(FPT *A, FPT *B, int offset){ //Don't need k as its sorted at the host stage for the creation of xtg int j=threadIdx.x; int myid=blockIdx.x; int bitbangval=myid+offset; int bitload[MAT1], i; //rebase myid to base (MBPT) //Unfortunately theres no way around every thread working out its own bitload :( for (i=0; i<MAT1; i++){ bitload[i]=bitbangval%MBPT; bitbangval/=MBPT; } if (bitbangval==0){ for (i=0; i<MAT1; i++){ //Generate a row of A for this permutation and victim y A[myid*MAT2+j*MAT1+i]=-(CHANNELGAP*((1<<bitload[j])-1)*tex2D(XTG,i,j))/tex2D(XTG,j,j); } } //Generate an item of B //B[myid*MAT1+j]=(NOISE*CHANNELGAP*((1<<bitload[j])-1))/d_XTG[j*MAT1+j]; B[myid*MAT1+j]=(NOISE*CHANNELGAP*((1<<bitload[j])-1))/tex2D(XTG,j,j); //Repair an item of A //__syncthreads(); //Seems to help with memory coalescing A[blockIdx.x*MAT2+j*MAT1+j]=1; } //Solve all A and B psds together. //requires grid(MBPT^N/threadmax,1,1) block(threadmax,1,1) __global__ void solve_permutations(FPT *A, FPT *B, int offset){ int id=blockIdx.x*blockDim.x+threadIdx.x; int bitbangval=id+offset; int p_pivot[MAT1],q_pivot[MAT1]; int i; //simulate bitload generation for in-place id check, and pivots at the same time for (i=0; i<MAT1; i++){ bitbangval/=MBPT; p_pivot[i]=q_pivot[i]=i; } //Stopper for invalid id's (where bitcombinations is less than maximum blocksize} if (bitbangval==0){ //do the magic d_pivot_decomp(&A[id*MAT2],&p_pivot[0],&q_pivot[0]); d_solve(&A[id*MAT2],&B[id*MAT1],&p_pivot[0],&q_pivot[0]); } } //Finally Calculate the LK_Max_permutations __global__ void lk_max_permutations(FPT *P, FPT *LK, FPT *lambdas, FPT *w){ int id=blockIdx.x*blockDim.x+threadIdx.x; int bitbangval=id; FPT lk=0; int bitload[MAT1], i, broken=0; //At this point, B is populated with the P results. 
for (i=0;i<MAT1;i++){ bitload[i]=bitbangval%MBPT; bitbangval/=MBPT; } if (bitbangval==0){//check for out of range id's for (i=0;i<MAT1;i++){ //Need to check for negative B's if (P[id*MAT1+i]<0) broken++; lk+=(bitload[i]*w[i])-(lambdas[i]*P[id*MAT1+i]); } //If anything is broken return a failing value (around -inf) if (broken==0) LK[id]=lk; else LK[id]=FAILVALUE; } } int main(){ //What are you actually trying to do: // generate 2 input matrixes, (NxN,Nx1) and 1 output (1xN) // do this over outerloop length for threadiding printf("Hello there\n"); cudaSetDevice(0); printf("Set Device\n"); const unsigned int matrixcount=4; const unsigned int ncombos=pow(MBPT,MAT1); const unsigned int outerloop=ncombos; const unsigned int matsize=MAT2*outerloop; const unsigned int vecsize=MAT1*outerloop; //float a[]={1,3,-2,3,5,6,2,4,3}; //const float exampleA[]={7,3,-11,-6,7,10,-11,2,-2}; //const float exampleA[]={4,3,6,3}; //const float b[]={5,7,8}; //const float exampleB[]={4,5}; //const float x[]={5e-10,7e-20,8e-8,7e-20,8e-8,5e-10,8e-20,5e-8,7e-10}; //xtg analogue const float x[]={//this one breaks 7.15337843e-09, 9.98540799e-18, 8.27619149e-13, 9.98540799e-18, 1.79722338e-07, 7.45386129e-06, 1.79722338e-07, 5.17430336e-10, 8.27619149e-13, 9.98540799e-18, 7.15337843e-09, 9.98540799e-18, 1.79722338e-07, 5.17430336e-10, 1.79722338e-07, 7.45386129e-06}; const float x2[]={//this one should be fine 7.66152695e-09, 1.08253155e-17, 8.72877254e-13, 1.08253155e-17, 1.79434933e-07, 7.76722237e-06, 1.79434933e-07, 5.30951476e-10, 8.72877254e-13, 1.08253155e-17, 7.66152695e-09, 1.08253155e-17, 1.79434933e-07, 5.30951476e-10, 1.79434933e-07, 7.76722237e-06}; const float x178[]={ 5.15676578e-11, 2.60163643e-20, 1.55231521e-14, 2.60163643e-20, 1.74280845e-07, 3.86365544e-07, 1.74280845e-07, 6.97834034e-11, 1.55231521e-14, 2.60163643e-20, 5.15676578e-11, 2.60163643e-20, 1.74280845e-07, 6.97834034e-11, 1.74280845e-07, 3.86365544e-07}; const float x179[]={//The 'new' broken one 4.87663895e-11, 2.42887090e-20, 1.48197082e-14, 2.42887090e-20, 1.73987318e-07, 3.73631407e-07, 1.73987318e-07, 6.81261232e-11, 1.48197082e-14, 2.42887090e-20, 4.87663895e-11, 2.42887090e-20, 1.73987318e-07, 6.81261232e-11, 1.73987318e-07, 3.73631407e-07}; //memory allocations int h_offset=0; FPT* h_A = (FPT*)malloc(sizeof(FPT)*matsize);//empty till after FPT* h_b = (FPT*)malloc(sizeof(FPT)*vecsize);//empty till after FPT* h_l = (FPT*)malloc(sizeof(FPT)*vecsize); FPT* h_x = (FPT*)malloc(sizeof(float)*MAT2); FPT* d_A; FPT* d_b; //FPT* d_x; cudaArray* d_x; FPT* d_l; CUDA_CHK(cudaMalloc, (&d_A, sizeof(FPT)*matsize)); CUDA_CHK(cudaMalloc, (&d_b, sizeof(FPT)*vecsize)); CUDA_CHK(cudaMalloc, (&d_l, sizeof(FPT)*vecsize)); XTG_desc=cudaCreateChannelDesc(32, 0, 0, 0, cudaChannelFormatKindFloat); //CUDA_CHK(cudaMalloc, (&d_x, sizeof(float)*MAT2)); CUDA_CHK(cudaMallocArray,( &d_x, &XTG_desc, sizeof(float)*MAT1, sizeof(float)*MAT1)); //don't need these on device till after permutation generation FPT* h_lk = (FPT*)malloc(sizeof(FPT)*outerloop);//empty till after FPT* h_w = (FPT*)malloc(sizeof(FPT)*vecsize); FPT* d_lk; FPT* d_w; printf("Mallocd\n"); //fill matrix and vector with stuff for (unsigned int i = 0;i<outerloop;i++){ //printf("\n%d\n",i); for (unsigned int j = 0; j < MAT1; j++){ h_l[(i*MAT1)+j]=1.0; h_w[(i*MAT1)+j]=1.0; if (i<MAT1) h_x[(i*MAT1)+j]=x179[i*MAT1+j]; //printf("\n%d:",j); //for (unsigned int k=0; k < MAT1; k++){ //printf("%d,",k); //h_A[(i*MAT2)+(j*MAT1)+k]=a(j,k); //} } } printf("Generated\n"); //copy values to device // CUDA_CHK(cudaMemcpy, 
(d_A, h_A, sizeof(FPT)*matsize, cudaMemcpyHostToDevice)); CUDA_CHK(cudaMemcpy, (d_l, h_l, sizeof(FPT)*vecsize, cudaMemcpyHostToDevice)); //CUDA_CHK(cudaMemcpy, (d_x, h_x, sizeof(float)*MAT2, cudaMemcpyHostToDevice)); CUDA_CHK(cudaMemcpyToArray,( d_x, 0, 0, h_x, sizeof(float)*MAT2, cudaMemcpyHostToDevice)); // set texture parameters XTG.addressMode[0] = cudaAddressModeWrap; XTG.addressMode[1] = cudaAddressModeWrap; XTG.filterMode = cudaFilterModeLinear; XTG.normalized = false; CUDA_CHK(cudaBindTextureToArray,(XTG,d_x,XTG_desc)); printf("Copied\n");/* for (unsigned int i=0; i<outerloop; i++){ printf("\n%d:x:A|l",i); //printf("%.3lf|",h_x[i*MAT1]); for (unsigned int j=0; j<MAT1; j++){ printf("\n%g:",h_x[i*MAT1+j]); for (unsigned int k=0;k<MAT1; k++){ printf("%g,",h_A[(i*MAT2)+(j*MAT1)+k]); } printf("|%g",h_l[i*MAT1+j]); } } puts("\n"); */ //parameters //dim3 blocksPerGrid((outerloop + threadsPerBlock.x -1)/threadsPerBlock.x,1,1); dim3 blocksPerGrid(ncombos,1,1); dim3 threadsPerBlock(MAT1,1,1); printf("TPB:%d,BPG:%d\n",threadsPerBlock.x,blocksPerGrid.x); //Execute cudaEvent_t evt_start, evt_stop; CUDA_CHK(cudaEventCreate, (&evt_start)); CUDA_CHK(cudaEventCreate, (&evt_stop)); CUDA_CHK(cudaEventRecord, (evt_start,0)); lk_prepare_permutations<<<blocksPerGrid,threadsPerBlock>>>(d_A,d_b,h_offset); //cudaDeviceSynchronize(); cudaThreadSynchronize(); Check_Kernel("Generate"); CUDA_CHK(cudaMemcpy, (h_A,d_A, sizeof(FPT)*matsize, cudaMemcpyDeviceToHost)); CUDA_CHK(cudaMemcpy, (h_b,d_b, sizeof(FPT)*vecsize, cudaMemcpyDeviceToHost)); //cudaDeviceSynchronize(); cudaThreadSynchronize(); for (unsigned int i=10000; i<10100; i++){ printf("\n%d:A|b\n",i); //printf("%.3lf|",h_x[i*MAT1]); for (unsigned int j=0; j<MAT1; j++){ for (unsigned int k=0;k<MAT1; k++){ printf("%g,",h_A[(i*MAT2)+(j*MAT1)+k]); } printf("|%g\n",h_b[i*MAT1+j]); } } puts("\n"); printf("Ran Generate\n"); //CUDA_CHK(cudaFree, (d_x)); CUDA_CHK(cudaMalloc, (&d_lk, sizeof(FPT)*outerloop)); CUDA_CHK(cudaMalloc, (&d_w, sizeof(FPT)*vecsize)); CUDA_CHK(cudaMemcpy, (d_w, h_w, sizeof(FPT)*vecsize, cudaMemcpyHostToDevice)); dim3 threadsPerBlock_lksolve(256,1,1); dim3 blocksPerGrid_lksolve(ncombos/256,1,1); solve_permutations<<<blocksPerGrid_lksolve,threadsPerBlock_lksolve>>>(d_A,d_b,h_offset); Check_Kernel("Solve"); printf("Ran Solve\n"); lk_max_permutations<<<blocksPerGrid_lksolve,threadsPerBlock_lksolve>>>(d_b,d_lk, d_l, d_w); Check_Kernel("Max"); CUDA_CHK(cudaMemcpy, (h_lk, d_lk, sizeof(FPT)*outerloop, cudaMemcpyDeviceToHost)); CUDA_CHK(cudaEventRecord, (evt_stop, 0)); CUDA_CHK(cudaEventSynchronize, (evt_stop)); float total_time; CUDA_CHK(cudaEventElapsedTime, (&total_time, evt_start, evt_stop)); float one_time = total_time * 1e-3; FPT lk_max=FAILVALUE; int lk; for (unsigned int i=0; i<outerloop; i++){ //printf("%d:A:LK:%g\n",i,h_lk[i]); if (h_lk[i]>lk_max){ lk=i; lk_max=h_lk[i]; printf("New LKMax:%d,(%g)\n",i,h_lk[i]); } //printf("%.3lf|",h_x[i*MAT1]); for (unsigned int j=0; j<MAT1; j++){ for (unsigned int k=0;k<MAT1; k++){ printf("%g,",h_A[(i*MAT2)+(j*MAT1)+k]); } puts("\n"); } } printf("time: %g s\nlkmax:%g@%d\n", one_time,h_lk[lk],lk); cudaEventDestroy(evt_start); cudaEventDestroy(evt_stop); free(h_A); free(h_b); free(h_x); free(h_w); free(h_l); free(h_lk); CUDA_CHK(cudaFree, (d_A)); CUDA_CHK(cudaFree, (d_b)); CUDA_CHK(cudaFree, (d_l)); CUDA_CHK(cudaFree, (d_lk)); CUDA_CHK(cudaFree, (d_w)); }
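This file binds the crosstalk-gain matrix through the legacy texture-reference API (the global texture<float,2,...> XTG plus cudaBindTextureToArray), which is deprecated and dropped by newer CUDA toolkits. A sketch of the equivalent setup with the texture-object API; the kernels would then take the cudaTextureObject_t as an extra argument and read with tex2D<float>(texObj, i, j) instead of tex2D(XTG, i, j).

// Texture-object replacement for the legacy XTG texture reference above.
#include <cuda_runtime.h>
#include <cstring>

cudaTextureObject_t make_xtg_texture(cudaArray_t d_x) {
  cudaResourceDesc resDesc;
  memset(&resDesc, 0, sizeof(resDesc));
  resDesc.resType = cudaResourceTypeArray;
  resDesc.res.array.array = d_x;

  cudaTextureDesc texDesc;
  memset(&texDesc, 0, sizeof(texDesc));
  texDesc.addressMode[0]   = cudaAddressModeWrap;   // same settings as the original binding
  texDesc.addressMode[1]   = cudaAddressModeWrap;
  texDesc.filterMode       = cudaFilterModeLinear;
  texDesc.readMode         = cudaReadModeElementType;
  texDesc.normalizedCoords = 0;

  cudaTextureObject_t texObj = 0;
  cudaCreateTextureObject(&texObj, &resDesc, &texDesc, NULL);
  return texObj;
}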
f138367fecf72f0a1f4c269e41c95e7cef04dc7b.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <stdio.h> #include <stdlib.h> #define MATRIX_SIZE 100 #define BLOCK_DIM 16 __global__ void matrixSquared(int *initialMatrix, int *finalMatrix) { int col = blockIdx.x * blockDim.x + threadIdx.x; int row = blockIdx.y * blockDim.y + threadIdx.y; int index = col + row * MATRIX_SIZE; if (col < MATRIX_SIZE && row < MATRIX_SIZE) { finalMatrix[index] = initialMatrix[index] * initialMatrix[index]; } } int main(int argc, char ** argv) { const int MATRIX_BYTES = MATRIX_SIZE * MATRIX_SIZE * sizeof(int); // generate the input matrix on the host int h_in[MATRIX_SIZE][MATRIX_SIZE]; printf("Initial matrix\n"); int i, j; for (i = 0; i < MATRIX_SIZE;i++) { for (j = 0; j < MATRIX_SIZE;j++) { h_in[i][j] = rand() % 10; printf("%d ", h_in[i][j]); } printf("\n"); } int h_out[MATRIX_SIZE][MATRIX_SIZE]; // declare GPU memory pointers int * d_in; int * d_out; // allocate GPU memory hipMalloc((void**) &d_in, MATRIX_BYTES); hipMalloc((void**) &d_out, MATRIX_BYTES); // transfer the matrix to the GPU hipMemcpy(d_in, h_in, MATRIX_BYTES, hipMemcpyHostToDevice); dim3 dimBlock(BLOCK_DIM, BLOCK_DIM); int dimX = (MATRIX_SIZE + dimBlock.x - 1) / dimBlock.x; int dimY = (MATRIX_SIZE + dimBlock.y - 1) / dimBlock.y; dim3 dimGrid(dimX, dimY); //printf("dimGrid.x = %d, dimGrid.y = %d\n", dimGrid.x, dimGrid.y); // launch the kernel hipLaunchKernelGGL(( matrixSquared), dim3(dimGrid), dim3(dimBlock), 0, 0, d_in, d_out); // copy back the result matrix to the CPU hipMemcpy(h_out, d_out, MATRIX_BYTES, hipMemcpyDeviceToHost); // print out the resulting matrix printf("Result matrix\n"); for (i = 0; i < MATRIX_SIZE;i++) { for (j = 0; j < MATRIX_SIZE;j++) { printf("%d ", h_out[i][j]); } printf("\n"); } hipFree(d_in); hipFree(d_out); return 0; }
f138367fecf72f0a1f4c269e41c95e7cef04dc7b.cu
#include <stdio.h> #include <stdlib.h> #define MATRIX_SIZE 100 #define BLOCK_DIM 16 __global__ void matrixSquared(int *initialMatrix, int *finalMatrix) { int col = blockIdx.x * blockDim.x + threadIdx.x; int row = blockIdx.y * blockDim.y + threadIdx.y; int index = col + row * MATRIX_SIZE; if (col < MATRIX_SIZE && row < MATRIX_SIZE) { finalMatrix[index] = initialMatrix[index] * initialMatrix[index]; } } int main(int argc, char ** argv) { const int MATRIX_BYTES = MATRIX_SIZE * MATRIX_SIZE * sizeof(int); // generate the input matrix on the host int h_in[MATRIX_SIZE][MATRIX_SIZE]; printf("Initial matrix\n"); int i, j; for (i = 0; i < MATRIX_SIZE;i++) { for (j = 0; j < MATRIX_SIZE;j++) { h_in[i][j] = rand() % 10; printf("%d ", h_in[i][j]); } printf("\n"); } int h_out[MATRIX_SIZE][MATRIX_SIZE]; // declare GPU memory pointers int * d_in; int * d_out; // allocate GPU memory cudaMalloc((void**) &d_in, MATRIX_BYTES); cudaMalloc((void**) &d_out, MATRIX_BYTES); // transfer the matrix to the GPU cudaMemcpy(d_in, h_in, MATRIX_BYTES, cudaMemcpyHostToDevice); dim3 dimBlock(BLOCK_DIM, BLOCK_DIM); int dimX = (MATRIX_SIZE + dimBlock.x - 1) / dimBlock.x; int dimY = (MATRIX_SIZE + dimBlock.y - 1) / dimBlock.y; dim3 dimGrid(dimX, dimY); //printf("dimGrid.x = %d, dimGrid.y = %d\n", dimGrid.x, dimGrid.y); // launch the kernel matrixSquared<<<dimGrid, dimBlock>>>(d_in, d_out); // copy back the result matrix to the CPU cudaMemcpy(h_out, d_out, MATRIX_BYTES, cudaMemcpyDeviceToHost); // print out the resulting matrix printf("Result matrix\n"); for (i = 0; i < MATRIX_SIZE;i++) { for (j = 0; j < MATRIX_SIZE;j++) { printf("%d ", h_out[i][j]); } printf("\n"); } cudaFree(d_in); cudaFree(d_out); return 0; }
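With MATRIX_SIZE = 100 and BLOCK_DIM = 16, dimGrid works out to (100 + 15) / 16 = 7 blocks per axis, i.e. a 112x112 thread footprint whose surplus threads are masked by the row/column guard in the kernel. A small check that could be appended to main() after the device-to-host copy, verifying every output element is the square of its input; the names h_in, h_out, MATRIX_SIZE, i, j all come from the file above.

// Host-side verification of the squared matrix; append after the cudaMemcpy back to h_out.
int errors = 0;
for (i = 0; i < MATRIX_SIZE; i++) {
  for (j = 0; j < MATRIX_SIZE; j++) {
    if (h_out[i][j] != h_in[i][j] * h_in[i][j]) {
      errors++;
    }
  }
}
if (errors == 0) {
  printf("PASS: all %d elements squared correctly\n", MATRIX_SIZE * MATRIX_SIZE);
} else {
  printf("FAIL: %d mismatches\n", errors);
}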
6182969590b660201b24228c211c67d19394bdde.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <stdlib.h> #include <stdio.h> #define TEST_SIZE 35 #define BLOCK_WIDTH 4 #define CEILING_DIVIDE(X, Y) (1 + (((X) - 1) / (Y))) // Computes a blockwise exclusive sum scan __global__ void partialScan(unsigned int *d_in, unsigned int *d_out, unsigned int *d_total, size_t n) { __shared__ unsigned int temp[BLOCK_WIDTH]; int tx = threadIdx.x; int bx = blockIdx.x; int index = BLOCK_WIDTH * bx + tx; if(index < n) { temp[tx] = d_in[index]; } else { temp[tx] = 0; } __syncthreads(); // Perform the actual scan for(int offset = 1; offset < BLOCK_WIDTH; offset <<= 1) { if(tx + offset < BLOCK_WIDTH) { temp[tx + offset] += temp[tx]; } __syncthreads(); } // Shift when copying the result so as to make it an exclusive scan if(tx +1 < BLOCK_WIDTH && index + 1 < n) { d_out[index + 1] = temp[tx]; } d_out[0] = 0; // Store the total sum of each block d_total[bx] = temp[BLOCK_WIDTH - 1]; } // Compute a map on a partial scan to create a total scan from __global__ void mapScan(unsigned int *d_array, unsigned int *d_total, size_t n) { int tx = threadIdx.x; int bx = blockIdx.x; int index = BLOCK_WIDTH * bx + tx; if(index < n) { d_array[index] += d_total[bx]; } } // Compute exclusive sum scan for arbitrary sized array (device pointers as input) void totalScan(unsigned int *d_in, unsigned int *d_out, size_t n) { size_t numBlocks = CEILING_DIVIDE(n, BLOCK_WIDTH); unsigned int *d_total; hipMalloc(&d_total, sizeof(unsigned int) * numBlocks); hipMemset(d_total, 0, sizeof(unsigned int) * numBlocks); hipLaunchKernelGGL(( partialScan), dim3(numBlocks), dim3(BLOCK_WIDTH), 0, 0, d_in, d_out, d_total, n); if(numBlocks > 1) { unsigned int *d_total_scanned; hipMalloc(&d_total_scanned, sizeof(unsigned int) * numBlocks); hipMemset(d_total_scanned, 0, sizeof(unsigned int) * numBlocks); totalScan(d_total, d_total_scanned, numBlocks); hipLaunchKernelGGL(( mapScan), dim3(numBlocks), dim3(BLOCK_WIDTH), 0, 0, d_out, d_total_scanned, n); hipFree(d_total_scanned); } hipFree(d_total); } //////////////////////////////////////////////////////////////////////////////// // Wrapper for totalScan (host pointers as input) void totalScanHost(unsigned int *h_in, unsigned int *h_out, size_t n) { unsigned int *d_in; unsigned int *d_out; size_t memsize = sizeof(unsigned int) * n; hipMalloc(&d_in, memsize); hipMalloc(&d_out, memsize); hipMemcpy(d_in, h_in, memsize, hipMemcpyHostToDevice); totalScan(d_in, d_out, n); hipMemcpy(h_out, d_out, memsize, hipMemcpyDeviceToHost); hipFree(d_in); hipFree(d_out); } int main(int argc, char **argv) { unsigned int *h_in; unsigned int *h_out; size_t memsize = sizeof(unsigned int) * TEST_SIZE; h_in = (unsigned int*)malloc(memsize); h_out = (unsigned int*)malloc(memsize); // Test values 1 .. TEST_SIZE for(int i=0; i<TEST_SIZE; i++){ h_in[i] = i+1; } // Compute totalScanHost(h_in, h_out, TEST_SIZE); // Print input printf("h_in = [ "); for(int i=0; i<TEST_SIZE; i++){ printf("%d ", h_in[i]); } printf("];\n"); // Print output printf("h_out = [ "); for(int i=0; i<TEST_SIZE; i++){ printf("%d ", h_out[i]); } printf("];\n"); free(h_in); free(h_out); return 0; }
6182969590b660201b24228c211c67d19394bdde.cu
#include <stdlib.h> #include <stdio.h> #define TEST_SIZE 35 #define BLOCK_WIDTH 4 #define CEILING_DIVIDE(X, Y) (1 + (((X) - 1) / (Y))) // Computes a blockwise exclusive sum scan __global__ void partialScan(unsigned int *d_in, unsigned int *d_out, unsigned int *d_total, size_t n) { __shared__ unsigned int temp[BLOCK_WIDTH]; int tx = threadIdx.x; int bx = blockIdx.x; int index = BLOCK_WIDTH * bx + tx; if(index < n) { temp[tx] = d_in[index]; } else { temp[tx] = 0; } __syncthreads(); // Perform the actual scan (Hillis-Steele inclusive scan; each step reads its neighbour before any thread writes, so there is no shared-memory race) for(int offset = 1; offset < BLOCK_WIDTH; offset <<= 1) { unsigned int val = (tx >= offset) ? temp[tx - offset] : 0; __syncthreads(); if(tx >= offset) { temp[tx] += val; } __syncthreads(); } // Shift when copying the result so as to make it an exclusive scan; the first element of every block is written as 0 so no output position is left uninitialized if(index < n) { d_out[index] = (tx == 0) ? 0 : temp[tx - 1]; } // Store the total sum of each block d_total[bx] = temp[BLOCK_WIDTH - 1]; } // Compute a map on a partial scan to create a total scan from __global__ void mapScan(unsigned int *d_array, unsigned int *d_total, size_t n) { int tx = threadIdx.x; int bx = blockIdx.x; int index = BLOCK_WIDTH * bx + tx; if(index < n) { d_array[index] += d_total[bx]; } } // Compute exclusive sum scan for arbitrary sized array (device pointers as input) void totalScan(unsigned int *d_in, unsigned int *d_out, size_t n) { size_t numBlocks = CEILING_DIVIDE(n, BLOCK_WIDTH); unsigned int *d_total; cudaMalloc(&d_total, sizeof(unsigned int) * numBlocks); cudaMemset(d_total, 0, sizeof(unsigned int) * numBlocks); partialScan<<<numBlocks, BLOCK_WIDTH>>>(d_in, d_out, d_total, n); if(numBlocks > 1) { unsigned int *d_total_scanned; cudaMalloc(&d_total_scanned, sizeof(unsigned int) * numBlocks); cudaMemset(d_total_scanned, 0, sizeof(unsigned int) * numBlocks); totalScan(d_total, d_total_scanned, numBlocks); mapScan<<<numBlocks, BLOCK_WIDTH>>>(d_out, d_total_scanned, n); cudaFree(d_total_scanned); } cudaFree(d_total); } //////////////////////////////////////////////////////////////////////////////// // Wrapper for totalScan (host pointers as input) void totalScanHost(unsigned int *h_in, unsigned int *h_out, size_t n) { unsigned int *d_in; unsigned int *d_out; size_t memsize = sizeof(unsigned int) * n; cudaMalloc(&d_in, memsize); cudaMalloc(&d_out, memsize); cudaMemcpy(d_in, h_in, memsize, cudaMemcpyHostToDevice); totalScan(d_in, d_out, n); cudaMemcpy(h_out, d_out, memsize, cudaMemcpyDeviceToHost); cudaFree(d_in); cudaFree(d_out); } int main(int argc, char **argv) { unsigned int *h_in; unsigned int *h_out; size_t memsize = sizeof(unsigned int) * TEST_SIZE; h_in = (unsigned int*)malloc(memsize); h_out = (unsigned int*)malloc(memsize); // Test values 1 .. TEST_SIZE for(int i=0; i<TEST_SIZE; i++){ h_in[i] = i+1; } // Compute totalScanHost(h_in, h_out, TEST_SIZE); // Print input printf("h_in = [ "); for(int i=0; i<TEST_SIZE; i++){ printf("%d ", h_in[i]); } printf("];\n"); // Print output printf("h_out = [ "); for(int i=0; i<TEST_SIZE; i++){ printf("%d ", h_out[i]); } printf("];\n"); free(h_in); free(h_out); return 0; }
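A small host-side reference makes it easy to check totalScanHost on sizes that are not multiples of BLOCK_WIDTH, such as TEST_SIZE = 35. This is a sketch of such a check, using the same unsigned int element type as above; the referenceExclusiveScan name and the comparison snippet are illustrative, not part of the original file:

#include <stdio.h>
#include <stdlib.h>

// Sequential exclusive prefix sum: out[0] = 0, out[i] = in[0] + ... + in[i-1].
static void referenceExclusiveScan(const unsigned int *in, unsigned int *out, size_t n) {
    unsigned int running = 0;
    for (size_t i = 0; i < n; ++i) {
        out[i] = running;
        running += in[i];
    }
}

// Example check against the GPU result produced by totalScanHost():
//   referenceExclusiveScan(h_in, h_ref, TEST_SIZE);
//   for (int i = 0; i < TEST_SIZE; ++i) {
//       if (h_out[i] != h_ref[i]) {
//           printf("mismatch at %d: got %u, expected %u\n", i, h_out[i], h_ref[i]);
//       }
//   }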
a3818e12b3e4baf5f3e5551ff7f18862e8df9999.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <ATen/ATen.h> #include <ATen/Dispatch.h> #include <THH/THHAtomics.cuh> #include <stdio.h> #include <math.h> #include <numeric> #include <float.h> #ifndef AT_CHECK #define AT_CHECK TORCH_CHECK #endif using namespace at; #define CUDA_KERNEL_LOOP(i, n) \ for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < (n); \ i += blockDim.x * gridDim.x) const int CUDA_NUM_THREADS = 512; const int kMaxGridNum = 65535; inline int GET_BLOCKS(const int N) { return ::min(kMaxGridNum, (N + CUDA_NUM_THREADS - 1) / CUDA_NUM_THREADS); } void print_tsize(at::Tensor t, const char *msg) { printf("%s size: "); for (int i = 0; i < t.ndimension(); i++) { printf("%d ", int(t.size(i))); } printf("\n"); } // this ad-hoc converts from targets (l in [1]) to augmented targets (l' in [1]) note that no bound-checking is done // __restrict__ impact to be measured, https://devblogs.nvidia.com/cuda-pro-tip-optimize-pointer-aliasing/ template <typename scalar_t> __device__ static inline int64_t get_target_prime( const scalar_t* __restrict__ target, int64_t offset, int64_t stride, int64_t idx, int64_t BLANK ) { if (idx % 2 == 0) { return BLANK; } else { return target[offset + stride * (idx / 2)]; } } template <typename scalar_t> __device__ static inline scalar_t safe_log_add(scalar_t a, scalar_t b) { scalar_t m=((a > b) ? a : b); if (m == -INFINITY) m = 0; return (::log(::exp(a-m) + ::exp(b-m)) + m); } template <typename scalar_t> __global__ void ctc2d_log_alpha_gpu_kernel( const int64_t n, scalar_t* __restrict__ log_alpha_data, const scalar_t* log_probs_data, const int64_t* __restrict__ input_lengths, int max_input_length, const int64_t* __restrict__ targets_data, const int64_t* __restrict__ target_lengths, const int max_target_length, scalar_t* __restrict__ neg_log_likelihood_data, int64_t lp_input_stride, int64_t lp_height_stride, int64_t lp_batch_stride, int64_t lp_char_stride, int64_t la_batch_stride, int64_t la_input_stride, int64_t la_height_stride, int64_t la_target_stride, int64_t tg_batch_stride, int64_t tg_target_stride, int64_t batch_size, int64_t batch_per_block, int64_t height, int64_t BLANK ) { CUDA_KERNEL_LOOP(index, n) { int64_t b = (index - blockIdx.x*blockDim.x) / (2*max_target_length+1) + blockIdx.x*batch_per_block; int64_t s = (index - blockIdx.x*blockDim.x) % (2*max_target_length+1); if ((b >= batch_size) || (b >= (blockIdx.x+1)*batch_per_block)) return; int64_t input_length = input_lengths[b]; int64_t target_length = target_lengths[b]; // log_probs_data ==> [T, H, N, C] // log_alpha_data ==> [N, T, H, 2*S+1] int64_t lp_batch_offset = b*lp_batch_stride; int64_t la_batch_offset = b*la_batch_stride; int64_t tg_batch_offset = b*tg_batch_stride; scalar_t la; switch (s) { case 0: for (int64_t h=0; h < height; h++) { la = log_probs_data[lp_height_stride*h + lp_batch_offset + lp_char_stride*BLANK]; if (s < 2*max_target_length+1) log_alpha_data[la_batch_offset + la_height_stride*h + la_target_stride*s] = la; } break; case 1: for (int64_t h=0; h < height; h++) { if (target_length > 0) { la = log_probs_data[lp_height_stride*h + lp_batch_offset + lp_char_stride*get_target_prime(targets_data, tg_batch_offset, tg_target_stride, 1, BLANK)]; } else { la = -INFINITY; } if (s < 2*max_target_length+1) log_alpha_data[la_batch_offset + la_height_stride*h + la_target_stride*s] = la; } break; default: la = -INFINITY; if (s < 2*max_target_length+1) { for (int64_t h=0; h < height; h++) log_alpha_data[la_batch_offset + 
la_height_stride*h + la_target_stride*s] = la; } } // These two only depend on s, so we can cache them. int64_t current_char; // l_s in eq (6) bool have_three; // flag which of the two cases in eq (6) we have if (s < 2*target_length+1) { current_char = get_target_prime(targets_data, tg_batch_offset, tg_target_stride, s, BLANK); have_three = ((s > 1) && (get_target_prime(targets_data, tg_batch_offset, tg_target_stride, s-2, BLANK) != current_char)); } else { current_char = BLANK; have_three = false; } for (int64_t t=1; t < max_input_length; t++) { // on cuda 9 we might use partial synchronization of only the threads within the same batch __syncthreads(); if ((t < input_length) && (target_length > 0) && (s < 2*target_length+1)) { // only for valid t, s. This is equation (6) and (7), la1, la2, la3 are the three summands, // lamax is the maximum for the logsumexp trick. scalar_t la1 = log_alpha_data[la_batch_offset + la_input_stride*(t-1) + la_height_stride*0 + la_target_stride*s]; for (int64_t h=1; h < height; h++) { la1 = safe_log_add(la1, log_alpha_data[la_batch_offset + la_input_stride*(t-1) + la_height_stride*h + la_target_stride*s]); } scalar_t lamax = la1; scalar_t la2, la3; if (s > 0) { la2 = log_alpha_data[la_batch_offset + la_input_stride*(t-1) + la_height_stride*0 + la_target_stride*(s-1)]; for (int64_t h=1; h < height; h++) { la2 = safe_log_add(la2, log_alpha_data[la_batch_offset + la_input_stride*(t-1) + la_height_stride*h + la_target_stride*(s-1)]); } if (la2 > lamax) lamax = la2; } else { la2 = -INFINITY; } if (have_three) { la3 = log_alpha_data[la_batch_offset + la_input_stride*(t-1) + la_height_stride*0 + la_target_stride*(s-2)]; for (int64_t h=1; h < height; h++) { la3 = safe_log_add(la3, log_alpha_data[la_batch_offset + la_input_stride*(t-1) + la_height_stride*h + la_target_stride*(s-2)]); } if (la3 > lamax) lamax = la3; } else { la3 = -INFINITY; } // when all are neginf. (then the whole thing is neginf, but we can pretend) if (lamax == -INFINITY) lamax = 0; for (int64_t h=0; h < height; h++) { log_alpha_data[la_batch_offset + la_input_stride*t + la_height_stride*h + la_target_stride*s] = ::log(::exp(la1-lamax) + ::exp(la2-lamax) + ::exp(la3-lamax)) + lamax + log_probs_data[lp_input_stride*t + lp_height_stride*h + lp_batch_offset + lp_char_stride*current_char]; } } else { // otherwise we just set to neginf if (s < 2*max_target_length+1) { for (int64_t h = 0; h < height; h++) { log_alpha_data[la_batch_offset + la_input_stride * t + la_height_stride * h + la_target_stride * s] = -INFINITY; } } } } // on cuda 9 we might use partial synchronization of only the threads within the same batch __syncthreads(); // compute the loss (eq (8)) if (s == 0) { scalar_t l1 = log_alpha_data[la_batch_offset + la_input_stride*(input_length-1) + la_height_stride*0 + la_target_stride*(target_length*2)]; for (int64_t h=1; h < height; h++) { l1 = safe_log_add(l1, log_alpha_data[la_batch_offset + la_input_stride*(input_length-1) + la_height_stride*h + la_target_stride*(target_length*2)]); } scalar_t l2 = log_alpha_data[la_batch_offset + la_input_stride*(input_length-1) + la_height_stride*0 + la_target_stride*(target_length*2-1)]; for (int64_t h=1; h < height; h++) { l2 = safe_log_add(l2, log_alpha_data[la_batch_offset + la_input_stride*(input_length-1) + la_height_stride*h + la_target_stride*(target_length*2-1)]); } scalar_t m = ((l1 > l2) ? 
l1 : l2); if (m == -INFINITY) m = 0; scalar_t log_likelihood = ::log(::exp(l1-m)+::exp(l2-m))+m; neg_log_likelihood_data[b] = -log_likelihood; } } } std::tuple<at::Tensor, at::Tensor> ctc2d_gpu_template( const at::Tensor log_probs, const at::Tensor targets, const at::Tensor input_lengths, const at::Tensor target_lengths, int64_t BLANK, float TINY ) { int64_t max_target_length = targets.size(1); AT_CHECK((2 * max_target_length + 1) <= CUDA_NUM_THREADS, "max target length out of range, got ", max_target_length, ", must less than ", CUDA_NUM_THREADS); int64_t max_input_length = log_probs.size(0); int64_t height = log_probs.size(1); int64_t batch_size = log_probs.size(2); int64_t batch_per_block = CUDA_NUM_THREADS / (2 * max_target_length + 1); const int num_kernels = (batch_size + batch_per_block - 1) / batch_per_block * CUDA_NUM_THREADS; // N T H 2*S+1 at::Tensor log_alpha = at::zeros( {batch_size, log_probs.size(0), log_probs.size(1), 2*max_target_length+1}, log_probs.options() ); at::Tensor neg_log_likelihood = at::zeros({batch_size}, log_probs.options()); AT_DISPATCH_FLOATING_TYPES_AND_HALF(log_probs.type(), "ctc2d_log_alpha_gpu_template", ([&] { hipLaunchKernelGGL(( ctc2d_log_alpha_gpu_kernel), dim3(GET_BLOCKS(num_kernels)), dim3(CUDA_NUM_THREADS), 0, 0, num_kernels, log_alpha.data<scalar_t>(), log_probs.data<scalar_t>(), input_lengths.data<int64_t>(), max_input_length, targets.data<int64_t>(), target_lengths.data<int64_t>(), max_target_length, neg_log_likelihood.data<scalar_t>(), log_probs.stride(0), log_probs.stride(1), log_probs.stride(2), log_probs.stride(3), log_alpha.stride(0), log_alpha.stride(1), log_alpha.stride(2), log_alpha.stride(3), targets.stride(0), targets.stride(1), batch_size, batch_per_block, height, BLANK ); })); return std::make_tuple(neg_log_likelihood, log_alpha); } template <typename scalar_t> __global__ void ctc2d_log_beta_gpu_kernel( const int64_t n, scalar_t* __restrict__ log_beta_data, const scalar_t* log_probs_data, const int64_t* __restrict__ input_lengths, int max_input_length, const int64_t* __restrict__ targets_data, const int64_t* __restrict__ target_lengths, const int max_target_length, int64_t lp_input_stride, int64_t lp_height_stride, int64_t lp_batch_stride, int64_t lp_char_stride, int64_t lb_batch_stride, int64_t lb_input_stride, int64_t lb_height_stride, int64_t lb_target_stride, int64_t tg_batch_stride, int64_t tg_target_stride, int64_t batch_size, int64_t batch_per_block, int64_t height, int64_t BLANK ) { CUDA_KERNEL_LOOP(index, n) { int64_t b = (index - blockIdx.x*blockDim.x) / (2*max_target_length+1) + blockIdx.x*batch_per_block; int64_t s = (index - blockIdx.x*blockDim.x) % (2*max_target_length+1); if ((b >= batch_size) || (b >= (blockIdx.x+1)*batch_per_block)) return; int64_t input_length = input_lengths[b]; int64_t target_length = target_lengths[b]; // log_probs_data ==> [T, H, N, C] // log_beta_data ==> [N, T, H, 2*S+1] int64_t lp_batch_offset = b*lp_batch_stride; int64_t lb_batch_offset = b*lb_batch_stride; int64_t tg_batch_offset = b*tg_batch_stride; scalar_t lb; if (s == 2*target_length) { for (int64_t h=0; h < height; h++) { lb = log_probs_data[lp_input_stride*(input_length-1) + lp_height_stride*h + lp_batch_offset + lp_char_stride*BLANK]; log_beta_data[lb_batch_offset + lb_input_stride*(input_length-1) + lb_height_stride*h + lb_target_stride*s] = lb; } } else if ((target_length > 0) && (s == 2*target_length-1)) { int64_t current_target_prime = get_target_prime(targets_data, tg_batch_offset, tg_target_stride, s, BLANK); for (int64_t 
h=0; h < height; h++) { lb = log_probs_data[lp_input_stride*(input_length-1) + lp_height_stride*h + lp_batch_offset + lp_char_stride*current_target_prime]; log_beta_data[lb_batch_offset + lb_input_stride*(input_length-1) + lb_height_stride*h + lb_target_stride*s] = lb; } } else { for (int64_t h=0; h < height; h++) { log_beta_data[lb_batch_offset + lb_input_stride*(input_length-1) + lb_height_stride*h + lb_target_stride*s] = -INFINITY; } } int64_t current_target_prime; bool have_three; if (s < 2*target_length+1) { current_target_prime = get_target_prime(targets_data, tg_batch_offset, tg_target_stride, s, BLANK); have_three = ((s < 2*target_length-1) && (get_target_prime(targets_data, tg_batch_offset, tg_target_stride, s+2, BLANK) != current_target_prime)); } else { current_target_prime = BLANK; have_three = false; } // now go backward in t. Note that we need to skip the last timestep that we did above. for (int64_t t=max_input_length-2; t>=0; t--) { __syncthreads(); // on cuda 9 we might use partial synchronization of only the threads within the same batch item if ((t < input_length-1) && (target_length > 0) && (s < 2*target_length+1)) { scalar_t lb1 = log_beta_data[lb_batch_offset + lb_input_stride*(t+1) + lb_height_stride*0 + lb_target_stride*s]; for (int64_t h=1; h < height; h++) { lb1 = safe_log_add(lb1, log_beta_data[lb_batch_offset + lb_input_stride*(t+1) + lb_height_stride*h + lb_target_stride*s]); } scalar_t lbmax = lb1; scalar_t lb2, lb3; if (s < 2*target_length) { lb2 = log_beta_data[ lb_batch_offset + lb_input_stride*(t+1) + lb_height_stride*0 + lb_target_stride*(s+1)]; for (int64_t h=1; h < height; h++) { lb2 = safe_log_add(lb2, log_beta_data[lb_batch_offset + lb_input_stride*(t+1) + lb_height_stride*h + lb_target_stride*(s+1)]); } if (lb2 > lbmax) lbmax = lb2; } else { lb2 = -INFINITY; } if (have_three) { lb3 = log_beta_data[lb_batch_offset + lb_input_stride*(t+1) + lb_height_stride*0 + lb_target_stride*(s+2)]; for (int64_t h=1; h < height; h++) { lb3 = safe_log_add(lb3, log_beta_data[lb_batch_offset + lb_input_stride*(t+1) + lb_height_stride*h + lb_target_stride*(s+2)]); } if (lb3 > lbmax) lbmax = lb3; } else { lb3 = -INFINITY; } if (lbmax == -INFINITY) lbmax = 0; scalar_t tmp = ::log(::exp(lb1-lbmax) + ::exp(lb2-lbmax) + ::exp(lb3-lbmax)) + lbmax; for (int64_t h=0; h < height; h++) { log_beta_data[lb_batch_offset + lb_input_stride*t + lb_height_stride*h + lb_target_stride*s] = tmp + log_probs_data[lp_input_stride*t + lp_height_stride*h + lp_batch_offset + lp_char_stride*current_target_prime]; } } else if ((target_length == 0) || (s > 2*target_length+1) || (t >= input_length)) { for (int64_t h=0; h < height; h++) { log_beta_data[lb_batch_offset + lb_input_stride*t + lb_height_stride*h + lb_target_stride*s] = -INFINITY; } } } } } template <typename scalar_t> __global__ void ctc2d_backward_collect_nonblank_gpu_kernel( const int64_t n, scalar_t* __restrict__ gradient_data, const scalar_t* __restrict__ grad_out_data, int64_t grad_out_batch_stride, const scalar_t* __restrict__ log_alpha_data, const scalar_t* __restrict__ log_beta_data, const scalar_t* log_probs_data, const int64_t* __restrict__ input_lengths, int64_t max_input_length, const int64_t* __restrict__ targets_data, const int64_t* __restrict__ target_lengths, int64_t max_target_length, const scalar_t* __restrict__ neg_log_likelihood_data, int64_t gr_input_stride, int64_t gr_height_stride, int64_t gr_batch_stride, int64_t gr_char_stride, int64_t lp_input_stride, int64_t lp_height_stride, int64_t lp_batch_stride, int64_t 
lp_char_stride, int64_t la_batch_stride, int64_t la_input_stride, int64_t la_height_stride, int64_t la_target_stride, int64_t lb_batch_stride, int64_t lb_input_stride, int64_t lb_height_stride, int64_t lb_target_stride, int64_t tg_batch_stride, int64_t tg_target_stride, int64_t batch_size, int64_t batch_per_block, int64_t height, int64_t BLANK, bool zero_infinity ) { CUDA_KERNEL_LOOP(index, n) { int64_t b = (index - blockIdx.x*blockDim.x) / max_target_length + blockIdx.x*batch_per_block; int64_t s = (index - blockIdx.x*blockDim.x) % max_target_length; if (b >= batch_size) return; int64_t input_length = input_lengths[b]; int64_t target_length = target_lengths[b]; int64_t gr_batch_offset = b*gr_batch_stride; int64_t lp_batch_offset = b*lp_batch_stride; int64_t la_batch_offset = b*la_batch_stride; int64_t lb_batch_offset = b*lb_batch_stride; int64_t tg_batch_offset = b*tg_batch_stride; if (s >= target_length) return; int64_t target = targets_data[tg_batch_offset + s*tg_target_stride]; scalar_t nll = neg_log_likelihood_data[b]; scalar_t gr = grad_out_data[b * grad_out_batch_stride]; if (zero_infinity && nll == INFINITY) return; for (int64_t t = 0; t < input_length; t++) { for (int64_t h = 0; h < height; h++) { scalar_t lp = log_probs_data[lp_batch_offset + lp_input_stride*t + lp_height_stride*h + lp_char_stride*target]; atomicAdd(&gradient_data[gr_batch_offset + gr_input_stride*t + gr_height_stride*h + gr_char_stride*target], -::exp(log_alpha_data[la_batch_offset + la_input_stride*t + la_height_stride*h + la_target_stride*(s*2+1)] + log_beta_data[lb_batch_offset + lb_input_stride*t + lb_height_stride*h + lb_target_stride*(s*2+1)] + nll - lp) * gr); } } } } template <typename scalar_t> __global__ void ctc2d_backward_collect_gpu_kernel( const int64_t n, scalar_t* __restrict__ gradient_data, const scalar_t* __restrict__ grad_out_data, int64_t grad_out_batch_stride, const scalar_t* __restrict__ log_alpha_data, const scalar_t* __restrict__ log_beta_data, const scalar_t* log_probs_data, const int64_t* __restrict__ input_lengths, int64_t max_input_length, const int64_t* __restrict__ targets_data, const int64_t* __restrict__ target_lengths, int64_t max_target_length, const scalar_t* __restrict__ neg_log_likelihood_data, int64_t gr_input_stride, int64_t gr_height_stride, int64_t gr_batch_stride, int64_t gr_char_stride, int64_t lp_input_stride, int64_t lp_height_stride, int64_t lp_batch_stride, int64_t lp_char_stride, int64_t la_batch_stride, int64_t la_input_stride, int64_t la_height_stride, int64_t la_target_stride, int64_t lb_batch_stride, int64_t lb_input_stride, int64_t lb_height_stride, int64_t lb_target_stride, int64_t tg_batch_stride, int64_t tg_target_stride, int64_t batch_size, int64_t num_labels, int64_t batch_per_block, int64_t height, int64_t BLANK, bool zero_infinity ) { CUDA_KERNEL_LOOP(index, n) { int64_t b = (index - blockIdx.x*blockDim.x) / max_input_length + blockIdx.x*batch_per_block; int64_t t = (index - blockIdx.x*blockDim.x) % max_input_length; if (b >= batch_size) return; int64_t input_length = input_lengths[b]; int64_t target_length = target_lengths[b]; int64_t gr_batch_offset = b*gr_batch_stride; int64_t lp_batch_offset = b*lp_batch_stride; int64_t la_batch_offset = b*la_batch_stride; int64_t lb_batch_offset = b*lb_batch_stride; int64_t tg_batch_offset = b*tg_batch_stride; // collected[b, t, h, target'[s]] "log+=" log_alpha[t, s]+log_beta[t, s] for (int64_t s = 0; s < 2*max_target_length+1; s++) { if ((target_length > 0) && (s < 2*target_length+1)) { int64_t 
current_target_prime = get_target_prime(targets_data, tg_batch_offset, tg_target_stride, s, BLANK); /*scalar_t laaa = log_alpha_data[la_batch_offset + la_input_stride*t + la_height_stride*0 + la_target_stride*s]; for (int64_t h = 1; h < height; h++) { laaa = safe_log_add(laaa, log_alpha_data[la_batch_offset + la_input_stride*t + la_height_stride*h + la_target_stride*s]); } scalar_t lbbb = log_beta_data[lb_batch_offset + lb_input_stride*t + lb_height_stride*0 + lb_target_stride*s]; for (int64_t h = 1; h < height; h++) { lbbb = safe_log_add(lbbb, log_beta_data[lb_batch_offset + lb_input_stride*t + lb_height_stride*h + lb_target_stride*s]); }*/ for (int64_t h = 0; h < height; h++) { scalar_t laaa = log_alpha_data[la_batch_offset + la_input_stride*t + la_height_stride*h + la_target_stride*s]; scalar_t lbbb = log_beta_data[lb_batch_offset + lb_input_stride*t + lb_height_stride*h + lb_target_stride*s]; scalar_t log_alpha_beta = laaa + lbbb; scalar_t& lcab = gradient_data[gr_batch_offset + gr_input_stride*t + gr_height_stride*h + gr_char_stride*current_target_prime]; if (lcab == -INFINITY) { lcab = log_alpha_beta; } else { scalar_t max = ((lcab > log_alpha_beta) ? lcab : log_alpha_beta); lcab = ::log(::exp(lcab-max)+::exp(log_alpha_beta-max))+max; } } } } scalar_t nll = neg_log_likelihood_data[b]; scalar_t gr = grad_out_data[b * grad_out_batch_stride]; for (int64_t c = 0; c < num_labels; c++) { for (int64_t h = 0; h < height; h++) { scalar_t& res = gradient_data[gr_batch_offset + gr_input_stride*t + gr_height_stride*h + gr_char_stride*c]; if (t < input_length && (! zero_infinity || nll != INFINITY)) { scalar_t lp = log_probs_data[lp_batch_offset + lp_input_stride*t + lp_height_stride*h + lp_char_stride*c]; if (res == -INFINITY) res = 0; else res = (::exp(lp) - ::exp(res + nll - lp)) * gr; } else { res = 0.; } } } } } at::Tensor ctc2d_gpu_backward_template( const at::Tensor grad_out, const at::Tensor log_probs, const at::Tensor targets, const at::Tensor input_lengths, const at::Tensor target_lengths, const at::Tensor neg_log_likelihood, const at::Tensor log_alpha, int64_t BLANK ) { bool zero_infinity = 0; int64_t max_target_length = targets.size(1); int64_t max_input_length = log_probs.size(0); int64_t height = log_probs.size(1); int64_t batch_size = log_probs.size(2); int64_t num_labels = log_probs.size(3); int64_t batch_per_block = CUDA_NUM_THREADS / (2 * max_target_length + 1); int64_t num_kernels = (batch_size + batch_per_block - 1) / batch_per_block * CUDA_NUM_THREADS; at::Tensor log_beta = at::zeros( {batch_size, log_probs.size(0), log_probs.size(1), 2*max_target_length+1}, log_probs.options() ); AT_DISPATCH_FLOATING_TYPES_AND_HALF(log_probs.type(), "ctc2d_log_beta_gpu_template", ([&] { hipLaunchKernelGGL(( ctc2d_log_beta_gpu_kernel), dim3(GET_BLOCKS(num_kernels)), dim3(CUDA_NUM_THREADS), 0, 0, num_kernels, log_beta.data<scalar_t>(), log_probs.data<scalar_t>(), input_lengths.data<int64_t>(), max_input_length, targets.data<int64_t>(), target_lengths.data<int64_t>(), max_target_length, log_probs.stride(0), log_probs.stride(1), log_probs.stride(2), log_probs.stride(3), log_beta.stride(0), log_beta.stride(1), log_beta.stride(2), log_beta.stride(3), targets.stride(0), targets.stride(1), batch_size, batch_per_block, height, BLANK ); })); at::Tensor grad = at::full_like(log_probs, -INFINITY); // bool is_large = (2*log_probs.size(0)+(24*batch_size)/10+(2*num_labels)/10) > 450; bool is_large = 0; if (is_large) { // large alphabet, large batch // std::cout << "+++Large+++" << std::endl; // this 
computes the probs, minuend in (16) exp_out(grad, log_probs); // now we compute the subtrahend for the blanks. It is a straightforward reduction because we know that // blanks are in every other position. // maybe we should kernelize this, too. auto grad_blank = grad.narrow(3, BLANK, 1); grad_blank -= (at::logsumexp( log_alpha.as_strided({batch_size, log_alpha.size(1), log_alpha.size(2), max_target_length+1}, {log_alpha.stride(0), log_alpha.stride(1), log_alpha.stride(2), log_alpha.stride(3)*2}) + log_beta.as_strided({batch_size, log_beta.size(1), log_beta.size(2), max_target_length+1}, {log_beta.stride(0), log_beta.stride(1), log_beta.stride(2), log_beta.stride(3)*2}), 3, true) .permute({1, 2, 0, 3}) .add_(neg_log_likelihood.view({1, 1, batch_size, 1})) .sub_(log_probs.narrow(3, BLANK, 1)) .exp_() ); grad *= grad_out.view({1, 1, batch_size, 1}); // For the non-blank characters, we use a kernel to compute the subtrahend. // Again we might configure block and grid in a better way. batch_per_block = CUDA_NUM_THREADS / max_target_length; num_kernels = (batch_size + batch_per_block - 1) / batch_per_block * CUDA_NUM_THREADS; AT_DISPATCH_FLOATING_TYPES_AND_HALF(log_probs.type(), "ctc2d_collect_nonblank", ([&] { hipLaunchKernelGGL(( ctc2d_backward_collect_nonblank_gpu_kernel), dim3(GET_BLOCKS(num_kernels)), dim3(CUDA_NUM_THREADS), 0, 0, num_kernels, grad.data<scalar_t>(), grad_out.data<scalar_t>(), grad_out.stride(0), log_alpha.data<scalar_t>(), log_beta.data<scalar_t>(), log_probs.data<scalar_t>(), input_lengths.data<int64_t>(), max_input_length, targets.data<int64_t>(), target_lengths.data<int64_t>(), max_target_length, neg_log_likelihood.data<scalar_t>(), grad.stride(0), grad.stride(1), grad.stride(2), grad.stride(3), log_probs.stride(0), log_probs.stride(1), log_probs.stride(2), log_probs.stride(3), log_alpha.stride(0), log_alpha.stride(1), log_alpha.stride(2), log_alpha.stride(3), log_beta.stride(0), log_beta.stride(1), log_beta.stride(2), log_beta.stride(3), targets.stride(0), targets.stride(1), batch_size, batch_per_block, height, BLANK, zero_infinity ); })); } else { // small problem, use naive algorithm // std::cout << "+++Small+++" << std::endl; batch_per_block = CUDA_NUM_THREADS / max_input_length; num_kernels = (batch_size + batch_per_block - 1) / batch_per_block * CUDA_NUM_THREADS; AT_DISPATCH_FLOATING_TYPES_AND_HALF(log_probs.type(), "ctc2d_collect_all", ([&] { hipLaunchKernelGGL(( ctc2d_backward_collect_gpu_kernel), dim3(GET_BLOCKS(num_kernels)), dim3(CUDA_NUM_THREADS), 0, 0, num_kernels, grad.data<scalar_t>(), grad_out.data<scalar_t>(), grad_out.stride(0), log_alpha.data<scalar_t>(), log_beta.data<scalar_t>(), log_probs.data<scalar_t>(), input_lengths.data<int64_t>(), max_input_length, targets.data<int64_t>(), target_lengths.data<int64_t>(), max_target_length, neg_log_likelihood.data<scalar_t>(), grad.stride(0), grad.stride(1), grad.stride(2), grad.stride(3), log_probs.stride(0), log_probs.stride(1), log_probs.stride(2), log_probs.stride(3), log_alpha.stride(0), log_alpha.stride(1), log_alpha.stride(2), log_alpha.stride(3), log_beta.stride(0), log_beta.stride(1), log_beta.stride(2), log_beta.stride(3), targets.stride(0), targets.stride(1), batch_size, num_labels, batch_per_block, height, BLANK, zero_infinity ); })); } return grad; }
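get_target_prime above walks the CTC augmented target sequence l' = (blank, l1, blank, l2, ..., l_S, blank) without materializing it: even positions are the blank symbol and odd position idx maps to target[idx / 2]. A minimal host-side sketch of the same indexing rule, useful for sanity-checking target layouts; the function name and example labels are illustrative:

#include <cstdint>
#include <cstdio>

// Same rule as the device helper get_target_prime, without strides or offsets:
// the augmented sequence for targets {a, b, c} is {BLANK, a, BLANK, b, BLANK, c, BLANK}.
static int64_t targetPrime(const int64_t *target, int64_t idx, int64_t BLANK) {
    return (idx % 2 == 0) ? BLANK : target[idx / 2];
}

int main() {
    const int64_t BLANK = 0;
    const int64_t target[] = {3, 5, 5};   // illustrative labels
    const int64_t target_length = 3;
    for (int64_t s = 0; s < 2 * target_length + 1; ++s) {
        printf("%lld ", (long long)targetPrime(target, s, BLANK));
    }
    printf("\n");                         // prints: 0 3 0 5 0 5 0
    return 0;
}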
a3818e12b3e4baf5f3e5551ff7f18862e8df9999.cu
#include <ATen/ATen.h> #include <ATen/Dispatch.h> #include <THC/THCAtomics.cuh> #include <stdio.h> #include <math.h> #include <numeric> #include <float.h> #ifndef AT_CHECK #define AT_CHECK TORCH_CHECK #endif using namespace at; #define CUDA_KERNEL_LOOP(i, n) \ for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < (n); \ i += blockDim.x * gridDim.x) const int CUDA_NUM_THREADS = 512; const int kMaxGridNum = 65535; inline int GET_BLOCKS(const int N) { return std::min(kMaxGridNum, (N + CUDA_NUM_THREADS - 1) / CUDA_NUM_THREADS); } void print_tsize(at::Tensor t, const char *msg) { printf("%s size: "); for (int i = 0; i < t.ndimension(); i++) { printf("%d ", int(t.size(i))); } printf("\n"); } // this ad-hoc converts from targets (l in [1]) to augmented targets (l' in [1]) note that no bound-checking is done // __restrict__ impact to be measured, https://devblogs.nvidia.com/cuda-pro-tip-optimize-pointer-aliasing/ template <typename scalar_t> __device__ static inline int64_t get_target_prime( const scalar_t* __restrict__ target, int64_t offset, int64_t stride, int64_t idx, int64_t BLANK ) { if (idx % 2 == 0) { return BLANK; } else { return target[offset + stride * (idx / 2)]; } } template <typename scalar_t> __device__ static inline scalar_t safe_log_add(scalar_t a, scalar_t b) { scalar_t m=((a > b) ? a : b); if (m == -INFINITY) m = 0; return (std::log(std::exp(a-m) + std::exp(b-m)) + m); } template <typename scalar_t> __global__ void ctc2d_log_alpha_gpu_kernel( const int64_t n, scalar_t* __restrict__ log_alpha_data, const scalar_t* log_probs_data, const int64_t* __restrict__ input_lengths, int max_input_length, const int64_t* __restrict__ targets_data, const int64_t* __restrict__ target_lengths, const int max_target_length, scalar_t* __restrict__ neg_log_likelihood_data, int64_t lp_input_stride, int64_t lp_height_stride, int64_t lp_batch_stride, int64_t lp_char_stride, int64_t la_batch_stride, int64_t la_input_stride, int64_t la_height_stride, int64_t la_target_stride, int64_t tg_batch_stride, int64_t tg_target_stride, int64_t batch_size, int64_t batch_per_block, int64_t height, int64_t BLANK ) { CUDA_KERNEL_LOOP(index, n) { int64_t b = (index - blockIdx.x*blockDim.x) / (2*max_target_length+1) + blockIdx.x*batch_per_block; int64_t s = (index - blockIdx.x*blockDim.x) % (2*max_target_length+1); if ((b >= batch_size) || (b >= (blockIdx.x+1)*batch_per_block)) return; int64_t input_length = input_lengths[b]; int64_t target_length = target_lengths[b]; // log_probs_data ==> [T, H, N, C] // log_alpha_data ==> [N, T, H, 2*S+1] int64_t lp_batch_offset = b*lp_batch_stride; int64_t la_batch_offset = b*la_batch_stride; int64_t tg_batch_offset = b*tg_batch_stride; scalar_t la; switch (s) { case 0: for (int64_t h=0; h < height; h++) { la = log_probs_data[lp_height_stride*h + lp_batch_offset + lp_char_stride*BLANK]; if (s < 2*max_target_length+1) log_alpha_data[la_batch_offset + la_height_stride*h + la_target_stride*s] = la; } break; case 1: for (int64_t h=0; h < height; h++) { if (target_length > 0) { la = log_probs_data[lp_height_stride*h + lp_batch_offset + lp_char_stride*get_target_prime(targets_data, tg_batch_offset, tg_target_stride, 1, BLANK)]; } else { la = -INFINITY; } if (s < 2*max_target_length+1) log_alpha_data[la_batch_offset + la_height_stride*h + la_target_stride*s] = la; } break; default: la = -INFINITY; if (s < 2*max_target_length+1) { for (int64_t h=0; h < height; h++) log_alpha_data[la_batch_offset + la_height_stride*h + la_target_stride*s] = la; } } // These two only depend on s, so we can 
cache them. int64_t current_char; // l_s in eq (6) bool have_three; // flag which of the two cases in eq (6) we have if (s < 2*target_length+1) { current_char = get_target_prime(targets_data, tg_batch_offset, tg_target_stride, s, BLANK); have_three = ((s > 1) && (get_target_prime(targets_data, tg_batch_offset, tg_target_stride, s-2, BLANK) != current_char)); } else { current_char = BLANK; have_three = false; } for (int64_t t=1; t < max_input_length; t++) { // on cuda 9 we might use partial synchronization of only the threads within the same batch __syncthreads(); if ((t < input_length) && (target_length > 0) && (s < 2*target_length+1)) { // only for valid t, s. This is equation (6) and (7), la1, la2, la3 are the three summands, // lamax is the maximum for the logsumexp trick. scalar_t la1 = log_alpha_data[la_batch_offset + la_input_stride*(t-1) + la_height_stride*0 + la_target_stride*s]; for (int64_t h=1; h < height; h++) { la1 = safe_log_add(la1, log_alpha_data[la_batch_offset + la_input_stride*(t-1) + la_height_stride*h + la_target_stride*s]); } scalar_t lamax = la1; scalar_t la2, la3; if (s > 0) { la2 = log_alpha_data[la_batch_offset + la_input_stride*(t-1) + la_height_stride*0 + la_target_stride*(s-1)]; for (int64_t h=1; h < height; h++) { la2 = safe_log_add(la2, log_alpha_data[la_batch_offset + la_input_stride*(t-1) + la_height_stride*h + la_target_stride*(s-1)]); } if (la2 > lamax) lamax = la2; } else { la2 = -INFINITY; } if (have_three) { la3 = log_alpha_data[la_batch_offset + la_input_stride*(t-1) + la_height_stride*0 + la_target_stride*(s-2)]; for (int64_t h=1; h < height; h++) { la3 = safe_log_add(la3, log_alpha_data[la_batch_offset + la_input_stride*(t-1) + la_height_stride*h + la_target_stride*(s-2)]); } if (la3 > lamax) lamax = la3; } else { la3 = -INFINITY; } // when all are neginf. (then the whole thing is neginf, but we can pretend) if (lamax == -INFINITY) lamax = 0; for (int64_t h=0; h < height; h++) { log_alpha_data[la_batch_offset + la_input_stride*t + la_height_stride*h + la_target_stride*s] = std::log(std::exp(la1-lamax) + std::exp(la2-lamax) + std::exp(la3-lamax)) + lamax + log_probs_data[lp_input_stride*t + lp_height_stride*h + lp_batch_offset + lp_char_stride*current_char]; } } else { // otherwise we just set to neginf if (s < 2*max_target_length+1) { for (int64_t h = 0; h < height; h++) { log_alpha_data[la_batch_offset + la_input_stride * t + la_height_stride * h + la_target_stride * s] = -INFINITY; } } } } // on cuda 9 we might use partial synchronization of only the threads within the same batch __syncthreads(); // compute the loss (eq (8)) if (s == 0) { scalar_t l1 = log_alpha_data[la_batch_offset + la_input_stride*(input_length-1) + la_height_stride*0 + la_target_stride*(target_length*2)]; for (int64_t h=1; h < height; h++) { l1 = safe_log_add(l1, log_alpha_data[la_batch_offset + la_input_stride*(input_length-1) + la_height_stride*h + la_target_stride*(target_length*2)]); } scalar_t l2 = log_alpha_data[la_batch_offset + la_input_stride*(input_length-1) + la_height_stride*0 + la_target_stride*(target_length*2-1)]; for (int64_t h=1; h < height; h++) { l2 = safe_log_add(l2, log_alpha_data[la_batch_offset + la_input_stride*(input_length-1) + la_height_stride*h + la_target_stride*(target_length*2-1)]); } scalar_t m = ((l1 > l2) ? 
l1 : l2); if (m == -INFINITY) m = 0; scalar_t log_likelihood = std::log(std::exp(l1-m)+std::exp(l2-m))+m; neg_log_likelihood_data[b] = -log_likelihood; } } } std::tuple<at::Tensor, at::Tensor> ctc2d_gpu_template( const at::Tensor log_probs, const at::Tensor targets, const at::Tensor input_lengths, const at::Tensor target_lengths, int64_t BLANK, float TINY ) { int64_t max_target_length = targets.size(1); AT_CHECK((2 * max_target_length + 1) <= CUDA_NUM_THREADS, "max target length out of range, got ", max_target_length, ", must less than ", CUDA_NUM_THREADS); int64_t max_input_length = log_probs.size(0); int64_t height = log_probs.size(1); int64_t batch_size = log_probs.size(2); int64_t batch_per_block = CUDA_NUM_THREADS / (2 * max_target_length + 1); const int num_kernels = (batch_size + batch_per_block - 1) / batch_per_block * CUDA_NUM_THREADS; // N T H 2*S+1 at::Tensor log_alpha = at::zeros( {batch_size, log_probs.size(0), log_probs.size(1), 2*max_target_length+1}, log_probs.options() ); at::Tensor neg_log_likelihood = at::zeros({batch_size}, log_probs.options()); AT_DISPATCH_FLOATING_TYPES_AND_HALF(log_probs.type(), "ctc2d_log_alpha_gpu_template", ([&] { ctc2d_log_alpha_gpu_kernel<<<GET_BLOCKS(num_kernels), CUDA_NUM_THREADS>>>( num_kernels, log_alpha.data<scalar_t>(), log_probs.data<scalar_t>(), input_lengths.data<int64_t>(), max_input_length, targets.data<int64_t>(), target_lengths.data<int64_t>(), max_target_length, neg_log_likelihood.data<scalar_t>(), log_probs.stride(0), log_probs.stride(1), log_probs.stride(2), log_probs.stride(3), log_alpha.stride(0), log_alpha.stride(1), log_alpha.stride(2), log_alpha.stride(3), targets.stride(0), targets.stride(1), batch_size, batch_per_block, height, BLANK ); })); return std::make_tuple(neg_log_likelihood, log_alpha); } template <typename scalar_t> __global__ void ctc2d_log_beta_gpu_kernel( const int64_t n, scalar_t* __restrict__ log_beta_data, const scalar_t* log_probs_data, const int64_t* __restrict__ input_lengths, int max_input_length, const int64_t* __restrict__ targets_data, const int64_t* __restrict__ target_lengths, const int max_target_length, int64_t lp_input_stride, int64_t lp_height_stride, int64_t lp_batch_stride, int64_t lp_char_stride, int64_t lb_batch_stride, int64_t lb_input_stride, int64_t lb_height_stride, int64_t lb_target_stride, int64_t tg_batch_stride, int64_t tg_target_stride, int64_t batch_size, int64_t batch_per_block, int64_t height, int64_t BLANK ) { CUDA_KERNEL_LOOP(index, n) { int64_t b = (index - blockIdx.x*blockDim.x) / (2*max_target_length+1) + blockIdx.x*batch_per_block; int64_t s = (index - blockIdx.x*blockDim.x) % (2*max_target_length+1); if ((b >= batch_size) || (b >= (blockIdx.x+1)*batch_per_block)) return; int64_t input_length = input_lengths[b]; int64_t target_length = target_lengths[b]; // log_probs_data ==> [T, H, N, C] // log_beta_data ==> [N, T, H, 2*S+1] int64_t lp_batch_offset = b*lp_batch_stride; int64_t lb_batch_offset = b*lb_batch_stride; int64_t tg_batch_offset = b*tg_batch_stride; scalar_t lb; if (s == 2*target_length) { for (int64_t h=0; h < height; h++) { lb = log_probs_data[lp_input_stride*(input_length-1) + lp_height_stride*h + lp_batch_offset + lp_char_stride*BLANK]; log_beta_data[lb_batch_offset + lb_input_stride*(input_length-1) + lb_height_stride*h + lb_target_stride*s] = lb; } } else if ((target_length > 0) && (s == 2*target_length-1)) { int64_t current_target_prime = get_target_prime(targets_data, tg_batch_offset, tg_target_stride, s, BLANK); for (int64_t h=0; h < height; h++) { lb = 
log_probs_data[lp_input_stride*(input_length-1) + lp_height_stride*h + lp_batch_offset + lp_char_stride*current_target_prime]; log_beta_data[lb_batch_offset + lb_input_stride*(input_length-1) + lb_height_stride*h + lb_target_stride*s] = lb; } } else { for (int64_t h=0; h < height; h++) { log_beta_data[lb_batch_offset + lb_input_stride*(input_length-1) + lb_height_stride*h + lb_target_stride*s] = -INFINITY; } } int64_t current_target_prime; bool have_three; if (s < 2*target_length+1) { current_target_prime = get_target_prime(targets_data, tg_batch_offset, tg_target_stride, s, BLANK); have_three = ((s < 2*target_length-1) && (get_target_prime(targets_data, tg_batch_offset, tg_target_stride, s+2, BLANK) != current_target_prime)); } else { current_target_prime = BLANK; have_three = false; } // now go backward in t. Note that we need to skip the last timestep that we did above. for (int64_t t=max_input_length-2; t>=0; t--) { __syncthreads(); // on cuda 9 we might use partial synchronization of only the threads within the same batch item if ((t < input_length-1) && (target_length > 0) && (s < 2*target_length+1)) { scalar_t lb1 = log_beta_data[lb_batch_offset + lb_input_stride*(t+1) + lb_height_stride*0 + lb_target_stride*s]; for (int64_t h=1; h < height; h++) { lb1 = safe_log_add(lb1, log_beta_data[lb_batch_offset + lb_input_stride*(t+1) + lb_height_stride*h + lb_target_stride*s]); } scalar_t lbmax = lb1; scalar_t lb2, lb3; if (s < 2*target_length) { lb2 = log_beta_data[ lb_batch_offset + lb_input_stride*(t+1) + lb_height_stride*0 + lb_target_stride*(s+1)]; for (int64_t h=1; h < height; h++) { lb2 = safe_log_add(lb2, log_beta_data[lb_batch_offset + lb_input_stride*(t+1) + lb_height_stride*h + lb_target_stride*(s+1)]); } if (lb2 > lbmax) lbmax = lb2; } else { lb2 = -INFINITY; } if (have_three) { lb3 = log_beta_data[lb_batch_offset + lb_input_stride*(t+1) + lb_height_stride*0 + lb_target_stride*(s+2)]; for (int64_t h=1; h < height; h++) { lb3 = safe_log_add(lb3, log_beta_data[lb_batch_offset + lb_input_stride*(t+1) + lb_height_stride*h + lb_target_stride*(s+2)]); } if (lb3 > lbmax) lbmax = lb3; } else { lb3 = -INFINITY; } if (lbmax == -INFINITY) lbmax = 0; scalar_t tmp = std::log(std::exp(lb1-lbmax) + std::exp(lb2-lbmax) + std::exp(lb3-lbmax)) + lbmax; for (int64_t h=0; h < height; h++) { log_beta_data[lb_batch_offset + lb_input_stride*t + lb_height_stride*h + lb_target_stride*s] = tmp + log_probs_data[lp_input_stride*t + lp_height_stride*h + lp_batch_offset + lp_char_stride*current_target_prime]; } } else if ((target_length == 0) || (s > 2*target_length+1) || (t >= input_length)) { for (int64_t h=0; h < height; h++) { log_beta_data[lb_batch_offset + lb_input_stride*t + lb_height_stride*h + lb_target_stride*s] = -INFINITY; } } } } } template <typename scalar_t> __global__ void ctc2d_backward_collect_nonblank_gpu_kernel( const int64_t n, scalar_t* __restrict__ gradient_data, const scalar_t* __restrict__ grad_out_data, int64_t grad_out_batch_stride, const scalar_t* __restrict__ log_alpha_data, const scalar_t* __restrict__ log_beta_data, const scalar_t* log_probs_data, const int64_t* __restrict__ input_lengths, int64_t max_input_length, const int64_t* __restrict__ targets_data, const int64_t* __restrict__ target_lengths, int64_t max_target_length, const scalar_t* __restrict__ neg_log_likelihood_data, int64_t gr_input_stride, int64_t gr_height_stride, int64_t gr_batch_stride, int64_t gr_char_stride, int64_t lp_input_stride, int64_t lp_height_stride, int64_t lp_batch_stride, int64_t lp_char_stride, 
int64_t la_batch_stride, int64_t la_input_stride, int64_t la_height_stride, int64_t la_target_stride, int64_t lb_batch_stride, int64_t lb_input_stride, int64_t lb_height_stride, int64_t lb_target_stride, int64_t tg_batch_stride, int64_t tg_target_stride, int64_t batch_size, int64_t batch_per_block, int64_t height, int64_t BLANK, bool zero_infinity ) { CUDA_KERNEL_LOOP(index, n) { int64_t b = (index - blockIdx.x*blockDim.x) / max_target_length + blockIdx.x*batch_per_block; int64_t s = (index - blockIdx.x*blockDim.x) % max_target_length; if (b >= batch_size) return; int64_t input_length = input_lengths[b]; int64_t target_length = target_lengths[b]; int64_t gr_batch_offset = b*gr_batch_stride; int64_t lp_batch_offset = b*lp_batch_stride; int64_t la_batch_offset = b*la_batch_stride; int64_t lb_batch_offset = b*lb_batch_stride; int64_t tg_batch_offset = b*tg_batch_stride; if (s >= target_length) return; int64_t target = targets_data[tg_batch_offset + s*tg_target_stride]; scalar_t nll = neg_log_likelihood_data[b]; scalar_t gr = grad_out_data[b * grad_out_batch_stride]; if (zero_infinity && nll == INFINITY) return; for (int64_t t = 0; t < input_length; t++) { for (int64_t h = 0; h < height; h++) { scalar_t lp = log_probs_data[lp_batch_offset + lp_input_stride*t + lp_height_stride*h + lp_char_stride*target]; atomicAdd(&gradient_data[gr_batch_offset + gr_input_stride*t + gr_height_stride*h + gr_char_stride*target], -std::exp(log_alpha_data[la_batch_offset + la_input_stride*t + la_height_stride*h + la_target_stride*(s*2+1)] + log_beta_data[lb_batch_offset + lb_input_stride*t + lb_height_stride*h + lb_target_stride*(s*2+1)] + nll - lp) * gr); } } } } template <typename scalar_t> __global__ void ctc2d_backward_collect_gpu_kernel( const int64_t n, scalar_t* __restrict__ gradient_data, const scalar_t* __restrict__ grad_out_data, int64_t grad_out_batch_stride, const scalar_t* __restrict__ log_alpha_data, const scalar_t* __restrict__ log_beta_data, const scalar_t* log_probs_data, const int64_t* __restrict__ input_lengths, int64_t max_input_length, const int64_t* __restrict__ targets_data, const int64_t* __restrict__ target_lengths, int64_t max_target_length, const scalar_t* __restrict__ neg_log_likelihood_data, int64_t gr_input_stride, int64_t gr_height_stride, int64_t gr_batch_stride, int64_t gr_char_stride, int64_t lp_input_stride, int64_t lp_height_stride, int64_t lp_batch_stride, int64_t lp_char_stride, int64_t la_batch_stride, int64_t la_input_stride, int64_t la_height_stride, int64_t la_target_stride, int64_t lb_batch_stride, int64_t lb_input_stride, int64_t lb_height_stride, int64_t lb_target_stride, int64_t tg_batch_stride, int64_t tg_target_stride, int64_t batch_size, int64_t num_labels, int64_t batch_per_block, int64_t height, int64_t BLANK, bool zero_infinity ) { CUDA_KERNEL_LOOP(index, n) { int64_t b = (index - blockIdx.x*blockDim.x) / max_input_length + blockIdx.x*batch_per_block; int64_t t = (index - blockIdx.x*blockDim.x) % max_input_length; if (b >= batch_size) return; int64_t input_length = input_lengths[b]; int64_t target_length = target_lengths[b]; int64_t gr_batch_offset = b*gr_batch_stride; int64_t lp_batch_offset = b*lp_batch_stride; int64_t la_batch_offset = b*la_batch_stride; int64_t lb_batch_offset = b*lb_batch_stride; int64_t tg_batch_offset = b*tg_batch_stride; // collected[b, t, h, target'[s]] "log+=" log_alpha[t, s]+log_beta[t, s] for (int64_t s = 0; s < 2*max_target_length+1; s++) { if ((target_length > 0) && (s < 2*target_length+1)) { int64_t current_target_prime = 
get_target_prime(targets_data, tg_batch_offset, tg_target_stride, s, BLANK); /*scalar_t laaa = log_alpha_data[la_batch_offset + la_input_stride*t + la_height_stride*0 + la_target_stride*s]; for (int64_t h = 1; h < height; h++) { laaa = safe_log_add(laaa, log_alpha_data[la_batch_offset + la_input_stride*t + la_height_stride*h + la_target_stride*s]); } scalar_t lbbb = log_beta_data[lb_batch_offset + lb_input_stride*t + lb_height_stride*0 + lb_target_stride*s]; for (int64_t h = 1; h < height; h++) { lbbb = safe_log_add(lbbb, log_beta_data[lb_batch_offset + lb_input_stride*t + lb_height_stride*h + lb_target_stride*s]); }*/ for (int64_t h = 0; h < height; h++) { scalar_t laaa = log_alpha_data[la_batch_offset + la_input_stride*t + la_height_stride*h + la_target_stride*s]; scalar_t lbbb = log_beta_data[lb_batch_offset + lb_input_stride*t + lb_height_stride*h + lb_target_stride*s]; scalar_t log_alpha_beta = laaa + lbbb; scalar_t& lcab = gradient_data[gr_batch_offset + gr_input_stride*t + gr_height_stride*h + gr_char_stride*current_target_prime]; if (lcab == -INFINITY) { lcab = log_alpha_beta; } else { scalar_t max = ((lcab > log_alpha_beta) ? lcab : log_alpha_beta); lcab = std::log(std::exp(lcab-max)+std::exp(log_alpha_beta-max))+max; } } } } scalar_t nll = neg_log_likelihood_data[b]; scalar_t gr = grad_out_data[b * grad_out_batch_stride]; for (int64_t c = 0; c < num_labels; c++) { for (int64_t h = 0; h < height; h++) { scalar_t& res = gradient_data[gr_batch_offset + gr_input_stride*t + gr_height_stride*h + gr_char_stride*c]; if (t < input_length && (! zero_infinity || nll != INFINITY)) { scalar_t lp = log_probs_data[lp_batch_offset + lp_input_stride*t + lp_height_stride*h + lp_char_stride*c]; if (res == -INFINITY) res = 0; else res = (std::exp(lp) - std::exp(res + nll - lp)) * gr; } else { res = 0.; } } } } } at::Tensor ctc2d_gpu_backward_template( const at::Tensor grad_out, const at::Tensor log_probs, const at::Tensor targets, const at::Tensor input_lengths, const at::Tensor target_lengths, const at::Tensor neg_log_likelihood, const at::Tensor log_alpha, int64_t BLANK ) { bool zero_infinity = 0; int64_t max_target_length = targets.size(1); int64_t max_input_length = log_probs.size(0); int64_t height = log_probs.size(1); int64_t batch_size = log_probs.size(2); int64_t num_labels = log_probs.size(3); int64_t batch_per_block = CUDA_NUM_THREADS / (2 * max_target_length + 1); int64_t num_kernels = (batch_size + batch_per_block - 1) / batch_per_block * CUDA_NUM_THREADS; at::Tensor log_beta = at::zeros( {batch_size, log_probs.size(0), log_probs.size(1), 2*max_target_length+1}, log_probs.options() ); AT_DISPATCH_FLOATING_TYPES_AND_HALF(log_probs.type(), "ctc2d_log_beta_gpu_template", ([&] { ctc2d_log_beta_gpu_kernel<<<GET_BLOCKS(num_kernels), CUDA_NUM_THREADS>>>( num_kernels, log_beta.data<scalar_t>(), log_probs.data<scalar_t>(), input_lengths.data<int64_t>(), max_input_length, targets.data<int64_t>(), target_lengths.data<int64_t>(), max_target_length, log_probs.stride(0), log_probs.stride(1), log_probs.stride(2), log_probs.stride(3), log_beta.stride(0), log_beta.stride(1), log_beta.stride(2), log_beta.stride(3), targets.stride(0), targets.stride(1), batch_size, batch_per_block, height, BLANK ); })); at::Tensor grad = at::full_like(log_probs, -INFINITY); // bool is_large = (2*log_probs.size(0)+(24*batch_size)/10+(2*num_labels)/10) > 450; bool is_large = 0; if (is_large) { // large alphabet, large batch // std::cout << "+++Large+++" << std::endl; // this computes the probs, minuend in (16) exp_out(grad, 
log_probs); // now we compute the subtrahend for the blanks. It is a straightforward reduction because we know that // blanks are in every other position. // maybe we should kernelize this, too. auto grad_blank = grad.narrow(3, BLANK, 1); grad_blank -= (at::logsumexp( log_alpha.as_strided({batch_size, log_alpha.size(1), log_alpha.size(2), max_target_length+1}, {log_alpha.stride(0), log_alpha.stride(1), log_alpha.stride(2), log_alpha.stride(3)*2}) + log_beta.as_strided({batch_size, log_beta.size(1), log_beta.size(2), max_target_length+1}, {log_beta.stride(0), log_beta.stride(1), log_beta.stride(2), log_beta.stride(3)*2}), 3, true) .permute({1, 2, 0, 3}) .add_(neg_log_likelihood.view({1, 1, batch_size, 1})) .sub_(log_probs.narrow(3, BLANK, 1)) .exp_() ); grad *= grad_out.view({1, 1, batch_size, 1}); // For the non-blank characters, we use a kernel to compute the subtrahend. // Again we might configure block and grid in a better way. batch_per_block = CUDA_NUM_THREADS / max_target_length; num_kernels = (batch_size + batch_per_block - 1) / batch_per_block * CUDA_NUM_THREADS; AT_DISPATCH_FLOATING_TYPES_AND_HALF(log_probs.type(), "ctc2d_collect_nonblank", ([&] { ctc2d_backward_collect_nonblank_gpu_kernel<<<GET_BLOCKS(num_kernels), CUDA_NUM_THREADS>>>( num_kernels, grad.data<scalar_t>(), grad_out.data<scalar_t>(), grad_out.stride(0), log_alpha.data<scalar_t>(), log_beta.data<scalar_t>(), log_probs.data<scalar_t>(), input_lengths.data<int64_t>(), max_input_length, targets.data<int64_t>(), target_lengths.data<int64_t>(), max_target_length, neg_log_likelihood.data<scalar_t>(), grad.stride(0), grad.stride(1), grad.stride(2), grad.stride(3), log_probs.stride(0), log_probs.stride(1), log_probs.stride(2), log_probs.stride(3), log_alpha.stride(0), log_alpha.stride(1), log_alpha.stride(2), log_alpha.stride(3), log_beta.stride(0), log_beta.stride(1), log_beta.stride(2), log_beta.stride(3), targets.stride(0), targets.stride(1), batch_size, batch_per_block, height, BLANK, zero_infinity ); })); } else { // small problem, use naive algorithm // std::cout << "+++Small+++" << std::endl; batch_per_block = CUDA_NUM_THREADS / max_input_length; num_kernels = (batch_size + batch_per_block - 1) / batch_per_block * CUDA_NUM_THREADS; AT_DISPATCH_FLOATING_TYPES_AND_HALF(log_probs.type(), "ctc2d_collect_all", ([&] { ctc2d_backward_collect_gpu_kernel<<<GET_BLOCKS(num_kernels), CUDA_NUM_THREADS>>>( num_kernels, grad.data<scalar_t>(), grad_out.data<scalar_t>(), grad_out.stride(0), log_alpha.data<scalar_t>(), log_beta.data<scalar_t>(), log_probs.data<scalar_t>(), input_lengths.data<int64_t>(), max_input_length, targets.data<int64_t>(), target_lengths.data<int64_t>(), max_target_length, neg_log_likelihood.data<scalar_t>(), grad.stride(0), grad.stride(1), grad.stride(2), grad.stride(3), log_probs.stride(0), log_probs.stride(1), log_probs.stride(2), log_probs.stride(3), log_alpha.stride(0), log_alpha.stride(1), log_alpha.stride(2), log_alpha.stride(3), log_beta.stride(0), log_beta.stride(1), log_beta.stride(2), log_beta.stride(3), targets.stride(0), targets.stride(1), batch_size, num_labels, batch_per_block, height, BLANK, zero_infinity ); })); } return grad; }
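safe_log_add and the alpha/beta recursions above all rely on the same log-sum-exp trick: subtract the running maximum before exponentiating so that log(exp(a) + exp(b)) cannot overflow or underflow to -inf spuriously, and treat the all-minus-infinity case specially. A standalone host-side sketch of the two- and three-term forms of that pattern; the helper names are illustrative:

#include <cmath>
#include <cstdio>

// log(exp(a) + exp(b)) computed stably, mirroring safe_log_add in the kernels above.
static double logAdd2(double a, double b) {
    double m = (a > b) ? a : b;
    if (m == -INFINITY) m = 0;   // both inputs -inf: the result is still -inf either way
    return std::log(std::exp(a - m) + std::exp(b - m)) + m;
}

// Three-term variant, matching the la1/la2/la3 + lamax pattern in the alpha recursion.
static double logAdd3(double a, double b, double c) {
    double m = std::fmax(a, std::fmax(b, c));
    if (m == -INFINITY) m = 0;
    return std::log(std::exp(a - m) + std::exp(b - m) + std::exp(c - m)) + m;
}

int main() {
    printf("%f\n", logAdd2(-1000.0, -1000.5));   // ~-999.526; the naive form underflows to -inf
    printf("%f\n", logAdd3(-INFINITY, -2.0, -3.0));
    return 0;
}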
c61c2de6bcb93127603e19bfb3490b6865871651.hip
// !!! This is a file automatically generated by hipify!!! #include <stdbool.h> #include <stdio.h> #include <string.h> #include <getopt.h> #include <hiprand/hiprand_kernel.h> #include <stdlib.h> #include <hip/hip_runtime.h> #include <sys/time.h> #include "vec_computeModelMany2_scmos.cu" #include<chrono> #include<iostream> using namespace std; using namespace std::chrono; int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}}; int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}}; int main(int argc, char **argv) { hipSetDevice(0); char* p;int matrix_len=strtol(argv[1], &p, 10); for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){ for(int block_looper=0;block_looper<20;block_looper++){ int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1]; int n = XSIZE*YSIZE; int sizeImage = XSIZE*YSIZE; double *result = NULL; hipMalloc(&result, XSIZE*YSIZE*sizeof(double)); double *x = NULL; hipMalloc(&x, XSIZE*YSIZE*sizeof(double)); double *amplitude = NULL; hipMalloc(&amplitude, XSIZE*YSIZE*sizeof(double)); double *background = NULL; hipMalloc(&background, XSIZE*YSIZE*sizeof(double)); double *scmos = NULL; hipMalloc(&scmos, XSIZE*YSIZE*sizeof(double)); int iXSIZE= XSIZE; int iYSIZE= YSIZE; while(iXSIZE%BLOCKX!=0) { iXSIZE++; } while(iYSIZE%BLOCKY!=0) { iYSIZE++; } dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY); dim3 threadBlock(BLOCKX, BLOCKY); hipFree(0);hipLaunchKernelGGL(( vec_computeModelMany2_scmos), dim3(gridBlock),dim3(threadBlock), 0, 0, n,sizeImage,result,x,amplitude,background,scmos); hipDeviceSynchronize(); for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {hipLaunchKernelGGL(( vec_computeModelMany2_scmos), dim3(gridBlock),dim3(threadBlock), 0, 0, n,sizeImage,result,x,amplitude,background,scmos); } hipDeviceSynchronize(); auto start = steady_clock::now(); for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {hipLaunchKernelGGL(( vec_computeModelMany2_scmos), dim3(gridBlock),dim3(threadBlock), 0, 0, n,sizeImage,result,x,amplitude,background,scmos); } hipDeviceSynchronize(); auto end = steady_clock::now(); auto usecs = duration_cast<duration<float, microseconds::period> >(end - start); cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl; hipFree(result); hipFree(x); hipFree(amplitude); hipFree(background); hipFree(scmos); } }}
c61c2de6bcb93127603e19bfb3490b6865871651.cu
#include <stdbool.h> #include <stdio.h> #include <string.h> #include <getopt.h> #include <curand_kernel.h> #include <stdlib.h> #include <cuda.h> #include <sys/time.h> #include "vec_computeModelMany2_scmos.cu" #include<chrono> #include<iostream> using namespace std; using namespace std::chrono; int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}}; int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}}; int main(int argc, char **argv) { cudaSetDevice(0); char* p;int matrix_len=strtol(argv[1], &p, 10); for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){ for(int block_looper=0;block_looper<20;block_looper++){ int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1]; int n = XSIZE*YSIZE; int sizeImage = XSIZE*YSIZE; double *result = NULL; cudaMalloc(&result, XSIZE*YSIZE*sizeof(double)); double *x = NULL; cudaMalloc(&x, XSIZE*YSIZE*sizeof(double)); double *amplitude = NULL; cudaMalloc(&amplitude, XSIZE*YSIZE*sizeof(double)); double *background = NULL; cudaMalloc(&background, XSIZE*YSIZE*sizeof(double)); double *scmos = NULL; cudaMalloc(&scmos, XSIZE*YSIZE*sizeof(double)); int iXSIZE= XSIZE; int iYSIZE= YSIZE; while(iXSIZE%BLOCKX!=0) { iXSIZE++; } while(iYSIZE%BLOCKY!=0) { iYSIZE++; } dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY); dim3 threadBlock(BLOCKX, BLOCKY); cudaFree(0); vec_computeModelMany2_scmos<<<gridBlock,threadBlock>>>(n,sizeImage,result,x,amplitude,background,scmos); cudaDeviceSynchronize(); for (int loop_counter = 0; loop_counter < 10; ++loop_counter) { vec_computeModelMany2_scmos<<<gridBlock,threadBlock>>>(n,sizeImage,result,x,amplitude,background,scmos); } cudaDeviceSynchronize(); auto start = steady_clock::now(); for (int loop_counter = 0; loop_counter < 1000; loop_counter++) { vec_computeModelMany2_scmos<<<gridBlock,threadBlock>>>(n,sizeImage,result,x,amplitude,background,scmos); } cudaDeviceSynchronize(); auto end = steady_clock::now(); auto usecs = duration_cast<duration<float, microseconds::period> >(end - start); cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl; cudaFree(result); cudaFree(x); cudaFree(amplitude); cudaFree(background); cudaFree(scmos); } }}
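Kernel launches are asynchronous, so a host clock only measures real kernel time if the device is synchronized before the clock is read; CUDA events are a common alternative because they are recorded and timed on the GPU itself. A sketch of event-based timing around the same launch used in the benchmark above; timeKernelMs and the lambda wrapper are illustrative, not part of the original file:

#include <cuda_runtime.h>
#include <cstdio>

// Times `iters` back-to-back launches with CUDA events and returns elapsed milliseconds.
// launchOnce is a placeholder for the actual launch, e.g. the
// vec_computeModelMany2_scmos<<<gridBlock, threadBlock>>>(...) call in the file above.
template <typename LaunchFn>
float timeKernelMs(LaunchFn launchOnce, int iters) {
    cudaEvent_t start, stop;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);
    cudaEventRecord(start);
    for (int i = 0; i < iters; ++i) {
        launchOnce();
    }
    cudaEventRecord(stop);
    cudaEventSynchronize(stop);   // wait until all recorded work has finished
    float ms = 0.0f;
    cudaEventElapsedTime(&ms, start, stop);
    cudaEventDestroy(start);
    cudaEventDestroy(stop);
    return ms;
}

// Usage (illustrative):
//   float ms = timeKernelMs([&] {
//       vec_computeModelMany2_scmos<<<gridBlock, threadBlock>>>(
//           n, sizeImage, result, x, amplitude, background, scmos);
//   }, 1000);
//   printf("avg %.3f us per launch\n", ms * 1000.0f / 1000);  // ms -> us, divided by 1000 launches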
bc0b80f0ac84a1f8e366d9d5915ce1f2769b4f2c.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <stdio.h> #include <stdlib.h> #include <iostream> #include <assert.h> #include <vector> using namespace std; const int INF = 10000000; const int V = 10010; const int MAX_THREAD_DIM2 = 32; void input(char *inFileName, int B); void output(char *outFileName); void block_FW(int B); int ceil(int a, int b); void calAsync(int B, int Round, int block_start_x, int block_start_y, int block_width, int block_height); int realn; int n, m; // Number of vertices, edges int* Dist; // n * n, on host int* dDist; // n * n, on device int streamSize; vector<hipStream_t> streams; inline hipStream_t getIdleStream () { if(streams.size() == streamSize) { hipStream_t stm; hipStreamCreate(&stm); streams.push_back(stm); streamSize++; return stm; } else return streams[streamSize++]; } inline void syncAllStreams () { hipDeviceSynchronize(); streamSize = 0; } int main(int argc, char* argv[]) { int B = atoi(argv[3]); input(argv[1], B); // if(B > n) // { // B = n; // cerr << "Warning: B > n. Set B = n."; // } block_FW(B); output(argv[2]); return 0; } void input(char *inFileName, int B) { FILE *infile = fopen(inFileName, "r"); fscanf(infile, "%d %d", &realn, &m); n = ceil(realn, B) * B; Dist = new int[n * n]; for (int i = 0, k = 0; i < n; ++i) { for (int j = 0; j < n; ++j, ++k) { if (i == j) Dist[k] = 0; else Dist[k] = INF; } } while (--m >= 0) { int a, b, v; fscanf(infile, "%d %d %d", &a, &b, &v); --a, --b; Dist[a * n + b] = v; } } void output(char *outFileName) { FILE *outfile = fopen(outFileName, "w"); for (int i = 0; i < realn; ++i) { for (int j = 0; j < realn; ++j) { int d = Dist[i * n + j]; if (d >= INF) fprintf(outfile, "INF "); else fprintf(outfile, "%d ", d); } fprintf(outfile, "\n"); } delete[] Dist; } void print () { for (int i = 0; i < realn; ++i) { for (int j = 0; j < realn; ++j) { int d = Dist[i * n + j]; if (d >= INF) fprintf(stderr, "INF "); else fprintf(stderr, "%d ", d); } fprintf(stderr, "\n"); } fprintf(stderr, "\n"); } inline int ceil(int a, int b) { return (a + b -1)/b; } inline __device__ void updateMin (int &x, int a) { if(a < x) x = a; } __global__ void UpdateIKJ32 (int r, int* dDist, int n) { #define D(i,j) (dDist[(i) * n + (j)]) int tx = threadIdx.x; int ty = threadIdx.y; int i = r * 32 + tx; int j = r * 32 + ty; __shared__ int S[MAX_THREAD_DIM2][MAX_THREAD_DIM2]; S[tx][ty] = D(i, j); __syncthreads(); for(int k=0; k<32; ++k) { updateMin(S[tx][ty], S[tx][k] + S[k][ty]); __syncthreads(); } D(i, j) = S[tx][ty]; #undef D } __global__ void UpdateIK32 (int r, int* dDist, int n) { #define D(i,j) (dDist[(i) * n + (j)]) int tx = threadIdx.x; int ty = threadIdx.y; int by = blockIdx.x; if(by >= r) by++; int i = r * 32 + tx; int j = by * 32 + ty; __shared__ int S0[MAX_THREAD_DIM2][MAX_THREAD_DIM2]; __shared__ int S1[MAX_THREAD_DIM2][MAX_THREAD_DIM2]; S0[ty][tx] = D(i, r*32 + ty); S1[tx][ty] = D(i, j); __syncthreads(); for(int k=0; k<32; ++k) { updateMin(S1[tx][ty], S0[k][tx] + S1[k][ty]); __syncthreads(); } D(i, j) = S1[tx][ty]; #undef D } __global__ void UpdateKJ32 (int r, int* dDist, int n) // 0 --update--> 1 { #define D(i,j) (dDist[(i) * n + (j)]) int tx = threadIdx.x; int ty = threadIdx.y; int bx = blockIdx.x; if(bx >= r) bx++; int i = bx * 32 + tx; int j = r * 32 + ty; __shared__ int S0[MAX_THREAD_DIM2][MAX_THREAD_DIM2]; __shared__ int S1[MAX_THREAD_DIM2][MAX_THREAD_DIM2]; S0[ty][tx] = D(i, j); S1[tx][ty] = D(r*32 + tx, j); __syncthreads(); for(int k=0; k<32; ++k) { updateMin(S0[ty][tx], S0[k][tx] + 
S1[k][ty]); __syncthreads(); } D(i, j) = S0[ty][tx]; #undef D } __global__ void Update32 (int r, int* dDist, int n) { #define D(i,j) (dDist[(i) * n + (j)]) int tx = threadIdx.x; int ty = threadIdx.y; int bx = blockIdx.x; int by = blockIdx.y; if(bx >= r) bx++; if(by >= r) by++; int i = bx * 32 + tx; int j = by * 32 + ty; __shared__ int S0[MAX_THREAD_DIM2][MAX_THREAD_DIM2]; __shared__ int S1[MAX_THREAD_DIM2][MAX_THREAD_DIM2]; S0[ty][tx] = D(i, r * 32 + ty); S1[tx][ty] = D(r * 32 + tx, j); __syncthreads(); int Dij = D(i, j); for(int k=0; k<32; ++k) { updateMin(Dij, S0[k][tx] + S1[k][ty]); __syncthreads(); } D(i, j) = Dij; #undef D } void block_FW(int B) { int *dPivot; hipMalloc(&dDist, sizeof(int) * n * n); hipMalloc(&dPivot, sizeof(int) * B * B); hipMemcpy(dDist, Dist, sizeof(int) * n * n, hipMemcpyHostToDevice); int round = ceil(n, B); if(B == 32) { for (int r = 0; r < round; ++r) { /* Phase 1*/ hipLaunchKernelGGL(( UpdateIKJ32) , dim3(1), dim3(dim3(32,32)) , 0, 0, r, dDist, n); /* Phase 2*/ hipLaunchKernelGGL(( UpdateIK32) , dim3(round-1), dim3(dim3(32,32)), 0, getIdleStream() , r, dDist, n); hipLaunchKernelGGL(( UpdateKJ32) , dim3(round-1), dim3(dim3(32,32)), 0, getIdleStream() , r, dDist, n); syncAllStreams(); /* Phase 3*/ hipLaunchKernelGGL(( Update32) , dim3(dim3(round-1, round-1)), dim3(dim3(32,32)) , 0, 0, r, dDist, n); } } else for (int r = 0; r < round; ++r) { /* Phase 1*/ calAsync(B, r, r, r, 1, 1); syncAllStreams(); /* Phase 2*/ calAsync(B, r, r, 0, r, 1); calAsync(B, r, r, r +1, round - r -1, 1); calAsync(B, r, 0, r, 1, r); calAsync(B, r, r +1, r, 1, round - r -1); syncAllStreams(); /* Phase 3*/ calAsync(B, r, 0, 0, r, r); calAsync(B, r, 0, r +1, round -r -1, r); calAsync(B, r, r +1, 0, r, round - r -1); calAsync(B, r, r +1, r +1, round -r -1, round - r -1); syncAllStreams(); } hipMemcpy(Dist, dDist, sizeof(int) * n * n, hipMemcpyDeviceToHost); hipFree(dDist); hipFree(dPivot); } __global__ void Update (int k, int i0, int j0, int i1, int j1, int* dDist, int n) { #define D(i,j) (dDist[(i) * n + (j)]) int i = blockDim.x * blockIdx.x + threadIdx.x + i0; int j = blockDim.y * blockIdx.y + threadIdx.y + j0; if(i >= i1 || j >= j1) return; updateMin(D(i, j), D(i, k) + D(k, j)); } __global__ void UpdateIndependent (int k0, int k1, int i0, int j0, int i1, int j1, int* dDist, int n) { #define D(i,j) (dDist[(i) * n + (j)]) int tx = threadIdx.x; int ty = threadIdx.y; int di = blockDim.x * blockIdx.x + tx; int dj = blockDim.y * blockIdx.y + ty; int i = i0 + di; int j = j0 + dj; bool valid = i < i1 && j < j1; __shared__ int Si[MAX_THREAD_DIM2][MAX_THREAD_DIM2]; __shared__ int Sj[MAX_THREAD_DIM2][MAX_THREAD_DIM2]; const int cacheSize = MAX_THREAD_DIM2; int Dij = valid? 
D(i, j): 0; int dkmod = 0; for(int k = k0; k < k1; ++k) { if(dkmod == 0) { __syncthreads(); if(i < i1 && k+ty < k1) Si[ty][tx] = D(i, k+ty); if(j < j1 && k+tx < k1) Sj[tx][ty] = D(k+tx, j); __syncthreads(); } if(valid) { // assert(Si[tx][dkmod] == D(i,k)); // assert(Sj[dkmod][ty] == D(k,j)); // int Dik = D(i, k); // int Dkj = D(k, j); int Dik = Si[dkmod][tx]; int Dkj = Sj[dkmod][ty]; updateMin(Dij, Dik + Dkj); } dkmod = (dkmod + 1) % cacheSize; } if(valid) D(i, j) = Dij; } void calAsync(int B, int Round, int block_start_x, int block_start_y, int block_width, int block_height) { int block_end_x = block_start_x + block_height; int block_end_y = block_start_y + block_width; int block_total = block_width * block_height; for (int b_i = block_start_x; b_i < block_end_x; ++b_i) { for (int b_j = block_start_y; b_j < block_end_y; ++b_j) { // To calculate B*B elements in the block (b_i, b_j) // For each block, it need to compute B times // for (int k = Round * B; k < (Round +1) * B && k < n; ++k) { // To calculate original index of elements in the block (b_i, b_j) // For instance, original index of (0,0) in block (1,2) is (2,5) for V=6,B=2 int i0 = b_i * B; int i1 = min((b_i +1) * B, n); int j0 = b_j * B; int j1 = min((b_j +1) * B, n); int k0 = Round * B; int k1 = min((Round +1) * B, n); bool iDepends = i0 == k0; bool jDepends = j0 == k0; int threadDim = MAX_THREAD_DIM2;//::min(B, MAX_THREAD_DIM2); int blockDim = (B + MAX_THREAD_DIM2 - 1) / MAX_THREAD_DIM2; dim3 grid(blockDim, blockDim), block(threadDim, threadDim); hipStream_t stm = getIdleStream(); if(iDepends || jDepends) { for(int k=k0; k<k1; ++k) hipLaunchKernelGGL(( Update), dim3(grid), dim3(block), 0, stm, k, i0, j0, i1, j1, dDist, n); } else hipLaunchKernelGGL(( UpdateIndependent), dim3(grid), dim3(block), 0, stm, k0, k1, i0, j0, i1, j1, dDist, n); // for (int i = i0; i < i1; ++i) { // for (int j = j0; j < j1; ++j) { // if (Dist[i][k] + Dist[k][j] < Dist[i][j]) // Dist[i][j] = Dist[i][k] + Dist[k][j]; // } // } // } } } }
bc0b80f0ac84a1f8e366d9d5915ce1f2769b4f2c.cu
#include <stdio.h> #include <stdlib.h> #include <iostream> #include <assert.h> #include <vector> using namespace std; const int INF = 10000000; const int V = 10010; const int MAX_THREAD_DIM2 = 32; void input(char *inFileName, int B); void output(char *outFileName); void block_FW(int B); int ceil(int a, int b); void calAsync(int B, int Round, int block_start_x, int block_start_y, int block_width, int block_height); int realn; int n, m; // Number of vertices, edges int* Dist; // n * n, on host int* dDist; // n * n, on device int streamSize; vector<cudaStream_t> streams; inline cudaStream_t getIdleStream () { if(streams.size() == streamSize) { cudaStream_t stm; cudaStreamCreate(&stm); streams.push_back(stm); streamSize++; return stm; } else return streams[streamSize++]; } inline void syncAllStreams () { cudaThreadSynchronize(); streamSize = 0; } int main(int argc, char* argv[]) { int B = atoi(argv[3]); input(argv[1], B); // if(B > n) // { // B = n; // cerr << "Warning: B > n. Set B = n."; // } block_FW(B); output(argv[2]); return 0; } void input(char *inFileName, int B) { FILE *infile = fopen(inFileName, "r"); fscanf(infile, "%d %d", &realn, &m); n = ceil(realn, B) * B; Dist = new int[n * n]; for (int i = 0, k = 0; i < n; ++i) { for (int j = 0; j < n; ++j, ++k) { if (i == j) Dist[k] = 0; else Dist[k] = INF; } } while (--m >= 0) { int a, b, v; fscanf(infile, "%d %d %d", &a, &b, &v); --a, --b; Dist[a * n + b] = v; } } void output(char *outFileName) { FILE *outfile = fopen(outFileName, "w"); for (int i = 0; i < realn; ++i) { for (int j = 0; j < realn; ++j) { int d = Dist[i * n + j]; if (d >= INF) fprintf(outfile, "INF "); else fprintf(outfile, "%d ", d); } fprintf(outfile, "\n"); } delete[] Dist; } void print () { for (int i = 0; i < realn; ++i) { for (int j = 0; j < realn; ++j) { int d = Dist[i * n + j]; if (d >= INF) fprintf(stderr, "INF "); else fprintf(stderr, "%d ", d); } fprintf(stderr, "\n"); } fprintf(stderr, "\n"); } inline int ceil(int a, int b) { return (a + b -1)/b; } inline __device__ void updateMin (int &x, int a) { if(a < x) x = a; } __global__ void UpdateIKJ32 (int r, int* dDist, int n) { #define D(i,j) (dDist[(i) * n + (j)]) int tx = threadIdx.x; int ty = threadIdx.y; int i = r * 32 + tx; int j = r * 32 + ty; __shared__ int S[MAX_THREAD_DIM2][MAX_THREAD_DIM2]; S[tx][ty] = D(i, j); __syncthreads(); for(int k=0; k<32; ++k) { updateMin(S[tx][ty], S[tx][k] + S[k][ty]); __syncthreads(); } D(i, j) = S[tx][ty]; #undef D } __global__ void UpdateIK32 (int r, int* dDist, int n) { #define D(i,j) (dDist[(i) * n + (j)]) int tx = threadIdx.x; int ty = threadIdx.y; int by = blockIdx.x; if(by >= r) by++; int i = r * 32 + tx; int j = by * 32 + ty; __shared__ int S0[MAX_THREAD_DIM2][MAX_THREAD_DIM2]; __shared__ int S1[MAX_THREAD_DIM2][MAX_THREAD_DIM2]; S0[ty][tx] = D(i, r*32 + ty); S1[tx][ty] = D(i, j); __syncthreads(); for(int k=0; k<32; ++k) { updateMin(S1[tx][ty], S0[k][tx] + S1[k][ty]); __syncthreads(); } D(i, j) = S1[tx][ty]; #undef D } __global__ void UpdateKJ32 (int r, int* dDist, int n) // 0 --update--> 1 { #define D(i,j) (dDist[(i) * n + (j)]) int tx = threadIdx.x; int ty = threadIdx.y; int bx = blockIdx.x; if(bx >= r) bx++; int i = bx * 32 + tx; int j = r * 32 + ty; __shared__ int S0[MAX_THREAD_DIM2][MAX_THREAD_DIM2]; __shared__ int S1[MAX_THREAD_DIM2][MAX_THREAD_DIM2]; S0[ty][tx] = D(i, j); S1[tx][ty] = D(r*32 + tx, j); __syncthreads(); for(int k=0; k<32; ++k) { updateMin(S0[ty][tx], S0[k][tx] + S1[k][ty]); __syncthreads(); } D(i, j) = S0[ty][tx]; #undef D } __global__ void Update32 
(int r, int* dDist, int n) { #define D(i,j) (dDist[(i) * n + (j)]) int tx = threadIdx.x; int ty = threadIdx.y; int bx = blockIdx.x; int by = blockIdx.y; if(bx >= r) bx++; if(by >= r) by++; int i = bx * 32 + tx; int j = by * 32 + ty; __shared__ int S0[MAX_THREAD_DIM2][MAX_THREAD_DIM2]; __shared__ int S1[MAX_THREAD_DIM2][MAX_THREAD_DIM2]; S0[ty][tx] = D(i, r * 32 + ty); S1[tx][ty] = D(r * 32 + tx, j); __syncthreads(); int Dij = D(i, j); for(int k=0; k<32; ++k) { updateMin(Dij, S0[k][tx] + S1[k][ty]); __syncthreads(); } D(i, j) = Dij; #undef D } void block_FW(int B) { int *dPivot; cudaMalloc(&dDist, sizeof(int) * n * n); cudaMalloc(&dPivot, sizeof(int) * B * B); cudaMemcpy(dDist, Dist, sizeof(int) * n * n, cudaMemcpyHostToDevice); int round = ceil(n, B); if(B == 32) { for (int r = 0; r < round; ++r) { /* Phase 1*/ UpdateIKJ32 <<< 1, dim3(32,32) >>> (r, dDist, n); /* Phase 2*/ UpdateIK32 <<< round-1, dim3(32,32), 0, getIdleStream() >>> (r, dDist, n); UpdateKJ32 <<< round-1, dim3(32,32), 0, getIdleStream() >>> (r, dDist, n); syncAllStreams(); /* Phase 3*/ Update32 <<< dim3(round-1, round-1), dim3(32,32) >>> (r, dDist, n); } } else for (int r = 0; r < round; ++r) { /* Phase 1*/ calAsync(B, r, r, r, 1, 1); syncAllStreams(); /* Phase 2*/ calAsync(B, r, r, 0, r, 1); calAsync(B, r, r, r +1, round - r -1, 1); calAsync(B, r, 0, r, 1, r); calAsync(B, r, r +1, r, 1, round - r -1); syncAllStreams(); /* Phase 3*/ calAsync(B, r, 0, 0, r, r); calAsync(B, r, 0, r +1, round -r -1, r); calAsync(B, r, r +1, 0, r, round - r -1); calAsync(B, r, r +1, r +1, round -r -1, round - r -1); syncAllStreams(); } cudaMemcpy(Dist, dDist, sizeof(int) * n * n, cudaMemcpyDeviceToHost); cudaFree(dDist); cudaFree(dPivot); } __global__ void Update (int k, int i0, int j0, int i1, int j1, int* dDist, int n) { #define D(i,j) (dDist[(i) * n + (j)]) int i = blockDim.x * blockIdx.x + threadIdx.x + i0; int j = blockDim.y * blockIdx.y + threadIdx.y + j0; if(i >= i1 || j >= j1) return; updateMin(D(i, j), D(i, k) + D(k, j)); } __global__ void UpdateIndependent (int k0, int k1, int i0, int j0, int i1, int j1, int* dDist, int n) { #define D(i,j) (dDist[(i) * n + (j)]) int tx = threadIdx.x; int ty = threadIdx.y; int di = blockDim.x * blockIdx.x + tx; int dj = blockDim.y * blockIdx.y + ty; int i = i0 + di; int j = j0 + dj; bool valid = i < i1 && j < j1; __shared__ int Si[MAX_THREAD_DIM2][MAX_THREAD_DIM2]; __shared__ int Sj[MAX_THREAD_DIM2][MAX_THREAD_DIM2]; const int cacheSize = MAX_THREAD_DIM2; int Dij = valid? 
D(i, j): 0; int dkmod = 0; for(int k = k0; k < k1; ++k) { if(dkmod == 0) { __syncthreads(); if(i < i1 && k+ty < k1) Si[ty][tx] = D(i, k+ty); if(j < j1 && k+tx < k1) Sj[tx][ty] = D(k+tx, j); __syncthreads(); } if(valid) { // assert(Si[tx][dkmod] == D(i,k)); // assert(Sj[dkmod][ty] == D(k,j)); // int Dik = D(i, k); // int Dkj = D(k, j); int Dik = Si[dkmod][tx]; int Dkj = Sj[dkmod][ty]; updateMin(Dij, Dik + Dkj); } dkmod = (dkmod + 1) % cacheSize; } if(valid) D(i, j) = Dij; } void calAsync(int B, int Round, int block_start_x, int block_start_y, int block_width, int block_height) { int block_end_x = block_start_x + block_height; int block_end_y = block_start_y + block_width; int block_total = block_width * block_height; for (int b_i = block_start_x; b_i < block_end_x; ++b_i) { for (int b_j = block_start_y; b_j < block_end_y; ++b_j) { // To calculate B*B elements in the block (b_i, b_j) // For each block, it need to compute B times // for (int k = Round * B; k < (Round +1) * B && k < n; ++k) { // To calculate original index of elements in the block (b_i, b_j) // For instance, original index of (0,0) in block (1,2) is (2,5) for V=6,B=2 int i0 = b_i * B; int i1 = min((b_i +1) * B, n); int j0 = b_j * B; int j1 = min((b_j +1) * B, n); int k0 = Round * B; int k1 = min((Round +1) * B, n); bool iDepends = i0 == k0; bool jDepends = j0 == k0; int threadDim = MAX_THREAD_DIM2;//std::min(B, MAX_THREAD_DIM2); int blockDim = (B + MAX_THREAD_DIM2 - 1) / MAX_THREAD_DIM2; dim3 grid(blockDim, blockDim), block(threadDim, threadDim); cudaStream_t stm = getIdleStream(); if(iDepends || jDepends) { for(int k=k0; k<k1; ++k) Update<<<grid, block, 0, stm>>>(k, i0, j0, i1, j1, dDist, n); } else UpdateIndependent<<<grid, block, 0, stm>>>(k0, k1, i0, j0, i1, j1, dDist, n); // for (int i = i0; i < i1; ++i) { // for (int j = j0; j < j1; ++j) { // if (Dist[i][k] + Dist[k][j] < Dist[i][j]) // Dist[i][j] = Dist[i][k] + Dist[k][j]; // } // } // } } } }
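The blocked Floyd-Warshall pair above splits every round into three phases (the pivot block, the pivot row and column, and all remaining blocks) before copying Dist back from the device. For validating the GPU result, a plain O(n^3) Floyd-Warshall over the same flattened, INF-padded matrix is a convenient reference. The sketch below is not part of either file, and only the leading realn x realn sub-matrix is meaningful to compare, since input() pads n up to a multiple of B.

// Host-side reference for checking block_FW's output: classic Floyd-Warshall
// on the padded, row-major distance matrix (a sketch, not from the original files).
void floydWarshallReference(int *dist, int n)
{
    for (int k = 0; k < n; ++k) {
        for (int i = 0; i < n; ++i) {
            const int dik = dist[i * n + k];                // hoisted D(i, k) for this i
            for (int j = 0; j < n; ++j) {
                const int viaK = dik + dist[k * n + j];     // stays below INT_MAX with INF = 1e7
                if (viaK < dist[i * n + j])
                    dist[i * n + j] = viaK;
            }
        }
    }
}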
b0dc3e05068ab425afa4c5852f318192b34c2724.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"

// CUDA runtime
// Helper functions and utilities to work with CUDA
//Standard C library

#define subCOL 5248
#define COL 5248
#define ROW 358
#define WARPABLEROW 512
#define blocksize 256
#define subMatDim subCOL*WARPABLEROW
#define targetMatDim ROW * COL

__global__ void reduce2(int *g_idata, int *g_odata, int g_size)
{
    __shared__ int sdata[blocksize];

    // each thread loads one element from global to shared mem
    unsigned int tid = threadIdx.x;
    unsigned int i = blockIdx.x*blockDim.x + threadIdx.x;
    sdata[tid] = g_idata[i];
    __syncthreads();

    // do reduction in shared mem
    for (unsigned int s = 1; s < blockDim.x; s *= 2)
    {
        int index = 2 * s*tid;
        if (index < blockDim.x)
        {
            sdata[index] += sdata[index + s];
        }
        __syncthreads();
    }

    // write result for this block to global mem
    if (tid == 0) g_odata[blockIdx.x] = sdata[0];
}
b0dc3e05068ab425afa4c5852f318192b34c2724.cu
#include "includes.h" // CUDA runtime // Helper functions and utilities to work with CUDA //Standard C library #define subCOL 5248 #define COL 5248 #define ROW 358 #define WARPABLEROW 512 #define blocksize 256 #define subMatDim subCOL*WARPABLEROW #define targetMatDim ROW * COL __global__ void reduce2(int *g_idata, int *g_odata, int g_size) { __shared__ int sdata[blocksize]; // each thread loads one element from global to shared mem unsigned int tid = threadIdx.x; unsigned int i = blockIdx.x*blockDim.x + threadIdx.x; sdata[tid] = g_idata[i]; __syncthreads(); // do reduction in shared mem for (unsigned int s = 1; s < blockDim.x; s *= 2) { int index = 2 * s*tid; if (index < blockDim.x) { sdata[index] += sdata[index + s]; } __syncthreads(); } // write result for this block to global mem if (tid == 0) g_odata[blockIdx.x] = sdata[0]; }
a944172e8621e0fbd031634efa666c645c4ca219.hip
// !!! This is a file automatically generated by hipify!!! // ---------------------------------------------------------------------------- // Gunrock -- Fast and Efficient GPU Graph Library // ---------------------------------------------------------------------------- // This source code is distributed under the terms of LICENSE.TXT // in the root directory of this source distribution. // ---------------------------------------------------------------------------- /** * @file color_app.cu * * @brief Graph Coloring Gunrock Application */ #include <gunrock/gunrock.h> // Utilities and correctness-checking #include <gunrock/util/test_utils.cuh> // Graph definitions #include <gunrock/app/app_base.cuh> #include <gunrock/app/test_base.cuh> #include <gunrock/graphio/graphio.cuh> // Graph Coloring #include <gunrock/app/color/color_enactor.cuh> #include <gunrock/app/color/color_test.cuh> // Others #include <cstdio> namespace gunrock { namespace app { namespace color { hipError_t UseParameters(util::Parameters &parameters) { hipError_t retval = hipSuccess; GUARD_CU(UseParameters_app(parameters)); GUARD_CU(UseParameters_problem(parameters)); GUARD_CU(UseParameters_enactor(parameters)); GUARD_CU(UseParameters_test(parameters)); GUARD_CU(parameters.Use<unsigned int>( "num-colors", util::REQUIRED_ARGUMENT | util::SINGLE_VALUE | util::INTERNAL_PARAMETER, 0, "number of output colors", __FILE__, __LINE__)); GUARD_CU(parameters.Use<bool>( "loop-color", util::REQUIRED_ARGUMENT | util::OPTIONAL_PARAMETER, true, "Serially compare rand to all node neighbor, disable to use advance \ neighbor reduce (default=false)", __FILE__, __LINE__)); GUARD_CU(parameters.Use<bool>( "min-color", util::REQUIRED_ARGUMENT | util::OPTIONAL_PARAMETER, true, "Enable coloring with minimum independent set as well as \ maximum(default=true)", __FILE__, __LINE__)); GUARD_CU(parameters.Use<bool>( "test-run", util::REQUIRED_ARGUMENT | util::OPTIONAL_PARAMETER, true, "Perform test run to atomically generate max iteration (default=true)", __FILE__, __LINE__)); GUARD_CU(parameters.Use<int>( "user-iter", util::REQUIRED_ARGUMENT | util::SINGLE_VALUE | util::OPTIONAL_PARAMETER, 3, "Number of iterations color should run for (default=3).", __FILE__, __LINE__)); GUARD_CU(parameters.Use<bool>( "JPL", util::REQUIRED_ARGUMENT | util::OPTIONAL_PARAMETER, false, "Use JPL exact coloring method (true=use JPL).", __FILE__, __LINE__)); GUARD_CU(parameters.Use<int>( "no-conflict", util::REQUIRED_ARGUMENT | util::OPTIONAL_PARAMETER, 0, "Resolve color conflict, 0 to skip check, 1 to check at end of\ every iteration with random,\ 2 to check at end of every iteration with degree(default = 0).", __FILE__, __LINE__)); GUARD_CU(parameters.Use<int>( "prohibit-size", util::REQUIRED_ARGUMENT | util::OPTIONAL_PARAMETER, 0, "Needed to allocate memory for hash function, if parameter is\ positive,\ hash coloring is used instead of random coloring (default = 0).", __FILE__, __LINE__)); GUARD_CU(parameters.Use<int>( "seed", util::REQUIRED_ARGUMENT | util::OPTIONAL_PARAMETER, time(NULL), "seed for random number generator", __FILE__, __LINE__)); GUARD_CU(parameters.Use<bool>( "LBCOLOR", util::REQUIRED_ARGUMENT | util::OPTIONAL_PARAMETER, false, "load balancing enabled for graph coloring (true=neighbor_reduce)", __FILE__, __LINE__)); return retval; } /** * @brief Run color tests * @tparam GraphT Type of the graph * @tparam ValueT Type of the distances * @param[in] parameters Excution parameters * @param[in] graph Input graph ... 
* @param[in] target where to perform the app * \return hipError_t error message(s), if any */ template <typename GraphT> hipError_t RunTests(util::Parameters &parameters, GraphT &graph, bool color_balance, typename GraphT::VertexT *ref_colors, util::Location target) { hipError_t retval = hipSuccess; typedef typename GraphT::VertexT VertexT; typedef typename GraphT::ValueT ValueT; typedef typename GraphT::SizeT SizeT; typedef Problem<GraphT> ProblemT; typedef Enactor<ProblemT> EnactorT; // CLI parameters bool quiet_mode = parameters.Get<bool>("quiet"); int num_runs = parameters.Get<int>("num-runs"); std::string validation = parameters.Get<std::string>("validation"); util::Info info("color", parameters, graph); util::CpuTimer cpu_timer, total_timer; cpu_timer.Start(); total_timer.Start(); VertexT *h_colors = new VertexT[graph.nodes]; // Allocate problem and enactor on GPU, and initialize them ProblemT problem(parameters); EnactorT enactor; GUARD_CU(problem.Init(graph, target)); GUARD_CU(enactor.Init(problem, target)); cpu_timer.Stop(); parameters.Set("preprocess-time", cpu_timer.ElapsedMillis()); int num_colors = 0; for (int run_num = 0; run_num < num_runs; ++run_num) { GUARD_CU(problem.Reset(target)); GUARD_CU(enactor.Reset(target)); util::PrintMsg("__________________________", !quiet_mode); cpu_timer.Start(); GUARD_CU(enactor.Enact()); cpu_timer.Stop(); info.CollectSingleRun(cpu_timer.ElapsedMillis()); util::PrintMsg( "--------------------------\nRun " + std::to_string(run_num) + " elapsed: " + std::to_string(cpu_timer.ElapsedMillis()) + ", #iterations = " + std::to_string(enactor.enactor_slices[0].enactor_stats.iteration), !quiet_mode); if (validation == "each") { GUARD_CU(problem.Extract(h_colors)); SizeT num_errors = Validate_Results(parameters, graph, h_colors, ref_colors, false); } } cpu_timer.Start(); GUARD_CU(problem.Extract(h_colors)); if (validation == "last") { SizeT num_errors = Validate_Results(parameters, graph, h_colors, ref_colors, false); } // count number of colors std::unordered_set<int> set; for (SizeT v = 0; v < graph.nodes; v++) { int c = h_colors[v]; if (set.find(c) == set.end()) { set.insert(c); num_colors++; } } util::PrintMsg("Number of colors: " + std::to_string(num_colors), !quiet_mode); parameters.Set("num-colors", num_colors); // compute running statistics info.ComputeTraversalStats(enactor, (VertexT *)NULL); // Display_Memory_Usage(problem); #ifdef ENABLE_PERFORMANCE_PROFILING // Display_Performance_Profiling(&enactor); #endif // Clean up GUARD_CU(enactor.Release(target)); GUARD_CU(problem.Release(target)); delete[] h_colors; h_colors = NULL; cpu_timer.Stop(); total_timer.Stop(); info.Finalize(cpu_timer.ElapsedMillis(), total_timer.ElapsedMillis()); return retval; } } // namespace color } // namespace app } // namespace gunrock /* * @brief Entry of gunrock_color function * @tparam GraphT Type of the graph * @tparam VertexT Type of the colors * @param[in] parameters Excution parameters * @param[in] graph Input graph * @param[out] colors Return generated colors for each run * @param[out] num_colors Return number of colors generated for each run * \return double Return accumulated elapsed times for all runs */ template <typename GraphT, typename VertexT = typename GraphT::VertexT, typename SizeT = typename GraphT::SizeT> double gunrock_color(gunrock::util::Parameters &parameters, GraphT &graph, VertexT **colors, SizeT *num_colors) { typedef gunrock::app::color::Problem<GraphT> ProblemT; typedef gunrock::app::color::Enactor<ProblemT> EnactorT; 
gunrock::util::CpuTimer cpu_timer; gunrock::util::Location target = gunrock::util::DEVICE; double total_time = 0; if (parameters.UseDefault("quiet")) parameters.Set("quiet", true); // Allocate problem and enactor on GPU, and initialize them ProblemT problem(parameters); EnactorT enactor; problem.Init(graph, target); enactor.Init(problem, target); int num_runs = parameters.Get<int>("num-runs"); for (int run_num = 0; run_num < num_runs; ++run_num) { problem.Reset(target); enactor.Reset(target); cpu_timer.Start(); enactor.Enact(); cpu_timer.Stop(); total_time += cpu_timer.ElapsedMillis(); problem.Extract(colors[run_num]); // count number of colors std::unordered_set<int> set; for (SizeT v = 0; v < graph.nodes; v++) { int c = colors[run_num][v]; if (set.find(c) == set.end()) { set.insert(c); num_colors[run_num] += 1; } } } enactor.Release(target); problem.Release(target); return total_time; } /* * @brief Entry of gunrock_color function * @tparam VertexT Type of the colors * @tparam SizeT Type of the num_colors * @param[in] parameters Excution parameters * @param[in] graph Input graph * @param[out] colors Return generated colors for each run * @param[out] num_colors Return number of colors generated for each run * \return double Return accumulated elapsed times for all runs */ template <typename VertexT = int, typename SizeT = int, typename GValueT = unsigned int> double color(const SizeT num_nodes, const SizeT num_edges, const SizeT *row_offsets, const VertexT *col_indices, const int num_runs, int **colors, int *num_colors, const GValueT edge_values = NULL) { typedef typename gunrock::app::TestGraph<VertexT, SizeT, GValueT, gunrock::graph::HAS_CSR> GraphT; typedef typename GraphT::CsrT CsrT; // Setup parameters gunrock::util::Parameters parameters("color"); gunrock::graphio::UseParameters(parameters); gunrock::app::color::UseParameters(parameters); gunrock::app::UseParameters_test(parameters); parameters.Parse_CommandLine(0, NULL); parameters.Set("graph-type", "by-pass"); parameters.Set("num-runs", num_runs); bool quiet = parameters.Get<bool>("quiet"); GraphT graph; // Assign pointers into gunrock graph format graph.CsrT::Allocate(num_nodes, num_edges, gunrock::util::HOST); graph.CsrT::row_offsets.SetPointer((SizeT *)row_offsets, num_nodes + 1, gunrock::util::HOST); graph.CsrT::column_indices.SetPointer((VertexT *)col_indices, num_edges, gunrock::util::HOST); // graph.FromCsr(graph.csr(), true, quiet); gunrock::graphio::LoadGraph(parameters, graph); // Run the graph coloring double elapsed_time = gunrock_color(parameters, graph, colors, num_colors); // Cleanup graph.Release(); return elapsed_time; } /* * @brief Entry of gunrock_color function * @tparam VertexT Type of the colors * @tparam SizeT Type of the num_colors * @param[in] parameters Excution parameters * @param[in] graph Input graph * @param[out] colors Return generated colors for each run * @param[out] num_colors Return number of colors generated for each run * \return double Return accumulated elapsed times for all runs */ double color(const int num_nodes, const int num_edges, const int *row_offsets, const int *col_indices, int *colors, int num_colors) { return color(num_nodes, num_edges, row_offsets, col_indices, 1 /* num_runs */, &colors, &num_colors); } // Leave this at the end of the file // Local Variables: // mode:c++ // c-file-style: "NVIDIA" // End:
a944172e8621e0fbd031634efa666c645c4ca219.cu
// ---------------------------------------------------------------------------- // Gunrock -- Fast and Efficient GPU Graph Library // ---------------------------------------------------------------------------- // This source code is distributed under the terms of LICENSE.TXT // in the root directory of this source distribution. // ---------------------------------------------------------------------------- /** * @file color_app.cu * * @brief Graph Coloring Gunrock Application */ #include <gunrock/gunrock.h> // Utilities and correctness-checking #include <gunrock/util/test_utils.cuh> // Graph definitions #include <gunrock/app/app_base.cuh> #include <gunrock/app/test_base.cuh> #include <gunrock/graphio/graphio.cuh> // Graph Coloring #include <gunrock/app/color/color_enactor.cuh> #include <gunrock/app/color/color_test.cuh> // Others #include <cstdio> namespace gunrock { namespace app { namespace color { cudaError_t UseParameters(util::Parameters &parameters) { cudaError_t retval = cudaSuccess; GUARD_CU(UseParameters_app(parameters)); GUARD_CU(UseParameters_problem(parameters)); GUARD_CU(UseParameters_enactor(parameters)); GUARD_CU(UseParameters_test(parameters)); GUARD_CU(parameters.Use<unsigned int>( "num-colors", util::REQUIRED_ARGUMENT | util::SINGLE_VALUE | util::INTERNAL_PARAMETER, 0, "number of output colors", __FILE__, __LINE__)); GUARD_CU(parameters.Use<bool>( "loop-color", util::REQUIRED_ARGUMENT | util::OPTIONAL_PARAMETER, true, "Serially compare rand to all node neighbor, disable to use advance \ neighbor reduce (default=false)", __FILE__, __LINE__)); GUARD_CU(parameters.Use<bool>( "min-color", util::REQUIRED_ARGUMENT | util::OPTIONAL_PARAMETER, true, "Enable coloring with minimum independent set as well as \ maximum(default=true)", __FILE__, __LINE__)); GUARD_CU(parameters.Use<bool>( "test-run", util::REQUIRED_ARGUMENT | util::OPTIONAL_PARAMETER, true, "Perform test run to atomically generate max iteration (default=true)", __FILE__, __LINE__)); GUARD_CU(parameters.Use<int>( "user-iter", util::REQUIRED_ARGUMENT | util::SINGLE_VALUE | util::OPTIONAL_PARAMETER, 3, "Number of iterations color should run for (default=3).", __FILE__, __LINE__)); GUARD_CU(parameters.Use<bool>( "JPL", util::REQUIRED_ARGUMENT | util::OPTIONAL_PARAMETER, false, "Use JPL exact coloring method (true=use JPL).", __FILE__, __LINE__)); GUARD_CU(parameters.Use<int>( "no-conflict", util::REQUIRED_ARGUMENT | util::OPTIONAL_PARAMETER, 0, "Resolve color conflict, 0 to skip check, 1 to check at end of\ every iteration with random,\ 2 to check at end of every iteration with degree(default = 0).", __FILE__, __LINE__)); GUARD_CU(parameters.Use<int>( "prohibit-size", util::REQUIRED_ARGUMENT | util::OPTIONAL_PARAMETER, 0, "Needed to allocate memory for hash function, if parameter is\ positive,\ hash coloring is used instead of random coloring (default = 0).", __FILE__, __LINE__)); GUARD_CU(parameters.Use<int>( "seed", util::REQUIRED_ARGUMENT | util::OPTIONAL_PARAMETER, time(NULL), "seed for random number generator", __FILE__, __LINE__)); GUARD_CU(parameters.Use<bool>( "LBCOLOR", util::REQUIRED_ARGUMENT | util::OPTIONAL_PARAMETER, false, "load balancing enabled for graph coloring (true=neighbor_reduce)", __FILE__, __LINE__)); return retval; } /** * @brief Run color tests * @tparam GraphT Type of the graph * @tparam ValueT Type of the distances * @param[in] parameters Excution parameters * @param[in] graph Input graph ... 
* @param[in] target where to perform the app * \return cudaError_t error message(s), if any */ template <typename GraphT> cudaError_t RunTests(util::Parameters &parameters, GraphT &graph, bool color_balance, typename GraphT::VertexT *ref_colors, util::Location target) { cudaError_t retval = cudaSuccess; typedef typename GraphT::VertexT VertexT; typedef typename GraphT::ValueT ValueT; typedef typename GraphT::SizeT SizeT; typedef Problem<GraphT> ProblemT; typedef Enactor<ProblemT> EnactorT; // CLI parameters bool quiet_mode = parameters.Get<bool>("quiet"); int num_runs = parameters.Get<int>("num-runs"); std::string validation = parameters.Get<std::string>("validation"); util::Info info("color", parameters, graph); util::CpuTimer cpu_timer, total_timer; cpu_timer.Start(); total_timer.Start(); VertexT *h_colors = new VertexT[graph.nodes]; // Allocate problem and enactor on GPU, and initialize them ProblemT problem(parameters); EnactorT enactor; GUARD_CU(problem.Init(graph, target)); GUARD_CU(enactor.Init(problem, target)); cpu_timer.Stop(); parameters.Set("preprocess-time", cpu_timer.ElapsedMillis()); int num_colors = 0; for (int run_num = 0; run_num < num_runs; ++run_num) { GUARD_CU(problem.Reset(target)); GUARD_CU(enactor.Reset(target)); util::PrintMsg("__________________________", !quiet_mode); cpu_timer.Start(); GUARD_CU(enactor.Enact()); cpu_timer.Stop(); info.CollectSingleRun(cpu_timer.ElapsedMillis()); util::PrintMsg( "--------------------------\nRun " + std::to_string(run_num) + " elapsed: " + std::to_string(cpu_timer.ElapsedMillis()) + ", #iterations = " + std::to_string(enactor.enactor_slices[0].enactor_stats.iteration), !quiet_mode); if (validation == "each") { GUARD_CU(problem.Extract(h_colors)); SizeT num_errors = Validate_Results(parameters, graph, h_colors, ref_colors, false); } } cpu_timer.Start(); GUARD_CU(problem.Extract(h_colors)); if (validation == "last") { SizeT num_errors = Validate_Results(parameters, graph, h_colors, ref_colors, false); } // count number of colors std::unordered_set<int> set; for (SizeT v = 0; v < graph.nodes; v++) { int c = h_colors[v]; if (set.find(c) == set.end()) { set.insert(c); num_colors++; } } util::PrintMsg("Number of colors: " + std::to_string(num_colors), !quiet_mode); parameters.Set("num-colors", num_colors); // compute running statistics info.ComputeTraversalStats(enactor, (VertexT *)NULL); // Display_Memory_Usage(problem); #ifdef ENABLE_PERFORMANCE_PROFILING // Display_Performance_Profiling(&enactor); #endif // Clean up GUARD_CU(enactor.Release(target)); GUARD_CU(problem.Release(target)); delete[] h_colors; h_colors = NULL; cpu_timer.Stop(); total_timer.Stop(); info.Finalize(cpu_timer.ElapsedMillis(), total_timer.ElapsedMillis()); return retval; } } // namespace color } // namespace app } // namespace gunrock /* * @brief Entry of gunrock_color function * @tparam GraphT Type of the graph * @tparam VertexT Type of the colors * @param[in] parameters Excution parameters * @param[in] graph Input graph * @param[out] colors Return generated colors for each run * @param[out] num_colors Return number of colors generated for each run * \return double Return accumulated elapsed times for all runs */ template <typename GraphT, typename VertexT = typename GraphT::VertexT, typename SizeT = typename GraphT::SizeT> double gunrock_color(gunrock::util::Parameters &parameters, GraphT &graph, VertexT **colors, SizeT *num_colors) { typedef gunrock::app::color::Problem<GraphT> ProblemT; typedef gunrock::app::color::Enactor<ProblemT> EnactorT; 
gunrock::util::CpuTimer cpu_timer; gunrock::util::Location target = gunrock::util::DEVICE; double total_time = 0; if (parameters.UseDefault("quiet")) parameters.Set("quiet", true); // Allocate problem and enactor on GPU, and initialize them ProblemT problem(parameters); EnactorT enactor; problem.Init(graph, target); enactor.Init(problem, target); int num_runs = parameters.Get<int>("num-runs"); for (int run_num = 0; run_num < num_runs; ++run_num) { problem.Reset(target); enactor.Reset(target); cpu_timer.Start(); enactor.Enact(); cpu_timer.Stop(); total_time += cpu_timer.ElapsedMillis(); problem.Extract(colors[run_num]); // count number of colors std::unordered_set<int> set; for (SizeT v = 0; v < graph.nodes; v++) { int c = colors[run_num][v]; if (set.find(c) == set.end()) { set.insert(c); num_colors[run_num] += 1; } } } enactor.Release(target); problem.Release(target); return total_time; } /* * @brief Entry of gunrock_color function * @tparam VertexT Type of the colors * @tparam SizeT Type of the num_colors * @param[in] parameters Excution parameters * @param[in] graph Input graph * @param[out] colors Return generated colors for each run * @param[out] num_colors Return number of colors generated for each run * \return double Return accumulated elapsed times for all runs */ template <typename VertexT = int, typename SizeT = int, typename GValueT = unsigned int> double color(const SizeT num_nodes, const SizeT num_edges, const SizeT *row_offsets, const VertexT *col_indices, const int num_runs, int **colors, int *num_colors, const GValueT edge_values = NULL) { typedef typename gunrock::app::TestGraph<VertexT, SizeT, GValueT, gunrock::graph::HAS_CSR> GraphT; typedef typename GraphT::CsrT CsrT; // Setup parameters gunrock::util::Parameters parameters("color"); gunrock::graphio::UseParameters(parameters); gunrock::app::color::UseParameters(parameters); gunrock::app::UseParameters_test(parameters); parameters.Parse_CommandLine(0, NULL); parameters.Set("graph-type", "by-pass"); parameters.Set("num-runs", num_runs); bool quiet = parameters.Get<bool>("quiet"); GraphT graph; // Assign pointers into gunrock graph format graph.CsrT::Allocate(num_nodes, num_edges, gunrock::util::HOST); graph.CsrT::row_offsets.SetPointer((SizeT *)row_offsets, num_nodes + 1, gunrock::util::HOST); graph.CsrT::column_indices.SetPointer((VertexT *)col_indices, num_edges, gunrock::util::HOST); // graph.FromCsr(graph.csr(), true, quiet); gunrock::graphio::LoadGraph(parameters, graph); // Run the graph coloring double elapsed_time = gunrock_color(parameters, graph, colors, num_colors); // Cleanup graph.Release(); return elapsed_time; } /* * @brief Entry of gunrock_color function * @tparam VertexT Type of the colors * @tparam SizeT Type of the num_colors * @param[in] parameters Excution parameters * @param[in] graph Input graph * @param[out] colors Return generated colors for each run * @param[out] num_colors Return number of colors generated for each run * \return double Return accumulated elapsed times for all runs */ double color(const int num_nodes, const int num_edges, const int *row_offsets, const int *col_indices, int *colors, int num_colors) { return color(num_nodes, num_edges, row_offsets, col_indices, 1 /* num_runs */, &colors, &num_colors); } // Leave this at the end of the file // Local Variables: // mode:c++ // c-file-style: "NVIDIA" // End:
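Both versions of color_app end with a plain C-style overload, color(num_nodes, num_edges, row_offsets, col_indices, colors, num_colors), that wraps a single run. The sketch below shows one way to call it with a hand-built CSR graph; the 4-node cycle is made up for illustration, the code assumes it is compiled together with the file above and linked against Gunrock, and note that num_colors is taken by value in this overload, so the color count is not returned through it.

#include <cstdio>

// Illustrative caller for the simple color() overload defined above (assumes
// the Gunrock build environment; the graph is a hypothetical 4-node cycle).
int main()
{
    // Undirected cycle 0-1-2-3-0, stored in CSR with both edge directions.
    int row_offsets[] = {0, 2, 4, 6, 8};
    int col_indices[] = {1, 3, 0, 2, 1, 3, 0, 2};
    int colors[4] = {0, 0, 0, 0};
    int num_colors = 0;   // passed by value below, so it is not written back

    double elapsed = color(4, 8, row_offsets, col_indices, colors, num_colors);

    printf("coloring took %f ms\n", elapsed);
    for (int v = 0; v < 4; ++v)
        printf("node %d -> color %d\n", v, colors[v]);
    return 0;
}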
3dbd848b5038251a1e4573b3acca0e9311a40c83.hip
// !!! This is a file automatically generated by hipify!!! //////////////////////////////////////////////////////////////////////////////// // Copyright (c) 2018, Lawrence Livermore National Security, LLC. Produced at the // Lawrence Livermore National Laboratory in collaboration with University of // Illinois Urbana-Champaign. // // Written by the LBANN Research Team (N. Dryden, N. Maruyama, et al.) listed in // the CONTRIBUTORS file. <[email protected]> // // LLNL-CODE-756777. // All rights reserved. // // This file is part of Aluminum GPU-aware Communication Library. For details, see // http://software.llnl.gov/Aluminum or https://github.com/LLNL/Aluminum. // // Licensed under the Apache License, Version 2.0 (the "Licensee"); you // may not use this file except in compliance with the License. You may // obtain a copy of the License at: // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or // implied. See the License for the specific language governing // permissions and limitations under the license. //////////////////////////////////////////////////////////////////////////////// #include <hip/hip_runtime.h> //#include <thrust/transform.h> //#include <thrust/device_ptr.h> #include "mpi_cuda/cuda_kernels.hpp" namespace Al { namespace internal { namespace mpi_cuda { template <typename T, ReductionOperator op> struct BinaryOp; template <typename T> struct BinaryOp<T, ReductionOperator::sum> { __device__ static T calc(const T& x, const T& y) { return x + y; } }; template <typename T> struct BinaryOp<T, ReductionOperator::prod> { __device__ static T calc(const T& x, const T& y) { return x * y; } }; template <typename T> struct BinaryOp<T, ReductionOperator::min> { __device__ static T calc(const T& x, const T& y) { return (x < y ? x : y); } }; template <typename T> struct BinaryOp<T, ReductionOperator::max> { __device__ static T calc(const T& x, const T& y) { return (x > y ? 
x : y); } }; template <typename T> struct BinaryOp<T, ReductionOperator::lor> { __device__ static T calc(const T& x, const T& y) { return x || y; } }; template <typename T> struct BinaryOp<T, ReductionOperator::land> { __device__ static T calc(const T& x, const T& y) { return x && y; } }; template <typename T> struct BinaryOp<T, ReductionOperator::lxor> { __device__ static T calc(const T& x, const T& y) { return !x != !y; } }; template <typename T> struct BinaryOp<T, ReductionOperator::bor> { __device__ static T calc(const T& x, const T& y) { return x | y; } }; template <typename T> struct BinaryOp<T, ReductionOperator::band> { __device__ static T calc(const T& x, const T& y) { return x & y; } }; template <typename T> struct BinaryOp<T, ReductionOperator::bxor> { __device__ static T calc(const T& x, const T& y) { return x ^ y; } }; template <typename T, int len> struct ShortVectorType { using type = T; }; template <> struct ShortVectorType<float, 1> { using type = float; }; template <> struct ShortVectorType<float, 2> { using type = float2; }; template <> struct ShortVectorType<float, 4> { using type = float4; }; template <> struct ShortVectorType<double, 1> { using type = double; }; template <> struct ShortVectorType<double, 2> { using type = double2; }; template <> struct ShortVectorType<double, 4> { using type = double4; }; template <> struct ShortVectorType<int, 1> { using type = int; }; template <> struct ShortVectorType<int, 2> { using type = int2; }; template <> struct ShortVectorType<int, 4> { using type = int4; }; template <> struct ShortVectorType<long, 1> { using type = long; }; template <> struct ShortVectorType<long, 2> { using type = long2; }; template <> struct ShortVectorType<long, 4> { using type = long4; }; template <ReductionOperator op, typename T, int VectorLen> struct ReduceKernel; template <ReductionOperator op, typename T> struct ReduceKernel<op, T, 1> { __device__ static void kernel(void *dst, const void *src, size_t count) { size_t offset = blockIdx.x * blockDim.x + threadIdx.x; if (offset >= count) return; T *dst_t = static_cast<T*>(dst); const T *src_t = static_cast<const T*>(src); dst_t[offset] = BinaryOp<T, op>::calc(dst_t[offset], src_t[offset]); } }; template <ReductionOperator op, typename T> struct ReduceKernel<op, T, 2> { __device__ static void kernel(void *dst, const void *src, size_t count) { using VectorT = typename ShortVectorType<T, 2>::type; size_t offset = blockIdx.x * blockDim.x + threadIdx.x; if (offset >= count) return; VectorT *dst_vector = static_cast<VectorT*>(dst); const VectorT *src_vector = static_cast<const VectorT*>(src); VectorT d = dst_vector[offset]; VectorT s = src_vector[offset]; d.x = BinaryOp<T, op>::calc(d.x, s.x); d.y = BinaryOp<T, op>::calc(d.y, s.y); dst_vector[offset] = d; } }; template <ReductionOperator op, typename T> struct ReduceKernel<op, T, 4> { __device__ static void kernel(void *dst, const void *src, size_t count) { using VectorT = typename ShortVectorType<T, 4>::type; size_t offset = blockIdx.x * blockDim.x + threadIdx.x; if (offset >= count) return; VectorT *dst_vector = static_cast<VectorT*>(dst); const VectorT *src_vector = static_cast<const VectorT*>(src); VectorT d = dst_vector[offset]; VectorT s = src_vector[offset]; d.x = BinaryOp<T, op>::calc(d.x, s.x); d.y = BinaryOp<T, op>::calc(d.y, s.y); d.z = BinaryOp<T, op>::calc(d.z, s.z); d.w = BinaryOp<T, op>::calc(d.w, s.w); dst_vector[offset] = d; } }; template <ReductionOperator op, typename T, int VectorLen> __global__ void reduce_kernel(void *dst, const void 
*src, size_t count) { ReduceKernel<op, T, VectorLen>::kernel(dst, src, count); } template <ReductionOperator op, typename T, int VectorLen> void reduce_v(void *dst, const void *src, size_t count, hipStream_t s) { using VectorT = typename ShortVectorType<T, VectorLen>::type; if (count == 0) return; int tb_dim = 256; count /= VectorLen; int grid_dim = count / tb_dim + (count % tb_dim ? 1 : 0); #ifdef AL_MPI_CUDA_DEBUG // clear remaining error flag hipGetLastError(); #endif hipLaunchKernelGGL(( reduce_kernel<op, T, VectorLen>), dim3(grid_dim), dim3(tb_dim), 0, s, dst, src, count); #ifdef AL_MPI_CUDA_DEBUG hipError_t e = hipPeekAtLastError(); if (e != hipSuccess) { throw_al_exception(hipGetErrorString(e)); } #endif } // Passing each function argument to a template parameter template <typename T, int VectorLen> void reduce_v(void *dst, const void *src, size_t count, hipStream_t s, ReductionOperator op) { switch (op) { case ReductionOperator::sum: reduce_v<ReductionOperator::sum, T, VectorLen>(dst, src, count, s); break; case ReductionOperator::prod: reduce_v<ReductionOperator::prod, T, VectorLen>(dst, src, count, s); break; case ReductionOperator::min: reduce_v<ReductionOperator::min, T, VectorLen>(dst, src, count, s); break; case ReductionOperator::max: reduce_v<ReductionOperator::max, T, VectorLen>(dst, src, count, s); break; default: throw_al_exception("Unknown reduction operator"); } } template <int VectorLen> void reduce_v(void *dst, const void *src, size_t count, hipStream_t s, ReductionOperator op, ReductionOperandType type) { switch (type) { case ReductionOperandType::INT: reduce_v<int, VectorLen>(dst, src, count, s, op); break; case ReductionOperandType::LONG: reduce_v<long, VectorLen>(dst, src, count, s, op); break; case ReductionOperandType::FLOAT: reduce_v<float, VectorLen>(dst, src, count, s, op); break; case ReductionOperandType::DOUBLE: reduce_v<double, VectorLen>(dst, src, count, s, op); break; default: throw_al_exception("Unknown operand type"); } } void reduce1(void *dst, const void *src, size_t count, hipStream_t s, ReductionOperator op, ReductionOperandType type) { reduce_v<1>(dst, src, count, s, op, type); } void reduce2(void *dst, const void *src, size_t count, hipStream_t s, ReductionOperator op, ReductionOperandType type) { if ((count % 2) == 0) { reduce_v<2>(dst, src, count, s, op, type); } else { reduce1(dst, src, count, s, op, type); } } void reduce4(void *dst, const void *src, size_t count, hipStream_t s, ReductionOperator op, ReductionOperandType type) { if ((count % 4) == 0) { reduce_v<4>(dst, src, count, s, op, type); } else { reduce1(dst, src, count, s, op, type); } } #if 0 void reduce_thrust(float *dst, const float *src, size_t count, hipStream_t s) { thrust::device_ptr<float> dst_ptr(dst); thrust::device_ptr<const float> src_ptr(src); thrust::transform(thrust::hip::par.on(s), src_ptr, src_ptr + count, dst_ptr, dst_ptr, thrust::plus<float>()); } #endif } // namespace mpi_cuda } // namespace internal } // namespace Al
3dbd848b5038251a1e4573b3acca0e9311a40c83.cu
//////////////////////////////////////////////////////////////////////////////// // Copyright (c) 2018, Lawrence Livermore National Security, LLC. Produced at the // Lawrence Livermore National Laboratory in collaboration with University of // Illinois Urbana-Champaign. // // Written by the LBANN Research Team (N. Dryden, N. Maruyama, et al.) listed in // the CONTRIBUTORS file. <[email protected]> // // LLNL-CODE-756777. // All rights reserved. // // This file is part of Aluminum GPU-aware Communication Library. For details, see // http://software.llnl.gov/Aluminum or https://github.com/LLNL/Aluminum. // // Licensed under the Apache License, Version 2.0 (the "Licensee"); you // may not use this file except in compliance with the License. You may // obtain a copy of the License at: // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or // implied. See the License for the specific language governing // permissions and limitations under the license. //////////////////////////////////////////////////////////////////////////////// #include <cuda_runtime.h> //#include <thrust/transform.h> //#include <thrust/device_ptr.h> #include "mpi_cuda/cuda_kernels.hpp" namespace Al { namespace internal { namespace mpi_cuda { template <typename T, ReductionOperator op> struct BinaryOp; template <typename T> struct BinaryOp<T, ReductionOperator::sum> { __device__ static T calc(const T& x, const T& y) { return x + y; } }; template <typename T> struct BinaryOp<T, ReductionOperator::prod> { __device__ static T calc(const T& x, const T& y) { return x * y; } }; template <typename T> struct BinaryOp<T, ReductionOperator::min> { __device__ static T calc(const T& x, const T& y) { return (x < y ? x : y); } }; template <typename T> struct BinaryOp<T, ReductionOperator::max> { __device__ static T calc(const T& x, const T& y) { return (x > y ? 
x : y); } }; template <typename T> struct BinaryOp<T, ReductionOperator::lor> { __device__ static T calc(const T& x, const T& y) { return x || y; } }; template <typename T> struct BinaryOp<T, ReductionOperator::land> { __device__ static T calc(const T& x, const T& y) { return x && y; } }; template <typename T> struct BinaryOp<T, ReductionOperator::lxor> { __device__ static T calc(const T& x, const T& y) { return !x != !y; } }; template <typename T> struct BinaryOp<T, ReductionOperator::bor> { __device__ static T calc(const T& x, const T& y) { return x | y; } }; template <typename T> struct BinaryOp<T, ReductionOperator::band> { __device__ static T calc(const T& x, const T& y) { return x & y; } }; template <typename T> struct BinaryOp<T, ReductionOperator::bxor> { __device__ static T calc(const T& x, const T& y) { return x ^ y; } }; template <typename T, int len> struct ShortVectorType { using type = T; }; template <> struct ShortVectorType<float, 1> { using type = float; }; template <> struct ShortVectorType<float, 2> { using type = float2; }; template <> struct ShortVectorType<float, 4> { using type = float4; }; template <> struct ShortVectorType<double, 1> { using type = double; }; template <> struct ShortVectorType<double, 2> { using type = double2; }; template <> struct ShortVectorType<double, 4> { using type = double4; }; template <> struct ShortVectorType<int, 1> { using type = int; }; template <> struct ShortVectorType<int, 2> { using type = int2; }; template <> struct ShortVectorType<int, 4> { using type = int4; }; template <> struct ShortVectorType<long, 1> { using type = long; }; template <> struct ShortVectorType<long, 2> { using type = long2; }; template <> struct ShortVectorType<long, 4> { using type = long4; }; template <ReductionOperator op, typename T, int VectorLen> struct ReduceKernel; template <ReductionOperator op, typename T> struct ReduceKernel<op, T, 1> { __device__ static void kernel(void *dst, const void *src, size_t count) { size_t offset = blockIdx.x * blockDim.x + threadIdx.x; if (offset >= count) return; T *dst_t = static_cast<T*>(dst); const T *src_t = static_cast<const T*>(src); dst_t[offset] = BinaryOp<T, op>::calc(dst_t[offset], src_t[offset]); } }; template <ReductionOperator op, typename T> struct ReduceKernel<op, T, 2> { __device__ static void kernel(void *dst, const void *src, size_t count) { using VectorT = typename ShortVectorType<T, 2>::type; size_t offset = blockIdx.x * blockDim.x + threadIdx.x; if (offset >= count) return; VectorT *dst_vector = static_cast<VectorT*>(dst); const VectorT *src_vector = static_cast<const VectorT*>(src); VectorT d = dst_vector[offset]; VectorT s = src_vector[offset]; d.x = BinaryOp<T, op>::calc(d.x, s.x); d.y = BinaryOp<T, op>::calc(d.y, s.y); dst_vector[offset] = d; } }; template <ReductionOperator op, typename T> struct ReduceKernel<op, T, 4> { __device__ static void kernel(void *dst, const void *src, size_t count) { using VectorT = typename ShortVectorType<T, 4>::type; size_t offset = blockIdx.x * blockDim.x + threadIdx.x; if (offset >= count) return; VectorT *dst_vector = static_cast<VectorT*>(dst); const VectorT *src_vector = static_cast<const VectorT*>(src); VectorT d = dst_vector[offset]; VectorT s = src_vector[offset]; d.x = BinaryOp<T, op>::calc(d.x, s.x); d.y = BinaryOp<T, op>::calc(d.y, s.y); d.z = BinaryOp<T, op>::calc(d.z, s.z); d.w = BinaryOp<T, op>::calc(d.w, s.w); dst_vector[offset] = d; } }; template <ReductionOperator op, typename T, int VectorLen> __global__ void reduce_kernel(void *dst, const void 
*src, size_t count) { ReduceKernel<op, T, VectorLen>::kernel(dst, src, count); } template <ReductionOperator op, typename T, int VectorLen> void reduce_v(void *dst, const void *src, size_t count, cudaStream_t s) { using VectorT = typename ShortVectorType<T, VectorLen>::type; if (count == 0) return; int tb_dim = 256; count /= VectorLen; int grid_dim = count / tb_dim + (count % tb_dim ? 1 : 0); #ifdef AL_MPI_CUDA_DEBUG // clear remaining error flag cudaGetLastError(); #endif reduce_kernel<op, T, VectorLen><<<grid_dim, tb_dim, 0, s>>>( dst, src, count); #ifdef AL_MPI_CUDA_DEBUG cudaError_t e = cudaPeekAtLastError(); if (e != cudaSuccess) { throw_al_exception(cudaGetErrorString(e)); } #endif } // Passing each function argument to a template parameter template <typename T, int VectorLen> void reduce_v(void *dst, const void *src, size_t count, cudaStream_t s, ReductionOperator op) { switch (op) { case ReductionOperator::sum: reduce_v<ReductionOperator::sum, T, VectorLen>(dst, src, count, s); break; case ReductionOperator::prod: reduce_v<ReductionOperator::prod, T, VectorLen>(dst, src, count, s); break; case ReductionOperator::min: reduce_v<ReductionOperator::min, T, VectorLen>(dst, src, count, s); break; case ReductionOperator::max: reduce_v<ReductionOperator::max, T, VectorLen>(dst, src, count, s); break; default: throw_al_exception("Unknown reduction operator"); } } template <int VectorLen> void reduce_v(void *dst, const void *src, size_t count, cudaStream_t s, ReductionOperator op, ReductionOperandType type) { switch (type) { case ReductionOperandType::INT: reduce_v<int, VectorLen>(dst, src, count, s, op); break; case ReductionOperandType::LONG: reduce_v<long, VectorLen>(dst, src, count, s, op); break; case ReductionOperandType::FLOAT: reduce_v<float, VectorLen>(dst, src, count, s, op); break; case ReductionOperandType::DOUBLE: reduce_v<double, VectorLen>(dst, src, count, s, op); break; default: throw_al_exception("Unknown operand type"); } } void reduce1(void *dst, const void *src, size_t count, cudaStream_t s, ReductionOperator op, ReductionOperandType type) { reduce_v<1>(dst, src, count, s, op, type); } void reduce2(void *dst, const void *src, size_t count, cudaStream_t s, ReductionOperator op, ReductionOperandType type) { if ((count % 2) == 0) { reduce_v<2>(dst, src, count, s, op, type); } else { reduce1(dst, src, count, s, op, type); } } void reduce4(void *dst, const void *src, size_t count, cudaStream_t s, ReductionOperator op, ReductionOperandType type) { if ((count % 4) == 0) { reduce_v<4>(dst, src, count, s, op, type); } else { reduce1(dst, src, count, s, op, type); } } #if 0 void reduce_thrust(float *dst, const float *src, size_t count, cudaStream_t s) { thrust::device_ptr<float> dst_ptr(dst); thrust::device_ptr<const float> src_ptr(src); thrust::transform(thrust::cuda::par.on(s), src_ptr, src_ptr + count, dst_ptr, dst_ptr, thrust::plus<float>()); } #endif } // namespace mpi_cuda } // namespace internal } // namespace Al
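The ReduceKernel specializations above vectorize the element-wise reduction through float2/float4-style loads, and reduce2/reduce4 fall back to the scalar path whenever the count is not a multiple of the vector width. The snippet below is a standalone illustration of that float4 pattern for a summing update, not the library's API; the kernel and function names are made up.

#include <cuda_runtime.h>

// Standalone sketch of the float4 trick used by ReduceKernel<op, T, 4>:
// each thread loads one float4 from dst and src, combines the four lanes,
// and stores the result, quartering the number of global memory transactions.
__global__ void addInto4(float *dst, const float *src, size_t vec_count)
{
    size_t i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i >= vec_count) return;                        // vec_count = element count / 4
    float4 d = reinterpret_cast<float4 *>(dst)[i];
    const float4 s = reinterpret_cast<const float4 *>(src)[i];
    d.x += s.x; d.y += s.y; d.z += s.z; d.w += s.w;
    reinterpret_cast<float4 *>(dst)[i] = d;
}

// Host launch mirroring reduce_v<..., 4>: only valid when count % 4 == 0 and
// both pointers are 16-byte aligned; otherwise a scalar kernel should be used.
void addInto(float *d_dst, const float *d_src, size_t count, cudaStream_t s)
{
    if (count == 0) return;
    const size_t vec_count = count / 4;
    const int tb_dim = 256;
    const int grid_dim = static_cast<int>(vec_count / tb_dim + (vec_count % tb_dim ? 1 : 0));
    addInto4<<<grid_dim, tb_dim, 0, s>>>(d_dst, d_src, vec_count);
}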
2756b58e9487ff66fb32cd8389739004b9211e09.hip
// !!! This is a file automatically generated by hipify!!! #include <hip/hip_runtime.h> #include <stdio.h> #include "common.h" #include <sys/time.h> void checkResult(float *hostRef, float *gpuRef, const int N) { double epsilon = 1.0E-8; bool match = true; for (int i = 0; i < N; ++i) { if (abs(hostRef[i] - gpuRef[i]) > epsilon) { match = false; printf("Arrays do not match!\n"); printf("host %5.2f gpu %5.2f at current %d\n", hostRef[i], gpuRef[i], i); break; } } if (match == true) { printf("Arrays match.\n\n"); } } void initialData(float *data, int size) { // Generate different seed for random number time_t t; srand((unsigned int)time(&t)); for (int i = 0; i < size; ++i) { data[i] = (float)(rand() & 0xFF) / 10.0f; } return; } void sumArraysOnHost(float *A, float *B, float *C, const int N) { for (int i = 0; i < N; ++i) { C[i] = A[i] + B[i]; } return; } __global__ void sumArraysOnGPU(float *A, float *B, float *C, const int N) { int i = blockIdx.x * blockDim.x + threadIdx.x; if (i < N) C[i] = A[i] + B[i]; return; } double cpuSecond(void) { struct timeval tv; gettimeofday(&tv, NULL); return (double(tv.tv_sec) + double(tv.tv_usec) * 1.E-6); } int main(int argc, char *argv[]) { printf("%s Starting...\n", argv[0]); // set up device hipDeviceProp_t deviceProp; CHECK(hipGetDeviceProperties(&deviceProp, 0)); printf("Using Device %d: %s\n", 0, deviceProp.name); hipSetDevice(0); // set up data size of vectors int nElem = 1 << 24; printf("Vector size %d\n", nElem); // malloc host memory size_t nBytes = nElem * sizeof(float); float *h_A, *h_B, *hostRef, *gpuRef; h_A = (float *)malloc(nBytes); h_B = (float *)malloc(nBytes); hostRef = (float *)malloc(nBytes); gpuRef = (float *)malloc(nBytes); // initialize data at host side initialData(h_A, nElem); initialData(h_B, nElem); memset(hostRef, 0, nBytes); memset(gpuRef, 0, nBytes); // malloc device global memory float *d_A, *d_B, *d_C; hipMalloc(&d_A, nBytes); hipMalloc(&d_B, nBytes); hipMalloc(&d_C, nBytes); // transfer data from host to device hipMemcpy(d_A, h_A, nBytes, hipMemcpyHostToDevice); hipMemcpy(d_B, h_B, nBytes, hipMemcpyHostToDevice); // invoke kernel at host side dim3 block(64); dim3 grid((nElem + block.x - 1) / block.x); double iStart = cpuSecond(); hipLaunchKernelGGL(( sumArraysOnGPU), dim3(grid), dim3(block), 0, 0, d_A, d_B, d_C, nElem); CHECK(hipDeviceSynchronize()); double iElaps = cpuSecond() - iStart; printf("sumArraysOnGPU <<<%d, %d>>> elapsed %f sec.\n", grid.x, block.x, iElaps); // copy kernel result back to host side hipMemcpy(gpuRef, d_C, nBytes, hipMemcpyDeviceToHost); // add vector at host side for result checks iStart = cpuSecond(); sumArraysOnHost(h_A, h_B, hostRef, nElem); iElaps = cpuSecond() - iStart; printf("sumArraysOnHost elapsed %f sec.\n", iElaps); // check device results checkResult(hostRef, gpuRef, nElem); // free device global memory hipFree(d_A); hipFree(d_B); hipFree(d_C); // free host memory free(h_A); free(h_B); free(hostRef); free(gpuRef); return 0; }
2756b58e9487ff66fb32cd8389739004b9211e09.cu
#include <cuda_runtime.h> #include <stdio.h> #include "common.h" #include <sys/time.h> void checkResult(float *hostRef, float *gpuRef, const int N) { double epsilon = 1.0E-8; bool match = true; for (int i = 0; i < N; ++i) { if (abs(hostRef[i] - gpuRef[i]) > epsilon) { match = false; printf("Arrays do not match!\n"); printf("host %5.2f gpu %5.2f at current %d\n", hostRef[i], gpuRef[i], i); break; } } if (match == true) { printf("Arrays match.\n\n"); } } void initialData(float *data, int size) { // Generate different seed for random number time_t t; srand((unsigned int)time(&t)); for (int i = 0; i < size; ++i) { data[i] = (float)(rand() & 0xFF) / 10.0f; } return; } void sumArraysOnHost(float *A, float *B, float *C, const int N) { for (int i = 0; i < N; ++i) { C[i] = A[i] + B[i]; } return; } __global__ void sumArraysOnGPU(float *A, float *B, float *C, const int N) { int i = blockIdx.x * blockDim.x + threadIdx.x; if (i < N) C[i] = A[i] + B[i]; return; } double cpuSecond(void) { struct timeval tv; gettimeofday(&tv, NULL); return (double(tv.tv_sec) + double(tv.tv_usec) * 1.E-6); } int main(int argc, char *argv[]) { printf("%s Starting...\n", argv[0]); // set up device cudaDeviceProp deviceProp; CHECK(cudaGetDeviceProperties(&deviceProp, 0)); printf("Using Device %d: %s\n", 0, deviceProp.name); cudaSetDevice(0); // set up data size of vectors int nElem = 1 << 24; printf("Vector size %d\n", nElem); // malloc host memory size_t nBytes = nElem * sizeof(float); float *h_A, *h_B, *hostRef, *gpuRef; h_A = (float *)malloc(nBytes); h_B = (float *)malloc(nBytes); hostRef = (float *)malloc(nBytes); gpuRef = (float *)malloc(nBytes); // initialize data at host side initialData(h_A, nElem); initialData(h_B, nElem); memset(hostRef, 0, nBytes); memset(gpuRef, 0, nBytes); // malloc device global memory float *d_A, *d_B, *d_C; cudaMalloc(&d_A, nBytes); cudaMalloc(&d_B, nBytes); cudaMalloc(&d_C, nBytes); // transfer data from host to device cudaMemcpy(d_A, h_A, nBytes, cudaMemcpyHostToDevice); cudaMemcpy(d_B, h_B, nBytes, cudaMemcpyHostToDevice); // invoke kernel at host side dim3 block(64); dim3 grid((nElem + block.x - 1) / block.x); double iStart = cpuSecond(); sumArraysOnGPU<<<grid, block>>>(d_A, d_B, d_C, nElem); CHECK(cudaDeviceSynchronize()); double iElaps = cpuSecond() - iStart; printf("sumArraysOnGPU <<<%d, %d>>> elapsed %f sec.\n", grid.x, block.x, iElaps); // copy kernel result back to host side cudaMemcpy(gpuRef, d_C, nBytes, cudaMemcpyDeviceToHost); // add vector at host side for result checks iStart = cpuSecond(); sumArraysOnHost(h_A, h_B, hostRef, nElem); iElaps = cpuSecond() - iStart; printf("sumArraysOnHost elapsed %f sec.\n", iElaps); // check device results checkResult(hostRef, gpuRef, nElem); // free device global memory cudaFree(d_A); cudaFree(d_B); cudaFree(d_C); // free host memory free(h_A); free(h_B); free(hostRef); free(gpuRef); return 0; }
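Both versions of this vector-add example pull a CHECK macro from "common.h", which is not included in this collection. The definition below is the conventional form such a macro takes and is given only as an assumption about that header, not its actual contents.

#include <cstdio>
#include <cstdlib>
#include <cuda_runtime.h>

// Assumed shape of the CHECK macro from the missing common.h: abort with the
// file, line, and error string whenever a CUDA runtime call fails.
#define CHECK(call)                                                          \
    do {                                                                     \
        const cudaError_t err = (call);                                      \
        if (err != cudaSuccess) {                                            \
            fprintf(stderr, "CUDA error %s:%d: %s\n", __FILE__, __LINE__,    \
                    cudaGetErrorString(err));                                \
            exit(EXIT_FAILURE);                                              \
        }                                                                    \
    } while (0)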
f18c4b99ee4f90585438bedfb469941914c23f13.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <cmath> #include <cstdio> #include <ctime> #include <vector> #include <time.h> #include <SDL.h> #include "bitmap.hh" #include "gui.hh" #include "star.hh" #include "util.hh" #include "vec2d.hh" #include <thrust/host_vector.h> #include <thrust/device_vector.h> #include <iostream> using namespace std; // Screen size #define WIDTH 640 #define HEIGHT 480 // Minimum time between clicks #define CREATE_INTERVAL 1000 // Time step size #define DT 0.04 // Gravitational constant #define G 1 // Update all stars in the simulation void updateStars(); //CUDA compute forces and update all stars __global__ void computeForce(double* mass, double* posX, double* posY, double* forceX, double* forceY, int starSize, double* velX, double* velY, double* radius, int* merge, int* initialized); //CUDA update star's positions __global__ void updateStar(double* mass, double* posX, double* posY, double* prev_posX, double* prev_posY, double* forceX, double* forceY, double* velX, double* velY, int* initialized, int starSize, int* merge); // Draw a circle on a bitmap based on this star's position and radius void drawStar(bitmap* bmp, star s); // Add a "galaxy" of stars to the points list void addRandomGalaxy(double center_x, double center_y); // A list of stars being simulated vector<star> stars; // Offset of the current view int x_offset = 0; int y_offset = 0; /** * Entry point for the program * \param argc The number of command line arguments * \param argv An array of command line arguments */ // Keep track of how many stars are shown int starSize; int main(int argc, char** argv) { // Seed the random number generator srand(time(NULL)); // Create a GUI window gui ui("Galaxy Simulation", WIDTH, HEIGHT); // Start with the running flag set to true bool running = true; // Render everything using this bitmap bitmap bmp(WIDTH, HEIGHT); // Save the last time the mouse was clicked bool mouse_up = true; // Keep track of how many galaxies were made int galaxies = 0; // Count the number of frames int numOfFrames = 0; time_t startTime; // Loop until we get a quit event while(running) { // Increment number of frames numOfFrames++; // Keep track of runtime startTime = time_ms(); // Process events SDL_Event event; while(SDL_PollEvent(&event) == 1) { // If the event is a quit event, then leave the loop if(event.type == SDL_QUIT) running = false; } // Get the current mouse state int mouse_x, mouse_y; uint32_t mouse_state = SDL_GetMouseState(&mouse_x, &mouse_y); // If the left mouse button is pressed, create a new random "galaxy" if(mouse_state & SDL_BUTTON(SDL_BUTTON_LEFT)) { // Only create one if the mouse button has been released if(mouse_up) { addRandomGalaxy(mouse_x - x_offset, mouse_y - y_offset); galaxies++; // Increment galaxy count // Don't create another one until the mouse button is released mouse_up = false; } } else { // The mouse button was released mouse_up = true; } // Get the keyboard state const uint8_t* keyboard = SDL_GetKeyboardState(NULL); // If the up key is pressed, shift up one pixel if(keyboard[SDL_SCANCODE_UP]) { y_offset++; bmp.shiftDown(); // Shift pixels so scrolling doesn't create trails } // If the down key is pressed, shift down one pixel if(keyboard[SDL_SCANCODE_DOWN]) { y_offset--; bmp.shiftUp(); // Shift pixels so scrolling doesn't create trails } // If the right key is pressed, shift right one pixel if(keyboard[SDL_SCANCODE_RIGHT]) { x_offset--; bmp.shiftLeft(); // Shift pixels so scrolling doesn't create 
trails } // If the left key is pressed, shift left one pixel if(keyboard[SDL_SCANCODE_LEFT]) { x_offset++; bmp.shiftRight(); // Shift pixels so scrolling doesn't create trails } // Remove stars that have NaN positions for(int i=0; i<stars.size(); i++) { // Remove this star if it is too far from zero or has NaN position if(stars[i].pos().x() != stars[i].pos().x() || // A NaN value does not equal itself stars[i].pos().y() != stars[i].pos().y()) { stars.erase(stars.begin()+i); i--; continue; } } // Update star count starSize = stars.size(); // Create arrays for the star attributes // This is needed to compute forces and update stars on GPU double starMass[starSize]; double starPosX[starSize]; double starPrevX[starSize]; double starForceX[starSize]; double starVelX[starSize]; double starPosY[starSize]; double starPrevY[starSize]; double starForceY[starSize]; double starVelY[starSize]; int starInit[starSize]; double starRad[starSize]; int starMerge[starSize]; //Lets us know if a star has been merged. // Assign values to arrays. for(int i=0; i <starSize; i++) { starMass[i] = stars[i].mass(); starPosX[i] = stars[i].pos().x(); starPrevX[i] = stars[i].prev_pos().x(); starForceX[i] = stars[i].force().x(); starVelX[i] = stars[i].vel().x(); starPosY[i] = stars[i].pos().y(); starPrevY[i] = stars[i].prev_pos().y(); starForceY[i] = stars[i].force().y(); starVelY[i] = stars[i].vel().y(); starInit[i] = stars[i].initialized(); starRad[i] = stars[i].radius(); starMerge[i] = i; } // Create empty arrays for GPU double* starMassGPU; double* starPosXGPU; double* starPrevXGPU; double* starForceXGPU; double* starVelXGPU; double* starPosYGPU; double* starPrevYGPU; double* starForceYGPU; double* starVelYGPU; int* starInitGPU; double* starRadGPU; int* starMergeGPU; // Malloc all GPU arrays if(hipMalloc(&starMassGPU, sizeof(double) * (starSize)) != hipSuccess) { fprintf(stderr, "Failed to allocate starMassGPU on GPU\n"); exit(2); } if(hipMalloc(&starPosXGPU, sizeof(double) * (starSize)) != hipSuccess) { fprintf(stderr, "Failed to allocate starPosXGPU on GPU\n"); exit(2); } if(hipMalloc(&starPrevXGPU, sizeof(double) * (starSize)) != hipSuccess) { fprintf(stderr, "Failed to allocate starPrevXGPU on GPU\n"); exit(2); } if(hipMalloc(&starForceXGPU, sizeof(double) * (starSize)) != hipSuccess) { fprintf(stderr, "Failed to allocate starForceXGPU on GPU\n"); exit(2); } if(hipMalloc(&starVelXGPU, sizeof(double) * (starSize)) != hipSuccess) { fprintf(stderr, "Failed to allocate starVelXGPU on GPU\n"); exit(2); } if(hipMalloc(&starPosYGPU, sizeof(double) * (starSize)) != hipSuccess) { fprintf(stderr, "Failed to allocate starPosYGPU on GPU\n"); exit(2); } if(hipMalloc(&starPrevYGPU, sizeof(double) * (starSize)) != hipSuccess) { fprintf(stderr, "Failed to allocate starPrevYGPU on GPU\n"); exit(2); } if(hipMalloc(&starForceYGPU, sizeof(double) * (starSize)) != hipSuccess) { fprintf(stderr, "Failed to allocate starForceYGPU on GPU\n"); exit(2); } if(hipMalloc(&starVelYGPU, sizeof(double) * (starSize)) != hipSuccess) { fprintf(stderr, "Failed to allocate starVelYGPU on GPU\n"); exit(2); } if(hipMalloc(&starInitGPU, sizeof(int) * (starSize)) != hipSuccess) { fprintf(stderr, "Failed to allocate starInitGPU on GPU\n"); exit(2); } if(hipMalloc(&starRadGPU, sizeof(double) * (starSize)) != hipSuccess) { fprintf(stderr, "Failed to allocate starRadGPU on GPU\n"); exit(2); } if(hipMalloc(&starMergeGPU, sizeof(int) * (starSize)) != hipSuccess) { fprintf(stderr, "Failed to allocate starGPUGPU on GPU\n"); exit(2); } //Copy the host data to 
device if(hipMemcpy(starMassGPU, starMass, sizeof(double) * (starSize), hipMemcpyHostToDevice) != hipSuccess) { fprintf(stderr, "Failed to copy starMassGPU to the GPU"); } if(hipMemcpy(starPosXGPU, starPosX, sizeof(double) * (starSize), hipMemcpyHostToDevice) != hipSuccess) { fprintf(stderr, "Failed to copy starPosXGPU to the GPU"); } if(hipMemcpy(starPrevXGPU, starPrevX, sizeof(double) * (starSize), hipMemcpyHostToDevice) != hipSuccess) { fprintf(stderr, "Failed to copy starPrevXGPU to the GPU"); } if(hipMemcpy(starForceXGPU, starForceX, sizeof(double) * (starSize), hipMemcpyHostToDevice) != hipSuccess) { fprintf(stderr, "Failed to copy starForceXGPU to the GPU"); } if(hipMemcpy(starVelXGPU, starVelX, sizeof(double) * (starSize), hipMemcpyHostToDevice) != hipSuccess) { fprintf(stderr, "Failed to copy starVelXGPU to the GPU"); } if(hipMemcpy(starPosYGPU, starPosY, sizeof(double) * (starSize), hipMemcpyHostToDevice) != hipSuccess) { fprintf(stderr, "Failed to copy starPosYGPU to the GPU"); } if(hipMemcpy(starPrevYGPU, starPrevY, sizeof(double) * (starSize), hipMemcpyHostToDevice) != hipSuccess) { fprintf(stderr, "Failed to copy starPrevYGPU to the GPU"); } if(hipMemcpy(starForceYGPU, starForceY, sizeof(double) * (starSize), hipMemcpyHostToDevice) != hipSuccess) { fprintf(stderr, "Failed to copy starForceYGPU to the GPU"); } if(hipMemcpy(starVelYGPU, starVelY, sizeof(double) * (starSize), hipMemcpyHostToDevice) != hipSuccess) { fprintf(stderr, "Failed to copy starVelYGPU to the GPU"); } if(hipMemcpy(starInitGPU, starInit, sizeof(int) * (starSize), hipMemcpyHostToDevice) != hipSuccess) { fprintf(stderr, "Failed to copy starInitGPU to the GPU"); } if(hipMemcpy(starRadGPU, starRad, sizeof(double) * (starSize), hipMemcpyHostToDevice) != hipSuccess) { fprintf(stderr, "Failed to copy starRadGPU to the GPU"); } if(hipMemcpy(starMergeGPU, starMerge, sizeof(int) * (starSize), hipMemcpyHostToDevice) != hipSuccess) { fprintf(stderr, "Failed to copy starMergeGPU to the GPU"); } // Compute forces on GPU with blocks for each star hipLaunchKernelGGL(( computeForce), dim3(starSize),dim3(1), 0, 0, starMassGPU, starPosXGPU, starPosYGPU, starForceXGPU, starForceYGPU, starSize, starVelXGPU, starVelYGPU, starRadGPU, starMergeGPU, starInitGPU); hipDeviceSynchronize(); // Update star positions on GPU with blocks for each star hipLaunchKernelGGL(( updateStar), dim3(starSize),dim3(1), 0, 0, starMassGPU, starPosXGPU, starPosYGPU, starPrevXGPU, starPrevYGPU, starForceXGPU, starForceYGPU, starVelXGPU, starVelYGPU, starInitGPU, starSize, starMergeGPU); hipDeviceSynchronize(); // Copy the device data to the host if(hipMemcpy(&starMass, starMassGPU, sizeof(double) * (starSize), hipMemcpyDeviceToHost) != hipSuccess) { fprintf(stderr, "Failed to copy starMassGPU to the CPU\n"); } if(hipMemcpy(&starPosX, starPosXGPU, sizeof(double) * (starSize), hipMemcpyDeviceToHost) != hipSuccess) { fprintf(stderr, "Failed to copy starPosXGPU to the CPU\n"); } if(hipMemcpy(&starPrevX, starPrevXGPU, sizeof(double) * (starSize), hipMemcpyDeviceToHost) != hipSuccess) { fprintf(stderr, "Failed to copy starPrevXGPU to the CPU\n"); } if(hipMemcpy(&starVelX, starVelXGPU, sizeof(double) * (starSize), hipMemcpyDeviceToHost) != hipSuccess) { fprintf(stderr, "Failed to copy starVelXGPU to the CPU\n"); } if(hipMemcpy(&starPosY, starPosYGPU, sizeof(double) * (starSize), hipMemcpyDeviceToHost) != hipSuccess) { fprintf(stderr, "Failed to copy starPosYGPU to the CPU\n"); } if(hipMemcpy(&starPrevY, starPrevYGPU, sizeof(double) * (starSize), 
hipMemcpyDeviceToHost) != hipSuccess) { fprintf(stderr, "Failed to copy starPrevYGPU to the CPU\n"); } if(hipMemcpy(&starVelY, starVelYGPU, sizeof(double) * (starSize), hipMemcpyDeviceToHost) != hipSuccess) { fprintf(stderr, "Failed to copy starVelYGPU to the CPU\n"); } if(hipMemcpy(&starInit, starInitGPU, sizeof(int) * (starSize), hipMemcpyDeviceToHost) != hipSuccess) { fprintf(stderr, "Failed to copy starInitGPU to the CPU\n"); } if(hipMemcpy(&starRad, starRadGPU, sizeof(double) * (starSize), hipMemcpyDeviceToHost) != hipSuccess) { fprintf(stderr, "Failed to copy starRadGPU to the CPU\n"); } if(hipMemcpy(&starMerge, starMergeGPU, sizeof(int) * (starSize), hipMemcpyDeviceToHost) != hipSuccess) { fprintf(stderr, "Failed to copy starInitGPU to the CPU\n"); } // Free the arrays on the GPU hipFree(starMassGPU); hipFree(starPosXGPU); hipFree(starPrevXGPU); hipFree(starVelXGPU); hipFree( starForceXGPU); hipFree(starPosYGPU); hipFree(starPrevYGPU); hipFree(starForceYGPU); hipFree(starVelYGPU); hipFree(starInitGPU); hipFree(starMergeGPU); hipFree(starRadGPU); // Update the stars in the vector with the data from the arrays // Also remove the stars that were merged int displacement = 0; int j = 0; for(int i=0; i <(starSize - displacement); i++) { if(starMerge[j] == j) { stars[i].changeMass(starMass[j]); stars[i].changePos(vec2d(starPosX[j], starPosY[j])); stars[i].changePrev(vec2d(starPrevX[j], starPrevY[j])); stars[i].changeVel(vec2d(starVelX[j], starVelY[j])); stars[i].changeInit(starInit[j]); }// if else { stars.erase(stars.begin() + i); i--; displacement++; }// else j++; }// fpr // Darken the bitmap instead of clearing it to leave trails bmp.darken(0.92); // Draw stars for(int i=0; i<stars.size(); i++) { drawStar(&bmp, stars[i]); }// for // Display the rendered frame ui.display(bmp); // Calculate the run time time_t endTime = time_ms(); time_t elapsedTime = endTime - startTime; // Print frame data for 2000 frames if (numOfFrames < 2000) printf("%d, %d, %lu\n", galaxies, numOfFrames, elapsedTime); }// while return 0; }// main // Compute the force on the star based on the forces on the other stars // This is done on the GPU __global__ void computeForce(double* mass, double* posX, double* posY, double* forceX, double* forceY, int starSize, double* velX, double* velY, double* radius, int* merge, int* initialized){ // If the star has not been merged if(merge[blockIdx.x] == blockIdx.x) { double m1 = mass[blockIdx.x]; vec2d pos = vec2d(posX[blockIdx.x], posY[blockIdx.x]); vec2d vel = vec2d(velX[blockIdx.x], velY[blockIdx.x]); // Loop on all other stars for(int j = blockIdx.x + 1; j<starSize; j++) { // If the current star hasn't been merged if(merge[j] == j) { double m2 = mass[j]; vec2d pos2 = vec2d(posX[j], posY[j]); vec2d vel2 = vec2d(velX[j], velY[j]); // Compute a vector between two points vec2d diff = pos - pos2; // Compute the distance between the two points double dist = diff.magnitude(); // If the objects are too close, merge them if(dist < (radius[blockIdx.x] + radius[j]) / 1.5) { merge[j] = blockIdx.x; mass[blockIdx.x] = m1 + m2; pos = (pos * m1 + pos2 * m2) / (m1 + m2); vel = (vel * m1 + vel2 * m2) / (m1 + m2); m1 = mass[blockIdx.x]; posX[blockIdx.x] = pos.x(); posY[blockIdx.x] = pos.y(); velX[blockIdx.x] = vel.x(); velY[blockIdx.x] = vel.y(); forceX[blockIdx.x] = 0; forceY[blockIdx.x] = 0; initialized[blockIdx.x] = 0; }// if else{ // Normalize the difference vector to be a unit vector diff = diff.normalized(); // Compute the force between these two stars vec2d force = -diff * G * m1 * m2 / 
(dist * dist); // Apply the force to both stars forceX[blockIdx.x] += force.x(); forceY[blockIdx.x] += force.y(); forceX[j] += (-force.x()); forceY[j] += (-force.y()); }// else }// if }// for }// if }// computeForce() // Update the positions of the star based on the forces acting on it. // This is done on the GPU __global__ void updateStar(double* mass, double* posX, double* posY, double* prev_posX, double* prev_posY, double* forceX, double* forceY, double* velX, double* velY, int* initialized, int starSize, int* merge){ // Check to see if star has not been merged if(merge[blockIdx.x] == blockIdx.x) { vec2d pos = vec2d(posX[blockIdx.x], posY[blockIdx.x]); vec2d prev_pos = vec2d(prev_posX[blockIdx.x], prev_posY[blockIdx.x]); vec2d force = vec2d(forceX[blockIdx.x], forceY[blockIdx.x]); vec2d vel = vec2d(velX[blockIdx.x], velY[blockIdx.x]); vec2d accel = force / mass[blockIdx.x]; // Verlet integration if(initialized[blockIdx.x] == 0) { // First step: no previous position vec2d next_pos = pos + vel * DT + accel / 2 * DT * DT; prev_pos = pos; pos = next_pos; initialized[blockIdx.x] = 1; }// if else { // Later steps vec2d next_pos = pos * 2 - prev_pos + accel * DT * DT; prev_pos = pos; pos = next_pos; }// else posX[blockIdx.x] = pos.x(); posY[blockIdx.x] = pos.y(); prev_posX[blockIdx.x] = prev_pos.x(); prev_posY[blockIdx.x] = prev_pos.y(); // Track velocity, even though this isn't strictly required vel += accel * DT; velX[blockIdx.x] = vel.x(); velY[blockIdx.x] = vel.y(); // Zero out the force forceX[blockIdx.x] = 0; forceY[blockIdx.x] = 0; }// if }// updateStar() // Create a circle of stars moving in the same direction around the center of mass void addRandomGalaxy(double center_x, double center_y) { // Random number of stars int count = rand() % 1000 + 1000; // Random radius double radius = drand(50, 200); // Create a vector for the center of the galaxy vec2d center = vec2d(center_x, center_y); // Clockwise or counter-clockwise? double direction = 1; if(rand() % 2 == 0) direction = -1; // Create `count` stars for(int i=0; i<count; i++) { // Generate a random angle double angle = drand(0, M_PI * 2); // Generate a random radius, biased toward the center double point_radius = drand(0, sqrt(radius)) * drand(0, sqrt(radius)); // Compute X and Y coordinates double x = point_radius * sin(angle); double y = point_radius * cos(angle); // Create a vector to hold the position of this star (origin at center of the "galaxy") vec2d pos = vec2d(x, y); // Move the star in the appropriate direction around the center, with slightly-random velocity vec2d vel = vec2d(-cos(angle), sin(angle)) * sqrt(point_radius) * direction * drand(0.25, 1.25); // Create a new random color for the star rgb32 color = rgb32(rand() % 64 + 192, rand() % 64 + 192, 128); // Add the star with a mass dependent on distance from the center of the "galaxy" stars.push_back(star(10 / sqrt(pos.magnitude()), pos + center, vel, color)); } } // Draw a circle at the given star's position // Uses method from http://groups.csail.mit.edu/graphics/classes/6.837/F98/Lecture6/circle.html void drawStar(bitmap* bmp, star s) { double center_x = s.pos().x(); double center_y = s.pos().y(); double radius = s.radius(); // Loop over points in the upper-right quadrant of the circle for(double x = 0; x <= radius*1.1; x++) { for(double y = 0; y <= radius*1.1; y++) { // Is this point within the circle's radius? 
double dist = sqrt(pow(x, 2) + pow(y, 2)); if(dist < radius) { // Set this point, along with the mirrored points in the other three quadrants bmp->set(center_x + x + x_offset, center_y + y + y_offset, s.color()); bmp->set(center_x + x + x_offset, center_y - y + y_offset, s.color()); bmp->set(center_x - x + x_offset, center_y - y + y_offset, s.color()); bmp->set(center_x - x + x_offset, center_y + y + y_offset, s.color()); } } } }
f18c4b99ee4f90585438bedfb469941914c23f13.cu
#include <cmath> #include <cstdio> #include <ctime> #include <vector> #include <time.h> #include <SDL.h> #include "bitmap.hh" #include "gui.hh" #include "star.hh" #include "util.hh" #include "vec2d.hh" #include <thrust/host_vector.h> #include <thrust/device_vector.h> #include <iostream> using namespace std; // Screen size #define WIDTH 640 #define HEIGHT 480 // Minimum time between clicks #define CREATE_INTERVAL 1000 // Time step size #define DT 0.04 // Gravitational constant #define G 1 // Update all stars in the simulation void updateStars(); //CUDA compute forces and update all stars __global__ void computeForce(double* mass, double* posX, double* posY, double* forceX, double* forceY, int starSize, double* velX, double* velY, double* radius, int* merge, int* initialized); //CUDA update star's positions __global__ void updateStar(double* mass, double* posX, double* posY, double* prev_posX, double* prev_posY, double* forceX, double* forceY, double* velX, double* velY, int* initialized, int starSize, int* merge); // Draw a circle on a bitmap based on this star's position and radius void drawStar(bitmap* bmp, star s); // Add a "galaxy" of stars to the points list void addRandomGalaxy(double center_x, double center_y); // A list of stars being simulated vector<star> stars; // Offset of the current view int x_offset = 0; int y_offset = 0; /** * Entry point for the program * \param argc The number of command line arguments * \param argv An array of command line arguments */ // Keep track of how many stars are shown int starSize; int main(int argc, char** argv) { // Seed the random number generator srand(time(NULL)); // Create a GUI window gui ui("Galaxy Simulation", WIDTH, HEIGHT); // Start with the running flag set to true bool running = true; // Render everything using this bitmap bitmap bmp(WIDTH, HEIGHT); // Save the last time the mouse was clicked bool mouse_up = true; // Keep track of how many galaxies were made int galaxies = 0; // Count the number of frames int numOfFrames = 0; time_t startTime; // Loop until we get a quit event while(running) { // Increment number of frames numOfFrames++; // Keep track of runtime startTime = time_ms(); // Process events SDL_Event event; while(SDL_PollEvent(&event) == 1) { // If the event is a quit event, then leave the loop if(event.type == SDL_QUIT) running = false; } // Get the current mouse state int mouse_x, mouse_y; uint32_t mouse_state = SDL_GetMouseState(&mouse_x, &mouse_y); // If the left mouse button is pressed, create a new random "galaxy" if(mouse_state & SDL_BUTTON(SDL_BUTTON_LEFT)) { // Only create one if the mouse button has been released if(mouse_up) { addRandomGalaxy(mouse_x - x_offset, mouse_y - y_offset); galaxies++; // Increment galaxy count // Don't create another one until the mouse button is released mouse_up = false; } } else { // The mouse button was released mouse_up = true; } // Get the keyboard state const uint8_t* keyboard = SDL_GetKeyboardState(NULL); // If the up key is pressed, shift up one pixel if(keyboard[SDL_SCANCODE_UP]) { y_offset++; bmp.shiftDown(); // Shift pixels so scrolling doesn't create trails } // If the down key is pressed, shift down one pixel if(keyboard[SDL_SCANCODE_DOWN]) { y_offset--; bmp.shiftUp(); // Shift pixels so scrolling doesn't create trails } // If the right key is pressed, shift right one pixel if(keyboard[SDL_SCANCODE_RIGHT]) { x_offset--; bmp.shiftLeft(); // Shift pixels so scrolling doesn't create trails } // If the left key is pressed, shift left one pixel if(keyboard[SDL_SCANCODE_LEFT]) 
{ x_offset++; bmp.shiftRight(); // Shift pixels so scrolling doesn't create trails } // Remove stars that have NaN positions for(int i=0; i<stars.size(); i++) { // Remove this star if it is too far from zero or has NaN position if(stars[i].pos().x() != stars[i].pos().x() || // A NaN value does not equal itself stars[i].pos().y() != stars[i].pos().y()) { stars.erase(stars.begin()+i); i--; continue; } } // Update star count starSize = stars.size(); // Create arrays for the star attributes // This is needed to compute forces and update stars on GPU double starMass[starSize]; double starPosX[starSize]; double starPrevX[starSize]; double starForceX[starSize]; double starVelX[starSize]; double starPosY[starSize]; double starPrevY[starSize]; double starForceY[starSize]; double starVelY[starSize]; int starInit[starSize]; double starRad[starSize]; int starMerge[starSize]; //Lets us know if a star has been merged. // Assign values to arrays. for(int i=0; i <starSize; i++) { starMass[i] = stars[i].mass(); starPosX[i] = stars[i].pos().x(); starPrevX[i] = stars[i].prev_pos().x(); starForceX[i] = stars[i].force().x(); starVelX[i] = stars[i].vel().x(); starPosY[i] = stars[i].pos().y(); starPrevY[i] = stars[i].prev_pos().y(); starForceY[i] = stars[i].force().y(); starVelY[i] = stars[i].vel().y(); starInit[i] = stars[i].initialized(); starRad[i] = stars[i].radius(); starMerge[i] = i; } // Create empty arrays for GPU double* starMassGPU; double* starPosXGPU; double* starPrevXGPU; double* starForceXGPU; double* starVelXGPU; double* starPosYGPU; double* starPrevYGPU; double* starForceYGPU; double* starVelYGPU; int* starInitGPU; double* starRadGPU; int* starMergeGPU; // Malloc all GPU arrays if(cudaMalloc(&starMassGPU, sizeof(double) * (starSize)) != cudaSuccess) { fprintf(stderr, "Failed to allocate starMassGPU on GPU\n"); exit(2); } if(cudaMalloc(&starPosXGPU, sizeof(double) * (starSize)) != cudaSuccess) { fprintf(stderr, "Failed to allocate starPosXGPU on GPU\n"); exit(2); } if(cudaMalloc(&starPrevXGPU, sizeof(double) * (starSize)) != cudaSuccess) { fprintf(stderr, "Failed to allocate starPrevXGPU on GPU\n"); exit(2); } if(cudaMalloc(&starForceXGPU, sizeof(double) * (starSize)) != cudaSuccess) { fprintf(stderr, "Failed to allocate starForceXGPU on GPU\n"); exit(2); } if(cudaMalloc(&starVelXGPU, sizeof(double) * (starSize)) != cudaSuccess) { fprintf(stderr, "Failed to allocate starVelXGPU on GPU\n"); exit(2); } if(cudaMalloc(&starPosYGPU, sizeof(double) * (starSize)) != cudaSuccess) { fprintf(stderr, "Failed to allocate starPosYGPU on GPU\n"); exit(2); } if(cudaMalloc(&starPrevYGPU, sizeof(double) * (starSize)) != cudaSuccess) { fprintf(stderr, "Failed to allocate starPrevYGPU on GPU\n"); exit(2); } if(cudaMalloc(&starForceYGPU, sizeof(double) * (starSize)) != cudaSuccess) { fprintf(stderr, "Failed to allocate starForceYGPU on GPU\n"); exit(2); } if(cudaMalloc(&starVelYGPU, sizeof(double) * (starSize)) != cudaSuccess) { fprintf(stderr, "Failed to allocate starVelYGPU on GPU\n"); exit(2); } if(cudaMalloc(&starInitGPU, sizeof(int) * (starSize)) != cudaSuccess) { fprintf(stderr, "Failed to allocate starInitGPU on GPU\n"); exit(2); } if(cudaMalloc(&starRadGPU, sizeof(double) * (starSize)) != cudaSuccess) { fprintf(stderr, "Failed to allocate starRadGPU on GPU\n"); exit(2); } if(cudaMalloc(&starMergeGPU, sizeof(int) * (starSize)) != cudaSuccess) { fprintf(stderr, "Failed to allocate starGPUGPU on GPU\n"); exit(2); } //Copy the host data to device if(cudaMemcpy(starMassGPU, starMass, sizeof(double) * (starSize), 
cudaMemcpyHostToDevice) != cudaSuccess) { fprintf(stderr, "Failed to copy starMassGPU to the GPU"); } if(cudaMemcpy(starPosXGPU, starPosX, sizeof(double) * (starSize), cudaMemcpyHostToDevice) != cudaSuccess) { fprintf(stderr, "Failed to copy starPosXGPU to the GPU"); } if(cudaMemcpy(starPrevXGPU, starPrevX, sizeof(double) * (starSize), cudaMemcpyHostToDevice) != cudaSuccess) { fprintf(stderr, "Failed to copy starPrevXGPU to the GPU"); } if(cudaMemcpy(starForceXGPU, starForceX, sizeof(double) * (starSize), cudaMemcpyHostToDevice) != cudaSuccess) { fprintf(stderr, "Failed to copy starForceXGPU to the GPU"); } if(cudaMemcpy(starVelXGPU, starVelX, sizeof(double) * (starSize), cudaMemcpyHostToDevice) != cudaSuccess) { fprintf(stderr, "Failed to copy starVelXGPU to the GPU"); } if(cudaMemcpy(starPosYGPU, starPosY, sizeof(double) * (starSize), cudaMemcpyHostToDevice) != cudaSuccess) { fprintf(stderr, "Failed to copy starPosYGPU to the GPU"); } if(cudaMemcpy(starPrevYGPU, starPrevY, sizeof(double) * (starSize), cudaMemcpyHostToDevice) != cudaSuccess) { fprintf(stderr, "Failed to copy starPrevYGPU to the GPU"); } if(cudaMemcpy(starForceYGPU, starForceY, sizeof(double) * (starSize), cudaMemcpyHostToDevice) != cudaSuccess) { fprintf(stderr, "Failed to copy starForceYGPU to the GPU"); } if(cudaMemcpy(starVelYGPU, starVelY, sizeof(double) * (starSize), cudaMemcpyHostToDevice) != cudaSuccess) { fprintf(stderr, "Failed to copy starVelYGPU to the GPU"); } if(cudaMemcpy(starInitGPU, starInit, sizeof(int) * (starSize), cudaMemcpyHostToDevice) != cudaSuccess) { fprintf(stderr, "Failed to copy starInitGPU to the GPU"); } if(cudaMemcpy(starRadGPU, starRad, sizeof(double) * (starSize), cudaMemcpyHostToDevice) != cudaSuccess) { fprintf(stderr, "Failed to copy starRadGPU to the GPU"); } if(cudaMemcpy(starMergeGPU, starMerge, sizeof(int) * (starSize), cudaMemcpyHostToDevice) != cudaSuccess) { fprintf(stderr, "Failed to copy starMergeGPU to the GPU"); } // Compute forces on GPU with blocks for each star computeForce<<<starSize,1>>> (starMassGPU, starPosXGPU, starPosYGPU, starForceXGPU, starForceYGPU, starSize, starVelXGPU, starVelYGPU, starRadGPU, starMergeGPU, starInitGPU); cudaDeviceSynchronize(); // Update star positions on GPU with blocks for each star updateStar<<<starSize,1>>>(starMassGPU, starPosXGPU, starPosYGPU, starPrevXGPU, starPrevYGPU, starForceXGPU, starForceYGPU, starVelXGPU, starVelYGPU, starInitGPU, starSize, starMergeGPU); cudaDeviceSynchronize(); // Copy the device data to the host if(cudaMemcpy(&starMass, starMassGPU, sizeof(double) * (starSize), cudaMemcpyDeviceToHost) != cudaSuccess) { fprintf(stderr, "Failed to copy starMassGPU to the CPU\n"); } if(cudaMemcpy(&starPosX, starPosXGPU, sizeof(double) * (starSize), cudaMemcpyDeviceToHost) != cudaSuccess) { fprintf(stderr, "Failed to copy starPosXGPU to the CPU\n"); } if(cudaMemcpy(&starPrevX, starPrevXGPU, sizeof(double) * (starSize), cudaMemcpyDeviceToHost) != cudaSuccess) { fprintf(stderr, "Failed to copy starPrevXGPU to the CPU\n"); } if(cudaMemcpy(&starVelX, starVelXGPU, sizeof(double) * (starSize), cudaMemcpyDeviceToHost) != cudaSuccess) { fprintf(stderr, "Failed to copy starVelXGPU to the CPU\n"); } if(cudaMemcpy(&starPosY, starPosYGPU, sizeof(double) * (starSize), cudaMemcpyDeviceToHost) != cudaSuccess) { fprintf(stderr, "Failed to copy starPosYGPU to the CPU\n"); } if(cudaMemcpy(&starPrevY, starPrevYGPU, sizeof(double) * (starSize), cudaMemcpyDeviceToHost) != cudaSuccess) { fprintf(stderr, "Failed to copy starPrevYGPU to the CPU\n"); } 
if(cudaMemcpy(&starVelY, starVelYGPU, sizeof(double) * (starSize), cudaMemcpyDeviceToHost) != cudaSuccess) { fprintf(stderr, "Failed to copy starVelYGPU to the CPU\n"); } if(cudaMemcpy(&starInit, starInitGPU, sizeof(int) * (starSize), cudaMemcpyDeviceToHost) != cudaSuccess) { fprintf(stderr, "Failed to copy starInitGPU to the CPU\n"); } if(cudaMemcpy(&starRad, starRadGPU, sizeof(double) * (starSize), cudaMemcpyDeviceToHost) != cudaSuccess) { fprintf(stderr, "Failed to copy starRadGPU to the CPU\n"); } if(cudaMemcpy(&starMerge, starMergeGPU, sizeof(int) * (starSize), cudaMemcpyDeviceToHost) != cudaSuccess) { fprintf(stderr, "Failed to copy starInitGPU to the CPU\n"); } // Free the arrays on the GPU cudaFree(starMassGPU); cudaFree(starPosXGPU); cudaFree(starPrevXGPU); cudaFree(starVelXGPU); cudaFree( starForceXGPU); cudaFree(starPosYGPU); cudaFree(starPrevYGPU); cudaFree(starForceYGPU); cudaFree(starVelYGPU); cudaFree(starInitGPU); cudaFree(starMergeGPU); cudaFree(starRadGPU); // Update the stars in the vector with the data from the arrays // Also remove the stars that were merged int displacement = 0; int j = 0; for(int i=0; i <(starSize - displacement); i++) { if(starMerge[j] == j) { stars[i].changeMass(starMass[j]); stars[i].changePos(vec2d(starPosX[j], starPosY[j])); stars[i].changePrev(vec2d(starPrevX[j], starPrevY[j])); stars[i].changeVel(vec2d(starVelX[j], starVelY[j])); stars[i].changeInit(starInit[j]); }// if else { stars.erase(stars.begin() + i); i--; displacement++; }// else j++; }// fpr // Darken the bitmap instead of clearing it to leave trails bmp.darken(0.92); // Draw stars for(int i=0; i<stars.size(); i++) { drawStar(&bmp, stars[i]); }// for // Display the rendered frame ui.display(bmp); // Calculate the run time time_t endTime = time_ms(); time_t elapsedTime = endTime - startTime; // Print frame data for 2000 frames if (numOfFrames < 2000) printf("%d, %d, %lu\n", galaxies, numOfFrames, elapsedTime); }// while return 0; }// main // Compute the force on the star based on the forces on the other stars // This is done on the GPU __global__ void computeForce(double* mass, double* posX, double* posY, double* forceX, double* forceY, int starSize, double* velX, double* velY, double* radius, int* merge, int* initialized){ // If the star has not been merged if(merge[blockIdx.x] == blockIdx.x) { double m1 = mass[blockIdx.x]; vec2d pos = vec2d(posX[blockIdx.x], posY[blockIdx.x]); vec2d vel = vec2d(velX[blockIdx.x], velY[blockIdx.x]); // Loop on all other stars for(int j = blockIdx.x + 1; j<starSize; j++) { // If the current star hasn't been merged if(merge[j] == j) { double m2 = mass[j]; vec2d pos2 = vec2d(posX[j], posY[j]); vec2d vel2 = vec2d(velX[j], velY[j]); // Compute a vector between two points vec2d diff = pos - pos2; // Compute the distance between the two points double dist = diff.magnitude(); // If the objects are too close, merge them if(dist < (radius[blockIdx.x] + radius[j]) / 1.5) { merge[j] = blockIdx.x; mass[blockIdx.x] = m1 + m2; pos = (pos * m1 + pos2 * m2) / (m1 + m2); vel = (vel * m1 + vel2 * m2) / (m1 + m2); m1 = mass[blockIdx.x]; posX[blockIdx.x] = pos.x(); posY[blockIdx.x] = pos.y(); velX[blockIdx.x] = vel.x(); velY[blockIdx.x] = vel.y(); forceX[blockIdx.x] = 0; forceY[blockIdx.x] = 0; initialized[blockIdx.x] = 0; }// if else{ // Normalize the difference vector to be a unit vector diff = diff.normalized(); // Compute the force between these two stars vec2d force = -diff * G * m1 * m2 / (dist * dist); // Apply the force to both stars forceX[blockIdx.x] += 
force.x(); forceY[blockIdx.x] += force.y(); forceX[j] += (-force.x()); forceY[j] += (-force.y()); }// else }// if }// for }// if }// computeForce() // Update the positions of the star based on the forces acting on it. // This is done on the GPU __global__ void updateStar(double* mass, double* posX, double* posY, double* prev_posX, double* prev_posY, double* forceX, double* forceY, double* velX, double* velY, int* initialized, int starSize, int* merge){ // Check to see if star has not been merged if(merge[blockIdx.x] == blockIdx.x) { vec2d pos = vec2d(posX[blockIdx.x], posY[blockIdx.x]); vec2d prev_pos = vec2d(prev_posX[blockIdx.x], prev_posY[blockIdx.x]); vec2d force = vec2d(forceX[blockIdx.x], forceY[blockIdx.x]); vec2d vel = vec2d(velX[blockIdx.x], velY[blockIdx.x]); vec2d accel = force / mass[blockIdx.x]; // Verlet integration if(initialized[blockIdx.x] == 0) { // First step: no previous position vec2d next_pos = pos + vel * DT + accel / 2 * DT * DT; prev_pos = pos; pos = next_pos; initialized[blockIdx.x] = 1; }// if else { // Later steps vec2d next_pos = pos * 2 - prev_pos + accel * DT * DT; prev_pos = pos; pos = next_pos; }// else posX[blockIdx.x] = pos.x(); posY[blockIdx.x] = pos.y(); prev_posX[blockIdx.x] = prev_pos.x(); prev_posY[blockIdx.x] = prev_pos.y(); // Track velocity, even though this isn't strictly required vel += accel * DT; velX[blockIdx.x] = vel.x(); velY[blockIdx.x] = vel.y(); // Zero out the force forceX[blockIdx.x] = 0; forceY[blockIdx.x] = 0; }// if }// updateStar() // Create a circle of stars moving in the same direction around the center of mass void addRandomGalaxy(double center_x, double center_y) { // Random number of stars int count = rand() % 1000 + 1000; // Random radius double radius = drand(50, 200); // Create a vector for the center of the galaxy vec2d center = vec2d(center_x, center_y); // Clockwise or counter-clockwise? double direction = 1; if(rand() % 2 == 0) direction = -1; // Create `count` stars for(int i=0; i<count; i++) { // Generate a random angle double angle = drand(0, M_PI * 2); // Generate a random radius, biased toward the center double point_radius = drand(0, sqrt(radius)) * drand(0, sqrt(radius)); // Compute X and Y coordinates double x = point_radius * sin(angle); double y = point_radius * cos(angle); // Create a vector to hold the position of this star (origin at center of the "galaxy") vec2d pos = vec2d(x, y); // Move the star in the appropriate direction around the center, with slightly-random velocity vec2d vel = vec2d(-cos(angle), sin(angle)) * sqrt(point_radius) * direction * drand(0.25, 1.25); // Create a new random color for the star rgb32 color = rgb32(rand() % 64 + 192, rand() % 64 + 192, 128); // Add the star with a mass dependent on distance from the center of the "galaxy" stars.push_back(star(10 / sqrt(pos.magnitude()), pos + center, vel, color)); } } // Draw a circle at the given star's position // Uses method from http://groups.csail.mit.edu/graphics/classes/6.837/F98/Lecture6/circle.html void drawStar(bitmap* bmp, star s) { double center_x = s.pos().x(); double center_y = s.pos().y(); double radius = s.radius(); // Loop over points in the upper-right quadrant of the circle for(double x = 0; x <= radius*1.1; x++) { for(double y = 0; y <= radius*1.1; y++) { // Is this point within the circle's radius? 
double dist = sqrt(pow(x, 2) + pow(y, 2)); if(dist < radius) { // Set this point, along with the mirrored points in the other three quadrants bmp->set(center_x + x + x_offset, center_y + y + y_offset, s.color()); bmp->set(center_x + x + x_offset, center_y - y + y_offset, s.color()); bmp->set(center_x - x + x_offset, center_y - y + y_offset, s.color()); bmp->set(center_x - x + x_offset, center_y + y + y_offset, s.color()); } } } }
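The updateStar kernel in the pair above advances each unmerged star with position (Störmer) Verlet integration: the very first step uses the star's velocity and acceleration, and every later step uses only the current and previous positions. The host-side sketch below restates that update for a single coordinate so the scheme is easier to follow; it is illustrative only, and the function and parameter names are not taken from the simulation source.

// Illustrative restatement of the Verlet step used in updateStar, for one coordinate.
// Names here are hypothetical; dt corresponds to the DT macro defined in the file above.
void verlet_step(double *pos, double *prev_pos, double vel, double accel,
                 int *initialized, double dt)
{
    double next_pos;
    if (*initialized == 0) {
        // First step: no previous position yet, so take a Taylor step from velocity and acceleration.
        next_pos = *pos + vel * dt + 0.5 * accel * dt * dt;
        *initialized = 1;
    } else {
        // Later steps: x(t + dt) = 2 x(t) - x(t - dt) + a(t) dt^2
        next_pos = 2.0 * (*pos) - *prev_pos + accel * dt * dt;
    }
    *prev_pos = *pos;
    *pos = next_pos;
}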
c9e223333c5f8fe6f51e0b0fcf5aaaac6acf2a07.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"

extern "C" {

}

// Accumulate into z the t[0]-scaled, length-averaged term (y - x) / (y * (1 - y)),
// i.e. the derivative of the binary cross-entropy with respect to the prediction y.
__global__ void DYbinaryentropy(const int lengthX, const double *x, const double *y, const double *t, double *z)
{
    int i = threadIdx.x + blockIdx.x * blockDim.x;
    if (i < lengthX) {
        z[i] += t[0] * ((y[i] - x[i]) / (y[i] * (1.0 - y[i]))) / lengthX;
    }
}
c9e223333c5f8fe6f51e0b0fcf5aaaac6acf2a07.cu
#include "includes.h"

extern "C" {

}

// Accumulate into z the t[0]-scaled, length-averaged term (y - x) / (y * (1 - y)),
// i.e. the derivative of the binary cross-entropy with respect to the prediction y.
__global__ void DYbinaryentropy(const int lengthX, const double *x, const double *y, const double *t, double *z)
{
    int i = threadIdx.x + blockIdx.x * blockDim.x;
    if (i < lengthX) {
        z[i] += t[0] * ((y[i] - x[i]) / (y[i] * (1.0 - y[i]))) / lengthX;
    }
}
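For i < lengthX, the kernel above adds t[0] * (y[i] - x[i]) / (y[i] * (1 - y[i])) / lengthX to z[i], which is the per-element derivative of a length-averaged binary cross-entropy scaled by t[0]. Its caller lives in the framework behind includes.h and is not shown, so the launch sketch below is an assumption about how such a kernel is typically invoked, not the project's actual host code.

// Hypothetical host-side launch for DYbinaryentropy; the wrapper name and block size are assumptions.
#include <cuda_runtime.h>

void launch_DYbinaryentropy(int lengthX, const double *d_x, const double *d_y,
                            const double *d_t, double *d_z)
{
    const int block = 256;
    const int grid = (lengthX + block - 1) / block;
    DYbinaryentropy<<<grid, block>>>(lengthX, d_x, d_y, d_t, d_z);
    cudaDeviceSynchronize();  // ensure the accumulated gradient in d_z is complete before it is read back
}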
bc839bd67bb886ea57bc8a232615f22c1e9d938f.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "../../gpu_utils/gpu_utils.h" #include <stddef.h> #include <stdint.h> #include "li_rudy_2011.h" extern "C" SET_ODE_INITIAL_CONDITIONS_GPU(set_model_initial_conditions_gpu) { log_info("Using Li & Rudy 2011 GPU model\n"); uint32_t num_volumes = solver->original_num_cells; // execution configuration const int GRID = (num_volumes + BLOCK_SIZE - 1)/BLOCK_SIZE; size_t size = num_volumes*sizeof(real); check_cuda_error(hipMallocPitch((void **) &(solver->sv), &pitch_h, size, (size_t )NEQ)); check_cuda_error(hipMemcpyToSymbol(pitch, &pitch_h, sizeof(size_t))); hipLaunchKernelGGL(( kernel_set_model_inital_conditions) , dim3(GRID), dim3(BLOCK_SIZE), 0, 0, solver->sv, num_volumes); check_cuda_error( hipPeekAtLastError() ); hipDeviceSynchronize(); return pitch_h; } extern "C" SOLVE_MODEL_ODES(solve_model_odes_gpu) { size_t num_cells_to_solve = ode_solver->num_cells_to_solve; uint32_t * cells_to_solve = ode_solver->cells_to_solve; real *sv = ode_solver->sv; real dt = ode_solver->min_dt; uint32_t num_steps = ode_solver->num_steps; // execution configuration const int GRID = ((int)num_cells_to_solve + BLOCK_SIZE - 1)/BLOCK_SIZE; size_t stim_currents_size = sizeof(real)*num_cells_to_solve; size_t cells_to_solve_size = sizeof(uint32_t)*num_cells_to_solve; real *stims_currents_device; check_cuda_error(hipMalloc((void **) &stims_currents_device, stim_currents_size)); check_cuda_error(hipMemcpy(stims_currents_device, stim_currents, stim_currents_size, hipMemcpyHostToDevice)); //the array cells to solve is passed when we are using and adaptive mesh uint32_t *cells_to_solve_device = NULL; if(cells_to_solve != NULL) { check_cuda_error(hipMalloc((void **) &cells_to_solve_device, cells_to_solve_size)); check_cuda_error(hipMemcpy(cells_to_solve_device, cells_to_solve, cells_to_solve_size, hipMemcpyHostToDevice)); } hipLaunchKernelGGL(( solve_gpu) , dim3(GRID), dim3(BLOCK_SIZE), 0, 0, dt, sv, stims_currents_device, cells_to_solve_device, num_cells_to_solve, num_steps); check_cuda_error( hipPeekAtLastError() ); check_cuda_error(hipFree(stims_currents_device)); if(cells_to_solve_device) check_cuda_error(hipFree(cells_to_solve_device)); } __global__ void kernel_set_model_inital_conditions(real *sv, int num_volumes) { int threadID = blockDim.x * blockIdx.x + threadIdx.x; if (threadID < num_volumes) { *((real * )((char *) sv + pitch * 0) + threadID) = -84.058830; // V millivolt *((real * )((char *) sv + pitch * 1) + threadID) = 0.000821; // m dimensionless *((real * )((char *) sv + pitch * 2) + threadID) = 0.995741; // h dimensionless *((real * )((char *) sv + pitch * 3) + threadID) = 0.999872; // j dimensionless *((real * )((char *) sv + pitch * 4) + threadID) = 0.000016; // d dimensionless *((real * )((char *) sv + pitch * 5) + threadID) = 0.999193; // f dimensionless *((real * )((char *) sv + pitch * 6) + threadID) = 0.988692; // f2 dimensionless *((real * )((char *) sv + pitch * 7) + threadID) = 0.965405; // fca dimensionless *((real * )((char *) sv + pitch * 8) + threadID) = 0.739378; // fca2 dimensionless *((real * )((char *) sv + pitch * 9) + threadID) = 0.001114; // xs1 dimensionless *((real * )((char *) sv + pitch * 10) + threadID) = 0.042234; // xs2 dimensionless *((real * )((char *) sv + pitch * 11) + threadID) = 0.069808; // xr dimensionless *((real * )((char *) sv + pitch * 12) + threadID) = 0.000119; // a dimensionless *((real * )((char *) sv + pitch * 13) + threadID) = 0.992541; // i dimensionless *((real * 
)((char *) sv + pitch * 14) + threadID) = 0.745628; // i2 dimensionless *((real * )((char *) sv + pitch * 15) + threadID) = 0.000329; // ml dimensionless *((real * )((char *) sv + pitch * 16) + threadID) = 0.046538; // ml3 dimensionless *((real * )((char *) sv + pitch * 17) + threadID) = 0.984170; // hl dimensionless *((real * )((char *) sv + pitch * 18) + threadID) = 0.853893; // hl3 dimensionless *((real * )((char *) sv + pitch * 19) + threadID) = 0.912569; // jl dimensionless *((real * )((char *) sv + pitch * 20) + threadID) = 0.827885; // jl3 dimensionless *((real * )((char *) sv + pitch * 21) + threadID) = 0.000135; // casss dimensionless *((real * )((char *) sv + pitch * 22) + threadID) = 1.510741; // cajsr dimensionless *((real * )((char *) sv + pitch * 23) + threadID) = 1.537577; // cacsr dimensionless *((real * )((char *) sv + pitch * 24) + threadID) = 1.538668; // cansr dimensionless *((real * )((char *) sv + pitch * 25) + threadID) = 0.000130; // cassl dimensionless *((real * )((char *) sv + pitch * 26) + threadID) = 11.501546; // nai dimensionless *((real * )((char *) sv + pitch * 27) + threadID) = 11.501230; // nassl dimensionless *((real * )((char *) sv + pitch * 28) + threadID) = 11.501240; // nasss dimensionless *((real * )((char *) sv + pitch * 29) + threadID) = 136.422946; // ki dimensionless *((real * )((char *) sv + pitch * 30) + threadID) = 0.000053; // cai millimolar *((real * )((char *) sv + pitch * 31) + threadID) = 0.000437; // b dimensionless *((real * )((char *) sv + pitch * 32) + threadID) = 0.990384; // g dimensionless *((real * )((char *) sv + pitch * 33) + threadID) = 0.535627; // u dimensionless *((real * )((char *) sv + pitch * 34) + threadID) = 0.182859; // y dimensionless *((real * )((char *) sv + pitch * 35) + threadID) = 0.010600; // camktrap dimensionless // Additional parameters *((real * )((char *) sv + pitch * 36) + threadID) = 0.0; // ical *((real * )((char *) sv + pitch * 37) + threadID) = 0.0; // camkactive *((real * )((char *) sv + pitch * 38) + threadID) = 0.0; // qrel1 *((real * )((char *) sv + pitch * 39) + threadID) = 0.0; // qrel2 *((real * )((char *) sv + pitch * 40) + threadID) = 0.0; // qup2 } } // Solving the model for each cell in the tissue matrix ni x nj __global__ void solve_gpu(real dt, real *sv, real* stim_currents, uint32_t *cells_to_solve, uint32_t num_cells_to_solve, int num_steps) { int threadID = blockDim.x * blockIdx.x + threadIdx.x; int sv_id; // Each thread solves one cell model if(threadID < num_cells_to_solve) { if(cells_to_solve) sv_id = cells_to_solve[threadID]; else sv_id = threadID; real rDY[NEQ]; for (int n = 0; n < num_steps; ++n) { // Compute Right-hand-side of the ODE's RHS_gpu(sv, rDY, stim_currents[threadID], dt, sv_id); // Solve the ODE's using a mix between Forward Euler and Rush-Larsen for(int i = 0; i < NEQ; i++) { *((real*)((char*)sv + pitch * i) + sv_id) = rDY[i]; } } } } inline __device__ void RHS_gpu(real *sv_, real *rDY_, real stim_current, real dt, int threadID_) { //const double dtmin = 0.001; //const double dtmed = 0.005; //const double dtmax = 0.1; real v_old = *((real*)((char*)sv_ + pitch * 0) + threadID_); real m_old = *((real*)((char*)sv_ + pitch * 1) + threadID_); real h_old = *((real*)((char*)sv_ + pitch * 2) + threadID_); real j_old = *((real*)((char*)sv_ + pitch * 3) + threadID_); real d_old = *((real*)((char*)sv_ + pitch * 4) + threadID_); real f_old = *((real*)((char*)sv_ + pitch * 5) + threadID_); real f2_old = *((real*)((char*)sv_ + pitch * 6) + threadID_); real fca_old = 
*((real*)((char*)sv_ + pitch * 7) + threadID_); real fca2_old = *((real*)((char*)sv_ + pitch * 8) + threadID_); real xs1_old = *((real*)((char*)sv_ + pitch * 9) + threadID_); real xs2_old = *((real*)((char*)sv_ + pitch * 10) + threadID_); real xr_old = *((real*)((char*)sv_ + pitch * 11) + threadID_); real a_old = *((real*)((char*)sv_ + pitch * 12) + threadID_); real i_old = *((real*)((char*)sv_ + pitch * 13) + threadID_); real i2_old = *((real*)((char*)sv_ + pitch * 14) + threadID_); real ml_old = *((real*)((char*)sv_ + pitch * 15) + threadID_); real ml3_old = *((real*)((char*)sv_ + pitch * 16) + threadID_); real hl_old = *((real*)((char*)sv_ + pitch * 17) + threadID_); real hl3_old = *((real*)((char*)sv_ + pitch * 18) + threadID_); real jl_old = *((real*)((char*)sv_ + pitch * 19) + threadID_); real jl3_old = *((real*)((char*)sv_ + pitch * 20) + threadID_); real casss_old = *((real*)((char*)sv_ + pitch * 21) + threadID_); real cajsr_old = *((real*)((char*)sv_ + pitch * 22) + threadID_); real cacsr_old = *((real*)((char*)sv_ + pitch * 23) + threadID_); real cansr_old = *((real*)((char*)sv_ + pitch * 24) + threadID_); real cassl_old = *((real*)((char*)sv_ + pitch * 25) + threadID_); real nai_old = *((real*)((char*)sv_ + pitch * 26) + threadID_); real nassl_old = *((real*)((char*)sv_ + pitch * 27) + threadID_); real nasss_old = *((real*)((char*)sv_ + pitch * 28) + threadID_); real ki_old = *((real*)((char*)sv_ + pitch * 29) + threadID_); real cai_old = *((real*)((char*)sv_ + pitch * 30) + threadID_); real b_old = *((real*)((char*)sv_ + pitch * 31) + threadID_); real g_old = *((real*)((char*)sv_ + pitch * 32) + threadID_); real u_old = *((real*)((char*)sv_ + pitch * 33) + threadID_); real y_old = *((real*)((char*)sv_ + pitch * 34) + threadID_); real camktrap_old = *((real*)((char*)sv_ + pitch * 35) + threadID_); real ical = *((real*)((char*)sv_ + pitch * 36) + threadID_); real camkactive = *((real*)((char*)sv_ + pitch * 37) + threadID_); real qrel1 = *((real*)((char*)sv_ + pitch * 38) + threadID_); real qrel2 = *((real*)((char*)sv_ + pitch * 39) + threadID_); real qup2 = *((real*)((char*)sv_ + pitch * 40) + threadID_); // Parameters // CELL GEOMETRY const real pi = 3.14; const real radius = 0.00175; const real length = 0.0164; const real rcg = 1.54; const real vcell = 1000*pi*radius*radius*length; const real ageo = 2*pi*radius*radius + 2*pi*radius*length; const real acap = rcg*ageo; const real vmyo = vcell * 0.60; const real vnsr = vcell * 0.04; //const real vmito = vcell * 0.18; const real vjsr = vcell * 0.002; const real vcsr = vcell * 0.008; const real vsss = vcell * 0.02; const real vssl = vcell * 0.15; // PHYSICAL CONSTANTS const real frdy = 96485; const real R = 8314; const real temp = 310; const real nao = 140; const real cao = 1.8; const real ko = 5.4; //const real clo = 100; const real zna = 1; const real zk = 1; //const real zcl = -1; const real zca = 2; //const real ganai = 0.75; //const real ganao = 0.75; //const real gaki = 0.75; //const real gako = 0.75; const real gacai = 1.0; const real gacao = 0.341; // CAMKII DYNAMICS const real camk0 = 0.05; const real alphacamk = 0.05; const real betacamk = 0.00068; const real kmcam = 0.0015; const real kmcamk = 0.15; //const real fca_dtaucamkbar = 10.0; // MEMBRANE IONIC CURRENTS const real gna = 18; const real gnal2 = 0.052; const real gnal3 = 0.018; const real pca = 1.9926e-4; //const real powtau = 10; const real gcat = 0.07875; const real gtos = 0.1414; const real gtof = 0.042; const real prnak = 0.014; //const real gnab = 0.0025; const 
real pcab = 3.99e-8; const real pnab = 0.64e-8; const real inacamax = 2.52; const real kmcaact = 0.000125; const real kmnai1 = 12.3; const real kmnao = 87.5; const real kmcai = 0.0036; const real kmcao = 1.3; const real nu = 0.35; const real ksat = 0.27; const real ibarnak = 1.1004; const real ipcabar = 0.0115; const real kmpca = 0.0005; // CALCIUM FLUXES AND CONCENTRATIONS const real IP3 = 0.0001; const real k1 = 150000; const real k1a = 16.5; const real k0 = 96000; const real k0a = 9.6; const real k2 = 1800; const real k2a = 0.21; const real tauip3r = 3.7; const real dqupcamkbar = 0.75; const real dkmplbbar = 0.00017; const real kmup = 0.00028; const real nsrbar = 15.0; const real bsrbar = 0.019975; const real kmbsr = 0.00087; const real bslbar = 0.4777; const real kmbsl = 0.0087; const real csqnbar = 2.88; const real kmcsqn = 0.8; const real cmdnbar = 0.1125; const real kmcmdn = 2.38e-3; const real trpnbar = 3.15e-2; const real kmtrpn = 0.5e-3; const real trpnbar1 = 3.5e-3; const real cmdnbar1 = 1.25e-2; const real csqnbar1 = 1.2; // CALCIUM FLUXES RATE CONSTANTS const real tautr1 = 120; const real tautr2 = 120; const real gaptau = 12; const real sstau = 0.2; // comp_revs() real eca = (R*temp/(zca*frdy))*log(cao/cassl_old); real ena = (R*temp/frdy)*log(nao/nassl_old); real ek = (R*temp/frdy)*log(ko/ki_old); // comp_ina() real ma = 0.64*(v_old+37.13)/(1-exp(-0.1*(v_old+37.13))); real mb = 0.16*exp(-v_old/11); real ha, hb, ja, jb; if (v_old<-40) { ha = 0.135*exp((70+v_old)/-6.8); hb = 3.56*exp(0.079*v_old)+310000*exp(0.35*v_old); ja = (-127140*exp(0.2444*v_old)-0.003474*exp(-0.04391*v_old))*(v_old+37.78)/(1+exp(0.311*(v_old+79.23))); jb = 0.1212*exp(-0.01052*v_old)/(1+exp(-0.1378*(v_old+40.14))); } else { ha = 0.0; hb = 1/(0.13*(1+exp((v_old+10.66)/-11.1))); ja = 0.0; jb = 0.3*exp(-0.0000002535*v_old)/(1+exp(-0.1*(v_old+32))); } real mtau = 1/(ma+mb); real htau = 1/(ha + hb); real jtau = 1/(ja+jb); real mss = ma*mtau; real hss = ha*htau; real jss = 1*ja*jtau; // Rush-Larsen m_old = mss-(mss-m_old)*exp(-dt/mtau); h_old = hss-(hss-h_old)*exp(-dt/htau); j_old = jss-(jss-j_old)*exp(-dt/jtau); real ina = gna*pow(m_old,3)*h_old*j_old*(v_old-ena); // comp_inal() real mltau = 1/(0.64*(v_old+37.13)/(1-exp(-0.1*(v_old+37.13))) + 0.16*exp(-v_old/11)); real ml3tau = mltau; real mlss = 1/(1+exp(-(v_old+28)/7)); real ml3ss = 1/(1+exp(-(v_old+63)/7)); real hltau = 162+132/(1+exp(-(v_old+28)/5.5)); real hl3tau = 0.5*hltau; real hlss = 1/(1+exp((v_old+28)/12)); real hl3ss = 1/(1+exp((v_old+63)/12)); real jltau = 411; real jl3tau = 0.5*jltau; real jlss = hlss; real jl3ss = hl3ss; // Rush-Larsen ml_old = mlss-(mlss-ml_old)*exp(-dt/mltau); ml3_old = ml3ss-(ml3ss-ml3_old)*exp(-dt/ml3tau); hl_old = hlss-(hlss-hl_old)*exp(-dt/hltau); hl3_old = hl3ss-(hl3ss-hl3_old)*exp(-dt/hl3tau); jl_old = jlss-(jlss-jl_old)*exp(-dt/jltau); jl3_old = jl3ss-(jl3ss-jl3_old)*exp(-dt/jl3tau); real inal2 = gnal2*ml_old*hl_old*jl_old*(v_old-ena); real inal3 = gnal3*ml3_old*hl3_old*jl3_old*(v_old-ena); real inal = inal2 + inal3; // comp_inab() real inab = pnab*frdy*((frdy*v_old)/(R*temp))*(nassl_old*exp((frdy*v_old)/(R*temp)) - nao)/(exp((frdy*v_old)/(R*temp))-1); // comp_ical() real ibarca = pca*zca*zca*(((v_old-15)*frdy*frdy)/(R*temp))*((gacai*casss_old*exp((zca*(v_old-15)*frdy)/(R*temp))-gacao*cao)/(exp((zca*(v_old-15)*frdy)/(R*temp))-1)); real dss = (1/(1.0+exp(-(v_old-2.0)/7.8))); real dtau = (0.59+0.8*exp(0.052*(v_old+13))/(1+exp(0.132*(v_old+13)))); real fss = 1/(1.0 + exp((v_old+16.5)/9.5)); real ftau = 
0.92/(0.125*exp(-(0.058*(v_old-2.5))*(0.045*(v_old-2.5)))+0.1); real f2ss = fss; real f2tau = 0.90/(0.02*exp(-(0.04*(v_old-18.6))*(0.045*(v_old-18.6)))+0.005); real fcass = 0.3/(1 - ical/0.05) + 0.55/(1.0+casss_old/0.003)+0.15; real fcatau = 10*camkactive/(camkactive+kmcam) + 0.5+1/(1.0+casss_old/0.003); real fca2ss = 1.0/(1.0-ical/0.01); real fca2tau = 1*(300.0/(1.0+exp((-ical-0.175)/0.04))+125.0); // Rush-Larsen d_old = dss-(dss-d_old)*exp(-dt/dtau); f_old = fss-(fss-f_old)*exp(-dt/ftau); f2_old = f2ss-(f2ss-f2_old)*exp(-dt/f2tau); fca_old = fcass-(fcass-fca_old)*exp(-dt/fcatau); fca2_old = fca2ss-(fca2ss-fca2_old)*exp(-dt/fca2tau); ical = d_old*f_old*f2_old*fca_old*fca2_old*ibarca; // comp_icat() real bss = 1/(1+ exp (-(v_old+30)/7)); real gss = 1/(1+exp((v_old+61)/5)); real taub = 1/(1.068*exp((v_old+16.3)/30)+1.068*exp(-(v_old+16.3)/30)); real taug = 1/(0.015*exp(-(v_old+71.7)/83.3)+0.015*exp((v_old+71.7)/15.4)); // Rush-Larsen b_old = bss-(bss-b_old)*exp(-dt/taub); g_old = gss-(gss-g_old)*exp(-dt/taug); real icat = gcat*b_old*g_old*(v_old-eca); // comp_icab() real icab = pcab*zca*zca*((v_old*frdy*frdy)/(R*temp))*((gacai*cassl_old*exp((zca*v_old*frdy)/(R*temp))-gacao*cao)/(exp((zca*v_old*frdy)/(R*temp))-1)); // comp_itol() real atau = 1/(25*exp((v_old-82)/18)/(1+exp((v_old-82)/18))+25*exp(-(v_old+52)/18)/(1+exp(-(v_old+52)/18))); real itau = 2.86+ 1/(exp(-(v_old+125)/15)*0.1 + 0.1*exp((v_old+2)/26.5)); real i2tau = 21.5+ 1/(exp(-(v_old+138.2)/52)*0.005 + 0.003*exp((v_old+18)/12.5)); real ass = 1/(1+exp(-(v_old-8.9)/10.3)); real iss = 1/(1+exp((v_old+30)/11)); real i2ss = iss; // Rush-Larsen a_old = ass-(ass-a_old)*exp(-dt/atau); i_old = iss-(iss-i_old)*exp(-dt/itau); i2_old = i2ss-(i2ss-i2_old)*exp(-dt/i2tau); real itos = gtos*a_old*i_old*i2_old*(v_old-ek); real itof = gtof*(v_old-ek)/(1+exp(-(v_old-3)/19.8)); real ito1 = itos + itof; // comp_ikr() real gkr = 0.0326*sqrt(ko/5.4); real xrss = 1/(1+exp(-(v_old)/15)); real xrtau = 400.0/(1.0+exp(v_old/10.0)) + 100.0; real rkr = 1/(1+exp((v_old)/35)); // Rush-Larsen xr_old = xrss-(xrss-xr_old)*exp(-dt/xrtau); real ikr = gkr*xr_old*rkr*(v_old-ek); // comp_iks() real eks = (R*temp/frdy)*log((ko+prnak*nao)/(ki_old+prnak*nassl_old)); real gks = 0.053*(1+0.6/(1+pow((0.000038/cassl_old),1.4))); real xsss = 1/(1+exp(-(v_old-9)/13.7)); real xs1tau = 200/(exp(-(v_old+10)/6) + exp((v_old-62)/55)); real xs2tau = 1500+ 350/(exp(-(v_old+10)/4) + exp((v_old-90)/58)); // Rush-Larsen xs1_old = xsss-(xsss-xs1_old)*exp(-dt/xs1tau); xs2_old = xsss-(xsss-xs2_old)*exp(-dt/xs2tau); real iks = gks*xs1_old*xs2_old*(v_old-eks); // comp_ik1() real k1ss = 1/(1+exp((v_old+103-(2.9+ko*2.175))/10.15)); real gk1 = 0.12*sqrt(ko); real ik1 = gk1*k1ss*(v_old-ek); // comp_inaca() real allo = 1/(1+pow((kmcaact/(1.5*casss_old)),2)); real num = inacamax*(pow(nasss_old,3)*cao*exp(nu*v_old*frdy/(R*temp))-pow(nao,3)*1.5*casss_old*exp((nu-1)*v_old*frdy/(R*temp))); real denommult = 1+ksat*exp((nu-1)*v_old*frdy/(R*temp)); real denomterm1 = kmcao*pow(nasss_old,3)+pow(kmnao,3)*1.5*casss_old+pow(kmnai1,3)*cao*(1+1.5*casss_old/kmcai); real denomterm2 = kmcai*pow(nao,3)*(1+pow(nasss_old/kmnai1,3))+pow(nasss_old,3)*cao+pow(nao,3)*1.5*casss_old; real deltaE = num/(denommult*(denomterm1+denomterm2)); real inacass = 0.2*allo*deltaE; allo = 1/(1+pow((kmcaact/(1.5*cassl_old)),2)); num = inacamax*(pow(nassl_old,3)*cao*exp(nu*v_old*frdy/(R*temp))-pow(nao,3)*1.5*cassl_old*exp((nu-1)*v_old*frdy/(R*temp))); denommult = 1+ksat*exp((nu-1)*v_old*frdy/(R*temp)); denomterm1 = 
kmcao*pow(nassl_old,3)+pow(kmnao,3)*1.5*cassl_old+pow(kmnai1,3)*cao*(1+1.5*cassl_old/kmcai); denomterm2 = kmcai*pow(nao,3)*(1+pow(nassl_old/kmnai1,3))+pow(nassl_old,3)*cao+pow(nao,3)*1.5*cassl_old; deltaE = num/(denommult*(denomterm1+denomterm2)); real inaca = 0.8*allo*deltaE; // comp_inak() real inak = ibarnak*(1/(1+exp(-1*(v_old+92)*frdy/(R*temp))))*pow((nassl_old/(nassl_old+2.6)),3)*(ko/(ko+0.8)); // comp_ipca() real ipca = ipcabar/((kmpca/cassl_old)+1); // comp_if() real yss = 1/(1+exp((v_old+87)/9.5)); real ytau = 2000/(exp(-(v_old+132)/10) + exp((v_old+57)/60)); // Rush-Larsen y_old = yss - (yss-y_old)*exp(-dt/ytau); real ifna = 0.012*y_old*y_old*(v_old-ena); real ifk = 0.024*y_old*y_old*(v_old-ek); //real iftotal = ifna + ifk; // comp_istim() real istim = stim_current; // comp_itot() real icatot = ical+icat+ipca+icab-2*inaca-2*inacass; real iktot = ikr+iks+ik1-2*inak+ito1+ifk+1*istim; real inatot = 3*inak+ina+3*inaca+3*inacass+inal+ifna+inab; real itot = icatot+iktot+inatot; // comp_ip3() // Forward Euler real du = dt*(casss_old*k2*(1-u_old) - k2a*u_old); u_old += du; real POip3 = tauip3r*IP3*casss_old*(1-u_old)/((1+IP3*k0/k0a)*(1+casss_old*k1/k1a)); real qip3 = 10.920*(cajsr_old-casss_old)*(POip3); // comp_qrel1() real qdiff = (casss_old-cassl_old)/sstau; real REL = -((ical)*acap/(vsss*2.0*frdy) - (qrel1 + qip3)*vjsr/vsss + qdiff); real ireltau = 2*(1+1*(1/(1+pow((0.28/camkactive),8))))/(1+(0.0123/cajsr_old)); real irelss; if (REL > 0) irelss = 15*(1+1*(1/(1+pow((0.28/camkactive),8))))*REL/(1 + pow((1.0/cajsr_old),8)); else irelss = 0; // Forward Euler qrel1 += dt*((irelss-qrel1)/ireltau); // comp_qrel2() real qgap = (cassl_old-cai_old)/gaptau; REL = (-qup2*vnsr/vmyo + qgap*vssl/vmyo+ (qrel2)*vcsr/vmyo); ireltau = 6*(1+1*(1/(1+pow((0.28/camkactive),8))))/(1+(0.0123/cacsr_old)); if (REL > 0) irelss = 91*(1+1*(1/(1+pow((0.28/camkactive),8))))*(REL)/(1 + pow((1/cacsr_old),8)); else irelss = 0; // Forward Euler qrel2 += dt*((irelss-qrel2)/ireltau); // comp_qup1() real dkmplb = dkmplbbar*camkactive/(kmcamk+camkactive); real dqupcamk = dqupcamkbar*camkactive/(kmcamk+camkactive); real qup1 = 0.0002*(dqupcamk+1)/(1+pow((kmup-dkmplb)/cassl_old,1))-0.00105*cansr_old/nsrbar; dkmplb = dkmplbbar*camkactive/(kmcamk+camkactive); dqupcamk = dqupcamkbar*camkactive/(kmcamk+camkactive); qup2 = 0.0026*(dqupcamk+1)/(1+pow((kmup-dkmplb)/cai_old,1))-0.0042*cansr_old/nsrbar; // comp_qtr1() real qtr1 = (cansr_old-cajsr_old)/tautr1; // comp_qtr2() real qtr2 = (cansr_old-cacsr_old)/tautr2; // comp_conc() qdiff = (casss_old-cassl_old)/sstau; qgap = (cassl_old-cai_old)/gaptau; real qdiffna = (nasss_old-nassl_old)/sstau; real qgapna = (nassl_old-nai_old)/gaptau; // Forward Euler real dcasss = dt*(-(ical-2*inacass)*acap/(vsss*2.0*frdy)+(qrel1+qip3)*vjsr/vsss-qdiff); real bsss = 1/(1+(bsrbar*kmbsr/pow(kmbsr+casss_old,2))+(bslbar*kmbsl/pow(kmbsl+casss_old,2))); casss_old += bsss*dcasss; // Forward Euler real dcassl = dt*(-(qup1)*vnsr/vssl+qdiff*vsss/vssl-qgap-(icat+ipca+icab-2*inaca)*acap/(vssl*2.0*frdy)); real trpn = trpnbar1*(cassl_old/(cassl_old+kmtrpn)); real cmdn = cmdnbar1*(cassl_old/(cassl_old+kmcmdn)); real catotal = trpn+cmdn+dcassl+cassl_old; real bmyo = cmdnbar1+trpnbar1-catotal+kmtrpn+kmcmdn; real cmyo = kmcmdn*kmtrpn-catotal*(kmtrpn+kmcmdn)+(trpnbar1*kmcmdn)+cmdnbar1*kmtrpn; real dmyo = -kmtrpn*kmcmdn*catotal; cassl_old = (2.0/3.0)*sqrt(bmyo*bmyo-3.0*cmyo)*cos(acos((9.0*bmyo*cmyo-2*bmyo*bmyo*bmyo-27*dmyo)/(2.0*pow((bmyo*bmyo-3.0*cmyo),1.5)))/3.0)-bmyo/3.0; real dcajsr = dt*(qtr1-qrel1-qip3); real 
csqn1 = csqnbar1*(cajsr_old/(cajsr_old+kmcsqn)); real bjsr = csqnbar1 - csqn1-cajsr_old-dcajsr+kmcsqn; real cjsr = kmcsqn*(csqn1+cajsr_old+dcajsr); cajsr_old = (sqrt(bjsr*bjsr+4*cjsr)-bjsr)/2; real dcacsr = dt*(qtr2-qrel2); real csqn = csqnbar*(cacsr_old/(cacsr_old+kmcsqn)); real bcsr = csqnbar - csqn-cacsr_old-dcacsr+kmcsqn; real ccsr = kmcsqn*(csqn+cacsr_old+dcacsr); cacsr_old = (sqrt(bcsr*bcsr+4*ccsr)-bcsr)/2; // Forward Euler real dcansr = dt*(qup1+qup2-qtr1*vjsr/vnsr-qtr2*vcsr/vnsr); cansr_old += dcansr; // Forward Euler real dnasss = dt*((-(3*inacass)*acap)/((vsss)*zna*frdy)-qdiffna); nasss_old += dnasss; // Forward Euler real dnassl = dt*((-(3*inak+ina+inal+3*inaca+ifna+inab)*acap)/((vssl)*zna*frdy)+qdiffna*vsss/vssl-qgapna); nassl_old += dnassl; // Forward Euler real dnai = dt*(qgapna*vssl/vmyo); nai_old += dnai; // Forward Euler real dki = dt*((-iktot*acap)/((vmyo+vssl+vsss)*zk*frdy)); ki_old += dki; // Forward Euler real dcai = dt*(-(qup2)*vnsr/vmyo+qgap*vssl/vmyo+(qrel2)*vcsr/vmyo); trpn = trpnbar*(cai_old/(cai_old+kmtrpn)); cmdn = cmdnbar*(cai_old/(cai_old+kmcmdn)); catotal = trpn+cmdn+dcai+cai_old; bmyo = cmdnbar+trpnbar-catotal+kmtrpn+kmcmdn; cmyo = kmcmdn*kmtrpn-catotal*(kmtrpn+kmcmdn)+(trpnbar*kmcmdn)+cmdnbar*kmtrpn; dmyo = -kmtrpn*kmcmdn*catotal; cai_old = (2.0/3.0)*sqrt(bmyo*bmyo-3.0*cmyo)*cos(acos((9.0*bmyo*cmyo-2*bmyo*bmyo*bmyo-27*dmyo)/(2.0*pow((bmyo*bmyo-3.0*cmyo),1.5)))/3.0)-bmyo/3.0; // Unused ... //real caavg = (casss_old*vsss+cassl*vssl+cai_old*vmyo)/(vsss+vmyo+vssl); real camkbound = camk0*(1-camktrap_old)*1/(1+(kmcam/casss_old)); // Forward Euler camktrap_old = dt*(alphacamk*camkbound*(camkbound+camktrap_old)-betacamk*camktrap_old) + camktrap_old; camkactive = camkbound+camktrap_old; real dvdt = -itot; v_old += dt*dvdt; // Rush-Larsen rDY_[1] = m_old; rDY_[2] = h_old; rDY_[3] = j_old; rDY_[4] = d_old; rDY_[5] = f_old; rDY_[6] = f2_old; rDY_[7] = fca_old; rDY_[8] = fca2_old; rDY_[9] = xs1_old; rDY_[10] = xs2_old; rDY_[11] = xr_old; rDY_[12] = a_old; rDY_[13] = i_old; rDY_[14] = i2_old; rDY_[15] = ml_old; rDY_[16] = ml3_old; rDY_[17] = hl_old; rDY_[18] = hl3_old; rDY_[19] = jl_old; rDY_[20] = jl3_old; rDY_[31] = b_old; rDY_[32] = g_old; rDY_[34] = y_old; // Forward Euler (I already calculated the Forward Euler scheme here ...) rDY_[0] = v_old; rDY_[21] = casss_old; rDY_[22] = cajsr_old; rDY_[23] = cacsr_old; rDY_[24] = cansr_old; rDY_[25] = cassl_old; rDY_[26] = nai_old; rDY_[27] = nassl_old; rDY_[28] = nasss_old; rDY_[29] = ki_old; rDY_[30] = cai_old; rDY_[33] = u_old; rDY_[35] = camktrap_old; rDY_[36] = ical; rDY_[37] = camkactive; rDY_[38] = qrel1; rDY_[39] = qrel2; rDY_[40] = qup2; }
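Editor's note: two explicit schemes are interleaved throughout the RHS routine above — every gating variable is advanced with a Rush-Larsen step (the xss-(xss-x)*exp(-dt/tau) pattern), while the fluxes, concentrations, and membrane voltage use Forward Euler. The short sketch below separates the two update rules; it is not part of the files in this dump, and the helper names, the double-based "real", and the sample numbers are illustrative assumptions only.

#include <math.h>
#include <stdio.h>

typedef double real;

/* Rush-Larsen step: for a gate obeying dx/dt = (x_ss - x)/tau_x, freeze
   x_ss and tau_x over the step and integrate the linear ODE exactly. */
static real rush_larsen_step(real x, real x_ss, real tau_x, real dt) {
    return x_ss - (x_ss - x) * exp(-dt / tau_x);
}

/* Forward Euler step: first-order explicit update for any rate dx/dt. */
static real forward_euler_step(real x, real dxdt, real dt) {
    return x + dt * dxdt;
}

int main(void) {
    /* Illustrative numbers only (not taken from the model above). */
    real m = 0.001, m_ss = 0.9, tau_m = 0.2, dt = 0.01;
    real ca = 1.0e-4, dcadt = -2.0e-6;
    printf("gate after one Rush-Larsen step: %g\n", rush_larsen_step(m, m_ss, tau_m, dt));
    printf("concentration after one Forward Euler step: %g\n", forward_euler_step(ca, dcadt, dt));
    return 0;
}

Because the Rush-Larsen update integrates dx/dt = (x_ss - x)/tau exactly over the step (with x_ss and tau frozen), the gate always lands between its current value and x_ss, which keeps the stiff gating equations stable at time steps where a plain Forward Euler step on the same equation could overshoot.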
bc839bd67bb886ea57bc8a232615f22c1e9d938f.cu
#include "../../gpu_utils/gpu_utils.h" #include <stddef.h> #include <stdint.h> #include "li_rudy_2011.h" extern "C" SET_ODE_INITIAL_CONDITIONS_GPU(set_model_initial_conditions_gpu) { log_info("Using Li & Rudy 2011 GPU model\n"); uint32_t num_volumes = solver->original_num_cells; // execution configuration const int GRID = (num_volumes + BLOCK_SIZE - 1)/BLOCK_SIZE; size_t size = num_volumes*sizeof(real); check_cuda_error(cudaMallocPitch((void **) &(solver->sv), &pitch_h, size, (size_t )NEQ)); check_cuda_error(cudaMemcpyToSymbol(pitch, &pitch_h, sizeof(size_t))); kernel_set_model_inital_conditions <<<GRID, BLOCK_SIZE>>>(solver->sv, num_volumes); check_cuda_error( cudaPeekAtLastError() ); cudaDeviceSynchronize(); return pitch_h; } extern "C" SOLVE_MODEL_ODES(solve_model_odes_gpu) { size_t num_cells_to_solve = ode_solver->num_cells_to_solve; uint32_t * cells_to_solve = ode_solver->cells_to_solve; real *sv = ode_solver->sv; real dt = ode_solver->min_dt; uint32_t num_steps = ode_solver->num_steps; // execution configuration const int GRID = ((int)num_cells_to_solve + BLOCK_SIZE - 1)/BLOCK_SIZE; size_t stim_currents_size = sizeof(real)*num_cells_to_solve; size_t cells_to_solve_size = sizeof(uint32_t)*num_cells_to_solve; real *stims_currents_device; check_cuda_error(cudaMalloc((void **) &stims_currents_device, stim_currents_size)); check_cuda_error(cudaMemcpy(stims_currents_device, stim_currents, stim_currents_size, cudaMemcpyHostToDevice)); //the array cells to solve is passed when we are using and adaptive mesh uint32_t *cells_to_solve_device = NULL; if(cells_to_solve != NULL) { check_cuda_error(cudaMalloc((void **) &cells_to_solve_device, cells_to_solve_size)); check_cuda_error(cudaMemcpy(cells_to_solve_device, cells_to_solve, cells_to_solve_size, cudaMemcpyHostToDevice)); } solve_gpu <<<GRID, BLOCK_SIZE>>>(dt, sv, stims_currents_device, cells_to_solve_device, num_cells_to_solve, num_steps); check_cuda_error( cudaPeekAtLastError() ); check_cuda_error(cudaFree(stims_currents_device)); if(cells_to_solve_device) check_cuda_error(cudaFree(cells_to_solve_device)); } __global__ void kernel_set_model_inital_conditions(real *sv, int num_volumes) { int threadID = blockDim.x * blockIdx.x + threadIdx.x; if (threadID < num_volumes) { *((real * )((char *) sv + pitch * 0) + threadID) = -84.058830; // V millivolt *((real * )((char *) sv + pitch * 1) + threadID) = 0.000821; // m dimensionless *((real * )((char *) sv + pitch * 2) + threadID) = 0.995741; // h dimensionless *((real * )((char *) sv + pitch * 3) + threadID) = 0.999872; // j dimensionless *((real * )((char *) sv + pitch * 4) + threadID) = 0.000016; // d dimensionless *((real * )((char *) sv + pitch * 5) + threadID) = 0.999193; // f dimensionless *((real * )((char *) sv + pitch * 6) + threadID) = 0.988692; // f2 dimensionless *((real * )((char *) sv + pitch * 7) + threadID) = 0.965405; // fca dimensionless *((real * )((char *) sv + pitch * 8) + threadID) = 0.739378; // fca2 dimensionless *((real * )((char *) sv + pitch * 9) + threadID) = 0.001114; // xs1 dimensionless *((real * )((char *) sv + pitch * 10) + threadID) = 0.042234; // xs2 dimensionless *((real * )((char *) sv + pitch * 11) + threadID) = 0.069808; // xr dimensionless *((real * )((char *) sv + pitch * 12) + threadID) = 0.000119; // a dimensionless *((real * )((char *) sv + pitch * 13) + threadID) = 0.992541; // i dimensionless *((real * )((char *) sv + pitch * 14) + threadID) = 0.745628; // i2 dimensionless *((real * )((char *) sv + pitch * 15) + threadID) = 0.000329; // ml dimensionless 
*((real * )((char *) sv + pitch * 16) + threadID) = 0.046538; // ml3 dimensionless *((real * )((char *) sv + pitch * 17) + threadID) = 0.984170; // hl dimensionless *((real * )((char *) sv + pitch * 18) + threadID) = 0.853893; // hl3 dimensionless *((real * )((char *) sv + pitch * 19) + threadID) = 0.912569; // jl dimensionless *((real * )((char *) sv + pitch * 20) + threadID) = 0.827885; // jl3 dimensionless *((real * )((char *) sv + pitch * 21) + threadID) = 0.000135; // casss dimensionless *((real * )((char *) sv + pitch * 22) + threadID) = 1.510741; // cajsr dimensionless *((real * )((char *) sv + pitch * 23) + threadID) = 1.537577; // cacsr dimensionless *((real * )((char *) sv + pitch * 24) + threadID) = 1.538668; // cansr dimensionless *((real * )((char *) sv + pitch * 25) + threadID) = 0.000130; // cassl dimensionless *((real * )((char *) sv + pitch * 26) + threadID) = 11.501546; // nai dimensionless *((real * )((char *) sv + pitch * 27) + threadID) = 11.501230; // nassl dimensionless *((real * )((char *) sv + pitch * 28) + threadID) = 11.501240; // nasss dimensionless *((real * )((char *) sv + pitch * 29) + threadID) = 136.422946; // ki dimensionless *((real * )((char *) sv + pitch * 30) + threadID) = 0.000053; // cai millimolar *((real * )((char *) sv + pitch * 31) + threadID) = 0.000437; // b dimensionless *((real * )((char *) sv + pitch * 32) + threadID) = 0.990384; // g dimensionless *((real * )((char *) sv + pitch * 33) + threadID) = 0.535627; // u dimensionless *((real * )((char *) sv + pitch * 34) + threadID) = 0.182859; // y dimensionless *((real * )((char *) sv + pitch * 35) + threadID) = 0.010600; // camktrap dimensionless // Additional parameters *((real * )((char *) sv + pitch * 36) + threadID) = 0.0; // ical *((real * )((char *) sv + pitch * 37) + threadID) = 0.0; // camkactive *((real * )((char *) sv + pitch * 38) + threadID) = 0.0; // qrel1 *((real * )((char *) sv + pitch * 39) + threadID) = 0.0; // qrel2 *((real * )((char *) sv + pitch * 40) + threadID) = 0.0; // qup2 } } // Solving the model for each cell in the tissue matrix ni x nj __global__ void solve_gpu(real dt, real *sv, real* stim_currents, uint32_t *cells_to_solve, uint32_t num_cells_to_solve, int num_steps) { int threadID = blockDim.x * blockIdx.x + threadIdx.x; int sv_id; // Each thread solves one cell model if(threadID < num_cells_to_solve) { if(cells_to_solve) sv_id = cells_to_solve[threadID]; else sv_id = threadID; real rDY[NEQ]; for (int n = 0; n < num_steps; ++n) { // Compute Right-hand-side of the ODE's RHS_gpu(sv, rDY, stim_currents[threadID], dt, sv_id); // Solve the ODE's using a mix between Forward Euler and Rush-Larsen for(int i = 0; i < NEQ; i++) { *((real*)((char*)sv + pitch * i) + sv_id) = rDY[i]; } } } } inline __device__ void RHS_gpu(real *sv_, real *rDY_, real stim_current, real dt, int threadID_) { //const double dtmin = 0.001; //const double dtmed = 0.005; //const double dtmax = 0.1; real v_old = *((real*)((char*)sv_ + pitch * 0) + threadID_); real m_old = *((real*)((char*)sv_ + pitch * 1) + threadID_); real h_old = *((real*)((char*)sv_ + pitch * 2) + threadID_); real j_old = *((real*)((char*)sv_ + pitch * 3) + threadID_); real d_old = *((real*)((char*)sv_ + pitch * 4) + threadID_); real f_old = *((real*)((char*)sv_ + pitch * 5) + threadID_); real f2_old = *((real*)((char*)sv_ + pitch * 6) + threadID_); real fca_old = *((real*)((char*)sv_ + pitch * 7) + threadID_); real fca2_old = *((real*)((char*)sv_ + pitch * 8) + threadID_); real xs1_old = *((real*)((char*)sv_ + pitch * 9) + 
threadID_); real xs2_old = *((real*)((char*)sv_ + pitch * 10) + threadID_); real xr_old = *((real*)((char*)sv_ + pitch * 11) + threadID_); real a_old = *((real*)((char*)sv_ + pitch * 12) + threadID_); real i_old = *((real*)((char*)sv_ + pitch * 13) + threadID_); real i2_old = *((real*)((char*)sv_ + pitch * 14) + threadID_); real ml_old = *((real*)((char*)sv_ + pitch * 15) + threadID_); real ml3_old = *((real*)((char*)sv_ + pitch * 16) + threadID_); real hl_old = *((real*)((char*)sv_ + pitch * 17) + threadID_); real hl3_old = *((real*)((char*)sv_ + pitch * 18) + threadID_); real jl_old = *((real*)((char*)sv_ + pitch * 19) + threadID_); real jl3_old = *((real*)((char*)sv_ + pitch * 20) + threadID_); real casss_old = *((real*)((char*)sv_ + pitch * 21) + threadID_); real cajsr_old = *((real*)((char*)sv_ + pitch * 22) + threadID_); real cacsr_old = *((real*)((char*)sv_ + pitch * 23) + threadID_); real cansr_old = *((real*)((char*)sv_ + pitch * 24) + threadID_); real cassl_old = *((real*)((char*)sv_ + pitch * 25) + threadID_); real nai_old = *((real*)((char*)sv_ + pitch * 26) + threadID_); real nassl_old = *((real*)((char*)sv_ + pitch * 27) + threadID_); real nasss_old = *((real*)((char*)sv_ + pitch * 28) + threadID_); real ki_old = *((real*)((char*)sv_ + pitch * 29) + threadID_); real cai_old = *((real*)((char*)sv_ + pitch * 30) + threadID_); real b_old = *((real*)((char*)sv_ + pitch * 31) + threadID_); real g_old = *((real*)((char*)sv_ + pitch * 32) + threadID_); real u_old = *((real*)((char*)sv_ + pitch * 33) + threadID_); real y_old = *((real*)((char*)sv_ + pitch * 34) + threadID_); real camktrap_old = *((real*)((char*)sv_ + pitch * 35) + threadID_); real ical = *((real*)((char*)sv_ + pitch * 36) + threadID_); real camkactive = *((real*)((char*)sv_ + pitch * 37) + threadID_); real qrel1 = *((real*)((char*)sv_ + pitch * 38) + threadID_); real qrel2 = *((real*)((char*)sv_ + pitch * 39) + threadID_); real qup2 = *((real*)((char*)sv_ + pitch * 40) + threadID_); // Parameters // CELL GEOMETRY const real pi = 3.14; const real radius = 0.00175; const real length = 0.0164; const real rcg = 1.54; const real vcell = 1000*pi*radius*radius*length; const real ageo = 2*pi*radius*radius + 2*pi*radius*length; const real acap = rcg*ageo; const real vmyo = vcell * 0.60; const real vnsr = vcell * 0.04; //const real vmito = vcell * 0.18; const real vjsr = vcell * 0.002; const real vcsr = vcell * 0.008; const real vsss = vcell * 0.02; const real vssl = vcell * 0.15; // PHYSICAL CONSTANTS const real frdy = 96485; const real R = 8314; const real temp = 310; const real nao = 140; const real cao = 1.8; const real ko = 5.4; //const real clo = 100; const real zna = 1; const real zk = 1; //const real zcl = -1; const real zca = 2; //const real ganai = 0.75; //const real ganao = 0.75; //const real gaki = 0.75; //const real gako = 0.75; const real gacai = 1.0; const real gacao = 0.341; // CAMKII DYNAMICS const real camk0 = 0.05; const real alphacamk = 0.05; const real betacamk = 0.00068; const real kmcam = 0.0015; const real kmcamk = 0.15; //const real fca_dtaucamkbar = 10.0; // MEMBRANE IONIC CURRENTS const real gna = 18; const real gnal2 = 0.052; const real gnal3 = 0.018; const real pca = 1.9926e-4; //const real powtau = 10; const real gcat = 0.07875; const real gtos = 0.1414; const real gtof = 0.042; const real prnak = 0.014; //const real gnab = 0.0025; const real pcab = 3.99e-8; const real pnab = 0.64e-8; const real inacamax = 2.52; const real kmcaact = 0.000125; const real kmnai1 = 12.3; const real kmnao = 87.5; 
const real kmcai = 0.0036; const real kmcao = 1.3; const real nu = 0.35; const real ksat = 0.27; const real ibarnak = 1.1004; const real ipcabar = 0.0115; const real kmpca = 0.0005; // CALCIUM FLUXES AND CONCENTRATIONS const real IP3 = 0.0001; const real k1 = 150000; const real k1a = 16.5; const real k0 = 96000; const real k0a = 9.6; const real k2 = 1800; const real k2a = 0.21; const real tauip3r = 3.7; const real dqupcamkbar = 0.75; const real dkmplbbar = 0.00017; const real kmup = 0.00028; const real nsrbar = 15.0; const real bsrbar = 0.019975; const real kmbsr = 0.00087; const real bslbar = 0.4777; const real kmbsl = 0.0087; const real csqnbar = 2.88; const real kmcsqn = 0.8; const real cmdnbar = 0.1125; const real kmcmdn = 2.38e-3; const real trpnbar = 3.15e-2; const real kmtrpn = 0.5e-3; const real trpnbar1 = 3.5e-3; const real cmdnbar1 = 1.25e-2; const real csqnbar1 = 1.2; // CALCIUM FLUXES RATE CONSTANTS const real tautr1 = 120; const real tautr2 = 120; const real gaptau = 12; const real sstau = 0.2; // comp_revs() real eca = (R*temp/(zca*frdy))*log(cao/cassl_old); real ena = (R*temp/frdy)*log(nao/nassl_old); real ek = (R*temp/frdy)*log(ko/ki_old); // comp_ina() real ma = 0.64*(v_old+37.13)/(1-exp(-0.1*(v_old+37.13))); real mb = 0.16*exp(-v_old/11); real ha, hb, ja, jb; if (v_old<-40) { ha = 0.135*exp((70+v_old)/-6.8); hb = 3.56*exp(0.079*v_old)+310000*exp(0.35*v_old); ja = (-127140*exp(0.2444*v_old)-0.003474*exp(-0.04391*v_old))*(v_old+37.78)/(1+exp(0.311*(v_old+79.23))); jb = 0.1212*exp(-0.01052*v_old)/(1+exp(-0.1378*(v_old+40.14))); } else { ha = 0.0; hb = 1/(0.13*(1+exp((v_old+10.66)/-11.1))); ja = 0.0; jb = 0.3*exp(-0.0000002535*v_old)/(1+exp(-0.1*(v_old+32))); } real mtau = 1/(ma+mb); real htau = 1/(ha + hb); real jtau = 1/(ja+jb); real mss = ma*mtau; real hss = ha*htau; real jss = 1*ja*jtau; // Rush-Larsen m_old = mss-(mss-m_old)*exp(-dt/mtau); h_old = hss-(hss-h_old)*exp(-dt/htau); j_old = jss-(jss-j_old)*exp(-dt/jtau); real ina = gna*pow(m_old,3)*h_old*j_old*(v_old-ena); // comp_inal() real mltau = 1/(0.64*(v_old+37.13)/(1-exp(-0.1*(v_old+37.13))) + 0.16*exp(-v_old/11)); real ml3tau = mltau; real mlss = 1/(1+exp(-(v_old+28)/7)); real ml3ss = 1/(1+exp(-(v_old+63)/7)); real hltau = 162+132/(1+exp(-(v_old+28)/5.5)); real hl3tau = 0.5*hltau; real hlss = 1/(1+exp((v_old+28)/12)); real hl3ss = 1/(1+exp((v_old+63)/12)); real jltau = 411; real jl3tau = 0.5*jltau; real jlss = hlss; real jl3ss = hl3ss; // Rush-Larsen ml_old = mlss-(mlss-ml_old)*exp(-dt/mltau); ml3_old = ml3ss-(ml3ss-ml3_old)*exp(-dt/ml3tau); hl_old = hlss-(hlss-hl_old)*exp(-dt/hltau); hl3_old = hl3ss-(hl3ss-hl3_old)*exp(-dt/hl3tau); jl_old = jlss-(jlss-jl_old)*exp(-dt/jltau); jl3_old = jl3ss-(jl3ss-jl3_old)*exp(-dt/jl3tau); real inal2 = gnal2*ml_old*hl_old*jl_old*(v_old-ena); real inal3 = gnal3*ml3_old*hl3_old*jl3_old*(v_old-ena); real inal = inal2 + inal3; // comp_inab() real inab = pnab*frdy*((frdy*v_old)/(R*temp))*(nassl_old*exp((frdy*v_old)/(R*temp)) - nao)/(exp((frdy*v_old)/(R*temp))-1); // comp_ical() real ibarca = pca*zca*zca*(((v_old-15)*frdy*frdy)/(R*temp))*((gacai*casss_old*exp((zca*(v_old-15)*frdy)/(R*temp))-gacao*cao)/(exp((zca*(v_old-15)*frdy)/(R*temp))-1)); real dss = (1/(1.0+exp(-(v_old-2.0)/7.8))); real dtau = (0.59+0.8*exp(0.052*(v_old+13))/(1+exp(0.132*(v_old+13)))); real fss = 1/(1.0 + exp((v_old+16.5)/9.5)); real ftau = 0.92/(0.125*exp(-(0.058*(v_old-2.5))*(0.045*(v_old-2.5)))+0.1); real f2ss = fss; real f2tau = 0.90/(0.02*exp(-(0.04*(v_old-18.6))*(0.045*(v_old-18.6)))+0.005); real fcass = 0.3/(1 
- ical/0.05) + 0.55/(1.0+casss_old/0.003)+0.15; real fcatau = 10*camkactive/(camkactive+kmcam) + 0.5+1/(1.0+casss_old/0.003); real fca2ss = 1.0/(1.0-ical/0.01); real fca2tau = 1*(300.0/(1.0+exp((-ical-0.175)/0.04))+125.0); // Rush-Larsen d_old = dss-(dss-d_old)*exp(-dt/dtau); f_old = fss-(fss-f_old)*exp(-dt/ftau); f2_old = f2ss-(f2ss-f2_old)*exp(-dt/f2tau); fca_old = fcass-(fcass-fca_old)*exp(-dt/fcatau); fca2_old = fca2ss-(fca2ss-fca2_old)*exp(-dt/fca2tau); ical = d_old*f_old*f2_old*fca_old*fca2_old*ibarca; // comp_icat() real bss = 1/(1+ exp (-(v_old+30)/7)); real gss = 1/(1+exp((v_old+61)/5)); real taub = 1/(1.068*exp((v_old+16.3)/30)+1.068*exp(-(v_old+16.3)/30)); real taug = 1/(0.015*exp(-(v_old+71.7)/83.3)+0.015*exp((v_old+71.7)/15.4)); // Rush-Larsen b_old = bss-(bss-b_old)*exp(-dt/taub); g_old = gss-(gss-g_old)*exp(-dt/taug); real icat = gcat*b_old*g_old*(v_old-eca); // comp_icab() real icab = pcab*zca*zca*((v_old*frdy*frdy)/(R*temp))*((gacai*cassl_old*exp((zca*v_old*frdy)/(R*temp))-gacao*cao)/(exp((zca*v_old*frdy)/(R*temp))-1)); // comp_itol() real atau = 1/(25*exp((v_old-82)/18)/(1+exp((v_old-82)/18))+25*exp(-(v_old+52)/18)/(1+exp(-(v_old+52)/18))); real itau = 2.86+ 1/(exp(-(v_old+125)/15)*0.1 + 0.1*exp((v_old+2)/26.5)); real i2tau = 21.5+ 1/(exp(-(v_old+138.2)/52)*0.005 + 0.003*exp((v_old+18)/12.5)); real ass = 1/(1+exp(-(v_old-8.9)/10.3)); real iss = 1/(1+exp((v_old+30)/11)); real i2ss = iss; // Rush-Larsen a_old = ass-(ass-a_old)*exp(-dt/atau); i_old = iss-(iss-i_old)*exp(-dt/itau); i2_old = i2ss-(i2ss-i2_old)*exp(-dt/i2tau); real itos = gtos*a_old*i_old*i2_old*(v_old-ek); real itof = gtof*(v_old-ek)/(1+exp(-(v_old-3)/19.8)); real ito1 = itos + itof; // comp_ikr() real gkr = 0.0326*sqrt(ko/5.4); real xrss = 1/(1+exp(-(v_old)/15)); real xrtau = 400.0/(1.0+exp(v_old/10.0)) + 100.0; real rkr = 1/(1+exp((v_old)/35)); // Rush-Larsen xr_old = xrss-(xrss-xr_old)*exp(-dt/xrtau); real ikr = gkr*xr_old*rkr*(v_old-ek); // comp_iks() real eks = (R*temp/frdy)*log((ko+prnak*nao)/(ki_old+prnak*nassl_old)); real gks = 0.053*(1+0.6/(1+pow((0.000038/cassl_old),1.4))); real xsss = 1/(1+exp(-(v_old-9)/13.7)); real xs1tau = 200/(exp(-(v_old+10)/6) + exp((v_old-62)/55)); real xs2tau = 1500+ 350/(exp(-(v_old+10)/4) + exp((v_old-90)/58)); // Rush-Larsen xs1_old = xsss-(xsss-xs1_old)*exp(-dt/xs1tau); xs2_old = xsss-(xsss-xs2_old)*exp(-dt/xs2tau); real iks = gks*xs1_old*xs2_old*(v_old-eks); // comp_ik1() real k1ss = 1/(1+exp((v_old+103-(2.9+ko*2.175))/10.15)); real gk1 = 0.12*sqrt(ko); real ik1 = gk1*k1ss*(v_old-ek); // comp_inaca() real allo = 1/(1+pow((kmcaact/(1.5*casss_old)),2)); real num = inacamax*(pow(nasss_old,3)*cao*exp(nu*v_old*frdy/(R*temp))-pow(nao,3)*1.5*casss_old*exp((nu-1)*v_old*frdy/(R*temp))); real denommult = 1+ksat*exp((nu-1)*v_old*frdy/(R*temp)); real denomterm1 = kmcao*pow(nasss_old,3)+pow(kmnao,3)*1.5*casss_old+pow(kmnai1,3)*cao*(1+1.5*casss_old/kmcai); real denomterm2 = kmcai*pow(nao,3)*(1+pow(nasss_old/kmnai1,3))+pow(nasss_old,3)*cao+pow(nao,3)*1.5*casss_old; real deltaE = num/(denommult*(denomterm1+denomterm2)); real inacass = 0.2*allo*deltaE; allo = 1/(1+pow((kmcaact/(1.5*cassl_old)),2)); num = inacamax*(pow(nassl_old,3)*cao*exp(nu*v_old*frdy/(R*temp))-pow(nao,3)*1.5*cassl_old*exp((nu-1)*v_old*frdy/(R*temp))); denommult = 1+ksat*exp((nu-1)*v_old*frdy/(R*temp)); denomterm1 = kmcao*pow(nassl_old,3)+pow(kmnao,3)*1.5*cassl_old+pow(kmnai1,3)*cao*(1+1.5*cassl_old/kmcai); denomterm2 = kmcai*pow(nao,3)*(1+pow(nassl_old/kmnai1,3))+pow(nassl_old,3)*cao+pow(nao,3)*1.5*cassl_old; deltaE = 
num/(denommult*(denomterm1+denomterm2)); real inaca = 0.8*allo*deltaE; // comp_inak() real inak = ibarnak*(1/(1+exp(-1*(v_old+92)*frdy/(R*temp))))*pow((nassl_old/(nassl_old+2.6)),3)*(ko/(ko+0.8)); // comp_ipca() real ipca = ipcabar/((kmpca/cassl_old)+1); // comp_if() real yss = 1/(1+exp((v_old+87)/9.5)); real ytau = 2000/(exp(-(v_old+132)/10) + exp((v_old+57)/60)); // Rush-Larsen y_old = yss - (yss-y_old)*exp(-dt/ytau); real ifna = 0.012*y_old*y_old*(v_old-ena); real ifk = 0.024*y_old*y_old*(v_old-ek); //real iftotal = ifna + ifk; // comp_istim() real istim = stim_current; // comp_itot() real icatot = ical+icat+ipca+icab-2*inaca-2*inacass; real iktot = ikr+iks+ik1-2*inak+ito1+ifk+1*istim; real inatot = 3*inak+ina+3*inaca+3*inacass+inal+ifna+inab; real itot = icatot+iktot+inatot; // comp_ip3() // Forward Euler real du = dt*(casss_old*k2*(1-u_old) - k2a*u_old); u_old += du; real POip3 = tauip3r*IP3*casss_old*(1-u_old)/((1+IP3*k0/k0a)*(1+casss_old*k1/k1a)); real qip3 = 10.920*(cajsr_old-casss_old)*(POip3); // comp_qrel1() real qdiff = (casss_old-cassl_old)/sstau; real REL = -((ical)*acap/(vsss*2.0*frdy) - (qrel1 + qip3)*vjsr/vsss + qdiff); real ireltau = 2*(1+1*(1/(1+pow((0.28/camkactive),8))))/(1+(0.0123/cajsr_old)); real irelss; if (REL > 0) irelss = 15*(1+1*(1/(1+pow((0.28/camkactive),8))))*REL/(1 + pow((1.0/cajsr_old),8)); else irelss = 0; // Forward Euler qrel1 += dt*((irelss-qrel1)/ireltau); // comp_qrel2() real qgap = (cassl_old-cai_old)/gaptau; REL = (-qup2*vnsr/vmyo + qgap*vssl/vmyo+ (qrel2)*vcsr/vmyo); ireltau = 6*(1+1*(1/(1+pow((0.28/camkactive),8))))/(1+(0.0123/cacsr_old)); if (REL > 0) irelss = 91*(1+1*(1/(1+pow((0.28/camkactive),8))))*(REL)/(1 + pow((1/cacsr_old),8)); else irelss = 0; // Forward Euler qrel2 += dt*((irelss-qrel2)/ireltau); // comp_qup1() real dkmplb = dkmplbbar*camkactive/(kmcamk+camkactive); real dqupcamk = dqupcamkbar*camkactive/(kmcamk+camkactive); real qup1 = 0.0002*(dqupcamk+1)/(1+pow((kmup-dkmplb)/cassl_old,1))-0.00105*cansr_old/nsrbar; dkmplb = dkmplbbar*camkactive/(kmcamk+camkactive); dqupcamk = dqupcamkbar*camkactive/(kmcamk+camkactive); qup2 = 0.0026*(dqupcamk+1)/(1+pow((kmup-dkmplb)/cai_old,1))-0.0042*cansr_old/nsrbar; // comp_qtr1() real qtr1 = (cansr_old-cajsr_old)/tautr1; // comp_qtr2() real qtr2 = (cansr_old-cacsr_old)/tautr2; // comp_conc() qdiff = (casss_old-cassl_old)/sstau; qgap = (cassl_old-cai_old)/gaptau; real qdiffna = (nasss_old-nassl_old)/sstau; real qgapna = (nassl_old-nai_old)/gaptau; // Forward Euler real dcasss = dt*(-(ical-2*inacass)*acap/(vsss*2.0*frdy)+(qrel1+qip3)*vjsr/vsss-qdiff); real bsss = 1/(1+(bsrbar*kmbsr/pow(kmbsr+casss_old,2))+(bslbar*kmbsl/pow(kmbsl+casss_old,2))); casss_old += bsss*dcasss; // Forward Euler real dcassl = dt*(-(qup1)*vnsr/vssl+qdiff*vsss/vssl-qgap-(icat+ipca+icab-2*inaca)*acap/(vssl*2.0*frdy)); real trpn = trpnbar1*(cassl_old/(cassl_old+kmtrpn)); real cmdn = cmdnbar1*(cassl_old/(cassl_old+kmcmdn)); real catotal = trpn+cmdn+dcassl+cassl_old; real bmyo = cmdnbar1+trpnbar1-catotal+kmtrpn+kmcmdn; real cmyo = kmcmdn*kmtrpn-catotal*(kmtrpn+kmcmdn)+(trpnbar1*kmcmdn)+cmdnbar1*kmtrpn; real dmyo = -kmtrpn*kmcmdn*catotal; cassl_old = (2.0/3.0)*sqrt(bmyo*bmyo-3.0*cmyo)*cos(acos((9.0*bmyo*cmyo-2*bmyo*bmyo*bmyo-27*dmyo)/(2.0*pow((bmyo*bmyo-3.0*cmyo),1.5)))/3.0)-bmyo/3.0; real dcajsr = dt*(qtr1-qrel1-qip3); real csqn1 = csqnbar1*(cajsr_old/(cajsr_old+kmcsqn)); real bjsr = csqnbar1 - csqn1-cajsr_old-dcajsr+kmcsqn; real cjsr = kmcsqn*(csqn1+cajsr_old+dcajsr); cajsr_old = (sqrt(bjsr*bjsr+4*cjsr)-bjsr)/2; real dcacsr = 
dt*(qtr2-qrel2); real csqn = csqnbar*(cacsr_old/(cacsr_old+kmcsqn)); real bcsr = csqnbar - csqn-cacsr_old-dcacsr+kmcsqn; real ccsr = kmcsqn*(csqn+cacsr_old+dcacsr); cacsr_old = (sqrt(bcsr*bcsr+4*ccsr)-bcsr)/2; // Forward Euler real dcansr = dt*(qup1+qup2-qtr1*vjsr/vnsr-qtr2*vcsr/vnsr); cansr_old += dcansr; // Forward Euler real dnasss = dt*((-(3*inacass)*acap)/((vsss)*zna*frdy)-qdiffna); nasss_old += dnasss; // Forward Euler real dnassl = dt*((-(3*inak+ina+inal+3*inaca+ifna+inab)*acap)/((vssl)*zna*frdy)+qdiffna*vsss/vssl-qgapna); nassl_old += dnassl; // Forward Euler real dnai = dt*(qgapna*vssl/vmyo); nai_old += dnai; // Forward Euler real dki = dt*((-iktot*acap)/((vmyo+vssl+vsss)*zk*frdy)); ki_old += dki; // Forward Euler real dcai = dt*(-(qup2)*vnsr/vmyo+qgap*vssl/vmyo+(qrel2)*vcsr/vmyo); trpn = trpnbar*(cai_old/(cai_old+kmtrpn)); cmdn = cmdnbar*(cai_old/(cai_old+kmcmdn)); catotal = trpn+cmdn+dcai+cai_old; bmyo = cmdnbar+trpnbar-catotal+kmtrpn+kmcmdn; cmyo = kmcmdn*kmtrpn-catotal*(kmtrpn+kmcmdn)+(trpnbar*kmcmdn)+cmdnbar*kmtrpn; dmyo = -kmtrpn*kmcmdn*catotal; cai_old = (2.0/3.0)*sqrt(bmyo*bmyo-3.0*cmyo)*cos(acos((9.0*bmyo*cmyo-2*bmyo*bmyo*bmyo-27*dmyo)/(2.0*pow((bmyo*bmyo-3.0*cmyo),1.5)))/3.0)-bmyo/3.0; // Unused ... //real caavg = (casss_old*vsss+cassl*vssl+cai_old*vmyo)/(vsss+vmyo+vssl); real camkbound = camk0*(1-camktrap_old)*1/(1+(kmcam/casss_old)); // Forward Euler camktrap_old = dt*(alphacamk*camkbound*(camkbound+camktrap_old)-betacamk*camktrap_old) + camktrap_old; camkactive = camkbound+camktrap_old; real dvdt = -itot; v_old += dt*dvdt; // Rush-Larsen rDY_[1] = m_old; rDY_[2] = h_old; rDY_[3] = j_old; rDY_[4] = d_old; rDY_[5] = f_old; rDY_[6] = f2_old; rDY_[7] = fca_old; rDY_[8] = fca2_old; rDY_[9] = xs1_old; rDY_[10] = xs2_old; rDY_[11] = xr_old; rDY_[12] = a_old; rDY_[13] = i_old; rDY_[14] = i2_old; rDY_[15] = ml_old; rDY_[16] = ml3_old; rDY_[17] = hl_old; rDY_[18] = hl3_old; rDY_[19] = jl_old; rDY_[20] = jl3_old; rDY_[31] = b_old; rDY_[32] = g_old; rDY_[34] = y_old; // Forward Euler (I already calculated the Forward Euler scheme here ...) rDY_[0] = v_old; rDY_[21] = casss_old; rDY_[22] = cajsr_old; rDY_[23] = cacsr_old; rDY_[24] = cansr_old; rDY_[25] = cassl_old; rDY_[26] = nai_old; rDY_[27] = nassl_old; rDY_[28] = nasss_old; rDY_[29] = ki_old; rDY_[30] = cai_old; rDY_[33] = u_old; rDY_[35] = camktrap_old; rDY_[36] = ical; rDY_[37] = camkactive; rDY_[38] = qrel1; rDY_[39] = qrel2; rDY_[40] = qup2; }
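Editor's note: in the CUDA version above, the whole state matrix is allocated once with cudaMallocPitch — one padded row of bytes per state variable and one column per cell — and every kernel access repeats the same byte-offset expression. The sketch below shows only that addressing pattern; it is not taken from the repository, the state_ptr helper is hypothetical, "real" is assumed to be float, and NEQ = 41 is inferred from the state indices 0..40 used above.

#include <cuda_runtime.h>

typedef float real;
#define NEQ 41  /* state variables per cell, inferred from indices 0..40 above */

/* Hypothetical helper: address of state variable i of cell `cell` in a
   pitched allocation. `pitch` is the row stride in bytes chosen by
   cudaMallocPitch, so each row stays aligned even when
   num_cells * sizeof(real) is not a multiple of the alignment. */
__host__ __device__ inline real *state_ptr(real *sv, size_t pitch, int i, int cell) {
    return (real *)((char *)sv + pitch * i) + cell;
}

int main() {
    const int num_cells = 1024;
    real *sv = nullptr;
    size_t pitch = 0;

    /* One row per state variable, one column per cell. */
    if (cudaMallocPitch((void **)&sv, &pitch, num_cells * sizeof(real), NEQ) != cudaSuccess)
        return 1;

    /* The kernels above read and write entry (i, cell) as
         *((real *)((char *)sv + pitch * i) + cell)
       which is exactly what state_ptr(sv, pitch, i, cell) dereferences. */

    cudaFree(sv);
    return 0;
}

Indexing with (char *)sv + pitch * i rather than sv + num_cells * i keeps the layout independent of whatever padding the driver adds per row; the solver above copies the same pitch value into a device symbol with cudaMemcpyToSymbol so host and device agree on the stride.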
82f9468f56a01bc52903c2f43b498ea025f3bd5a.hip
// !!! This is a file automatically generated by hipify!!! /* A Bison parser, made by GNU Bison 3.0.4. */ /* Bison implementation for Yacc-like parsers in C Copyright (C) 1984, 1989-1990, 2000-2015 Free Software Foundation, Inc. This program is free software: you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation, either version 3 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program. If not, see <http://www.gnu.org/licenses/>. */ /* As a special exception, you may create a larger work that contains part or all of the Bison parser skeleton and distribute that work under terms of your choice, so long as that work isn't itself a parser generator using the skeleton or a modified version thereof as a parser skeleton. Alternatively, if you modify or redistribute the parser skeleton itself, you may (at your option) remove this special exception, which will cause the skeleton and the resulting Bison output files to be licensed under the GNU General Public License without this special exception. This special exception was added by the Free Software Foundation in version 2.2 of Bison. */ /* C LALR(1) parser skeleton written by Richard Stallman, by simplifying the original so-called "semantic" parser. */ /* All symbols defined below should begin with yy or YY, to avoid infringing on user name space. This should be done even for local variables, as they might otherwise be expanded by user macros. There are some unavoidable exceptions within include files to define necessary library symbols; they are noted "INFRINGES ON USER NAME SPACE" below. */ /* Identify Bison output. */ #define YYBISON 1 /* Bison version. */ #define YYBISON_VERSION "3.0.4" /* Skeleton name. */ #define YYSKELETON_NAME "yacc.c" /* Pure parsers. */ #define YYPURE 0 /* Push parsers. */ #define YYPUSH 0 /* Pull parsers. */ #define YYPULL 1 /* Copy the first part of user declarations. */ #line 15 "bison.y" /* yacc.c:339 */ #include "lex.yy.c" #include "cm.h" #include "operators.h" #line 76 "bison.cu" /* yacc.c:339 */ # ifndef YY_NULLPTR # if defined __cplusplus && 201103L <= __cplusplus # define YY_NULLPTR nullptr # else # define YY_NULLPTR 0 # endif # endif /* Enabling verbose error messages. */ #ifdef YYERROR_VERBOSE # undef YYERROR_VERBOSE # define YYERROR_VERBOSE 1 #else # define YYERROR_VERBOSE 0 #endif /* Debug traces. */ #ifndef YYDEBUG # define YYDEBUG 0 #endif #if YYDEBUG extern int yydebug; #endif /* Token type. 
*/ #ifndef YYTOKENTYPE # define YYTOKENTYPE enum yytokentype { FILENAME = 258, NAME = 259, STRING = 260, INTNUM = 261, DECIMAL1 = 262, BOOL1 = 263, APPROXNUM = 264, USERVAR = 265, ASSIGN = 266, EQUAL = 267, OR = 268, XOR = 269, AND = 270, DISTINCT = 271, IN = 272, IS = 273, LIKE = 274, REGEXP = 275, NOT = 276, BETWEEN = 277, COMPARISON = 278, SHIFT = 279, MOD = 280, FROM = 281, MULITE = 282, DELETE = 283, LOAD = 284, FILTER = 285, BY = 286, JOIN = 287, STORE = 288, INTO = 289, GROUP = 290, SELECT = 291, AS = 292, ORDER = 293, ASC = 294, DESC = 295, COUNT = 296, USING = 297, SUM = 298, AVG = 299, MIN = 300, MAX = 301, LIMIT = 302, ON = 303, BINARY = 304, LEFT = 305, RIGHT = 306, OUTER = 307, SORT = 308, SEGMENTS = 309, PRESORTED = 310, PARTITION = 311, INSERT = 312, WHERE = 313, DISPLAY = 314, CASE = 315, WHEN = 316, THEN = 317, ELSE = 318, END = 319, REFERENCES = 320, SHOW = 321, TABLES = 322, TABLE = 323, DESCRIBE = 324, DROP = 325, CREATE = 326, INDEX = 327 }; #endif /* Value type. */ #if ! defined YYSTYPE && ! defined YYSTYPE_IS_DECLARED union YYSTYPE { #line 25 "bison.y" /* yacc.c:355 */ long long int intval; double floatval; char *strval; int subtok; #line 193 "bison.cu" /* yacc.c:355 */ }; typedef union YYSTYPE YYSTYPE; # define YYSTYPE_IS_TRIVIAL 1 # define YYSTYPE_IS_DECLARED 1 #endif extern YYSTYPE yylval; int yyparse (void); /* Copy the second part of user declarations. */ #line 210 "bison.cu" /* yacc.c:358 */ #ifdef short # undef short #endif #ifdef YYTYPE_UINT8 typedef YYTYPE_UINT8 yytype_uint8; #else typedef unsigned char yytype_uint8; #endif #ifdef YYTYPE_INT8 typedef YYTYPE_INT8 yytype_int8; #else typedef signed char yytype_int8; #endif #ifdef YYTYPE_UINT16 typedef YYTYPE_UINT16 yytype_uint16; #else typedef unsigned short int yytype_uint16; #endif #ifdef YYTYPE_INT16 typedef YYTYPE_INT16 yytype_int16; #else typedef short int yytype_int16; #endif #ifndef YYSIZE_T # ifdef __SIZE_TYPE__ # define YYSIZE_T __SIZE_TYPE__ # elif defined size_t # define YYSIZE_T size_t # elif ! defined YYSIZE_T # include <stddef.h> /* INFRINGES ON USER NAME SPACE */ # define YYSIZE_T size_t # else # define YYSIZE_T unsigned int # endif #endif #define YYSIZE_MAXIMUM ((YYSIZE_T) -1) #ifndef YY_ # if defined YYENABLE_NLS && YYENABLE_NLS # if ENABLE_NLS # include <libintl.h> /* INFRINGES ON USER NAME SPACE */ # define YY_(Msgid) dgettext ("bison-runtime", Msgid) # endif # endif # ifndef YY_ # define YY_(Msgid) Msgid # endif #endif #ifndef YY_ATTRIBUTE # if (defined __GNUC__ \ && (2 < __GNUC__ || (__GNUC__ == 2 && 96 <= __GNUC_MINOR__))) \ || defined __SUNPRO_C && 0x5110 <= __SUNPRO_C # define YY_ATTRIBUTE(Spec) __attribute__(Spec) # else # define YY_ATTRIBUTE(Spec) /* empty */ # endif #endif #ifndef YY_ATTRIBUTE_PURE # define YY_ATTRIBUTE_PURE YY_ATTRIBUTE ((__pure__)) #endif #ifndef YY_ATTRIBUTE_UNUSED # define YY_ATTRIBUTE_UNUSED YY_ATTRIBUTE ((__unused__)) #endif #if !defined _Noreturn \ && (!defined __STDC_VERSION__ || __STDC_VERSION__ < 201112) # if defined _MSC_VER && 1200 <= _MSC_VER # define _Noreturn __declspec (noreturn) # else # define _Noreturn YY_ATTRIBUTE ((__noreturn__)) # endif #endif /* Suppress unused-variable warnings by "using" E. */ #if ! defined lint || defined __GNUC__ # define YYUSE(E) ((void) (E)) #else # define YYUSE(E) /* empty */ #endif #if defined __GNUC__ && 407 <= __GNUC__ * 100 + __GNUC_MINOR__ /* Suppress an incorrect diagnostic about yylval being uninitialized. 
*/ # define YY_IGNORE_MAYBE_UNINITIALIZED_BEGIN \ _Pragma ("GCC diagnostic push") \ _Pragma ("GCC diagnostic ignored \"-Wuninitialized\"")\ _Pragma ("GCC diagnostic ignored \"-Wmaybe-uninitialized\"") # define YY_IGNORE_MAYBE_UNINITIALIZED_END \ _Pragma ("GCC diagnostic pop") #else # define YY_INITIAL_VALUE(Value) Value #endif #ifndef YY_IGNORE_MAYBE_UNINITIALIZED_BEGIN # define YY_IGNORE_MAYBE_UNINITIALIZED_BEGIN # define YY_IGNORE_MAYBE_UNINITIALIZED_END #endif #ifndef YY_INITIAL_VALUE # define YY_INITIAL_VALUE(Value) /* Nothing. */ #endif #if ! defined yyoverflow || YYERROR_VERBOSE /* The parser invokes alloca or malloc; define the necessary symbols. */ # ifdef YYSTACK_USE_ALLOCA # if YYSTACK_USE_ALLOCA # ifdef __GNUC__ # define YYSTACK_ALLOC __builtin_alloca # elif defined __BUILTIN_VA_ARG_INCR # include <alloca.h> /* INFRINGES ON USER NAME SPACE */ # elif defined _AIX # define YYSTACK_ALLOC __alloca # elif defined _MSC_VER # include <malloc.h> /* INFRINGES ON USER NAME SPACE */ # define alloca _alloca # else # define YYSTACK_ALLOC alloca # if ! defined _ALLOCA_H && ! defined EXIT_SUCCESS # include <stdlib.h> /* INFRINGES ON USER NAME SPACE */ /* Use EXIT_SUCCESS as a witness for stdlib.h. */ # ifndef EXIT_SUCCESS # define EXIT_SUCCESS 0 # endif # endif # endif # endif # endif # ifdef YYSTACK_ALLOC /* Pacify GCC's 'empty if-body' warning. */ # define YYSTACK_FREE(Ptr) do { /* empty */; } while (0) # ifndef YYSTACK_ALLOC_MAXIMUM /* The OS might guarantee only one guard page at the bottom of the stack, and a page size can be as small as 4096 bytes. So we cannot safely invoke alloca (N) if N exceeds 4096. Use a slightly smaller number to allow for a few compiler-allocated temporary stack slots. */ # define YYSTACK_ALLOC_MAXIMUM 4032 /* reasonable circa 2006 */ # endif # else # define YYSTACK_ALLOC YYMALLOC # define YYSTACK_FREE YYFREE # ifndef YYSTACK_ALLOC_MAXIMUM # define YYSTACK_ALLOC_MAXIMUM YYSIZE_MAXIMUM # endif # if (defined __cplusplus && ! defined EXIT_SUCCESS \ && ! ((defined YYMALLOC || defined malloc) \ && (defined YYFREE || defined free))) # include <stdlib.h> /* INFRINGES ON USER NAME SPACE */ # ifndef EXIT_SUCCESS # define EXIT_SUCCESS 0 # endif # endif # ifndef YYMALLOC # define YYMALLOC malloc # if ! defined malloc && ! defined EXIT_SUCCESS void *malloc (YYSIZE_T); /* INFRINGES ON USER NAME SPACE */ # endif # endif # ifndef YYFREE # define YYFREE free # if ! defined free && ! defined EXIT_SUCCESS void free (void *); /* INFRINGES ON USER NAME SPACE */ # endif # endif # endif #endif /* ! defined yyoverflow || YYERROR_VERBOSE */ #if (! defined yyoverflow \ && (! defined __cplusplus \ || (defined YYSTYPE_IS_TRIVIAL && YYSTYPE_IS_TRIVIAL))) /* A type that is properly aligned for any stack member. */ union yyalloc { yytype_int16 yyss_alloc; YYSTYPE yyvs_alloc; }; /* The size of the maximum gap between one aligned stack and the next. */ # define YYSTACK_GAP_MAXIMUM (sizeof (union yyalloc) - 1) /* The size of an array large to enough to hold all stacks, each with N elements. */ # define YYSTACK_BYTES(N) \ ((N) * (sizeof (yytype_int16) + sizeof (YYSTYPE)) \ + YYSTACK_GAP_MAXIMUM) # define YYCOPY_NEEDED 1 /* Relocate STACK from its old location to the new one. The local variables YYSIZE and YYSTACKSIZE give the old and new number of elements in the stack, and YYPTR gives the new location of the stack. Advance YYPTR to a properly aligned location for the next stack. 
*/ # define YYSTACK_RELOCATE(Stack_alloc, Stack) \ do \ { \ YYSIZE_T yynewbytes; \ YYCOPY (&yyptr->Stack_alloc, Stack, yysize); \ Stack = &yyptr->Stack_alloc; \ yynewbytes = yystacksize * sizeof (*Stack) + YYSTACK_GAP_MAXIMUM; \ yyptr += yynewbytes / sizeof (*yyptr); \ } \ while (0) #endif #if defined YYCOPY_NEEDED && YYCOPY_NEEDED /* Copy COUNT objects from SRC to DST. The source and destination do not overlap. */ # ifndef YYCOPY # if defined __GNUC__ && 1 < __GNUC__ # define YYCOPY(Dst, Src, Count) \ __builtin_memcpy (Dst, Src, (Count) * sizeof (*(Src))) # else # define YYCOPY(Dst, Src, Count) \ do \ { \ YYSIZE_T yyi; \ for (yyi = 0; yyi < (Count); yyi++) \ (Dst)[yyi] = (Src)[yyi]; \ } \ while (0) # endif # endif #endif /* !YYCOPY_NEEDED */ /* YYFINAL -- State number of the termination state. */ #define YYFINAL 22 /* YYLAST -- Last index in YYTABLE. */ #define YYLAST 679 /* YYNTOKENS -- Number of terminals. */ #define YYNTOKENS 90 /* YYNNTS -- Number of nonterminals. */ #define YYNNTS 14 /* YYNRULES -- Number of rules. */ #define YYNRULES 84 /* YYNSTATES -- Number of states. */ #define YYNSTATES 249 /* YYTRANSLATE[YYX] -- Symbol number corresponding to YYX as returned by yylex, with out-of-bounds checking. */ #define YYUNDEFTOK 2 #define YYMAXUTOK 327 #define YYTRANSLATE(YYX) \ ((unsigned int) (YYX) <= YYMAXUTOK ? yytranslate[YYX] : YYUNDEFTOK) /* YYTRANSLATE[TOKEN-NUM] -- Symbol number corresponding to TOKEN-NUM as returned by yylex, without out-of-bounds checking. */ static const yytype_uint8 yytranslate[] = { 0, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 22, 2, 2, 2, 32, 26, 2, 83, 84, 30, 28, 86, 29, 85, 31, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 89, 82, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 34, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 87, 25, 88, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 23, 24, 27, 33, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81 }; #if YYDEBUG /* YYRLINE[YYN] -- Source line where rule number YYN was defined. */ static const yytype_uint16 yyrline[] = { 0, 119, 119, 120, 124, 127, 129, 131, 133, 135, 137, 139, 141, 143, 145, 147, 149, 151, 153, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 181, 182, 183, 184, 185, 186, 188, 189, 190, 191, 192, 193, 194, 195, 196, 198, 199, 200, 204, 205, 208, 211, 215, 216, 217, 221, 222, 226, 227, 230, 232, 235, 239, 240, 241, 242, 243, 244, 245, 246, 248, 251, 253, 256, 257, 258 }; #endif #if YYDEBUG || YYERROR_VERBOSE || 0 /* YYTNAME[SYMBOL-NUM] -- String name of the symbol SYMBOL-NUM. First, the terminals, then, starting at YYNTOKENS, nonterminals. 
*/ static const char *const yytname[] = { "$end", "error", "$undefined", "FILENAME", "NAME", "STRING", "INTNUM", "DECIMAL1", "BOOL1", "APPROXNUM", "USERVAR", "ASSIGN", "EQUAL", "OR", "XOR", "AND", "DISTINCT", "IN", "IS", "LIKE", "REGEXP", "NOT", "'!'", "BETWEEN", "COMPARISON", "'|'", "'&'", "SHIFT", "'+'", "'-'", "'*'", "'/'", "'%'", "MOD", "'^'", "FROM", "MULITE", "DELETE", "LOAD", "FILTER", "BY", "JOIN", "STORE", "INTO", "GROUP", "SELECT", "AS", "ORDER", "ASC", "DESC", "COUNT", "USING", "SUM", "AVG", "MIN", "MAX", "LIMIT", "ON", "BINARY", "LEFT", "RIGHT", "OUTER", "SORT", "SEGMENTS", "PRESORTED", "PARTITION", "INSERT", "WHERE", "DISPLAY", "CASE", "WHEN", "THEN", "ELSE", "END", "REFERENCES", "SHOW", "TABLES", "TABLE", "DESCRIBE", "DROP", "CREATE", "INDEX", "';'", "'('", "')'", "'.'", "','", "'{'", "'}'", "':'", "$accept", "stmt_list", "stmt", "select_stmt", "expr", "opt_group_list", "expr_list", "load_list", "val_list", "opt_val_list", "opt_where", "join_list", "opt_limit", "sort_def", YY_NULLPTR }; #endif # ifdef YYPRINT /* YYTOKNUM[NUM] -- (External) token number corresponding to the (internal) symbol number NUM (which must be that of a token). */ static const yytype_uint16 yytoknum[] = { 0, 256, 257, 258, 259, 260, 261, 262, 263, 264, 265, 266, 267, 268, 269, 270, 271, 272, 273, 274, 275, 276, 33, 277, 278, 124, 38, 279, 43, 45, 42, 47, 37, 280, 94, 281, 282, 283, 284, 285, 286, 287, 288, 289, 290, 291, 292, 293, 294, 295, 296, 297, 298, 299, 300, 301, 302, 303, 304, 305, 306, 307, 308, 309, 310, 311, 312, 313, 314, 315, 316, 317, 318, 319, 320, 321, 322, 323, 324, 325, 326, 327, 59, 40, 41, 46, 44, 123, 125, 58 }; # endif #define YYPACT_NINF -155 #define yypact_value_is_default(Yystate) \ (!!((Yystate) == (-155))) #define YYTABLE_NINF -1 #define yytable_value_is_error(Yytable_value) \ (!!((Yytable_value) == (-1))) /* YYPACT[STATE-NUM] -- Index in YYTABLE of the portion describing STATE-NUM. */ static const yytype_int16 yypact[] = { 42, 4, -2, 32, 1, 54, 9, 76, 17, 14, 3, 18, -155, -8, 92, 56, 97, 51, -155, -155, 108, 109, -155, 33, -155, 111, 112, 137, 114, 66, 120, 93, 57, -155, 95, -155, 88, 115, -44, -155, -155, -155, -155, -155, -155, 192, 192, 192, -155, 71, 73, 83, 85, 86, 79, 192, 519, -33, 130, 192, -40, 137, 168, 170, 89, 192, -155, -155, -155, 171, 175, 623, 224, 224, 192, 192, 192, 192, 192, 192, 289, 192, 192, 192, 192, 0, 192, 217, 192, 192, 192, 192, 192, 192, 192, 172, 173, 192, 192, 568, 99, 177, 126, -18, 101, 103, 185, 568, -155, 105, 311, 333, 356, 378, 400, 473, -155, 568, 588, 607, 623, -155, 186, 639, 82, 646, 98, 44, 44, -155, -155, -155, -155, -155, -35, 545, 264, -155, -155, 200, -155, -50, 201, 148, 203, 125, 121, -155, -155, -155, -155, -155, 192, -155, 24, 127, 208, 176, 174, 178, 187, -155, 188, 213, 192, 134, 166, 190, -155, -155, -155, 146, 189, 230, 445, -155, 179, 192, 233, 236, 237, -155, -155, -155, 148, 209, 239, 246, 180, -61, 192, 192, -155, 202, 205, 207, -155, 254, -155, 181, 192, 256, 260, 423, 495, 192, 192, 192, 215, 238, 568, -57, 191, 184, -155, -155, 495, 495, 495, 241, 280, -155, 192, 281, 216, -155, -155, -155, 283, 212, 568, 221, 295, -155, 302, -155, 204, 242, 306, 307, 228, 229, -155, 323, 303, 324, 247, 327, -155 }; /* YYDEFACT[STATE-NUM] -- Default reduction number in state STATE-NUM. Performed when YYTABLE does not specify something else to do. Zero means the default is an error. 
*/ static const yytype_uint8 yydefact[] = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 4, 0, 0, 0, 0, 0, 16, 12, 0, 0, 1, 0, 2, 0, 0, 0, 0, 0, 0, 0, 0, 17, 0, 3, 0, 0, 19, 22, 23, 25, 26, 24, 21, 0, 0, 0, 63, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 79, 0, 0, 0, 0, 0, 7, 31, 32, 0, 0, 38, 50, 51, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 68, 14, 0, 0, 0, 0, 0, 0, 0, 70, 20, 0, 0, 0, 0, 0, 0, 0, 55, 46, 47, 48, 45, 57, 0, 53, 0, 52, 49, 39, 40, 41, 42, 43, 44, 61, 59, 0, 66, 69, 8, 0, 80, 81, 0, 79, 0, 0, 0, 33, 34, 35, 36, 37, 0, 58, 19, 0, 0, 0, 0, 0, 0, 5, 59, 0, 0, 0, 0, 0, 11, 13, 15, 0, 0, 0, 0, 54, 0, 0, 0, 0, 0, 9, 62, 67, 79, 0, 0, 0, 0, 30, 0, 0, 60, 0, 0, 0, 10, 0, 84, 0, 0, 0, 0, 0, 71, 0, 0, 0, 82, 0, 64, 0, 0, 0, 56, 75, 72, 73, 74, 0, 0, 6, 0, 0, 28, 76, 77, 78, 0, 0, 65, 0, 0, 83, 0, 29, 0, 0, 0, 0, 0, 0, 27, 0, 0, 0, 0, 0, 18 }; /* YYPGOTO[NTERM-NUM]. */ static const yytype_int16 yypgoto[] = { -155, -155, 326, 210, -27, 193, 272, -155, -154, -155, -155, -111, -142, -155 }; /* YYDEFGOTO[NTERM-NUM]. */ static const yytype_int16 yydefgoto[] = { -1, 10, 11, 12, 136, 161, 57, 211, 137, 138, 66, 162, 102, 168 }; /* YYTABLE[YYPACT[STATE-NUM]] -- What to do in state STATE-NUM. If positive, shift that token. If negative, reduce the rule whose number is the opposite. If YYTABLE_NINF, syntax error. */ static const yytype_int16 yytable[] = { 56, 170, 96, 22, 67, 68, 156, 1, 121, 157, 183, 100, 166, 201, 167, 13, 101, 142, 71, 72, 73, 122, 202, 192, 158, 159, 160, 221, 80, 222, 25, 26, 99, 14, 56, 13, 15, 27, 107, 28, 2, 69, 196, 70, 16, 3, 1, 110, 111, 112, 113, 114, 115, 97, 117, 118, 119, 120, 17, 123, 125, 126, 127, 128, 129, 130, 131, 132, 97, 4, 135, 5, 67, 68, 91, 92, 93, 94, 6, 2, 19, 7, 8, 9, 3, 18, 154, 39, 40, 41, 42, 43, 44, 215, 20, 21, 29, 80, 45, 30, 24, 31, 32, 46, 47, 225, 226, 227, 4, 69, 5, 70, 33, 34, 36, 35, 37, 6, 58, 2, 7, 8, 9, 60, 3, 174, 89, 90, 91, 92, 93, 94, 49, 59, 50, 51, 52, 53, 61, 64, 62, 38, 39, 40, 41, 42, 43, 44, 4, 79, 5, 54, 63, 45, 74, 65, 75, 6, 46, 47, 7, 8, 9, 203, 204, 55, 76, 48, 77, 78, 98, 104, 106, 210, 105, 108, 133, 134, 216, 217, 218, 109, 139, 140, 141, 143, 144, 49, 145, 50, 51, 52, 53, 146, 153, 230, 38, 39, 40, 41, 42, 43, 44, 165, 101, 169, 54, 171, 45, 172, 173, 175, 176, 46, 47, 178, 177, 182, 184, 179, 55, 38, 39, 40, 41, 42, 43, 44, 180, 185, 186, 187, 157, 45, 189, 188, 191, 193, 46, 47, 194, 195, 49, 198, 50, 51, 52, 53, 87, 197, 199, 88, 89, 90, 91, 92, 93, 94, 208, 205, 212, 54, 206, 200, 207, 209, 213, 49, 224, 50, 51, 52, 53, 220, 223, 55, 81, 82, 83, 84, 219, 228, 85, 86, 229, 231, 54, 238, 87, 233, 232, 88, 89, 90, 91, 92, 93, 94, 234, 236, 124, 81, 82, 83, 84, 235, 237, 85, 86, 239, 240, 241, 242, 87, 243, 245, 88, 89, 90, 91, 92, 93, 94, 81, 82, 83, 84, 244, 246, 85, 86, 248, 247, 103, 155, 87, 23, 0, 88, 89, 90, 91, 92, 93, 94, 81, 82, 83, 84, 0, 164, 85, 86, 0, 0, 181, 0, 87, 0, 0, 88, 89, 90, 91, 92, 93, 94, 0, 81, 82, 83, 84, 0, 116, 85, 86, 0, 0, 0, 0, 87, 0, 0, 88, 89, 90, 91, 92, 93, 94, 81, 82, 83, 84, 0, 147, 85, 86, 0, 0, 0, 0, 87, 0, 0, 88, 89, 90, 91, 92, 93, 94, 81, 82, 83, 84, 0, 148, 85, 86, 0, 0, 0, 0, 87, 0, 0, 88, 89, 90, 91, 92, 93, 94, 0, 81, 82, 83, 84, 0, 149, 85, 86, 0, 0, 0, 0, 87, 0, 0, 88, 89, 90, 91, 92, 93, 94, 81, 82, 83, 84, 0, 150, 85, 86, 0, 0, 0, 0, 87, 0, 0, 88, 89, 90, 91, 92, 93, 94, 0, 0, 0, 0, 0, 151, 81, 82, 83, 84, 0, 0, 85, 86, 0, 0, 0, 214, 87, 0, 0, 88, 89, 90, 91, 92, 93, 94, 81, 82, 83, 84, 0, 0, 85, 86, 0, 0, 190, 0, 87, 0, 0, 88, 89, 90, 91, 92, 93, 94, 0, 
0, 81, 82, 83, 84, 0, 156, 85, 86, 0, 0, 0, 0, 87, 152, 0, 88, 89, 90, 91, 92, 93, 94, 0, 158, 159, 160, 81, 82, 83, 84, 0, 0, 85, 86, 95, 0, 0, 0, 87, 0, 0, 88, 89, 90, 91, 92, 93, 94, 0, 81, 82, 83, 84, 0, 0, 85, 86, 0, 0, 0, 163, 87, 0, 0, 88, 89, 90, 91, 92, 93, 94, 83, 84, 0, 0, 85, 86, 0, 0, 0, 0, 87, 0, 0, 88, 89, 90, 91, 92, 93, 94, 84, 0, 0, 85, 86, 0, 0, 0, 0, 87, 0, 0, 88, 89, 90, 91, 92, 93, 94, 85, 86, 0, 0, 0, 0, 87, 0, 0, 88, 89, 90, 91, 92, 93, 94, -1, -1, 0, 0, 0, 0, 87, 0, 0, 88, 89, 90, 91, 92, 93, 94, 88, 89, 90, 91, 92, 93, 94 }; static const yytype_int16 yycheck[] = { 27, 143, 35, 0, 48, 49, 41, 4, 8, 44, 164, 51, 62, 74, 64, 11, 56, 35, 45, 46, 47, 21, 83, 177, 59, 60, 61, 84, 55, 86, 38, 39, 59, 35, 61, 11, 4, 45, 65, 47, 37, 85, 184, 87, 43, 42, 4, 74, 75, 76, 77, 78, 79, 86, 81, 82, 83, 84, 4, 86, 87, 88, 89, 90, 91, 92, 93, 94, 86, 66, 97, 68, 48, 49, 30, 31, 32, 33, 75, 37, 4, 78, 79, 80, 42, 76, 4, 5, 6, 7, 8, 9, 10, 204, 77, 81, 4, 124, 16, 43, 82, 4, 51, 21, 22, 216, 217, 218, 66, 85, 68, 87, 4, 4, 3, 82, 4, 75, 4, 37, 78, 79, 80, 3, 42, 152, 28, 29, 30, 31, 32, 33, 50, 67, 52, 53, 54, 55, 45, 51, 83, 4, 5, 6, 7, 8, 9, 10, 66, 70, 68, 69, 57, 16, 83, 40, 83, 75, 21, 22, 78, 79, 80, 190, 191, 83, 83, 30, 83, 83, 40, 3, 83, 200, 4, 4, 4, 4, 205, 206, 207, 6, 83, 6, 58, 84, 83, 50, 3, 52, 53, 54, 55, 88, 8, 222, 4, 5, 6, 7, 8, 9, 10, 3, 56, 4, 69, 4, 16, 84, 89, 84, 4, 21, 22, 41, 40, 4, 84, 41, 83, 4, 5, 6, 7, 8, 9, 10, 41, 63, 40, 85, 44, 16, 4, 46, 57, 4, 21, 22, 4, 4, 50, 4, 52, 53, 54, 55, 24, 40, 4, 27, 28, 29, 30, 31, 32, 33, 4, 57, 4, 69, 57, 83, 57, 84, 6, 50, 84, 52, 53, 54, 55, 35, 83, 83, 12, 13, 14, 15, 65, 40, 18, 19, 4, 4, 69, 83, 24, 6, 74, 27, 28, 29, 30, 31, 32, 33, 86, 4, 83, 12, 13, 14, 15, 84, 4, 18, 19, 67, 4, 4, 84, 24, 85, 12, 27, 28, 29, 30, 31, 32, 33, 12, 13, 14, 15, 4, 4, 18, 19, 4, 85, 61, 124, 24, 10, -1, 27, 28, 29, 30, 31, 32, 33, 12, 13, 14, 15, -1, 86, 18, 19, -1, -1, 162, -1, 24, -1, -1, 27, 28, 29, 30, 31, 32, 33, -1, 12, 13, 14, 15, -1, 84, 18, 19, -1, -1, -1, -1, 24, -1, -1, 27, 28, 29, 30, 31, 32, 33, 12, 13, 14, 15, -1, 84, 18, 19, -1, -1, -1, -1, 24, -1, -1, 27, 28, 29, 30, 31, 32, 33, 12, 13, 14, 15, -1, 84, 18, 19, -1, -1, -1, -1, 24, -1, -1, 27, 28, 29, 30, 31, 32, 33, -1, 12, 13, 14, 15, -1, 84, 18, 19, -1, -1, -1, -1, 24, -1, -1, 27, 28, 29, 30, 31, 32, 33, 12, 13, 14, 15, -1, 84, 18, 19, -1, -1, -1, -1, 24, -1, -1, 27, 28, 29, 30, 31, 32, 33, -1, -1, -1, -1, -1, 84, 12, 13, 14, 15, -1, -1, 18, 19, -1, -1, -1, 73, 24, -1, -1, 27, 28, 29, 30, 31, 32, 33, 12, 13, 14, 15, -1, -1, 18, 19, -1, -1, 72, -1, 24, -1, -1, 27, 28, 29, 30, 31, 32, 33, -1, -1, 12, 13, 14, 15, -1, 41, 18, 19, -1, -1, -1, -1, 24, 71, -1, 27, 28, 29, 30, 31, 32, 33, -1, 59, 60, 61, 12, 13, 14, 15, -1, -1, 18, 19, 46, -1, -1, -1, 24, -1, -1, 27, 28, 29, 30, 31, 32, 33, -1, 12, 13, 14, 15, -1, -1, 18, 19, -1, -1, -1, 46, 24, -1, -1, 27, 28, 29, 30, 31, 32, 33, 14, 15, -1, -1, 18, 19, -1, -1, -1, -1, 24, -1, -1, 27, 28, 29, 30, 31, 32, 33, 15, -1, -1, 18, 19, -1, -1, -1, -1, 24, -1, -1, 27, 28, 29, 30, 31, 32, 33, 18, 19, -1, -1, -1, -1, 24, -1, -1, 27, 28, 29, 30, 31, 32, 33, 18, 19, -1, -1, -1, -1, 24, -1, -1, 27, 28, 29, 30, 31, 32, 33, 27, 28, 29, 30, 31, 32, 33 }; /* YYSTOS[STATE-NUM] -- The (internal number of the) accessing symbol of state STATE-NUM. 
*/ static const yytype_uint8 yystos[] = { 0, 4, 37, 42, 66, 68, 75, 78, 79, 80, 91, 92, 93, 11, 35, 4, 43, 4, 76, 4, 77, 81, 0, 92, 82, 38, 39, 45, 47, 4, 43, 4, 51, 4, 4, 82, 3, 4, 4, 5, 6, 7, 8, 9, 10, 16, 21, 22, 30, 50, 52, 53, 54, 55, 69, 83, 94, 96, 4, 67, 3, 45, 83, 57, 51, 40, 100, 48, 49, 85, 87, 94, 94, 94, 83, 83, 83, 83, 83, 70, 94, 12, 13, 14, 15, 18, 19, 24, 27, 28, 29, 30, 31, 32, 33, 46, 35, 86, 40, 94, 51, 56, 102, 96, 3, 4, 83, 94, 4, 6, 94, 94, 94, 94, 94, 94, 84, 94, 94, 94, 94, 8, 21, 94, 83, 94, 94, 94, 94, 94, 94, 94, 94, 4, 4, 94, 94, 98, 99, 83, 6, 58, 35, 84, 83, 3, 88, 84, 84, 84, 84, 84, 71, 8, 4, 93, 41, 44, 59, 60, 61, 95, 101, 46, 86, 3, 62, 64, 103, 4, 102, 4, 84, 89, 94, 84, 4, 40, 41, 41, 41, 95, 4, 98, 84, 63, 40, 85, 46, 4, 72, 57, 98, 4, 4, 4, 102, 40, 4, 4, 83, 74, 83, 94, 94, 57, 57, 57, 4, 84, 94, 97, 4, 6, 73, 101, 94, 94, 94, 65, 35, 84, 86, 83, 84, 101, 101, 101, 40, 4, 94, 4, 74, 6, 86, 84, 4, 4, 83, 67, 4, 4, 84, 85, 4, 12, 4, 85, 4 }; /* YYR1[YYN] -- Symbol number of symbol that rule YYN derives. */ static const yytype_uint8 yyr1[] = { 0, 90, 91, 91, 92, 93, 93, 93, 93, 93, 93, 93, 93, 93, 93, 93, 93, 93, 93, 94, 94, 94, 94, 94, 94, 94, 94, 94, 94, 94, 94, 94, 94, 94, 94, 94, 94, 94, 94, 94, 94, 94, 94, 94, 94, 94, 94, 94, 94, 94, 94, 94, 94, 94, 94, 94, 94, 94, 94, 95, 95, 96, 96, 96, 97, 97, 98, 98, 99, 99, 100, 101, 101, 101, 101, 101, 101, 101, 101, 102, 102, 103, 103, 103, 103 }; /* YYR2[YYN] -- Number of symbols on the right hand side of rule YYN. */ static const yytype_uint8 yyr2[] = { 0, 2, 2, 3, 1, 7, 12, 5, 6, 8, 9, 7, 2, 7, 5, 7, 2, 3, 22, 1, 3, 1, 1, 1, 1, 1, 1, 14, 9, 11, 6, 2, 2, 4, 4, 4, 4, 4, 2, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 2, 2, 3, 3, 5, 3, 8, 3, 4, 0, 3, 3, 5, 1, 1, 3, 1, 3, 0, 1, 2, 4, 5, 5, 5, 5, 6, 6, 6, 0, 2, 0, 4, 7, 3 }; #define yyerrok (yyerrstatus = 0) #define yyclearin (yychar = YYEMPTY) #define YYEMPTY (-2) #define YYEOF 0 #define YYACCEPT goto yyacceptlab #define YYABORT goto yyabortlab #define YYERROR goto yyerrorlab #define YYRECOVERING() (!!yyerrstatus) #define YYBACKUP(Token, Value) \ do \ if (yychar == YYEMPTY) \ { \ yychar = (Token); \ yylval = (Value); \ YYPOPSTACK (yylen); \ yystate = *yyssp; \ goto yybackup; \ } \ else \ { \ yyerror (YY_("syntax error: cannot back up")); \ YYERROR; \ } \ while (0) /* Error token number */ #define YYTERROR 1 #define YYERRCODE 256 /* Enable debugging if requested. */ #if YYDEBUG # ifndef YYFPRINTF # include <stdio.h> /* INFRINGES ON USER NAME SPACE */ # define YYFPRINTF fprintf # endif # define YYDPRINTF(Args) \ do { \ if (yydebug) \ YYFPRINTF Args; \ } while (0) /* This macro is provided for backward compatibility. */ #ifndef YY_LOCATION_PRINT # define YY_LOCATION_PRINT(File, Loc) ((void) 0) #endif # define YY_SYMBOL_PRINT(Title, Type, Value, Location) \ do { \ if (yydebug) \ { \ YYFPRINTF (stderr, "%s ", Title); \ yy_symbol_print (stderr, \ Type, Value); \ YYFPRINTF (stderr, "\n"); \ } \ } while (0) /*----------------------------------------. | Print this symbol's value on YYOUTPUT. | `----------------------------------------*/ static void yy_symbol_value_print (FILE *yyoutput, int yytype, YYSTYPE const * const yyvaluep) { FILE *yyo = yyoutput; YYUSE (yyo); if (!yyvaluep) return; # ifdef YYPRINT if (yytype < YYNTOKENS) YYPRINT (yyoutput, yytoknum[yytype], *yyvaluep); # endif YYUSE (yytype); } /*--------------------------------. | Print this symbol on YYOUTPUT. 
| `--------------------------------*/ static void yy_symbol_print (FILE *yyoutput, int yytype, YYSTYPE const * const yyvaluep) { YYFPRINTF (yyoutput, "%s %s (", yytype < YYNTOKENS ? "token" : "nterm", yytname[yytype]); yy_symbol_value_print (yyoutput, yytype, yyvaluep); YYFPRINTF (yyoutput, ")"); } /*------------------------------------------------------------------. | yy_stack_print -- Print the state stack from its BOTTOM up to its | | TOP (included). | `------------------------------------------------------------------*/ static void yy_stack_print (yytype_int16 *yybottom, yytype_int16 *yytop) { YYFPRINTF (stderr, "Stack now"); for (; yybottom <= yytop; yybottom++) { int yybot = *yybottom; YYFPRINTF (stderr, " %d", yybot); } YYFPRINTF (stderr, "\n"); } # define YY_STACK_PRINT(Bottom, Top) \ do { \ if (yydebug) \ yy_stack_print ((Bottom), (Top)); \ } while (0) /*------------------------------------------------. | Report that the YYRULE is going to be reduced. | `------------------------------------------------*/ static void yy_reduce_print (yytype_int16 *yyssp, YYSTYPE *yyvsp, int yyrule) { unsigned long int yylno = yyrline[yyrule]; int yynrhs = yyr2[yyrule]; int yyi; YYFPRINTF (stderr, "Reducing stack by rule %d (line %lu):\n", yyrule - 1, yylno); /* The symbols being reduced. */ for (yyi = 0; yyi < yynrhs; yyi++) { YYFPRINTF (stderr, " $%d = ", yyi + 1); yy_symbol_print (stderr, yystos[yyssp[yyi + 1 - yynrhs]], &(yyvsp[(yyi + 1) - (yynrhs)]) ); YYFPRINTF (stderr, "\n"); } } # define YY_REDUCE_PRINT(Rule) \ do { \ if (yydebug) \ yy_reduce_print (yyssp, yyvsp, Rule); \ } while (0) /* Nonzero means print parse trace. It is left uninitialized so that multiple parsers can coexist. */ int yydebug; #else /* !YYDEBUG */ # define YYDPRINTF(Args) # define YY_SYMBOL_PRINT(Title, Type, Value, Location) # define YY_STACK_PRINT(Bottom, Top) # define YY_REDUCE_PRINT(Rule) #endif /* !YYDEBUG */ /* YYINITDEPTH -- initial size of the parser's stacks. */ #ifndef YYINITDEPTH # define YYINITDEPTH 200 #endif /* YYMAXDEPTH -- maximum size the stacks can grow to (effective only if the built-in stack extension method is used). Do not make this value too large; the results are undefined if YYSTACK_ALLOC_MAXIMUM < YYSTACK_BYTES (YYMAXDEPTH) evaluated with infinite-precision integer arithmetic. */ #ifndef YYMAXDEPTH # define YYMAXDEPTH 10000 #endif #if YYERROR_VERBOSE # ifndef yystrlen # if defined __GLIBC__ && defined _STRING_H # define yystrlen strlen # else /* Return the length of YYSTR. */ static YYSIZE_T yystrlen (const char *yystr) { YYSIZE_T yylen; for (yylen = 0; yystr[yylen]; yylen++) continue; return yylen; } # endif # endif # ifndef yystpcpy # if defined __GLIBC__ && defined _STRING_H && defined _GNU_SOURCE # define yystpcpy stpcpy # else /* Copy YYSRC to YYDEST, returning the address of the terminating '\0' in YYDEST. */ static char * yystpcpy (char *yydest, const char *yysrc) { char *yyd = yydest; const char *yys = yysrc; while ((*yyd++ = *yys++) != '\0') continue; return yyd - 1; } # endif # endif # ifndef yytnamerr /* Copy to YYRES the contents of YYSTR after stripping away unnecessary quotes and backslashes, so that it's suitable for yyerror. The heuristic is that double-quoting is unnecessary unless the string contains an apostrophe, a comma, or backslash (other than backslash-backslash). YYSTR is taken from yytname. If YYRES is null, do not copy; instead, return the length of what the result would have been. 
*/ static YYSIZE_T yytnamerr (char *yyres, const char *yystr) { if (*yystr == '"') { YYSIZE_T yyn = 0; char const *yyp = yystr; for (;;) switch (*++yyp) { case '\'': case ',': goto do_not_strip_quotes; case '\\': if (*++yyp != '\\') goto do_not_strip_quotes; /* Fall through. */ default: if (yyres) yyres[yyn] = *yyp; yyn++; break; case '"': if (yyres) yyres[yyn] = '\0'; return yyn; } do_not_strip_quotes: ; } if (! yyres) return yystrlen (yystr); return yystpcpy (yyres, yystr) - yyres; } # endif /* Copy into *YYMSG, which is of size *YYMSG_ALLOC, an error message about the unexpected token YYTOKEN for the state stack whose top is YYSSP. Return 0 if *YYMSG was successfully written. Return 1 if *YYMSG is not large enough to hold the message. In that case, also set *YYMSG_ALLOC to the required number of bytes. Return 2 if the required number of bytes is too large to store. */ static int yysyntax_error (YYSIZE_T *yymsg_alloc, char **yymsg, yytype_int16 *yyssp, int yytoken) { YYSIZE_T yysize0 = yytnamerr (YY_NULLPTR, yytname[yytoken]); YYSIZE_T yysize = yysize0; enum { YYERROR_VERBOSE_ARGS_MAXIMUM = 5 }; /* Internationalized format string. */ const char *yyformat = YY_NULLPTR; /* Arguments of yyformat. */ char const *yyarg[YYERROR_VERBOSE_ARGS_MAXIMUM]; /* Number of reported tokens (one for the "unexpected", one per "expected"). */ int yycount = 0; /* There are many possibilities here to consider: - If this state is a consistent state with a default action, then the only way this function was invoked is if the default action is an error action. In that case, don't check for expected tokens because there are none. - The only way there can be no lookahead present (in yychar) is if this state is a consistent state with a default action. Thus, detecting the absence of a lookahead is sufficient to determine that there is no unexpected or expected token to report. In that case, just report a simple "syntax error". - Don't assume there isn't a lookahead just because this state is a consistent state with a default action. There might have been a previous inconsistent state, consistent state with a non-default action, or user semantic action that manipulated yychar. - Of course, the expected token list depends on states to have correct lookahead information, and it depends on the parser not to perform extra reductions after fetching a lookahead from the scanner and before detecting a syntax error. Thus, state merging (from LALR or IELR) and default reductions corrupt the expected token list. However, the list is correct for canonical LR with one exception: it will still contain any token that will not be accepted due to an error action in a later state. */ if (yytoken != YYEMPTY) { int yyn = yypact[*yyssp]; yyarg[yycount++] = yytname[yytoken]; if (!yypact_value_is_default (yyn)) { /* Start YYX at -YYN if negative to avoid negative indexes in YYCHECK. In other words, skip the first -YYN actions for this state because they are default actions. */ int yyxbegin = yyn < 0 ? -yyn : 0; /* Stay within bounds of both yycheck and yytname. */ int yychecklim = YYLAST - yyn + 1; int yyxend = yychecklim < YYNTOKENS ? yychecklim : YYNTOKENS; int yyx; for (yyx = yyxbegin; yyx < yyxend; ++yyx) if (yycheck[yyx + yyn] == yyx && yyx != YYTERROR && !yytable_value_is_error (yytable[yyx + yyn])) { if (yycount == YYERROR_VERBOSE_ARGS_MAXIMUM) { yycount = 1; yysize = yysize0; break; } yyarg[yycount++] = yytname[yyx]; { YYSIZE_T yysize1 = yysize + yytnamerr (YY_NULLPTR, yytname[yyx]); if (! 
(yysize <= yysize1 && yysize1 <= YYSTACK_ALLOC_MAXIMUM)) return 2; yysize = yysize1; } } } } switch (yycount) { # define YYCASE_(N, S) \ case N: \ yyformat = S; \ break YYCASE_(0, YY_("syntax error")); YYCASE_(1, YY_("syntax error, unexpected %s")); YYCASE_(2, YY_("syntax error, unexpected %s, expecting %s")); YYCASE_(3, YY_("syntax error, unexpected %s, expecting %s or %s")); YYCASE_(4, YY_("syntax error, unexpected %s, expecting %s or %s or %s")); YYCASE_(5, YY_("syntax error, unexpected %s, expecting %s or %s or %s or %s")); # undef YYCASE_ } { YYSIZE_T yysize1 = yysize + yystrlen (yyformat); if (! (yysize <= yysize1 && yysize1 <= YYSTACK_ALLOC_MAXIMUM)) return 2; yysize = yysize1; } if (*yymsg_alloc < yysize) { *yymsg_alloc = 2 * yysize; if (! (yysize <= *yymsg_alloc && *yymsg_alloc <= YYSTACK_ALLOC_MAXIMUM)) *yymsg_alloc = YYSTACK_ALLOC_MAXIMUM; return 1; } /* Avoid sprintf, as that infringes on the user's name space. Don't have undefined behavior even if the translation produced a string with the wrong number of "%s"s. */ { char *yyp = *yymsg; int yyi = 0; while ((*yyp = *yyformat) != '\0') if (*yyp == '%' && yyformat[1] == 's' && yyi < yycount) { yyp += yytnamerr (yyp, yyarg[yyi++]); yyformat += 2; } else { yyp++; yyformat++; } } return 0; } #endif /* YYERROR_VERBOSE */ /*-----------------------------------------------. | Release the memory associated to this symbol. | `-----------------------------------------------*/ static void yydestruct (const char *yymsg, int yytype, YYSTYPE *yyvaluep) { YYUSE (yyvaluep); if (!yymsg) yymsg = "Deleting"; YY_SYMBOL_PRINT (yymsg, yytype, yyvaluep, yylocationp); YY_IGNORE_MAYBE_UNINITIALIZED_BEGIN YYUSE (yytype); YY_IGNORE_MAYBE_UNINITIALIZED_END } /* The lookahead symbol. */ int yychar; /* The semantic value of the lookahead symbol. */ YYSTYPE yylval; /* Number of syntax errors so far. */ int yynerrs; /*----------. | yyparse. | `----------*/ int yyparse (void) { int yystate; /* Number of tokens to shift before error messages enabled. */ int yyerrstatus; /* The stacks and their tools: 'yyss': related to states. 'yyvs': related to semantic values. Refer to the stacks through separate pointers, to allow yyoverflow to reallocate them elsewhere. */ /* The state stack. */ yytype_int16 yyssa[YYINITDEPTH]; yytype_int16 *yyss; yytype_int16 *yyssp; /* The semantic value stack. */ YYSTYPE yyvsa[YYINITDEPTH]; YYSTYPE *yyvs; YYSTYPE *yyvsp; YYSIZE_T yystacksize; int yyn; int yyresult; /* Lookahead token as an internal (translated) token number. */ int yytoken = 0; /* The variables used to return semantic value and location from the action routines. */ YYSTYPE yyval; #if YYERROR_VERBOSE /* Buffer for error messages, and its allocated size. */ char yymsgbuf[128]; char *yymsg = yymsgbuf; YYSIZE_T yymsg_alloc = sizeof yymsgbuf; #endif #define YYPOPSTACK(N) (yyvsp -= (N), yyssp -= (N)) /* The number of symbols on the RHS of the reduced rule. Keep to zero when no symbol should be popped. */ int yylen = 0; yyssp = yyss = yyssa; yyvsp = yyvs = yyvsa; yystacksize = YYINITDEPTH; YYDPRINTF ((stderr, "Starting parse\n")); yystate = 0; yyerrstatus = 0; yynerrs = 0; yychar = YYEMPTY; /* Cause a token to be read. */ goto yysetstate; /*------------------------------------------------------------. | yynewstate -- Push a new state, which is found in yystate. | `------------------------------------------------------------*/ yynewstate: /* In all cases, when you get here, the value and location stacks have just been pushed. So pushing a state here evens the stacks. 
*/ yyssp++; yysetstate: *yyssp = yystate; if (yyss + yystacksize - 1 <= yyssp) { /* Get the current used size of the three stacks, in elements. */ YYSIZE_T yysize = yyssp - yyss + 1; #ifdef yyoverflow { /* Give user a chance to reallocate the stack. Use copies of these so that the &'s don't force the real ones into memory. */ YYSTYPE *yyvs1 = yyvs; yytype_int16 *yyss1 = yyss; /* Each stack pointer address is followed by the size of the data in use in that stack, in bytes. This used to be a conditional around just the two extra args, but that might be undefined if yyoverflow is a macro. */ yyoverflow (YY_("memory exhausted"), &yyss1, yysize * sizeof (*yyssp), &yyvs1, yysize * sizeof (*yyvsp), &yystacksize); yyss = yyss1; yyvs = yyvs1; } #else /* no yyoverflow */ # ifndef YYSTACK_RELOCATE goto yyexhaustedlab; # else /* Extend the stack our own way. */ if (YYMAXDEPTH <= yystacksize) goto yyexhaustedlab; yystacksize *= 2; if (YYMAXDEPTH < yystacksize) yystacksize = YYMAXDEPTH; { yytype_int16 *yyss1 = yyss; union yyalloc *yyptr = (union yyalloc *) YYSTACK_ALLOC (YYSTACK_BYTES (yystacksize)); if (! yyptr) goto yyexhaustedlab; YYSTACK_RELOCATE (yyss_alloc, yyss); YYSTACK_RELOCATE (yyvs_alloc, yyvs); # undef YYSTACK_RELOCATE if (yyss1 != yyssa) YYSTACK_FREE (yyss1); } # endif #endif /* no yyoverflow */ yyssp = yyss + yysize - 1; yyvsp = yyvs + yysize - 1; YYDPRINTF ((stderr, "Stack size increased to %lu\n", (unsigned long int) yystacksize)); if (yyss + yystacksize - 1 <= yyssp) YYABORT; } YYDPRINTF ((stderr, "Entering state %d\n", yystate)); if (yystate == YYFINAL) YYACCEPT; goto yybackup; /*-----------. | yybackup. | `-----------*/ yybackup: /* Do appropriate processing given the current state. Read a lookahead token if we need one and don't already have one. */ /* First try to decide what to do without reference to lookahead token. */ yyn = yypact[yystate]; if (yypact_value_is_default (yyn)) goto yydefault; /* Not known => get a lookahead token if don't already have one. */ /* YYCHAR is either YYEMPTY or YYEOF or a valid lookahead symbol. */ if (yychar == YYEMPTY) { YYDPRINTF ((stderr, "Reading a token: ")); yychar = yylex (); } if (yychar <= YYEOF) { yychar = yytoken = YYEOF; YYDPRINTF ((stderr, "Now at end of input.\n")); } else { yytoken = YYTRANSLATE (yychar); YY_SYMBOL_PRINT ("Next token is", yytoken, &yylval, &yylloc); } /* If the proper action on seeing token YYTOKEN is to reduce or to detect an error, take that action. */ yyn += yytoken; if (yyn < 0 || YYLAST < yyn || yycheck[yyn] != yytoken) goto yydefault; yyn = yytable[yyn]; if (yyn <= 0) { if (yytable_value_is_error (yyn)) goto yyerrlab; yyn = -yyn; goto yyreduce; } /* Count tokens shifted since error; after three, turn off error status. */ if (yyerrstatus) yyerrstatus--; /* Shift the lookahead token. */ YY_SYMBOL_PRINT ("Shifting", yytoken, &yylval, &yylloc); /* Discard the shifted token. */ yychar = YYEMPTY; yystate = yyn; YY_IGNORE_MAYBE_UNINITIALIZED_BEGIN *++yyvsp = yylval; YY_IGNORE_MAYBE_UNINITIALIZED_END goto yynewstate; /*-----------------------------------------------------------. | yydefault -- do the default action for the current state. | `-----------------------------------------------------------*/ yydefault: yyn = yydefact[yystate]; if (yyn == 0) goto yyerrlab; goto yyreduce; /*-----------------------------. | yyreduce -- Do a reduction. | `-----------------------------*/ yyreduce: /* yyn is the number of a rule to reduce with. 
*/ yylen = yyr2[yyn]; /* If YYLEN is nonzero, implement the default value of the action: '$$ = $1'. Otherwise, the following line sets YYVAL to garbage. This behavior is undocumented and Bison users should not rely upon it. Assigning to YYVAL unconditionally makes the parser a bit smaller, and it avoids a GCC warning that YYVAL may be used uninitialized. */ yyval = yyvsp[1-yylen]; YY_REDUCE_PRINT (yyn); switch (yyn) { case 4: #line 124 "bison.y" /* yacc.c:1646 */ { emit("STMT"); } #line 1534 "bison.cu" /* yacc.c:1646 */ break; case 5: #line 128 "bison.y" /* yacc.c:1646 */ { emit_select((yyvsp[-6].strval), (yyvsp[-1].strval), (yyvsp[0].intval)); } #line 1540 "bison.cu" /* yacc.c:1646 */ break; case 6: #line 130 "bison.y" /* yacc.c:1646 */ { emit_load((yyvsp[-11].strval), (yyvsp[-8].strval), (yyvsp[-1].intval), (yyvsp[-5].strval)); } #line 1546 "bison.cu" /* yacc.c:1646 */ break; case 7: #line 132 "bison.y" /* yacc.c:1646 */ { emit_filter((yyvsp[-4].strval), (yyvsp[-1].strval));} #line 1552 "bison.cu" /* yacc.c:1646 */ break; case 8: #line 134 "bison.y" /* yacc.c:1646 */ { emit_order((yyvsp[-5].strval), (yyvsp[-2].strval), (yyvsp[0].intval));} #line 1558 "bison.cu" /* yacc.c:1646 */ break; case 9: #line 136 "bison.y" /* yacc.c:1646 */ { emit_join((yyvsp[-7].strval),(yyvsp[-2].strval),(yyvsp[-1].intval),0,-1); } #line 1564 "bison.cu" /* yacc.c:1646 */ break; case 10: #line 138 "bison.y" /* yacc.c:1646 */ { emit_store((yyvsp[-7].strval),(yyvsp[-5].strval),(yyvsp[-2].strval)); } #line 1570 "bison.cu" /* yacc.c:1646 */ break; case 11: #line 140 "bison.y" /* yacc.c:1646 */ { emit_store_binary((yyvsp[-5].strval),(yyvsp[-3].strval)); } #line 1576 "bison.cu" /* yacc.c:1646 */ break; case 12: #line 142 "bison.y" /* yacc.c:1646 */ { emit_describe_table((yyvsp[0].strval));} #line 1582 "bison.cu" /* yacc.c:1646 */ break; case 13: #line 144 "bison.y" /* yacc.c:1646 */ { emit_insert((yyvsp[-4].strval), (yyvsp[0].strval));} #line 1588 "bison.cu" /* yacc.c:1646 */ break; case 14: #line 146 "bison.y" /* yacc.c:1646 */ { emit_delete((yyvsp[-2].strval));} #line 1594 "bison.cu" /* yacc.c:1646 */ break; case 15: #line 148 "bison.y" /* yacc.c:1646 */ { emit_display((yyvsp[-5].strval), (yyvsp[-2].strval));} #line 1600 "bison.cu" /* yacc.c:1646 */ break; case 16: #line 150 "bison.y" /* yacc.c:1646 */ { emit_show_tables();} #line 1606 "bison.cu" /* yacc.c:1646 */ break; case 17: #line 152 "bison.y" /* yacc.c:1646 */ { emit_drop_table((yyvsp[0].strval));} #line 1612 "bison.cu" /* yacc.c:1646 */ break; case 18: #line 154 "bison.y" /* yacc.c:1646 */ { emit_create_bitmap_index((yyvsp[-19].strval), (yyvsp[-17].strval), (yyvsp[-15].strval), (yyvsp[-13].strval), (yyvsp[-4].strval), (yyvsp[0].strval));} #line 1618 "bison.cu" /* yacc.c:1646 */ break; case 19: #line 158 "bison.y" /* yacc.c:1646 */ { emit_name((yyvsp[0].strval)); } #line 1624 "bison.cu" /* yacc.c:1646 */ break; case 20: #line 159 "bison.y" /* yacc.c:1646 */ { emit_fieldname((yyvsp[-2].strval), (yyvsp[0].strval)); } #line 1630 "bison.cu" /* yacc.c:1646 */ break; case 21: #line 160 "bison.y" /* yacc.c:1646 */ { emit("USERVAR %s", (yyvsp[0].strval)); } #line 1636 "bison.cu" /* yacc.c:1646 */ break; case 22: #line 161 "bison.y" /* yacc.c:1646 */ { emit_string((yyvsp[0].strval)); } #line 1642 "bison.cu" /* yacc.c:1646 */ break; case 23: #line 162 "bison.y" /* yacc.c:1646 */ { emit_number((yyvsp[0].intval)); } #line 1648 "bison.cu" /* yacc.c:1646 */ break; case 24: #line 163 "bison.y" /* yacc.c:1646 */ { emit_float((yyvsp[0].floatval)); } #line 1654 "bison.cu" /* 
yacc.c:1646 */ break; case 25: #line 164 "bison.y" /* yacc.c:1646 */ { emit_decimal((yyvsp[0].intval)); } #line 1660 "bison.cu" /* yacc.c:1646 */ break; case 26: #line 165 "bison.y" /* yacc.c:1646 */ { emit("BOOL %d", (yyvsp[0].intval)); } #line 1666 "bison.cu" /* yacc.c:1646 */ break; case 27: #line 166 "bison.y" /* yacc.c:1646 */ { emit_varchar((yyvsp[-13].strval), (yyvsp[-11].intval), (yyvsp[-8].strval), (yyvsp[-6].intval), (yyvsp[-3].strval), (yyvsp[-1].strval));} #line 1672 "bison.cu" /* yacc.c:1646 */ break; case 28: #line 167 "bison.y" /* yacc.c:1646 */ { emit_varchar((yyvsp[-8].strval), (yyvsp[-6].intval), (yyvsp[-3].strval), (yyvsp[-1].intval), "", "");} #line 1678 "bison.cu" /* yacc.c:1646 */ break; case 29: #line 168 "bison.y" /* yacc.c:1646 */ { emit_var((yyvsp[-10].strval), (yyvsp[-8].intval), (yyvsp[-5].strval), (yyvsp[-3].strval), (yyvsp[-1].strval));} #line 1684 "bison.cu" /* yacc.c:1646 */ break; case 30: #line 169 "bison.y" /* yacc.c:1646 */ { emit_var((yyvsp[-5].strval), (yyvsp[-3].intval), (yyvsp[0].strval), "", "");} #line 1690 "bison.cu" /* yacc.c:1646 */ break; case 31: #line 170 "bison.y" /* yacc.c:1646 */ { emit_var_asc((yyvsp[-1].strval));} #line 1696 "bison.cu" /* yacc.c:1646 */ break; case 32: #line 171 "bison.y" /* yacc.c:1646 */ { emit_var_desc((yyvsp[-1].strval));} #line 1702 "bison.cu" /* yacc.c:1646 */ break; case 33: #line 172 "bison.y" /* yacc.c:1646 */ { emit_count(); } #line 1708 "bison.cu" /* yacc.c:1646 */ break; case 34: #line 173 "bison.y" /* yacc.c:1646 */ { emit_sum(); } #line 1714 "bison.cu" /* yacc.c:1646 */ break; case 35: #line 174 "bison.y" /* yacc.c:1646 */ { emit_average(); } #line 1720 "bison.cu" /* yacc.c:1646 */ break; case 36: #line 175 "bison.y" /* yacc.c:1646 */ { emit_min(); } #line 1726 "bison.cu" /* yacc.c:1646 */ break; case 37: #line 176 "bison.y" /* yacc.c:1646 */ { emit_max(); } #line 1732 "bison.cu" /* yacc.c:1646 */ break; case 38: #line 177 "bison.y" /* yacc.c:1646 */ { emit_distinct(); } #line 1738 "bison.cu" /* yacc.c:1646 */ break; case 39: #line 181 "bison.y" /* yacc.c:1646 */ { emit_add(); } #line 1744 "bison.cu" /* yacc.c:1646 */ break; case 40: #line 182 "bison.y" /* yacc.c:1646 */ { emit_minus(); } #line 1750 "bison.cu" /* yacc.c:1646 */ break; case 41: #line 183 "bison.y" /* yacc.c:1646 */ { emit_mul(); } #line 1756 "bison.cu" /* yacc.c:1646 */ break; case 42: #line 184 "bison.y" /* yacc.c:1646 */ { emit_div(); } #line 1762 "bison.cu" /* yacc.c:1646 */ break; case 43: #line 185 "bison.y" /* yacc.c:1646 */ { emit("MOD"); } #line 1768 "bison.cu" /* yacc.c:1646 */ break; case 44: #line 186 "bison.y" /* yacc.c:1646 */ { emit("MOD"); } #line 1774 "bison.cu" /* yacc.c:1646 */ break; case 45: #line 188 "bison.y" /* yacc.c:1646 */ { emit_and(); } #line 1780 "bison.cu" /* yacc.c:1646 */ break; case 46: #line 189 "bison.y" /* yacc.c:1646 */ { emit_eq(); } #line 1786 "bison.cu" /* yacc.c:1646 */ break; case 47: #line 190 "bison.y" /* yacc.c:1646 */ { emit_or(); } #line 1792 "bison.cu" /* yacc.c:1646 */ break; case 48: #line 191 "bison.y" /* yacc.c:1646 */ { emit("XOR"); } #line 1798 "bison.cu" /* yacc.c:1646 */ break; case 49: #line 192 "bison.y" /* yacc.c:1646 */ { emit("SHIFT %s", (yyvsp[-1].subtok)==1?"left":"right"); } #line 1804 "bison.cu" /* yacc.c:1646 */ break; case 50: #line 193 "bison.y" /* yacc.c:1646 */ { emit("NOT"); } #line 1810 "bison.cu" /* yacc.c:1646 */ break; case 51: #line 194 "bison.y" /* yacc.c:1646 */ { emit("NOT"); } #line 1816 "bison.cu" /* yacc.c:1646 */ break; case 52: #line 195 "bison.y" /* 
yacc.c:1646 */ { emit_cmp((yyvsp[-1].subtok)); } #line 1822 "bison.cu" /* yacc.c:1646 */ break; case 53: #line 196 "bison.y" /* yacc.c:1646 */ { emit_cmp(7); } #line 1828 "bison.cu" /* yacc.c:1646 */ break; case 54: #line 198 "bison.y" /* yacc.c:1646 */ { emit("CMPSELECT %d", (yyvsp[-3].subtok)); } #line 1834 "bison.cu" /* yacc.c:1646 */ break; case 55: #line 199 "bison.y" /* yacc.c:1646 */ {emit("EXPR");} #line 1840 "bison.cu" /* yacc.c:1646 */ break; case 56: #line 200 "bison.y" /* yacc.c:1646 */ { emit_case(); } #line 1846 "bison.cu" /* yacc.c:1646 */ break; case 57: #line 204 "bison.y" /* yacc.c:1646 */ { emit("ISBOOL %d", (yyvsp[0].intval)); } #line 1852 "bison.cu" /* yacc.c:1646 */ break; case 58: #line 205 "bison.y" /* yacc.c:1646 */ { emit("ISBOOL %d", (yyvsp[0].intval)); emit("NOT"); } #line 1858 "bison.cu" /* yacc.c:1646 */ break; case 59: #line 208 "bison.y" /* yacc.c:1646 */ { /* nil */ (yyval.intval) = 0; } #line 1866 "bison.cu" /* yacc.c:1646 */ break; case 60: #line 211 "bison.y" /* yacc.c:1646 */ { (yyval.intval) = (yyvsp[0].intval);} #line 1872 "bison.cu" /* yacc.c:1646 */ break; case 61: #line 215 "bison.y" /* yacc.c:1646 */ { (yyval.intval) = 1; emit_sel_name((yyvsp[0].strval));} #line 1878 "bison.cu" /* yacc.c:1646 */ break; case 62: #line 216 "bison.y" /* yacc.c:1646 */ { (yyval.intval) = (yyvsp[-4].intval) + 1; emit_sel_name((yyvsp[0].strval));} #line 1884 "bison.cu" /* yacc.c:1646 */ break; case 63: #line 217 "bison.y" /* yacc.c:1646 */ { emit_sel_name("*");} #line 1890 "bison.cu" /* yacc.c:1646 */ break; case 64: #line 221 "bison.y" /* yacc.c:1646 */ { (yyval.intval) = 1; } #line 1896 "bison.cu" /* yacc.c:1646 */ break; case 65: #line 222 "bison.y" /* yacc.c:1646 */ {(yyval.intval) = (yyvsp[-2].intval) + 1; } #line 1902 "bison.cu" /* yacc.c:1646 */ break; case 66: #line 226 "bison.y" /* yacc.c:1646 */ { (yyval.intval) = 1; } #line 1908 "bison.cu" /* yacc.c:1646 */ break; case 67: #line 227 "bison.y" /* yacc.c:1646 */ { (yyval.intval) = 1 + (yyvsp[0].intval); } #line 1914 "bison.cu" /* yacc.c:1646 */ break; case 68: #line 230 "bison.y" /* yacc.c:1646 */ { /* nil */ (yyval.intval) = 0; } #line 1922 "bison.cu" /* yacc.c:1646 */ break; case 70: #line 235 "bison.y" /* yacc.c:1646 */ { emit("FILTER BY"); } #line 1928 "bison.cu" /* yacc.c:1646 */ break; case 71: #line 239 "bison.y" /* yacc.c:1646 */ { (yyval.intval) = 1; emit_join_tab((yyvsp[-2].strval), 'I');} #line 1934 "bison.cu" /* yacc.c:1646 */ break; case 72: #line 240 "bison.y" /* yacc.c:1646 */ { (yyval.intval) = 1; emit_join_tab((yyvsp[-2].strval), 'L');} #line 1940 "bison.cu" /* yacc.c:1646 */ break; case 73: #line 241 "bison.y" /* yacc.c:1646 */ { (yyval.intval) = 1; emit_join_tab((yyvsp[-2].strval), 'R');} #line 1946 "bison.cu" /* yacc.c:1646 */ break; case 74: #line 242 "bison.y" /* yacc.c:1646 */ { (yyval.intval) = 1; emit_join_tab((yyvsp[-2].strval), 'O');} #line 1952 "bison.cu" /* yacc.c:1646 */ break; case 75: #line 243 "bison.y" /* yacc.c:1646 */ { (yyval.intval) = 1; emit_join_tab((yyvsp[-3].strval), 'I'); } #line 1958 "bison.cu" /* yacc.c:1646 */ break; case 76: #line 244 "bison.y" /* yacc.c:1646 */ { (yyval.intval) = 1; emit_join_tab((yyvsp[-3].strval), 'L'); } #line 1964 "bison.cu" /* yacc.c:1646 */ break; case 77: #line 245 "bison.y" /* yacc.c:1646 */ { (yyval.intval) = 1; emit_join_tab((yyvsp[-3].strval), 'R'); } #line 1970 "bison.cu" /* yacc.c:1646 */ break; case 78: #line 246 "bison.y" /* yacc.c:1646 */ { (yyval.intval) = 1; emit_join_tab((yyvsp[-3].strval), 'O'); } #line 1976 "bison.cu" /* 
yacc.c:1646 */ break; case 79: #line 248 "bison.y" /* yacc.c:1646 */ { /* nil */ (yyval.intval) = 0; } #line 1984 "bison.cu" /* yacc.c:1646 */ break; case 80: #line 251 "bison.y" /* yacc.c:1646 */ { emit_limit((yyvsp[0].intval)); } #line 1990 "bison.cu" /* yacc.c:1646 */ break; case 81: #line 253 "bison.y" /* yacc.c:1646 */ { /* nil */ (yyval.intval) = 0; } #line 1998 "bison.cu" /* yacc.c:1646 */ break; case 82: #line 256 "bison.y" /* yacc.c:1646 */ { emit_sort((yyvsp[0].strval), 0); } #line 2004 "bison.cu" /* yacc.c:1646 */ break; case 83: #line 257 "bison.y" /* yacc.c:1646 */ { emit_sort((yyvsp[-3].strval), (yyvsp[0].intval)); } #line 2010 "bison.cu" /* yacc.c:1646 */ break; case 84: #line 258 "bison.y" /* yacc.c:1646 */ { emit_presort((yyvsp[0].strval)); } #line 2016 "bison.cu" /* yacc.c:1646 */ break; #line 2020 "bison.cu" /* yacc.c:1646 */ default: break; } /* User semantic actions sometimes alter yychar, and that requires that yytoken be updated with the new translation. We take the approach of translating immediately before every use of yytoken. One alternative is translating here after every semantic action, but that translation would be missed if the semantic action invokes YYABORT, YYACCEPT, or YYERROR immediately after altering yychar or if it invokes YYBACKUP. In the case of YYABORT or YYACCEPT, an incorrect destructor might then be invoked immediately. In the case of YYERROR or YYBACKUP, subsequent parser actions might lead to an incorrect destructor call or verbose syntax error message before the lookahead is translated. */ YY_SYMBOL_PRINT ("-> $$ =", yyr1[yyn], &yyval, &yyloc); YYPOPSTACK (yylen); yylen = 0; YY_STACK_PRINT (yyss, yyssp); *++yyvsp = yyval; /* Now 'shift' the result of the reduction. Determine what state that goes to, based on the state we popped back to and the rule number reduced by. */ yyn = yyr1[yyn]; yystate = yypgoto[yyn - YYNTOKENS] + *yyssp; if (0 <= yystate && yystate <= YYLAST && yycheck[yystate] == *yyssp) yystate = yytable[yystate]; else yystate = yydefgoto[yyn - YYNTOKENS]; goto yynewstate; /*--------------------------------------. | yyerrlab -- here on detecting error. | `--------------------------------------*/ yyerrlab: /* Make sure we have latest lookahead translation. See comments at user semantic actions for why this is necessary. */ yytoken = yychar == YYEMPTY ? YYEMPTY : YYTRANSLATE (yychar); /* If not already recovering from an error, report this error. */ if (!yyerrstatus) { ++yynerrs; #if ! YYERROR_VERBOSE yyerror (YY_("syntax error")); #else # define YYSYNTAX_ERROR yysyntax_error (&yymsg_alloc, &yymsg, \ yyssp, yytoken) { char const *yymsgp = YY_("syntax error"); int yysyntax_error_status; yysyntax_error_status = YYSYNTAX_ERROR; if (yysyntax_error_status == 0) yymsgp = yymsg; else if (yysyntax_error_status == 1) { if (yymsg != yymsgbuf) YYSTACK_FREE (yymsg); yymsg = (char *) YYSTACK_ALLOC (yymsg_alloc); if (!yymsg) { yymsg = yymsgbuf; yymsg_alloc = sizeof yymsgbuf; yysyntax_error_status = 2; } else { yysyntax_error_status = YYSYNTAX_ERROR; yymsgp = yymsg; } } yyerror (yymsgp); if (yysyntax_error_status == 2) goto yyexhaustedlab; } # undef YYSYNTAX_ERROR #endif } if (yyerrstatus == 3) { /* If just tried and failed to reuse lookahead token after an error, discard it. */ if (yychar <= YYEOF) { /* Return failure if at end of input. */ if (yychar == YYEOF) YYABORT; } else { yydestruct ("Error: discarding", yytoken, &yylval); yychar = YYEMPTY; } } /* Else will try to reuse lookahead token after shifting the error token. 
*/ goto yyerrlab1; /*---------------------------------------------------. | yyerrorlab -- error raised explicitly by YYERROR. | `---------------------------------------------------*/ yyerrorlab: /* Pacify compilers like GCC when the user code never invokes YYERROR and the label yyerrorlab therefore never appears in user code. */ if (/*CONSTCOND*/ 0) goto yyerrorlab; /* Do not reclaim the symbols of the rule whose action triggered this YYERROR. */ YYPOPSTACK (yylen); yylen = 0; YY_STACK_PRINT (yyss, yyssp); yystate = *yyssp; goto yyerrlab1; /*-------------------------------------------------------------. | yyerrlab1 -- common code for both syntax error and YYERROR. | `-------------------------------------------------------------*/ yyerrlab1: yyerrstatus = 3; /* Each real token shifted decrements this. */ for (;;) { yyn = yypact[yystate]; if (!yypact_value_is_default (yyn)) { yyn += YYTERROR; if (0 <= yyn && yyn <= YYLAST && yycheck[yyn] == YYTERROR) { yyn = yytable[yyn]; if (0 < yyn) break; } } /* Pop the current state because it cannot handle the error token. */ if (yyssp == yyss) YYABORT; yydestruct ("Error: popping", yystos[yystate], yyvsp); YYPOPSTACK (1); yystate = *yyssp; YY_STACK_PRINT (yyss, yyssp); } YY_IGNORE_MAYBE_UNINITIALIZED_BEGIN *++yyvsp = yylval; YY_IGNORE_MAYBE_UNINITIALIZED_END /* Shift the error token. */ YY_SYMBOL_PRINT ("Shifting", yystos[yyn], yyvsp, yylsp); yystate = yyn; goto yynewstate; /*-------------------------------------. | yyacceptlab -- YYACCEPT comes here. | `-------------------------------------*/ yyacceptlab: yyresult = 0; goto yyreturn; /*-----------------------------------. | yyabortlab -- YYABORT comes here. | `-----------------------------------*/ yyabortlab: yyresult = 1; goto yyreturn; #if !defined yyoverflow || YYERROR_VERBOSE /*-------------------------------------------------. | yyexhaustedlab -- memory exhaustion comes here. | `-------------------------------------------------*/ yyexhaustedlab: yyerror (YY_("memory exhausted")); yyresult = 2; /* Fall through. */ #endif yyreturn: if (yychar != YYEMPTY) { /* Make sure we have latest lookahead translation. See comments at user semantic actions for why this is necessary. */ yytoken = YYTRANSLATE (yychar); yydestruct ("Cleanup: discarding lookahead", yytoken, &yylval); } /* Do not reclaim the symbols of the rule whose action triggered this YYABORT or YYACCEPT. 
*/ YYPOPSTACK (yylen); YY_STACK_PRINT (yyss, yyssp); while (yyssp != yyss) { yydestruct ("Cleanup: popping", yystos[*yyssp], yyvsp); YYPOPSTACK (1); } #ifndef yyoverflow if (yyss != yyssa) YYSTACK_FREE (yyss); #endif #if YYERROR_VERBOSE if (yymsg != yymsgbuf) YYSTACK_FREE (yymsg); #endif return yyresult; } #line 260 "bison.y" /* yacc.c:1906 */ bool scan_state; unsigned int statement_count; int execute_file(int ac, char **av) { bool just_once = 0; string script; process_count = 1000000000; //1GB by default verbose = 0; ssd = 0; delta = 0; total_buffer_size = 0; hash_seed = 100; for (int i = 1; i < ac; i++) { if(strcmp(av[i],"-l") == 0) { process_count = 1000000*atoff(av[i+1]); } else if(strcmp(av[i],"-v") == 0) { verbose = 1; } else if(strcmp(av[i],"-delta") == 0) { delta = 1; } else if(strcmp(av[i],"-ssd") == 0) { ssd = 1; } else if(strcmp(av[i],"-i") == 0) { interactive = 1; break; } else if(strcmp(av[i],"-s") == 0) { just_once = 1; interactive = 1; script = av[i+1]; }; }; load_col_data(data_dict, "data.dictionary"); tot_disk = 0; if (!interactive) { if((yyin = fopen(av[ac-1], "r")) == nullptr) { perror(av[ac-1]); exit(1); }; if(yyparse()) { printf("SQL scan parse failed\n"); exit(1); }; scan_state = 1; std::clock_t start1 = std::clock(); load_vars(); statement_count = 0; clean_queues(); yyin = fopen(av[ac-1], "r"); PROC_FLUSH_BUF ( yyin ); statement_count = 0; extern FILE *yyin; context = CreateCudaDevice(0, nullptr, verbose); if(!yyparse()) { if(verbose) cout << "SQL scan parse worked " << endl; } else cout << "SQL scan parse failed" << endl; fclose(yyin); for (auto it=varNames.begin() ; it != varNames.end(); ++it ) { (*it).second->free(); }; if(verbose) { cout<< "cycle time " << ( ( std::clock() - start1 ) / (double)CLOCKS_PER_SEC ) << " " << getFreeMem() << endl; cout<< "disk time " << ( tot_disk / (double)CLOCKS_PER_SEC ) << " " << getFreeMem() << endl; }; } else { context = CreateCudaDevice(0, nullptr, verbose); if(!just_once) getline(cin, script); while (script != "exit" && script != "EXIT") { used_vars.clear(); yy_scan_string(script.c_str()); scan_state = 0; statement_count = 0; clean_queues(); if(yyparse()) { printf("SQL scan parse failed \n"); getline(cin, script); continue; }; scan_state = 1; load_vars(); statement_count = 0; clean_queues(); yy_scan_string(script.c_str()); std::clock_t start1 = std::clock(); if(!yyparse()) { if(verbose) cout << "SQL scan parse worked " << endl; }; for (auto it=varNames.begin() ; it != varNames.end(); ++it ) { (*it).second->free(); }; varNames.clear(); if(verbose) { cout<< "cycle time " << ( ( std::clock() - start1 ) / (double)CLOCKS_PER_SEC ) << endl; }; if(!just_once) getline(cin, script); else script = "exit"; }; while(!buffer_names.empty()) { //delete [] buffers[buffer_names.front()]; hipHostFree(buffers[buffer_names.front()]); buffer_sizes.erase(buffer_names.front()); buffers.erase(buffer_names.front()); buffer_names.pop(); }; for(auto it = index_buffers.begin(); it != index_buffers.end();it++) { hipHostFree(it->second); }; }; if(save_dict) { save_col_data(data_dict,"data.dictionary"); }; if(alloced_sz) { hipFree(alloced_tmp); alloced_sz = 0; }; return 0; } //external c global to report errors //char alenka_err[4048]; int alenkaExecute(char *s) { YY_BUFFER_STATE bp; total_buffer_size = 0; scan_state = 0; load_col_data(data_dict, "data.dictionary"); std::clock_t start; if(verbose) start = std::clock(); bp = yy_scan_string(s); yy_switch_to_buffer(bp); int ret = yyparse(); //printf("execute: returned [%d]\n", ret); if(!ret) { if(verbose) cout 
<< "SQL scan parse worked" << endl; } scan_state = 1; load_vars(); statement_count = 0; clean_queues(); bp = yy_scan_string(s); yy_switch_to_buffer(bp); if(!yyparse()) { if(verbose) cout << "SQL scan parse worked " << endl; } else cout << "SQL scan parse failed" << endl; yy_delete_buffer(bp); // Clear Vars for (auto it=varNames.begin() ; it != varNames.end(); ++it ) { (*it).second->free(); }; varNames.clear(); if(verbose) cout<< "statement time " << ( ( std::clock() - start ) / (double)CLOCKS_PER_SEC ) << endl; if(save_dict) save_col_data(data_dict,"data.dictionary"); return ret; }
82f9468f56a01bc52903c2f43b498ea025f3bd5a.cu
/* A Bison parser, made by GNU Bison 3.0.4. */ /* Bison implementation for Yacc-like parsers in C Copyright (C) 1984, 1989-1990, 2000-2015 Free Software Foundation, Inc. This program is free software: you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation, either version 3 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program. If not, see <http://www.gnu.org/licenses/>. */ /* As a special exception, you may create a larger work that contains part or all of the Bison parser skeleton and distribute that work under terms of your choice, so long as that work isn't itself a parser generator using the skeleton or a modified version thereof as a parser skeleton. Alternatively, if you modify or redistribute the parser skeleton itself, you may (at your option) remove this special exception, which will cause the skeleton and the resulting Bison output files to be licensed under the GNU General Public License without this special exception. This special exception was added by the Free Software Foundation in version 2.2 of Bison. */ /* C LALR(1) parser skeleton written by Richard Stallman, by simplifying the original so-called "semantic" parser. */ /* All symbols defined below should begin with yy or YY, to avoid infringing on user name space. This should be done even for local variables, as they might otherwise be expanded by user macros. There are some unavoidable exceptions within include files to define necessary library symbols; they are noted "INFRINGES ON USER NAME SPACE" below. */ /* Identify Bison output. */ #define YYBISON 1 /* Bison version. */ #define YYBISON_VERSION "3.0.4" /* Skeleton name. */ #define YYSKELETON_NAME "yacc.c" /* Pure parsers. */ #define YYPURE 0 /* Push parsers. */ #define YYPUSH 0 /* Pull parsers. */ #define YYPULL 1 /* Copy the first part of user declarations. */ #line 15 "bison.y" /* yacc.c:339 */ #include "lex.yy.c" #include "cm.h" #include "operators.h" #line 76 "bison.cu" /* yacc.c:339 */ # ifndef YY_NULLPTR # if defined __cplusplus && 201103L <= __cplusplus # define YY_NULLPTR nullptr # else # define YY_NULLPTR 0 # endif # endif /* Enabling verbose error messages. */ #ifdef YYERROR_VERBOSE # undef YYERROR_VERBOSE # define YYERROR_VERBOSE 1 #else # define YYERROR_VERBOSE 0 #endif /* Debug traces. */ #ifndef YYDEBUG # define YYDEBUG 0 #endif #if YYDEBUG extern int yydebug; #endif /* Token type. 
*/ #ifndef YYTOKENTYPE # define YYTOKENTYPE enum yytokentype { FILENAME = 258, NAME = 259, STRING = 260, INTNUM = 261, DECIMAL1 = 262, BOOL1 = 263, APPROXNUM = 264, USERVAR = 265, ASSIGN = 266, EQUAL = 267, OR = 268, XOR = 269, AND = 270, DISTINCT = 271, IN = 272, IS = 273, LIKE = 274, REGEXP = 275, NOT = 276, BETWEEN = 277, COMPARISON = 278, SHIFT = 279, MOD = 280, FROM = 281, MULITE = 282, DELETE = 283, LOAD = 284, FILTER = 285, BY = 286, JOIN = 287, STORE = 288, INTO = 289, GROUP = 290, SELECT = 291, AS = 292, ORDER = 293, ASC = 294, DESC = 295, COUNT = 296, USING = 297, SUM = 298, AVG = 299, MIN = 300, MAX = 301, LIMIT = 302, ON = 303, BINARY = 304, LEFT = 305, RIGHT = 306, OUTER = 307, SORT = 308, SEGMENTS = 309, PRESORTED = 310, PARTITION = 311, INSERT = 312, WHERE = 313, DISPLAY = 314, CASE = 315, WHEN = 316, THEN = 317, ELSE = 318, END = 319, REFERENCES = 320, SHOW = 321, TABLES = 322, TABLE = 323, DESCRIBE = 324, DROP = 325, CREATE = 326, INDEX = 327 }; #endif /* Value type. */ #if ! defined YYSTYPE && ! defined YYSTYPE_IS_DECLARED union YYSTYPE { #line 25 "bison.y" /* yacc.c:355 */ long long int intval; double floatval; char *strval; int subtok; #line 193 "bison.cu" /* yacc.c:355 */ }; typedef union YYSTYPE YYSTYPE; # define YYSTYPE_IS_TRIVIAL 1 # define YYSTYPE_IS_DECLARED 1 #endif extern YYSTYPE yylval; int yyparse (void); /* Copy the second part of user declarations. */ #line 210 "bison.cu" /* yacc.c:358 */ #ifdef short # undef short #endif #ifdef YYTYPE_UINT8 typedef YYTYPE_UINT8 yytype_uint8; #else typedef unsigned char yytype_uint8; #endif #ifdef YYTYPE_INT8 typedef YYTYPE_INT8 yytype_int8; #else typedef signed char yytype_int8; #endif #ifdef YYTYPE_UINT16 typedef YYTYPE_UINT16 yytype_uint16; #else typedef unsigned short int yytype_uint16; #endif #ifdef YYTYPE_INT16 typedef YYTYPE_INT16 yytype_int16; #else typedef short int yytype_int16; #endif #ifndef YYSIZE_T # ifdef __SIZE_TYPE__ # define YYSIZE_T __SIZE_TYPE__ # elif defined size_t # define YYSIZE_T size_t # elif ! defined YYSIZE_T # include <stddef.h> /* INFRINGES ON USER NAME SPACE */ # define YYSIZE_T size_t # else # define YYSIZE_T unsigned int # endif #endif #define YYSIZE_MAXIMUM ((YYSIZE_T) -1) #ifndef YY_ # if defined YYENABLE_NLS && YYENABLE_NLS # if ENABLE_NLS # include <libintl.h> /* INFRINGES ON USER NAME SPACE */ # define YY_(Msgid) dgettext ("bison-runtime", Msgid) # endif # endif # ifndef YY_ # define YY_(Msgid) Msgid # endif #endif #ifndef YY_ATTRIBUTE # if (defined __GNUC__ \ && (2 < __GNUC__ || (__GNUC__ == 2 && 96 <= __GNUC_MINOR__))) \ || defined __SUNPRO_C && 0x5110 <= __SUNPRO_C # define YY_ATTRIBUTE(Spec) __attribute__(Spec) # else # define YY_ATTRIBUTE(Spec) /* empty */ # endif #endif #ifndef YY_ATTRIBUTE_PURE # define YY_ATTRIBUTE_PURE YY_ATTRIBUTE ((__pure__)) #endif #ifndef YY_ATTRIBUTE_UNUSED # define YY_ATTRIBUTE_UNUSED YY_ATTRIBUTE ((__unused__)) #endif #if !defined _Noreturn \ && (!defined __STDC_VERSION__ || __STDC_VERSION__ < 201112) # if defined _MSC_VER && 1200 <= _MSC_VER # define _Noreturn __declspec (noreturn) # else # define _Noreturn YY_ATTRIBUTE ((__noreturn__)) # endif #endif /* Suppress unused-variable warnings by "using" E. */ #if ! defined lint || defined __GNUC__ # define YYUSE(E) ((void) (E)) #else # define YYUSE(E) /* empty */ #endif #if defined __GNUC__ && 407 <= __GNUC__ * 100 + __GNUC_MINOR__ /* Suppress an incorrect diagnostic about yylval being uninitialized. 
*/ # define YY_IGNORE_MAYBE_UNINITIALIZED_BEGIN \ _Pragma ("GCC diagnostic push") \ _Pragma ("GCC diagnostic ignored \"-Wuninitialized\"")\ _Pragma ("GCC diagnostic ignored \"-Wmaybe-uninitialized\"") # define YY_IGNORE_MAYBE_UNINITIALIZED_END \ _Pragma ("GCC diagnostic pop") #else # define YY_INITIAL_VALUE(Value) Value #endif #ifndef YY_IGNORE_MAYBE_UNINITIALIZED_BEGIN # define YY_IGNORE_MAYBE_UNINITIALIZED_BEGIN # define YY_IGNORE_MAYBE_UNINITIALIZED_END #endif #ifndef YY_INITIAL_VALUE # define YY_INITIAL_VALUE(Value) /* Nothing. */ #endif #if ! defined yyoverflow || YYERROR_VERBOSE /* The parser invokes alloca or malloc; define the necessary symbols. */ # ifdef YYSTACK_USE_ALLOCA # if YYSTACK_USE_ALLOCA # ifdef __GNUC__ # define YYSTACK_ALLOC __builtin_alloca # elif defined __BUILTIN_VA_ARG_INCR # include <alloca.h> /* INFRINGES ON USER NAME SPACE */ # elif defined _AIX # define YYSTACK_ALLOC __alloca # elif defined _MSC_VER # include <malloc.h> /* INFRINGES ON USER NAME SPACE */ # define alloca _alloca # else # define YYSTACK_ALLOC alloca # if ! defined _ALLOCA_H && ! defined EXIT_SUCCESS # include <stdlib.h> /* INFRINGES ON USER NAME SPACE */ /* Use EXIT_SUCCESS as a witness for stdlib.h. */ # ifndef EXIT_SUCCESS # define EXIT_SUCCESS 0 # endif # endif # endif # endif # endif # ifdef YYSTACK_ALLOC /* Pacify GCC's 'empty if-body' warning. */ # define YYSTACK_FREE(Ptr) do { /* empty */; } while (0) # ifndef YYSTACK_ALLOC_MAXIMUM /* The OS might guarantee only one guard page at the bottom of the stack, and a page size can be as small as 4096 bytes. So we cannot safely invoke alloca (N) if N exceeds 4096. Use a slightly smaller number to allow for a few compiler-allocated temporary stack slots. */ # define YYSTACK_ALLOC_MAXIMUM 4032 /* reasonable circa 2006 */ # endif # else # define YYSTACK_ALLOC YYMALLOC # define YYSTACK_FREE YYFREE # ifndef YYSTACK_ALLOC_MAXIMUM # define YYSTACK_ALLOC_MAXIMUM YYSIZE_MAXIMUM # endif # if (defined __cplusplus && ! defined EXIT_SUCCESS \ && ! ((defined YYMALLOC || defined malloc) \ && (defined YYFREE || defined free))) # include <stdlib.h> /* INFRINGES ON USER NAME SPACE */ # ifndef EXIT_SUCCESS # define EXIT_SUCCESS 0 # endif # endif # ifndef YYMALLOC # define YYMALLOC malloc # if ! defined malloc && ! defined EXIT_SUCCESS void *malloc (YYSIZE_T); /* INFRINGES ON USER NAME SPACE */ # endif # endif # ifndef YYFREE # define YYFREE free # if ! defined free && ! defined EXIT_SUCCESS void free (void *); /* INFRINGES ON USER NAME SPACE */ # endif # endif # endif #endif /* ! defined yyoverflow || YYERROR_VERBOSE */ #if (! defined yyoverflow \ && (! defined __cplusplus \ || (defined YYSTYPE_IS_TRIVIAL && YYSTYPE_IS_TRIVIAL))) /* A type that is properly aligned for any stack member. */ union yyalloc { yytype_int16 yyss_alloc; YYSTYPE yyvs_alloc; }; /* The size of the maximum gap between one aligned stack and the next. */ # define YYSTACK_GAP_MAXIMUM (sizeof (union yyalloc) - 1) /* The size of an array large to enough to hold all stacks, each with N elements. */ # define YYSTACK_BYTES(N) \ ((N) * (sizeof (yytype_int16) + sizeof (YYSTYPE)) \ + YYSTACK_GAP_MAXIMUM) # define YYCOPY_NEEDED 1 /* Relocate STACK from its old location to the new one. The local variables YYSIZE and YYSTACKSIZE give the old and new number of elements in the stack, and YYPTR gives the new location of the stack. Advance YYPTR to a properly aligned location for the next stack. 
*/ # define YYSTACK_RELOCATE(Stack_alloc, Stack) \ do \ { \ YYSIZE_T yynewbytes; \ YYCOPY (&yyptr->Stack_alloc, Stack, yysize); \ Stack = &yyptr->Stack_alloc; \ yynewbytes = yystacksize * sizeof (*Stack) + YYSTACK_GAP_MAXIMUM; \ yyptr += yynewbytes / sizeof (*yyptr); \ } \ while (0) #endif #if defined YYCOPY_NEEDED && YYCOPY_NEEDED /* Copy COUNT objects from SRC to DST. The source and destination do not overlap. */ # ifndef YYCOPY # if defined __GNUC__ && 1 < __GNUC__ # define YYCOPY(Dst, Src, Count) \ __builtin_memcpy (Dst, Src, (Count) * sizeof (*(Src))) # else # define YYCOPY(Dst, Src, Count) \ do \ { \ YYSIZE_T yyi; \ for (yyi = 0; yyi < (Count); yyi++) \ (Dst)[yyi] = (Src)[yyi]; \ } \ while (0) # endif # endif #endif /* !YYCOPY_NEEDED */ /* YYFINAL -- State number of the termination state. */ #define YYFINAL 22 /* YYLAST -- Last index in YYTABLE. */ #define YYLAST 679 /* YYNTOKENS -- Number of terminals. */ #define YYNTOKENS 90 /* YYNNTS -- Number of nonterminals. */ #define YYNNTS 14 /* YYNRULES -- Number of rules. */ #define YYNRULES 84 /* YYNSTATES -- Number of states. */ #define YYNSTATES 249 /* YYTRANSLATE[YYX] -- Symbol number corresponding to YYX as returned by yylex, with out-of-bounds checking. */ #define YYUNDEFTOK 2 #define YYMAXUTOK 327 #define YYTRANSLATE(YYX) \ ((unsigned int) (YYX) <= YYMAXUTOK ? yytranslate[YYX] : YYUNDEFTOK) /* YYTRANSLATE[TOKEN-NUM] -- Symbol number corresponding to TOKEN-NUM as returned by yylex, without out-of-bounds checking. */ static const yytype_uint8 yytranslate[] = { 0, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 22, 2, 2, 2, 32, 26, 2, 83, 84, 30, 28, 86, 29, 85, 31, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 89, 82, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 34, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 87, 25, 88, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 23, 24, 27, 33, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81 }; #if YYDEBUG /* YYRLINE[YYN] -- Source line where rule number YYN was defined. */ static const yytype_uint16 yyrline[] = { 0, 119, 119, 120, 124, 127, 129, 131, 133, 135, 137, 139, 141, 143, 145, 147, 149, 151, 153, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 181, 182, 183, 184, 185, 186, 188, 189, 190, 191, 192, 193, 194, 195, 196, 198, 199, 200, 204, 205, 208, 211, 215, 216, 217, 221, 222, 226, 227, 230, 232, 235, 239, 240, 241, 242, 243, 244, 245, 246, 248, 251, 253, 256, 257, 258 }; #endif #if YYDEBUG || YYERROR_VERBOSE || 0 /* YYTNAME[SYMBOL-NUM] -- String name of the symbol SYMBOL-NUM. First, the terminals, then, starting at YYNTOKENS, nonterminals. 
*/ static const char *const yytname[] = { "$end", "error", "$undefined", "FILENAME", "NAME", "STRING", "INTNUM", "DECIMAL1", "BOOL1", "APPROXNUM", "USERVAR", "ASSIGN", "EQUAL", "OR", "XOR", "AND", "DISTINCT", "IN", "IS", "LIKE", "REGEXP", "NOT", "'!'", "BETWEEN", "COMPARISON", "'|'", "'&'", "SHIFT", "'+'", "'-'", "'*'", "'/'", "'%'", "MOD", "'^'", "FROM", "MULITE", "DELETE", "LOAD", "FILTER", "BY", "JOIN", "STORE", "INTO", "GROUP", "SELECT", "AS", "ORDER", "ASC", "DESC", "COUNT", "USING", "SUM", "AVG", "MIN", "MAX", "LIMIT", "ON", "BINARY", "LEFT", "RIGHT", "OUTER", "SORT", "SEGMENTS", "PRESORTED", "PARTITION", "INSERT", "WHERE", "DISPLAY", "CASE", "WHEN", "THEN", "ELSE", "END", "REFERENCES", "SHOW", "TABLES", "TABLE", "DESCRIBE", "DROP", "CREATE", "INDEX", "';'", "'('", "')'", "'.'", "','", "'{'", "'}'", "':'", "$accept", "stmt_list", "stmt", "select_stmt", "expr", "opt_group_list", "expr_list", "load_list", "val_list", "opt_val_list", "opt_where", "join_list", "opt_limit", "sort_def", YY_NULLPTR }; #endif # ifdef YYPRINT /* YYTOKNUM[NUM] -- (External) token number corresponding to the (internal) symbol number NUM (which must be that of a token). */ static const yytype_uint16 yytoknum[] = { 0, 256, 257, 258, 259, 260, 261, 262, 263, 264, 265, 266, 267, 268, 269, 270, 271, 272, 273, 274, 275, 276, 33, 277, 278, 124, 38, 279, 43, 45, 42, 47, 37, 280, 94, 281, 282, 283, 284, 285, 286, 287, 288, 289, 290, 291, 292, 293, 294, 295, 296, 297, 298, 299, 300, 301, 302, 303, 304, 305, 306, 307, 308, 309, 310, 311, 312, 313, 314, 315, 316, 317, 318, 319, 320, 321, 322, 323, 324, 325, 326, 327, 59, 40, 41, 46, 44, 123, 125, 58 }; # endif #define YYPACT_NINF -155 #define yypact_value_is_default(Yystate) \ (!!((Yystate) == (-155))) #define YYTABLE_NINF -1 #define yytable_value_is_error(Yytable_value) \ (!!((Yytable_value) == (-1))) /* YYPACT[STATE-NUM] -- Index in YYTABLE of the portion describing STATE-NUM. */ static const yytype_int16 yypact[] = { 42, 4, -2, 32, 1, 54, 9, 76, 17, 14, 3, 18, -155, -8, 92, 56, 97, 51, -155, -155, 108, 109, -155, 33, -155, 111, 112, 137, 114, 66, 120, 93, 57, -155, 95, -155, 88, 115, -44, -155, -155, -155, -155, -155, -155, 192, 192, 192, -155, 71, 73, 83, 85, 86, 79, 192, 519, -33, 130, 192, -40, 137, 168, 170, 89, 192, -155, -155, -155, 171, 175, 623, 224, 224, 192, 192, 192, 192, 192, 192, 289, 192, 192, 192, 192, 0, 192, 217, 192, 192, 192, 192, 192, 192, 192, 172, 173, 192, 192, 568, 99, 177, 126, -18, 101, 103, 185, 568, -155, 105, 311, 333, 356, 378, 400, 473, -155, 568, 588, 607, 623, -155, 186, 639, 82, 646, 98, 44, 44, -155, -155, -155, -155, -155, -35, 545, 264, -155, -155, 200, -155, -50, 201, 148, 203, 125, 121, -155, -155, -155, -155, -155, 192, -155, 24, 127, 208, 176, 174, 178, 187, -155, 188, 213, 192, 134, 166, 190, -155, -155, -155, 146, 189, 230, 445, -155, 179, 192, 233, 236, 237, -155, -155, -155, 148, 209, 239, 246, 180, -61, 192, 192, -155, 202, 205, 207, -155, 254, -155, 181, 192, 256, 260, 423, 495, 192, 192, 192, 215, 238, 568, -57, 191, 184, -155, -155, 495, 495, 495, 241, 280, -155, 192, 281, 216, -155, -155, -155, 283, 212, 568, 221, 295, -155, 302, -155, 204, 242, 306, 307, 228, 229, -155, 323, 303, 324, 247, 327, -155 }; /* YYDEFACT[STATE-NUM] -- Default reduction number in state STATE-NUM. Performed when YYTABLE does not specify something else to do. Zero means the default is an error. 
*/ static const yytype_uint8 yydefact[] = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 4, 0, 0, 0, 0, 0, 16, 12, 0, 0, 1, 0, 2, 0, 0, 0, 0, 0, 0, 0, 0, 17, 0, 3, 0, 0, 19, 22, 23, 25, 26, 24, 21, 0, 0, 0, 63, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 79, 0, 0, 0, 0, 0, 7, 31, 32, 0, 0, 38, 50, 51, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 68, 14, 0, 0, 0, 0, 0, 0, 0, 70, 20, 0, 0, 0, 0, 0, 0, 0, 55, 46, 47, 48, 45, 57, 0, 53, 0, 52, 49, 39, 40, 41, 42, 43, 44, 61, 59, 0, 66, 69, 8, 0, 80, 81, 0, 79, 0, 0, 0, 33, 34, 35, 36, 37, 0, 58, 19, 0, 0, 0, 0, 0, 0, 5, 59, 0, 0, 0, 0, 0, 11, 13, 15, 0, 0, 0, 0, 54, 0, 0, 0, 0, 0, 9, 62, 67, 79, 0, 0, 0, 0, 30, 0, 0, 60, 0, 0, 0, 10, 0, 84, 0, 0, 0, 0, 0, 71, 0, 0, 0, 82, 0, 64, 0, 0, 0, 56, 75, 72, 73, 74, 0, 0, 6, 0, 0, 28, 76, 77, 78, 0, 0, 65, 0, 0, 83, 0, 29, 0, 0, 0, 0, 0, 0, 27, 0, 0, 0, 0, 0, 18 }; /* YYPGOTO[NTERM-NUM]. */ static const yytype_int16 yypgoto[] = { -155, -155, 326, 210, -27, 193, 272, -155, -154, -155, -155, -111, -142, -155 }; /* YYDEFGOTO[NTERM-NUM]. */ static const yytype_int16 yydefgoto[] = { -1, 10, 11, 12, 136, 161, 57, 211, 137, 138, 66, 162, 102, 168 }; /* YYTABLE[YYPACT[STATE-NUM]] -- What to do in state STATE-NUM. If positive, shift that token. If negative, reduce the rule whose number is the opposite. If YYTABLE_NINF, syntax error. */ static const yytype_int16 yytable[] = { 56, 170, 96, 22, 67, 68, 156, 1, 121, 157, 183, 100, 166, 201, 167, 13, 101, 142, 71, 72, 73, 122, 202, 192, 158, 159, 160, 221, 80, 222, 25, 26, 99, 14, 56, 13, 15, 27, 107, 28, 2, 69, 196, 70, 16, 3, 1, 110, 111, 112, 113, 114, 115, 97, 117, 118, 119, 120, 17, 123, 125, 126, 127, 128, 129, 130, 131, 132, 97, 4, 135, 5, 67, 68, 91, 92, 93, 94, 6, 2, 19, 7, 8, 9, 3, 18, 154, 39, 40, 41, 42, 43, 44, 215, 20, 21, 29, 80, 45, 30, 24, 31, 32, 46, 47, 225, 226, 227, 4, 69, 5, 70, 33, 34, 36, 35, 37, 6, 58, 2, 7, 8, 9, 60, 3, 174, 89, 90, 91, 92, 93, 94, 49, 59, 50, 51, 52, 53, 61, 64, 62, 38, 39, 40, 41, 42, 43, 44, 4, 79, 5, 54, 63, 45, 74, 65, 75, 6, 46, 47, 7, 8, 9, 203, 204, 55, 76, 48, 77, 78, 98, 104, 106, 210, 105, 108, 133, 134, 216, 217, 218, 109, 139, 140, 141, 143, 144, 49, 145, 50, 51, 52, 53, 146, 153, 230, 38, 39, 40, 41, 42, 43, 44, 165, 101, 169, 54, 171, 45, 172, 173, 175, 176, 46, 47, 178, 177, 182, 184, 179, 55, 38, 39, 40, 41, 42, 43, 44, 180, 185, 186, 187, 157, 45, 189, 188, 191, 193, 46, 47, 194, 195, 49, 198, 50, 51, 52, 53, 87, 197, 199, 88, 89, 90, 91, 92, 93, 94, 208, 205, 212, 54, 206, 200, 207, 209, 213, 49, 224, 50, 51, 52, 53, 220, 223, 55, 81, 82, 83, 84, 219, 228, 85, 86, 229, 231, 54, 238, 87, 233, 232, 88, 89, 90, 91, 92, 93, 94, 234, 236, 124, 81, 82, 83, 84, 235, 237, 85, 86, 239, 240, 241, 242, 87, 243, 245, 88, 89, 90, 91, 92, 93, 94, 81, 82, 83, 84, 244, 246, 85, 86, 248, 247, 103, 155, 87, 23, 0, 88, 89, 90, 91, 92, 93, 94, 81, 82, 83, 84, 0, 164, 85, 86, 0, 0, 181, 0, 87, 0, 0, 88, 89, 90, 91, 92, 93, 94, 0, 81, 82, 83, 84, 0, 116, 85, 86, 0, 0, 0, 0, 87, 0, 0, 88, 89, 90, 91, 92, 93, 94, 81, 82, 83, 84, 0, 147, 85, 86, 0, 0, 0, 0, 87, 0, 0, 88, 89, 90, 91, 92, 93, 94, 81, 82, 83, 84, 0, 148, 85, 86, 0, 0, 0, 0, 87, 0, 0, 88, 89, 90, 91, 92, 93, 94, 0, 81, 82, 83, 84, 0, 149, 85, 86, 0, 0, 0, 0, 87, 0, 0, 88, 89, 90, 91, 92, 93, 94, 81, 82, 83, 84, 0, 150, 85, 86, 0, 0, 0, 0, 87, 0, 0, 88, 89, 90, 91, 92, 93, 94, 0, 0, 0, 0, 0, 151, 81, 82, 83, 84, 0, 0, 85, 86, 0, 0, 0, 214, 87, 0, 0, 88, 89, 90, 91, 92, 93, 94, 81, 82, 83, 84, 0, 0, 85, 86, 0, 0, 190, 0, 87, 0, 0, 88, 89, 90, 91, 92, 93, 94, 0, 
0, 81, 82, 83, 84, 0, 156, 85, 86, 0, 0, 0, 0, 87, 152, 0, 88, 89, 90, 91, 92, 93, 94, 0, 158, 159, 160, 81, 82, 83, 84, 0, 0, 85, 86, 95, 0, 0, 0, 87, 0, 0, 88, 89, 90, 91, 92, 93, 94, 0, 81, 82, 83, 84, 0, 0, 85, 86, 0, 0, 0, 163, 87, 0, 0, 88, 89, 90, 91, 92, 93, 94, 83, 84, 0, 0, 85, 86, 0, 0, 0, 0, 87, 0, 0, 88, 89, 90, 91, 92, 93, 94, 84, 0, 0, 85, 86, 0, 0, 0, 0, 87, 0, 0, 88, 89, 90, 91, 92, 93, 94, 85, 86, 0, 0, 0, 0, 87, 0, 0, 88, 89, 90, 91, 92, 93, 94, -1, -1, 0, 0, 0, 0, 87, 0, 0, 88, 89, 90, 91, 92, 93, 94, 88, 89, 90, 91, 92, 93, 94 }; static const yytype_int16 yycheck[] = { 27, 143, 35, 0, 48, 49, 41, 4, 8, 44, 164, 51, 62, 74, 64, 11, 56, 35, 45, 46, 47, 21, 83, 177, 59, 60, 61, 84, 55, 86, 38, 39, 59, 35, 61, 11, 4, 45, 65, 47, 37, 85, 184, 87, 43, 42, 4, 74, 75, 76, 77, 78, 79, 86, 81, 82, 83, 84, 4, 86, 87, 88, 89, 90, 91, 92, 93, 94, 86, 66, 97, 68, 48, 49, 30, 31, 32, 33, 75, 37, 4, 78, 79, 80, 42, 76, 4, 5, 6, 7, 8, 9, 10, 204, 77, 81, 4, 124, 16, 43, 82, 4, 51, 21, 22, 216, 217, 218, 66, 85, 68, 87, 4, 4, 3, 82, 4, 75, 4, 37, 78, 79, 80, 3, 42, 152, 28, 29, 30, 31, 32, 33, 50, 67, 52, 53, 54, 55, 45, 51, 83, 4, 5, 6, 7, 8, 9, 10, 66, 70, 68, 69, 57, 16, 83, 40, 83, 75, 21, 22, 78, 79, 80, 190, 191, 83, 83, 30, 83, 83, 40, 3, 83, 200, 4, 4, 4, 4, 205, 206, 207, 6, 83, 6, 58, 84, 83, 50, 3, 52, 53, 54, 55, 88, 8, 222, 4, 5, 6, 7, 8, 9, 10, 3, 56, 4, 69, 4, 16, 84, 89, 84, 4, 21, 22, 41, 40, 4, 84, 41, 83, 4, 5, 6, 7, 8, 9, 10, 41, 63, 40, 85, 44, 16, 4, 46, 57, 4, 21, 22, 4, 4, 50, 4, 52, 53, 54, 55, 24, 40, 4, 27, 28, 29, 30, 31, 32, 33, 4, 57, 4, 69, 57, 83, 57, 84, 6, 50, 84, 52, 53, 54, 55, 35, 83, 83, 12, 13, 14, 15, 65, 40, 18, 19, 4, 4, 69, 83, 24, 6, 74, 27, 28, 29, 30, 31, 32, 33, 86, 4, 83, 12, 13, 14, 15, 84, 4, 18, 19, 67, 4, 4, 84, 24, 85, 12, 27, 28, 29, 30, 31, 32, 33, 12, 13, 14, 15, 4, 4, 18, 19, 4, 85, 61, 124, 24, 10, -1, 27, 28, 29, 30, 31, 32, 33, 12, 13, 14, 15, -1, 86, 18, 19, -1, -1, 162, -1, 24, -1, -1, 27, 28, 29, 30, 31, 32, 33, -1, 12, 13, 14, 15, -1, 84, 18, 19, -1, -1, -1, -1, 24, -1, -1, 27, 28, 29, 30, 31, 32, 33, 12, 13, 14, 15, -1, 84, 18, 19, -1, -1, -1, -1, 24, -1, -1, 27, 28, 29, 30, 31, 32, 33, 12, 13, 14, 15, -1, 84, 18, 19, -1, -1, -1, -1, 24, -1, -1, 27, 28, 29, 30, 31, 32, 33, -1, 12, 13, 14, 15, -1, 84, 18, 19, -1, -1, -1, -1, 24, -1, -1, 27, 28, 29, 30, 31, 32, 33, 12, 13, 14, 15, -1, 84, 18, 19, -1, -1, -1, -1, 24, -1, -1, 27, 28, 29, 30, 31, 32, 33, -1, -1, -1, -1, -1, 84, 12, 13, 14, 15, -1, -1, 18, 19, -1, -1, -1, 73, 24, -1, -1, 27, 28, 29, 30, 31, 32, 33, 12, 13, 14, 15, -1, -1, 18, 19, -1, -1, 72, -1, 24, -1, -1, 27, 28, 29, 30, 31, 32, 33, -1, -1, 12, 13, 14, 15, -1, 41, 18, 19, -1, -1, -1, -1, 24, 71, -1, 27, 28, 29, 30, 31, 32, 33, -1, 59, 60, 61, 12, 13, 14, 15, -1, -1, 18, 19, 46, -1, -1, -1, 24, -1, -1, 27, 28, 29, 30, 31, 32, 33, -1, 12, 13, 14, 15, -1, -1, 18, 19, -1, -1, -1, 46, 24, -1, -1, 27, 28, 29, 30, 31, 32, 33, 14, 15, -1, -1, 18, 19, -1, -1, -1, -1, 24, -1, -1, 27, 28, 29, 30, 31, 32, 33, 15, -1, -1, 18, 19, -1, -1, -1, -1, 24, -1, -1, 27, 28, 29, 30, 31, 32, 33, 18, 19, -1, -1, -1, -1, 24, -1, -1, 27, 28, 29, 30, 31, 32, 33, 18, 19, -1, -1, -1, -1, 24, -1, -1, 27, 28, 29, 30, 31, 32, 33, 27, 28, 29, 30, 31, 32, 33 }; /* YYSTOS[STATE-NUM] -- The (internal number of the) accessing symbol of state STATE-NUM. 
*/ static const yytype_uint8 yystos[] = { 0, 4, 37, 42, 66, 68, 75, 78, 79, 80, 91, 92, 93, 11, 35, 4, 43, 4, 76, 4, 77, 81, 0, 92, 82, 38, 39, 45, 47, 4, 43, 4, 51, 4, 4, 82, 3, 4, 4, 5, 6, 7, 8, 9, 10, 16, 21, 22, 30, 50, 52, 53, 54, 55, 69, 83, 94, 96, 4, 67, 3, 45, 83, 57, 51, 40, 100, 48, 49, 85, 87, 94, 94, 94, 83, 83, 83, 83, 83, 70, 94, 12, 13, 14, 15, 18, 19, 24, 27, 28, 29, 30, 31, 32, 33, 46, 35, 86, 40, 94, 51, 56, 102, 96, 3, 4, 83, 94, 4, 6, 94, 94, 94, 94, 94, 94, 84, 94, 94, 94, 94, 8, 21, 94, 83, 94, 94, 94, 94, 94, 94, 94, 94, 4, 4, 94, 94, 98, 99, 83, 6, 58, 35, 84, 83, 3, 88, 84, 84, 84, 84, 84, 71, 8, 4, 93, 41, 44, 59, 60, 61, 95, 101, 46, 86, 3, 62, 64, 103, 4, 102, 4, 84, 89, 94, 84, 4, 40, 41, 41, 41, 95, 4, 98, 84, 63, 40, 85, 46, 4, 72, 57, 98, 4, 4, 4, 102, 40, 4, 4, 83, 74, 83, 94, 94, 57, 57, 57, 4, 84, 94, 97, 4, 6, 73, 101, 94, 94, 94, 65, 35, 84, 86, 83, 84, 101, 101, 101, 40, 4, 94, 4, 74, 6, 86, 84, 4, 4, 83, 67, 4, 4, 84, 85, 4, 12, 4, 85, 4 }; /* YYR1[YYN] -- Symbol number of symbol that rule YYN derives. */ static const yytype_uint8 yyr1[] = { 0, 90, 91, 91, 92, 93, 93, 93, 93, 93, 93, 93, 93, 93, 93, 93, 93, 93, 93, 94, 94, 94, 94, 94, 94, 94, 94, 94, 94, 94, 94, 94, 94, 94, 94, 94, 94, 94, 94, 94, 94, 94, 94, 94, 94, 94, 94, 94, 94, 94, 94, 94, 94, 94, 94, 94, 94, 94, 94, 95, 95, 96, 96, 96, 97, 97, 98, 98, 99, 99, 100, 101, 101, 101, 101, 101, 101, 101, 101, 102, 102, 103, 103, 103, 103 }; /* YYR2[YYN] -- Number of symbols on the right hand side of rule YYN. */ static const yytype_uint8 yyr2[] = { 0, 2, 2, 3, 1, 7, 12, 5, 6, 8, 9, 7, 2, 7, 5, 7, 2, 3, 22, 1, 3, 1, 1, 1, 1, 1, 1, 14, 9, 11, 6, 2, 2, 4, 4, 4, 4, 4, 2, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 2, 2, 3, 3, 5, 3, 8, 3, 4, 0, 3, 3, 5, 1, 1, 3, 1, 3, 0, 1, 2, 4, 5, 5, 5, 5, 6, 6, 6, 0, 2, 0, 4, 7, 3 }; #define yyerrok (yyerrstatus = 0) #define yyclearin (yychar = YYEMPTY) #define YYEMPTY (-2) #define YYEOF 0 #define YYACCEPT goto yyacceptlab #define YYABORT goto yyabortlab #define YYERROR goto yyerrorlab #define YYRECOVERING() (!!yyerrstatus) #define YYBACKUP(Token, Value) \ do \ if (yychar == YYEMPTY) \ { \ yychar = (Token); \ yylval = (Value); \ YYPOPSTACK (yylen); \ yystate = *yyssp; \ goto yybackup; \ } \ else \ { \ yyerror (YY_("syntax error: cannot back up")); \ YYERROR; \ } \ while (0) /* Error token number */ #define YYTERROR 1 #define YYERRCODE 256 /* Enable debugging if requested. */ #if YYDEBUG # ifndef YYFPRINTF # include <stdio.h> /* INFRINGES ON USER NAME SPACE */ # define YYFPRINTF fprintf # endif # define YYDPRINTF(Args) \ do { \ if (yydebug) \ YYFPRINTF Args; \ } while (0) /* This macro is provided for backward compatibility. */ #ifndef YY_LOCATION_PRINT # define YY_LOCATION_PRINT(File, Loc) ((void) 0) #endif # define YY_SYMBOL_PRINT(Title, Type, Value, Location) \ do { \ if (yydebug) \ { \ YYFPRINTF (stderr, "%s ", Title); \ yy_symbol_print (stderr, \ Type, Value); \ YYFPRINTF (stderr, "\n"); \ } \ } while (0) /*----------------------------------------. | Print this symbol's value on YYOUTPUT. | `----------------------------------------*/ static void yy_symbol_value_print (FILE *yyoutput, int yytype, YYSTYPE const * const yyvaluep) { FILE *yyo = yyoutput; YYUSE (yyo); if (!yyvaluep) return; # ifdef YYPRINT if (yytype < YYNTOKENS) YYPRINT (yyoutput, yytoknum[yytype], *yyvaluep); # endif YYUSE (yytype); } /*--------------------------------. | Print this symbol on YYOUTPUT. 
| `--------------------------------*/ static void yy_symbol_print (FILE *yyoutput, int yytype, YYSTYPE const * const yyvaluep) { YYFPRINTF (yyoutput, "%s %s (", yytype < YYNTOKENS ? "token" : "nterm", yytname[yytype]); yy_symbol_value_print (yyoutput, yytype, yyvaluep); YYFPRINTF (yyoutput, ")"); } /*------------------------------------------------------------------. | yy_stack_print -- Print the state stack from its BOTTOM up to its | | TOP (included). | `------------------------------------------------------------------*/ static void yy_stack_print (yytype_int16 *yybottom, yytype_int16 *yytop) { YYFPRINTF (stderr, "Stack now"); for (; yybottom <= yytop; yybottom++) { int yybot = *yybottom; YYFPRINTF (stderr, " %d", yybot); } YYFPRINTF (stderr, "\n"); } # define YY_STACK_PRINT(Bottom, Top) \ do { \ if (yydebug) \ yy_stack_print ((Bottom), (Top)); \ } while (0) /*------------------------------------------------. | Report that the YYRULE is going to be reduced. | `------------------------------------------------*/ static void yy_reduce_print (yytype_int16 *yyssp, YYSTYPE *yyvsp, int yyrule) { unsigned long int yylno = yyrline[yyrule]; int yynrhs = yyr2[yyrule]; int yyi; YYFPRINTF (stderr, "Reducing stack by rule %d (line %lu):\n", yyrule - 1, yylno); /* The symbols being reduced. */ for (yyi = 0; yyi < yynrhs; yyi++) { YYFPRINTF (stderr, " $%d = ", yyi + 1); yy_symbol_print (stderr, yystos[yyssp[yyi + 1 - yynrhs]], &(yyvsp[(yyi + 1) - (yynrhs)]) ); YYFPRINTF (stderr, "\n"); } } # define YY_REDUCE_PRINT(Rule) \ do { \ if (yydebug) \ yy_reduce_print (yyssp, yyvsp, Rule); \ } while (0) /* Nonzero means print parse trace. It is left uninitialized so that multiple parsers can coexist. */ int yydebug; #else /* !YYDEBUG */ # define YYDPRINTF(Args) # define YY_SYMBOL_PRINT(Title, Type, Value, Location) # define YY_STACK_PRINT(Bottom, Top) # define YY_REDUCE_PRINT(Rule) #endif /* !YYDEBUG */ /* YYINITDEPTH -- initial size of the parser's stacks. */ #ifndef YYINITDEPTH # define YYINITDEPTH 200 #endif /* YYMAXDEPTH -- maximum size the stacks can grow to (effective only if the built-in stack extension method is used). Do not make this value too large; the results are undefined if YYSTACK_ALLOC_MAXIMUM < YYSTACK_BYTES (YYMAXDEPTH) evaluated with infinite-precision integer arithmetic. */ #ifndef YYMAXDEPTH # define YYMAXDEPTH 10000 #endif #if YYERROR_VERBOSE # ifndef yystrlen # if defined __GLIBC__ && defined _STRING_H # define yystrlen strlen # else /* Return the length of YYSTR. */ static YYSIZE_T yystrlen (const char *yystr) { YYSIZE_T yylen; for (yylen = 0; yystr[yylen]; yylen++) continue; return yylen; } # endif # endif # ifndef yystpcpy # if defined __GLIBC__ && defined _STRING_H && defined _GNU_SOURCE # define yystpcpy stpcpy # else /* Copy YYSRC to YYDEST, returning the address of the terminating '\0' in YYDEST. */ static char * yystpcpy (char *yydest, const char *yysrc) { char *yyd = yydest; const char *yys = yysrc; while ((*yyd++ = *yys++) != '\0') continue; return yyd - 1; } # endif # endif # ifndef yytnamerr /* Copy to YYRES the contents of YYSTR after stripping away unnecessary quotes and backslashes, so that it's suitable for yyerror. The heuristic is that double-quoting is unnecessary unless the string contains an apostrophe, a comma, or backslash (other than backslash-backslash). YYSTR is taken from yytname. If YYRES is null, do not copy; instead, return the length of what the result would have been. 
*/ static YYSIZE_T yytnamerr (char *yyres, const char *yystr) { if (*yystr == '"') { YYSIZE_T yyn = 0; char const *yyp = yystr; for (;;) switch (*++yyp) { case '\'': case ',': goto do_not_strip_quotes; case '\\': if (*++yyp != '\\') goto do_not_strip_quotes; /* Fall through. */ default: if (yyres) yyres[yyn] = *yyp; yyn++; break; case '"': if (yyres) yyres[yyn] = '\0'; return yyn; } do_not_strip_quotes: ; } if (! yyres) return yystrlen (yystr); return yystpcpy (yyres, yystr) - yyres; } # endif /* Copy into *YYMSG, which is of size *YYMSG_ALLOC, an error message about the unexpected token YYTOKEN for the state stack whose top is YYSSP. Return 0 if *YYMSG was successfully written. Return 1 if *YYMSG is not large enough to hold the message. In that case, also set *YYMSG_ALLOC to the required number of bytes. Return 2 if the required number of bytes is too large to store. */ static int yysyntax_error (YYSIZE_T *yymsg_alloc, char **yymsg, yytype_int16 *yyssp, int yytoken) { YYSIZE_T yysize0 = yytnamerr (YY_NULLPTR, yytname[yytoken]); YYSIZE_T yysize = yysize0; enum { YYERROR_VERBOSE_ARGS_MAXIMUM = 5 }; /* Internationalized format string. */ const char *yyformat = YY_NULLPTR; /* Arguments of yyformat. */ char const *yyarg[YYERROR_VERBOSE_ARGS_MAXIMUM]; /* Number of reported tokens (one for the "unexpected", one per "expected"). */ int yycount = 0; /* There are many possibilities here to consider: - If this state is a consistent state with a default action, then the only way this function was invoked is if the default action is an error action. In that case, don't check for expected tokens because there are none. - The only way there can be no lookahead present (in yychar) is if this state is a consistent state with a default action. Thus, detecting the absence of a lookahead is sufficient to determine that there is no unexpected or expected token to report. In that case, just report a simple "syntax error". - Don't assume there isn't a lookahead just because this state is a consistent state with a default action. There might have been a previous inconsistent state, consistent state with a non-default action, or user semantic action that manipulated yychar. - Of course, the expected token list depends on states to have correct lookahead information, and it depends on the parser not to perform extra reductions after fetching a lookahead from the scanner and before detecting a syntax error. Thus, state merging (from LALR or IELR) and default reductions corrupt the expected token list. However, the list is correct for canonical LR with one exception: it will still contain any token that will not be accepted due to an error action in a later state. */ if (yytoken != YYEMPTY) { int yyn = yypact[*yyssp]; yyarg[yycount++] = yytname[yytoken]; if (!yypact_value_is_default (yyn)) { /* Start YYX at -YYN if negative to avoid negative indexes in YYCHECK. In other words, skip the first -YYN actions for this state because they are default actions. */ int yyxbegin = yyn < 0 ? -yyn : 0; /* Stay within bounds of both yycheck and yytname. */ int yychecklim = YYLAST - yyn + 1; int yyxend = yychecklim < YYNTOKENS ? yychecklim : YYNTOKENS; int yyx; for (yyx = yyxbegin; yyx < yyxend; ++yyx) if (yycheck[yyx + yyn] == yyx && yyx != YYTERROR && !yytable_value_is_error (yytable[yyx + yyn])) { if (yycount == YYERROR_VERBOSE_ARGS_MAXIMUM) { yycount = 1; yysize = yysize0; break; } yyarg[yycount++] = yytname[yyx]; { YYSIZE_T yysize1 = yysize + yytnamerr (YY_NULLPTR, yytname[yyx]); if (! 
(yysize <= yysize1 && yysize1 <= YYSTACK_ALLOC_MAXIMUM)) return 2; yysize = yysize1; } } } } switch (yycount) { # define YYCASE_(N, S) \ case N: \ yyformat = S; \ break YYCASE_(0, YY_("syntax error")); YYCASE_(1, YY_("syntax error, unexpected %s")); YYCASE_(2, YY_("syntax error, unexpected %s, expecting %s")); YYCASE_(3, YY_("syntax error, unexpected %s, expecting %s or %s")); YYCASE_(4, YY_("syntax error, unexpected %s, expecting %s or %s or %s")); YYCASE_(5, YY_("syntax error, unexpected %s, expecting %s or %s or %s or %s")); # undef YYCASE_ } { YYSIZE_T yysize1 = yysize + yystrlen (yyformat); if (! (yysize <= yysize1 && yysize1 <= YYSTACK_ALLOC_MAXIMUM)) return 2; yysize = yysize1; } if (*yymsg_alloc < yysize) { *yymsg_alloc = 2 * yysize; if (! (yysize <= *yymsg_alloc && *yymsg_alloc <= YYSTACK_ALLOC_MAXIMUM)) *yymsg_alloc = YYSTACK_ALLOC_MAXIMUM; return 1; } /* Avoid sprintf, as that infringes on the user's name space. Don't have undefined behavior even if the translation produced a string with the wrong number of "%s"s. */ { char *yyp = *yymsg; int yyi = 0; while ((*yyp = *yyformat) != '\0') if (*yyp == '%' && yyformat[1] == 's' && yyi < yycount) { yyp += yytnamerr (yyp, yyarg[yyi++]); yyformat += 2; } else { yyp++; yyformat++; } } return 0; } #endif /* YYERROR_VERBOSE */ /*-----------------------------------------------. | Release the memory associated to this symbol. | `-----------------------------------------------*/ static void yydestruct (const char *yymsg, int yytype, YYSTYPE *yyvaluep) { YYUSE (yyvaluep); if (!yymsg) yymsg = "Deleting"; YY_SYMBOL_PRINT (yymsg, yytype, yyvaluep, yylocationp); YY_IGNORE_MAYBE_UNINITIALIZED_BEGIN YYUSE (yytype); YY_IGNORE_MAYBE_UNINITIALIZED_END } /* The lookahead symbol. */ int yychar; /* The semantic value of the lookahead symbol. */ YYSTYPE yylval; /* Number of syntax errors so far. */ int yynerrs; /*----------. | yyparse. | `----------*/ int yyparse (void) { int yystate; /* Number of tokens to shift before error messages enabled. */ int yyerrstatus; /* The stacks and their tools: 'yyss': related to states. 'yyvs': related to semantic values. Refer to the stacks through separate pointers, to allow yyoverflow to reallocate them elsewhere. */ /* The state stack. */ yytype_int16 yyssa[YYINITDEPTH]; yytype_int16 *yyss; yytype_int16 *yyssp; /* The semantic value stack. */ YYSTYPE yyvsa[YYINITDEPTH]; YYSTYPE *yyvs; YYSTYPE *yyvsp; YYSIZE_T yystacksize; int yyn; int yyresult; /* Lookahead token as an internal (translated) token number. */ int yytoken = 0; /* The variables used to return semantic value and location from the action routines. */ YYSTYPE yyval; #if YYERROR_VERBOSE /* Buffer for error messages, and its allocated size. */ char yymsgbuf[128]; char *yymsg = yymsgbuf; YYSIZE_T yymsg_alloc = sizeof yymsgbuf; #endif #define YYPOPSTACK(N) (yyvsp -= (N), yyssp -= (N)) /* The number of symbols on the RHS of the reduced rule. Keep to zero when no symbol should be popped. */ int yylen = 0; yyssp = yyss = yyssa; yyvsp = yyvs = yyvsa; yystacksize = YYINITDEPTH; YYDPRINTF ((stderr, "Starting parse\n")); yystate = 0; yyerrstatus = 0; yynerrs = 0; yychar = YYEMPTY; /* Cause a token to be read. */ goto yysetstate; /*------------------------------------------------------------. | yynewstate -- Push a new state, which is found in yystate. | `------------------------------------------------------------*/ yynewstate: /* In all cases, when you get here, the value and location stacks have just been pushed. So pushing a state here evens the stacks. 
*/ yyssp++; yysetstate: *yyssp = yystate; if (yyss + yystacksize - 1 <= yyssp) { /* Get the current used size of the three stacks, in elements. */ YYSIZE_T yysize = yyssp - yyss + 1; #ifdef yyoverflow { /* Give user a chance to reallocate the stack. Use copies of these so that the &'s don't force the real ones into memory. */ YYSTYPE *yyvs1 = yyvs; yytype_int16 *yyss1 = yyss; /* Each stack pointer address is followed by the size of the data in use in that stack, in bytes. This used to be a conditional around just the two extra args, but that might be undefined if yyoverflow is a macro. */ yyoverflow (YY_("memory exhausted"), &yyss1, yysize * sizeof (*yyssp), &yyvs1, yysize * sizeof (*yyvsp), &yystacksize); yyss = yyss1; yyvs = yyvs1; } #else /* no yyoverflow */ # ifndef YYSTACK_RELOCATE goto yyexhaustedlab; # else /* Extend the stack our own way. */ if (YYMAXDEPTH <= yystacksize) goto yyexhaustedlab; yystacksize *= 2; if (YYMAXDEPTH < yystacksize) yystacksize = YYMAXDEPTH; { yytype_int16 *yyss1 = yyss; union yyalloc *yyptr = (union yyalloc *) YYSTACK_ALLOC (YYSTACK_BYTES (yystacksize)); if (! yyptr) goto yyexhaustedlab; YYSTACK_RELOCATE (yyss_alloc, yyss); YYSTACK_RELOCATE (yyvs_alloc, yyvs); # undef YYSTACK_RELOCATE if (yyss1 != yyssa) YYSTACK_FREE (yyss1); } # endif #endif /* no yyoverflow */ yyssp = yyss + yysize - 1; yyvsp = yyvs + yysize - 1; YYDPRINTF ((stderr, "Stack size increased to %lu\n", (unsigned long int) yystacksize)); if (yyss + yystacksize - 1 <= yyssp) YYABORT; } YYDPRINTF ((stderr, "Entering state %d\n", yystate)); if (yystate == YYFINAL) YYACCEPT; goto yybackup; /*-----------. | yybackup. | `-----------*/ yybackup: /* Do appropriate processing given the current state. Read a lookahead token if we need one and don't already have one. */ /* First try to decide what to do without reference to lookahead token. */ yyn = yypact[yystate]; if (yypact_value_is_default (yyn)) goto yydefault; /* Not known => get a lookahead token if don't already have one. */ /* YYCHAR is either YYEMPTY or YYEOF or a valid lookahead symbol. */ if (yychar == YYEMPTY) { YYDPRINTF ((stderr, "Reading a token: ")); yychar = yylex (); } if (yychar <= YYEOF) { yychar = yytoken = YYEOF; YYDPRINTF ((stderr, "Now at end of input.\n")); } else { yytoken = YYTRANSLATE (yychar); YY_SYMBOL_PRINT ("Next token is", yytoken, &yylval, &yylloc); } /* If the proper action on seeing token YYTOKEN is to reduce or to detect an error, take that action. */ yyn += yytoken; if (yyn < 0 || YYLAST < yyn || yycheck[yyn] != yytoken) goto yydefault; yyn = yytable[yyn]; if (yyn <= 0) { if (yytable_value_is_error (yyn)) goto yyerrlab; yyn = -yyn; goto yyreduce; } /* Count tokens shifted since error; after three, turn off error status. */ if (yyerrstatus) yyerrstatus--; /* Shift the lookahead token. */ YY_SYMBOL_PRINT ("Shifting", yytoken, &yylval, &yylloc); /* Discard the shifted token. */ yychar = YYEMPTY; yystate = yyn; YY_IGNORE_MAYBE_UNINITIALIZED_BEGIN *++yyvsp = yylval; YY_IGNORE_MAYBE_UNINITIALIZED_END goto yynewstate; /*-----------------------------------------------------------. | yydefault -- do the default action for the current state. | `-----------------------------------------------------------*/ yydefault: yyn = yydefact[yystate]; if (yyn == 0) goto yyerrlab; goto yyreduce; /*-----------------------------. | yyreduce -- Do a reduction. | `-----------------------------*/ yyreduce: /* yyn is the number of a rule to reduce with. 
*/ yylen = yyr2[yyn]; /* If YYLEN is nonzero, implement the default value of the action: '$$ = $1'. Otherwise, the following line sets YYVAL to garbage. This behavior is undocumented and Bison users should not rely upon it. Assigning to YYVAL unconditionally makes the parser a bit smaller, and it avoids a GCC warning that YYVAL may be used uninitialized. */ yyval = yyvsp[1-yylen]; YY_REDUCE_PRINT (yyn); switch (yyn) { case 4: #line 124 "bison.y" /* yacc.c:1646 */ { emit("STMT"); } #line 1534 "bison.cu" /* yacc.c:1646 */ break; case 5: #line 128 "bison.y" /* yacc.c:1646 */ { emit_select((yyvsp[-6].strval), (yyvsp[-1].strval), (yyvsp[0].intval)); } #line 1540 "bison.cu" /* yacc.c:1646 */ break; case 6: #line 130 "bison.y" /* yacc.c:1646 */ { emit_load((yyvsp[-11].strval), (yyvsp[-8].strval), (yyvsp[-1].intval), (yyvsp[-5].strval)); } #line 1546 "bison.cu" /* yacc.c:1646 */ break; case 7: #line 132 "bison.y" /* yacc.c:1646 */ { emit_filter((yyvsp[-4].strval), (yyvsp[-1].strval));} #line 1552 "bison.cu" /* yacc.c:1646 */ break; case 8: #line 134 "bison.y" /* yacc.c:1646 */ { emit_order((yyvsp[-5].strval), (yyvsp[-2].strval), (yyvsp[0].intval));} #line 1558 "bison.cu" /* yacc.c:1646 */ break; case 9: #line 136 "bison.y" /* yacc.c:1646 */ { emit_join((yyvsp[-7].strval),(yyvsp[-2].strval),(yyvsp[-1].intval),0,-1); } #line 1564 "bison.cu" /* yacc.c:1646 */ break; case 10: #line 138 "bison.y" /* yacc.c:1646 */ { emit_store((yyvsp[-7].strval),(yyvsp[-5].strval),(yyvsp[-2].strval)); } #line 1570 "bison.cu" /* yacc.c:1646 */ break; case 11: #line 140 "bison.y" /* yacc.c:1646 */ { emit_store_binary((yyvsp[-5].strval),(yyvsp[-3].strval)); } #line 1576 "bison.cu" /* yacc.c:1646 */ break; case 12: #line 142 "bison.y" /* yacc.c:1646 */ { emit_describe_table((yyvsp[0].strval));} #line 1582 "bison.cu" /* yacc.c:1646 */ break; case 13: #line 144 "bison.y" /* yacc.c:1646 */ { emit_insert((yyvsp[-4].strval), (yyvsp[0].strval));} #line 1588 "bison.cu" /* yacc.c:1646 */ break; case 14: #line 146 "bison.y" /* yacc.c:1646 */ { emit_delete((yyvsp[-2].strval));} #line 1594 "bison.cu" /* yacc.c:1646 */ break; case 15: #line 148 "bison.y" /* yacc.c:1646 */ { emit_display((yyvsp[-5].strval), (yyvsp[-2].strval));} #line 1600 "bison.cu" /* yacc.c:1646 */ break; case 16: #line 150 "bison.y" /* yacc.c:1646 */ { emit_show_tables();} #line 1606 "bison.cu" /* yacc.c:1646 */ break; case 17: #line 152 "bison.y" /* yacc.c:1646 */ { emit_drop_table((yyvsp[0].strval));} #line 1612 "bison.cu" /* yacc.c:1646 */ break; case 18: #line 154 "bison.y" /* yacc.c:1646 */ { emit_create_bitmap_index((yyvsp[-19].strval), (yyvsp[-17].strval), (yyvsp[-15].strval), (yyvsp[-13].strval), (yyvsp[-4].strval), (yyvsp[0].strval));} #line 1618 "bison.cu" /* yacc.c:1646 */ break; case 19: #line 158 "bison.y" /* yacc.c:1646 */ { emit_name((yyvsp[0].strval)); } #line 1624 "bison.cu" /* yacc.c:1646 */ break; case 20: #line 159 "bison.y" /* yacc.c:1646 */ { emit_fieldname((yyvsp[-2].strval), (yyvsp[0].strval)); } #line 1630 "bison.cu" /* yacc.c:1646 */ break; case 21: #line 160 "bison.y" /* yacc.c:1646 */ { emit("USERVAR %s", (yyvsp[0].strval)); } #line 1636 "bison.cu" /* yacc.c:1646 */ break; case 22: #line 161 "bison.y" /* yacc.c:1646 */ { emit_string((yyvsp[0].strval)); } #line 1642 "bison.cu" /* yacc.c:1646 */ break; case 23: #line 162 "bison.y" /* yacc.c:1646 */ { emit_number((yyvsp[0].intval)); } #line 1648 "bison.cu" /* yacc.c:1646 */ break; case 24: #line 163 "bison.y" /* yacc.c:1646 */ { emit_float((yyvsp[0].floatval)); } #line 1654 "bison.cu" /* 
yacc.c:1646 */ break; case 25: #line 164 "bison.y" /* yacc.c:1646 */ { emit_decimal((yyvsp[0].intval)); } #line 1660 "bison.cu" /* yacc.c:1646 */ break; case 26: #line 165 "bison.y" /* yacc.c:1646 */ { emit("BOOL %d", (yyvsp[0].intval)); } #line 1666 "bison.cu" /* yacc.c:1646 */ break; case 27: #line 166 "bison.y" /* yacc.c:1646 */ { emit_varchar((yyvsp[-13].strval), (yyvsp[-11].intval), (yyvsp[-8].strval), (yyvsp[-6].intval), (yyvsp[-3].strval), (yyvsp[-1].strval));} #line 1672 "bison.cu" /* yacc.c:1646 */ break; case 28: #line 167 "bison.y" /* yacc.c:1646 */ { emit_varchar((yyvsp[-8].strval), (yyvsp[-6].intval), (yyvsp[-3].strval), (yyvsp[-1].intval), "", "");} #line 1678 "bison.cu" /* yacc.c:1646 */ break; case 29: #line 168 "bison.y" /* yacc.c:1646 */ { emit_var((yyvsp[-10].strval), (yyvsp[-8].intval), (yyvsp[-5].strval), (yyvsp[-3].strval), (yyvsp[-1].strval));} #line 1684 "bison.cu" /* yacc.c:1646 */ break; case 30: #line 169 "bison.y" /* yacc.c:1646 */ { emit_var((yyvsp[-5].strval), (yyvsp[-3].intval), (yyvsp[0].strval), "", "");} #line 1690 "bison.cu" /* yacc.c:1646 */ break; case 31: #line 170 "bison.y" /* yacc.c:1646 */ { emit_var_asc((yyvsp[-1].strval));} #line 1696 "bison.cu" /* yacc.c:1646 */ break; case 32: #line 171 "bison.y" /* yacc.c:1646 */ { emit_var_desc((yyvsp[-1].strval));} #line 1702 "bison.cu" /* yacc.c:1646 */ break; case 33: #line 172 "bison.y" /* yacc.c:1646 */ { emit_count(); } #line 1708 "bison.cu" /* yacc.c:1646 */ break; case 34: #line 173 "bison.y" /* yacc.c:1646 */ { emit_sum(); } #line 1714 "bison.cu" /* yacc.c:1646 */ break; case 35: #line 174 "bison.y" /* yacc.c:1646 */ { emit_average(); } #line 1720 "bison.cu" /* yacc.c:1646 */ break; case 36: #line 175 "bison.y" /* yacc.c:1646 */ { emit_min(); } #line 1726 "bison.cu" /* yacc.c:1646 */ break; case 37: #line 176 "bison.y" /* yacc.c:1646 */ { emit_max(); } #line 1732 "bison.cu" /* yacc.c:1646 */ break; case 38: #line 177 "bison.y" /* yacc.c:1646 */ { emit_distinct(); } #line 1738 "bison.cu" /* yacc.c:1646 */ break; case 39: #line 181 "bison.y" /* yacc.c:1646 */ { emit_add(); } #line 1744 "bison.cu" /* yacc.c:1646 */ break; case 40: #line 182 "bison.y" /* yacc.c:1646 */ { emit_minus(); } #line 1750 "bison.cu" /* yacc.c:1646 */ break; case 41: #line 183 "bison.y" /* yacc.c:1646 */ { emit_mul(); } #line 1756 "bison.cu" /* yacc.c:1646 */ break; case 42: #line 184 "bison.y" /* yacc.c:1646 */ { emit_div(); } #line 1762 "bison.cu" /* yacc.c:1646 */ break; case 43: #line 185 "bison.y" /* yacc.c:1646 */ { emit("MOD"); } #line 1768 "bison.cu" /* yacc.c:1646 */ break; case 44: #line 186 "bison.y" /* yacc.c:1646 */ { emit("MOD"); } #line 1774 "bison.cu" /* yacc.c:1646 */ break; case 45: #line 188 "bison.y" /* yacc.c:1646 */ { emit_and(); } #line 1780 "bison.cu" /* yacc.c:1646 */ break; case 46: #line 189 "bison.y" /* yacc.c:1646 */ { emit_eq(); } #line 1786 "bison.cu" /* yacc.c:1646 */ break; case 47: #line 190 "bison.y" /* yacc.c:1646 */ { emit_or(); } #line 1792 "bison.cu" /* yacc.c:1646 */ break; case 48: #line 191 "bison.y" /* yacc.c:1646 */ { emit("XOR"); } #line 1798 "bison.cu" /* yacc.c:1646 */ break; case 49: #line 192 "bison.y" /* yacc.c:1646 */ { emit("SHIFT %s", (yyvsp[-1].subtok)==1?"left":"right"); } #line 1804 "bison.cu" /* yacc.c:1646 */ break; case 50: #line 193 "bison.y" /* yacc.c:1646 */ { emit("NOT"); } #line 1810 "bison.cu" /* yacc.c:1646 */ break; case 51: #line 194 "bison.y" /* yacc.c:1646 */ { emit("NOT"); } #line 1816 "bison.cu" /* yacc.c:1646 */ break; case 52: #line 195 "bison.y" /* 
yacc.c:1646 */ { emit_cmp((yyvsp[-1].subtok)); } #line 1822 "bison.cu" /* yacc.c:1646 */ break; case 53: #line 196 "bison.y" /* yacc.c:1646 */ { emit_cmp(7); } #line 1828 "bison.cu" /* yacc.c:1646 */ break; case 54: #line 198 "bison.y" /* yacc.c:1646 */ { emit("CMPSELECT %d", (yyvsp[-3].subtok)); } #line 1834 "bison.cu" /* yacc.c:1646 */ break; case 55: #line 199 "bison.y" /* yacc.c:1646 */ {emit("EXPR");} #line 1840 "bison.cu" /* yacc.c:1646 */ break; case 56: #line 200 "bison.y" /* yacc.c:1646 */ { emit_case(); } #line 1846 "bison.cu" /* yacc.c:1646 */ break; case 57: #line 204 "bison.y" /* yacc.c:1646 */ { emit("ISBOOL %d", (yyvsp[0].intval)); } #line 1852 "bison.cu" /* yacc.c:1646 */ break; case 58: #line 205 "bison.y" /* yacc.c:1646 */ { emit("ISBOOL %d", (yyvsp[0].intval)); emit("NOT"); } #line 1858 "bison.cu" /* yacc.c:1646 */ break; case 59: #line 208 "bison.y" /* yacc.c:1646 */ { /* nil */ (yyval.intval) = 0; } #line 1866 "bison.cu" /* yacc.c:1646 */ break; case 60: #line 211 "bison.y" /* yacc.c:1646 */ { (yyval.intval) = (yyvsp[0].intval);} #line 1872 "bison.cu" /* yacc.c:1646 */ break; case 61: #line 215 "bison.y" /* yacc.c:1646 */ { (yyval.intval) = 1; emit_sel_name((yyvsp[0].strval));} #line 1878 "bison.cu" /* yacc.c:1646 */ break; case 62: #line 216 "bison.y" /* yacc.c:1646 */ { (yyval.intval) = (yyvsp[-4].intval) + 1; emit_sel_name((yyvsp[0].strval));} #line 1884 "bison.cu" /* yacc.c:1646 */ break; case 63: #line 217 "bison.y" /* yacc.c:1646 */ { emit_sel_name("*");} #line 1890 "bison.cu" /* yacc.c:1646 */ break; case 64: #line 221 "bison.y" /* yacc.c:1646 */ { (yyval.intval) = 1; } #line 1896 "bison.cu" /* yacc.c:1646 */ break; case 65: #line 222 "bison.y" /* yacc.c:1646 */ {(yyval.intval) = (yyvsp[-2].intval) + 1; } #line 1902 "bison.cu" /* yacc.c:1646 */ break; case 66: #line 226 "bison.y" /* yacc.c:1646 */ { (yyval.intval) = 1; } #line 1908 "bison.cu" /* yacc.c:1646 */ break; case 67: #line 227 "bison.y" /* yacc.c:1646 */ { (yyval.intval) = 1 + (yyvsp[0].intval); } #line 1914 "bison.cu" /* yacc.c:1646 */ break; case 68: #line 230 "bison.y" /* yacc.c:1646 */ { /* nil */ (yyval.intval) = 0; } #line 1922 "bison.cu" /* yacc.c:1646 */ break; case 70: #line 235 "bison.y" /* yacc.c:1646 */ { emit("FILTER BY"); } #line 1928 "bison.cu" /* yacc.c:1646 */ break; case 71: #line 239 "bison.y" /* yacc.c:1646 */ { (yyval.intval) = 1; emit_join_tab((yyvsp[-2].strval), 'I');} #line 1934 "bison.cu" /* yacc.c:1646 */ break; case 72: #line 240 "bison.y" /* yacc.c:1646 */ { (yyval.intval) = 1; emit_join_tab((yyvsp[-2].strval), 'L');} #line 1940 "bison.cu" /* yacc.c:1646 */ break; case 73: #line 241 "bison.y" /* yacc.c:1646 */ { (yyval.intval) = 1; emit_join_tab((yyvsp[-2].strval), 'R');} #line 1946 "bison.cu" /* yacc.c:1646 */ break; case 74: #line 242 "bison.y" /* yacc.c:1646 */ { (yyval.intval) = 1; emit_join_tab((yyvsp[-2].strval), 'O');} #line 1952 "bison.cu" /* yacc.c:1646 */ break; case 75: #line 243 "bison.y" /* yacc.c:1646 */ { (yyval.intval) = 1; emit_join_tab((yyvsp[-3].strval), 'I'); } #line 1958 "bison.cu" /* yacc.c:1646 */ break; case 76: #line 244 "bison.y" /* yacc.c:1646 */ { (yyval.intval) = 1; emit_join_tab((yyvsp[-3].strval), 'L'); } #line 1964 "bison.cu" /* yacc.c:1646 */ break; case 77: #line 245 "bison.y" /* yacc.c:1646 */ { (yyval.intval) = 1; emit_join_tab((yyvsp[-3].strval), 'R'); } #line 1970 "bison.cu" /* yacc.c:1646 */ break; case 78: #line 246 "bison.y" /* yacc.c:1646 */ { (yyval.intval) = 1; emit_join_tab((yyvsp[-3].strval), 'O'); } #line 1976 "bison.cu" /* 
yacc.c:1646 */ break; case 79: #line 248 "bison.y" /* yacc.c:1646 */ { /* nil */ (yyval.intval) = 0; } #line 1984 "bison.cu" /* yacc.c:1646 */ break; case 80: #line 251 "bison.y" /* yacc.c:1646 */ { emit_limit((yyvsp[0].intval)); } #line 1990 "bison.cu" /* yacc.c:1646 */ break; case 81: #line 253 "bison.y" /* yacc.c:1646 */ { /* nil */ (yyval.intval) = 0; } #line 1998 "bison.cu" /* yacc.c:1646 */ break; case 82: #line 256 "bison.y" /* yacc.c:1646 */ { emit_sort((yyvsp[0].strval), 0); } #line 2004 "bison.cu" /* yacc.c:1646 */ break; case 83: #line 257 "bison.y" /* yacc.c:1646 */ { emit_sort((yyvsp[-3].strval), (yyvsp[0].intval)); } #line 2010 "bison.cu" /* yacc.c:1646 */ break; case 84: #line 258 "bison.y" /* yacc.c:1646 */ { emit_presort((yyvsp[0].strval)); } #line 2016 "bison.cu" /* yacc.c:1646 */ break; #line 2020 "bison.cu" /* yacc.c:1646 */ default: break; } /* User semantic actions sometimes alter yychar, and that requires that yytoken be updated with the new translation. We take the approach of translating immediately before every use of yytoken. One alternative is translating here after every semantic action, but that translation would be missed if the semantic action invokes YYABORT, YYACCEPT, or YYERROR immediately after altering yychar or if it invokes YYBACKUP. In the case of YYABORT or YYACCEPT, an incorrect destructor might then be invoked immediately. In the case of YYERROR or YYBACKUP, subsequent parser actions might lead to an incorrect destructor call or verbose syntax error message before the lookahead is translated. */ YY_SYMBOL_PRINT ("-> $$ =", yyr1[yyn], &yyval, &yyloc); YYPOPSTACK (yylen); yylen = 0; YY_STACK_PRINT (yyss, yyssp); *++yyvsp = yyval; /* Now 'shift' the result of the reduction. Determine what state that goes to, based on the state we popped back to and the rule number reduced by. */ yyn = yyr1[yyn]; yystate = yypgoto[yyn - YYNTOKENS] + *yyssp; if (0 <= yystate && yystate <= YYLAST && yycheck[yystate] == *yyssp) yystate = yytable[yystate]; else yystate = yydefgoto[yyn - YYNTOKENS]; goto yynewstate; /*--------------------------------------. | yyerrlab -- here on detecting error. | `--------------------------------------*/ yyerrlab: /* Make sure we have latest lookahead translation. See comments at user semantic actions for why this is necessary. */ yytoken = yychar == YYEMPTY ? YYEMPTY : YYTRANSLATE (yychar); /* If not already recovering from an error, report this error. */ if (!yyerrstatus) { ++yynerrs; #if ! YYERROR_VERBOSE yyerror (YY_("syntax error")); #else # define YYSYNTAX_ERROR yysyntax_error (&yymsg_alloc, &yymsg, \ yyssp, yytoken) { char const *yymsgp = YY_("syntax error"); int yysyntax_error_status; yysyntax_error_status = YYSYNTAX_ERROR; if (yysyntax_error_status == 0) yymsgp = yymsg; else if (yysyntax_error_status == 1) { if (yymsg != yymsgbuf) YYSTACK_FREE (yymsg); yymsg = (char *) YYSTACK_ALLOC (yymsg_alloc); if (!yymsg) { yymsg = yymsgbuf; yymsg_alloc = sizeof yymsgbuf; yysyntax_error_status = 2; } else { yysyntax_error_status = YYSYNTAX_ERROR; yymsgp = yymsg; } } yyerror (yymsgp); if (yysyntax_error_status == 2) goto yyexhaustedlab; } # undef YYSYNTAX_ERROR #endif } if (yyerrstatus == 3) { /* If just tried and failed to reuse lookahead token after an error, discard it. */ if (yychar <= YYEOF) { /* Return failure if at end of input. */ if (yychar == YYEOF) YYABORT; } else { yydestruct ("Error: discarding", yytoken, &yylval); yychar = YYEMPTY; } } /* Else will try to reuse lookahead token after shifting the error token. 
*/ goto yyerrlab1; /*---------------------------------------------------. | yyerrorlab -- error raised explicitly by YYERROR. | `---------------------------------------------------*/ yyerrorlab: /* Pacify compilers like GCC when the user code never invokes YYERROR and the label yyerrorlab therefore never appears in user code. */ if (/*CONSTCOND*/ 0) goto yyerrorlab; /* Do not reclaim the symbols of the rule whose action triggered this YYERROR. */ YYPOPSTACK (yylen); yylen = 0; YY_STACK_PRINT (yyss, yyssp); yystate = *yyssp; goto yyerrlab1; /*-------------------------------------------------------------. | yyerrlab1 -- common code for both syntax error and YYERROR. | `-------------------------------------------------------------*/ yyerrlab1: yyerrstatus = 3; /* Each real token shifted decrements this. */ for (;;) { yyn = yypact[yystate]; if (!yypact_value_is_default (yyn)) { yyn += YYTERROR; if (0 <= yyn && yyn <= YYLAST && yycheck[yyn] == YYTERROR) { yyn = yytable[yyn]; if (0 < yyn) break; } } /* Pop the current state because it cannot handle the error token. */ if (yyssp == yyss) YYABORT; yydestruct ("Error: popping", yystos[yystate], yyvsp); YYPOPSTACK (1); yystate = *yyssp; YY_STACK_PRINT (yyss, yyssp); } YY_IGNORE_MAYBE_UNINITIALIZED_BEGIN *++yyvsp = yylval; YY_IGNORE_MAYBE_UNINITIALIZED_END /* Shift the error token. */ YY_SYMBOL_PRINT ("Shifting", yystos[yyn], yyvsp, yylsp); yystate = yyn; goto yynewstate; /*-------------------------------------. | yyacceptlab -- YYACCEPT comes here. | `-------------------------------------*/ yyacceptlab: yyresult = 0; goto yyreturn; /*-----------------------------------. | yyabortlab -- YYABORT comes here. | `-----------------------------------*/ yyabortlab: yyresult = 1; goto yyreturn; #if !defined yyoverflow || YYERROR_VERBOSE /*-------------------------------------------------. | yyexhaustedlab -- memory exhaustion comes here. | `-------------------------------------------------*/ yyexhaustedlab: yyerror (YY_("memory exhausted")); yyresult = 2; /* Fall through. */ #endif yyreturn: if (yychar != YYEMPTY) { /* Make sure we have latest lookahead translation. See comments at user semantic actions for why this is necessary. */ yytoken = YYTRANSLATE (yychar); yydestruct ("Cleanup: discarding lookahead", yytoken, &yylval); } /* Do not reclaim the symbols of the rule whose action triggered this YYABORT or YYACCEPT. 
*/ YYPOPSTACK (yylen); YY_STACK_PRINT (yyss, yyssp); while (yyssp != yyss) { yydestruct ("Cleanup: popping", yystos[*yyssp], yyvsp); YYPOPSTACK (1); } #ifndef yyoverflow if (yyss != yyssa) YYSTACK_FREE (yyss); #endif #if YYERROR_VERBOSE if (yymsg != yymsgbuf) YYSTACK_FREE (yymsg); #endif return yyresult; } #line 260 "bison.y" /* yacc.c:1906 */ bool scan_state; unsigned int statement_count; int execute_file(int ac, char **av) { bool just_once = 0; string script; process_count = 1000000000; //1GB by default verbose = 0; ssd = 0; delta = 0; total_buffer_size = 0; hash_seed = 100; for (int i = 1; i < ac; i++) { if(strcmp(av[i],"-l") == 0) { process_count = 1000000*atoff(av[i+1]); } else if(strcmp(av[i],"-v") == 0) { verbose = 1; } else if(strcmp(av[i],"-delta") == 0) { delta = 1; } else if(strcmp(av[i],"-ssd") == 0) { ssd = 1; } else if(strcmp(av[i],"-i") == 0) { interactive = 1; break; } else if(strcmp(av[i],"-s") == 0) { just_once = 1; interactive = 1; script = av[i+1]; }; }; load_col_data(data_dict, "data.dictionary"); tot_disk = 0; if (!interactive) { if((yyin = fopen(av[ac-1], "r")) == nullptr) { perror(av[ac-1]); exit(1); }; if(yyparse()) { printf("SQL scan parse failed\n"); exit(1); }; scan_state = 1; std::clock_t start1 = std::clock(); load_vars(); statement_count = 0; clean_queues(); yyin = fopen(av[ac-1], "r"); PROC_FLUSH_BUF ( yyin ); statement_count = 0; extern FILE *yyin; context = CreateCudaDevice(0, nullptr, verbose); if(!yyparse()) { if(verbose) cout << "SQL scan parse worked " << endl; } else cout << "SQL scan parse failed" << endl; fclose(yyin); for (auto it=varNames.begin() ; it != varNames.end(); ++it ) { (*it).second->free(); }; if(verbose) { cout<< "cycle time " << ( ( std::clock() - start1 ) / (double)CLOCKS_PER_SEC ) << " " << getFreeMem() << endl; cout<< "disk time " << ( tot_disk / (double)CLOCKS_PER_SEC ) << " " << getFreeMem() << endl; }; } else { context = CreateCudaDevice(0, nullptr, verbose); if(!just_once) getline(cin, script); while (script != "exit" && script != "EXIT") { used_vars.clear(); yy_scan_string(script.c_str()); scan_state = 0; statement_count = 0; clean_queues(); if(yyparse()) { printf("SQL scan parse failed \n"); getline(cin, script); continue; }; scan_state = 1; load_vars(); statement_count = 0; clean_queues(); yy_scan_string(script.c_str()); std::clock_t start1 = std::clock(); if(!yyparse()) { if(verbose) cout << "SQL scan parse worked " << endl; }; for (auto it=varNames.begin() ; it != varNames.end(); ++it ) { (*it).second->free(); }; varNames.clear(); if(verbose) { cout<< "cycle time " << ( ( std::clock() - start1 ) / (double)CLOCKS_PER_SEC ) << endl; }; if(!just_once) getline(cin, script); else script = "exit"; }; while(!buffer_names.empty()) { //delete [] buffers[buffer_names.front()]; cudaFreeHost(buffers[buffer_names.front()]); buffer_sizes.erase(buffer_names.front()); buffers.erase(buffer_names.front()); buffer_names.pop(); }; for(auto it = index_buffers.begin(); it != index_buffers.end();it++) { cudaFreeHost(it->second); }; }; if(save_dict) { save_col_data(data_dict,"data.dictionary"); }; if(alloced_sz) { cudaFree(alloced_tmp); alloced_sz = 0; }; return 0; } //external c global to report errors //char alenka_err[4048]; int alenkaExecute(char *s) { YY_BUFFER_STATE bp; total_buffer_size = 0; scan_state = 0; load_col_data(data_dict, "data.dictionary"); std::clock_t start; if(verbose) start = std::clock(); bp = yy_scan_string(s); yy_switch_to_buffer(bp); int ret = yyparse(); //printf("execute: returned [%d]\n", ret); if(!ret) { if(verbose) 
cout << "SQL scan parse worked" << endl; } scan_state = 1; load_vars(); statement_count = 0; clean_queues(); bp = yy_scan_string(s); yy_switch_to_buffer(bp); if(!yyparse()) { if(verbose) cout << "SQL scan parse worked " << endl; } else cout << "SQL scan parse failed" << endl; yy_delete_buffer(bp); // Clear Vars for (auto it=varNames.begin() ; it != varNames.end(); ++it ) { (*it).second->free(); }; varNames.clear(); if(verbose) cout<< "statement time " << ( ( std::clock() - start ) / (double)CLOCKS_PER_SEC ) << endl; if(save_dict) save_col_data(data_dict,"data.dictionary"); return ret; }
e086967885caf8bb5726b02b4a7569400c06a118.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <time.h> #include <stdio.h> __global__ void kernel(const double * A, const double * B, double * C, int rows, int cols, int k){ // int blockId = blockIdx.x + blockIdx.y * gridDim.x; //int threadId = blockId * (blockDim.x * blockDim.y) + (threadIdx.y * blockDim.x) + threadIdx.x; int i = blockIdx.y * blockDim.y + threadIdx.y; int j = blockIdx.x * blockDim.x + threadIdx.x; if(i>=rows || j>=cols) return; for(int x=0; x<k; x++) C[i * cols + j] += A[i * k + x] * B[j * k + x]; } extern "C" long int kernel_wrapper(const double * A, const double * B, double * C, int rowA, int colA, int rowB, int colB){ time_t start, end; double * d_A; double * d_B; double * d_C; hipError_t error; error = hipMalloc((void**)&d_A, rowA * colA * sizeof(double)); error = hipMalloc((void**)&d_B, rowB * colB * sizeof(double)); error = hipMalloc((void**)&d_C, rowA * colB * sizeof(double)); if(error){ printf("Error reservando memoria GPU.\n"); return 0; } hipMemset(d_C, 0, rowA * colB * sizeof(double)); error = hipMemcpy((void*)d_A, (void*)A, rowA * colA * sizeof(double), hipMemcpyHostToDevice); error = hipMemcpy((void*)d_B, (void*)B, rowB * colB * sizeof(double), hipMemcpyHostToDevice); if(error){ printf("Error copiando memoria a GPU.\n"); return 0; } //Grid of 16x16 blocks int blocksX = (colB + 15)/16; int blocksY = (rowA + 15)/16; dim3 grid(blocksX, blocksY, 1); dim3 block(16, 16, 1); start = clock(); hipLaunchKernelGGL(( kernel), dim3(grid), dim3(block), 0, 0, d_A, d_B, d_C, rowA, colB, colA); end = clock(); hipMemcpy(C, d_C, rowA * colB * sizeof(double), hipMemcpyDeviceToHost); hipFree(d_A); hipFree(d_B); hipFree(d_C); return (long int)end-start; }
e086967885caf8bb5726b02b4a7569400c06a118.cu
#include <time.h> #include <stdio.h> __global__ void kernel(const double * A, const double * B, double * C, int rows, int cols, int k){ // int blockId = blockIdx.x + blockIdx.y * gridDim.x; //int threadId = blockId * (blockDim.x * blockDim.y) + (threadIdx.y * blockDim.x) + threadIdx.x; int i = blockIdx.y * blockDim.y + threadIdx.y; int j = blockIdx.x * blockDim.x + threadIdx.x; if(i>=rows || j>=cols) return; for(int x=0; x<k; x++) C[i * cols + j] += A[i * k + x] * B[j * k + x]; } extern "C" long int kernel_wrapper(const double * A, const double * B, double * C, int rowA, int colA, int rowB, int colB){ time_t start, end; double * d_A; double * d_B; double * d_C; cudaError_t error; error = cudaMalloc((void**)&d_A, rowA * colA * sizeof(double)); error = cudaMalloc((void**)&d_B, rowB * colB * sizeof(double)); error = cudaMalloc((void**)&d_C, rowA * colB * sizeof(double)); if(error){ printf("Error reservando memoria GPU.\n"); return 0; } cudaMemset(d_C, 0, rowA * colB * sizeof(double)); error = cudaMemcpy((void*)d_A, (void*)A, rowA * colA * sizeof(double), cudaMemcpyHostToDevice); error = cudaMemcpy((void*)d_B, (void*)B, rowB * colB * sizeof(double), cudaMemcpyHostToDevice); if(error){ printf("Error copiando memoria a GPU.\n"); return 0; } //Grid of 16x16 blocks int blocksX = (colB + 15)/16; int blocksY = (rowA + 15)/16; dim3 grid(blocksX, blocksY, 1); dim3 block(16, 16, 1); start = clock(); kernel<<<grid, block>>>(d_A, d_B, d_C, rowA, colB, colA); end = clock(); cudaMemcpy(C, d_C, rowA * colB * sizeof(double), cudaMemcpyDeviceToHost); cudaFree(d_A); cudaFree(d_B); cudaFree(d_C); return (long int)end-start; }
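// Apart from the cuda* -> hip* identifier renames, the main structural rewrite hipify
// applies to the pair above is the kernel launch: CUDA's
//     kernel<<<grid, block, sharedMemBytes, stream>>>(args...)
// becomes HIP's
//     hipLaunchKernelGGL((kernel), dim3(grid), dim3(block), sharedMemBytes, stream, args...)
// Minimal self-contained CUDA illustration of the launch being rewritten (scale1d and the
// sizes are made-up names for this sketch, not part of the original files):
#include <cstdio>
#include <cuda_runtime.h>

__global__ void scale1d(double* v, double s, int n) {
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n) v[i] *= s;
}

int main() {
    const int n = 1024;
    double* d_v = nullptr;
    cudaMalloc(&d_v, n * sizeof(double));
    cudaMemset(d_v, 0, n * sizeof(double));
    dim3 block(256), grid((n + block.x - 1) / block.x);
    // CUDA triple-chevron launch; hipify rewrites this single line to
    //   hipLaunchKernelGGL((scale1d), dim3(grid), dim3(block), 0, 0, d_v, 2.0, n);
    scale1d<<<grid, block, 0, 0>>>(d_v, 2.0, n);
    cudaError_t err = cudaDeviceSynchronize();
    std::printf("launch status: %s\n", cudaGetErrorString(err));
    cudaFree(d_v);
    return 0;
}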
5da2d4e76e126fd63e64c555c02da98f7ab84b63.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #ifdef USE_ROCM #include "dragon/core/context_cuda.h" #include "dragon/utils/op_kernels.h" namespace dragon { namespace kernel { namespace { template <typename T> __global__ void _Sigmoid(const int nthreads, const T* x, T* y) { CUDA_1D_KERNEL_LOOP(i, nthreads) { y[i] = T(1) / (T(1) + exp(-x[i])); } } template <> __global__ void _Sigmoid<half>(const int nthreads, const half* x, half* y) { CUDA_1D_KERNEL_LOOP(i, nthreads) { y[i] = __float2half(1.f / (1.f + exp(-__half2float(x[i])))); } } template <> __global__ void _Sigmoid<half2>(const int nthreads, const half2* x, half2* y) { CUDA_1D_KERNEL_LOOP(i, nthreads) { const float2 val = __half22float2(x[i]); y[i] = __floats2half2_rn(1.f / (1.f + exp(-val.x)), 1.f / (1.f + exp(-val.y))); } } template <typename T> __global__ void _SigmoidGrad(const int nthreads, const T* dy, const T* y, T* dx) { CUDA_1D_KERNEL_LOOP(i, nthreads) { #if __CUDA_ARCH__ >= 350 dx[i] = dy[i] * __ldg(y + i) * (1 - __ldg(y + i)); #else dx[i] = dy[i] * y[i] * (1 - y[i]); #endif } } template <> __global__ void _SigmoidGrad<half>( const int nthreads, const half* dy, const half* y, half* dx) { CUDA_1D_KERNEL_LOOP(i, nthreads) { const float val = __half2float(y[i]); dx[i] = __float2half(__half2float(dy[i]) * val * (1.f - val)); } } // SigmoidGrad template <> __global__ void _SigmoidGrad<half2>( const int nthreads, const half2* dy, const half2* y, half2* dx) { CUDA_1D_KERNEL_LOOP(i, nthreads) { const float2 val = __half22float2(y[i]); const float2 grad = __half22float2(dy[i]); dx[i] = __floats2half2_rn( grad.x * val.x * (1.f - val.x), grad.y * val.y * (1.f - val.y)); } } // SigmoidGrad } // namespace /* ------------------- Launcher Separator ------------------- */ template <> void Sigmoid<float16, CUDAContext>( const int count, const float16* x, float16* y, CUDAContext* ctx) { if ((count & 1) == 0) { hipLaunchKernelGGL(( _Sigmoid), dim3(CUDA_BLOCKS(count >> 1)), dim3(CUDA_THREADS), 0, ctx->cuda_stream(), count >> 1, reinterpret_cast<const half2*>(x), reinterpret_cast<half2*>(y)); } else { hipLaunchKernelGGL(( _Sigmoid), dim3(CUDA_BLOCKS(count)), dim3(CUDA_THREADS), 0, ctx->cuda_stream(), count, reinterpret_cast<const half*>(x), reinterpret_cast<half*>(y)); } } template <> void SigmoidGrad<float16, CUDAContext>( const int count, const float16* dy, const float16* y, float16* dx, CUDAContext* ctx) { if ((count & 1) == 0) { hipLaunchKernelGGL(( _SigmoidGrad), dim3(CUDA_BLOCKS(count >> 1)), dim3(CUDA_THREADS), 0, ctx->cuda_stream(), count >> 1, reinterpret_cast<const half2*>(dy), reinterpret_cast<const half2*>(y), reinterpret_cast<half2*>(dx)); } else { hipLaunchKernelGGL(( _SigmoidGrad), dim3(CUDA_BLOCKS(count)), dim3(CUDA_THREADS), 0, ctx->cuda_stream(), count, reinterpret_cast<const half*>(dy), reinterpret_cast<const half*>(y), reinterpret_cast<half*>(dx)); } } // SigmoidGrad #define DEFINE_KERNEL_LAUNCHER(T) \ template <> \ void Sigmoid<T, CUDAContext>( \ const int count, const T* x, T* y, CUDAContext* ctx) { \ hipLaunchKernelGGL(( _Sigmoid), dim3(CUDA_BLOCKS(count)), dim3(CUDA_THREADS), 0, ctx->cuda_stream(), \ count, x, y); \ } #define DEFINE_GRAD_KERNEL_LAUNCHER(T) \ template <> \ void SigmoidGrad<T, CUDAContext>( \ const int count, const T* dy, const T* y, T* dx, CUDAContext* ctx) { \ hipLaunchKernelGGL(( _SigmoidGrad), dim3(CUDA_BLOCKS(count)), dim3(CUDA_THREADS), 0, ctx->cuda_stream(), \ count, dy, y, dx); \ } DEFINE_KERNEL_LAUNCHER(float); DEFINE_KERNEL_LAUNCHER(double); 
DEFINE_GRAD_KERNEL_LAUNCHER(float); DEFINE_GRAD_KERNEL_LAUNCHER(double); #undef DEFINE_KERNEL_LAUNCHER #undef DEFINE_GRAD_KERNEL_LAUNCHER } // namespace kernel } // namespace dragon #endif // USE_ROCM
5da2d4e76e126fd63e64c555c02da98f7ab84b63.cu
#ifdef USE_CUDA #include "dragon/core/context_cuda.h" #include "dragon/utils/op_kernels.h" namespace dragon { namespace kernel { namespace { template <typename T> __global__ void _Sigmoid(const int nthreads, const T* x, T* y) { CUDA_1D_KERNEL_LOOP(i, nthreads) { y[i] = T(1) / (T(1) + exp(-x[i])); } } template <> __global__ void _Sigmoid<half>(const int nthreads, const half* x, half* y) { CUDA_1D_KERNEL_LOOP(i, nthreads) { y[i] = __float2half(1.f / (1.f + exp(-__half2float(x[i])))); } } template <> __global__ void _Sigmoid<half2>(const int nthreads, const half2* x, half2* y) { CUDA_1D_KERNEL_LOOP(i, nthreads) { const float2 val = __half22float2(x[i]); y[i] = __floats2half2_rn(1.f / (1.f + exp(-val.x)), 1.f / (1.f + exp(-val.y))); } } template <typename T> __global__ void _SigmoidGrad(const int nthreads, const T* dy, const T* y, T* dx) { CUDA_1D_KERNEL_LOOP(i, nthreads) { #if __CUDA_ARCH__ >= 350 dx[i] = dy[i] * __ldg(y + i) * (1 - __ldg(y + i)); #else dx[i] = dy[i] * y[i] * (1 - y[i]); #endif } } template <> __global__ void _SigmoidGrad<half>( const int nthreads, const half* dy, const half* y, half* dx) { CUDA_1D_KERNEL_LOOP(i, nthreads) { const float val = __half2float(y[i]); dx[i] = __float2half(__half2float(dy[i]) * val * (1.f - val)); } } // SigmoidGrad template <> __global__ void _SigmoidGrad<half2>( const int nthreads, const half2* dy, const half2* y, half2* dx) { CUDA_1D_KERNEL_LOOP(i, nthreads) { const float2 val = __half22float2(y[i]); const float2 grad = __half22float2(dy[i]); dx[i] = __floats2half2_rn( grad.x * val.x * (1.f - val.x), grad.y * val.y * (1.f - val.y)); } } // SigmoidGrad } // namespace /* ------------------- Launcher Separator ------------------- */ template <> void Sigmoid<float16, CUDAContext>( const int count, const float16* x, float16* y, CUDAContext* ctx) { if ((count & 1) == 0) { _Sigmoid<<<CUDA_BLOCKS(count >> 1), CUDA_THREADS, 0, ctx->cuda_stream()>>>( count >> 1, reinterpret_cast<const half2*>(x), reinterpret_cast<half2*>(y)); } else { _Sigmoid<<<CUDA_BLOCKS(count), CUDA_THREADS, 0, ctx->cuda_stream()>>>( count, reinterpret_cast<const half*>(x), reinterpret_cast<half*>(y)); } } template <> void SigmoidGrad<float16, CUDAContext>( const int count, const float16* dy, const float16* y, float16* dx, CUDAContext* ctx) { if ((count & 1) == 0) { _SigmoidGrad<<< CUDA_BLOCKS(count >> 1), CUDA_THREADS, 0, ctx->cuda_stream()>>>( count >> 1, reinterpret_cast<const half2*>(dy), reinterpret_cast<const half2*>(y), reinterpret_cast<half2*>(dx)); } else { _SigmoidGrad<<<CUDA_BLOCKS(count), CUDA_THREADS, 0, ctx->cuda_stream()>>>( count, reinterpret_cast<const half*>(dy), reinterpret_cast<const half*>(y), reinterpret_cast<half*>(dx)); } } // SigmoidGrad #define DEFINE_KERNEL_LAUNCHER(T) \ template <> \ void Sigmoid<T, CUDAContext>( \ const int count, const T* x, T* y, CUDAContext* ctx) { \ _Sigmoid<<<CUDA_BLOCKS(count), CUDA_THREADS, 0, ctx->cuda_stream()>>>( \ count, x, y); \ } #define DEFINE_GRAD_KERNEL_LAUNCHER(T) \ template <> \ void SigmoidGrad<T, CUDAContext>( \ const int count, const T* dy, const T* y, T* dx, CUDAContext* ctx) { \ _SigmoidGrad<<<CUDA_BLOCKS(count), CUDA_THREADS, 0, ctx->cuda_stream()>>>( \ count, dy, y, dx); \ } DEFINE_KERNEL_LAUNCHER(float); DEFINE_KERNEL_LAUNCHER(double); DEFINE_GRAD_KERNEL_LAUNCHER(float); DEFINE_GRAD_KERNEL_LAUNCHER(double); #undef DEFINE_KERNEL_LAUNCHER #undef DEFINE_GRAD_KERNEL_LAUNCHER } // namespace kernel } // namespace dragon #endif // USE_CUDA
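// The float16 launchers above halve the grid when count is even and reinterpret the
// buffers as half2, so each thread processes a packed pair of values. The sketch below
// illustrates that half2 packing in isolation (sigmoid_pairs is a made-up kernel for
// this example; it converts from float on the way in and out so the host side needs no
// fp16 handling, and the sizes are arbitrary):
#include <cstdio>
#include <cuda_fp16.h>
#include <cuda_runtime.h>

__global__ void sigmoid_pairs(const float* x, float* y, int n_pairs) {
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i >= n_pairs) return;
    // pack two neighbouring values into one half2, evaluate both lanes, unpack again
    half2 v = __floats2half2_rn(x[2 * i], x[2 * i + 1]);
    float2 f = __half22float2(v);
    float2 s = make_float2(1.f / (1.f + expf(-f.x)), 1.f / (1.f + expf(-f.y)));
    half2 r = __floats2half2_rn(s.x, s.y);
    float2 out = __half22float2(r);
    y[2 * i] = out.x;
    y[2 * i + 1] = out.y;
}

int main() {
    const int n = 8;                       // even, mirroring the (count & 1) == 0 fast path above
    float h_x[n], h_y[n];
    for (int i = 0; i < n; i++) h_x[i] = (float)(i - 4);
    float *d_x, *d_y;
    cudaMalloc(&d_x, n * sizeof(float));
    cudaMalloc(&d_y, n * sizeof(float));
    cudaMemcpy(d_x, h_x, n * sizeof(float), cudaMemcpyHostToDevice);
    sigmoid_pairs<<<1, 32>>>(d_x, d_y, n / 2);
    cudaMemcpy(h_y, d_y, n * sizeof(float), cudaMemcpyDeviceToHost);
    for (int i = 0; i < n; i++) std::printf("sigmoid(%g) ~ %g\n", h_x[i], h_y[i]);
    cudaFree(d_x);
    cudaFree(d_y);
    return 0;
}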
884997497066743b9cbecf118de41ed15a043eb3.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" __device__ int rectanglesSum(int** integralImage, int x, int y, int w, int h) { int A = x > 0 && y > 0 ? integralImage[x - 1][y - 1] : 0; int B = x + w > 0 && y > 0 ? integralImage[x + w - 1][y - 1] : 0; int C = x > 0 && y + h > 0 ? integralImage[x - 1][y + h - 1] : 0; int D = x + w > 0 && y + h > 0 ? integralImage[x + w - 1][y + h - 1] : 0; return A + D - B - C; } extern "C" __global__ void haar_type_E(int** integralImage, int* allRectangles, int numRectangles, float coeff, int* haarFeatures) { // Get an "unique id" of the thread that correspond to one pixel const unsigned int tidX = blockIdx.x * blockDim.x + threadIdx.x; if (tidX < numRectangles) { int x = (int) (allRectangles[tidX * 4] * coeff); int y = (int) (allRectangles[tidX * 4 + 1] * coeff); int w = (int) (allRectangles[tidX * 4 + 2] * coeff); int h = (int) (allRectangles[tidX * 4 + 3] * coeff); int mid_w = w / 2; int mid_h = h / 2; int r1 = rectanglesSum(integralImage, x, y, mid_w, mid_h); int r2 = rectanglesSum(integralImage, x + mid_w, y, mid_w, mid_h); int r3 = rectanglesSum(integralImage, x, y + mid_h, mid_w, mid_h); int r4 = rectanglesSum(integralImage, x + mid_w, y + mid_h, mid_w, mid_h); haarFeatures[tidX] = r1 - r2 - r3 + r4; } __syncthreads(); }
884997497066743b9cbecf118de41ed15a043eb3.cu
__device__ int rectanglesSum(int** integralImage, int x, int y, int w, int h) { int A = x > 0 && y > 0 ? integralImage[x - 1][y - 1] : 0; int B = x + w > 0 && y > 0 ? integralImage[x + w - 1][y - 1] : 0; int C = x > 0 && y + h > 0 ? integralImage[x - 1][y + h - 1] : 0; int D = x + w > 0 && y + h > 0 ? integralImage[x + w - 1][y + h - 1] : 0; return A + D - B - C; } extern "C" __global__ void haar_type_E(int** integralImage, int* allRectangles, int numRectangles, float coeff, int* haarFeatures) { // Get an "unique id" of the thread that correspond to one pixel const unsigned int tidX = blockIdx.x * blockDim.x + threadIdx.x; if (tidX < numRectangles) { int x = (int) (allRectangles[tidX * 4] * coeff); int y = (int) (allRectangles[tidX * 4 + 1] * coeff); int w = (int) (allRectangles[tidX * 4 + 2] * coeff); int h = (int) (allRectangles[tidX * 4 + 3] * coeff); int mid_w = w / 2; int mid_h = h / 2; int r1 = rectanglesSum(integralImage, x, y, mid_w, mid_h); int r2 = rectanglesSum(integralImage, x + mid_w, y, mid_w, mid_h); int r3 = rectanglesSum(integralImage, x, y + mid_h, mid_w, mid_h); int r4 = rectanglesSum(integralImage, x + mid_w, y + mid_h, mid_w, mid_h); haarFeatures[tidX] = r1 - r2 - r3 + r4; } __syncthreads(); }
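// haar_type_E above receives `int** integralImage`, i.e. a device array of device row
// pointers, which is the non-obvious part of calling it from the host. Hedged sketch of
// that setup (dim0/dim1 and the zero fill are placeholders for this example; real code
// would upload an actual integral image, and a flattened 1-D buffer with manual indexing
// is a common alternative to the pointer table):
#include <cstdlib>
#include <cuda_runtime.h>

int main() {
    const int dim0 = 640, dim1 = 480;                 // assumed integral-image extent
    // one device allocation per row, with the pointers tracked on the host first
    int** h_rows = (int**)malloc(dim0 * sizeof(int*));
    for (int i = 0; i < dim0; i++) {
        cudaMalloc(&h_rows[i], dim1 * sizeof(int));
        cudaMemset(h_rows[i], 0, dim1 * sizeof(int));
    }
    // the pointer table itself must also live on the device; this is what the kernel
    // dereferences as integralImage[a][b]
    int** d_integral = nullptr;
    cudaMalloc(&d_integral, dim0 * sizeof(int*));
    cudaMemcpy(d_integral, h_rows, dim0 * sizeof(int*), cudaMemcpyHostToDevice);

    // From here, one would copy a flat int[4 * numRectangles] array of (x, y, w, h)
    // records to the device and launch haar_type_E(d_integral, d_rects, numRectangles,
    // coeff, d_features) with enough threads to cover numRectangles.

    for (int i = 0; i < dim0; i++) cudaFree(h_rows[i]);
    cudaFree(d_integral);
    free(h_rows);
    return 0;
}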
acff06d3cc2be072a10c11653e98f60973b4847a.hip
// !!! This is a file automatically generated by hipify!!! /* * md.cu * * Created on: Jul 28, 2010 * Author: zhmurov */ #include "md.cuh" #include "../Util/Cuda.h" #include "../Util/memory.h" #include "../Util/mystl.h" #include "global.h" #include "../Potentials/PeriodicBoundary.cu" #include "../Integrators/LeapFrogIntegrator.cu" #include "../Integrators/SteepestDescentIntegrator.cu" #include "../Potentials/HarmonicPotential.cu" #include "../Potentials/AnglePotential.cu" #include "../Potentials/DihedralPotential.cu" #include "../Potentials/ImproperPotential.cu" #include "../Potentials/NonBondedPotential.cu" #include "../Potentials/SASAPotential.cu" #include "../Potentials/GenBornPotential.cu" #include "../Potentials/LangevinHeatBath.cu" #include "../Potentials/ReplicaExchange.cu" #include "../Potentials/FixForcePotential.cu" #include "../Potentials/UmbrellaSampling.cu" #include "../Potentials/RepulsiveBoundaryPotential.cu" #include "../Potentials/HarmonicConstraints.cu" //#include "../Potentials/GBSWPotential.cu" #include "../Potentials/GBPotential.cu" #include "../Potentials/PullingPlanePotential.cu" #include "../Potentials/PushingPlanePotential.cu" #include "../Potentials/DrumPotential.cu" #include "../Potentials/FragmemPotential.cu" #include "../Updaters/CoordinatesOutputManagerDCD.cu" #include "../Updaters/EnergyOutputManager.cu" #include "../Updaters/PairsListsUpdater.cu" #include "../Updaters/RestartOutputManager.cu" #include "../Updaters/RigidBody.cu" #include "../Updaters/PairsMeshUpdater.cu" #include "../ConstrAlgorithms/shake.cu" #include "../ConstrAlgorithms/ccma.cu" //#include "SOPGPU/SOPGPUParameterizer.cu" class LogMD: public ILog { virtual void Write(const char* message) const { std::cout << makeTimePrefix() << "<md_core> " << message << std::endl; } } log_md; #define LOG LogStream(log_md) Potential** potentials; Updater** updaters; Integrator* integrator; ConstrAlg** constrAlgs; EnergyOutput** energyOutputs; Restarter** restarters; int potentialsCount; int updatersCount; int constrAlgsCount; int energyOutputsCount; int restartersCount; long int lastStepCoordCopied = -1; long int lastStepVelCopied = -1; void initAtomTypesOnGPU(); void copyCoordinatesToGPU(int traj); void copyVelocitiesToGPU(int traj); void copyMassesToGPU(); void copyForcesToGPU(); void copyAtomTypesToGPU(); void copyCoordinatesFromGPU(bool force); void copyVelocitiesFromGPU(); void addRestarter(const char* name, void (*save)(FILE*), void (*load)(FILE*)){ for (const char* i = name; *i; i++) { if (isspace(*i)) { DIE("internal error: restarter name contain whitespace"); } } Restarter* r = (Restarter*)malloc(sizeof(Restarter)); if (!r) { DIE("Out of memory"); } r->save = save; r->load = load; if (snprintf(r->name, sizeof(r->name), "%s", name) >= (int) sizeof(r->name)) { DIE("500 Program internal error"); } restarters[restartersCount++] = r; } void launchRestarters(FILE* keyf) { for (int i = 0; i < restartersCount; i++) { char name[4096]; if (fscanf(keyf, "\n%s\n", name) != 1) DIE("Cannot read restarter #%d", i); if (strcmp(name, restarters[i]->name) != 0) { DIE("Got wrong restarter name '%s', expected '%s'", name, restarters[i]->name); } restarters[i]->load(keyf); } } void dumpTOP(){ TOPData top; top.atomCount = topology.atomCount; top.atoms = (TOPAtom*)calloc(top.atomCount, sizeof(TOPAtom)); int i; for(i = 0; i < topology.atomCount; i++){ top.atoms[i].id = topology.atoms[i].id; sprintf(top.atoms[i].type, "%d", topology.atoms[i].typeId); top.atoms[i].resid = topology.atoms[i].resid; 
sprintf(top.atoms[i].resName, "%s", topology.atoms[i].resName); sprintf(top.atoms[i].name, "%s", topology.atoms[i].name); top.atoms[i].chain = topology.atoms[i].segment[0]; top.atoms[i].charge = topology.atoms[i].charge; top.atoms[i].mass = topology.atoms[i].mass; } top.bondCount = topology.bondCount; top.bonds = (TOPPair*)calloc(top.bondCount, sizeof(TOPPair)); int b; for(b = 0; b < topology.bondCount; b++){ top.bonds[b].i = topology.bonds[b].i; top.bonds[b].j = topology.bonds[b].j; top.bonds[b].func = 1; top.bonds[b].c0 = topology.bonds[b].b0; top.bonds[b].c1 = topology.bonds[b].kb; } top.angleCount = topology.angleCount; top.angles = (TOPAngle*)calloc(top.angleCount, sizeof(TOPAngle)); int a; for(a = 0; a < top.angleCount; a++){ top.angles[a].i = topology.angles[a].i; top.angles[a].j = topology.angles[a].j; top.angles[a].k = topology.angles[a].k; top.angles[a].func = 1; top.angles[a].c0 = topology.angles[a].theta0*180.0/M_PI; top.angles[a].c1 = topology.angles[a].ktheta; } int dihedralCount = 0; int d; for(d = 0; d < topology.dihedralCount; d++){ dihedralCount += topology.dihedrals[d].multiplicity; } for(d = 0; d < topology.improperCount; d++){ dihedralCount += topology.impropers[d].multiplicity; } top.dihedralCount = dihedralCount; top.dihedrals = (TOPDihedral*)calloc(top.dihedralCount, sizeof(TOPDihedral)); dihedralCount = 0; for(d = 0; d < topology.dihedralCount; d++){ for(i = 0; i < topology.dihedrals[d].multiplicity; i++){ top.dihedrals[dihedralCount].i = topology.dihedrals[d].i; top.dihedrals[dihedralCount].j = topology.dihedrals[d].j; top.dihedrals[dihedralCount].k = topology.dihedrals[d].k; top.dihedrals[dihedralCount].l = topology.dihedrals[d].l; top.dihedrals[dihedralCount].func = 1; top.dihedrals[dihedralCount].parCount = 3; top.dihedrals[dihedralCount].c0 = topology.dihedrals[d].delta[i]*180.0/M_PI; top.dihedrals[dihedralCount].c1 = topology.dihedrals[d].kchi[i]; top.dihedrals[dihedralCount].c2 = topology.dihedrals[d].n[i]; dihedralCount++; } } for(d = 0; d < topology.improperCount; d++){ for(i = 0; i < topology.impropers[d].multiplicity; i++){ top.dihedrals[dihedralCount].i = topology.impropers[d].i; top.dihedrals[dihedralCount].j = topology.impropers[d].j; top.dihedrals[dihedralCount].k = topology.impropers[d].k; top.dihedrals[dihedralCount].l = topology.impropers[d].l; top.dihedrals[dihedralCount].func = 2; top.dihedrals[dihedralCount].parCount = 2; top.dihedrals[dihedralCount].c0 = topology.impropers[d].psi0[i]*180.0/M_PI; top.dihedrals[dihedralCount].c1 = topology.impropers[d].kpsi[i]; //top.dihedrals[dihedralCount].c2 = topology.impropers[d].n[i]; dihedralCount++; } } top.exclusionCount = topology.exclusionsCount; top.exclusions = (TOPExclusion*)calloc(top.exclusionCount, sizeof(TOPExclusion)); int e; for(e = 0; e < top.exclusionCount; e++){ top.exclusions[e].i = topology.exclusions[e].i; top.exclusions[e].j = topology.exclusions[e].j; top.exclusions[e].func = 1; } top.pairsCount = 0; writeTOP("dump.top", &top); } void initGPU(){ dumpTOP(); LOG << "Preparing system on a GPU..."; if (parameters.device >= 0) { hipSetDevice(parameters.device); } else { hipGetDevice(&parameters.device); // Allow automagic to do all the work } hipGetDeviceProperties(&deviceProps, parameters.device); gsystem.N = topology.atomCount; gsystem.Nsim = getIntegerParameter(PARAMETER_NSIM, gsystem.N); if(gsystem.Nsim % BLOCK_SIZE != 0){ gsystem.widthSim = (gsystem.Nsim/16 + 1)*16; } else { gsystem.widthSim = gsystem.Nsim; } gsystem.Ntot = gsystem.N*parameters.Ntr; if(gsystem.Ntot % BLOCK_SIZE != 0){ 
gsystem.widthTot = (gsystem.Ntot/BLOCK_SIZE + 1)*BLOCK_SIZE; } else { gsystem.widthTot = gsystem.Ntot; } LOG << "Preparing data for " << parameters.Ntr << " trajectories (" << gsystem.N << " atoms in a system, " << gsystem.Ntot << " total atoms)."; LOG << "Arrays will be aligned to the width of " << gsystem.widthSim << " and total width of " << gsystem.widthTot; allocateCPU((void**)&gsystem.h_coord, gsystem.Ntot*sizeof(float4)); allocateCPU((void**)&gsystem.h_vel, gsystem.Ntot*sizeof(float4)); allocateCPU((void**)&gsystem.h_m, atomTypesCount*sizeof(float)); allocateCPU((void**)&gsystem.h_forces, gsystem.Ntot*sizeof(float4)); allocateCPU((void**)&gsystem.h_atomTypes, gsystem.Ntot*sizeof(int)); allocateGPU((void**)&gsystem.d_coord, gsystem.Ntot*sizeof(float4)); allocateGPU((void**)&gsystem.d_midcoord, gsystem.Ntot*sizeof(float4)); allocateGPU((void**)&gsystem.d_vel, gsystem.Ntot*sizeof(float4)); allocateGPU((void**)&gsystem.d_m, atomTypesCount*sizeof(float)); allocateGPU((void**)&gsystem.d_forces, gsystem.Ntot*sizeof(float4)); allocateGPU((void**)&gsystem.d_atomTypes, gsystem.Ntot*sizeof(int)); hipMemset(gsystem.d_forces, 0, gsystem.Ntot*sizeof(float4)); hipMemset(gsystem.d_vel, 0, gsystem.Ntot*sizeof(float4)); potentialsCount = 0; updatersCount = 0; energyOutputsCount = 0; restartersCount = 0; potentials = (Potential**)calloc(MAX_POTENTIALS_COUNT, sizeof(Potential*)); int p, u, i; for(p = 0; p < MAX_POTENTIALS_COUNT; p++){ potentials[p] = (Potential*)malloc(sizeof(Potential*)); } updaters = (Updater**)calloc(MAX_UPDATERS_COUNT, sizeof(Updater*)); for(u = 0; u < MAX_UPDATERS_COUNT; u++){ updaters[u] = (Updater*)malloc(sizeof(Updater*)); } constrAlgs = (ConstrAlg**)calloc(MAX_CONSTR_ALGS_COUNT, sizeof(ConstrAlg*)); for(i = 0; i < MAX_CONSTR_ALGS_COUNT; i++){ constrAlgs[i] = (ConstrAlg*)malloc(sizeof(ConstrAlg*)); } energyOutputs = (EnergyOutput**)calloc(MAX_ENERGY_OUTPUT_COUNT, sizeof(EnergyOutput*)); for(i = 0; i < MAX_ENERGY_OUTPUT_COUNT; i++){ energyOutputs[i] = (EnergyOutput*)malloc(sizeof(EnergyOutput*)); } restarters = (Restarter**)calloc(MAX_RESTARTERS_COUNT, sizeof(Restarter*)); for(i = 0; i < MAX_RESTARTERS_COUNT; i++){ restarters[i] = (Restarter*)malloc(sizeof(Restarter*)); } int traj; char trajnum[10]; char trajFilename[100]; for(traj = 0; traj < parameters.Ntr; traj++){ sprintf(trajnum, "%d", traj + parameters.firstrun); replaceString(trajFilename, parameters.coordFilename, trajnum, "<run>"); readCoordinates(trajFilename); if(strcmp(parameters.velFilename, PARAMETER_STRING_UNDEFINED) != 0){ replaceString(trajFilename, parameters.velFilename, trajnum, "<run>"); readVelocities(trajFilename); } else { float T = getFloatParameter(PARAMETER_INITIAL_TEMPERATURE, -1.0f); if(T == -1.0f){ T = getFloatParameter(PARAMETER_TEMPERATURE, 0.0f); } LOG << "generating velocities with T=" << T << " K"; generateVelocities(T); } copyCoordinatesToGPU(traj); copyVelocitiesToGPU(traj); } checkCUDAError("copying coordinate/velocities to GPU"); copyMassesToGPU(); checkCUDAError("copying masses to GPU"); copyForcesToGPU(); checkCUDAError("copying forces to GPU"); copyAtomTypesToGPU(); checkCUDAError("copying atoms to GPU"); hipMemcpyToSymbol(c_gsystem, &gsystem, sizeof(GSystem), 0, hipMemcpyHostToDevice); checkCUDAError("init c_gsystem"); char integratorName[100]; getMaskedParameter(integratorName, PARAMETER_INTEGRATOR, PARAMETER_VALUE_INTEGRATOR_LEAPFROG); if(strcmp(integratorName, PARAMETER_VALUE_INTEGRATOR_STEEPEST_DESCENT) == 0){ LOG << "Steepest Descent integrator requested. 
Energy minimization will be performed."; sd_integrator::create(); } else if(strcmp(integratorName, PARAMETER_VALUE_INTEGRATOR_LEAPFROG) == 0){ LOG << "Leap-Frog integrator will be used."; leapfrog_integrator::create(); } checkCUDAError("init integrator"); char rigidbonds[100]; getMaskedParameter(rigidbonds, PARAMETER_RIGIDBONDS, PARAMETER_VALUE_RIGIDBONDS_NONE); if(strcmp(rigidbonds, PARAMETER_VALUE_RIGIDBONDS_HYDROGEN) == 0){ LOG << "Hbond lengths will be constrained with SHAKE algorithm\n"; shake_constrAlg::create(); } else if(strcmp(rigidbonds, PARAMETER_VALUE_RIGIDBONDS_ALL) == 0){ LOG << "All bond lengths will be constrained with CCMA algorithm\n"; ccma_constrAlg::create(); } else if(strcmp(integratorName, PARAMETER_VALUE_RIGIDBONDS_NONE) == 0){ LOG << "No constraints will be appied.\n"; } checkCUDAError("init constraint algorithms"); umbrella_sampling::create(); checkCUDAError("Init umbrella sampling"); harmonic_potential::create(); checkCUDAError("init harmonic potential"); angle_potential::create(); checkCUDAError("init angle potential"); dihedral_potential::create(); checkCUDAError("init dihedral potential"); improper_potential::create(); checkCUDAError("init improper potential"); non_bonded_potential::create(); checkCUDAError("init nonbonded potential"); sasa_potential::create(); checkCUDAError("init SASA potential"); if(getYesNoParameter(PARAMETER_LANGEVIN_ON, DEFAULT_LANGEVIN_ON)){ langevin_heat_bath::create(); checkCUDAError("init Langevin Heat-Bath potential"); } repulsive_boundary::create(); checkCUDAError("init repulsive boundary"); periodic_boundary::create(); checkCUDAError("init periodic boundary"); harmonic_constraints::create(); checkCUDAError("init harmonic constraints boundary"); //gbsw_potential::create(); //checkCUDAError("init GBSW potential"); gb_potential::create(); checkCUDAError("init GB potential"); // Pair list must be updated after we finish LD rigid_body::create(); checkCUDAError("init Rigid Body"); pairslist_updater::create(); checkCUDAError("init pairlist updater"); // Since GenBorn Potential uses pairlists in its initialization genborn_potential::create(); checkCUDAError("init GenBorn potential"); fixforce_potential::create(); checkCUDAError("init Fix-Force potential"); pulling_plane_potential::create(); checkCUDAError("init Pulling-Plane potential"); pushing_plane_potential::create(); checkCUDAError("init Pushing-Plane potential"); drum_potential::create(); checkCUDAError("init Drum potential"); fragmem_potential::create(); checkCUDAError("init FragMem potential"); coordinates_output_dcd::create(); checkCUDAError("init DCD output manager"); energy_output::create(); checkCUDAError("init energy output manager"); // Replica exchange updater must be called after output manager, bacause latter computes energies replica_exchange::create(); checkCUDAError("init replica exchange"); restart_output::create(); checkCUDAError("init output manager"); pairsmesh::create(); checkCUDAError("init mesh updater"); hipBindTexture(0, t_coord, gsystem.d_coord, gsystem.Ntot*sizeof(float4)); hipBindTexture(0, t_m, gsystem.d_m, atomTypesCount*sizeof(float)); hipBindTexture(0, t_atomTypes, gsystem.d_atomTypes, gsystem.Ntot*sizeof(float)); checkCUDAError("init"); LOG << "Done preparing system on a GPU."; printMemoryUsed(); } void compute(){ int p, u, nav; nav = updaters[0]->frequency; for(u = 0; u < updatersCount; u++){ nav = GCD(nav, updaters[u]->frequency); } int i, traj; firststep = getLongIntegerParameter(PARAMETER_FIRSTSTEP, 0); step = firststep; // This is now implemented 
in parameters.cpp and read from restartkey file //for(traj = 0; traj < parameters.Ntr; traj++){ // trajectoryTime[traj] = firststep*integrator->h; //} printTime(step - firststep); while(step < parameters.numsteps){ for(u = 0; u < updatersCount; u++){ if((step - firststep) % updaters[u]->frequency == 0){ hipDeviceSynchronize(); updaters[u]->update(); checkCUDAError(updaters[u]->name); } } for(i = 0; i < nav; i++){ for(p = 0; p < potentialsCount; p++){ //hipDeviceSynchronize(); potentials[p]->compute(); checkCUDAError(potentials[p]->name); } /*hipMemcpy(gsystem.h_forces, gsystem.d_forces, gsystem.Ntot*sizeof(float4), hipMemcpyDeviceToHost); FILE* out = fopen("forces.dat", "w"); for(i = 0; i < gsystem.N; i++){ fprintf(out, "%d\t%f\t%f\t%f\n", i, gsystem.h_forces[i].x, gsystem.h_forces[i].y, gsystem.h_forces[i].z); } fclose(out); exit(0);*/ step++; hipDeviceSynchronize(); integrator->integrate(); hipDeviceSynchronize(); for(p = 0; p < constrAlgsCount; p++){ constrAlgs[p]->compute(); checkCUDAError(constrAlgs[p]->name); hipDeviceSynchronize(); } integrator->finalize(); //checkCUDAError(integrator->name); hipDeviceSynchronize(); } for(traj = 0; traj < parameters.Ntr; traj++){ trajectoryTime[traj] += nav*integrator->h; } } for(u = 0; u < updatersCount; u++){ if(step % updaters[u]->frequency == 0){ hipDeviceSynchronize(); updaters[u]->update(); checkCUDAError(updaters[u]->name); } } checkCUDAError("finalizing"); for(p = 0; p < potentialsCount; p++){ potentials[p]->destroy(); } for(u = 0; u < updatersCount; u++){ updaters[u]->destroy(); } } void copyCoordinatesToGPU(int traj, int reset = 1){ int i; for(i = 0; i < gsystem.N && reset; i++){ gsystem.h_coord[i + traj*gsystem.N].x = topology.atoms[i].x; gsystem.h_coord[i + traj*gsystem.N].y = topology.atoms[i].y; gsystem.h_coord[i + traj*gsystem.N].z = topology.atoms[i].z; gsystem.h_coord[i + traj*gsystem.N].w = (float)topology.atoms[i].typeId; } hipMemcpy(&gsystem.d_coord[traj*gsystem.N], &gsystem.h_coord[traj*gsystem.N], gsystem.N*sizeof(float4), hipMemcpyHostToDevice); hipMemcpy(&gsystem.d_midcoord[traj*gsystem.N], &gsystem.h_coord[traj*gsystem.N], gsystem.N*sizeof(float4), hipMemcpyHostToDevice);//the coordinates will be overwritten on the first step, we do it to get atomid's in midcoord } void copyCoordinatesToGPU(int traj) { copyCoordinatesToGPU(traj, 1); } void copyVelocitiesToGPU(int traj, int reset = 1){ int i; for(i = 0; i < gsystem.N && reset; i++){ gsystem.h_vel[i + traj*gsystem.N].x = topology.atoms[i].vx; gsystem.h_vel[i + traj*gsystem.N].y = topology.atoms[i].vy; gsystem.h_vel[i + traj*gsystem.N].z = topology.atoms[i].vz; gsystem.h_vel[i + traj*gsystem.N].w = 0.0f; } hipMemcpy(&gsystem.d_vel[traj*gsystem.N], &gsystem.h_vel[traj*gsystem.N], gsystem.N*sizeof(float4), hipMemcpyHostToDevice); } void copyVelocitiesToGPU(int traj) { copyVelocitiesToGPU(traj, 1); } void copyMassesToGPU(){ int i; for(i = 0; i < atomTypesCount; i++){ gsystem.h_m[i] = atomTypes[i].mass; } hipMemcpy(gsystem.d_m, gsystem.h_m, atomTypesCount*sizeof(float), hipMemcpyHostToDevice); } void copyForcesToGPU(){ int i, itot, traj; for(i = 0; i < gsystem.N; i++){ gsystem.h_forces[i].x = 0.0f; gsystem.h_forces[i].y = 0.0f; gsystem.h_forces[i].z = 0.0f; gsystem.h_forces[i].w = topology.atoms[i].mass; } for(traj = 1; traj < parameters.Ntr; traj ++){ for(i = 0; i < gsystem.N; i++){ itot = gsystem.N*traj + i; gsystem.h_forces[itot] = gsystem.h_forces[i]; } } hipMemcpy(gsystem.d_forces, gsystem.h_forces, gsystem.Ntot*sizeof(float4), hipMemcpyHostToDevice); } void 
copyAtomTypesToGPU(){ int i, itot, traj; for(i = 0; i < gsystem.N; i++){ gsystem.h_atomTypes[i] = topology.atoms[i].typeId; } for(traj = 1; traj < parameters.Ntr; traj ++){ for(i = 0; i < gsystem.N; i++){ itot = gsystem.N*traj + i; gsystem.h_atomTypes[itot] = gsystem.h_atomTypes[i]; } } hipMemcpy(gsystem.d_atomTypes, gsystem.h_atomTypes, gsystem.Ntot*sizeof(int), hipMemcpyHostToDevice); } void copyCoordinatesFromGPU(bool force){ if(force || lastStepCoordCopied != step){ hipMemcpy(gsystem.h_coord, gsystem.d_coord, gsystem.Ntot*sizeof(float4), hipMemcpyDeviceToHost); lastStepCoordCopied = step; } } void copyVelocitiesFromGPU(){ if(lastStepVelCopied != step){ hipMemcpy(gsystem.h_vel, gsystem.d_vel, gsystem.Ntot*sizeof(float4), hipMemcpyDeviceToHost); lastStepVelCopied = step; } }
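/*
 * Standalone sketch (not part of md.cu): the width-alignment rule used in initGPU() above.
 * Array widths are rounded up to a multiple of the thread-block size so every CUDA block
 * addresses a fully allocated slice. BLOCK_SIZE = 256 is an assumed stand-in for the value
 * defined in the real headers; note that widthSim in the original aligns to 16 instead.
 */
#include <cstdio>

static const int BLOCK_SIZE = 256; // assumption: the actual value comes from md.cuh

static int alignedWidth(int n) {
    // Same arithmetic as gsystem.widthTot: round n up to the next multiple of BLOCK_SIZE
    // unless it is already a multiple.
    return (n % BLOCK_SIZE != 0) ? (n / BLOCK_SIZE + 1) * BLOCK_SIZE : n;
}

int main() {
    printf("%d -> %d\n", 1000, alignedWidth(1000)); // 1000 -> 1024
    printf("%d -> %d\n", 1024, alignedWidth(1024)); // 1024 -> 1024
    return 0;
}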
acff06d3cc2be072a10c11653e98f60973b4847a.cu
/* * md.cu * * Created on: Jul 28, 2010 * Author: zhmurov */ #include "md.cuh" #include "../Util/Cuda.h" #include "../Util/memory.h" #include "../Util/mystl.h" #include "global.h" #include "../Potentials/PeriodicBoundary.cu" #include "../Integrators/LeapFrogIntegrator.cu" #include "../Integrators/SteepestDescentIntegrator.cu" #include "../Potentials/HarmonicPotential.cu" #include "../Potentials/AnglePotential.cu" #include "../Potentials/DihedralPotential.cu" #include "../Potentials/ImproperPotential.cu" #include "../Potentials/NonBondedPotential.cu" #include "../Potentials/SASAPotential.cu" #include "../Potentials/GenBornPotential.cu" #include "../Potentials/LangevinHeatBath.cu" #include "../Potentials/ReplicaExchange.cu" #include "../Potentials/FixForcePotential.cu" #include "../Potentials/UmbrellaSampling.cu" #include "../Potentials/RepulsiveBoundaryPotential.cu" #include "../Potentials/HarmonicConstraints.cu" //#include "../Potentials/GBSWPotential.cu" #include "../Potentials/GBPotential.cu" #include "../Potentials/PullingPlanePotential.cu" #include "../Potentials/PushingPlanePotential.cu" #include "../Potentials/DrumPotential.cu" #include "../Potentials/FragmemPotential.cu" #include "../Updaters/CoordinatesOutputManagerDCD.cu" #include "../Updaters/EnergyOutputManager.cu" #include "../Updaters/PairsListsUpdater.cu" #include "../Updaters/RestartOutputManager.cu" #include "../Updaters/RigidBody.cu" #include "../Updaters/PairsMeshUpdater.cu" #include "../ConstrAlgorithms/shake.cu" #include "../ConstrAlgorithms/ccma.cu" //#include "SOPGPU/SOPGPUParameterizer.cu" class LogMD: public ILog { virtual void Write(const char* message) const { std::cout << makeTimePrefix() << "<md_core> " << message << std::endl; } } log_md; #define LOG LogStream(log_md) Potential** potentials; Updater** updaters; Integrator* integrator; ConstrAlg** constrAlgs; EnergyOutput** energyOutputs; Restarter** restarters; int potentialsCount; int updatersCount; int constrAlgsCount; int energyOutputsCount; int restartersCount; long int lastStepCoordCopied = -1; long int lastStepVelCopied = -1; void initAtomTypesOnGPU(); void copyCoordinatesToGPU(int traj); void copyVelocitiesToGPU(int traj); void copyMassesToGPU(); void copyForcesToGPU(); void copyAtomTypesToGPU(); void copyCoordinatesFromGPU(bool force); void copyVelocitiesFromGPU(); void addRestarter(const char* name, void (*save)(FILE*), void (*load)(FILE*)){ for (const char* i = name; *i; i++) { if (isspace(*i)) { DIE("internal error: restarter name contain whitespace"); } } Restarter* r = (Restarter*)malloc(sizeof(Restarter)); if (!r) { DIE("Out of memory"); } r->save = save; r->load = load; if (snprintf(r->name, sizeof(r->name), "%s", name) >= (int) sizeof(r->name)) { DIE("500 Program internal error"); } restarters[restartersCount++] = r; } void launchRestarters(FILE* keyf) { for (int i = 0; i < restartersCount; i++) { char name[4096]; if (fscanf(keyf, "\n%s\n", name) != 1) DIE("Cannot read restarter #%d", i); if (strcmp(name, restarters[i]->name) != 0) { DIE("Got wrong restarter name '%s', expected '%s'", name, restarters[i]->name); } restarters[i]->load(keyf); } } void dumpTOP(){ TOPData top; top.atomCount = topology.atomCount; top.atoms = (TOPAtom*)calloc(top.atomCount, sizeof(TOPAtom)); int i; for(i = 0; i < topology.atomCount; i++){ top.atoms[i].id = topology.atoms[i].id; sprintf(top.atoms[i].type, "%d", topology.atoms[i].typeId); top.atoms[i].resid = topology.atoms[i].resid; sprintf(top.atoms[i].resName, "%s", topology.atoms[i].resName); 
sprintf(top.atoms[i].name, "%s", topology.atoms[i].name); top.atoms[i].chain = topology.atoms[i].segment[0]; top.atoms[i].charge = topology.atoms[i].charge; top.atoms[i].mass = topology.atoms[i].mass; } top.bondCount = topology.bondCount; top.bonds = (TOPPair*)calloc(top.bondCount, sizeof(TOPPair)); int b; for(b = 0; b < topology.bondCount; b++){ top.bonds[b].i = topology.bonds[b].i; top.bonds[b].j = topology.bonds[b].j; top.bonds[b].func = 1; top.bonds[b].c0 = topology.bonds[b].b0; top.bonds[b].c1 = topology.bonds[b].kb; } top.angleCount = topology.angleCount; top.angles = (TOPAngle*)calloc(top.angleCount, sizeof(TOPAngle)); int a; for(a = 0; a < top.angleCount; a++){ top.angles[a].i = topology.angles[a].i; top.angles[a].j = topology.angles[a].j; top.angles[a].k = topology.angles[a].k; top.angles[a].func = 1; top.angles[a].c0 = topology.angles[a].theta0*180.0/M_PI; top.angles[a].c1 = topology.angles[a].ktheta; } int dihedralCount = 0; int d; for(d = 0; d < topology.dihedralCount; d++){ dihedralCount += topology.dihedrals[d].multiplicity; } for(d = 0; d < topology.improperCount; d++){ dihedralCount += topology.impropers[d].multiplicity; } top.dihedralCount = dihedralCount; top.dihedrals = (TOPDihedral*)calloc(top.dihedralCount, sizeof(TOPDihedral)); dihedralCount = 0; for(d = 0; d < topology.dihedralCount; d++){ for(i = 0; i < topology.dihedrals[d].multiplicity; i++){ top.dihedrals[dihedralCount].i = topology.dihedrals[d].i; top.dihedrals[dihedralCount].j = topology.dihedrals[d].j; top.dihedrals[dihedralCount].k = topology.dihedrals[d].k; top.dihedrals[dihedralCount].l = topology.dihedrals[d].l; top.dihedrals[dihedralCount].func = 1; top.dihedrals[dihedralCount].parCount = 3; top.dihedrals[dihedralCount].c0 = topology.dihedrals[d].delta[i]*180.0/M_PI; top.dihedrals[dihedralCount].c1 = topology.dihedrals[d].kchi[i]; top.dihedrals[dihedralCount].c2 = topology.dihedrals[d].n[i]; dihedralCount++; } } for(d = 0; d < topology.improperCount; d++){ for(i = 0; i < topology.impropers[d].multiplicity; i++){ top.dihedrals[dihedralCount].i = topology.impropers[d].i; top.dihedrals[dihedralCount].j = topology.impropers[d].j; top.dihedrals[dihedralCount].k = topology.impropers[d].k; top.dihedrals[dihedralCount].l = topology.impropers[d].l; top.dihedrals[dihedralCount].func = 2; top.dihedrals[dihedralCount].parCount = 2; top.dihedrals[dihedralCount].c0 = topology.impropers[d].psi0[i]*180.0/M_PI; top.dihedrals[dihedralCount].c1 = topology.impropers[d].kpsi[i]; //top.dihedrals[dihedralCount].c2 = topology.impropers[d].n[i]; dihedralCount++; } } top.exclusionCount = topology.exclusionsCount; top.exclusions = (TOPExclusion*)calloc(top.exclusionCount, sizeof(TOPExclusion)); int e; for(e = 0; e < top.exclusionCount; e++){ top.exclusions[e].i = topology.exclusions[e].i; top.exclusions[e].j = topology.exclusions[e].j; top.exclusions[e].func = 1; } top.pairsCount = 0; writeTOP("dump.top", &top); } void initGPU(){ dumpTOP(); LOG << "Preparing system on a GPU..."; if (parameters.device >= 0) { cudaSetDevice(parameters.device); } else { cudaGetDevice(&parameters.device); // Allow automagic to do all the work } cudaGetDeviceProperties(&deviceProps, parameters.device); gsystem.N = topology.atomCount; gsystem.Nsim = getIntegerParameter(PARAMETER_NSIM, gsystem.N); if(gsystem.Nsim % BLOCK_SIZE != 0){ gsystem.widthSim = (gsystem.Nsim/16 + 1)*16; } else { gsystem.widthSim = gsystem.Nsim; } gsystem.Ntot = gsystem.N*parameters.Ntr; if(gsystem.Ntot % BLOCK_SIZE != 0){ gsystem.widthTot = (gsystem.Ntot/BLOCK_SIZE + 1)*BLOCK_SIZE; 
} else { gsystem.widthTot = gsystem.Ntot; } LOG << "Preparing data for " << parameters.Ntr << " trajectories (" << gsystem.N << " atoms in a system, " << gsystem.Ntot << " total atoms)."; LOG << "Arrays will be aligned to the width of " << gsystem.widthSim << " and total width of " << gsystem.widthTot; allocateCPU((void**)&gsystem.h_coord, gsystem.Ntot*sizeof(float4)); allocateCPU((void**)&gsystem.h_vel, gsystem.Ntot*sizeof(float4)); allocateCPU((void**)&gsystem.h_m, atomTypesCount*sizeof(float)); allocateCPU((void**)&gsystem.h_forces, gsystem.Ntot*sizeof(float4)); allocateCPU((void**)&gsystem.h_atomTypes, gsystem.Ntot*sizeof(int)); allocateGPU((void**)&gsystem.d_coord, gsystem.Ntot*sizeof(float4)); allocateGPU((void**)&gsystem.d_midcoord, gsystem.Ntot*sizeof(float4)); allocateGPU((void**)&gsystem.d_vel, gsystem.Ntot*sizeof(float4)); allocateGPU((void**)&gsystem.d_m, atomTypesCount*sizeof(float)); allocateGPU((void**)&gsystem.d_forces, gsystem.Ntot*sizeof(float4)); allocateGPU((void**)&gsystem.d_atomTypes, gsystem.Ntot*sizeof(int)); cudaMemset(gsystem.d_forces, 0, gsystem.Ntot*sizeof(float4)); cudaMemset(gsystem.d_vel, 0, gsystem.Ntot*sizeof(float4)); potentialsCount = 0; updatersCount = 0; energyOutputsCount = 0; restartersCount = 0; potentials = (Potential**)calloc(MAX_POTENTIALS_COUNT, sizeof(Potential*)); int p, u, i; for(p = 0; p < MAX_POTENTIALS_COUNT; p++){ potentials[p] = (Potential*)malloc(sizeof(Potential*)); } updaters = (Updater**)calloc(MAX_UPDATERS_COUNT, sizeof(Updater*)); for(u = 0; u < MAX_UPDATERS_COUNT; u++){ updaters[u] = (Updater*)malloc(sizeof(Updater*)); } constrAlgs = (ConstrAlg**)calloc(MAX_CONSTR_ALGS_COUNT, sizeof(ConstrAlg*)); for(i = 0; i < MAX_CONSTR_ALGS_COUNT; i++){ constrAlgs[i] = (ConstrAlg*)malloc(sizeof(ConstrAlg*)); } energyOutputs = (EnergyOutput**)calloc(MAX_ENERGY_OUTPUT_COUNT, sizeof(EnergyOutput*)); for(i = 0; i < MAX_ENERGY_OUTPUT_COUNT; i++){ energyOutputs[i] = (EnergyOutput*)malloc(sizeof(EnergyOutput*)); } restarters = (Restarter**)calloc(MAX_RESTARTERS_COUNT, sizeof(Restarter*)); for(i = 0; i < MAX_RESTARTERS_COUNT; i++){ restarters[i] = (Restarter*)malloc(sizeof(Restarter*)); } int traj; char trajnum[10]; char trajFilename[100]; for(traj = 0; traj < parameters.Ntr; traj++){ sprintf(trajnum, "%d", traj + parameters.firstrun); replaceString(trajFilename, parameters.coordFilename, trajnum, "<run>"); readCoordinates(trajFilename); if(strcmp(parameters.velFilename, PARAMETER_STRING_UNDEFINED) != 0){ replaceString(trajFilename, parameters.velFilename, trajnum, "<run>"); readVelocities(trajFilename); } else { float T = getFloatParameter(PARAMETER_INITIAL_TEMPERATURE, -1.0f); if(T == -1.0f){ T = getFloatParameter(PARAMETER_TEMPERATURE, 0.0f); } LOG << "generating velocities with T=" << T << " K"; generateVelocities(T); } copyCoordinatesToGPU(traj); copyVelocitiesToGPU(traj); } checkCUDAError("copying coordinate/velocities to GPU"); copyMassesToGPU(); checkCUDAError("copying masses to GPU"); copyForcesToGPU(); checkCUDAError("copying forces to GPU"); copyAtomTypesToGPU(); checkCUDAError("copying atoms to GPU"); cudaMemcpyToSymbol(c_gsystem, &gsystem, sizeof(GSystem), 0, cudaMemcpyHostToDevice); checkCUDAError("init c_gsystem"); char integratorName[100]; getMaskedParameter(integratorName, PARAMETER_INTEGRATOR, PARAMETER_VALUE_INTEGRATOR_LEAPFROG); if(strcmp(integratorName, PARAMETER_VALUE_INTEGRATOR_STEEPEST_DESCENT) == 0){ LOG << "Steepest Descent integrator requested. 
Energy minimization will be performed."; sd_integrator::create(); } else if(strcmp(integratorName, PARAMETER_VALUE_INTEGRATOR_LEAPFROG) == 0){ LOG << "Leap-Frog integrator will be used."; leapfrog_integrator::create(); } checkCUDAError("init integrator"); char rigidbonds[100]; getMaskedParameter(rigidbonds, PARAMETER_RIGIDBONDS, PARAMETER_VALUE_RIGIDBONDS_NONE); if(strcmp(rigidbonds, PARAMETER_VALUE_RIGIDBONDS_HYDROGEN) == 0){ LOG << "Hbond lengths will be constrained with SHAKE algorithm\n"; shake_constrAlg::create(); } else if(strcmp(rigidbonds, PARAMETER_VALUE_RIGIDBONDS_ALL) == 0){ LOG << "All bond lengths will be constrained with CCMA algorithm\n"; ccma_constrAlg::create(); } else if(strcmp(rigidbonds, PARAMETER_VALUE_RIGIDBONDS_NONE) == 0){ LOG << "No constraints will be applied.\n"; } checkCUDAError("init constraint algorithms"); umbrella_sampling::create(); checkCUDAError("Init umbrella sampling"); harmonic_potential::create(); checkCUDAError("init harmonic potential"); angle_potential::create(); checkCUDAError("init angle potential"); dihedral_potential::create(); checkCUDAError("init dihedral potential"); improper_potential::create(); checkCUDAError("init improper potential"); non_bonded_potential::create(); checkCUDAError("init nonbonded potential"); sasa_potential::create(); checkCUDAError("init SASA potential"); if(getYesNoParameter(PARAMETER_LANGEVIN_ON, DEFAULT_LANGEVIN_ON)){ langevin_heat_bath::create(); checkCUDAError("init Langevin Heat-Bath potential"); } repulsive_boundary::create(); checkCUDAError("init repulsive boundary"); periodic_boundary::create(); checkCUDAError("init periodic boundary"); harmonic_constraints::create(); checkCUDAError("init harmonic constraints boundary"); //gbsw_potential::create(); //checkCUDAError("init GBSW potential"); gb_potential::create(); checkCUDAError("init GB potential"); // Pair list must be updated after we finish LD rigid_body::create(); checkCUDAError("init Rigid Body"); pairslist_updater::create(); checkCUDAError("init pairlist updater"); // Since GenBorn Potential uses pairlists in its initialization genborn_potential::create(); checkCUDAError("init GenBorn potential"); fixforce_potential::create(); checkCUDAError("init Fix-Force potential"); pulling_plane_potential::create(); checkCUDAError("init Pulling-Plane potential"); pushing_plane_potential::create(); checkCUDAError("init Pushing-Plane potential"); drum_potential::create(); checkCUDAError("init Drum potential"); fragmem_potential::create(); checkCUDAError("init FragMem potential"); coordinates_output_dcd::create(); checkCUDAError("init DCD output manager"); energy_output::create(); checkCUDAError("init energy output manager"); // Replica exchange updater must be called after output manager, because the latter computes energies replica_exchange::create(); checkCUDAError("init replica exchange"); restart_output::create(); checkCUDAError("init output manager"); pairsmesh::create(); checkCUDAError("init mesh updater"); cudaBindTexture(0, t_coord, gsystem.d_coord, gsystem.Ntot*sizeof(float4)); cudaBindTexture(0, t_m, gsystem.d_m, atomTypesCount*sizeof(float)); cudaBindTexture(0, t_atomTypes, gsystem.d_atomTypes, gsystem.Ntot*sizeof(float)); checkCUDAError("init"); LOG << "Done preparing system on a GPU."; printMemoryUsed(); } void compute(){ int p, u, nav; nav = updaters[0]->frequency; for(u = 0; u < updatersCount; u++){ nav = GCD(nav, updaters[u]->frequency); } int i, traj; firststep = getLongIntegerParameter(PARAMETER_FIRSTSTEP, 0); step = firststep; // This is now
implemented in parameters.cpp and read from restartkey file //for(traj = 0; traj < parameters.Ntr; traj++){ // trajectoryTime[traj] = firststep*integrator->h; //} printTime(step - firststep); while(step < parameters.numsteps){ for(u = 0; u < updatersCount; u++){ if((step - firststep) % updaters[u]->frequency == 0){ cudaThreadSynchronize(); updaters[u]->update(); checkCUDAError(updaters[u]->name); } } for(i = 0; i < nav; i++){ for(p = 0; p < potentialsCount; p++){ //cudaThreadSynchronize(); potentials[p]->compute(); checkCUDAError(potentials[p]->name); } /*cudaMemcpy(gsystem.h_forces, gsystem.d_forces, gsystem.Ntot*sizeof(float4), cudaMemcpyDeviceToHost); FILE* out = fopen("forces.dat", "w"); for(i = 0; i < gsystem.N; i++){ fprintf(out, "%d\t%f\t%f\t%f\n", i, gsystem.h_forces[i].x, gsystem.h_forces[i].y, gsystem.h_forces[i].z); } fclose(out); exit(0);*/ step++; cudaThreadSynchronize(); integrator->integrate(); cudaThreadSynchronize(); for(p = 0; p < constrAlgsCount; p++){ constrAlgs[p]->compute(); checkCUDAError(constrAlgs[p]->name); cudaThreadSynchronize(); } integrator->finalize(); //checkCUDAError(integrator->name); cudaThreadSynchronize(); } for(traj = 0; traj < parameters.Ntr; traj++){ trajectoryTime[traj] += nav*integrator->h; } } for(u = 0; u < updatersCount; u++){ if(step % updaters[u]->frequency == 0){ cudaThreadSynchronize(); updaters[u]->update(); checkCUDAError(updaters[u]->name); } } checkCUDAError("finalizing"); for(p = 0; p < potentialsCount; p++){ potentials[p]->destroy(); } for(u = 0; u < updatersCount; u++){ updaters[u]->destroy(); } } void copyCoordinatesToGPU(int traj, int reset = 1){ int i; for(i = 0; i < gsystem.N && reset; i++){ gsystem.h_coord[i + traj*gsystem.N].x = topology.atoms[i].x; gsystem.h_coord[i + traj*gsystem.N].y = topology.atoms[i].y; gsystem.h_coord[i + traj*gsystem.N].z = topology.atoms[i].z; gsystem.h_coord[i + traj*gsystem.N].w = (float)topology.atoms[i].typeId; } cudaMemcpy(&gsystem.d_coord[traj*gsystem.N], &gsystem.h_coord[traj*gsystem.N], gsystem.N*sizeof(float4), cudaMemcpyHostToDevice); cudaMemcpy(&gsystem.d_midcoord[traj*gsystem.N], &gsystem.h_coord[traj*gsystem.N], gsystem.N*sizeof(float4), cudaMemcpyHostToDevice);//the coordinates will be overwritten on the first step, we do it to get atomid's in midcoord } void copyCoordinatesToGPU(int traj) { copyCoordinatesToGPU(traj, 1); } void copyVelocitiesToGPU(int traj, int reset = 1){ int i; for(i = 0; i < gsystem.N && reset; i++){ gsystem.h_vel[i + traj*gsystem.N].x = topology.atoms[i].vx; gsystem.h_vel[i + traj*gsystem.N].y = topology.atoms[i].vy; gsystem.h_vel[i + traj*gsystem.N].z = topology.atoms[i].vz; gsystem.h_vel[i + traj*gsystem.N].w = 0.0f; } cudaMemcpy(&gsystem.d_vel[traj*gsystem.N], &gsystem.h_vel[traj*gsystem.N], gsystem.N*sizeof(float4), cudaMemcpyHostToDevice); } void copyVelocitiesToGPU(int traj) { copyVelocitiesToGPU(traj, 1); } void copyMassesToGPU(){ int i; for(i = 0; i < atomTypesCount; i++){ gsystem.h_m[i] = atomTypes[i].mass; } cudaMemcpy(gsystem.d_m, gsystem.h_m, atomTypesCount*sizeof(float), cudaMemcpyHostToDevice); } void copyForcesToGPU(){ int i, itot, traj; for(i = 0; i < gsystem.N; i++){ gsystem.h_forces[i].x = 0.0f; gsystem.h_forces[i].y = 0.0f; gsystem.h_forces[i].z = 0.0f; gsystem.h_forces[i].w = topology.atoms[i].mass; } for(traj = 1; traj < parameters.Ntr; traj ++){ for(i = 0; i < gsystem.N; i++){ itot = gsystem.N*traj + i; gsystem.h_forces[itot] = gsystem.h_forces[i]; } } cudaMemcpy(gsystem.d_forces, gsystem.h_forces, gsystem.Ntot*sizeof(float4), 
cudaMemcpyHostToDevice); } void copyAtomTypesToGPU(){ int i, itot, traj; for(i = 0; i < gsystem.N; i++){ gsystem.h_atomTypes[i] = topology.atoms[i].typeId; } for(traj = 1; traj < parameters.Ntr; traj ++){ for(i = 0; i < gsystem.N; i++){ itot = gsystem.N*traj + i; gsystem.h_atomTypes[itot] = gsystem.h_atomTypes[i]; } } cudaMemcpy(gsystem.d_atomTypes, gsystem.h_atomTypes, gsystem.Ntot*sizeof(int), cudaMemcpyHostToDevice); } void copyCoordinatesFromGPU(bool force){ if(force || lastStepCoordCopied != step){ cudaMemcpy(gsystem.h_coord, gsystem.d_coord, gsystem.Ntot*sizeof(float4), cudaMemcpyDeviceToHost); lastStepCoordCopied = step; } } void copyVelocitiesFromGPU(){ if(lastStepVelCopied != step){ cudaMemcpy(gsystem.h_vel, gsystem.d_vel, gsystem.Ntot*sizeof(float4), cudaMemcpyDeviceToHost); lastStepVelCopied = step; } }
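/*
 * Standalone sketch (not part of md.cu): how compute() above chooses the length of the
 * inner integration loop. Each updater fires every 'frequency' steps, so the integrator
 * is advanced in chunks of nav = GCD of all updater frequencies, and the updater checks
 * at the top of the while loop still hit every schedule exactly. The frequencies below
 * are hypothetical values, and GCD() is a stand-in for the helper used in md.cu.
 */
#include <cstdio>

static int GCD(int a, int b) {
    // Euclid's algorithm
    while (b != 0) { int t = a % b; a = b; b = t; }
    return a;
}

int main() {
    const int frequencies[] = {100, 250, 1000}; // hypothetical updater frequencies
    int nav = frequencies[0];
    for (int u = 1; u < 3; u++) {
        nav = GCD(nav, frequencies[u]);
    }
    printf("inner loop length nav = %d\n", nav); // prints 50
    return 0;
}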
52e506042a706067793dd6f42980dc57280dfd19.hip
// !!! This is a file automatically generated by hipify!!! //OP_FlipFluidBasis //#include <Thinking.h> #include "OP_FlipFluidBasis.h" #include "MatterWaves.h" #include "Calculate.h" //class descriptor class OP_FlipFluidBasis_ClassDesc : public ClassDesc2 { public: int IsPublic() { return FALSE; } void *Create(BOOL loading = FALSE) { return new OP_FlipFluidBasis; } const TCHAR* ClassName() { return GetString(IDS_OP_FLIPFLUIDBASIS_CLASS, hInstance); } SClass_ID SuperClassID() { return REF_TARGET_CLASS_ID; } Class_ID ClassID() { return OP_FLIPFLUIDBASIS_CLASS_ID; } const TCHAR* Category() { return GetString(IDS_CATEGORY_FLIPFLUID, hInstance); } const TCHAR* InternalName() { return _T("OP_FlipFluidBasis"); } HINSTANCE HInstance() { return hInstance; } }; static OP_FlipFluidBasis_ClassDesc op_flipfluidbasis_Desc; ClassDesc* GetOP_FlipFluidBasis_Desc() { return &op_flipfluidbasis_Desc; } //end class descriptor //Parameter #define FLIP_SPGROUP 50 #define FLIP_SPGROUP_SUB 51 #define FLIP_PGROUP 52 #define FLIP_PGROUP_SUB 53 #define FLIP_TYPE 54 #define FLIP_VOXEL_SIZE 55 #define FLIP_SUB_SAMPLES 56 #define FLIP_BOUNDARY_INODES 57 #define FLIP_PGROUP_SUB_DEF FALSE #define FLIP_SPGROUP_SUB_DEF FALSE #define FLIP_TYPE_1 0 #define FLIP_TYPE_2 1 #define FLIP_TYPE_3 2 #define FLIP_TYPE_DEF 0 #define FLIP_TYPE_MIN 0 #define FLIP_TYPE_MAX 2 #define FLIP_VOXEL_SIZE_DEF 0.025f #define FLIP_VOXEL_SIZE_MIN 0.0 #define FLIP_VOXEL_SIZE_MAX MAX_FVALUE #define FLIP_SUB_SAMPLES_DEF 0 #define FLIP_SUB_SAMPLES_MIN 0 #define FLIP_SUB_SAMPLES_MAX MAX_IVALUE //Node Input #define FLIP_ON_IN 0 #define FLIP_TIME_IN 1 #define FLIP_SPGROUP_IN 2 #define FLIP_SPGROUP_SUB_IN 3 #define FLIP_PGROUP_IN 4 #define FLIP_PGROUP_SUB_IN 5 #define FLIP_TYPE_IN 6 #define FLIP_VOXEL_SIZE_IN 7 #define FLIP_SUB_SAMPLES_IN 8 class BoundaryObjectValidator : public PBValidator { public: BOOL Validate(PB2Value& v){ return TRUE; } BOOL Validate(PB2Value& v, ReferenceMaker* owner, ParamID id, int tabIndex) { if(id == FLIP_BOUNDARY_INODES) { if(!v.r) return FALSE; int i; for(i = ((OP_FlipFluidBasis*)owner)->GetParamBlock()->Count(id) -1; i >= 0; i--) { if(v.r == ((OP_FlipFluidBasis*)owner)->GetParamBlock()->GetINode(id, 0, i)) return FALSE; } } return TRUE; } }; BoundaryObjectValidator object_validator; static ParamBlockDesc2 op_flipfluidbasis_ParamBlock( OP_FLIPFLUIDBASIS_PARAM_BLOCK, _T("Parameters"), 0, &op_flipfluidbasis_Desc, P_AUTO_CONSTRUCT, 0, // params FLIP_SPGROUP, _T("SourcePGroup"), TYPE_REFTARG, P_NO_REF, IDS_OP_FLIPFLUIDBASIS_SPGROUP, p_classID, PGROUP_CLASS_ID, p_end, FLIP_SPGROUP_SUB, _T("SourcePGroupSub"), TYPE_BOOL, P_ANIMATABLE, IDS_OP_FLIPFLUIDBASIS_SPGROUP_SUB, p_default, FLIP_SPGROUP_SUB_DEF, p_ui, TYPE_SINGLECHEKBOX, IDC_OP_FLIPFLUIDBASIS_SPGROUP_SUB, p_end, FLIP_PGROUP, _T("TargetPGroup"), TYPE_REFTARG, P_NO_REF, IDS_OP_FLIPFLUIDBASIS_PGROUP, p_classID, PGROUP_CLASS_ID, p_end, FLIP_PGROUP_SUB, _T("TargetPGroupSub"), TYPE_BOOL, P_ANIMATABLE, IDS_OP_FLIPFLUIDBASIS_PGROUP_SUB, p_default, FLIP_PGROUP_SUB_DEF, p_ui, TYPE_SINGLECHEKBOX, IDC_OP_FLIPFLUIDBASIS_PGROUP_SUB, p_end, FLIP_TYPE, _T("Type"), TYPE_INT, 0, IDS_OP_FLIPFLUIDBASIS_TYPE, p_default, FLIP_TYPE_DEF, p_range, FLIP_TYPE_MIN, FLIP_TYPE_MAX, p_ui, TYPE_INTLISTBOX, IDC_OP_FLIPFLUIDBASIS_TYPE, 3, IDS_OP_FLIPFLUIDBASIS_TYPE_1, IDS_OP_FLIPFLUIDBASIS_TYPE_2, IDS_OP_FLIPFLUIDBASIS_TYPE_3, p_end, FLIP_VOXEL_SIZE, _T("VoxelSize"), TYPE_WORLD, P_ANIMATABLE, IDS_OP_FLIPFLUIDBASIS_VOXEL_SIZE, p_default, FLIP_VOXEL_SIZE_DEF, p_range, FLIP_VOXEL_SIZE_MIN, FLIP_VOXEL_SIZE_MAX, 
p_ui, TYPE_SPINNER, EDITTYPE_UNIVERSE, IDC_OP_FLIPFLUIDBASIS_VOXEL_SIZE, IDC_OP_FLIPFLUIDBASIS_VOXEL_SIZE_SPIN, 0.01f, p_end, FLIP_SUB_SAMPLES, _T("SubSamples"), TYPE_INT, P_ANIMATABLE, IDS_OP_FLIPFLUIDBASIS_SUB_SAMPLES, p_default, FLIP_SUB_SAMPLES_DEF, p_range, FLIP_SUB_SAMPLES_MIN, FLIP_SUB_SAMPLES_MAX, p_ui, TYPE_SPINNER, EDITTYPE_INT, IDC_OP_FLIPFLUIDBASIS_SUB_SAMPLES, IDC_OP_FLIPFLUIDBASIS_SUB_SAMPLES_SPIN, 0.01f, p_end, FLIP_BOUNDARY_INODES, _T("BoundaryObjects"), TYPE_INODE_TAB, 0, P_VARIABLE_SIZE, IDS_OP_FLIPFLUIDBASIS_BOUNDARY_INODES, p_validator, &object_validator, p_ui, TYPE_NODELISTBOX, IDC_OP_FLIPFLUIDBASIS_INODE_LIST, IDC_OP_FLIPFLUIDBASIS_PICKNODE, 0, IDC_OP_FLIPFLUIDBASIS_REMOVENODE, p_end, p_end, p_end); int OP_FlipFluidBasis::Update(InOutNode *inout, DYN &dyn) { int ret, changed = 0; INOUT_GetInValue gi; gi.last_ivalid = ivalid; gi.t = inout->GetInTime(); gi.pb = pb2; gi.id = FLIP_SPGROUP; if ((ret = inout->GetInValue(gi, FLIP_SPGROUP_IN, &mSPGroup, dyn)) < 0) return -1; if (ret > 0) { changed = 1; } gi.id = FLIP_SPGROUP_SUB; if ((ret = inout->GetInValue(gi, FLIP_SPGROUP_SUB_IN, &mSPGroupSub, dyn)) < 0) return -1; if (ret > 0) { changed = 1; } gi.id = FLIP_PGROUP; if ((ret = inout->GetInValue(gi, FLIP_PGROUP_IN, &mPGroup, dyn)) < 0) return -1; if (ret > 0) { changed = 1; } gi.id = FLIP_PGROUP_SUB; if ((ret = inout->GetInValue(gi, FLIP_PGROUP_SUB_IN, &mPGroupSub, dyn)) < 0) return -1; if (ret > 0) { changed = 1; } gi.id = FLIP_TYPE; if ((ret = inout->GetInValue(gi, FLIP_TYPE_IN, &mType, dyn)) < 0) return -1; if (ret > 0) { ClampMinMax(mType, FLIP_TYPE_MIN, FLIP_TYPE_MAX); changed = 1; } gi.id = FLIP_VOXEL_SIZE; if ((ret = inout->GetInValue(gi, FLIP_VOXEL_SIZE_IN, &mVoxelSize, dyn)) < 0) return -1; if (ret > 0) { ClampMinMax(mVoxelSize, FLIP_VOXEL_SIZE_MIN, FLIP_VOXEL_SIZE_MAX); changed = 1; } gi.id = FLIP_SUB_SAMPLES; if ((ret = inout->GetInValue(gi, FLIP_SUB_SAMPLES_IN, &mSubSamples, dyn)) < 0) return -1; if (ret > 0) { ClampMinMax(mSubSamples, FLIP_SUB_SAMPLES_MIN, FLIP_SUB_SAMPLES_MAX); changed = 1; } if (!gi.ivalid.Empty()) ivalid = gi.ivalid; return changed; } int OP_FlipFluidBasis::Calculate(int id, void *val, InOutNode *inout, DYN &dyn) { if (id >= 0) { return FALSE; } if (Update(inout, dyn) < 0) return FALSE; if (!mPGroup) return FALSE; FlipFluidBasisThreadData tdata; tdata.dt = float(dyn.global_dt) / float(TIME_TICKSPERSEC); //secound based delta time from tp //Read the particle data from tp ReadParticleDatas(inout, dyn, &tdata); //Read meshes and transformation matrix from max SetupBoundaryDatas(inout, dyn, &tdata); hipSetDevice(0); hipDeviceSynchronize(); //solver->resizeTime(tdata.dt); //improve efficiency on this, cache on GPU with single CuSolver for class, single initialization from first frame particles and keep data on GPU //With a class bool variable it is better to control when to make a initialization //But keep in mind, the data can't left on the GPU, so when the solver is done and anyone use the gpu, for rendering as sample the data are corrupt if (mDoInitial == true) { if (pb2->Count(FLIP_BOUNDARY_INODES)) { Point3 min, max; Box3 bb; bb = mINodeBoundaryDatas[0]->meshnode->GetBoundingBox(); bb = bb * mINodeBoundaryDatas[0]->objToWorld_dt; min = bb.pmin; max = bb.pmax; for (int i = 1; i < pb2->Count(FLIP_BOUNDARY_INODES); ++i) { bb = mINodeBoundaryDatas[i]->meshnode->GetBoundingBox(); bb = bb * mINodeBoundaryDatas[i]->objToWorld_dt; if (bb.pmin.x < min.x) min.x = bb.pmin.x; if (bb.pmin.y < min.y) min.y = bb.pmin.y; if (bb.pmin.z < min.z) min.z = 
bb.pmin.z; if (bb.pmax.x > max.x) max.x = bb.pmax.x; if (bb.pmax.y > max.y) max.y = bb.pmax.y; if (bb.pmax.z > max.z) max.z = bb.pmax.z; } solver->setDimensions(max.x - min.x, max.y - min.y, max.z - min.z); //placeholder dimensions, need to read in geometry dimensions solver->setWorldPosition(min.x, min.y, min.z); solver->setdx(mVoxelSize); } solver->setSubSamples(mSubSamples); if (prevV != nullptr) { delete[] prevV; prevV = nullptr; } if (prevP != nullptr) { delete[] prevP; prevP = nullptr; } mDoInitial = false; } solver->readParticlesFromTP(&tdata, prevV, prevP); hipDeviceSynchronize(); if(mType == 0) solver->advectSingleFrameCPU(); else if (mType == 1) solver->advectSingleFrameGPU(); hipDeviceSynchronize(); solver->writeParticlesToTP(&tdata, prevV, prevP); hipDeviceSynchronize(); //write back the changed velocity of the particles to tp WriteParticleDatas(inout, dyn, &tdata); return TRUE; } void OP_FlipFluidBasis::SetupBoundaryDatas(InOutNode *inout, DYN &dyn, FlipFluidBasisThreadData *tdata) { int i, count; FlipINodeBoundaryData *ndata; INode* inode = NULL; //Check the picked max nodes in the parameter block, load the meshes and update the transformation matrices //mInvalidBoundaryDatas is a bool; at load or create, and when the user picks or removes a max node, this flag is turned on count = pb2->Count(FLIP_BOUNDARY_INODES); if(mInvalidBoundaryDatas == true) { RemoveBoundaryDatas(); mINodeBoundaryDatas.SetCount(count); for(i = 0; i < count; i++) { mINodeBoundaryDatas[i] = NULL; } mInvalidBoundaryDatas = false; } for(i = 0; i < count; i++) { inode = pb2->GetINode(FLIP_BOUNDARY_INODES, dyn.global_time, i); if(!inode) { if(mINodeBoundaryDatas[i]) delete mINodeBoundaryDatas[i]; mINodeBoundaryDatas[i] = NULL; continue; } if(!mINodeBoundaryDatas[i]) { ndata = new FlipINodeBoundaryData(); ndata->objToWorld_dt = inode->GetObjTMAfterWSM(dyn.global_time - dyn.global_dt); ndata->worldToObject_dt = Inverse(ndata->objToWorld_dt); ndata->tm_ivalid.SetEmpty(); mINodeBoundaryDatas[i] = ndata; } else ndata = mINodeBoundaryDatas[i]; if(ndata->meshnode && !ndata->meshnode->ValidInterval().InInterval(dyn.global_time)) { GetGlobalMeshManager()->Remove(ndata->meshnode); ndata->meshnode = NULL; } if(!ndata->meshnode) { ndata->meshnode = GetGlobalMeshManager()->Create(inode, dyn.global_time); //With the function GetMesh() in the TP_MeshNode class you get the max mesh from the node, with all vertex and face information //the vertex coordinates are in object space, to get the current world coordinates you must multiply with the objToWorld_dt matrix } if(!ndata->tm_ivalid.InInterval(dyn.global_time)) { ndata->tm_ivalid.SetInfinite(); ndata->objToWorld = ndata->objToWorld_dt; ndata->worldToObject = ndata->worldToObject_dt; ndata->objToWorld_dt = inode->GetObjTMAfterWSM(dyn.global_time, &ndata->tm_ivalid); ndata->worldToObject_dt = Inverse(ndata->objToWorld_dt); //objToWorld is the last transformation matrix and objToWorld_dt the current one of the node; if the matrix is not animated, both are the same } } } void OP_FlipFluidBasis::RemoveBoundaryDatas() { int count = mINodeBoundaryDatas.Count(); for(int i = 0; i < count; i++) { if(mINodeBoundaryDatas[i]) delete mINodeBoundaryDatas[i]; } mINodeBoundaryDatas.SetCount(0); } void OP_FlipFluidBasis::ReadParticleDatas(InOutNode *inout, DYN &dyn, FlipFluidBasisThreadData *tdata) { Tab<PGroup*> groups; int i, pid, pcount, gcount; CNode *idnode; groups.SetCount(0); if (mPGroupSub) mPGroup->EnumDyn(NULL, DYN_MSG_GROUP_GETALL, &groups); else groups.Append(1,
&mPGroup, 20); gcount = groups.Count(); pcount = 0; for (i = 0; i < gcount; i++) pcount += groups[i]->partcount; if (!pcount) return; tdata->datas.SetCount(pcount); pcount = 0; for (i = 0; i < gcount; i++) { idnode = groups[i]->pidlist.GetFirstNode(); for (; idnode != NULL; idnode = idnode->GetNextNode()) { if (dyn.mastersystem->Alive(((ParticleNode*)idnode)->id)) { pid = ((ParticleNode*)idnode)->id; tdata->datas[pcount].pid = pid; tdata->datas[pcount].pos = dyn.mastersystem->Position(pid); //max have a fixed integer ticks per secound time base, so the velocity must multipled with ticks per secound to get the real speed in secound tdata->datas[pcount].vel = dyn.mastersystem->Velocity(pid) * float(TIME_TICKSPERSEC); //max time values are in integer tick tdata->datas[pcount].dt_factor = dyn.mastersystem->DTFactor(pid); tdata->datas[pcount].mass = dyn.mastersystem->Mass(pid); pcount++; } } } if (tdata->datas.Count() != pcount) tdata->datas.SetCount(pcount, FALSE); } void OP_FlipFluidBasis::WriteParticleDatas(InOutNode *inout, DYN &dyn, FlipFluidBasisThreadData *tdata) { int i, pcount; pcount = tdata->datas.Count(); for (i = 0; i < pcount; i++) { //max have a fixed integer ticks per secound time base, so the velocity must divide with ticks per secound to get the max ticks speed in secound dyn.mastersystem->SetVelocity(tdata->datas[i].pid, tdata->datas[i].vel / float(TIME_TICKSPERSEC)); dyn.mastersystem->SetPosition(tdata->datas[i].pid, tdata->datas[i].pos); /* if (tdata->datas[i].flags & 16) { dyn.mastersystem->Die(tdata->datas[i].pid); } */ } } TCHAR *OP_FlipFluidBasis::GetInOutputName(InOutNode *inout, int id, BOOL input) { return NULL; } BOOL OP_FlipFluidBasis::InitInOutputs(InOutNode *inout) { inout->AddInput(PORT_TYPE_GROUP, _T("SourcePGroup"), INOUT_FLAG_ENABLE | INOUT_FLAG_INVISIBLE); inout->AddInput(PORT_TYPE_BOOL, _T("SourcePGroupSubs"), INOUT_FLAG_ENABLE | INOUT_FLAG_INVISIBLE); inout->AddInput(PORT_TYPE_GROUP, _T("TargetPGroup"), INOUT_FLAG_ENABLE | INOUT_FLAG_INVISIBLE); inout->AddInput(PORT_TYPE_BOOL, _T("TargetPGroupSubs"), INOUT_FLAG_ENABLE | INOUT_FLAG_INVISIBLE); inout->AddInput(PORT_TYPE_INT, _T("Type"), INOUT_FLAG_ENABLE | INOUT_FLAG_INVISIBLE); inout->AddInput(PORT_TYPE_FLOAT, _T("VoxelSize"), INOUT_FLAG_ENABLE | INOUT_FLAG_INVISIBLE); inout->AddInput(PORT_TYPE_INT, _T("SubSamples"), INOUT_FLAG_ENABLE | INOUT_FLAG_INVISIBLE); return TRUE; } TCHAR* OP_FlipFluidBasis::GetName() { return GetString(IDS_OP_FLIPFLUIDBASIS_CLASS, hInstance); } OP_FlipFluidBasis::OP_FlipFluidBasis() { hipSetDevice(1); hipDeviceSynchronize(); pb2 = NULL; op_flipfluidbasis_Desc.MakeAutoParamBlocks(this); pmap = NULL; dlgProc = NULL; mPGroup = NULL; mSPGroup = NULL; solver = new Cu::CuSolver<double>(1, 1, 1, 1, 0.1, 1.0/30); mVoxelSize = 1; prevV = nullptr; prevP = nullptr; mDoInitial = true; mInvalidBoundaryDatas = true; calculateList = NULL;//TP_CreateCalculateList(); //hipSetDevice(1); } OP_FlipFluidBasis::~OP_FlipFluidBasis() { if (calculateList) calculateList->DeleteThis(); delete solver; RemoveBoundaryDatas(); } RefTargetHandle OP_FlipFluidBasis::Clone(RemapDir &remap) { OP_FlipFluidBasis *obj = new OP_FlipFluidBasis; if (!obj) return NULL; obj->ReplaceReference(0, remap.CloneRef(pb2)); return obj; } int OP_FlipFluidBasis::Version() { return 0; } int OP_FlipFluidBasis::SetInReCalculate(int id, InOutNode *inout, DYN &dyn) { dyn.calculate->AddOperator(inout); if (prevV != nullptr) { delete[] prevV; prevV = nullptr; } if (prevP != nullptr) { delete[] prevP; prevP = nullptr; } return 
TRUE;//inout->SetOutReCalculate(-1, dyn); } static inline void DrawCube(GraphicsWindow *gw, Point3 &pmin, Point3 &pmax) { Point3 pts[9]; Point3 tmppt[5]; pts[0] = Point3(pmax[0], pmax[1], pmax[2]); pts[1] = Point3(pmax[0], pmax[1], pmin[2]); pts[2] = Point3(pmax[0], pmin[1], pmin[2]); pts[3] = Point3(pmax[0], pmin[1], pmax[2]); pts[4] = Point3(pmin[0], pmax[1], pmax[2]); pts[5] = Point3(pmin[0], pmax[1], pmin[2]); pts[6] = Point3(pmin[0], pmin[1], pmin[2]); pts[7] = Point3(pmin[0], pmin[1], pmax[2]); int k; gw->polyline(4, pts, NULL, NULL, TRUE, NULL); for (k = 0; k < 4; k++) tmppt[k] = pts[4 + k]; gw->polyline(4, tmppt, NULL, NULL, TRUE, NULL); tmppt[0] = pts[0]; tmppt[1] = pts[4]; gw->polyline(2, tmppt, NULL, NULL, FALSE, NULL); tmppt[0] = pts[1]; tmppt[1] = pts[5]; gw->polyline(2, tmppt, NULL, NULL, FALSE, NULL); tmppt[0] = pts[2]; tmppt[1] = pts[6]; gw->polyline(2, tmppt, NULL, NULL, FALSE, NULL); tmppt[0] = pts[3]; tmppt[1] = pts[7]; gw->polyline(2, tmppt, NULL, NULL, FALSE, NULL); } DynResult OP_FlipFluidBasis::EnumDyn(InOutNode *inout, int message, void *arg) { switch (message) { case DYN_MSG_INOUT_CHANGED: ivalid.SetEmpty(); break; case DYN_MSG_DYNSET_USERMSG: { } break; case DYN_MSG_DYNSET_REPLACE_GROUP: { DYN_MSG_DYNSET_Replace_Group *replace = (DYN_MSG_DYNSET_Replace_Group *)arg; if (GetPGroup() == replace->group) SetPGroup(replace->to_group); if (GetSPGroup() == replace->group) SetSPGroup(replace->to_group); } break; case DYN_MSG_DYNSET_GETALLUSED_GROUPS: { PGroup *group = GetPGroup(); if (group) ((Tab<PGroup*>*)arg)->Append(1, &group, 10); group = GetSPGroup(); if (group) ((Tab<PGroup*>*)arg)->Append(1, &group, 10); } break; case DYN_MSG_GROUP_PREREMOVE: { if (((GroupRemove*)arg)->group->IsSubGroup(GetPGroup())) SetPGroup(NULL); if (((GroupRemove*)arg)->group->IsSubGroup(GetSPGroup())) SetSPGroup(NULL); } break; case DYN_MSG_GROUP_TREECHANGED: if (dlgProc) dlgProc->UpdateGroups(); break; //case DYN_MSG_DISPLAY: //{ // DynDisplay *data = (DynDisplay*)arg; // DWORD origRndLimits; // //DynDisplay *data; // GraphicsWindow *gw; // Color col(1.0f, 0.0f, 0.0f); // Point3 pos(0.0f, 0.0f, 0.0f); // Point3 p[5]; // Point3 pmin, pmax; // bool multipass = false; // gw = data->vpt->getGW(); // //set the drawing transformation, "Matrix3(1)" mean Identity, all points to draw are in world space // gw->setTransform(Matrix3(1)); // origRndLimits = gw->getRndLimits(); // gw->setRndLimits(origRndLimits | GW_WIREFRAME | GW_Z_BUFFER);//make sure wireframe will displayed and the z (depth) buffer will evaluated // if (gw->querySupport(GW_SPT_GEOM_ACCEL)) // { // gw->multiplePass(-1, TRUE); // multipass = true; // } // if (multipass) gw->startMarkers(); // gw->setColor(LINE_COLOR, col.r, col.g, col.b); // gw->marker(&pos, POINT_MRKR); // if (multipass) gw->endMarkers(); // if (multipass) gw->startSegments(); // pmin = Point3(-5.0f, -5.0f, -5.0f); // pmax = Point3(5.0f, 5.0f, 5.0f); // gw->setColor(LINE_COLOR, 1.0f, 1.0f, 0.0f); // DrawCube(gw, pmin, pmax); // p[0] = Point3(0, 0, 0); // p[1] = Point3(10.0f, 0.0f, 0.0f); // gw->setColor(LINE_COLOR, 1.0f, 0.0f, 0.0f); // gw->segment(p, 1); // p[0] = Point3(0, 0, 0); // p[1] = Point3(0.0f, 10.0f, 0.0f); // gw->setColor(LINE_COLOR, 0.0f, 1.0f, 0.0f); // gw->segment(p, 1); // p[0] = Point3(0, 0, 0); // p[1] = Point3(0.0f, 0.0f, 10.0f); // gw->setColor(LINE_COLOR, 0.0f, 0.0f, 1.0f); // gw->segment(p, 1); // if (multipass) gw->endSegments(); // if (multipass) gw->multiplePass(-1, FALSE); // gw->setRndLimits(origRndLimits); //} //break; //case 
DYN_MSG_GETWORLDBOX: //the bounding box in world coordinates, enclosed all points that will displayed in the GraphicsWindow //{ // DynGetBox *data = (DynGetBox*)arg; // Box3 bbox; //empty box // //add points, the box will enclose these point // bbox += Point3(-10.0f, -10.0f, -10.0f); // bbox += Point3(10.0f, 10.0f, 10.0f); // if (!bbox.IsEmpty()) data->box += bbox; //} //break; case DYN_MSG_DYNSET_GETSTARTTIME: { mDoInitial = true; } break; case DYN_MSG_DYNSET_UPDATE: { UpdateInfo *info = (UpdateInfo*)arg; if (info->dyn->calculate) info->dyn->calculate->AddOperator(inout); } break; } return DYN_SUCCEED; } #if GET_MAX_RELEASE(VERSION_3DSMAX) < 17000 RefResult OP_OVDB_Bodyforce::NotifyRefChanged(Interval changeInt, RefTargetHandle hTarget, PartID& partID, RefMessage message) #else RefResult OP_FlipFluidBasis::NotifyRefChanged(const Interval &changeInt, RefTargetHandle hTarget, PartID& partID, RefMessage message, BOOL propagate) #endif { switch (message) { case REFMSG_CHANGE: if (hTarget == pb2) { int index = -1; ivalid.SetEmpty(); switch (pb2->LastNotifyParamID(index)) { case FLIP_PGROUP: case FLIP_SPGROUP: if (dlgProc) dlgProc->UpdateGroups(); break; case FLIP_VOXEL_SIZE: mDoInitial = true; break; case FLIP_BOUNDARY_INODES: { if (index >= 0 && !pb2->GetINode(FLIP_BOUNDARY_INODES, 0, index)) { pb2->EnableNotifications(FALSE); pb2->Delete(FLIP_BOUNDARY_INODES, index, 1); pb2->EnableNotifications(TRUE); } if(pb2->Count(FLIP_BOUNDARY_INODES) != mINodeBoundaryDatas.Count()) mInvalidBoundaryDatas = true; } break; } } break; } return REF_SUCCEED; } int OP_FlipFluidBasis::NumRefs() { return 1; } RefTargetHandle OP_FlipFluidBasis::GetReference(int i) { return pb2; } void OP_FlipFluidBasis::SetReference(int i, RefTargetHandle rtarg) { pb2 = (IParamBlock2*)rtarg; } int OP_FlipFluidBasis::NumSubs() { return 1; } Animatable* OP_FlipFluidBasis::SubAnim(int i) { return pb2; } TSTR OP_FlipFluidBasis::SubAnimName(int i) { return _T("Parameter"); } void OP_FlipFluidBasis::BeginEditParams(IObjParam *ip, ULONG flags, Animatable *prev) { if (pmap) { if (dlgProc) dlgProc->SetObject(this); else { dlgProc = new OP_FlipFluidBasis_DlgProc(this); pmap->SetUserDlgProc(dlgProc); } pmap->SetParamBlock(pb2); } else { dlgProc = new OP_FlipFluidBasis_DlgProc(this); pmap = CreateRParamMap2(pb2, GetRightIRendParams(), hInstance, MAKEINTRESOURCE(IDD_OP_FLIPFLUIDBASIS_UI), GetName(), 0, dlgProc); } } void OP_FlipFluidBasis::EndEditParams(IObjParam *ip, ULONG flags, Animatable *next) { if (flags & END_EDIT_REMOVEUI) { if (pmap) { DestroyRParamMap2(pmap); pmap = NULL; } dlgProc = NULL; } } int OP_FlipFluidBasis::Type() { return DYN_TYPE_OPERATOR; } Class_ID OP_FlipFluidBasis::ClassID() { return OP_FLIPFLUIDBASIS_CLASS_ID; } void OP_FlipFluidBasis::GetClassName(TSTR& s) { s = GetString(IDS_OP_FLIPFLUIDBASIS_CLASS, hInstance); } int OP_FlipFluidBasis::NumParamBlocks() { return 1; } IParamBlock2* OP_FlipFluidBasis::GetParamBlock(int i) { return pb2; } IParamBlock2* OP_FlipFluidBasis::GetParamBlockByID(short id) { return id ? 
NULL : pb2; } void OP_FlipFluidBasis::DeleteThis() { delete this; } void OP_FlipFluidBasis::SetPGroup(PGroup *group) { pb2->SetValue(FLIP_PGROUP, 0, (ReferenceTarget*)group); } PGroup *OP_FlipFluidBasis::GetPGroup() { return (PGroup*)pb2->GetReferenceTarget(FLIP_PGROUP, 0); } void OP_FlipFluidBasis::SetSPGroup(PGroup *group) { pb2->SetValue(FLIP_SPGROUP, 0, (ReferenceTarget*)group); } PGroup *OP_FlipFluidBasis::GetSPGroup() { return (PGroup*)pb2->GetReferenceTarget(FLIP_SPGROUP, 0); } OP_FlipFluidBasis_DlgProc::OP_FlipFluidBasis_DlgProc(OP_FlipFluidBasis *p) { map = NULL; op = p; SBox = NULL; GBox = NULL; } void OP_FlipFluidBasis_DlgProc::SetObject(OP_FlipFluidBasis *p) { op = p; } void OP_FlipFluidBasis_DlgProc::DeleteThis() { delete this; } void OP_FlipFluidBasis_DlgProc::UpdateGroups() { Tab<PGroup*> gtab; SetWindowRedraw(GBox, FALSE); SetWindowRedraw(SBox, FALSE); SendMessage(GBox, CB_RESETCONTENT, 0, 0); SendMessage(SBox, CB_RESETCONTENT, 0, 0); SendMessage(GBox, CB_ADDSTRING, 0, (LPARAM)_T("None")); SendMessage(SBox, CB_ADDSTRING, 0, (LPARAM)_T("None")); info.groupmanager->EnumDyn(NULL, DYN_MSG_GROUP_GETALL, (void*)&gtab); for (int i = 0; i < gtab.Count(); i++) { SendMessage(GBox, CB_ADDSTRING, 0, (LPARAM)gtab[i]->GetName()); SendMessage(GBox, CB_SETITEMDATA, (WPARAM)gtab[i]->GetGroupID() + 1, (LPARAM)gtab[i]); SendMessage(SBox, CB_ADDSTRING, 0, (LPARAM)gtab[i]->GetName()); SendMessage(SBox, CB_SETITEMDATA, (WPARAM)gtab[i]->GetGroupID() + 1, (LPARAM)gtab[i]); } if (op->GetPGroup()) SendMessage(GBox, CB_SETCURSEL, op->GetPGroup()->GetGroupID() + 1, 0); else SendMessage(GBox, CB_SETCURSEL, 0, 0); if (op->GetSPGroup()) SendMessage(SBox, CB_SETCURSEL, op->GetSPGroup()->GetGroupID() + 1, 0); else SendMessage(SBox, CB_SETCURSEL, 0, 0); SetWindowRedraw(GBox, TRUE); SetWindowRedraw(SBox, TRUE); } INT_PTR OP_FlipFluidBasis_DlgProc::DlgProc(TimeValue t, IParamMap2 *map, HWND hWnd, UINT msg, WPARAM wParam, LPARAM lParam) { switch (msg) { case WM_INITDIALOG: { this->map = map; GBox = GetDlgItem(hWnd, IDC_OP_FLIPFLUIDBASIS_PGROUP); SBox = GetDlgItem(hWnd, IDC_OP_FLIPFLUIDBASIS_SPGROUP); op->NotifyDependents(FOREVER, (PartID)&info, REFMSG_DYN_GETINFO); UpdateGroups(); return TRUE; } case WM_DESTROY: break; case WM_COMMAND: switch (LOWORD(wParam)) { case IDC_OP_FLIPFLUIDBASIS_PGROUP: switch (HIWORD(wParam)) { case CBN_SELCHANGE: { int i = ComboBox_GetCurSel(GBox); PGroup *group = (PGroup*)ComboBox_GetItemData(GBox, i); op->SetPGroup(i ? group : NULL); } break; } break; case IDC_OP_FLIPFLUIDBASIS_SPGROUP: switch (HIWORD(wParam)) { case CBN_SELCHANGE: { int i = ComboBox_GetCurSel(SBox); PGroup *group = (PGroup*)ComboBox_GetItemData(SBox, i); op->SetSPGroup(i ? group : NULL); } break; } break; } break; } return FALSE; } ///////////
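/*
 * Standalone sketch (not part of OP_FlipFluidBasis.cu): the unit conversion performed in
 * ReadParticleDatas() and WriteParticleDatas(). 3ds Max stores time as integer ticks
 * (TIME_TICKSPERSEC ticks per second), so velocities read from the master system are
 * multiplied by TIME_TICKSPERSEC to obtain units-per-second for the solver and divided
 * by it again before being written back. The value 4800 below is an assumption standing
 * in for the SDK constant.
 */
#include <cstdio>

static const float TIME_TICKSPERSEC = 4800.0f; // assumed stand-in for the 3ds Max constant

int main() {
    const float vel_per_tick = 0.01f;                           // velocity as stored by Max (units per tick)
    const float vel_per_sec  = vel_per_tick * TIME_TICKSPERSEC; // solver-side velocity (units per second)
    const float back_to_tick = vel_per_sec / TIME_TICKSPERSEC;  // value written back to Max
    printf("per tick: %f, per second: %f, round trip: %f\n",
           vel_per_tick, vel_per_sec, back_to_tick);
    return 0;
}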
52e506042a706067793dd6f42980dc57280dfd19.cu
//OP_FlipFluidBasis //#include <Thinking.h> #include "OP_FlipFluidBasis.h" #include "MatterWaves.h" #include "Calculate.h" //class descriptor class OP_FlipFluidBasis_ClassDesc : public ClassDesc2 { public: int IsPublic() { return FALSE; } void *Create(BOOL loading = FALSE) { return new OP_FlipFluidBasis; } const TCHAR* ClassName() { return GetString(IDS_OP_FLIPFLUIDBASIS_CLASS, hInstance); } SClass_ID SuperClassID() { return REF_TARGET_CLASS_ID; } Class_ID ClassID() { return OP_FLIPFLUIDBASIS_CLASS_ID; } const TCHAR* Category() { return GetString(IDS_CATEGORY_FLIPFLUID, hInstance); } const TCHAR* InternalName() { return _T("OP_FlipFluidBasis"); } HINSTANCE HInstance() { return hInstance; } }; static OP_FlipFluidBasis_ClassDesc op_flipfluidbasis_Desc; ClassDesc* GetOP_FlipFluidBasis_Desc() { return &op_flipfluidbasis_Desc; } //end class descriptor //Parameter #define FLIP_SPGROUP 50 #define FLIP_SPGROUP_SUB 51 #define FLIP_PGROUP 52 #define FLIP_PGROUP_SUB 53 #define FLIP_TYPE 54 #define FLIP_VOXEL_SIZE 55 #define FLIP_SUB_SAMPLES 56 #define FLIP_BOUNDARY_INODES 57 #define FLIP_PGROUP_SUB_DEF FALSE #define FLIP_SPGROUP_SUB_DEF FALSE #define FLIP_TYPE_1 0 #define FLIP_TYPE_2 1 #define FLIP_TYPE_3 2 #define FLIP_TYPE_DEF 0 #define FLIP_TYPE_MIN 0 #define FLIP_TYPE_MAX 2 #define FLIP_VOXEL_SIZE_DEF 0.025f #define FLIP_VOXEL_SIZE_MIN 0.0 #define FLIP_VOXEL_SIZE_MAX MAX_FVALUE #define FLIP_SUB_SAMPLES_DEF 0 #define FLIP_SUB_SAMPLES_MIN 0 #define FLIP_SUB_SAMPLES_MAX MAX_IVALUE //Node Input #define FLIP_ON_IN 0 #define FLIP_TIME_IN 1 #define FLIP_SPGROUP_IN 2 #define FLIP_SPGROUP_SUB_IN 3 #define FLIP_PGROUP_IN 4 #define FLIP_PGROUP_SUB_IN 5 #define FLIP_TYPE_IN 6 #define FLIP_VOXEL_SIZE_IN 7 #define FLIP_SUB_SAMPLES_IN 8 class BoundaryObjectValidator : public PBValidator { public: BOOL Validate(PB2Value& v){ return TRUE; } BOOL Validate(PB2Value& v, ReferenceMaker* owner, ParamID id, int tabIndex) { if(id == FLIP_BOUNDARY_INODES) { if(!v.r) return FALSE; int i; for(i = ((OP_FlipFluidBasis*)owner)->GetParamBlock()->Count(id) -1; i >= 0; i--) { if(v.r == ((OP_FlipFluidBasis*)owner)->GetParamBlock()->GetINode(id, 0, i)) return FALSE; } } return TRUE; } }; BoundaryObjectValidator object_validator; static ParamBlockDesc2 op_flipfluidbasis_ParamBlock( OP_FLIPFLUIDBASIS_PARAM_BLOCK, _T("Parameters"), 0, &op_flipfluidbasis_Desc, P_AUTO_CONSTRUCT, 0, // params FLIP_SPGROUP, _T("SourcePGroup"), TYPE_REFTARG, P_NO_REF, IDS_OP_FLIPFLUIDBASIS_SPGROUP, p_classID, PGROUP_CLASS_ID, p_end, FLIP_SPGROUP_SUB, _T("SourcePGroupSub"), TYPE_BOOL, P_ANIMATABLE, IDS_OP_FLIPFLUIDBASIS_SPGROUP_SUB, p_default, FLIP_SPGROUP_SUB_DEF, p_ui, TYPE_SINGLECHEKBOX, IDC_OP_FLIPFLUIDBASIS_SPGROUP_SUB, p_end, FLIP_PGROUP, _T("TargetPGroup"), TYPE_REFTARG, P_NO_REF, IDS_OP_FLIPFLUIDBASIS_PGROUP, p_classID, PGROUP_CLASS_ID, p_end, FLIP_PGROUP_SUB, _T("TargetPGroupSub"), TYPE_BOOL, P_ANIMATABLE, IDS_OP_FLIPFLUIDBASIS_PGROUP_SUB, p_default, FLIP_PGROUP_SUB_DEF, p_ui, TYPE_SINGLECHEKBOX, IDC_OP_FLIPFLUIDBASIS_PGROUP_SUB, p_end, FLIP_TYPE, _T("Type"), TYPE_INT, 0, IDS_OP_FLIPFLUIDBASIS_TYPE, p_default, FLIP_TYPE_DEF, p_range, FLIP_TYPE_MIN, FLIP_TYPE_MAX, p_ui, TYPE_INTLISTBOX, IDC_OP_FLIPFLUIDBASIS_TYPE, 3, IDS_OP_FLIPFLUIDBASIS_TYPE_1, IDS_OP_FLIPFLUIDBASIS_TYPE_2, IDS_OP_FLIPFLUIDBASIS_TYPE_3, p_end, FLIP_VOXEL_SIZE, _T("VoxelSize"), TYPE_WORLD, P_ANIMATABLE, IDS_OP_FLIPFLUIDBASIS_VOXEL_SIZE, p_default, FLIP_VOXEL_SIZE_DEF, p_range, FLIP_VOXEL_SIZE_MIN, FLIP_VOXEL_SIZE_MAX, p_ui, TYPE_SPINNER, EDITTYPE_UNIVERSE, 
IDC_OP_FLIPFLUIDBASIS_VOXEL_SIZE, IDC_OP_FLIPFLUIDBASIS_VOXEL_SIZE_SPIN, 0.01f, p_end, FLIP_SUB_SAMPLES, _T("SubSamples"), TYPE_INT, P_ANIMATABLE, IDS_OP_FLIPFLUIDBASIS_SUB_SAMPLES, p_default, FLIP_SUB_SAMPLES_DEF, p_range, FLIP_SUB_SAMPLES_MIN, FLIP_SUB_SAMPLES_MAX, p_ui, TYPE_SPINNER, EDITTYPE_INT, IDC_OP_FLIPFLUIDBASIS_SUB_SAMPLES, IDC_OP_FLIPFLUIDBASIS_SUB_SAMPLES_SPIN, 0.01f, p_end, FLIP_BOUNDARY_INODES, _T("BoundaryObjects"), TYPE_INODE_TAB, 0, P_VARIABLE_SIZE, IDS_OP_FLIPFLUIDBASIS_BOUNDARY_INODES, p_validator, &object_validator, p_ui, TYPE_NODELISTBOX, IDC_OP_FLIPFLUIDBASIS_INODE_LIST, IDC_OP_FLIPFLUIDBASIS_PICKNODE, 0, IDC_OP_FLIPFLUIDBASIS_REMOVENODE, p_end, p_end, p_end); int OP_FlipFluidBasis::Update(InOutNode *inout, DYN &dyn) { int ret, changed = 0; INOUT_GetInValue gi; gi.last_ivalid = ivalid; gi.t = inout->GetInTime(); gi.pb = pb2; gi.id = FLIP_SPGROUP; if ((ret = inout->GetInValue(gi, FLIP_SPGROUP_IN, &mSPGroup, dyn)) < 0) return -1; if (ret > 0) { changed = 1; } gi.id = FLIP_SPGROUP_SUB; if ((ret = inout->GetInValue(gi, FLIP_SPGROUP_SUB_IN, &mSPGroupSub, dyn)) < 0) return -1; if (ret > 0) { changed = 1; } gi.id = FLIP_PGROUP; if ((ret = inout->GetInValue(gi, FLIP_PGROUP_IN, &mPGroup, dyn)) < 0) return -1; if (ret > 0) { changed = 1; } gi.id = FLIP_PGROUP_SUB; if ((ret = inout->GetInValue(gi, FLIP_PGROUP_SUB_IN, &mPGroupSub, dyn)) < 0) return -1; if (ret > 0) { changed = 1; } gi.id = FLIP_TYPE; if ((ret = inout->GetInValue(gi, FLIP_TYPE_IN, &mType, dyn)) < 0) return -1; if (ret > 0) { ClampMinMax(mType, FLIP_TYPE_MIN, FLIP_TYPE_MAX); changed = 1; } gi.id = FLIP_VOXEL_SIZE; if ((ret = inout->GetInValue(gi, FLIP_VOXEL_SIZE_IN, &mVoxelSize, dyn)) < 0) return -1; if (ret > 0) { ClampMinMax(mVoxelSize, FLIP_VOXEL_SIZE_MIN, FLIP_VOXEL_SIZE_MAX); changed = 1; } gi.id = FLIP_SUB_SAMPLES; if ((ret = inout->GetInValue(gi, FLIP_SUB_SAMPLES_IN, &mSubSamples, dyn)) < 0) return -1; if (ret > 0) { ClampMinMax(mSubSamples, FLIP_SUB_SAMPLES_MIN, FLIP_SUB_SAMPLES_MAX); changed = 1; } if (!gi.ivalid.Empty()) ivalid = gi.ivalid; return changed; } int OP_FlipFluidBasis::Calculate(int id, void *val, InOutNode *inout, DYN &dyn) { if (id >= 0) { return FALSE; } if (Update(inout, dyn) < 0) return FALSE; if (!mPGroup) return FALSE; FlipFluidBasisThreadData tdata; tdata.dt = float(dyn.global_dt) / float(TIME_TICKSPERSEC); //secound based delta time from tp //Read the particle data from tp ReadParticleDatas(inout, dyn, &tdata); //Read meshes and transformation matrix from max SetupBoundaryDatas(inout, dyn, &tdata); cudaSetDevice(0); cudaDeviceSynchronize(); //solver->resizeTime(tdata.dt); //improve efficiency on this, cache on GPU with single CuSolver for class, single initialization from first frame particles and keep data on GPU //With a class bool variable it is better to control when to make a initialization //But keep in mind, the data can't left on the GPU, so when the solver is done and anyone use the gpu, for rendering as sample the data are corrupt if (mDoInitial == true) { if (pb2->Count(FLIP_BOUNDARY_INODES)) { Point3 min, max; Box3 bb; bb = mINodeBoundaryDatas[0]->meshnode->GetBoundingBox(); bb = bb * mINodeBoundaryDatas[0]->objToWorld_dt; min = bb.pmin; max = bb.pmax; for (int i = 1; i < pb2->Count(FLIP_BOUNDARY_INODES); ++i) { bb = mINodeBoundaryDatas[i]->meshnode->GetBoundingBox(); bb = bb * mINodeBoundaryDatas[i]->objToWorld_dt; if (bb.pmin.x < min.x) min.x = bb.pmin.x; if (bb.pmin.y < min.y) min.y = bb.pmin.y; if (bb.pmin.z < min.z) min.z = bb.pmin.z; if (bb.pmax.x > max.x) max.x =
bb.pmax.x; if (bb.pmax.y > max.y) max.y = bb.pmax.y; if (bb.pmax.z > max.z) max.z = bb.pmax.z; } solver->setDimensions(max.x - min.x, max.y - min.y, max.z - min.z); //placeholder dimensions, need to read in geometry dimensions solver->setWorldPosition(min.x, min.y, min.z); solver->setdx(mVoxelSize); } solver->setSubSamples(mSubSamples); if (prevV != nullptr) { delete[] prevV; prevV = nullptr; } if (prevP != nullptr) { delete[] prevP; prevP = nullptr; } mDoInitial = false; } solver->readParticlesFromTP(&tdata, prevV, prevP); cudaDeviceSynchronize(); if(mType == 0) solver->advectSingleFrameCPU(); else if (mType == 1) solver->advectSingleFrameGPU(); cudaDeviceSynchronize(); solver->writeParticlesToTP(&tdata, prevV, prevP); cudaDeviceSynchronize(); //write back the changed velocity of the particles to tp WriteParticleDatas(inout, dyn, &tdata); return TRUE; } void OP_FlipFluidBasis::SetupBoundaryDatas(InOutNode *inout, DYN &dyn, FlipFluidBasisThreadData *tdata) { int i, count; FlipINodeBoundaryData *ndata; INode* inode = NULL; //Check the picked max nodes in the parameter block, load the meshes and update the transformation matrices //mInvalidBoundaryDatas is a bool; at load or create, and when the user picks or removes a max node, this flag is turned on count = pb2->Count(FLIP_BOUNDARY_INODES); if(mInvalidBoundaryDatas == true) { RemoveBoundaryDatas(); mINodeBoundaryDatas.SetCount(count); for(i = 0; i < count; i++) { mINodeBoundaryDatas[i] = NULL; } mInvalidBoundaryDatas = false; } for(i = 0; i < count; i++) { inode = pb2->GetINode(FLIP_BOUNDARY_INODES, dyn.global_time, i); if(!inode) { if(mINodeBoundaryDatas[i]) delete mINodeBoundaryDatas[i]; mINodeBoundaryDatas[i] = NULL; continue; } if(!mINodeBoundaryDatas[i]) { ndata = new FlipINodeBoundaryData(); ndata->objToWorld_dt = inode->GetObjTMAfterWSM(dyn.global_time - dyn.global_dt); ndata->worldToObject_dt = Inverse(ndata->objToWorld_dt); ndata->tm_ivalid.SetEmpty(); mINodeBoundaryDatas[i] = ndata; } else ndata = mINodeBoundaryDatas[i]; if(ndata->meshnode && !ndata->meshnode->ValidInterval().InInterval(dyn.global_time)) { GetGlobalMeshManager()->Remove(ndata->meshnode); ndata->meshnode = NULL; } if(!ndata->meshnode) { ndata->meshnode = GetGlobalMeshManager()->Create(inode, dyn.global_time); //With the function GetMesh() in the TP_MeshNode class you get the max mesh from the node, with all vertex and face information //the vertex coordinates are in object space, to get the current world coordinates you must multiply with the objToWorld_dt matrix } if(!ndata->tm_ivalid.InInterval(dyn.global_time)) { ndata->tm_ivalid.SetInfinite(); ndata->objToWorld = ndata->objToWorld_dt; ndata->worldToObject = ndata->worldToObject_dt; ndata->objToWorld_dt = inode->GetObjTMAfterWSM(dyn.global_time, &ndata->tm_ivalid); ndata->worldToObject_dt = Inverse(ndata->objToWorld_dt); //objToWorld is the last transformation matrix and objToWorld_dt the current one of the node; if the matrix is not animated, both are the same } } } void OP_FlipFluidBasis::RemoveBoundaryDatas() { int count = mINodeBoundaryDatas.Count(); for(int i = 0; i < count; i++) { if(mINodeBoundaryDatas[i]) delete mINodeBoundaryDatas[i]; } mINodeBoundaryDatas.SetCount(0); } void OP_FlipFluidBasis::ReadParticleDatas(InOutNode *inout, DYN &dyn, FlipFluidBasisThreadData *tdata) { Tab<PGroup*> groups; int i, pid, pcount, gcount; CNode *idnode; groups.SetCount(0); if (mPGroupSub) mPGroup->EnumDyn(NULL, DYN_MSG_GROUP_GETALL, &groups); else groups.Append(1, &mPGroup, 20); gcount = groups.Count(); pcount
= 0; for (i = 0; i < gcount; i++) pcount += groups[i]->partcount; if (!pcount) return; tdata->datas.SetCount(pcount); pcount = 0; for (i = 0; i < gcount; i++) { idnode = groups[i]->pidlist.GetFirstNode(); for (; idnode != NULL; idnode = idnode->GetNextNode()) { if (dyn.mastersystem->Alive(((ParticleNode*)idnode)->id)) { pid = ((ParticleNode*)idnode)->id; tdata->datas[pcount].pid = pid; tdata->datas[pcount].pos = dyn.mastersystem->Position(pid); //max have a fixed integer ticks per secound time base, so the velocity must multipled with ticks per secound to get the real speed in secound tdata->datas[pcount].vel = dyn.mastersystem->Velocity(pid) * float(TIME_TICKSPERSEC); //max time values are in integer tick tdata->datas[pcount].dt_factor = dyn.mastersystem->DTFactor(pid); tdata->datas[pcount].mass = dyn.mastersystem->Mass(pid); pcount++; } } } if (tdata->datas.Count() != pcount) tdata->datas.SetCount(pcount, FALSE); } void OP_FlipFluidBasis::WriteParticleDatas(InOutNode *inout, DYN &dyn, FlipFluidBasisThreadData *tdata) { int i, pcount; pcount = tdata->datas.Count(); for (i = 0; i < pcount; i++) { //max have a fixed integer ticks per secound time base, so the velocity must divide with ticks per secound to get the max ticks speed in secound dyn.mastersystem->SetVelocity(tdata->datas[i].pid, tdata->datas[i].vel / float(TIME_TICKSPERSEC)); dyn.mastersystem->SetPosition(tdata->datas[i].pid, tdata->datas[i].pos); /* if (tdata->datas[i].flags & 16) { dyn.mastersystem->Die(tdata->datas[i].pid); } */ } } TCHAR *OP_FlipFluidBasis::GetInOutputName(InOutNode *inout, int id, BOOL input) { return NULL; } BOOL OP_FlipFluidBasis::InitInOutputs(InOutNode *inout) { inout->AddInput(PORT_TYPE_GROUP, _T("SourcePGroup"), INOUT_FLAG_ENABLE | INOUT_FLAG_INVISIBLE); inout->AddInput(PORT_TYPE_BOOL, _T("SourcePGroupSubs"), INOUT_FLAG_ENABLE | INOUT_FLAG_INVISIBLE); inout->AddInput(PORT_TYPE_GROUP, _T("TargetPGroup"), INOUT_FLAG_ENABLE | INOUT_FLAG_INVISIBLE); inout->AddInput(PORT_TYPE_BOOL, _T("TargetPGroupSubs"), INOUT_FLAG_ENABLE | INOUT_FLAG_INVISIBLE); inout->AddInput(PORT_TYPE_INT, _T("Type"), INOUT_FLAG_ENABLE | INOUT_FLAG_INVISIBLE); inout->AddInput(PORT_TYPE_FLOAT, _T("VoxelSize"), INOUT_FLAG_ENABLE | INOUT_FLAG_INVISIBLE); inout->AddInput(PORT_TYPE_INT, _T("SubSamples"), INOUT_FLAG_ENABLE | INOUT_FLAG_INVISIBLE); return TRUE; } TCHAR* OP_FlipFluidBasis::GetName() { return GetString(IDS_OP_FLIPFLUIDBASIS_CLASS, hInstance); } OP_FlipFluidBasis::OP_FlipFluidBasis() { cudaSetDevice(1); cudaDeviceSynchronize(); pb2 = NULL; op_flipfluidbasis_Desc.MakeAutoParamBlocks(this); pmap = NULL; dlgProc = NULL; mPGroup = NULL; mSPGroup = NULL; solver = new Cu::CuSolver<double>(1, 1, 1, 1, 0.1, 1.0/30); mVoxelSize = 1; prevV = nullptr; prevP = nullptr; mDoInitial = true; mInvalidBoundaryDatas = true; calculateList = NULL;//TP_CreateCalculateList(); //cudaSetDevice(1); } OP_FlipFluidBasis::~OP_FlipFluidBasis() { if (calculateList) calculateList->DeleteThis(); delete solver; RemoveBoundaryDatas(); } RefTargetHandle OP_FlipFluidBasis::Clone(RemapDir &remap) { OP_FlipFluidBasis *obj = new OP_FlipFluidBasis; if (!obj) return NULL; obj->ReplaceReference(0, remap.CloneRef(pb2)); return obj; } int OP_FlipFluidBasis::Version() { return 0; } int OP_FlipFluidBasis::SetInReCalculate(int id, InOutNode *inout, DYN &dyn) { dyn.calculate->AddOperator(inout); if (prevV != nullptr) { delete[] prevV; prevV = nullptr; } if (prevP != nullptr) { delete[] prevP; prevP = nullptr; } return TRUE;//inout->SetOutReCalculate(-1, dyn); } static inline 
void DrawCube(GraphicsWindow *gw, Point3 &pmin, Point3 &pmax) { Point3 pts[9]; Point3 tmppt[5]; pts[0] = Point3(pmax[0], pmax[1], pmax[2]); pts[1] = Point3(pmax[0], pmax[1], pmin[2]); pts[2] = Point3(pmax[0], pmin[1], pmin[2]); pts[3] = Point3(pmax[0], pmin[1], pmax[2]); pts[4] = Point3(pmin[0], pmax[1], pmax[2]); pts[5] = Point3(pmin[0], pmax[1], pmin[2]); pts[6] = Point3(pmin[0], pmin[1], pmin[2]); pts[7] = Point3(pmin[0], pmin[1], pmax[2]); int k; gw->polyline(4, pts, NULL, NULL, TRUE, NULL); for (k = 0; k < 4; k++) tmppt[k] = pts[4 + k]; gw->polyline(4, tmppt, NULL, NULL, TRUE, NULL); tmppt[0] = pts[0]; tmppt[1] = pts[4]; gw->polyline(2, tmppt, NULL, NULL, FALSE, NULL); tmppt[0] = pts[1]; tmppt[1] = pts[5]; gw->polyline(2, tmppt, NULL, NULL, FALSE, NULL); tmppt[0] = pts[2]; tmppt[1] = pts[6]; gw->polyline(2, tmppt, NULL, NULL, FALSE, NULL); tmppt[0] = pts[3]; tmppt[1] = pts[7]; gw->polyline(2, tmppt, NULL, NULL, FALSE, NULL); } DynResult OP_FlipFluidBasis::EnumDyn(InOutNode *inout, int message, void *arg) { switch (message) { case DYN_MSG_INOUT_CHANGED: ivalid.SetEmpty(); break; case DYN_MSG_DYNSET_USERMSG: { } break; case DYN_MSG_DYNSET_REPLACE_GROUP: { DYN_MSG_DYNSET_Replace_Group *replace = (DYN_MSG_DYNSET_Replace_Group *)arg; if (GetPGroup() == replace->group) SetPGroup(replace->to_group); if (GetSPGroup() == replace->group) SetSPGroup(replace->to_group); } break; case DYN_MSG_DYNSET_GETALLUSED_GROUPS: { PGroup *group = GetPGroup(); if (group) ((Tab<PGroup*>*)arg)->Append(1, &group, 10); group = GetSPGroup(); if (group) ((Tab<PGroup*>*)arg)->Append(1, &group, 10); } break; case DYN_MSG_GROUP_PREREMOVE: { if (((GroupRemove*)arg)->group->IsSubGroup(GetPGroup())) SetPGroup(NULL); if (((GroupRemove*)arg)->group->IsSubGroup(GetSPGroup())) SetSPGroup(NULL); } break; case DYN_MSG_GROUP_TREECHANGED: if (dlgProc) dlgProc->UpdateGroups(); break; //case DYN_MSG_DISPLAY: //{ // DynDisplay *data = (DynDisplay*)arg; // DWORD origRndLimits; // //DynDisplay *data; // GraphicsWindow *gw; // Color col(1.0f, 0.0f, 0.0f); // Point3 pos(0.0f, 0.0f, 0.0f); // Point3 p[5]; // Point3 pmin, pmax; // bool multipass = false; // gw = data->vpt->getGW(); // //set the drawing transformation, "Matrix3(1)" mean Identity, all points to draw are in world space // gw->setTransform(Matrix3(1)); // origRndLimits = gw->getRndLimits(); // gw->setRndLimits(origRndLimits | GW_WIREFRAME | GW_Z_BUFFER);//make sure wireframe will displayed and the z (depth) buffer will evaluated // if (gw->querySupport(GW_SPT_GEOM_ACCEL)) // { // gw->multiplePass(-1, TRUE); // multipass = true; // } // if (multipass) gw->startMarkers(); // gw->setColor(LINE_COLOR, col.r, col.g, col.b); // gw->marker(&pos, POINT_MRKR); // if (multipass) gw->endMarkers(); // if (multipass) gw->startSegments(); // pmin = Point3(-5.0f, -5.0f, -5.0f); // pmax = Point3(5.0f, 5.0f, 5.0f); // gw->setColor(LINE_COLOR, 1.0f, 1.0f, 0.0f); // DrawCube(gw, pmin, pmax); // p[0] = Point3(0, 0, 0); // p[1] = Point3(10.0f, 0.0f, 0.0f); // gw->setColor(LINE_COLOR, 1.0f, 0.0f, 0.0f); // gw->segment(p, 1); // p[0] = Point3(0, 0, 0); // p[1] = Point3(0.0f, 10.0f, 0.0f); // gw->setColor(LINE_COLOR, 0.0f, 1.0f, 0.0f); // gw->segment(p, 1); // p[0] = Point3(0, 0, 0); // p[1] = Point3(0.0f, 0.0f, 10.0f); // gw->setColor(LINE_COLOR, 0.0f, 0.0f, 1.0f); // gw->segment(p, 1); // if (multipass) gw->endSegments(); // if (multipass) gw->multiplePass(-1, FALSE); // gw->setRndLimits(origRndLimits); //} //break; //case DYN_MSG_GETWORLDBOX: //the bounding box in world coordinates, enclosed all 
points that will displayed in the GraphicsWindow //{ // DynGetBox *data = (DynGetBox*)arg; // Box3 bbox; //empty box // //add points, the box will enclose these point // bbox += Point3(-10.0f, -10.0f, -10.0f); // bbox += Point3(10.0f, 10.0f, 10.0f); // if (!bbox.IsEmpty()) data->box += bbox; //} //break; case DYN_MSG_DYNSET_GETSTARTTIME: { mDoInitial = true; } break; case DYN_MSG_DYNSET_UPDATE: { UpdateInfo *info = (UpdateInfo*)arg; if (info->dyn->calculate) info->dyn->calculate->AddOperator(inout); } break; } return DYN_SUCCEED; } #if GET_MAX_RELEASE(VERSION_3DSMAX) < 17000 RefResult OP_OVDB_Bodyforce::NotifyRefChanged(Interval changeInt, RefTargetHandle hTarget, PartID& partID, RefMessage message) #else RefResult OP_FlipFluidBasis::NotifyRefChanged(const Interval &changeInt, RefTargetHandle hTarget, PartID& partID, RefMessage message, BOOL propagate) #endif { switch (message) { case REFMSG_CHANGE: if (hTarget == pb2) { int index = -1; ivalid.SetEmpty(); switch (pb2->LastNotifyParamID(index)) { case FLIP_PGROUP: case FLIP_SPGROUP: if (dlgProc) dlgProc->UpdateGroups(); break; case FLIP_VOXEL_SIZE: mDoInitial = true; break; case FLIP_BOUNDARY_INODES: { if (index >= 0 && !pb2->GetINode(FLIP_BOUNDARY_INODES, 0, index)) { pb2->EnableNotifications(FALSE); pb2->Delete(FLIP_BOUNDARY_INODES, index, 1); pb2->EnableNotifications(TRUE); } if(pb2->Count(FLIP_BOUNDARY_INODES) != mINodeBoundaryDatas.Count()) mInvalidBoundaryDatas = true; } break; } } break; } return REF_SUCCEED; } int OP_FlipFluidBasis::NumRefs() { return 1; } RefTargetHandle OP_FlipFluidBasis::GetReference(int i) { return pb2; } void OP_FlipFluidBasis::SetReference(int i, RefTargetHandle rtarg) { pb2 = (IParamBlock2*)rtarg; } int OP_FlipFluidBasis::NumSubs() { return 1; } Animatable* OP_FlipFluidBasis::SubAnim(int i) { return pb2; } TSTR OP_FlipFluidBasis::SubAnimName(int i) { return _T("Parameter"); } void OP_FlipFluidBasis::BeginEditParams(IObjParam *ip, ULONG flags, Animatable *prev) { if (pmap) { if (dlgProc) dlgProc->SetObject(this); else { dlgProc = new OP_FlipFluidBasis_DlgProc(this); pmap->SetUserDlgProc(dlgProc); } pmap->SetParamBlock(pb2); } else { dlgProc = new OP_FlipFluidBasis_DlgProc(this); pmap = CreateRParamMap2(pb2, GetRightIRendParams(), hInstance, MAKEINTRESOURCE(IDD_OP_FLIPFLUIDBASIS_UI), GetName(), 0, dlgProc); } } void OP_FlipFluidBasis::EndEditParams(IObjParam *ip, ULONG flags, Animatable *next) { if (flags & END_EDIT_REMOVEUI) { if (pmap) { DestroyRParamMap2(pmap); pmap = NULL; } dlgProc = NULL; } } int OP_FlipFluidBasis::Type() { return DYN_TYPE_OPERATOR; } Class_ID OP_FlipFluidBasis::ClassID() { return OP_FLIPFLUIDBASIS_CLASS_ID; } void OP_FlipFluidBasis::GetClassName(TSTR& s) { s = GetString(IDS_OP_FLIPFLUIDBASIS_CLASS, hInstance); } int OP_FlipFluidBasis::NumParamBlocks() { return 1; } IParamBlock2* OP_FlipFluidBasis::GetParamBlock(int i) { return pb2; } IParamBlock2* OP_FlipFluidBasis::GetParamBlockByID(short id) { return id ? 
NULL : pb2; } void OP_FlipFluidBasis::DeleteThis() { delete this; } void OP_FlipFluidBasis::SetPGroup(PGroup *group) { pb2->SetValue(FLIP_PGROUP, 0, (ReferenceTarget*)group); } PGroup *OP_FlipFluidBasis::GetPGroup() { return (PGroup*)pb2->GetReferenceTarget(FLIP_PGROUP, 0); } void OP_FlipFluidBasis::SetSPGroup(PGroup *group) { pb2->SetValue(FLIP_SPGROUP, 0, (ReferenceTarget*)group); } PGroup *OP_FlipFluidBasis::GetSPGroup() { return (PGroup*)pb2->GetReferenceTarget(FLIP_SPGROUP, 0); } OP_FlipFluidBasis_DlgProc::OP_FlipFluidBasis_DlgProc(OP_FlipFluidBasis *p) { map = NULL; op = p; SBox = NULL; GBox = NULL; } void OP_FlipFluidBasis_DlgProc::SetObject(OP_FlipFluidBasis *p) { op = p; } void OP_FlipFluidBasis_DlgProc::DeleteThis() { delete this; } void OP_FlipFluidBasis_DlgProc::UpdateGroups() { Tab<PGroup*> gtab; SetWindowRedraw(GBox, FALSE); SetWindowRedraw(SBox, FALSE); SendMessage(GBox, CB_RESETCONTENT, 0, 0); SendMessage(SBox, CB_RESETCONTENT, 0, 0); SendMessage(GBox, CB_ADDSTRING, 0, (LPARAM)_T("None")); SendMessage(SBox, CB_ADDSTRING, 0, (LPARAM)_T("None")); info.groupmanager->EnumDyn(NULL, DYN_MSG_GROUP_GETALL, (void*)&gtab); for (int i = 0; i < gtab.Count(); i++) { SendMessage(GBox, CB_ADDSTRING, 0, (LPARAM)gtab[i]->GetName()); SendMessage(GBox, CB_SETITEMDATA, (WPARAM)gtab[i]->GetGroupID() + 1, (LPARAM)gtab[i]); SendMessage(SBox, CB_ADDSTRING, 0, (LPARAM)gtab[i]->GetName()); SendMessage(SBox, CB_SETITEMDATA, (WPARAM)gtab[i]->GetGroupID() + 1, (LPARAM)gtab[i]); } if (op->GetPGroup()) SendMessage(GBox, CB_SETCURSEL, op->GetPGroup()->GetGroupID() + 1, 0); else SendMessage(GBox, CB_SETCURSEL, 0, 0); if (op->GetSPGroup()) SendMessage(SBox, CB_SETCURSEL, op->GetSPGroup()->GetGroupID() + 1, 0); else SendMessage(SBox, CB_SETCURSEL, 0, 0); SetWindowRedraw(GBox, TRUE); SetWindowRedraw(SBox, TRUE); } INT_PTR OP_FlipFluidBasis_DlgProc::DlgProc(TimeValue t, IParamMap2 *map, HWND hWnd, UINT msg, WPARAM wParam, LPARAM lParam) { switch (msg) { case WM_INITDIALOG: { this->map = map; GBox = GetDlgItem(hWnd, IDC_OP_FLIPFLUIDBASIS_PGROUP); SBox = GetDlgItem(hWnd, IDC_OP_FLIPFLUIDBASIS_SPGROUP); op->NotifyDependents(FOREVER, (PartID)&info, REFMSG_DYN_GETINFO); UpdateGroups(); return TRUE; } case WM_DESTROY: break; case WM_COMMAND: switch (LOWORD(wParam)) { case IDC_OP_FLIPFLUIDBASIS_PGROUP: switch (HIWORD(wParam)) { case CBN_SELCHANGE: { int i = ComboBox_GetCurSel(GBox); PGroup *group = (PGroup*)ComboBox_GetItemData(GBox, i); op->SetPGroup(i ? group : NULL); } break; } break; case IDC_OP_FLIPFLUIDBASIS_SPGROUP: switch (HIWORD(wParam)) { case CBN_SELCHANGE: { int i = ComboBox_GetCurSel(SBox); PGroup *group = (PGroup*)ComboBox_GetItemData(SBox, i); op->SetSPGroup(i ? group : NULL); } break; } break; } break; } return FALSE; } ///////////
46f644b3adac1d35affac8bb9d03b5b772b7adb8.hip
// !!! This is a file automatically generated by hipify!!! #include <stdlib.h> #include <stdio.h> #include <math.h> #include <time.h> #include <stdbool.h> #include "image.cuh" #ifdef _WIN32 #include "hip/hip_runtime.h" #endif // _WIN32 #define blockWidth1 32 #define blockHeight1 16 #define blockWidth2 128 #define blocksize3 32 #define blocksize4 64 // 1080/15 = 72 => 64 struct container { unsigned int value; int xPos; }; //quicksort for values int partValue(struct container list[], int left, int right) { int pivot = list[right].value; int x = (left - 1); for (int i = left; i < right; ++i) { if (list[i].value <= pivot) { x++; struct container temp = list[i]; list[i] = list[x]; list[x] = temp; } } struct container temp = list[x + 1]; list[x + 1] = list[right]; list[right] = temp; return x + 1; } void quicksortValue(struct container* list, int left, int right) { if (left < right) { unsigned int pivot = partValue(list, left, right); quicksortValue(list, left, pivot - 1); quicksortValue(list, pivot + 1, right); } } //quicksort for int int partint(unsigned int list[], int left, int right) { unsigned int pivot = list[right]; int x = (left - 1); for (int i = left; i < right; ++i) { if (list[i] < pivot) { x++; unsigned int temp = list[i]; list[i] = list[x]; list[x] = temp; } } unsigned int temp = list[x + 1]; list[x + 1] = list[right]; list[right] = temp; return x + 1; } void quicksortint(unsigned int* list, int left, int right) { if (left < right) { int pivot = partint(list, left, right); quicksortint(list, left, pivot - 1); quicksortint(list, pivot + 1, right); } } //helpermethods __device__ unsigned int MIN(int a, int b) { return a < b ? a : b; } __device__ int MAX(int a, int b) { return a > b ? a : b; } __device__ int MOD(int a, int b) { return ((a % b )+b) % b; } __device__ struct container minContainer(struct container container1, struct container container2) { return container1.value < container2.value ? container1 : container2; } __device__ unsigned int absDevice (int a) { return a < 0 ? (-a) : a; } __global__ void debug(unsigned int* seams, int height, int numSeams) { for (int y = 0; y < height; y++){ for (int x = 0; x < numSeams; x++){ printf("(%d;%d): %d", x, y, seams[y * numSeams + x]); } printf("\n"); } } __global__ void calculatePixelEnergies(unsigned char* inputData, unsigned int* pixelEnergies, int width, int height) { //use "tiling" __shared__ unsigned int inputTile[blockHeight1][blockWidth1 * 3]; int bx = blockIdx.x; int by = blockIdx.y; int tx = threadIdx.x; int ty = threadIdx.y; int y = by * blockHeight1 + ty; int x = bx * blockWidth1 + tx; int sum; int actualY; int actualX; if (y < height && x < width) { inputTile[ty][tx * 3] = inputData[(y * width + x) * 3]; inputTile[ty][tx * 3 + 1] = inputData[(y * width + x) * 3 + 1]; inputTile[ty][tx * 3 + 2] = inputData[(y * width + x) * 3 + 2]; __syncthreads(); sum = 0; for (int offsetX = -1; offsetX < 2; offsetX++) { for (int offsetY = -1; offsetY < 2; offsetY++) { actualY = MOD((y + offsetY), height); actualX = MOD((x + offsetX), width); bool condition = ((tx + offsetX) < blockWidth1 && (tx + offsetX) > 0 && (ty + offsetY) < blockHeight1 && (ty + offsetY) > 0); sum += absDevice((int)inputTile[ty][tx * 3] - (condition ? inputTile[ty + offsetY][(tx + offsetX) * 3] : inputData[(actualY * width + actualX) * 3])) + absDevice((int)inputTile[ty][tx * 3 + 1] - (condition ? inputTile[ty + offsetY][(tx + offsetX) * 3 + 1] : inputData[(actualY * width + actualX) * 3 + 1])) + absDevice((int)inputTile[ty][tx * 3 + 2] - (condition ? 
inputTile[ty + offsetY][(tx + offsetX) * 3 + 2] : inputData[(actualY * width + actualX) * 3 + 2])); } } pixelEnergies[y * width + x] = sum; } } __global__ void calculateMinEnergySums(unsigned int* pixelEnergies, struct container* minEnergySums, int width, int row) { int bx = blockIdx.x; int tx = threadIdx.x; int x = bx * blockWidth2 + tx; //use "tiling" __shared__ unsigned int tiledMinEnergySums[blockWidth2]; if (x < width) { if (row == 0) { struct container newContainer; newContainer.value = pixelEnergies[x]; newContainer.xPos = x; minEnergySums[x] = newContainer; }else { tiledMinEnergySums[tx] = minEnergySums[(row - 1) * width + x].value; __syncthreads(); struct container newContainer; if (x == 0) { // leftmost pixel of a row newContainer.value = pixelEnergies[row * width + x] + MIN(tiledMinEnergySums[tx], (tx + 1 < blockWidth2) ? tiledMinEnergySums[tx + 1] : minEnergySums[(row - 1) * width + x + 1].value); }else if (x == width - 1) { // rightmost pixel of a row newContainer.value = pixelEnergies[row * width + x] + MIN((tx - 1 > 0) ? tiledMinEnergySums[tx - 1] : minEnergySums[(row - 1) * width + x - 1].value, tiledMinEnergySums[tx]); }else { newContainer.value = pixelEnergies[row * width + x] + MIN(MIN((tx - 1 > 0) ? tiledMinEnergySums[tx - 1] : minEnergySums[(row - 1) * width + x - 1].value, tiledMinEnergySums[tx]), (tx + 1 < blockWidth2) ? tiledMinEnergySums[tx + 1] : minEnergySums[(row - 1) * width + x + 1].value); } newContainer.xPos = x; minEnergySums[row * width + x] = newContainer; } } } __global__ void calcSeams(struct container* minEnergySums, unsigned int* seams, int inputWidth, int height, int numSeams) { int bx = blockIdx.x; int tx = threadIdx.x; int threadNum = bx * blocksize3 + tx; if (threadNum < numSeams) { for (int y = height - 2; y > -1; y--) { unsigned int prevX = seams[(y + 1) * numSeams + threadNum]; if (prevX == inputWidth - 1) { // rightmost pixel of a row seams[y * numSeams + threadNum] = minContainer(minEnergySums[y * inputWidth + prevX - 1], minEnergySums[y * inputWidth + prevX]).xPos; }else if (prevX == 0) { // leftmost pixel of a row seams[y * numSeams + threadNum] = minContainer(minEnergySums[y * inputWidth + prevX], minEnergySums[y * inputWidth + prevX + 1]).xPos; }else { seams[y * numSeams + threadNum] = minContainer(minContainer(minEnergySums[y * inputWidth + prevX - 1], minEnergySums[y * inputWidth + prevX]), minEnergySums[y * inputWidth + prevX + 1]).xPos; } } } } __global__ void increaseWidth(unsigned char *inputData, unsigned char *outputData, struct container* minEnergySums, unsigned int* seams, int inputWidth, int height, int numSeams) { int bx = blockIdx.x; int tx = threadIdx.x; int threadNum = bx * blocksize4 + tx; if(threadNum < height){ //int to track where we are in the old picture int oldX = -1; int seamIndex = 0; int outputRow = threadNum * (inputWidth + numSeams) * 3; int inputRow = threadNum * inputWidth * 3; for (int x = 0; x < (inputWidth + numSeams); x++) { bool condition = (x > 0 && oldX == seams[threadNum * numSeams + seamIndex] && seamIndex < numSeams); oldX = condition ? oldX: oldX + 1; //copy last pixel if oldX is at a point where a seam is else just copy the pixel of the old picture //no branch divergence outputData[outputRow + x * 3] = condition ? outputData[outputRow + (x - 1) * 3] : inputData[inputRow + oldX * 3]; outputData[outputRow + x * 3 + 1] = condition ? outputData[outputRow + (x - 1) * 3 + 1] : inputData[inputRow + oldX * 3 + 1]; outputData[outputRow + x * 3 + 2] = condition ? 
outputData[outputRow + (x - 1) * 3 + 2] : inputData[inputRow + oldX * 3 + 2]; seamIndex = condition ? seamIndex + 1 : seamIndex; } } } int main(int argc, char* argv[]) { if (argc < 4) { printf("Usage: %s inputJPEG outputJPEG numSeams\n", argv[0]); return 0; } char* inputFile = argv[1]; char* outputFile = argv[2]; int numSeams = atoi(argv[3]); struct imgRawImage* input = loadJpegImageFile(inputFile); clock_t start = clock(); //host int width = input->width; int height = input->height; size_t inputDataSize_t = sizeof(unsigned char) * width * height * 3; size_t outputDataSize_t = sizeof(unsigned char) * (width + numSeams)* height * 3; size_t pixelEnergiesSize_t = sizeof(unsigned int) * width * height; size_t minEnergySumsSize_t = sizeof(struct container) * height * width; size_t seamsSize_t = sizeof(unsigned int) * numSeams * height; size_t seamStartSize_t = sizeof(unsigned int) * numSeams; size_t lastMinEnergySumsSize_t = sizeof(struct container) * width; unsigned int* seamsStart = (unsigned int*)malloc(seamStartSize_t); struct container* lastMinEnergySums = (struct container*)malloc(lastMinEnergySumsSize_t); unsigned char* outputData = (unsigned char*)malloc(outputDataSize_t); hipError_t cudaStatus; //device unsigned char* d_inputData; unsigned char* d_outputData; unsigned int* d_pixelEnergies; struct container* d_minEnergySums; unsigned int* d_seams; //allocate Devicememory cudaStatus = hipMalloc(&d_inputData, inputDataSize_t); if (cudaStatus != hipSuccess) { fprintf(stderr, "malloc d_inputData failed! ErrorCode %d: %s\n", cudaStatus, hipGetErrorString(cudaStatus)); } hipMemset(d_inputData, 0, inputDataSize_t); cudaStatus = hipMalloc(&d_outputData, outputDataSize_t); if (cudaStatus != hipSuccess) { fprintf(stderr, "malloc d_inputImage failed! ErrorCode %d: %s\n", cudaStatus, hipGetErrorString(cudaStatus)); } hipMemset(d_outputData, 0, outputDataSize_t); cudaStatus = hipMalloc(&d_pixelEnergies, pixelEnergiesSize_t); if (cudaStatus != hipSuccess) { fprintf(stderr, "malloc d_pixelEnergies failed! ErrorCode %d: %s\n", cudaStatus, hipGetErrorString(cudaStatus)); } hipMemset(d_pixelEnergies, 0, pixelEnergiesSize_t); cudaStatus = hipMalloc(&d_minEnergySums, minEnergySumsSize_t); if (cudaStatus != hipSuccess) { fprintf(stderr, "malloc d_minEnergySums failed! ErrorCode %d: %s\n", cudaStatus, hipGetErrorString(cudaStatus)); } hipMemset(d_minEnergySums, 0, minEnergySumsSize_t); cudaStatus = hipMalloc(&d_seams, seamsSize_t); if (cudaStatus != hipSuccess) { fprintf(stderr, "malloc d_minEnergySums failed! ErrorCode %d: %s\n", cudaStatus, hipGetErrorString(cudaStatus)); } hipMemset(d_seams, 0, seamsSize_t); //start kernel1 calculatePixelEnergies cudaStatus = hipMemcpy(d_inputData, input->lpData, inputDataSize_t, hipMemcpyHostToDevice); if (cudaStatus != hipSuccess) { fprintf(stderr, "Memory Copy input->lpData -> d_inputData failed! 
ErrorCode %d: %s\n", cudaStatus, hipGetErrorString(cudaStatus)); } dim3 threadsPerBlock1(blockWidth1, blockHeight1); dim3 numBlocks1(ceil(width / (double)threadsPerBlock1.x), ceil(height / (double)threadsPerBlock1.y)); hipLaunchKernelGGL(( calculatePixelEnergies), dim3(numBlocks1), dim3(threadsPerBlock1), 0, 0, d_inputData, d_pixelEnergies, width, height); cudaStatus = hipGetLastError(); if (cudaStatus != hipSuccess) { fprintf(stderr, "calculatePixelEnergies launch failed: %s\n", hipGetErrorString(cudaStatus)); } cudaStatus = hipDeviceSynchronize(); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipDeviceSynchronize after launch calculatePixelEnergies failed: %s\n", hipGetErrorString(cudaStatus)); } //start kernel2 calculateMinEnergySums dim3 threadsPerBlock2(blockWidth2); dim3 numBlocks2(ceil(width / (double)threadsPerBlock2.x)); for (int i = 0; i < height; i++){ calculateMinEnergySums << <numBlocks2, threadsPerBlock2 >> > (d_pixelEnergies, d_minEnergySums, width, i); cudaStatus = hipDeviceSynchronize(); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipDeviceSynchronize after launch calculateMinEnergySums failed: %s\n", hipGetErrorString(cudaStatus)); } } cudaStatus = hipGetLastError(); if (cudaStatus != hipSuccess) { fprintf(stderr, "calculateMinEnergySums launch failed: %s\n", hipGetErrorString(cudaStatus)); } cudaStatus = hipDeviceSynchronize(); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipDeviceSynchronize after launch calculateMinEnergySums failed: %s\n", hipGetErrorString(cudaStatus)); } //calculate Seams schauen wegen k>width cudaStatus = hipMemcpy(lastMinEnergySums, d_minEnergySums + width * (height - 1), lastMinEnergySumsSize_t, hipMemcpyDeviceToHost); if (cudaStatus != hipSuccess) { fprintf(stderr, "Memory Copy d_minEnergySums -> lastMinEnergySums failed! ErrorCode %d: %s\n", cudaStatus, hipGetErrorString(cudaStatus)); } //sort by value quicksortValue(lastMinEnergySums, 0, width - 1); int seamIndex = 0; int minSumIndex = 0; while (seamIndex < numSeams) { seamsStart[seamIndex] = lastMinEnergySums[minSumIndex].xPos; seamIndex++; minSumIndex = (minSumIndex + 1) % width; } //sort by coordinate quicksortint(seamsStart, 0, numSeams - 1); cudaStatus = hipMemcpy(d_seams + numSeams * (height - 1), seamsStart, seamStartSize_t, hipMemcpyHostToDevice); if (cudaStatus != hipSuccess) { fprintf(stderr, "Memory Copy seamsStart -> d_seams failed! 
ErrorCode %d: %s\n", cudaStatus, hipGetErrorString(cudaStatus)); } //start kernel3 calcSeams dim3 threadsPerBlock3(blocksize3); dim3 numBlocks3(ceil(numSeams/ (double)blocksize3)); hipLaunchKernelGGL(( calcSeams), dim3(numBlocks3), dim3(threadsPerBlock3) , 0, 0, d_minEnergySums, d_seams, width, height, numSeams); cudaStatus = hipGetLastError(); if (cudaStatus != hipSuccess) { fprintf(stderr, "calcSeams launch failed: %s\n", hipGetErrorString(cudaStatus)); } cudaStatus = hipDeviceSynchronize(); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipDeviceSynchronize after launch calcSeams failed: %s\n", hipGetErrorString(cudaStatus)); } //start kernel4 increaseWidth dim3 threadsPerBlock4(blocksize4); dim3 numBlocks4(ceil(height / (double)blocksize4)); hipLaunchKernelGGL(( increaseWidth), dim3(numBlocks4), dim3(threadsPerBlock4), 0, 0, d_inputData, d_outputData, d_minEnergySums, d_seams, width, height, numSeams); cudaStatus = hipGetLastError(); if (cudaStatus != hipSuccess) { fprintf(stderr, "increaseWidth launch failed: %s\n", hipGetErrorString(cudaStatus)); } cudaStatus = hipDeviceSynchronize(); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipDeviceSynchronize after launch increaseWidth failed: %s\n", hipGetErrorString(cudaStatus)); } //copy outputData and create image cudaStatus = hipMemcpy(outputData, d_outputData, outputDataSize_t, hipMemcpyDeviceToHost); if (cudaStatus != hipSuccess) { fprintf(stderr, "Memory Copy d_outputData -> outputData failed! ErrorCode %d: %s\n", cudaStatus, hipGetErrorString(cudaStatus)); } input->width = width + numSeams; input->lpData = outputData; //free Memory hipFree(&d_inputData); hipFree(&d_outputData); hipFree(&d_pixelEnergies); hipFree(&d_minEnergySums); hipFree(&d_seams); clock_t end = clock(); printf("Execution time: %4.2f sec\n", (double)((double)(end - start) / CLOCKS_PER_SEC)); storeJpegImageFile(input, outputFile); free(seamsStart); free(lastMinEnergySums); free(outputData); return 0; }
46f644b3adac1d35affac8bb9d03b5b772b7adb8.cu
#include <stdlib.h> #include <stdio.h> #include <math.h> #include <time.h> #include <stdbool.h> #include "image.cuh" #ifdef _WIN32 #include "cuda_runtime.h" #endif // _WIN32 #define blockWidth1 32 #define blockHeight1 16 #define blockWidth2 128 #define blocksize3 32 #define blocksize4 64 // 1080/15 = 72 => 64 struct container { unsigned int value; int xPos; }; //quicksort for values int partValue(struct container list[], int left, int right) { int pivot = list[right].value; int x = (left - 1); for (int i = left; i < right; ++i) { if (list[i].value <= pivot) { x++; struct container temp = list[i]; list[i] = list[x]; list[x] = temp; } } struct container temp = list[x + 1]; list[x + 1] = list[right]; list[right] = temp; return x + 1; } void quicksortValue(struct container* list, int left, int right) { if (left < right) { unsigned int pivot = partValue(list, left, right); quicksortValue(list, left, pivot - 1); quicksortValue(list, pivot + 1, right); } } //quicksort for int int partint(unsigned int list[], int left, int right) { unsigned int pivot = list[right]; int x = (left - 1); for (int i = left; i < right; ++i) { if (list[i] < pivot) { x++; unsigned int temp = list[i]; list[i] = list[x]; list[x] = temp; } } unsigned int temp = list[x + 1]; list[x + 1] = list[right]; list[right] = temp; return x + 1; } void quicksortint(unsigned int* list, int left, int right) { if (left < right) { int pivot = partint(list, left, right); quicksortint(list, left, pivot - 1); quicksortint(list, pivot + 1, right); } } //helpermethods __device__ unsigned int MIN(int a, int b) { return a < b ? a : b; } __device__ int MAX(int a, int b) { return a > b ? a : b; } __device__ int MOD(int a, int b) { return ((a % b )+b) % b; } __device__ struct container minContainer(struct container container1, struct container container2) { return container1.value < container2.value ? container1 : container2; } __device__ unsigned int absDevice (int a) { return a < 0 ? (-a) : a; } __global__ void debug(unsigned int* seams, int height, int numSeams) { for (int y = 0; y < height; y++){ for (int x = 0; x < numSeams; x++){ printf("(%d;%d): %d", x, y, seams[y * numSeams + x]); } printf("\n"); } } __global__ void calculatePixelEnergies(unsigned char* inputData, unsigned int* pixelEnergies, int width, int height) { //use "tiling" __shared__ unsigned int inputTile[blockHeight1][blockWidth1 * 3]; int bx = blockIdx.x; int by = blockIdx.y; int tx = threadIdx.x; int ty = threadIdx.y; int y = by * blockHeight1 + ty; int x = bx * blockWidth1 + tx; int sum; int actualY; int actualX; if (y < height && x < width) { inputTile[ty][tx * 3] = inputData[(y * width + x) * 3]; inputTile[ty][tx * 3 + 1] = inputData[(y * width + x) * 3 + 1]; inputTile[ty][tx * 3 + 2] = inputData[(y * width + x) * 3 + 2]; __syncthreads(); sum = 0; for (int offsetX = -1; offsetX < 2; offsetX++) { for (int offsetY = -1; offsetY < 2; offsetY++) { actualY = MOD((y + offsetY), height); actualX = MOD((x + offsetX), width); bool condition = ((tx + offsetX) < blockWidth1 && (tx + offsetX) > 0 && (ty + offsetY) < blockHeight1 && (ty + offsetY) > 0); sum += absDevice((int)inputTile[ty][tx * 3] - (condition ? inputTile[ty + offsetY][(tx + offsetX) * 3] : inputData[(actualY * width + actualX) * 3])) + absDevice((int)inputTile[ty][tx * 3 + 1] - (condition ? inputTile[ty + offsetY][(tx + offsetX) * 3 + 1] : inputData[(actualY * width + actualX) * 3 + 1])) + absDevice((int)inputTile[ty][tx * 3 + 2] - (condition ? 
inputTile[ty + offsetY][(tx + offsetX) * 3 + 2] : inputData[(actualY * width + actualX) * 3 + 2])); } } pixelEnergies[y * width + x] = sum; } } __global__ void calculateMinEnergySums(unsigned int* pixelEnergies, struct container* minEnergySums, int width, int row) { int bx = blockIdx.x; int tx = threadIdx.x; int x = bx * blockWidth2 + tx; //use "tiling" __shared__ unsigned int tiledMinEnergySums[blockWidth2]; if (x < width) { if (row == 0) { struct container newContainer; newContainer.value = pixelEnergies[x]; newContainer.xPos = x; minEnergySums[x] = newContainer; }else { tiledMinEnergySums[tx] = minEnergySums[(row - 1) * width + x].value; __syncthreads(); struct container newContainer; if (x == 0) { // leftmost pixel of a row newContainer.value = pixelEnergies[row * width + x] + MIN(tiledMinEnergySums[tx], (tx + 1 < blockWidth2) ? tiledMinEnergySums[tx + 1] : minEnergySums[(row - 1) * width + x + 1].value); }else if (x == width - 1) { // rightmost pixel of a row newContainer.value = pixelEnergies[row * width + x] + MIN((tx - 1 > 0) ? tiledMinEnergySums[tx - 1] : minEnergySums[(row - 1) * width + x - 1].value, tiledMinEnergySums[tx]); }else { newContainer.value = pixelEnergies[row * width + x] + MIN(MIN((tx - 1 > 0) ? tiledMinEnergySums[tx - 1] : minEnergySums[(row - 1) * width + x - 1].value, tiledMinEnergySums[tx]), (tx + 1 < blockWidth2) ? tiledMinEnergySums[tx + 1] : minEnergySums[(row - 1) * width + x + 1].value); } newContainer.xPos = x; minEnergySums[row * width + x] = newContainer; } } } __global__ void calcSeams(struct container* minEnergySums, unsigned int* seams, int inputWidth, int height, int numSeams) { int bx = blockIdx.x; int tx = threadIdx.x; int threadNum = bx * blocksize3 + tx; if (threadNum < numSeams) { for (int y = height - 2; y > -1; y--) { unsigned int prevX = seams[(y + 1) * numSeams + threadNum]; if (prevX == inputWidth - 1) { // rightmost pixel of a row seams[y * numSeams + threadNum] = minContainer(minEnergySums[y * inputWidth + prevX - 1], minEnergySums[y * inputWidth + prevX]).xPos; }else if (prevX == 0) { // leftmost pixel of a row seams[y * numSeams + threadNum] = minContainer(minEnergySums[y * inputWidth + prevX], minEnergySums[y * inputWidth + prevX + 1]).xPos; }else { seams[y * numSeams + threadNum] = minContainer(minContainer(minEnergySums[y * inputWidth + prevX - 1], minEnergySums[y * inputWidth + prevX]), minEnergySums[y * inputWidth + prevX + 1]).xPos; } } } } __global__ void increaseWidth(unsigned char *inputData, unsigned char *outputData, struct container* minEnergySums, unsigned int* seams, int inputWidth, int height, int numSeams) { int bx = blockIdx.x; int tx = threadIdx.x; int threadNum = bx * blocksize4 + tx; if(threadNum < height){ //int to track where we are in the old picture int oldX = -1; int seamIndex = 0; int outputRow = threadNum * (inputWidth + numSeams) * 3; int inputRow = threadNum * inputWidth * 3; for (int x = 0; x < (inputWidth + numSeams); x++) { bool condition = (x > 0 && oldX == seams[threadNum * numSeams + seamIndex] && seamIndex < numSeams); oldX = condition ? oldX: oldX + 1; //copy last pixel if oldX is at a point where a seam is else just copy the pixel of the old picture //no branch divergence outputData[outputRow + x * 3] = condition ? outputData[outputRow + (x - 1) * 3] : inputData[inputRow + oldX * 3]; outputData[outputRow + x * 3 + 1] = condition ? outputData[outputRow + (x - 1) * 3 + 1] : inputData[inputRow + oldX * 3 + 1]; outputData[outputRow + x * 3 + 2] = condition ? 
outputData[outputRow + (x - 1) * 3 + 2] : inputData[inputRow + oldX * 3 + 2]; seamIndex = condition ? seamIndex + 1 : seamIndex; } } } int main(int argc, char* argv[]) { if (argc < 4) { printf("Usage: %s inputJPEG outputJPEG numSeams\n", argv[0]); return 0; } char* inputFile = argv[1]; char* outputFile = argv[2]; int numSeams = atoi(argv[3]); struct imgRawImage* input = loadJpegImageFile(inputFile); clock_t start = clock(); //host int width = input->width; int height = input->height; size_t inputDataSize_t = sizeof(unsigned char) * width * height * 3; size_t outputDataSize_t = sizeof(unsigned char) * (width + numSeams)* height * 3; size_t pixelEnergiesSize_t = sizeof(unsigned int) * width * height; size_t minEnergySumsSize_t = sizeof(struct container) * height * width; size_t seamsSize_t = sizeof(unsigned int) * numSeams * height; size_t seamStartSize_t = sizeof(unsigned int) * numSeams; size_t lastMinEnergySumsSize_t = sizeof(struct container) * width; unsigned int* seamsStart = (unsigned int*)malloc(seamStartSize_t); struct container* lastMinEnergySums = (struct container*)malloc(lastMinEnergySumsSize_t); unsigned char* outputData = (unsigned char*)malloc(outputDataSize_t); cudaError cudaStatus; //device unsigned char* d_inputData; unsigned char* d_outputData; unsigned int* d_pixelEnergies; struct container* d_minEnergySums; unsigned int* d_seams; //allocate Devicememory cudaStatus = cudaMalloc(&d_inputData, inputDataSize_t); if (cudaStatus != cudaSuccess) { fprintf(stderr, "malloc d_inputData failed! ErrorCode %d: %s\n", cudaStatus, cudaGetErrorString(cudaStatus)); } cudaMemset(d_inputData, 0, inputDataSize_t); cudaStatus = cudaMalloc(&d_outputData, outputDataSize_t); if (cudaStatus != cudaSuccess) { fprintf(stderr, "malloc d_inputImage failed! ErrorCode %d: %s\n", cudaStatus, cudaGetErrorString(cudaStatus)); } cudaMemset(d_outputData, 0, outputDataSize_t); cudaStatus = cudaMalloc(&d_pixelEnergies, pixelEnergiesSize_t); if (cudaStatus != cudaSuccess) { fprintf(stderr, "malloc d_pixelEnergies failed! ErrorCode %d: %s\n", cudaStatus, cudaGetErrorString(cudaStatus)); } cudaMemset(d_pixelEnergies, 0, pixelEnergiesSize_t); cudaStatus = cudaMalloc(&d_minEnergySums, minEnergySumsSize_t); if (cudaStatus != cudaSuccess) { fprintf(stderr, "malloc d_minEnergySums failed! ErrorCode %d: %s\n", cudaStatus, cudaGetErrorString(cudaStatus)); } cudaMemset(d_minEnergySums, 0, minEnergySumsSize_t); cudaStatus = cudaMalloc(&d_seams, seamsSize_t); if (cudaStatus != cudaSuccess) { fprintf(stderr, "malloc d_minEnergySums failed! ErrorCode %d: %s\n", cudaStatus, cudaGetErrorString(cudaStatus)); } cudaMemset(d_seams, 0, seamsSize_t); //start kernel1 calculatePixelEnergies cudaStatus = cudaMemcpy(d_inputData, input->lpData, inputDataSize_t, cudaMemcpyHostToDevice); if (cudaStatus != cudaSuccess) { fprintf(stderr, "Memory Copy input->lpData -> d_inputData failed! 
ErrorCode %d: %s\n", cudaStatus, cudaGetErrorString(cudaStatus)); } dim3 threadsPerBlock1(blockWidth1, blockHeight1); dim3 numBlocks1(ceil(width / (double)threadsPerBlock1.x), ceil(height / (double)threadsPerBlock1.y)); calculatePixelEnergies<<<numBlocks1, threadsPerBlock1>>>(d_inputData, d_pixelEnergies, width, height); cudaStatus = cudaGetLastError(); if (cudaStatus != cudaSuccess) { fprintf(stderr, "calculatePixelEnergies launch failed: %s\n", cudaGetErrorString(cudaStatus)); } cudaStatus = cudaDeviceSynchronize(); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaDeviceSynchronize after launch calculatePixelEnergies failed: %s\n", cudaGetErrorString(cudaStatus)); } //start kernel2 calculateMinEnergySums dim3 threadsPerBlock2(blockWidth2); dim3 numBlocks2(ceil(width / (double)threadsPerBlock2.x)); for (int i = 0; i < height; i++){ calculateMinEnergySums << <numBlocks2, threadsPerBlock2 >> > (d_pixelEnergies, d_minEnergySums, width, i); cudaStatus = cudaDeviceSynchronize(); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaDeviceSynchronize after launch calculateMinEnergySums failed: %s\n", cudaGetErrorString(cudaStatus)); } } cudaStatus = cudaGetLastError(); if (cudaStatus != cudaSuccess) { fprintf(stderr, "calculateMinEnergySums launch failed: %s\n", cudaGetErrorString(cudaStatus)); } cudaStatus = cudaDeviceSynchronize(); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaDeviceSynchronize after launch calculateMinEnergySums failed: %s\n", cudaGetErrorString(cudaStatus)); } //calculate Seams schauen wegen k>width cudaStatus = cudaMemcpy(lastMinEnergySums, d_minEnergySums + width * (height - 1), lastMinEnergySumsSize_t, cudaMemcpyDeviceToHost); if (cudaStatus != cudaSuccess) { fprintf(stderr, "Memory Copy d_minEnergySums -> lastMinEnergySums failed! ErrorCode %d: %s\n", cudaStatus, cudaGetErrorString(cudaStatus)); } //sort by value quicksortValue(lastMinEnergySums, 0, width - 1); int seamIndex = 0; int minSumIndex = 0; while (seamIndex < numSeams) { seamsStart[seamIndex] = lastMinEnergySums[minSumIndex].xPos; seamIndex++; minSumIndex = (minSumIndex + 1) % width; } //sort by coordinate quicksortint(seamsStart, 0, numSeams - 1); cudaStatus = cudaMemcpy(d_seams + numSeams * (height - 1), seamsStart, seamStartSize_t, cudaMemcpyHostToDevice); if (cudaStatus != cudaSuccess) { fprintf(stderr, "Memory Copy seamsStart -> d_seams failed! 
ErrorCode %d: %s\n", cudaStatus, cudaGetErrorString(cudaStatus)); } //start kernel3 calcSeams dim3 threadsPerBlock3(blocksize3); dim3 numBlocks3(ceil(numSeams/ (double)blocksize3)); calcSeams<<<numBlocks3, threadsPerBlock3 >>>(d_minEnergySums, d_seams, width, height, numSeams); cudaStatus = cudaGetLastError(); if (cudaStatus != cudaSuccess) { fprintf(stderr, "calcSeams launch failed: %s\n", cudaGetErrorString(cudaStatus)); } cudaStatus = cudaDeviceSynchronize(); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaDeviceSynchronize after launch calcSeams failed: %s\n", cudaGetErrorString(cudaStatus)); } //start kernel4 increaseWidth dim3 threadsPerBlock4(blocksize4); dim3 numBlocks4(ceil(height / (double)blocksize4)); increaseWidth<<<numBlocks4, threadsPerBlock4>>>(d_inputData, d_outputData, d_minEnergySums, d_seams, width, height, numSeams); cudaStatus = cudaGetLastError(); if (cudaStatus != cudaSuccess) { fprintf(stderr, "increaseWidth launch failed: %s\n", cudaGetErrorString(cudaStatus)); } cudaStatus = cudaDeviceSynchronize(); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaDeviceSynchronize after launch increaseWidth failed: %s\n", cudaGetErrorString(cudaStatus)); } //copy outputData and create image cudaStatus = cudaMemcpy(outputData, d_outputData, outputDataSize_t, cudaMemcpyDeviceToHost); if (cudaStatus != cudaSuccess) { fprintf(stderr, "Memory Copy d_outputData -> outputData failed! ErrorCode %d: %s\n", cudaStatus, cudaGetErrorString(cudaStatus)); } input->width = width + numSeams; input->lpData = outputData; //free Memory cudaFree(&d_inputData); cudaFree(&d_outputData); cudaFree(&d_pixelEnergies); cudaFree(&d_minEnergySums); cudaFree(&d_seams); clock_t end = clock(); printf("Execution time: %4.2f sec\n", (double)((double)(end - start) / CLOCKS_PER_SEC)); storeJpegImageFile(input, outputFile); free(seamsStart); free(lastMinEnergySums); free(outputData); return 0; }
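For checking the kernel output above, a host-side reference of the energy formula that calculatePixelEnergies is intended to compute (sum of absolute per-channel differences over the 3x3 wrap-around neighbourhood) can be useful. The sketch below is hypothetical and not part of the original file pair; it assumes the same interleaved, row-major RGB layout the kernel indexes.

#include <stdlib.h>

// Hypothetical CPU reference for the pixel-energy kernel above:
// energy(x,y) = sum over the 3x3 neighbourhood (wrap-around borders) of
// |R(x,y)-R(nx,ny)| + |G(x,y)-G(nx,ny)| + |B(x,y)-B(nx,ny)|.
static int modwrap(int a, int b) { return ((a % b) + b) % b; }

void pixelEnergiesCPU(const unsigned char *in, unsigned int *energies, int width, int height)
{
    for (int y = 0; y < height; ++y) {
        for (int x = 0; x < width; ++x) {
            unsigned int sum = 0;
            for (int dy = -1; dy <= 1; ++dy) {
                for (int dx = -1; dx <= 1; ++dx) {
                    int ny = modwrap(y + dy, height);
                    int nx = modwrap(x + dx, width);
                    for (int c = 0; c < 3; ++c)
                        sum += abs((int)in[(y * width + x) * 3 + c] -
                                   (int)in[(ny * width + nx) * 3 + c]);
                }
            }
            energies[y * width + x] = sum;   // same layout as d_pixelEnergies
        }
    }
}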
d8a034d9cd58b6829a4fac1ddd203dd16a009f84.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "main.cuh" __global__ void applyBias(FSystem* System, FBias** LatestBias, double* Mutation, int gen) { int x = blockIdx.x; int y = threadIdx.x; int RandomNum = fabsf((clock64() + 1) * (x + 23) * (y + 56) % 100000); if(RandomNum < *Mutation * 100000 || gen == 1) { //Create 4 different random values int RandomBias1 = fabsf((clock64() * (x + 1) * (y + 1)) % SIZE); int RandomBias2 = fabsf((clock64() * (x + 1 + clock64()) * (y + 1)) % SIZE); int RandomBias3 = fabsf((clock64() * (x + 3 * x) * (y + 1) * clock64()) % SIZE); int RandomBias4 = fabsf((clock64() * (x + y) * (y + -x + clock64() * clock64())) % SIZE); //Assign the new random bias values (mutation) System->Bias[x][y].SrcX = RandomBias1 - x; System->Bias[x][y].SrcY = RandomBias2 - y; System->Bias[x][y].DesX = RandomBias3 - x; System->Bias[x][y].DesY = RandomBias4 - y; } else { //Copy the previous bias unchanged (no mutation) System->Bias[x][y].SrcX = LatestBias[x][y].SrcX; System->Bias[x][y].SrcY = LatestBias[x][y].SrcY; System->Bias[x][y].DesX = LatestBias[x][y].DesX; System->Bias[x][y].DesY = LatestBias[x][y].DesY; } }
d8a034d9cd58b6829a4fac1ddd203dd16a009f84.cu
#include "main.cuh" __global__ void applyBias(FSystem* System, FBias** LatestBias, double* Mutation, int gen) { int x = blockIdx.x; int y = threadIdx.x; int RandomNum = fabsf((clock64() + 1) * (x + 23) * (y + 56) % 100000); if(RandomNum < *Mutation * 100000 || gen == 1) { //Create 4 different random values int RandomBias1 = fabsf((clock64() * (x + 1) * (y + 1)) % SIZE); int RandomBias2 = fabsf((clock64() * (x + 1 + clock64()) * (y + 1)) % SIZE); int RandomBias3 = fabsf((clock64() * (x + 3 * x) * (y + 1) * clock64()) % SIZE); int RandomBias4 = fabsf((clock64() * (x + y) * (y + -x + clock64() * clock64())) % SIZE); //Assign the new random bias values (mutation) System->Bias[x][y].SrcX = RandomBias1 - x; System->Bias[x][y].SrcY = RandomBias2 - y; System->Bias[x][y].DesX = RandomBias3 - x; System->Bias[x][y].DesY = RandomBias4 - y; } else { //Copy the previous bias unchanged (no mutation) System->Bias[x][y].SrcX = LatestBias[x][y].SrcX; System->Bias[x][y].SrcY = LatestBias[x][y].SrcY; System->Bias[x][y].DesX = LatestBias[x][y].DesX; System->Bias[x][y].DesY = LatestBias[x][y].DesY; } }
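The applyBias kernel above derives its randomness from clock64() arithmetic. A standalone, hypothetical sketch of the more common per-thread cuRAND pattern for producing the same kind of bounded offsets is shown below; SIZE, the output arrays and the launch shape are illustrative assumptions, not part of the original pair.

#include <curand_kernel.h>

#define SIZE 64   // assumed stand-in for the SIZE macro used by applyBias

// Hypothetical alternative to the clock64()-based values: one independent
// cuRAND stream per thread, launched with the same <blockIdx.x, threadIdx.x> shape.
__global__ void randomBiasOffsets(int *srcX, int *srcY, unsigned long long seed)
{
    int x = blockIdx.x;
    int y = threadIdx.x;
    int tid = x * blockDim.x + y;

    curandState state;
    curand_init(seed, tid, 0, &state);   // seed once per thread

    // curand() yields an unsigned 32-bit integer; reduce it to [0, SIZE) and
    // recentre it the same way applyBias does (value minus coordinate).
    srcX[tid] = (int)(curand(&state) % SIZE) - x;
    srcY[tid] = (int)(curand(&state) % SIZE) - y;
}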
b40d2eab933d57279a87f0ef89f726802b6115c8.hip
// !!! This is a file automatically generated by hipify!!! #ifndef COMMON_H #include "../include/common.h" #endif int HostToDevice(struct Vector* first, struct Vector* second){ if(first->position != second->position){ hipMemcpy(second->vector, first->vector, sizeof(int)*first->width, hipMemcpyHostToDevice); return 1; } return 0; } int DeviceToHost(struct Vector* first, struct Vector* second){ if(first->position != second->position){ hipMemcpy(second->vector, first->vector, sizeof(int)*first->width, hipMemcpyDeviceToHost); return 1; } return 0; }
b40d2eab933d57279a87f0ef89f726802b6115c8.cu
#ifndef COMMON_H #include "../include/common.h" #endif int HostToDevice(struct Vector* first, struct Vector* second){ if(first->position != second->position){ cudaMemcpy(second->vector, first->vector, sizeof(int)*first->width, cudaMemcpyHostToDevice); return 1; } return 0; } int DeviceToHost(struct Vector* first, struct Vector* second){ if(first->position != second->position){ cudaMemcpy(second->vector, first->vector, sizeof(int)*first->width, cudaMemcpyDeviceToHost); return 1; } return 0; }
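A minimal, hypothetical usage sketch for the HostToDevice/DeviceToHost helpers above. The Vector definition here is an assumption reconstructed from the three fields the helpers reference (vector, width, position), and position is assumed to tag where the buffer lives, since the helpers only copy when the two positions differ.

#include <cuda_runtime.h>
#include <stdio.h>

// Assumed minimal layout; the real definition lives in common.h and may differ.
struct Vector { int *vector; int width; int position; };

int HostToDevice(struct Vector *first, struct Vector *second);   // defined above
int DeviceToHost(struct Vector *first, struct Vector *second);   // defined above

int main(void)
{
    const int n = 8;
    int data[8] = {1, 2, 3, 4, 5, 6, 7, 8};

    struct Vector host = { data, n, 0 };   // position 0: assumed host-side buffer
    struct Vector dev  = { NULL, n, 1 };   // position 1: assumed device-side buffer
    cudaMalloc((void **)&dev.vector, sizeof(int) * n);

    HostToDevice(&host, &dev);   // copies host.vector -> dev.vector (positions differ)
    data[0] = 0;                 // clobber the host copy
    DeviceToHost(&dev, &host);   // copies it back from the device

    printf("restored value: %d\n", data[0]);   // expected: 1
    cudaFree(dev.vector);
    return 0;
}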
14bea9ad92751d8c8b9e25db95edf9c3cb545920.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "gen_gpu.h" // ******************** General Mat-Mat Functions ****************** __global__ void gen_matvec(float *A, float *x, float *y, const int m, const int n) { unsigned int xIndex = blockDim.x * blockIdx.x + threadIdx.x; if ( xIndex < m ){ float c = 0.0f; for(int i=0; i<n; i++) c = c + x[i] * A[xIndex + m * i]; y[xIndex] = c; } } __global__ void gen_matvecT(float *A, float *x, float *y, const int m, const int n) { unsigned int xIndex = blockDim.x * blockIdx.x + threadIdx.x; if ( xIndex < n ) { float c = 0.0f; for(int i=0; i<m; i++) c = c + y[i] * A[xIndex * m + i]; x[xIndex] = c; } } /* ******************************* ** The matrix multiplication ** ******************************* */ void A_gen(float * out, float * in, float * A, const int m, const int n, dim3 numBlocksm, dim3 threadsPerBlockm) { // perform the multiplication hipLaunchKernelGGL(( gen_matvec) , dim3(numBlocksm), dim3(threadsPerBlockm) , 0, 0, (float*)A, (float*)in, (float*)out, m, n); hipDeviceSynchronize(); return; } /* ***************************************** ** The matrix Transpose multiplication ** ***************************************** */ void AT_gen(float * out, float * in, float * A, const int m, const int n, dim3 numBlocks, dim3 threadsPerBlock) { // perform the multiplication hipLaunchKernelGGL(( gen_matvecT) , dim3(numBlocks), dim3(threadsPerBlock) , 0, 0, (float*)A, (float*)out, (float*)in, m, n); hipDeviceSynchronize(); return; }
14bea9ad92751d8c8b9e25db95edf9c3cb545920.cu
#include "gen_gpu.h" // ******************** General Mat-Mat Functions ****************** __global__ void gen_matvec(float *A, float *x, float *y, const int m, const int n) { unsigned int xIndex = blockDim.x * blockIdx.x + threadIdx.x; if ( xIndex < m ){ float c = 0.0f; for(int i=0; i<n; i++) c = c + x[i] * A[xIndex + m * i]; y[xIndex] = c; } } __global__ void gen_matvecT(float *A, float *x, float *y, const int m, const int n) { unsigned int xIndex = blockDim.x * blockIdx.x + threadIdx.x; if ( xIndex < n ) { float c = 0.0f; for(int i=0; i<m; i++) c = c + y[i] * A[xIndex * m + i]; x[xIndex] = c; } } /* ******************************* ** The matrix multiplication ** ******************************* */ void A_gen(float * out, float * in, float * A, const int m, const int n, dim3 numBlocksm, dim3 threadsPerBlockm) { // perform the multiplication gen_matvec <<< numBlocksm, threadsPerBlockm >>>((float*)A, (float*)in, (float*)out, m, n); cudaThreadSynchronize(); return; } /* ***************************************** ** The matrix Transpose multiplication ** ***************************************** */ void AT_gen(float * out, float * in, float * A, const int m, const int n, dim3 numBlocks, dim3 threadsPerBlock) { // perform the multiplication gen_matvecT <<< numBlocks, threadsPerBlock >>>((float*)A, (float*)out, (float*)in, m, n); cudaThreadSynchronize(); return; }
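A minimal usage sketch for A_gen above; the matrix sizes, block size and host data are hypothetical, not taken from the original file. Note that gen_matvec reads A[row + m * col], so the host matrix has to be filled in column-major order.

#include <cuda_runtime.h>
#include <stdio.h>

void A_gen(float *out, float *in, float *A, const int m, const int n,
           dim3 numBlocksm, dim3 threadsPerBlockm);   // from the file above

int main(void)
{
    const int m = 4, n = 3;                       // y = A * x, A is m x n
    float hA[m * n], hx[n], hy[m];

    // Column-major fill, matching the kernel's A[row + m * col] indexing.
    for (int col = 0; col < n; ++col)
        for (int row = 0; row < m; ++row)
            hA[row + m * col] = (row == col) ? 1.0f : 0.0f;
    for (int i = 0; i < n; ++i) hx[i] = (float)(i + 1);

    float *dA, *dx, *dy;
    cudaMalloc(&dA, sizeof(hA)); cudaMalloc(&dx, sizeof(hx)); cudaMalloc(&dy, sizeof(hy));
    cudaMemcpy(dA, hA, sizeof(hA), cudaMemcpyHostToDevice);
    cudaMemcpy(dx, hx, sizeof(hx), cudaMemcpyHostToDevice);

    dim3 threads(128);
    dim3 blocks((m + threads.x - 1) / threads.x);  // one thread per output row
    A_gen(dy, dx, dA, m, n, blocks, threads);

    cudaMemcpy(hy, dy, sizeof(hy), cudaMemcpyDeviceToHost);
    for (int i = 0; i < m; ++i) printf("y[%d] = %f\n", i, hy[i]);

    cudaFree(dA); cudaFree(dx); cudaFree(dy);
    return 0;
}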
174e95d7d7997b0653969d9db90a610cd4b7a78a.hip
// !!! This is a file automatically generated by hipify!!! #include "stencil.hpp" #include <hip/hip_runtime.h> #include <vector> #include "aligned_allocator.h" #include <chrono> #include <iostream> using std::chrono::system_clock; using std::chrono::duration; using std::endl; using std::cout; #define BLOCK_DIM_X 32 #define BLOCK_DIM_Y 32 GPUStencil::GPUStencil(const Config &configg, dvec64 &stencil, int block_x, int block_y, int block_t): Stencil(configg, stencil, block_x, block_y, block_t) { cuda_stencil1 = nullptr; cuda_stencil2 = nullptr; grid_dim_x = -1; grid_dim_y = -1; block_dim_x = -1; block_dim_y = -1; } void GPUStencil::AllocGPUMem() { size_t cuda_stencil_size = sizeof(double) * (x_dim + 2) * (y_dim + 2) * (z_dim + 2); hipMalloc(&cuda_stencil1, cuda_stencil_size); hipMalloc(&cuda_stencil2, cuda_stencil_size); hipMemset(cuda_stencil1, 0, cuda_stencil_size); hipMemset(cuda_stencil2, 0, cuda_stencil_size); } void GPUStencil::CopyDataFromCPUToGPU() { size_t cuda_stencil_size = sizeof(double) * (x_dim + 2) * (y_dim + 2) * (z_dim + 2); hipMemcpy(cuda_stencil1, stencil.data(), cuda_stencil_size, hipMemcpyHostToDevice); } void GPUStencil::CalcBlockAndGridDim() { block_dim_x = BLOCK_DIM_X; block_dim_y = BLOCK_DIM_Y; grid_dim_x = (x_dim + block_dim_x - 3) / (block_dim_x - 2); grid_dim_y = (y_dim + block_dim_y - 3) / (block_dim_y - 2); } void GPUStencil::CopyResultFromGPUToCPU() { size_t cuda_stencil_size = sizeof(double) * (x_dim + 2) * (y_dim + 2) * (z_dim + 2); hipMemcpy(stencil.data(), cuda_stencil1, cuda_stencil_size, hipMemcpyDeviceToHost); } void GPUStencil::FreeGPUMem() { hipFree(cuda_stencil1); hipFree(cuda_stencil2); } //2D blocking __global__ void GPUStencilKernel(double *cuda_stencil1, double *cuda_stencil2, int dim_x, int dim_y, int dim_z, int t_steps, double alpha, double beta_x_0, double beta_x_1, double beta_y_0, double beta_y_1, double beta_z_0, double beta_z_1) { int x_index = blockIdx.x * (blockDim.x - 2) + threadIdx.x; int y_index = blockIdx.y * (blockDim.y - 2) + threadIdx.y; int z_size = (dim_x + 2) * (dim_y + 2); int y_size = dim_x + 2; __shared__ double subplanes[3][BLOCK_DIM_Y][BLOCK_DIM_X]; int index = y_index * y_size + x_index; if(x_index < 2 + dim_x && y_index < 2 + dim_y) { subplanes[0][threadIdx.y][threadIdx.x] = cuda_stencil1[index]; index += z_size; subplanes[1][threadIdx.y][threadIdx.x] = cuda_stencil1[index]; } for(int j = 1; j < dim_z + 1; ++j) { //load into shared memory if(x_index < 2 + dim_x && y_index < 2 + dim_y) { subplanes[(j + 1) % 3][threadIdx.y][threadIdx.x] = cuda_stencil1[index + z_size]; } __syncthreads(); if(threadIdx.x > 0 && threadIdx.x < blockDim.x - 1 && threadIdx.y > 0 && threadIdx.y < blockDim.y - 1 && x_index < dim_x + 1 && y_index < dim_y + 1) { cuda_stencil2[index] = alpha * subplanes[j % 3][threadIdx.y][threadIdx.x] + beta_x_0 * subplanes[j % 3][threadIdx.y][threadIdx.x - 1] + beta_x_1 * subplanes[j % 3][threadIdx.y][threadIdx.x + 1] + beta_y_0 * subplanes[j % 3][threadIdx.y - 1][threadIdx.x] + beta_y_1 * subplanes[j % 3][threadIdx.y + 1][threadIdx.x] + beta_z_0 * subplanes[(j + 2) % 3][threadIdx.y][threadIdx.x] + beta_z_1 * subplanes[(j + 1) % 3][threadIdx.y][threadIdx.x]; } index += z_size; __syncthreads(); } } void GPUStencil::Compute() { cout << "allocating gpu memory" << endl; auto t_start = system_clock::now(); AllocGPUMem(); auto t_end = system_clock::now(); duration<double> t_duration = t_end - t_start; cout << "allocating gpu memory time: " << t_duration.count() << endl; cout << "copy data to gpu" << endl; t_start = 
system_clock::now(); CopyDataFromCPUToGPU(); t_end = system_clock::now(); t_duration = t_end - t_start; cout << "copy data to gpu time: " << t_duration.count() << endl; CalcBlockAndGridDim(); dim3 dimBlock(block_dim_x, block_dim_y); dim3 dimGrid(grid_dim_x, grid_dim_y); cout << "gpu kernel start" << endl; t_start = system_clock::now(); double *cur_stencil = cuda_stencil1; for(int i = 0; i < t_steps; ++i) { hipLaunchKernelGGL(( GPUStencilKernel), dim3(dimGrid), dim3(dimBlock), 0, 0, cuda_stencil1, cuda_stencil2, x_dim, y_dim, z_dim, t_steps, alpha, beta_x_0, beta_x_1, beta_y_0, beta_y_1, beta_z_0, beta_z_1); cur_stencil = cuda_stencil2; cuda_stencil2 = cuda_stencil1; cuda_stencil1 = cur_stencil; hipDeviceSynchronize(); } hipDeviceSynchronize(); t_end = system_clock::now(); t_duration = t_end - t_start; cout << "gpu kernel time: " << t_duration.count() << endl; cout << "copy result from gpu" << endl; t_start = system_clock::now(); CopyResultFromGPUToCPU(); hipDeviceSynchronize(); t_end = system_clock::now(); t_duration = t_end - t_start; cout << "copy result from gpu time: " << t_duration.count() << endl; FreeGPUMem(); }
174e95d7d7997b0653969d9db90a610cd4b7a78a.cu
#include "stencil.hpp" #include <cuda.h> #include <vector> #include "aligned_allocator.h" #include <chrono> #include <iostream> using std::chrono::system_clock; using std::chrono::duration; using std::endl; using std::cout; #define BLOCK_DIM_X 32 #define BLOCK_DIM_Y 32 GPUStencil::GPUStencil(const Config &configg, dvec64 &stencil, int block_x, int block_y, int block_t): Stencil(configg, stencil, block_x, block_y, block_t) { cuda_stencil1 = nullptr; cuda_stencil2 = nullptr; grid_dim_x = -1; grid_dim_y = -1; block_dim_x = -1; block_dim_y = -1; } void GPUStencil::AllocGPUMem() { size_t cuda_stencil_size = sizeof(double) * (x_dim + 2) * (y_dim + 2) * (z_dim + 2); cudaMalloc(&cuda_stencil1, cuda_stencil_size); cudaMalloc(&cuda_stencil2, cuda_stencil_size); cudaMemset(cuda_stencil1, 0, cuda_stencil_size); cudaMemset(cuda_stencil2, 0, cuda_stencil_size); } void GPUStencil::CopyDataFromCPUToGPU() { size_t cuda_stencil_size = sizeof(double) * (x_dim + 2) * (y_dim + 2) * (z_dim + 2); cudaMemcpy(cuda_stencil1, stencil.data(), cuda_stencil_size, cudaMemcpyHostToDevice); } void GPUStencil::CalcBlockAndGridDim() { block_dim_x = BLOCK_DIM_X; block_dim_y = BLOCK_DIM_Y; grid_dim_x = (x_dim + block_dim_x - 3) / (block_dim_x - 2); grid_dim_y = (y_dim + block_dim_y - 3) / (block_dim_y - 2); } void GPUStencil::CopyResultFromGPUToCPU() { size_t cuda_stencil_size = sizeof(double) * (x_dim + 2) * (y_dim + 2) * (z_dim + 2); cudaMemcpy(stencil.data(), cuda_stencil1, cuda_stencil_size, cudaMemcpyDeviceToHost); } void GPUStencil::FreeGPUMem() { cudaFree(cuda_stencil1); cudaFree(cuda_stencil2); } //2D blocking __global__ void GPUStencilKernel(double *cuda_stencil1, double *cuda_stencil2, int dim_x, int dim_y, int dim_z, int t_steps, double alpha, double beta_x_0, double beta_x_1, double beta_y_0, double beta_y_1, double beta_z_0, double beta_z_1) { int x_index = blockIdx.x * (blockDim.x - 2) + threadIdx.x; int y_index = blockIdx.y * (blockDim.y - 2) + threadIdx.y; int z_size = (dim_x + 2) * (dim_y + 2); int y_size = dim_x + 2; __shared__ double subplanes[3][BLOCK_DIM_Y][BLOCK_DIM_X]; int index = y_index * y_size + x_index; if(x_index < 2 + dim_x && y_index < 2 + dim_y) { subplanes[0][threadIdx.y][threadIdx.x] = cuda_stencil1[index]; index += z_size; subplanes[1][threadIdx.y][threadIdx.x] = cuda_stencil1[index]; } for(int j = 1; j < dim_z + 1; ++j) { //load into shared memory if(x_index < 2 + dim_x && y_index < 2 + dim_y) { subplanes[(j + 1) % 3][threadIdx.y][threadIdx.x] = cuda_stencil1[index + z_size]; } __syncthreads(); if(threadIdx.x > 0 && threadIdx.x < blockDim.x - 1 && threadIdx.y > 0 && threadIdx.y < blockDim.y - 1 && x_index < dim_x + 1 && y_index < dim_y + 1) { cuda_stencil2[index] = alpha * subplanes[j % 3][threadIdx.y][threadIdx.x] + beta_x_0 * subplanes[j % 3][threadIdx.y][threadIdx.x - 1] + beta_x_1 * subplanes[j % 3][threadIdx.y][threadIdx.x + 1] + beta_y_0 * subplanes[j % 3][threadIdx.y - 1][threadIdx.x] + beta_y_1 * subplanes[j % 3][threadIdx.y + 1][threadIdx.x] + beta_z_0 * subplanes[(j + 2) % 3][threadIdx.y][threadIdx.x] + beta_z_1 * subplanes[(j + 1) % 3][threadIdx.y][threadIdx.x]; } index += z_size; __syncthreads(); } } void GPUStencil::Compute() { cout << "allocating gpu memory" << endl; auto t_start = system_clock::now(); AllocGPUMem(); auto t_end = system_clock::now(); duration<double> t_duration = t_end - t_start; cout << "allocating gpu memory time: " << t_duration.count() << endl; cout << "copy data to gpu" << endl; t_start = system_clock::now(); CopyDataFromCPUToGPU(); t_end = 
system_clock::now(); t_duration = t_end - t_start; cout << "copy data to gpu time: " << t_duration.count() << endl; CalcBlockAndGridDim(); dim3 dimBlock(block_dim_x, block_dim_y); dim3 dimGrid(grid_dim_x, grid_dim_y); cout << "gpu kernel start" << endl; t_start = system_clock::now(); double *cur_stencil = cuda_stencil1; for(int i = 0; i < t_steps; ++i) { GPUStencilKernel<<<dimGrid, dimBlock>>>(cuda_stencil1, cuda_stencil2, x_dim, y_dim, z_dim, t_steps, alpha, beta_x_0, beta_x_1, beta_y_0, beta_y_1, beta_z_0, beta_z_1); cur_stencil = cuda_stencil2; cuda_stencil2 = cuda_stencil1; cuda_stencil1 = cur_stencil; cudaDeviceSynchronize(); } cudaDeviceSynchronize(); t_end = system_clock::now(); t_duration = t_end - t_start; cout << "gpu kernel time: " << t_duration.count() << endl; cout << "copy result from gpu" << endl; t_start = system_clock::now(); CopyResultFromGPUToCPU(); cudaDeviceSynchronize(); t_end = system_clock::now(); t_duration = t_end - t_start; cout << "copy result from gpu time: " << t_duration.count() << endl; FreeGPUMem(); }
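For verifying the GPU result above, a host-side reference of one time step of the same 7-point update applied by GPUStencilKernel can be used; the sketch below is hypothetical, not code from the original file, and assumes the same (dim+2)-padded layout and coefficient roles as the kernel.

// Hypothetical CPU reference: one time step of the 7-point stencil on the
// (x_dim+2) x (y_dim+2) x (z_dim+2) padded grid, matching GPUStencilKernel's update.
void StencilStepCPU(const double *in, double *out,
                    int x_dim, int y_dim, int z_dim,
                    double alpha,
                    double bx0, double bx1,
                    double by0, double by1,
                    double bz0, double bz1)
{
    const int ys = x_dim + 2;                    // stride between rows
    const int zs = (x_dim + 2) * (y_dim + 2);    // stride between planes
    for (int z = 1; z <= z_dim; ++z)
        for (int y = 1; y <= y_dim; ++y)
            for (int x = 1; x <= x_dim; ++x) {
                const int i = z * zs + y * ys + x;
                out[i] = alpha * in[i]
                       + bx0 * in[i - 1]  + bx1 * in[i + 1]
                       + by0 * in[i - ys] + by1 * in[i + ys]
                       + bz0 * in[i - zs] + bz1 * in[i + zs];
            }
}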
7e936301058973cf5d8f0a22c52deb07684ccda9.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <stdio.h> #include <stdlib.h> #include <math.h> #include <sys/time.h> /* Problem size */ #define XSIZE 2560 #define YSIZE 2048 /* Divide the problem into blocks of BLOCKX x BLOCKY threads */ #define BLOCKY 8 #define BLOCKX 8 #define MAXITER 255 #define cudaErrorCheck(ans) { gpuAssert((ans), __FILE__, __LINE__); } inline void gpuAssert(hipError_t code, const char *file, int line, bool abort = true) { if (code != hipSuccess) { fprintf(stderr, "GPUassert: %s %s %s %d\n", hipGetErrorName(code), hipGetErrorString(code), file, line); if (abort) exit(code); } } double xleft=-2.01; double xright=1; double yupper,ylower; double ycenter=1e-6; double step; int host_pixel[XSIZE*YSIZE]; int device_pixel[XSIZE*YSIZE]; typedef struct { double real,imag; } my_complex_t; #define PIXEL(i,j) ((i)+(j)*XSIZE) /********** SUBTASK1: Create kernel device_calculate *************************/ __global__ void device_calculate(int *imageBlock, double xleft, double yupper, double step){ int i = blockIdx.x * BLOCKX + threadIdx.x; int j = blockIdx.y * BLOCKY + threadIdx.y; my_complex_t c,z,temp; int iter=0; c.real = (xleft + step*i); c.imag = (yupper - step*j); z = c; while(z.real*z.real + z.imag*z.imag < 4.0) { temp.real = z.real*z.real - z.imag*z.imag + c.real; temp.imag = 2.0*z.real*z.imag + c.imag; z = temp; if(++iter==MAXITER) break; } imageBlock[PIXEL(i,j)]=iter; } /********** SUBTASK1 END *****************************************************/ void host_calculate() { for(int j=0;j<YSIZE;j++) { for(int i=0;i<XSIZE;i++) { /* Calculate the number of iterations until divergence for each pixel. If divergence never happens, return MAXITER */ my_complex_t c,z,temp; int iter=0; c.real = (xleft + step*i); c.imag = (yupper - step*j); z = c; while(z.real*z.real + z.imag*z.imag < 4.0) { temp.real = z.real*z.real - z.imag*z.imag + c.real; temp.imag = 2.0*z.real*z.imag + c.imag; z = temp; if(++iter==MAXITER) break; } host_pixel[PIXEL(i,j)]=iter; } } } typedef unsigned char uchar; /* save 24-bits bmp file, buffer must be in bmp format: upside-down */ void savebmp(char *name,uchar *buffer,int x,int y) { FILE *f=fopen(name,"wb"); if(!f) { printf("Error writing image to disk.\n"); return; } unsigned int size=x*y*3+54; uchar header[54]={'B','M',size&255,(size>>8)&255,(size>>16)&255,size>>24,0, 0,0,0,54,0,0,0,40,0,0,0,x&255,x>>8,0,0,y&255,y>>8,0,0,1,0,24,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0}; fwrite(header,1,54,f); fwrite(buffer,1,x*y*3,f); fclose(f); } /* given iteration number, set a colour */ void fancycolour(uchar *p,int iter) { if(iter==MAXITER); else if(iter<8) { p[0]=128+iter*16; p[1]=p[2]=0; } else if(iter<24) { p[0]=255; p[1]=p[2]=(iter-8)*16; } else if(iter<160) { p[0]=p[1]=255-(iter-24)*2; p[2]=255; } else { p[0]=p[1]=(iter-160)*2; p[2]=255-(iter-160)*2; } } /* * Get system time to microsecond precision (ostensibly, the same as MPI_Wtime), * returns time in seconds */ double walltime ( void ) { static struct timeval t; gettimeofday ( &t, NULL ); return ( t.tv_sec + 1e-6 * t.tv_usec ); } int main(int argc,char **argv) { if(argc==1) { puts("Usage: MANDEL n"); puts("n decides whether image should be written to disk (1=yes, 0=no)"); return 0; } double start; double hosttime=0; double devicetime=0; double memtime=0; hipDeviceProp_t p; hipSetDevice(0); hipGetDeviceProperties (&p, 0); printf("Device compute capability: %d.%d\n", p.major, p.minor); /* Calculate the range in the y-axis such that we preserve the aspect 
ratio */ step=(xright-xleft)/XSIZE; yupper=ycenter+(step*YSIZE)/2; ylower=ycenter-(step*YSIZE)/2; /* Host calculates image */ start=walltime(); host_calculate(); hosttime+=walltime()-start; /********** SUBTASK2: Set up device memory *******************************/ int *imageBlock; cudaErrorCheck(hipMalloc((void**)&imageBlock, XSIZE*YSIZE * sizeof(int))); /********** SUBTASK2 END *************************************************/ start=walltime(); /********** SUBTASK3: Execute the kernel on the device *******************/ dim3 gridBlock(XSIZE/BLOCKX, YSIZE/BLOCKY); dim3 threadBlock(BLOCKX, BLOCKY); hipLaunchKernelGGL(( device_calculate), dim3(gridBlock), dim3(threadBlock), 0, 0, imageBlock, xleft, yupper, step); cudaErrorCheck(hipGetLastError()); /********** SUBTASK3 END *************************************************/ devicetime+=walltime()-start; start=walltime(); /********** SUBTASK4: Transfer the result from device to device_pixel[][]*/ cudaErrorCheck(hipMemcpy(device_pixel, imageBlock, XSIZE*YSIZE * sizeof(int), hipMemcpyDeviceToHost)); /********** SUBTASK4 END *************************************************/ memtime+=walltime()-start; /********** SUBTASK5: Free the device memory also ************************/ cudaErrorCheck(hipFree(imageBlock)); /********** SUBTASK5 END *************************************************/ int errors=0; /* check if result is correct */ for(int i=0;i<XSIZE;i++) { for(int j=0;j<YSIZE;j++) { int diff=host_pixel[PIXEL(i,j)]-device_pixel[PIXEL(i,j)]; if(diff<0) diff=-diff; /* allow +-1 difference */ if(diff>1) { if(errors<10) printf("Error on pixel %d %d: expected %d, found %d\n", i,j,host_pixel[PIXEL(i,j)],device_pixel[PIXEL(i,j)]); else if(errors==10) puts("..."); errors++; } } } if(errors>0) printf("Found %d errors.\n",errors); else puts("Device calculations are correct."); printf("\n"); printf("Host time: %7.3f ms\n",hosttime*1e3); printf("Device calculation: %7.3f ms\n",devicetime*1e3); printf("Copy result: %7.3f ms\n",memtime*1e3); if(strtol(argv[1],NULL,10)!=0) { /* create nice image from iteration counts. take care to create it upside down (bmp format) */ unsigned char *buffer=(unsigned char *)calloc(XSIZE*YSIZE*3,1); for(int i=0;i<XSIZE;i++) { for(int j=0;j<YSIZE;j++) { int p=((YSIZE-j-1)*XSIZE+i)*3; fancycolour(buffer+p,device_pixel[PIXEL(i,j)]); } } /* write image to disk */ savebmp("mandel1.bmp",buffer,XSIZE,YSIZE); } return 0; }
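The hipified launch in main above, hipLaunchKernelGGL(device_calculate, dim3(gridBlock), dim3(threadBlock), 0, 0, ...), is HIP's macro spelling of the triple-chevron launch used in the .cu counterpart below; the two extra zeros are the dynamic shared-memory size and the stream. A minimal self-contained HIP sketch showing that both spellings launch the same way (the fill kernel and buffer size are stand-ins written only for this example):

#include <hip/hip_runtime.h>
#include <cstdio>

__global__ void fill(int *out, int value) {
    out[blockIdx.x * blockDim.x + threadIdx.x] = value;  // one element per thread
}

int main() {
    int *d = nullptr;
    hipMalloc((void**)&d, 64 * sizeof(int));
    // Macro form emitted by hipify (last two 0s: shared-memory bytes, stream):
    hipLaunchKernelGGL(fill, dim3(2), dim3(32), 0, 0, d, 1);
    // Chevron form, also accepted by hipcc and identical in effect:
    fill<<<2, 32>>>(d, 2);
    hipDeviceSynchronize();
    hipFree(d);
    printf("launched both forms\n");
    return 0;
}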
7e936301058973cf5d8f0a22c52deb07684ccda9.cu
#include <stdio.h> #include <stdlib.h> #include <math.h> #include <sys/time.h> /* Problem size */ #define XSIZE 2560 #define YSIZE 2048 /* Divide the problem into blocks of BLOCKX x BLOCKY threads */ #define BLOCKY 8 #define BLOCKX 8 #define MAXITER 255 #define cudaErrorCheck(ans) { gpuAssert((ans), __FILE__, __LINE__); } inline void gpuAssert(cudaError_t code, const char *file, int line, bool abort = true) { if (code != cudaSuccess) { fprintf(stderr, "GPUassert: %s %s %s %d\n", cudaGetErrorName(code), cudaGetErrorString(code), file, line); if (abort) exit(code); } } double xleft=-2.01; double xright=1; double yupper,ylower; double ycenter=1e-6; double step; int host_pixel[XSIZE*YSIZE]; int device_pixel[XSIZE*YSIZE]; typedef struct { double real,imag; } my_complex_t; #define PIXEL(i,j) ((i)+(j)*XSIZE) /********** SUBTASK1: Create kernel device_calculate *************************/ __global__ void device_calculate(int *imageBlock, double xleft, double yupper, double step){ int i = blockIdx.x * BLOCKX + threadIdx.x; int j = blockIdx.y * BLOCKY + threadIdx.y; my_complex_t c,z,temp; int iter=0; c.real = (xleft + step*i); c.imag = (yupper - step*j); z = c; while(z.real*z.real + z.imag*z.imag < 4.0) { temp.real = z.real*z.real - z.imag*z.imag + c.real; temp.imag = 2.0*z.real*z.imag + c.imag; z = temp; if(++iter==MAXITER) break; } imageBlock[PIXEL(i,j)]=iter; } /********** SUBTASK1 END *****************************************************/ void host_calculate() { for(int j=0;j<YSIZE;j++) { for(int i=0;i<XSIZE;i++) { /* Calculate the number of iterations until divergence for each pixel. If divergence never happens, return MAXITER */ my_complex_t c,z,temp; int iter=0; c.real = (xleft + step*i); c.imag = (yupper - step*j); z = c; while(z.real*z.real + z.imag*z.imag < 4.0) { temp.real = z.real*z.real - z.imag*z.imag + c.real; temp.imag = 2.0*z.real*z.imag + c.imag; z = temp; if(++iter==MAXITER) break; } host_pixel[PIXEL(i,j)]=iter; } } } typedef unsigned char uchar; /* save 24-bits bmp file, buffer must be in bmp format: upside-down */ void savebmp(char *name,uchar *buffer,int x,int y) { FILE *f=fopen(name,"wb"); if(!f) { printf("Error writing image to disk.\n"); return; } unsigned int size=x*y*3+54; uchar header[54]={'B','M',size&255,(size>>8)&255,(size>>16)&255,size>>24,0, 0,0,0,54,0,0,0,40,0,0,0,x&255,x>>8,0,0,y&255,y>>8,0,0,1,0,24,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0}; fwrite(header,1,54,f); fwrite(buffer,1,x*y*3,f); fclose(f); } /* given iteration number, set a colour */ void fancycolour(uchar *p,int iter) { if(iter==MAXITER); else if(iter<8) { p[0]=128+iter*16; p[1]=p[2]=0; } else if(iter<24) { p[0]=255; p[1]=p[2]=(iter-8)*16; } else if(iter<160) { p[0]=p[1]=255-(iter-24)*2; p[2]=255; } else { p[0]=p[1]=(iter-160)*2; p[2]=255-(iter-160)*2; } } /* * Get system time to microsecond precision (ostensibly, the same as MPI_Wtime), * returns time in seconds */ double walltime ( void ) { static struct timeval t; gettimeofday ( &t, NULL ); return ( t.tv_sec + 1e-6 * t.tv_usec ); } int main(int argc,char **argv) { if(argc==1) { puts("Usage: MANDEL n"); puts("n decides whether image should be written to disk (1=yes, 0=no)"); return 0; } double start; double hosttime=0; double devicetime=0; double memtime=0; cudaDeviceProp p; cudaSetDevice(0); cudaGetDeviceProperties (&p, 0); printf("Device compute capability: %d.%d\n", p.major, p.minor); /* Calculate the range in the y-axis such that we preserve the aspect ratio */ step=(xright-xleft)/XSIZE; yupper=ycenter+(step*YSIZE)/2; 
ylower=ycenter-(step*YSIZE)/2; /* Host calculates image */ start=walltime(); host_calculate(); hosttime+=walltime()-start; /********** SUBTASK2: Set up device memory *******************************/ int *imageBlock; cudaErrorCheck(cudaMalloc((void**)&imageBlock, XSIZE*YSIZE * sizeof(int))); /********** SUBTASK2 END *************************************************/ start=walltime(); /********** SUBTASK3: Execute the kernel on the device *******************/ dim3 gridBlock(XSIZE/BLOCKX, YSIZE/BLOCKY); dim3 threadBlock(BLOCKX, BLOCKY); device_calculate<<<gridBlock, threadBlock>>>(imageBlock, xleft, yupper, step); cudaErrorCheck(cudaGetLastError()); /********** SUBTASK3 END *************************************************/ devicetime+=walltime()-start; start=walltime(); /********** SUBTASK4: Transfer the result from device to device_pixel[][]*/ cudaErrorCheck(cudaMemcpy(device_pixel, imageBlock, XSIZE*YSIZE * sizeof(int), cudaMemcpyDeviceToHost)); /********** SUBTASK4 END *************************************************/ memtime+=walltime()-start; /********** SUBTASK5: Free the device memory also ************************/ cudaErrorCheck(cudaFree(imageBlock)); /********** SUBTASK5 END *************************************************/ int errors=0; /* check if result is correct */ for(int i=0;i<XSIZE;i++) { for(int j=0;j<YSIZE;j++) { int diff=host_pixel[PIXEL(i,j)]-device_pixel[PIXEL(i,j)]; if(diff<0) diff=-diff; /* allow +-1 difference */ if(diff>1) { if(errors<10) printf("Error on pixel %d %d: expected %d, found %d\n", i,j,host_pixel[PIXEL(i,j)],device_pixel[PIXEL(i,j)]); else if(errors==10) puts("..."); errors++; } } } if(errors>0) printf("Found %d errors.\n",errors); else puts("Device calculations are correct."); printf("\n"); printf("Host time: %7.3f ms\n",hosttime*1e3); printf("Device calculation: %7.3f ms\n",devicetime*1e3); printf("Copy result: %7.3f ms\n",memtime*1e3); if(strtol(argv[1],NULL,10)!=0) { /* create nice image from iteration counts. take care to create it upside down (bmp format) */ unsigned char *buffer=(unsigned char *)calloc(XSIZE*YSIZE*3,1); for(int i=0;i<XSIZE;i++) { for(int j=0;j<YSIZE;j++) { int p=((YSIZE-j-1)*XSIZE+i)*3; fancycolour(buffer+p,device_pixel[PIXEL(i,j)]); } } /* write image to disk */ savebmp("mandel1.bmp",buffer,XSIZE,YSIZE); } return 0; }
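device_calculate above stores to imageBlock without a bounds check, which is safe only because XSIZE and YSIZE (2560 and 2048) are exact multiples of BLOCKX and BLOCKY, so the grid covers the image exactly. For sizes that do not divide evenly, the usual pattern is to round the grid up and guard the store; a sketch under that assumption (the guarded kernel, its placeholder body, and the width/height values are illustrative, not part of the original):

#include <cuda_runtime.h>
#include <cstdio>

__global__ void device_calculate_guarded(int *image, int width, int height,
                                         double xleft, double yupper, double step) {
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    int j = blockIdx.y * blockDim.y + threadIdx.y;
    if (i >= width || j >= height) return;   // skip the excess threads in edge blocks
    // ... the same escape-time iteration as device_calculate would go here ...
    image[i + j * width] = 0;                // placeholder store so the sketch compiles
}

int main() {
    const int width = 1000, height = 700;    // deliberately not multiples of 8
    int *d_image = nullptr;
    cudaMalloc((void**)&d_image, width * height * sizeof(int));
    dim3 block(8, 8);
    dim3 grid((width + block.x - 1) / block.x, (height + block.y - 1) / block.y);
    device_calculate_guarded<<<grid, block>>>(d_image, width, height, -2.01, 1.0, 0.001);
    cudaDeviceSynchronize();
    cudaFree(d_image);
    printf("launched %u x %u blocks for a %d x %d image\n", grid.x, grid.y, width, height);
    return 0;
}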
4b2950fee9aa0ecb48c5a340afbca7c3f4034507.hip
// !!! This is a file automatically generated by hipify!!! #include "file_system.h" #include <hip/hip_runtime.h> #include <hip/hip_runtime.h> #include <stdio.h> __device__ void user_program(FileSystem *fs, uchar *input, uchar *output) { /* /////////////// Test Case 1 /////////////// u32 fp = fs_open(fs, "t.txt\0", G_WRITE); fs_write(fs, input, 64, fp); fp = fs_open(fs, "b.txt\0", G_WRITE); fs_write(fs, input + 32, 32, fp); fp = fs_open(fs, "t.txt\0", G_WRITE); fs_write(fs, input + 32, 32, fp); fp = fs_open(fs, "t.txt\0", G_READ); fs_read(fs, output, 32, fp); fs_gsys(fs,LS_D); fs_gsys(fs, LS_S); fp = fs_open(fs, "b.txt\0", G_WRITE); fs_write(fs, input + 64, 12, fp); fs_gsys(fs, LS_S); fs_gsys(fs, LS_D); fs_gsys(fs, RM, "t.txt\0"); fs_gsys(fs, LS_S); */ /* /////////////// Test Case 2 /////////////// u32 fp = fs_open(fs, "t.txt\0", G_WRITE); fs_write(fs,input, 64, fp); fp = fs_open(fs,"b.txt\0", G_WRITE); fs_write(fs,input + 32, 32, fp); fp = fs_open(fs,"t.txt\0", G_WRITE); fs_write(fs,input + 32, 32, fp); fp = fs_open(fs,"t.txt\0", G_READ); fs_read(fs,output, 32, fp); fs_gsys(fs,LS_D); fs_gsys(fs,LS_S); fp = fs_open(fs,"b.txt\0", G_WRITE); fs_write(fs,input + 64, 12, fp); fs_gsys(fs,LS_S); fs_gsys(fs,LS_D); fs_gsys(fs,RM, "t.txt\0"); fs_gsys(fs,LS_S); char fname[10][20]; for (int i = 0; i < 10; i++) { fname[i][0] = i + 33; for (int j = 1; j < 19; j++) fname[i][j] = 64 + j; fname[i][19] = '\0'; } for (int i = 0; i < 10; i++) { fp = fs_open(fs,fname[i], G_WRITE); fs_write(fs,input + i, 24 + i, fp); } fs_gsys(fs,LS_S); for (int i = 0; i < 5; i++) fs_gsys(fs,RM, fname[i]); fs_gsys(fs,LS_D); */ ///* /////////////// Test Case 3 /////////////// u32 fp = fs_open(fs, "t.txt\0", G_WRITE); fs_write(fs, input, 64, fp); fp = fs_open(fs, "b.txt\0", G_WRITE); fs_write(fs, input + 32, 32, fp); fp = fs_open(fs, "t.txt\0", G_WRITE); fs_write(fs, input + 32, 32, fp); fp = fs_open(fs, "t.txt\0", G_READ); fs_read(fs, output, 32, fp); fs_gsys(fs, LS_D); fs_gsys(fs, LS_S); fp = fs_open(fs, "b.txt\0", G_WRITE); fs_write(fs, input + 64, 12, fp); fs_gsys(fs, LS_S); fs_gsys(fs, LS_D); fs_gsys(fs, RM, "t.txt\0"); fs_gsys(fs, LS_S); char fname[10][20]; for (int i = 0; i < 10; i++) { fname[i][0] = i + 33; for (int j = 1; j < 19; j++) fname[i][j] = 64 + j; fname[i][19] = '\0'; } for (int i = 0; i < 10; i++) { fp = fs_open(fs, fname[i], G_WRITE); fs_write(fs, input + i, 24 + i, fp); } fs_gsys(fs, LS_S); for (int i = 0; i < 5; i++) fs_gsys(fs, RM, fname[i]); fs_gsys(fs, LS_D); char fname2[1018][20]; int p = 0; for (int k = 2; k < 15; k++) for (int i = 50; i <= 126; i++, p++) { fname2[p][0] = i; for (int j = 1; j < k; j++) fname2[p][j] = 64 + j; //printf("i is %d, k is %d\n", i, k); fname2[p][k] = '\0'; } for (int i = 0; i < 1001; i++) { fp = fs_open(fs, fname2[i], G_WRITE); fs_write(fs, input + i, 24 + i, fp); //printf("2222222222222i is %d\n", i); } fs_gsys(fs, LS_S); fp = fs_open(fs, fname2[1000], G_READ); fs_read(fs, output + 1000, 1024, fp); char fname3[17][3]; for (int i = 0; i < 17; i++) { fname3[i][0] = 97 + i; fname3[i][1] = 97 + i; fname3[i][2] = '\0'; fp = fs_open(fs, fname3[i], G_WRITE); fs_write(fs, input + 1024 * i, 1024, fp); //printf("33333333333333i is %d\n", i); } fp = fs_open(fs, "EA\0", G_WRITE); fs_write(fs, input + 1024 * 100, 1024, fp); fs_gsys(fs, LS_S); //*/ }
4b2950fee9aa0ecb48c5a340afbca7c3f4034507.cu
#include "file_system.h" #include <cuda.h> #include <cuda_runtime.h> #include <stdio.h> __device__ void user_program(FileSystem *fs, uchar *input, uchar *output) { /* /////////////// Test Case 1 /////////////// u32 fp = fs_open(fs, "t.txt\0", G_WRITE); fs_write(fs, input, 64, fp); fp = fs_open(fs, "b.txt\0", G_WRITE); fs_write(fs, input + 32, 32, fp); fp = fs_open(fs, "t.txt\0", G_WRITE); fs_write(fs, input + 32, 32, fp); fp = fs_open(fs, "t.txt\0", G_READ); fs_read(fs, output, 32, fp); fs_gsys(fs,LS_D); fs_gsys(fs, LS_S); fp = fs_open(fs, "b.txt\0", G_WRITE); fs_write(fs, input + 64, 12, fp); fs_gsys(fs, LS_S); fs_gsys(fs, LS_D); fs_gsys(fs, RM, "t.txt\0"); fs_gsys(fs, LS_S); */ /* /////////////// Test Case 2 /////////////// u32 fp = fs_open(fs, "t.txt\0", G_WRITE); fs_write(fs,input, 64, fp); fp = fs_open(fs,"b.txt\0", G_WRITE); fs_write(fs,input + 32, 32, fp); fp = fs_open(fs,"t.txt\0", G_WRITE); fs_write(fs,input + 32, 32, fp); fp = fs_open(fs,"t.txt\0", G_READ); fs_read(fs,output, 32, fp); fs_gsys(fs,LS_D); fs_gsys(fs,LS_S); fp = fs_open(fs,"b.txt\0", G_WRITE); fs_write(fs,input + 64, 12, fp); fs_gsys(fs,LS_S); fs_gsys(fs,LS_D); fs_gsys(fs,RM, "t.txt\0"); fs_gsys(fs,LS_S); char fname[10][20]; for (int i = 0; i < 10; i++) { fname[i][0] = i + 33; for (int j = 1; j < 19; j++) fname[i][j] = 64 + j; fname[i][19] = '\0'; } for (int i = 0; i < 10; i++) { fp = fs_open(fs,fname[i], G_WRITE); fs_write(fs,input + i, 24 + i, fp); } fs_gsys(fs,LS_S); for (int i = 0; i < 5; i++) fs_gsys(fs,RM, fname[i]); fs_gsys(fs,LS_D); */ ///* /////////////// Test Case 3 /////////////// u32 fp = fs_open(fs, "t.txt\0", G_WRITE); fs_write(fs, input, 64, fp); fp = fs_open(fs, "b.txt\0", G_WRITE); fs_write(fs, input + 32, 32, fp); fp = fs_open(fs, "t.txt\0", G_WRITE); fs_write(fs, input + 32, 32, fp); fp = fs_open(fs, "t.txt\0", G_READ); fs_read(fs, output, 32, fp); fs_gsys(fs, LS_D); fs_gsys(fs, LS_S); fp = fs_open(fs, "b.txt\0", G_WRITE); fs_write(fs, input + 64, 12, fp); fs_gsys(fs, LS_S); fs_gsys(fs, LS_D); fs_gsys(fs, RM, "t.txt\0"); fs_gsys(fs, LS_S); char fname[10][20]; for (int i = 0; i < 10; i++) { fname[i][0] = i + 33; for (int j = 1; j < 19; j++) fname[i][j] = 64 + j; fname[i][19] = '\0'; } for (int i = 0; i < 10; i++) { fp = fs_open(fs, fname[i], G_WRITE); fs_write(fs, input + i, 24 + i, fp); } fs_gsys(fs, LS_S); for (int i = 0; i < 5; i++) fs_gsys(fs, RM, fname[i]); fs_gsys(fs, LS_D); char fname2[1018][20]; int p = 0; for (int k = 2; k < 15; k++) for (int i = 50; i <= 126; i++, p++) { fname2[p][0] = i; for (int j = 1; j < k; j++) fname2[p][j] = 64 + j; //printf("i is %d, k is %d\n", i, k); fname2[p][k] = '\0'; } for (int i = 0; i < 1001; i++) { fp = fs_open(fs, fname2[i], G_WRITE); fs_write(fs, input + i, 24 + i, fp); //printf("2222222222222i is %d\n", i); } fs_gsys(fs, LS_S); fp = fs_open(fs, fname2[1000], G_READ); fs_read(fs, output + 1000, 1024, fp); char fname3[17][3]; for (int i = 0; i < 17; i++) { fname3[i][0] = 97 + i; fname3[i][1] = 97 + i; fname3[i][2] = '\0'; fp = fs_open(fs, fname3[i], G_WRITE); fs_write(fs, input + 1024 * i, 1024, fp); //printf("33333333333333i is %d\n", i); } fp = fs_open(fs, "EA\0", G_WRITE); fs_write(fs, input + 1024 * 100, 1024, fp); fs_gsys(fs, LS_S); //*/ }
ec7e6221056982d7c7ea5904ec74f5c70eee3762.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "device_launch_parameters.h" extern "C" { #include "ppm_lib.h" } #include <stdlib.h> #include <time.h> #include <stdio.h> #include <math.h> #include "string.h" //Defini la taille du filtre (Sa dimension peut-etre de 3,5,7,9,11.....) #define DIMFILTRE 5 #define rebord 6 #if defined(__CUDA_ARCH__) && (__CUDA_ARCH__ < 200) #define printf(f, ...) ((void)(f, __VA_ARGS__),0) #endif //Insere un pixel (tab de deux cases) dans un tableau l'indice indiqu et decale toute les valeurs se trouvant droite de l'indice vers la droite void insererDansTableauTrie(long *tab, int tailleTab, long tab2[2], int indice) { int temp0; int temp1; for (int y = indice; y != tailleTab; y = y + 2) { temp0 = tab[y]; tab[y] = tab2[0]; tab2[0] = temp0; temp1 = tab[y + 1]; tab[y + 1] = tab2[1]; tab2[1] = temp1; } } //insere dans un tableau tri le pixel au bonne endroit et ralise les dcalages ncssaires //Le tab est un tab 2D en 1D la premiere valeurs est la position du pixels et la deuxieme son poids void rangerPixelDansTab(long *tab, long tab2[2], int tailleTab) { for (int i = tailleTab - 2; i >= -2; i = i - 2) { if (tab[i + 1] >= tab2[1]) { if (i == tailleTab - 2) { break; } insererDansTableauTrie(tab, tailleTab, tab2, i + 2); break; } if (i == 0) { insererDansTableauTrie(tab, tailleTab, tab2, i); } } } //La taille du filtre est modifiable, il faut aussi chang DIMFILTRE plus haut __constant__ int filtre[DIMFILTRE*DIMFILTRE] = { 5,5,4,2,5,4,7,8,2,1,5,4,0,1,2,4,5,7,5,4,8,4,5,4,6 }; //Kernel permettant de calculer les Vij //On prend autant de block que de ligne dans l'image, est 1024 thread. Chaque thread traitent 1 ou plusieurs pixels __global__ void calculVijsSharedMemory(PPMPixel* tabPixels, long* valeurs, int* tailleImageX) { //On est oblig de recuperer la taille de l'image car blockDim.x ne corespond pas forcment a la taille de l'image en X //Puisque dans le cas o l'image une taille en x superieur 1024 on ne peux pas prendre autant de thread que de pixel en largeur //threadIdx.x correspond au numero de la colonne et blockIdx.x au numero de la ligne int TID = threadIdx.x + blockIdx.x * (*tailleImageX); int index = threadIdx.x; //Dans la version avec la memoire partag, pour chaque block on met en mmoire partag seulement les pixels qui seront utiles au calculs de Vij pour la ligne courante //On ne connait pas l'avance la taille de ce tableau extern __shared__ PPMPixel pixelsProche[]; //Chaque thread du block renseigne une colonne du tableau int indexmp = index; int TIDtmp = TID; while (indexmp < (*tailleImageX)) { for (int i = 0; i != DIMFILTRE; i++) { pixelsProche[indexmp + (i* (*tailleImageX))] = tabPixels[TIDtmp + (i* (*tailleImageX)) - ((DIMFILTRE / 2)*(*tailleImageX))]; } indexmp += 1024; TIDtmp += 1024; } //On attends que tout les threads aient fini leurs travails __syncthreads(); while (index < (*tailleImageX)) { //Si le numero du thread ne correspond pas un pixel sur les bords if (index >= rebord && index < (*tailleImageX) - rebord && (blockIdx.x >= rebord) && blockIdx.x < (gridDim.x) - (rebord)) { //Calcul de V(i,j) int indiceFiltre = 0; for (int b = (-DIMFILTRE / 2); b != (DIMFILTRE / 2) + 1; b++) { for (int y = (-DIMFILTRE / 2); y != (DIMFILTRE / 2) + 1; y++) { int numeroPixel = index + ((DIMFILTRE / 2) * (*tailleImageX)) + (b * (*tailleImageX)) + y; valeurs[TID] += filtre[indiceFiltre] * (pixelsProche[numeroPixel].red + pixelsProche[numeroPixel].green); indiceFiltre++; } } } else { //V ij egale 0 sur les bords 
valeurs[TID] = 0; } //Si l'image a plus de 1024 thread alors il faut continuer traiter les pixels pas encore traits //Dans ce cas l le thread numero id va s'occuper de traiter le pixel id + 1024 (blockDim.x = 1024 ) index += 1024; TID += 1024; } } //Cache les characteres dans l'image void cacherChars(PPMImage *img, char c[]) { //On recupere la taille de la chaine de chararactere int stringLength = strlen(c); int tailleTabPixel = 2 * 8 * strlen(c); //Tableau recuperant les n pixels les plus lourd long *tabPixels = (long *)malloc(tailleTabPixel * sizeof(long)); //On initialise le tableau 0 for (int i = 0; i != tailleTabPixel; i++) { tabPixels[i] = 0; } PPMPixel *pixelsList = img->data; PPMPixel *dev_pixels; long *tabValeur = (long *)malloc(img->x*img->y * sizeof(long)); long *dev_Valeurs; int tailleImgX = img->x; int *dev_TailleImgX; hipMalloc((void**)&dev_pixels, img->x*img->y * sizeof(PPMPixel)); hipMalloc((void**)&dev_Valeurs, img->x*img->y * sizeof(long)); hipMalloc((void**)&dev_TailleImgX, sizeof(int)); //Copie du tableau de pixels sur le GPU hipMemcpy(dev_pixels, pixelsList, img->x*img->y * sizeof(PPMPixel), hipMemcpyHostToDevice); hipMemcpy(dev_TailleImgX, &tailleImgX, sizeof(int), hipMemcpyHostToDevice); //Lacement du kernel calculVijsSharedMemory << <(img->y), 1024 ,img->x*DIMFILTRE*sizeof(PPMPixel)>> > (dev_pixels, dev_Valeurs, dev_TailleImgX); //Copie du tableau de valeurs du GPU vers le CPU hipMemcpy(tabValeur, dev_Valeurs, img->x*img->y * sizeof(long), hipMemcpyDeviceToHost); /* liberer la memoire allouee sur le GPU */ hipFree(dev_pixels); hipFree(dev_Valeurs); hipFree(dev_TailleImgX); //On cherche les n pixels le plus grand Vij for (int v = 0; v != (img->x) * (img->y); v++) { if (tabValeur[v] != 0) { long tab[2] = { v,tabValeur[v] }; rangerPixelDansTab(tabPixels, tab, tailleTabPixel); } } free(tabValeur); //Pour chaque caractere coder : for (int y = 0; y != stringLength; y++) { int dec = c[y]; //Pour chaque bit for (int i = 0; i < 8; i++) { if (dec - pow(2, 7 - i) >= 0) { dec = dec - pow(2, 7 - i); //Si le bit a coder est 1 mais le bit de poids faible du bleu du pixel est 0 alors on le change en 1 if (img->data[tabPixels[(i * 2) + (y * 8 * 2)]].blue % 2 == 0) { img->data[tabPixels[(i * 2) + (y * 8 * 2)]].blue += 1; } } else { //Si le bit a coder est 0 mais le bit de poids faible du bleu du pixel est 0 alors on le change en 1 if (img->data[tabPixels[(i * 2) + (y * 8 * 2)]].blue % 2 != 0) { img->data[tabPixels[(i * 2) + (y * 8 * 2)]].blue -= 1; } } } } } //Trouve les characteres cachs dans l'image void trouverChars(PPMImage *img, int nbChar) { //Initialisation du tableau contenant les n valeurs les plus haute int tailleTabPixel = 2 * 8 * nbChar; //Ce tableau contient les n valeurs les plus lourdes ainsi que leur position coresspondante dans l'image long *tabPixels = (long *)malloc(tailleTabPixel * sizeof(long)); //On initialise le tableau 0 for (int i = 0; i != tailleTabPixel; i++) { tabPixels[i] = 0; } PPMPixel *pixelsList = img->data; PPMPixel *dev_pixels; long *tabValeur = (long *)malloc(img->x*img->y * sizeof(long)); long *dev_Valeurs; int tailleImgX = img->x; int *dev_TailleImgX; hipMalloc((void**)&dev_pixels, img->x*img->y * sizeof(PPMPixel)); hipMalloc((void**)&dev_Valeurs, img->x*img->y * sizeof(long)); hipMalloc((void**)&dev_TailleImgX, sizeof(int)); //Copie du tableau de pixels sur le GPU hipMemcpy(dev_pixels, pixelsList, img->x*img->y * sizeof(PPMPixel), hipMemcpyHostToDevice); hipMemcpy(dev_TailleImgX, &tailleImgX, sizeof(int), hipMemcpyHostToDevice); //Lacement du 
kernel calculVijsSharedMemory << <(img->y), 1024, img->x*DIMFILTRE * sizeof(PPMPixel) >> > (dev_pixels, dev_Valeurs, dev_TailleImgX); //Copie du tableau de valeurs du GPU vers le CPU hipMemcpy(tabValeur, dev_Valeurs, img->x*img->y * sizeof(long), hipMemcpyDeviceToHost); /* liberer la memoire allouee sur le GPU */ hipFree(dev_pixels); hipFree(dev_Valeurs); hipFree(dev_TailleImgX); //On cherche les 8 pixels les plus <<lourds>> for (int v = 0; v != (img->x) * (img->y); v++) { if (tabValeur[v] != 0) { long tab[2] = { v,tabValeur[v] }; rangerPixelDansTab(tabPixels, tab, tailleTabPixel); } } free(tabValeur); for (int y = 0; y != nbChar; y++) { //Pour chaque octet char* dest = (char *)malloc(8); for (int i = 0; i != 8; i++) { //Si le bit de poids faible de la couleur bleu est 0 if (img->data[tabPixels[(i * 2) + (y * 8 * 2)]].blue % 2 == 0) { dest[i] = '0'; } //Si le bit de poids faible de la couleur bleu est 1 else { dest[i] = '1'; } } char e = strtol(dest, (char **)NULL, 2); printf(" Charactere trouve : %c \n", e); } } int main() { hipFree(0); PPMImage *image; //Ouverture de l'image image = readPPM("images/gare.ppm"); //Chaine de char cacher char c[] = "test cacher un char"; //On affiche la taille du printf("Largeur : %d hauteur : %d \n", image->x, image->y); int nbImage = 1; //On cache le char dans l'image clock_t d = clock(); for(int i = 0; i != nbImage ; i++) cacherChars(image, c); clock_t f = clock(); double time_taken = double(f - d) / double(CLOCKS_PER_SEC); printf("%f \n", time_taken); //On recherche les n char dans l'image trouverChars(image, 19); return 0; }
ec7e6221056982d7c7ea5904ec74f5c70eee3762.cu
#include "cuda_runtime.h" #include "device_launch_parameters.h" extern "C" { #include "ppm_lib.h" } #include <stdlib.h> #include <time.h> #include <stdio.h> #include <math.h> #include "string.h" //Defini la taille du filtre (Sa dimension peut-etre de 3,5,7,9,11.....) #define DIMFILTRE 5 #define rebord 6 #if defined(__CUDA_ARCH__) && (__CUDA_ARCH__ < 200) #define printf(f, ...) ((void)(f, __VA_ARGS__),0) #endif //Insere un pixel (tab de deux cases) dans un tableau à l'indice indiqué et decale toute les valeurs se trouvant à droite de l'indice vers la droite void insererDansTableauTrie(long *tab, int tailleTab, long tab2[2], int indice) { int temp0; int temp1; for (int y = indice; y != tailleTab; y = y + 2) { temp0 = tab[y]; tab[y] = tab2[0]; tab2[0] = temp0; temp1 = tab[y + 1]; tab[y + 1] = tab2[1]; tab2[1] = temp1; } } //insere dans un tableau trié le pixel au bonne endroit et réalise les décalages nécéssaires //Le tab est un tab 2D en 1D la premiere valeurs est la position du pixels et la deuxieme son poids void rangerPixelDansTab(long *tab, long tab2[2], int tailleTab) { for (int i = tailleTab - 2; i >= -2; i = i - 2) { if (tab[i + 1] >= tab2[1]) { if (i == tailleTab - 2) { break; } insererDansTableauTrie(tab, tailleTab, tab2, i + 2); break; } if (i == 0) { insererDansTableauTrie(tab, tailleTab, tab2, i); } } } //La taille du filtre est modifiable, il faut aussi changé DIMFILTRE plus haut __constant__ int filtre[DIMFILTRE*DIMFILTRE] = { 5,5,4,2,5,4,7,8,2,1,5,4,0,1,2,4,5,7,5,4,8,4,5,4,6 }; //Kernel permettant de calculer les Vij //On prend autant de block que de ligne dans l'image, est 1024 thread. Chaque thread traitent 1 ou plusieurs pixels __global__ void calculVijsSharedMemory(PPMPixel* tabPixels, long* valeurs, int* tailleImageX) { //On est obligé de recuperer la taille de l'image car blockDim.x ne corespond pas forcément a la taille de l'image en X //Puisque dans le cas où l'image à une taille en x superieur à 1024 on ne peux pas prendre autant de thread que de pixel en largeur //threadIdx.x correspond au numero de la colonne et blockIdx.x au numero de la ligne int TID = threadIdx.x + blockIdx.x * (*tailleImageX); int index = threadIdx.x; //Dans la version avec la memoire partagé, pour chaque block on met en mémoire partagé seulement les pixels qui seront utiles au calculs de Vij pour la ligne courante //On ne connait pas à l'avance la taille de ce tableau extern __shared__ PPMPixel pixelsProche[]; //Chaque thread du block renseigne une colonne du tableau int indexmp = index; int TIDtmp = TID; while (indexmp < (*tailleImageX)) { for (int i = 0; i != DIMFILTRE; i++) { pixelsProche[indexmp + (i* (*tailleImageX))] = tabPixels[TIDtmp + (i* (*tailleImageX)) - ((DIMFILTRE / 2)*(*tailleImageX))]; } indexmp += 1024; TIDtmp += 1024; } //On attends que tout les threads aient fini leurs travails __syncthreads(); while (index < (*tailleImageX)) { //Si le numero du thread ne correspond pas à un pixel sur les bords if (index >= rebord && index < (*tailleImageX) - rebord && (blockIdx.x >= rebord) && blockIdx.x < (gridDim.x) - (rebord)) { //Calcul de V(i,j) int indiceFiltre = 0; for (int b = (-DIMFILTRE / 2); b != (DIMFILTRE / 2) + 1; b++) { for (int y = (-DIMFILTRE / 2); y != (DIMFILTRE / 2) + 1; y++) { int numeroPixel = index + ((DIMFILTRE / 2) * (*tailleImageX)) + (b * (*tailleImageX)) + y; valeurs[TID] += filtre[indiceFiltre] * (pixelsProche[numeroPixel].red + pixelsProche[numeroPixel].green); indiceFiltre++; } } } else { //V ij egale à 0 sur les bords valeurs[TID] = 0; } //Si l'image a plus de 
1024 thread alors il faut continuer à traiter les pixels pas encore traités //Dans ce cas là le thread numero id va s'occuper de traiter le pixel id + 1024 (blockDim.x = 1024 ) index += 1024; TID += 1024; } } //Cache les characteres dans l'image void cacherChars(PPMImage *img, char c[]) { //On recupere la taille de la chaine de chararactere int stringLength = strlen(c); int tailleTabPixel = 2 * 8 * strlen(c); //Tableau recuperant les n pixels les plus lourd long *tabPixels = (long *)malloc(tailleTabPixel * sizeof(long)); //On initialise le tableau à 0 for (int i = 0; i != tailleTabPixel; i++) { tabPixels[i] = 0; } PPMPixel *pixelsList = img->data; PPMPixel *dev_pixels; long *tabValeur = (long *)malloc(img->x*img->y * sizeof(long)); long *dev_Valeurs; int tailleImgX = img->x; int *dev_TailleImgX; cudaMalloc((void**)&dev_pixels, img->x*img->y * sizeof(PPMPixel)); cudaMalloc((void**)&dev_Valeurs, img->x*img->y * sizeof(long)); cudaMalloc((void**)&dev_TailleImgX, sizeof(int)); //Copie du tableau de pixels sur le GPU cudaMemcpy(dev_pixels, pixelsList, img->x*img->y * sizeof(PPMPixel), cudaMemcpyHostToDevice); cudaMemcpy(dev_TailleImgX, &tailleImgX, sizeof(int), cudaMemcpyHostToDevice); //Lacement du kernel calculVijsSharedMemory << <(img->y), 1024 ,img->x*DIMFILTRE*sizeof(PPMPixel)>> > (dev_pixels, dev_Valeurs, dev_TailleImgX); //Copie du tableau de valeurs du GPU vers le CPU cudaMemcpy(tabValeur, dev_Valeurs, img->x*img->y * sizeof(long), cudaMemcpyDeviceToHost); /* liberer la memoire allouee sur le GPU */ cudaFree(dev_pixels); cudaFree(dev_Valeurs); cudaFree(dev_TailleImgX); //On cherche les n pixels le plus grand Vij for (int v = 0; v != (img->x) * (img->y); v++) { if (tabValeur[v] != 0) { long tab[2] = { v,tabValeur[v] }; rangerPixelDansTab(tabPixels, tab, tailleTabPixel); } } free(tabValeur); //Pour chaque caractere à coder : for (int y = 0; y != stringLength; y++) { int dec = c[y]; //Pour chaque bit for (int i = 0; i < 8; i++) { if (dec - pow(2, 7 - i) >= 0) { dec = dec - pow(2, 7 - i); //Si le bit a coder est 1 mais le bit de poids faible du bleu du pixel est 0 alors on le change en 1 if (img->data[tabPixels[(i * 2) + (y * 8 * 2)]].blue % 2 == 0) { img->data[tabPixels[(i * 2) + (y * 8 * 2)]].blue += 1; } } else { //Si le bit a coder est 0 mais le bit de poids faible du bleu du pixel est 0 alors on le change en 1 if (img->data[tabPixels[(i * 2) + (y * 8 * 2)]].blue % 2 != 0) { img->data[tabPixels[(i * 2) + (y * 8 * 2)]].blue -= 1; } } } } } //Trouve les characteres cachés dans l'image void trouverChars(PPMImage *img, int nbChar) { //Initialisation du tableau contenant les n valeurs les plus haute int tailleTabPixel = 2 * 8 * nbChar; //Ce tableau contient les n valeurs les plus lourdes ainsi que leur position coresspondante dans l'image long *tabPixels = (long *)malloc(tailleTabPixel * sizeof(long)); //On initialise le tableau à 0 for (int i = 0; i != tailleTabPixel; i++) { tabPixels[i] = 0; } PPMPixel *pixelsList = img->data; PPMPixel *dev_pixels; long *tabValeur = (long *)malloc(img->x*img->y * sizeof(long)); long *dev_Valeurs; int tailleImgX = img->x; int *dev_TailleImgX; cudaMalloc((void**)&dev_pixels, img->x*img->y * sizeof(PPMPixel)); cudaMalloc((void**)&dev_Valeurs, img->x*img->y * sizeof(long)); cudaMalloc((void**)&dev_TailleImgX, sizeof(int)); //Copie du tableau de pixels sur le GPU cudaMemcpy(dev_pixels, pixelsList, img->x*img->y * sizeof(PPMPixel), cudaMemcpyHostToDevice); cudaMemcpy(dev_TailleImgX, &tailleImgX, sizeof(int), cudaMemcpyHostToDevice); //Lacement du kernel 
calculVijsSharedMemory << <(img->y), 1024, img->x*DIMFILTRE * sizeof(PPMPixel) >> > (dev_pixels, dev_Valeurs, dev_TailleImgX); //Copie du tableau de valeurs du GPU vers le CPU cudaMemcpy(tabValeur, dev_Valeurs, img->x*img->y * sizeof(long), cudaMemcpyDeviceToHost); /* liberer la memoire allouee sur le GPU */ cudaFree(dev_pixels); cudaFree(dev_Valeurs); cudaFree(dev_TailleImgX); //On cherche les 8 pixels les plus <<lourds>> for (int v = 0; v != (img->x) * (img->y); v++) { if (tabValeur[v] != 0) { long tab[2] = { v,tabValeur[v] }; rangerPixelDansTab(tabPixels, tab, tailleTabPixel); } } free(tabValeur); for (int y = 0; y != nbChar; y++) { //Pour chaque octet char* dest = (char *)malloc(8); for (int i = 0; i != 8; i++) { //Si le bit de poids faible de la couleur bleu est 0 if (img->data[tabPixels[(i * 2) + (y * 8 * 2)]].blue % 2 == 0) { dest[i] = '0'; } //Si le bit de poids faible de la couleur bleu est 1 else { dest[i] = '1'; } } char e = strtol(dest, (char **)NULL, 2); printf(" Charactere trouve : %c \n", e); } } int main() { cudaFree(0); PPMImage *image; //Ouverture de l'image image = readPPM("images/gare.ppm"); //Chaine de char à cacher char c[] = "test cacher un char"; //On affiche la taille du printf("Largeur : %d hauteur : %d \n", image->x, image->y); int nbImage = 1; //On cache le char dans l'image clock_t d = clock(); for(int i = 0; i != nbImage ; i++) cacherChars(image, c); clock_t f = clock(); double time_taken = double(f - d) / double(CLOCKS_PER_SEC); printf("%f \n", time_taken); //On recherche les n char dans l'image trouverChars(image, 19); return 0; }
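The hiding step in cacherChars above walks the bits of each character with pow(2, 7 - i) and then forces the least-significant bit of the selected pixel's blue channel to match, and trouverChars reads those bits back in the same MSB-first order. The same per-character round trip written with integer bit operations, leaving out the V(i,j)-based pixel selection and the stride-2 indexing used by the project (a standalone sketch, not the project's code):

#include <stdio.h>

/* Embed the 8 bits of c (most significant first) into the low bit of blue[0..7]. */
static void embed_char(unsigned char blue[8], unsigned char c) {
    for (int i = 0; i < 8; i++) {
        unsigned char bit = (c >> (7 - i)) & 1;            /* i-th bit, MSB first */
        blue[i] = (unsigned char)((blue[i] & ~1) | bit);   /* overwrite only the LSB */
    }
}

/* Recover the character by reading the LSBs back in the same order. */
static unsigned char extract_char(const unsigned char blue[8]) {
    unsigned char c = 0;
    for (int i = 0; i < 8; i++) c = (unsigned char)((c << 1) | (blue[i] & 1));
    return c;
}

int main(void) {
    unsigned char blue[8] = {200, 13, 77, 0, 255, 128, 9, 42};
    embed_char(blue, 'A');
    printf("round trip: %c\n", extract_char(blue));  /* prints A */
    return 0;
}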
74e5dbe8b8776464b0ce75c3a5b1a3954a731d9e.hip
// !!! This is a file automatically generated by hipify!!! #include <iostream> #include <limits> #include "hip/hip_runtime.h" #include "DataFormats/EcalDigi/interface/EcalDataFrame.h" #include "DataFormats/Math/interface/approx_exp.h" #include "DataFormats/Math/interface/approx_log.h" #include "CondFormats/EcalObjects/interface/EcalPulseShapes.h" #include "CondFormats/EcalObjects/interface/EcalPulseCovariances.h" #include "DataFormats/EcalDigi/interface/EcalDigiCollections.h" #include "inplace_fnnls.h" #include "AmplitudeComputationKernelsV1.h" #include "AmplitudeComputationCommonKernels.h" namespace ecal { namespace multifit { void eigen_solve_submatrix(SampleMatrix& mat, SampleVector& invec, SampleVector& outvec, unsigned NP) { using namespace Eigen; switch (NP) { // pulse matrix is always square. case 10: { Matrix<SampleMatrix::Scalar, 10, 10> temp = mat.topLeftCorner<10, 10>(); outvec.head<10>() = temp.ldlt().solve(invec.head<10>()); break; } case 9: { Matrix<SampleMatrix::Scalar, 9, 9> temp = mat.topLeftCorner<9, 9>(); outvec.head<9>() = temp.ldlt().solve(invec.head<9>()); break; } case 8: { Matrix<SampleMatrix::Scalar, 8, 8> temp = mat.topLeftCorner<8, 8>(); outvec.head<8>() = temp.ldlt().solve(invec.head<8>()); break; } case 7: { Matrix<SampleMatrix::Scalar, 7, 7> temp = mat.topLeftCorner<7, 7>(); outvec.head<7>() = temp.ldlt().solve(invec.head<7>()); break; } case 6: { Matrix<SampleMatrix::Scalar, 6, 6> temp = mat.topLeftCorner<6, 6>(); outvec.head<6>() = temp.ldlt().solve(invec.head<6>()); break; } case 5: { Matrix<SampleMatrix::Scalar, 5, 5> temp = mat.topLeftCorner<5, 5>(); outvec.head<5>() = temp.ldlt().solve(invec.head<5>()); break; } case 4: { Matrix<SampleMatrix::Scalar, 4, 4> temp = mat.topLeftCorner<4, 4>(); outvec.head<4>() = temp.ldlt().solve(invec.head<4>()); break; } case 3: { Matrix<SampleMatrix::Scalar, 3, 3> temp = mat.topLeftCorner<3, 3>(); outvec.head<3>() = temp.ldlt().solve(invec.head<3>()); break; } case 2: { Matrix<SampleMatrix::Scalar, 2, 2> temp = mat.topLeftCorner<2, 2>(); outvec.head<2>() = temp.ldlt().solve(invec.head<2>()); break; } case 1: { Matrix<SampleMatrix::Scalar, 1, 1> temp = mat.topLeftCorner<1, 1>(); outvec.head<1>() = temp.ldlt().solve(invec.head<1>()); break; } default: return; } } #define PRINT_MATRIX_10x10(M) \ printf( \ "%f %f %f %f %f %f %f %f %f %f\n%f %f %f %f %f %f %f %f %f %f\n%f %f %f %f %f %f %f %f %f %f\n%f %f %f %f %f " \ "%f %f %f %f %f\n%f %f %f %f %f %f %f %f %f %f\n%f %f %f %f %f %f %f %f %f %f\n%f %f %f %f %f %f %f %f %f " \ "%f\n%f %f %f %f %f %f %f %f %f %f\n%f %f %f %f %f %f %f %f %f %f\n%f %f %f %f %f %f %f %f %f %f\n", \ M(0, 0), \ M(1, 0), \ M(2, 0), \ M(3, 0), \ M(4, 0), \ M(5, 0), \ M(6, 0), \ M(7, 0), \ M(8, 0), \ M(9, 0), \ M(0, 1), \ M(1, 1), \ M(2, 1), \ M(3, 1), \ M(4, 1), \ M(5, 1), \ M(6, 1), \ M(7, 1), \ M(8, 1), \ M(9, 1), \ M(0, 2), \ M(1, 2), \ M(2, 2), \ M(3, 2), \ M(4, 2), \ M(5, 2), \ M(6, 2), \ M(7, 2), \ M(8, 2), \ M(9, 2), \ M(0, 3), \ M(1, 3), \ M(2, 3), \ M(3, 3), \ M(4, 3), \ M(5, 3), \ M(6, 3), \ M(7, 3), \ M(8, 3), \ M(9, 3), \ M(0, 4), \ M(1, 4), \ M(2, 4), \ M(3, 4), \ M(4, 4), \ M(5, 4), \ M(6, 4), \ M(7, 4), \ M(8, 4), \ M(9, 4), \ M(0, 5), \ M(1, 5), \ M(2, 5), \ M(3, 5), \ M(4, 5), \ M(5, 5), \ M(6, 5), \ M(7, 5), \ M(8, 5), \ M(9, 5), \ M(0, 6), \ M(1, 6), \ M(2, 6), \ M(3, 6), \ M(4, 6), \ M(5, 6), \ M(6, 6), \ M(7, 6), \ M(8, 6), \ M(9, 6), \ M(0, 7), \ M(1, 7), \ M(2, 7), \ M(3, 7), \ M(4, 7), \ M(5, 7), \ M(6, 7), \ M(7, 7), \ M(8, 7), \ M(9, 7), \ M(0, 8), \ M(1, 8), \ M(2, 8), \ M(3, 
8), \ M(4, 8), \ M(5, 8), \ M(6, 8), \ M(7, 8), \ M(8, 8), \ M(9, 8), \ M(0, 9), \ M(1, 9), \ M(2, 9), \ M(3, 9), \ M(4, 9), \ M(5, 9), \ M(6, 9), \ M(7, 9), \ M(8, 9), \ M(9, 9)) __device__ __forceinline__ bool update_covariance(SampleMatrix const& noisecov, FullSampleMatrix const& full_pulse_cov, SampleMatrix& inverse_cov, BXVectorType const& bxs, SampleDecompLLT& covariance_decomposition, SampleVector const& amplitudes) { constexpr int nsamples = SampleVector::RowsAtCompileTime; constexpr int npulses = BXVectorType::RowsAtCompileTime; inverse_cov = noisecov; for (unsigned int ipulse = 0; ipulse < npulses; ipulse++) { if (amplitudes.coeff(ipulse) == 0) continue; int bx = bxs.coeff(ipulse); int first_sample_t = ::max(0, bx + 3); int offset = 7 - 3 - bx; auto const value = amplitudes.coeff(ipulse); auto const value_sq = value * value; unsigned int nsample_pulse = nsamples - first_sample_t; inverse_cov.block(first_sample_t, first_sample_t, nsample_pulse, nsample_pulse) += value_sq * full_pulse_cov.block(first_sample_t + offset, first_sample_t + offset, nsample_pulse, nsample_pulse); } return true; } __device__ __forceinline__ SampleVector::Scalar compute_chi2(SampleDecompLLT& covariance_decomposition, PulseMatrixType const& pulse_matrix, SampleVector const& amplitudes, SampleVector const& samples) { return covariance_decomposition.matrixL().solve(pulse_matrix * amplitudes - samples).squaredNorm(); } /// /// launch ctx parameters are (nchannels / block, blocks) /// TODO: trivial impl for now, there must be a way to improve /// /// Conventions: /// - amplitudes -> solution vector, what we are fitting for /// - samples -> raw detector responses /// - passive constraint - satisfied constraint /// - active constraint - unsatisfied (yet) constraint /// __global__ void kernel_minimize(SampleMatrix const* noisecov, FullSampleMatrix const* full_pulse_cov, BXVectorType* bxs, SampleVector const* samples, SampleVector* amplitudes, PulseMatrixType* pulse_matrix, ::ecal::reco::StorageScalarType* chi2s, char* acState, int nchannels, int max_iterations) { int idx = threadIdx.x + blockDim.x * blockIdx.x; if (idx < nchannels) { if (static_cast<MinimizationState>(acState[idx]) == MinimizationState::Precomputed) return; // inits int iter = 0; int npassive = 0; // inits SampleDecompLLT covariance_decomposition; SampleMatrix inverse_cov; SampleVector::Scalar chi2 = 0, chi2_now = 0; #ifdef ECAL_MULTIFIT_KERNEL_MINIMIZE_V1 // PRINT_MATRIX_10x10(noisecov[idx]); #endif // loop until ocnverge while (true) { if (iter >= max_iterations) break; update_covariance( noisecov[idx], full_pulse_cov[idx], inverse_cov, bxs[idx], covariance_decomposition, amplitudes[idx]); // compute actual covariance decomposition covariance_decomposition.compute(inverse_cov); // prepare input matrices for fnnls SampleMatrix A = covariance_decomposition.matrixL().solve(pulse_matrix[idx]); SampleVector b = covariance_decomposition.matrixL().solve(samples[idx]); inplace_fnnls(A, b, amplitudes[idx], npassive, bxs[idx], pulse_matrix[idx]); chi2_now = compute_chi2(covariance_decomposition, pulse_matrix[idx], amplitudes[idx], samples[idx]); auto deltachi2 = chi2_now - chi2; #ifdef ECAL_MULTIFIT_KERNEL_MINIMIZE_V1 if (iter > 10) { printf("idx = %d iter = %d chi2 = %f chi2old = %f\n", idx, iter, chi2_now, chi2); printf("noisecov(0, i): %f %f %f %f %f %f %f %f %f %f\n", noisecov[idx](0, 0), noisecov[idx](0, 1), noisecov[idx](0, 2), noisecov[idx](0, 3), noisecov[idx](0, 4), noisecov[idx](0, 5), noisecov[idx](0, 6), noisecov[idx](0, 7), noisecov[idx](0, 
8), noisecov[idx](0, 9)); printf("ampls: %f %f %f %f %f %f %f %f %f %f\n", amplitudes[idx](0), amplitudes[idx](1), amplitudes[idx](2), amplitudes[idx](3), amplitudes[idx](4), amplitudes[idx](5), amplitudes[idx](6), amplitudes[idx](7), amplitudes[idx](8), amplitudes[idx](9)); } #endif chi2 = chi2_now; if (ecal::abs(deltachi2) < 1e-3) break; //---- AM: TEST //---- it was 3 lines above, now here as in the CPU version ++iter; } // the rest will be set later chi2s[idx] = chi2; } } namespace v1 { void minimization_procedure(EventInputDataCPU const& eventInputCPU, EventInputDataGPU& eventInputGPU, EventOutputDataGPU& eventOutputGPU, EventDataForScratchGPU& scratch, ConditionsProducts const& conditions, ConfigurationParameters const& configParameters, hipStream_t cudaStream) { unsigned int totalChannels = eventInputCPU.ebDigis.size() + eventInputCPU.eeDigis.size(); // unsigned int threads_min = conf.threads.x; // TODO: configure from python unsigned int threads_min = configParameters.kernelMinimizeThreads[0]; unsigned int blocks_min = threads_min > totalChannels ? 1 : (totalChannels + threads_min - 1) / threads_min; hipLaunchKernelGGL(( kernel_minimize), dim3(blocks_min), dim3(threads_min), 0, cudaStream, scratch.noisecov, scratch.pulse_covariances, scratch.activeBXs, scratch.samples, (SampleVector*)eventOutputGPU.amplitudesAll, scratch.pulse_matrix, eventOutputGPU.chi2, scratch.acState, totalChannels, 50); cudaCheck(hipGetLastError()); // // permute computed amplitudes // and assign the final uncalibared energy value // unsigned int threadsPermute = 32 * EcalDataFrame::MAXSAMPLES; // 32 * 10 unsigned int blocksPermute = threadsPermute > 10 * totalChannels ? 1 : (10 * totalChannels + threadsPermute - 1) / threadsPermute; int bytesPermute = threadsPermute * sizeof(SampleVector::Scalar); hipLaunchKernelGGL(( kernel_permute_results), dim3(blocksPermute), dim3(threadsPermute), bytesPermute, cudaStream, (SampleVector*)eventOutputGPU.amplitudesAll, scratch.activeBXs, eventOutputGPU.amplitude, scratch.acState, totalChannels); cudaCheck(hipGetLastError()); } } // namespace v1 } // namespace multifit } // namespace ecal
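eigen_solve_submatrix above enumerates every NP from 10 down to 1 so that each case operates on a fixed-size topLeftCorner block; the operation itself is just an LDLT solve of the leading NP x NP subsystem. The same computation written once with dynamic sizes for readability (the sizes and data below are illustrative only, and the per-size unrolling in the real code is presumably there so the temporaries get compile-time dimensions):

#include <Eigen/Dense>
#include <iostream>

int main() {
    const int N = 10, NP = 4;                                  // pretend only 4 pulses are active
    Eigen::MatrixXd mat   = Eigen::MatrixXd::Identity(N, N) * 3.0;
    Eigen::VectorXd invec = Eigen::VectorXd::Ones(N);
    Eigen::VectorXd outvec = Eigen::VectorXd::Zero(N);
    // Solve only the leading NP x NP block, as each switch case above does with fixed sizes.
    outvec.head(NP) = mat.topLeftCorner(NP, NP).ldlt().solve(invec.head(NP));
    std::cout << outvec.transpose() << "\n";                   // first NP entries are 1/3, the rest stay 0
    return 0;
}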
74e5dbe8b8776464b0ce75c3a5b1a3954a731d9e.cu
#include <iostream> #include <limits> #include "cuda.h" #include "DataFormats/EcalDigi/interface/EcalDataFrame.h" #include "DataFormats/Math/interface/approx_exp.h" #include "DataFormats/Math/interface/approx_log.h" #include "CondFormats/EcalObjects/interface/EcalPulseShapes.h" #include "CondFormats/EcalObjects/interface/EcalPulseCovariances.h" #include "DataFormats/EcalDigi/interface/EcalDigiCollections.h" #include "inplace_fnnls.h" #include "AmplitudeComputationKernelsV1.h" #include "AmplitudeComputationCommonKernels.h" namespace ecal { namespace multifit { void eigen_solve_submatrix(SampleMatrix& mat, SampleVector& invec, SampleVector& outvec, unsigned NP) { using namespace Eigen; switch (NP) { // pulse matrix is always square. case 10: { Matrix<SampleMatrix::Scalar, 10, 10> temp = mat.topLeftCorner<10, 10>(); outvec.head<10>() = temp.ldlt().solve(invec.head<10>()); break; } case 9: { Matrix<SampleMatrix::Scalar, 9, 9> temp = mat.topLeftCorner<9, 9>(); outvec.head<9>() = temp.ldlt().solve(invec.head<9>()); break; } case 8: { Matrix<SampleMatrix::Scalar, 8, 8> temp = mat.topLeftCorner<8, 8>(); outvec.head<8>() = temp.ldlt().solve(invec.head<8>()); break; } case 7: { Matrix<SampleMatrix::Scalar, 7, 7> temp = mat.topLeftCorner<7, 7>(); outvec.head<7>() = temp.ldlt().solve(invec.head<7>()); break; } case 6: { Matrix<SampleMatrix::Scalar, 6, 6> temp = mat.topLeftCorner<6, 6>(); outvec.head<6>() = temp.ldlt().solve(invec.head<6>()); break; } case 5: { Matrix<SampleMatrix::Scalar, 5, 5> temp = mat.topLeftCorner<5, 5>(); outvec.head<5>() = temp.ldlt().solve(invec.head<5>()); break; } case 4: { Matrix<SampleMatrix::Scalar, 4, 4> temp = mat.topLeftCorner<4, 4>(); outvec.head<4>() = temp.ldlt().solve(invec.head<4>()); break; } case 3: { Matrix<SampleMatrix::Scalar, 3, 3> temp = mat.topLeftCorner<3, 3>(); outvec.head<3>() = temp.ldlt().solve(invec.head<3>()); break; } case 2: { Matrix<SampleMatrix::Scalar, 2, 2> temp = mat.topLeftCorner<2, 2>(); outvec.head<2>() = temp.ldlt().solve(invec.head<2>()); break; } case 1: { Matrix<SampleMatrix::Scalar, 1, 1> temp = mat.topLeftCorner<1, 1>(); outvec.head<1>() = temp.ldlt().solve(invec.head<1>()); break; } default: return; } } #define PRINT_MATRIX_10x10(M) \ printf( \ "%f %f %f %f %f %f %f %f %f %f\n%f %f %f %f %f %f %f %f %f %f\n%f %f %f %f %f %f %f %f %f %f\n%f %f %f %f %f " \ "%f %f %f %f %f\n%f %f %f %f %f %f %f %f %f %f\n%f %f %f %f %f %f %f %f %f %f\n%f %f %f %f %f %f %f %f %f " \ "%f\n%f %f %f %f %f %f %f %f %f %f\n%f %f %f %f %f %f %f %f %f %f\n%f %f %f %f %f %f %f %f %f %f\n", \ M(0, 0), \ M(1, 0), \ M(2, 0), \ M(3, 0), \ M(4, 0), \ M(5, 0), \ M(6, 0), \ M(7, 0), \ M(8, 0), \ M(9, 0), \ M(0, 1), \ M(1, 1), \ M(2, 1), \ M(3, 1), \ M(4, 1), \ M(5, 1), \ M(6, 1), \ M(7, 1), \ M(8, 1), \ M(9, 1), \ M(0, 2), \ M(1, 2), \ M(2, 2), \ M(3, 2), \ M(4, 2), \ M(5, 2), \ M(6, 2), \ M(7, 2), \ M(8, 2), \ M(9, 2), \ M(0, 3), \ M(1, 3), \ M(2, 3), \ M(3, 3), \ M(4, 3), \ M(5, 3), \ M(6, 3), \ M(7, 3), \ M(8, 3), \ M(9, 3), \ M(0, 4), \ M(1, 4), \ M(2, 4), \ M(3, 4), \ M(4, 4), \ M(5, 4), \ M(6, 4), \ M(7, 4), \ M(8, 4), \ M(9, 4), \ M(0, 5), \ M(1, 5), \ M(2, 5), \ M(3, 5), \ M(4, 5), \ M(5, 5), \ M(6, 5), \ M(7, 5), \ M(8, 5), \ M(9, 5), \ M(0, 6), \ M(1, 6), \ M(2, 6), \ M(3, 6), \ M(4, 6), \ M(5, 6), \ M(6, 6), \ M(7, 6), \ M(8, 6), \ M(9, 6), \ M(0, 7), \ M(1, 7), \ M(2, 7), \ M(3, 7), \ M(4, 7), \ M(5, 7), \ M(6, 7), \ M(7, 7), \ M(8, 7), \ M(9, 7), \ M(0, 8), \ M(1, 8), \ M(2, 8), \ M(3, 8), \ M(4, 8), \ M(5, 8), \ M(6, 8), \ M(7, 8), \ M(8, 8), \ M(9, 8), 
\ M(0, 9), \ M(1, 9), \ M(2, 9), \ M(3, 9), \ M(4, 9), \ M(5, 9), \ M(6, 9), \ M(7, 9), \ M(8, 9), \ M(9, 9)) __device__ __forceinline__ bool update_covariance(SampleMatrix const& noisecov, FullSampleMatrix const& full_pulse_cov, SampleMatrix& inverse_cov, BXVectorType const& bxs, SampleDecompLLT& covariance_decomposition, SampleVector const& amplitudes) { constexpr int nsamples = SampleVector::RowsAtCompileTime; constexpr int npulses = BXVectorType::RowsAtCompileTime; inverse_cov = noisecov; for (unsigned int ipulse = 0; ipulse < npulses; ipulse++) { if (amplitudes.coeff(ipulse) == 0) continue; int bx = bxs.coeff(ipulse); int first_sample_t = std::max(0, bx + 3); int offset = 7 - 3 - bx; auto const value = amplitudes.coeff(ipulse); auto const value_sq = value * value; unsigned int nsample_pulse = nsamples - first_sample_t; inverse_cov.block(first_sample_t, first_sample_t, nsample_pulse, nsample_pulse) += value_sq * full_pulse_cov.block(first_sample_t + offset, first_sample_t + offset, nsample_pulse, nsample_pulse); } return true; } __device__ __forceinline__ SampleVector::Scalar compute_chi2(SampleDecompLLT& covariance_decomposition, PulseMatrixType const& pulse_matrix, SampleVector const& amplitudes, SampleVector const& samples) { return covariance_decomposition.matrixL().solve(pulse_matrix * amplitudes - samples).squaredNorm(); } /// /// launch ctx parameters are (nchannels / block, blocks) /// TODO: trivial impl for now, there must be a way to improve /// /// Conventions: /// - amplitudes -> solution vector, what we are fitting for /// - samples -> raw detector responses /// - passive constraint - satisfied constraint /// - active constraint - unsatisfied (yet) constraint /// __global__ void kernel_minimize(SampleMatrix const* noisecov, FullSampleMatrix const* full_pulse_cov, BXVectorType* bxs, SampleVector const* samples, SampleVector* amplitudes, PulseMatrixType* pulse_matrix, ::ecal::reco::StorageScalarType* chi2s, char* acState, int nchannels, int max_iterations) { int idx = threadIdx.x + blockDim.x * blockIdx.x; if (idx < nchannels) { if (static_cast<MinimizationState>(acState[idx]) == MinimizationState::Precomputed) return; // inits int iter = 0; int npassive = 0; // inits SampleDecompLLT covariance_decomposition; SampleMatrix inverse_cov; SampleVector::Scalar chi2 = 0, chi2_now = 0; #ifdef ECAL_MULTIFIT_KERNEL_MINIMIZE_V1 // PRINT_MATRIX_10x10(noisecov[idx]); #endif // loop until ocnverge while (true) { if (iter >= max_iterations) break; update_covariance( noisecov[idx], full_pulse_cov[idx], inverse_cov, bxs[idx], covariance_decomposition, amplitudes[idx]); // compute actual covariance decomposition covariance_decomposition.compute(inverse_cov); // prepare input matrices for fnnls SampleMatrix A = covariance_decomposition.matrixL().solve(pulse_matrix[idx]); SampleVector b = covariance_decomposition.matrixL().solve(samples[idx]); inplace_fnnls(A, b, amplitudes[idx], npassive, bxs[idx], pulse_matrix[idx]); chi2_now = compute_chi2(covariance_decomposition, pulse_matrix[idx], amplitudes[idx], samples[idx]); auto deltachi2 = chi2_now - chi2; #ifdef ECAL_MULTIFIT_KERNEL_MINIMIZE_V1 if (iter > 10) { printf("idx = %d iter = %d chi2 = %f chi2old = %f\n", idx, iter, chi2_now, chi2); printf("noisecov(0, i): %f %f %f %f %f %f %f %f %f %f\n", noisecov[idx](0, 0), noisecov[idx](0, 1), noisecov[idx](0, 2), noisecov[idx](0, 3), noisecov[idx](0, 4), noisecov[idx](0, 5), noisecov[idx](0, 6), noisecov[idx](0, 7), noisecov[idx](0, 8), noisecov[idx](0, 9)); printf("ampls: %f %f %f %f %f %f %f %f %f 
%f\n", amplitudes[idx](0), amplitudes[idx](1), amplitudes[idx](2), amplitudes[idx](3), amplitudes[idx](4), amplitudes[idx](5), amplitudes[idx](6), amplitudes[idx](7), amplitudes[idx](8), amplitudes[idx](9)); } #endif chi2 = chi2_now; if (ecal::abs(deltachi2) < 1e-3) break; //---- AM: TEST //---- it was 3 lines above, now here as in the CPU version ++iter; } // the rest will be set later chi2s[idx] = chi2; } } namespace v1 { void minimization_procedure(EventInputDataCPU const& eventInputCPU, EventInputDataGPU& eventInputGPU, EventOutputDataGPU& eventOutputGPU, EventDataForScratchGPU& scratch, ConditionsProducts const& conditions, ConfigurationParameters const& configParameters, cudaStream_t cudaStream) { unsigned int totalChannels = eventInputCPU.ebDigis.size() + eventInputCPU.eeDigis.size(); // unsigned int threads_min = conf.threads.x; // TODO: configure from python unsigned int threads_min = configParameters.kernelMinimizeThreads[0]; unsigned int blocks_min = threads_min > totalChannels ? 1 : (totalChannels + threads_min - 1) / threads_min; kernel_minimize<<<blocks_min, threads_min, 0, cudaStream>>>(scratch.noisecov, scratch.pulse_covariances, scratch.activeBXs, scratch.samples, (SampleVector*)eventOutputGPU.amplitudesAll, scratch.pulse_matrix, eventOutputGPU.chi2, scratch.acState, totalChannels, 50); cudaCheck(cudaGetLastError()); // // permute computed amplitudes // and assign the final uncalibared energy value // unsigned int threadsPermute = 32 * EcalDataFrame::MAXSAMPLES; // 32 * 10 unsigned int blocksPermute = threadsPermute > 10 * totalChannels ? 1 : (10 * totalChannels + threadsPermute - 1) / threadsPermute; int bytesPermute = threadsPermute * sizeof(SampleVector::Scalar); kernel_permute_results<<<blocksPermute, threadsPermute, bytesPermute, cudaStream>>>( (SampleVector*)eventOutputGPU.amplitudesAll, scratch.activeBXs, eventOutputGPU.amplitude, scratch.acState, totalChannels); cudaCheck(cudaGetLastError()); } } // namespace v1 } // namespace multifit } // namespace ecal
3d8ff7e8d544d32d77ee055d3dd90e07a65e9704.hip
// !!! This is a file automatically generated by hipify!!! /* * Copyright (c) 2019-2020, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <gtest/gtest.h> #include <cuml/common/cuml_allocator.hpp> #include <cuml/cuml.hpp> #include <random> #include <vector> #include <common/cudart_utils.h> #include "linalg/batched/matrix.cuh" #include "linalg_naive.h" #include "sparse/batched/csr.cuh" #include "test_utils.h" namespace MLCommon { namespace Sparse { namespace Batched { enum CSROperation { SpMV_op, SpMM_op }; template <typename T> struct CSRInputs { CSROperation operation; int batch_size; int m; // Dimensions of A int n; int nnz; // Number of non-zero elements in A int p; // Dimensions of B or x int q; T alpha; // Scalars T beta; T tolerance; }; template <typename T> class CSRTest : public ::testing::TestWithParam<CSRInputs<T>> { protected: void SetUp() override { using std::vector; params = ::testing::TestWithParam<CSRInputs<T>>::GetParam(); // Check if the dimensions are valid and compute the output dimensions int m_r, n_r; switch (params.operation) { case SpMV_op: ASSERT_TRUE(params.n == params.p); ASSERT_TRUE(params.q == 1); m_r = params.m; n_r = 1; break; case SpMM_op: ASSERT_TRUE(params.n == params.p); m_r = params.m; n_r = params.q; break; } // Create test matrices/vectors std::vector<T> A; std::vector<T> Bx; A.resize(params.batch_size * params.m * params.n, (T)0.0); Bx.resize(params.batch_size * params.p * params.q); std::random_device rd; std::mt19937 gen(rd()); std::uniform_real_distribution<T> idis(0, params.m * params.n - 1); std::uniform_real_distribution<T> udis(-1.0, 3.0); // Generate a random sparse matrix (with dense representation) std::vector<bool> mask = std::vector<bool>(params.m * params.n, false); for (int idx = 0; idx < params.nnz; idx++) { int k; do { k = idis(gen); } while (mask[k]); mask[k] = true; int i = k % params.m; int j = k / params.m; for (int bid = 0; bid < params.batch_size; bid++) { A[bid * params.m * params.n + j * params.m + i] = udis(gen); } } // Generate random dense matrices/vectors for (int i = 0; i < Bx.size(); i++) Bx[i] = udis(gen); res_h.resize(params.batch_size * m_r * n_r); for (int i = 0; i < res_h.size(); i++) res_h[i] = udis(gen); // Create handles, stream, allocator CUBLAS_CHECK(hipblasCreate(&handle)); CUDA_CHECK(hipStreamCreate(&stream)); CUSOLVER_CHECK(cusolverSpCreate(&cusolverSpHandle)); auto allocator = std::make_shared<MLCommon::defaultDeviceAllocator>(); // Created batched dense matrices LinAlg::Batched::Matrix<T> AbM(params.m, params.n, params.batch_size, handle, allocator, stream); LinAlg::Batched::Matrix<T> BxbM(params.p, params.q, params.batch_size, handle, allocator, stream); // Create matrix that will hold the results res_bM = new LinAlg::Batched::Matrix<T>(m_r, n_r, params.batch_size, handle, allocator, stream); // Copy the data to the device updateDevice(AbM.raw_data(), A.data(), A.size(), stream); updateDevice(BxbM.raw_data(), Bx.data(), Bx.size(), stream); updateDevice(res_bM->raw_data(), res_h.data(), 
res_h.size(), stream); // Create sparse matrix A from the dense A and the mask CSR<T> AbS = CSR<T>::from_dense(AbM, mask, cusolverSpHandle); // Compute the tested results switch (params.operation) { case SpMV_op: b_spmv(params.alpha, AbS, BxbM, params.beta, *res_bM); break; case SpMM_op: b_spmm(params.alpha, AbS, BxbM, params.beta, *res_bM); break; } // Compute the expected results switch (params.operation) { case SpMV_op: for (int bid = 0; bid < params.batch_size; bid++) { LinAlg::Naive::matMul(res_h.data() + bid * m_r, A.data() + bid * params.m * params.n, Bx.data() + bid * params.p, params.m, params.n, 1, params.alpha, params.beta); } break; case SpMM_op: for (int bid = 0; bid < params.batch_size; bid++) { LinAlg::Naive::matMul(res_h.data() + bid * m_r * n_r, A.data() + bid * params.m * params.n, Bx.data() + bid * params.p * params.q, params.m, params.n, params.q, params.alpha, params.beta); } break; } CUDA_CHECK(hipStreamSynchronize(stream)); } void TearDown() override { delete res_bM; CUBLAS_CHECK(hipblasDestroy(handle)); CUDA_CHECK(hipStreamDestroy(stream)); CUSOLVER_CHECK(cusolverSpDestroy(cusolverSpHandle)); } protected: CSRInputs<T> params; LinAlg::Batched::Matrix<T> *res_bM; std::vector<T> res_h; hipblasHandle_t handle; cusolverSpHandle_t cusolverSpHandle; hipStream_t stream; }; // Test parameters (op, batch_size, m, n, nnz, p, q, tolerance) const std::vector<CSRInputs<double>> inputsd = { {SpMV_op, 1, 90, 150, 440, 150, 1, 1.0, 0.0, 1e-6}, {SpMV_op, 5, 13, 12, 75, 12, 1, -1.0, 1.0, 1e-6}, {SpMV_op, 15, 8, 4, 6, 4, 1, 0.5, 0.5, 1e-6}, {SpMV_op, 33, 7, 7, 23, 7, 1, -0.5, -0.5, 1e-6}, {SpMM_op, 1, 20, 15, 55, 15, 30, 1.0, 0.0, 1e-6}, {SpMM_op, 9, 10, 9, 31, 9, 11, -1.0, 0.5, 1e-6}, {SpMM_op, 20, 7, 12, 11, 12, 13, 0.5, 0.5, 1e-6}}; // Test parameters (op, batch_size, m, n, nnz, p, q, tolerance) const std::vector<CSRInputs<float>> inputsf = { {SpMV_op, 1, 90, 150, 440, 150, 1, 1.0f, 0.0f, 1e-2}, {SpMV_op, 5, 13, 12, 75, 12, 1, -1.0f, 1.0f, 1e-2}, {SpMV_op, 15, 8, 4, 6, 4, 1, 0.5f, 0.5f, 1e-2}, {SpMV_op, 33, 7, 7, 23, 7, 1, -0.5f, -0.5f, 1e-2}, {SpMM_op, 1, 20, 15, 55, 15, 30, 1.0f, 0.0f, 1e-2}, {SpMM_op, 9, 10, 9, 31, 9, 11, -1.0f, 0.5f, 1e-2}, {SpMM_op, 20, 7, 12, 11, 12, 13, 0.5f, 0.5f, 1e-2}}; using BatchedCSRTestD = CSRTest<double>; using BatchedCSRTestF = CSRTest<float>; TEST_P(BatchedCSRTestD, Result) { ASSERT_TRUE(devArrMatchHost(res_h.data(), res_bM->raw_data(), res_h.size(), CompareApprox<double>(params.tolerance), stream)); } TEST_P(BatchedCSRTestF, Result) { ASSERT_TRUE(devArrMatchHost(res_h.data(), res_bM->raw_data(), res_h.size(), CompareApprox<float>(params.tolerance), stream)); } INSTANTIATE_TEST_CASE_P(BatchedCSRTests, BatchedCSRTestD, ::testing::ValuesIn(inputsd)); INSTANTIATE_TEST_CASE_P(BatchedCSRTests, BatchedCSRTestF, ::testing::ValuesIn(inputsf)); } // namespace Batched } // namespace Sparse } // namespace MLCommon
3d8ff7e8d544d32d77ee055d3dd90e07a65e9704.cu
/* * Copyright (c) 2019-2020, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <gtest/gtest.h> #include <cuml/common/cuml_allocator.hpp> #include <cuml/cuml.hpp> #include <random> #include <vector> #include <common/cudart_utils.h> #include "linalg/batched/matrix.cuh" #include "linalg_naive.h" #include "sparse/batched/csr.cuh" #include "test_utils.h" namespace MLCommon { namespace Sparse { namespace Batched { enum CSROperation { SpMV_op, SpMM_op }; template <typename T> struct CSRInputs { CSROperation operation; int batch_size; int m; // Dimensions of A int n; int nnz; // Number of non-zero elements in A int p; // Dimensions of B or x int q; T alpha; // Scalars T beta; T tolerance; }; template <typename T> class CSRTest : public ::testing::TestWithParam<CSRInputs<T>> { protected: void SetUp() override { using std::vector; params = ::testing::TestWithParam<CSRInputs<T>>::GetParam(); // Check if the dimensions are valid and compute the output dimensions int m_r, n_r; switch (params.operation) { case SpMV_op: ASSERT_TRUE(params.n == params.p); ASSERT_TRUE(params.q == 1); m_r = params.m; n_r = 1; break; case SpMM_op: ASSERT_TRUE(params.n == params.p); m_r = params.m; n_r = params.q; break; } // Create test matrices/vectors std::vector<T> A; std::vector<T> Bx; A.resize(params.batch_size * params.m * params.n, (T)0.0); Bx.resize(params.batch_size * params.p * params.q); std::random_device rd; std::mt19937 gen(rd()); std::uniform_real_distribution<T> idis(0, params.m * params.n - 1); std::uniform_real_distribution<T> udis(-1.0, 3.0); // Generate a random sparse matrix (with dense representation) std::vector<bool> mask = std::vector<bool>(params.m * params.n, false); for (int idx = 0; idx < params.nnz; idx++) { int k; do { k = idis(gen); } while (mask[k]); mask[k] = true; int i = k % params.m; int j = k / params.m; for (int bid = 0; bid < params.batch_size; bid++) { A[bid * params.m * params.n + j * params.m + i] = udis(gen); } } // Generate random dense matrices/vectors for (int i = 0; i < Bx.size(); i++) Bx[i] = udis(gen); res_h.resize(params.batch_size * m_r * n_r); for (int i = 0; i < res_h.size(); i++) res_h[i] = udis(gen); // Create handles, stream, allocator CUBLAS_CHECK(cublasCreate(&handle)); CUDA_CHECK(cudaStreamCreate(&stream)); CUSOLVER_CHECK(cusolverSpCreate(&cusolverSpHandle)); auto allocator = std::make_shared<MLCommon::defaultDeviceAllocator>(); // Created batched dense matrices LinAlg::Batched::Matrix<T> AbM(params.m, params.n, params.batch_size, handle, allocator, stream); LinAlg::Batched::Matrix<T> BxbM(params.p, params.q, params.batch_size, handle, allocator, stream); // Create matrix that will hold the results res_bM = new LinAlg::Batched::Matrix<T>(m_r, n_r, params.batch_size, handle, allocator, stream); // Copy the data to the device updateDevice(AbM.raw_data(), A.data(), A.size(), stream); updateDevice(BxbM.raw_data(), Bx.data(), Bx.size(), stream); updateDevice(res_bM->raw_data(), res_h.data(), res_h.size(), stream); // Create sparse matrix A from the dense A 
and the mask CSR<T> AbS = CSR<T>::from_dense(AbM, mask, cusolverSpHandle); // Compute the tested results switch (params.operation) { case SpMV_op: b_spmv(params.alpha, AbS, BxbM, params.beta, *res_bM); break; case SpMM_op: b_spmm(params.alpha, AbS, BxbM, params.beta, *res_bM); break; } // Compute the expected results switch (params.operation) { case SpMV_op: for (int bid = 0; bid < params.batch_size; bid++) { LinAlg::Naive::matMul(res_h.data() + bid * m_r, A.data() + bid * params.m * params.n, Bx.data() + bid * params.p, params.m, params.n, 1, params.alpha, params.beta); } break; case SpMM_op: for (int bid = 0; bid < params.batch_size; bid++) { LinAlg::Naive::matMul(res_h.data() + bid * m_r * n_r, A.data() + bid * params.m * params.n, Bx.data() + bid * params.p * params.q, params.m, params.n, params.q, params.alpha, params.beta); } break; } CUDA_CHECK(cudaStreamSynchronize(stream)); } void TearDown() override { delete res_bM; CUBLAS_CHECK(cublasDestroy(handle)); CUDA_CHECK(cudaStreamDestroy(stream)); CUSOLVER_CHECK(cusolverSpDestroy(cusolverSpHandle)); } protected: CSRInputs<T> params; LinAlg::Batched::Matrix<T> *res_bM; std::vector<T> res_h; cublasHandle_t handle; cusolverSpHandle_t cusolverSpHandle; cudaStream_t stream; }; // Test parameters (op, batch_size, m, n, nnz, p, q, tolerance) const std::vector<CSRInputs<double>> inputsd = { {SpMV_op, 1, 90, 150, 440, 150, 1, 1.0, 0.0, 1e-6}, {SpMV_op, 5, 13, 12, 75, 12, 1, -1.0, 1.0, 1e-6}, {SpMV_op, 15, 8, 4, 6, 4, 1, 0.5, 0.5, 1e-6}, {SpMV_op, 33, 7, 7, 23, 7, 1, -0.5, -0.5, 1e-6}, {SpMM_op, 1, 20, 15, 55, 15, 30, 1.0, 0.0, 1e-6}, {SpMM_op, 9, 10, 9, 31, 9, 11, -1.0, 0.5, 1e-6}, {SpMM_op, 20, 7, 12, 11, 12, 13, 0.5, 0.5, 1e-6}}; // Test parameters (op, batch_size, m, n, nnz, p, q, tolerance) const std::vector<CSRInputs<float>> inputsf = { {SpMV_op, 1, 90, 150, 440, 150, 1, 1.0f, 0.0f, 1e-2}, {SpMV_op, 5, 13, 12, 75, 12, 1, -1.0f, 1.0f, 1e-2}, {SpMV_op, 15, 8, 4, 6, 4, 1, 0.5f, 0.5f, 1e-2}, {SpMV_op, 33, 7, 7, 23, 7, 1, -0.5f, -0.5f, 1e-2}, {SpMM_op, 1, 20, 15, 55, 15, 30, 1.0f, 0.0f, 1e-2}, {SpMM_op, 9, 10, 9, 31, 9, 11, -1.0f, 0.5f, 1e-2}, {SpMM_op, 20, 7, 12, 11, 12, 13, 0.5f, 0.5f, 1e-2}}; using BatchedCSRTestD = CSRTest<double>; using BatchedCSRTestF = CSRTest<float>; TEST_P(BatchedCSRTestD, Result) { ASSERT_TRUE(devArrMatchHost(res_h.data(), res_bM->raw_data(), res_h.size(), CompareApprox<double>(params.tolerance), stream)); } TEST_P(BatchedCSRTestF, Result) { ASSERT_TRUE(devArrMatchHost(res_h.data(), res_bM->raw_data(), res_h.size(), CompareApprox<float>(params.tolerance), stream)); } INSTANTIATE_TEST_CASE_P(BatchedCSRTests, BatchedCSRTestD, ::testing::ValuesIn(inputsd)); INSTANTIATE_TEST_CASE_P(BatchedCSRTests, BatchedCSRTestF, ::testing::ValuesIn(inputsf)); } // namespace Batched } // namespace Sparse } // namespace MLCommon
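Reader's note on the batched CSR test pair above (HIP and CUDA variants): the dense matrix A is filled in column-major order (element (i, j) stored at index j * m + i), which matches how the random mask index k is decomposed as i = k % m and j = k / m, and the expected values are computed per batch member by the naive dense matMul that receives the same alpha and beta scalars, which the sparse b_spmv/b_spmm calls are then checked against. As a reading aid only, the host-side sketch below spells out the alpha/beta update for a single SpMV directly on a CSR layout; the function and argument names (csr_spmv_reference, row_ptr, col_ind, values) are illustrative and are not the cuML API exercised by the test.

#include <vector>

// Illustrative reference: y = alpha * A * x + beta * y for one CSR matrix.
void csr_spmv_reference(int m,
                        const std::vector<int>& row_ptr,    // size m + 1
                        const std::vector<int>& col_ind,    // size nnz
                        const std::vector<double>& values,  // size nnz
                        const std::vector<double>& x,
                        double alpha, double beta,
                        std::vector<double>& y) {
  for (int i = 0; i < m; i++) {
    double acc = 0.0;
    for (int k = row_ptr[i]; k < row_ptr[i + 1]; k++) {
      acc += values[k] * x[col_ind[k]];  // accumulate the sparse row-vector product
    }
    y[i] = alpha * acc + beta * y[i];    // same alpha/beta semantics as the test's dense reference
  }
}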
f06922228f8709af4dcfd8acf23fc2bcff843295.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /** * Created on: Sep 21, 2018 * Author: Akila, Eranga, Eminda, Ruwan **/ #include "getOutput.cuh" __global__ void calc_get_output (double * dev_var_out, const unsigned int host_sz_x, const unsigned int host_sz_y, const unsigned int host_sz_z, #include "para_derivs_offsets.h" ) { int thread_id = blockIdx.x*1024 + threadIdx.x; int i = thread_id%(host_sz_x-6) + 3; int j = ((thread_id/(host_sz_x-6))%(host_sz_y-6)) + 3; int k = (thread_id/(host_sz_y-6)/(host_sz_x-6)) + 3; int nx = host_sz_x; int ny = host_sz_y; if(i >= nx-3 || j >= ny-3 || k >= host_sz_z-3) return; const double sigma = 1e-4; int pp = i + nx*(j + ny*k); dev_var_out[alphaInt + pp] += sigma * (grad_0_alpha[pp] + grad_1_alpha[pp] + grad_2_alpha[pp]); dev_var_out[beta0Int + pp] += sigma * (grad_0_beta0[pp] + grad_1_beta0[pp] + grad_2_beta0[pp]); dev_var_out[beta1Int + pp] += sigma * (grad_0_beta1[pp] + grad_1_beta1[pp] + grad_2_beta1[pp]); dev_var_out[beta2Int + pp] += sigma * (grad_0_beta2[pp] + grad_1_beta2[pp] + grad_2_beta2[pp]); dev_var_out[gt0Int + pp] += sigma * (grad_0_gt0[pp] + grad_1_gt0[pp] + grad_2_gt0[pp]); dev_var_out[gt1Int + pp] += sigma * (grad_0_gt1[pp] + grad_1_gt1[pp] + grad_2_gt1[pp]); dev_var_out[gt2Int + pp] += sigma * (grad_0_gt2[pp] + grad_1_gt2[pp] + grad_2_gt2[pp]); dev_var_out[gt3Int + pp] += sigma * (grad_0_gt3[pp] + grad_1_gt3[pp] + grad_2_gt3[pp]); dev_var_out[gt4Int + pp] += sigma * (grad_0_gt4[pp] + grad_1_gt4[pp] + grad_2_gt4[pp]); dev_var_out[gt5Int + pp] += sigma * (grad_0_gt5[pp] + grad_1_gt5[pp] + grad_2_gt5[pp]); dev_var_out[chiInt + pp] += sigma * (grad_0_chi[pp] + grad_1_chi[pp] + grad_2_chi[pp]); dev_var_out[At0Int + pp] += sigma * (grad_0_At0[pp] + grad_1_At0[pp] + grad_2_At0[pp]); dev_var_out[At1Int + pp] += sigma * (grad_0_At1[pp] + grad_1_At1[pp] + grad_2_At1[pp]); dev_var_out[At2Int + pp] += sigma * (grad_0_At2[pp] + grad_1_At2[pp] + grad_2_At2[pp]); dev_var_out[At3Int + pp] += sigma * (grad_0_At3[pp] + grad_1_At3[pp] + grad_2_At3[pp]); dev_var_out[At4Int + pp] += sigma * (grad_0_At4[pp] + grad_1_At4[pp] + grad_2_At4[pp]); dev_var_out[At5Int + pp] += sigma * (grad_0_At5[pp] + grad_1_At5[pp] + grad_2_At5[pp]); dev_var_out[KInt + pp] += sigma * (grad_0_K[pp] + grad_1_K[pp] + grad_2_K[pp]); dev_var_out[Gt0Int + pp] += sigma * (grad_0_Gt0[pp] + grad_1_Gt0[pp] + grad_2_Gt0[pp]); dev_var_out[Gt1Int + pp] += sigma * (grad_0_Gt1[pp] + grad_1_Gt1[pp] + grad_2_Gt1[pp]); dev_var_out[Gt2Int + pp] += sigma * (grad_0_Gt2[pp] + grad_1_Gt2[pp] + grad_2_Gt2[pp]); dev_var_out[B0Int + pp] += sigma * (grad_0_B0[pp] + grad_1_B0[pp] + grad_2_B0[pp]); dev_var_out[B1Int + pp] += sigma * (grad_0_B1[pp] + grad_1_B1[pp] + grad_2_B1[pp]); dev_var_out[B2Int + pp] += sigma * (grad_0_B2[pp] + grad_1_B2[pp] + grad_2_B2[pp]); } void get_output_kernel_wrapper(double * dev_var_out, const unsigned int * host_sz, hipStream_t stream, #include "para_derivs_offsets.h" ) { const int ie = host_sz[0] - 3;//x direction const int je = host_sz[1] - 3;//y direction const int ke = host_sz[2] - 3;//z direction const unsigned int host_sz_x = host_sz[0]; const unsigned int host_sz_y = host_sz[1]; const unsigned int host_sz_z = host_sz[2]; int total_points = ceil(1.0*ie*je*ke); int blocks = ceil(1.0*total_points/1024); hipLaunchKernelGGL(( calc_get_output) , dim3(blocks), dim3(1024), 0, stream , dev_var_out, host_sz_x, host_sz_y, host_sz_z, #include "args_derivs_offsets.h" ); CHECK_ERROR(hipGetLastError(), "kernal_get_output Kernel launch failed"); }
f06922228f8709af4dcfd8acf23fc2bcff843295.cu
/** * Created on: Sep 21, 2018 * Author: Akila, Eranga, Eminda, Ruwan **/ #include "getOutput.cuh" __global__ void calc_get_output (double * dev_var_out, const unsigned int host_sz_x, const unsigned int host_sz_y, const unsigned int host_sz_z, #include "para_derivs_offsets.h" ) { int thread_id = blockIdx.x*1024 + threadIdx.x; int i = thread_id%(host_sz_x-6) + 3; int j = ((thread_id/(host_sz_x-6))%(host_sz_y-6)) + 3; int k = (thread_id/(host_sz_y-6)/(host_sz_x-6)) + 3; int nx = host_sz_x; int ny = host_sz_y; if(i >= nx-3 || j >= ny-3 || k >= host_sz_z-3) return; const double sigma = 1e-4; int pp = i + nx*(j + ny*k); dev_var_out[alphaInt + pp] += sigma * (grad_0_alpha[pp] + grad_1_alpha[pp] + grad_2_alpha[pp]); dev_var_out[beta0Int + pp] += sigma * (grad_0_beta0[pp] + grad_1_beta0[pp] + grad_2_beta0[pp]); dev_var_out[beta1Int + pp] += sigma * (grad_0_beta1[pp] + grad_1_beta1[pp] + grad_2_beta1[pp]); dev_var_out[beta2Int + pp] += sigma * (grad_0_beta2[pp] + grad_1_beta2[pp] + grad_2_beta2[pp]); dev_var_out[gt0Int + pp] += sigma * (grad_0_gt0[pp] + grad_1_gt0[pp] + grad_2_gt0[pp]); dev_var_out[gt1Int + pp] += sigma * (grad_0_gt1[pp] + grad_1_gt1[pp] + grad_2_gt1[pp]); dev_var_out[gt2Int + pp] += sigma * (grad_0_gt2[pp] + grad_1_gt2[pp] + grad_2_gt2[pp]); dev_var_out[gt3Int + pp] += sigma * (grad_0_gt3[pp] + grad_1_gt3[pp] + grad_2_gt3[pp]); dev_var_out[gt4Int + pp] += sigma * (grad_0_gt4[pp] + grad_1_gt4[pp] + grad_2_gt4[pp]); dev_var_out[gt5Int + pp] += sigma * (grad_0_gt5[pp] + grad_1_gt5[pp] + grad_2_gt5[pp]); dev_var_out[chiInt + pp] += sigma * (grad_0_chi[pp] + grad_1_chi[pp] + grad_2_chi[pp]); dev_var_out[At0Int + pp] += sigma * (grad_0_At0[pp] + grad_1_At0[pp] + grad_2_At0[pp]); dev_var_out[At1Int + pp] += sigma * (grad_0_At1[pp] + grad_1_At1[pp] + grad_2_At1[pp]); dev_var_out[At2Int + pp] += sigma * (grad_0_At2[pp] + grad_1_At2[pp] + grad_2_At2[pp]); dev_var_out[At3Int + pp] += sigma * (grad_0_At3[pp] + grad_1_At3[pp] + grad_2_At3[pp]); dev_var_out[At4Int + pp] += sigma * (grad_0_At4[pp] + grad_1_At4[pp] + grad_2_At4[pp]); dev_var_out[At5Int + pp] += sigma * (grad_0_At5[pp] + grad_1_At5[pp] + grad_2_At5[pp]); dev_var_out[KInt + pp] += sigma * (grad_0_K[pp] + grad_1_K[pp] + grad_2_K[pp]); dev_var_out[Gt0Int + pp] += sigma * (grad_0_Gt0[pp] + grad_1_Gt0[pp] + grad_2_Gt0[pp]); dev_var_out[Gt1Int + pp] += sigma * (grad_0_Gt1[pp] + grad_1_Gt1[pp] + grad_2_Gt1[pp]); dev_var_out[Gt2Int + pp] += sigma * (grad_0_Gt2[pp] + grad_1_Gt2[pp] + grad_2_Gt2[pp]); dev_var_out[B0Int + pp] += sigma * (grad_0_B0[pp] + grad_1_B0[pp] + grad_2_B0[pp]); dev_var_out[B1Int + pp] += sigma * (grad_0_B1[pp] + grad_1_B1[pp] + grad_2_B1[pp]); dev_var_out[B2Int + pp] += sigma * (grad_0_B2[pp] + grad_1_B2[pp] + grad_2_B2[pp]); } void get_output_kernel_wrapper(double * dev_var_out, const unsigned int * host_sz, cudaStream_t stream, #include "para_derivs_offsets.h" ) { const int ie = host_sz[0] - 3;//x direction const int je = host_sz[1] - 3;//y direction const int ke = host_sz[2] - 3;//z direction const unsigned int host_sz_x = host_sz[0]; const unsigned int host_sz_y = host_sz[1]; const unsigned int host_sz_z = host_sz[2]; int total_points = ceil(1.0*ie*je*ke); int blocks = ceil(1.0*total_points/1024); calc_get_output <<< blocks, 1024, 0, stream >>> (dev_var_out, host_sz_x, host_sz_y, host_sz_z, #include "args_derivs_offsets.h" ); CHECK_ERROR(cudaGetLastError(), "kernal_get_output Kernel launch failed"); }
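A note on the getOutput pair above: the wrapper derives total_points from the (n - 3) extents (ie, je, ke) rather than from the (n - 6)-wide interior that the kernel actually updates, so somewhat more threads are launched than there are interior points, and the in-kernel bounds check (i >= nx-3 || j >= ny-3 || k >= host_sz_z-3) is what discards the surplus. The sketch below replays the kernel's flat-index decomposition on the host for assumed (made-up) block dimensions, to show how a thread id maps to interior coordinates (i, j, k) and to the linear offset pp = i + nx*(j + ny*k); it is illustrative only.

#include <cstdio>

int main() {
  // Assumed example block dimensions, including the 3-cell halo on each side.
  const unsigned int nx = 10, ny = 9, nz = 8;
  const unsigned int interior = (nx - 6) * (ny - 6) * (nz - 6);
  for (unsigned int tid = 0; tid < interior; tid++) {
    // Same decomposition as calc_get_output (dividing by (ny-6) then (nx-6)
    // equals a single division by their product).
    unsigned int i = tid % (nx - 6) + 3;
    unsigned int j = (tid / (nx - 6)) % (ny - 6) + 3;
    unsigned int k = tid / ((nx - 6) * (ny - 6)) + 3;
    unsigned int pp = i + nx * (j + ny * k);  // linear offset used for every field
    if (tid < 4) printf("tid=%u -> (i,j,k)=(%u,%u,%u), pp=%u\n", tid, i, j, k, pp);
  }
  return 0;
}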
fe4747625c262cbfc5da3923fe0cd85a2549e0a4.hip
// !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <hip/hip_runtime.h>
#include <time.h>
#include <opencv2/core/core.hpp>
#include <opencv2/imgproc/imgproc.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <opencv2/opencv.hpp>
#include "device_launch_parameters.h"
#include "GpuTimer.h"

#define NUM_THREADS 1024

using namespace cv;
using namespace std;

// cpu implementation
void rgb2grayCPU(unsigned char* color, unsigned char* gray, int numRows, int numCols, int numChannels) {
    int grayOffset, colorOffset;
    // walk over every pixel of the image sequentially
    for (int i = 0; i < numRows; i++) {
        for (int j = 0; j < numCols; j++) {
            // linearize pixel coordinate tuple (i, j)
            grayOffset = i * numCols + j;
            colorOffset = grayOffset * numChannels;
            // grayscale conversion formula
            gray[grayOffset] = (0.21 * color[colorOffset + 2]) + (0.71 * color[colorOffset + 1]) + (0.07 * color[colorOffset]);
        }
    }
}

// gpu implementation
__global__ void rgb2grayGPU(unsigned char* Pout, unsigned char* Pin, int width, int height, int numChannels) {
    // thread coordinates
    int row = threadIdx.y + blockIdx.y*blockDim.y;
    int col = threadIdx.x + blockIdx.x*blockDim.x;
    // linearize coordinates for data access from the thread coordinates
    int grayOffset = row * width + col;
    int colorOffset = grayOffset * numChannels;
    // make sure the pixel to evaluate exists
    if ((col < width) && (row < height)) {
        // convert to grayscale
        Pout[grayOffset] = (0.21 * Pin[colorOffset + 2]) + (0.71 * Pin[colorOffset + 1]) + (0.07 * Pin[colorOffset]);
    }
}

__global__ void colorToGrayscaleConversion(unsigned char* Pout, unsigned char* Pin, int width, int height, int numChannels) {
    // thread coordinates
    int col = threadIdx.x + blockIdx.x*blockDim.x;
    int row = threadIdx.y + blockIdx.y*blockDim.y;
    if (col < width && row < height) {
        // linear offsets computed from the thread coordinates
        int grayOffset = row*width + col;
        int rgbOffset = grayOffset * numChannels;
        // read the rgb values
        unsigned char r = Pin[rgbOffset];
        unsigned char g = Pin[rgbOffset + 1];
        unsigned char b = Pin[rgbOffset + 2];
        // convert to grayscale
        Pout[grayOffset] = 0.21f*r + 0.71f*g + 0.07f*b;
    }
}

int main(int argc, char *argv[]) {
    if (argc == 1) {
        printf("[!] Filename expected.\n");
        return 0;
    }
    // read image
    Mat image;
    image = imread(argv[1], CV_LOAD_IMAGE_COLOR);
    if (image.empty()) {
        printf("Cannot read image file %s", argv[1]);
        exit(1);
    }
    // parameters
    int imageChannels = 3; // rgb
    int imageWidth = image.cols;
    int imageHeight = image.rows;
    size_t size_rgb = sizeof(unsigned char)*imageWidth*imageHeight*imageChannels;
    size_t size_gray = sizeof(unsigned char)*imageWidth*imageHeight;
    // allocate host memory for the grayscale images
    unsigned char* h_grayImage = (unsigned char*)malloc(size_gray);
    unsigned char* h_grayImage_CPU = (unsigned char*)malloc(size_gray);
    // pointer to the rgb image on the host
    unsigned char* h_rgbImage = image.data;
    // allocate device memory for the images
    unsigned char* d_rgbImage;
    unsigned char* d_grayImage;
    hipMalloc((void**)&d_rgbImage, size_rgb);
    hipMalloc((void**)&d_grayImage, size_gray);
    // copy the rgb image from host to device
    hipMemcpy(d_rgbImage, h_rgbImage, size_rgb, hipMemcpyHostToDevice);
    // launch parameters
    dim3 dimBlock(16, 16, 1);
    dim3 dimGrid(ceil(imageWidth/16.0), ceil(imageHeight/16.0), 1);
    //dim3 dimBlock(NUM_THREADS, NUM_THREADS, 1);
    //dim3 dimGrid(ceil(imageWidth/NUM_THREADS), ceil(imageHeight/NUM_THREADS), 1);
    // launch the kernel on the device buffers
    //rgb2grayGPU<<<dimGrid, dimBlock>>>(d_grayImage, d_rgbImage, imageWidth, imageHeight, imageChannels);
    hipLaunchKernelGGL((colorToGrayscaleConversion), dim3(dimGrid), dim3(dimBlock), 0, 0, d_grayImage, d_rgbImage, imageWidth, imageHeight, imageChannels);
    // compute the reference result on the cpu
    rgb2grayCPU(h_rgbImage, h_grayImage_CPU, imageHeight, imageWidth, imageChannels);
    // copy the grayscale image from device to host
    hipMemcpy(h_grayImage, d_grayImage, size_gray, hipMemcpyDeviceToHost);
    // display images
    Mat Image1(imageHeight, imageWidth, CV_8UC1, h_grayImage);
    Mat Image2(imageHeight, imageWidth, CV_8UC1, h_grayImage_CPU);
    namedWindow("CPUImage", WINDOW_NORMAL);
    namedWindow("GPUImage", WINDOW_NORMAL);
    imshow("GPUImage", Image1);
    imshow("CPUImage", Image2);
    waitKey(0);
    // free memory
    image.release();
    Image1.release();
    Image2.release();
    free(h_grayImage);
    free(h_grayImage_CPU);
    hipFree(d_rgbImage);
    hipFree(d_grayImage);
    return 0;
}
fe4747625c262cbfc5da3923fe0cd85a2549e0a4.cu
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <cuda.h>
#include <time.h>
#include <opencv2/core/core.hpp>
#include <opencv2/imgproc/imgproc.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <opencv2/opencv.hpp>
#include "device_launch_parameters.h"
#include "GpuTimer.h"

#define NUM_THREADS 1024

using namespace cv;
using namespace std;

// cpu implementation
void rgb2grayCPU(unsigned char* color, unsigned char* gray, int numRows, int numCols, int numChannels) {
    int grayOffset, colorOffset;
    // walk over every pixel of the image sequentially
    for (int i = 0; i < numRows; i++) {
        for (int j = 0; j < numCols; j++) {
            // linearize pixel coordinate tuple (i, j)
            grayOffset = i * numCols + j;
            colorOffset = grayOffset * numChannels;
            // grayscale conversion formula
            gray[grayOffset] = (0.21 * color[colorOffset + 2]) + (0.71 * color[colorOffset + 1]) + (0.07 * color[colorOffset]);
        }
    }
}

// gpu implementation
__global__ void rgb2grayGPU(unsigned char* Pout, unsigned char* Pin, int width, int height, int numChannels) {
    // thread coordinates
    int row = threadIdx.y + blockIdx.y*blockDim.y;
    int col = threadIdx.x + blockIdx.x*blockDim.x;
    // linearize coordinates for data access from the thread coordinates
    int grayOffset = row * width + col;
    int colorOffset = grayOffset * numChannels;
    // make sure the pixel to evaluate exists
    if ((col < width) && (row < height)) {
        // convert to grayscale
        Pout[grayOffset] = (0.21 * Pin[colorOffset + 2]) + (0.71 * Pin[colorOffset + 1]) + (0.07 * Pin[colorOffset]);
    }
}

__global__ void colorToGrayscaleConversion(unsigned char* Pout, unsigned char* Pin, int width, int height, int numChannels) {
    // thread coordinates
    int col = threadIdx.x + blockIdx.x*blockDim.x;
    int row = threadIdx.y + blockIdx.y*blockDim.y;
    if (col < width && row < height) {
        // linear offsets computed from the thread coordinates
        int grayOffset = row*width + col;
        int rgbOffset = grayOffset * numChannels;
        // read the rgb values
        unsigned char r = Pin[rgbOffset];
        unsigned char g = Pin[rgbOffset + 1];
        unsigned char b = Pin[rgbOffset + 2];
        // convert to grayscale
        Pout[grayOffset] = 0.21f*r + 0.71f*g + 0.07f*b;
    }
}

int main(int argc, char *argv[]) {
    if (argc == 1) {
        printf("[!] Filename expected.\n");
        return 0;
    }
    // read image
    Mat image;
    image = imread(argv[1], CV_LOAD_IMAGE_COLOR);
    if (image.empty()) {
        printf("Cannot read image file %s", argv[1]);
        exit(1);
    }
    // parameters
    int imageChannels = 3; // rgb
    int imageWidth = image.cols;
    int imageHeight = image.rows;
    size_t size_rgb = sizeof(unsigned char)*imageWidth*imageHeight*imageChannels;
    size_t size_gray = sizeof(unsigned char)*imageWidth*imageHeight;
    // allocate host memory for the grayscale images
    unsigned char* h_grayImage = (unsigned char*)malloc(size_gray);
    unsigned char* h_grayImage_CPU = (unsigned char*)malloc(size_gray);
    // pointer to the rgb image on the host
    unsigned char* h_rgbImage = image.data;
    // allocate device memory for the images
    unsigned char* d_rgbImage;
    unsigned char* d_grayImage;
    cudaMalloc((void**)&d_rgbImage, size_rgb);
    cudaMalloc((void**)&d_grayImage, size_gray);
    // copy the rgb image from host to device
    cudaMemcpy(d_rgbImage, h_rgbImage, size_rgb, cudaMemcpyHostToDevice);
    // launch parameters
    dim3 dimBlock(16, 16, 1);
    dim3 dimGrid(ceil(imageWidth/16.0), ceil(imageHeight/16.0), 1);
    //dim3 dimBlock(NUM_THREADS, NUM_THREADS, 1);
    //dim3 dimGrid(ceil(imageWidth/NUM_THREADS), ceil(imageHeight/NUM_THREADS), 1);
    // launch the kernel on the device buffers
    //rgb2grayGPU<<<dimGrid, dimBlock>>>(d_grayImage, d_rgbImage, imageWidth, imageHeight, imageChannels);
    colorToGrayscaleConversion<<<dimGrid, dimBlock>>>(d_grayImage, d_rgbImage, imageWidth, imageHeight, imageChannels);
    // compute the reference result on the cpu
    rgb2grayCPU(h_rgbImage, h_grayImage_CPU, imageHeight, imageWidth, imageChannels);
    // copy the grayscale image from device to host
    cudaMemcpy(h_grayImage, d_grayImage, size_gray, cudaMemcpyDeviceToHost);
    // display images
    Mat Image1(imageHeight, imageWidth, CV_8UC1, h_grayImage);
    Mat Image2(imageHeight, imageWidth, CV_8UC1, h_grayImage_CPU);
    namedWindow("CPUImage", WINDOW_NORMAL);
    namedWindow("GPUImage", WINDOW_NORMAL);
    imshow("GPUImage", Image1);
    imshow("CPUImage", Image2);
    waitKey(0);
    // free memory
    image.release();
    Image1.release();
    Image2.release();
    free(h_grayImage);
    free(h_grayImage_CPU);
    cudaFree(d_rgbImage);
    cudaFree(d_grayImage);
    return 0;
}
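For the RGB-to-grayscale pair above, the launch configuration uses 16x16 thread blocks and a grid of ceil(width/16) by ceil(height/16) blocks, so the rightmost and bottommost blocks generally overhang the image and the kernel's (col < width && row < height) guard is what keeps the extra threads from writing out of bounds. The snippet below merely reproduces that arithmetic on the host for an assumed 1920x1080 input; the image size is illustrative only.

#include <cstdio>

int main() {
  const int width = 1920, height = 1080;  // assumed example image size
  const int bx = 16, by = 16;             // block dimensions used above
  const int gx = (width  + bx - 1) / bx;  // = ceil(width / 16.0)  -> 120
  const int gy = (height + by - 1) / by;  // = ceil(height / 16.0) -> 68
  printf("grid %d x %d = %d blocks, %d threads for %d pixels\n",
         gx, gy, gx * gy, gx * gy * bx * by, width * height);
  return 0;
}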
2c9baa11ac2ec2c7ee08e3103adfc1e76fa38c6c.hip
// !!! This is a file automatically generated by hipify!!! /* * ------------------------------------------------------------------------------ * * MIT License * * Copyright (c) 2021 Parallel Applications Modelling Group - GMAP * GMAP website: https://gmap.pucrs.br * * Pontifical Catholic University of Rio Grande do Sul (PUCRS) * Av. Ipiranga, 6681, Porto Alegre - Brazil, 90619-900 * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to deal * in the Software without restriction, including without limitation the rights * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell * copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in all * copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. * * ------------------------------------------------------------------------------ * * The original NPB 3.4 version was written in Fortran and belongs to: * http://www.nas.nasa.gov/Software/NPB/ * * Authors of the Fortran code: * D. Bailey * W. Saphir * * ------------------------------------------------------------------------------ * * The serial C++ version is a translation of the original NPB 3.4 * Serial C++ version: https://github.com/GMAP/NPB-CPP/tree/master/NPB-SER * * Authors of the C++ code: * Dalvan Griebler <[email protected]> * Gabriell Araujo <[email protected]> * Jnior Lff <[email protected]> * * ------------------------------------------------------------------------------ * * The CUDA version is a parallel implementation of the serial C++ version * CUDA version: https://github.com/GMAP/NPB-GPU/tree/master/CUDA * * Authors of the CUDA code: * Gabriell Araujo <[email protected]> * * ------------------------------------------------------------------------------ */ #include <omp.h> #include <hip/hip_runtime.h> #include "../common/npb-CPP.hpp" #include "npbparams.hpp" /* * --------------------------------------------------------------------- * u0, u1, u2 are the main arrays in the problem. * depending on the decomposition, these arrays will have different * dimensions. to accomodate all possibilities, we allocate them as * one-dimensional arrays and pass them to subroutines for different * views * - u0 contains the initial (transformed) initial condition * - u1 and u2 are working arrays * - twiddle contains exponents for the time evolution operator. * --------------------------------------------------------------------- * large arrays are in common so that they are allocated on the * heap rather than the stack. this common block is not * referenced directly anywhere else. padding is to avoid accidental * cache problems, since all array sizes are powers of two. * --------------------------------------------------------------------- * we need a bunch of logic to keep track of how * arrays are laid out. 
* * note: this serial version is the derived from the parallel 0D case * of the ft NPB. * the computation proceeds logically as * * set up initial conditions * fftx(1) * transpose (1->2) * ffty(2) * transpose (2->3) * fftz(3) * time evolution * fftz(3) * transpose (3->2) * ffty(2) * transpose (2->1) * fftx(1) * compute residual(1) * * for the 0D, 1D, 2D strategies, the layouts look like xxx * * 0D 1D 2D * 1: xyz xyz xyz * 2: xyz xyz yxz * 3: xyz zyx zxy * the array dimensions are stored in dims(coord, phase) * --------------------------------------------------------------------- * if processor array is 1x1 -> 0D grid decomposition * * cache blocking params. these values are good for most * RISC processors. * FFT parameters: * fftblock controls how many ffts are done at a time. * the default is appropriate for most cache-based machines * on vector machines, the FFT can be vectorized with vector * length equal to the block size, so the block size should * be as large as possible. this is the size of the smallest * dimension of the problem: 128 for class A, 256 for class B * and 512 for class C. * --------------------------------------------------------------------- */ #define FFTBLOCK_DEFAULT (DEFAULT_BEHAVIOR) #define FFTBLOCKPAD_DEFAULT (DEFAULT_BEHAVIOR) #define FFTBLOCK (FFTBLOCK_DEFAULT) #define FFTBLOCKPAD (FFTBLOCKPAD_DEFAULT) #define SEED (314159265.0) #define A (1220703125.0) #define PI (3.141592653589793238) #define ALPHA (1.0e-6) #define AP (-4.0*ALPHA*PI*PI) #define OMP_THREADS (3) #define TASK_INDEXMAP (0) #define TASK_INITIAL_CONDITIONS (1) #define TASK_INIT_UI (2) #define PROFILING_TOTAL_TIME (0) #define PROFILING_INDEXMAP (1) #define PROFILING_INITIAL_CONDITIONS (2) #define PROFILING_INIT_UI (3) #define PROFILING_EVOLVE (4) #define PROFILING_FFTX_1 (5) #define PROFILING_FFTX_2 (6) #define PROFILING_FFTX_3 (7) #define PROFILING_FFTY_1 (8) #define PROFILING_FFTY_2 (9) #define PROFILING_FFTY_3 (10) #define PROFILING_FFTZ_1 (11) #define PROFILING_FFTZ_2 (12) #define PROFILING_FFTZ_3 (13) #define PROFILING_CHECKSUM (14) #define PROFILING_INIT (15) #define CHECKSUM_TASKS (1024) /* global variables */ #if defined(DO_NOT_ALLOCATE_ARRAYS_WITH_DYNAMIC_MEMORY_AND_AS_SINGLE_DIMENSION) static dcomplex sums[NITER_DEFAULT+1]; static double twiddle[NTOTAL]; static dcomplex u[MAXDIM]; static dcomplex u0[NTOTAL]; static dcomplex u1[NTOTAL]; static int dims[3]; #else static dcomplex (*sums)=(dcomplex*)malloc(sizeof(dcomplex)*(NITER_DEFAULT+1)); static double (*twiddle)=(double*)malloc(sizeof(double)*(NTOTAL)); static dcomplex (*u)=(dcomplex*)malloc(sizeof(dcomplex)*(MAXDIM)); static dcomplex (*u0)=(dcomplex*)malloc(sizeof(dcomplex)*(NTOTAL)); static dcomplex (*u1)=(dcomplex*)malloc(sizeof(dcomplex)*(NTOTAL)); static int (*dims)=(int*)malloc(sizeof(int)*(3)); #endif static int niter; /* gpu variables */ double* starts_device; double* twiddle_device; dcomplex* sums_device; dcomplex* u_device; dcomplex* u0_device; dcomplex* u1_device; dcomplex* u2_device; dcomplex* y0_device; dcomplex* y1_device; size_t size_sums_device; size_t size_starts_device; size_t size_twiddle_device; size_t size_u_device; size_t size_u0_device; size_t size_u1_device; size_t size_y0_device; size_t size_y1_device; size_t size_shared_data; int blocks_per_grid_on_compute_indexmap; int blocks_per_grid_on_compute_initial_conditions; int blocks_per_grid_on_init_ui; int blocks_per_grid_on_evolve; int blocks_per_grid_on_fftx_1; int blocks_per_grid_on_fftx_2; int blocks_per_grid_on_fftx_3; int blocks_per_grid_on_ffty_1; int 
blocks_per_grid_on_ffty_2; int blocks_per_grid_on_ffty_3; int blocks_per_grid_on_fftz_1; int blocks_per_grid_on_fftz_2; int blocks_per_grid_on_fftz_3; int blocks_per_grid_on_checksum; int threads_per_block_on_compute_indexmap; int threads_per_block_on_compute_initial_conditions; int threads_per_block_on_init_ui; int threads_per_block_on_evolve; int threads_per_block_on_fftx_1; int threads_per_block_on_fftx_2; int threads_per_block_on_fftx_3; int threads_per_block_on_ffty_1; int threads_per_block_on_ffty_2; int threads_per_block_on_ffty_3; int threads_per_block_on_fftz_1; int threads_per_block_on_fftz_2; int threads_per_block_on_fftz_3; int threads_per_block_on_checksum; int gpu_device_id; int total_devices; hipDeviceProp_t gpu_device_properties; extern __shared__ double extern_share_data[]; /* function declarations */ static void cffts1_gpu(const int is, dcomplex u[], dcomplex x_in[], dcomplex x_out[], dcomplex y0[], dcomplex y1[]); __global__ void cffts1_gpu_kernel_1(dcomplex x_in[], dcomplex y0[]); __global__ void cffts1_gpu_kernel_2(const int is, dcomplex y0[], dcomplex y1[], dcomplex u_device[]); __global__ void cffts1_gpu_kernel_3(dcomplex x_out[], dcomplex y0[]); static void cffts2_gpu(int is, dcomplex u[], dcomplex x_in[], dcomplex x_out[], dcomplex y0[], dcomplex y1[]); __global__ void cffts2_gpu_kernel_1(dcomplex x_in[], dcomplex y0[]); __global__ void cffts2_gpu_kernel_2(const int is, dcomplex y0[], dcomplex y1[], dcomplex u_device[]); __global__ void cffts2_gpu_kernel_3(dcomplex x_out[], dcomplex y0[]); static void cffts3_gpu(int is, dcomplex u[], dcomplex x_in[], dcomplex x_out[], dcomplex y0[], dcomplex y1[]); __device__ void cffts3_gpu_cfftz_device(const int is, int m, int n, dcomplex x[], dcomplex y[], dcomplex u_device[], int index_arg, int size_arg); __device__ void cffts3_gpu_fftz2_device(const int is, int l, int m, int n, dcomplex u[], dcomplex x[], dcomplex y[], int index_arg, int size_arg); __global__ void cffts3_gpu_kernel_1(dcomplex x_in[], dcomplex y0[]); __global__ void cffts3_gpu_kernel_2(const int is, dcomplex y0[], dcomplex y1[], dcomplex u_device[]); __global__ void cffts3_gpu_kernel_3(dcomplex x_out[], dcomplex y0[]); static void checksum_gpu(int iteration, dcomplex u1[]); __global__ void checksum_gpu_kernel(int iteration, dcomplex u1[], dcomplex sums[]); static void compute_indexmap_gpu(double twiddle[]); __global__ void compute_indexmap_gpu_kernel(double twiddle[]); static void compute_initial_conditions_gpu(dcomplex u0[]); __global__ void compute_initial_conditions_gpu_kernel(dcomplex u0[], double starts[]); static void evolve_gpu(dcomplex u0[], dcomplex u1[], double twiddle[]); __global__ void evolve_gpu_kernel(dcomplex u0[], dcomplex u1[], double twiddle[]); static void fft_gpu(int dir, dcomplex x1[], dcomplex x2[]); static void fft_init_gpu(int n); static int ilog2(int n); __device__ int ilog2_device(int n); static void init_ui_gpu(dcomplex u0[], dcomplex u1[], double twiddle[]); __global__ void init_ui_gpu_kernel(dcomplex u0[], dcomplex u1[], double twiddle[]); static void ipow46(double a, int exponent, double* result); __device__ void ipow46_device(double a, int exponent, double* result); __device__ double randlc_device(double* x, double a); static void release_gpu(); static void setup(); static void setup_gpu(); static void verify (int d1, int d2, int d3, int nt, boolean* verified, char* class_npb); __device__ void vranlc_device(int n, double* x_seed, double a, double y[]); /* ft */ int main(int argc, char** argv){ #if 
defined(DO_NOT_ALLOCATE_ARRAYS_WITH_DYNAMIC_MEMORY_AND_AS_SINGLE_DIMENSION) printf(" DO_NOT_ALLOCATE_ARRAYS_WITH_DYNAMIC_MEMORY_AND_AS_SINGLE_DIMENSION mode on\n"); #endif #if defined(PROFILING) printf(" PROFILING mode on\n"); #endif int iter=0; double total_time, mflops; boolean verified; char class_npb; /* * --------------------------------------------------------------------- * run the entire problem once to make sure all data is touched. * this reduces variable startup costs, which is important for such a * short benchmark. the other NPB 2 implementations are similar. * --------------------------------------------------------------------- */ setup(); setup_gpu(); init_ui_gpu(u0_device, u1_device, twiddle_device); #pragma omp parallel { if(omp_get_thread_num()==TASK_INDEXMAP){ compute_indexmap_gpu(twiddle_device); }else if(omp_get_thread_num()==TASK_INITIAL_CONDITIONS){ compute_initial_conditions_gpu(u1_device); }else if(omp_get_thread_num()==TASK_INIT_UI){ fft_init_gpu(MAXDIM); } }hipDeviceSynchronize(); fft_gpu(1, u1_device, u0_device); /* * --------------------------------------------------------------------- * start over from the beginning. note that all operations must * be timed, in contrast to other benchmarks. * --------------------------------------------------------------------- */ timer_clear(PROFILING_TOTAL_TIME); #if defined(PROFILING) timer_clear(PROFILING_INDEXMAP); timer_clear(PROFILING_INITIAL_CONDITIONS); timer_clear(PROFILING_INITIAL_CONDITIONS); timer_clear(PROFILING_EVOLVE); timer_clear(PROFILING_FFTX_1); timer_clear(PROFILING_FFTX_2); timer_clear(PROFILING_FFTX_3); timer_clear(PROFILING_FFTY_1); timer_clear(PROFILING_FFTY_2); timer_clear(PROFILING_FFTY_3); timer_clear(PROFILING_FFTZ_1); timer_clear(PROFILING_FFTZ_2); timer_clear(PROFILING_FFTZ_3); timer_clear(PROFILING_CHECKSUM); #endif timer_start(PROFILING_TOTAL_TIME); #pragma omp parallel { if(omp_get_thread_num()==TASK_INDEXMAP){ compute_indexmap_gpu(twiddle_device); }else if(omp_get_thread_num()==TASK_INITIAL_CONDITIONS){ compute_initial_conditions_gpu(u1_device); }else if(omp_get_thread_num()==TASK_INIT_UI){ fft_init_gpu(MAXDIM); } }hipDeviceSynchronize(); fft_gpu(1, u1_device, u0_device); for(iter=1; iter<=niter; iter++){ evolve_gpu(u0_device, u1_device, twiddle_device); fft_gpu(-1, u1_device, u1_device); checksum_gpu(iter, u1_device); } hipMemcpy(sums, sums_device, size_sums_device, hipMemcpyDeviceToHost); for(iter=1; iter<=niter; iter++){ printf("T = %5d Checksum = %22.12e %22.12e\n", iter, sums[iter].real, sums[iter].imag); } verify(NX, NY, NZ, niter, &verified, &class_npb); timer_stop(PROFILING_TOTAL_TIME); total_time = timer_read(PROFILING_TOTAL_TIME); if(total_time != 0.0){ mflops = 1.0e-6 * ((double)(NTOTAL)) * (14.8157 + 7.19641 * log((double)(NTOTAL)) + (5.23518 + 7.21113 * log((double)(NTOTAL)))*niter) / total_time; }else{ mflops = 0.0; } char gpu_config[256]; char gpu_config_string[2048]; #if defined(PROFILING) sprintf(gpu_config, "%5s\t%25s\t%25s\t%25s\n", "GPU Kernel", "Threads Per Block", "Time in Seconds", "Time in Percentage"); strcpy(gpu_config_string, gpu_config); sprintf(gpu_config, "%29s\t%25d\t%25f\t%24.2f%%\n", " indexmap", threads_per_block_on_compute_indexmap, timer_read(PROFILING_INDEXMAP), (timer_read(PROFILING_INDEXMAP)*100/timer_read(PROFILING_TOTAL_TIME))); strcat(gpu_config_string, gpu_config); sprintf(gpu_config, "%29s\t%25d\t%25f\t%24.2f%%\n", " initial conditions", threads_per_block_on_compute_initial_conditions, timer_read(PROFILING_INITIAL_CONDITIONS), 
(timer_read(PROFILING_INITIAL_CONDITIONS)*100/timer_read(PROFILING_TOTAL_TIME))); strcat(gpu_config_string, gpu_config); sprintf(gpu_config, "%29s\t%25d\t%25f\t%24.2f%%\n", " init ui", threads_per_block_on_init_ui, timer_read(PROFILING_INIT_UI), (timer_read(PROFILING_INIT_UI)*100/timer_read(PROFILING_TOTAL_TIME))); strcat(gpu_config_string, gpu_config); sprintf(gpu_config, "%29s\t%25d\t%25f\t%24.2f%%\n", " evolve", threads_per_block_on_evolve, timer_read(PROFILING_EVOLVE), (timer_read(PROFILING_EVOLVE)*100/timer_read(PROFILING_TOTAL_TIME))); strcat(gpu_config_string, gpu_config); sprintf(gpu_config, "%29s\t%25d\t%25f\t%24.2f%%\n", " fftx 1", threads_per_block_on_fftx_1, timer_read(PROFILING_FFTX_1), (timer_read(PROFILING_FFTX_1)*100/timer_read(PROFILING_TOTAL_TIME))); strcat(gpu_config_string, gpu_config); sprintf(gpu_config, "%29s\t%25d\t%25f\t%24.2f%%\n", " fftx 2", threads_per_block_on_fftx_2, timer_read(PROFILING_FFTX_2), (timer_read(PROFILING_FFTX_2)*100/timer_read(PROFILING_TOTAL_TIME))); strcat(gpu_config_string, gpu_config); sprintf(gpu_config, "%29s\t%25d\t%25f\t%24.2f%%\n", " fftx 3", threads_per_block_on_fftx_3, timer_read(PROFILING_FFTX_3), (timer_read(PROFILING_FFTX_3)*100/timer_read(PROFILING_TOTAL_TIME))); strcat(gpu_config_string, gpu_config); sprintf(gpu_config, "%29s\t%25d\t%25f\t%24.2f%%\n", " ffty 1", threads_per_block_on_ffty_1, timer_read(PROFILING_FFTY_1), (timer_read(PROFILING_FFTY_1)*100/timer_read(PROFILING_TOTAL_TIME))); strcat(gpu_config_string, gpu_config); sprintf(gpu_config, "%29s\t%25d\t%25f\t%24.2f%%\n", " ffty 2", threads_per_block_on_ffty_2, timer_read(PROFILING_FFTY_2), (timer_read(PROFILING_FFTY_2)*100/timer_read(PROFILING_TOTAL_TIME))); strcat(gpu_config_string, gpu_config); sprintf(gpu_config, "%29s\t%25d\t%25f\t%24.2f%%\n", " ffty 3", threads_per_block_on_ffty_3, timer_read(PROFILING_FFTY_3), (timer_read(PROFILING_FFTY_3)*100/timer_read(PROFILING_TOTAL_TIME))); strcat(gpu_config_string, gpu_config); sprintf(gpu_config, "%29s\t%25d\t%25f\t%24.2f%%\n", " fftz 1", threads_per_block_on_fftz_1, timer_read(PROFILING_FFTZ_1), (timer_read(PROFILING_FFTZ_1)*100/timer_read(PROFILING_TOTAL_TIME))); strcat(gpu_config_string, gpu_config); sprintf(gpu_config, "%29s\t%25d\t%25f\t%24.2f%%\n", " fftz 2", threads_per_block_on_fftz_2, timer_read(PROFILING_FFTZ_2), (timer_read(PROFILING_FFTZ_2)*100/timer_read(PROFILING_TOTAL_TIME))); strcat(gpu_config_string, gpu_config); sprintf(gpu_config, "%29s\t%25d\t%25f\t%24.2f%%\n", " fftz 3", threads_per_block_on_fftz_3, timer_read(PROFILING_FFTZ_3), (timer_read(PROFILING_FFTZ_3)*100/timer_read(PROFILING_TOTAL_TIME))); strcat(gpu_config_string, gpu_config); sprintf(gpu_config, "%29s\t%25d\t%25f\t%24.2f%%\n", " checksum", threads_per_block_on_checksum, timer_read(PROFILING_CHECKSUM), (timer_read(PROFILING_CHECKSUM)*100/timer_read(PROFILING_TOTAL_TIME))); strcat(gpu_config_string, gpu_config); #else sprintf(gpu_config, "%5s\t%25s\n", "GPU Kernel", "Threads Per Block"); strcpy(gpu_config_string, gpu_config); sprintf(gpu_config, "%29s\t%25d\n", " indexmap", threads_per_block_on_compute_indexmap); strcat(gpu_config_string, gpu_config); sprintf(gpu_config, "%29s\t%25d\n", " initial conditions", threads_per_block_on_compute_initial_conditions); strcat(gpu_config_string, gpu_config); sprintf(gpu_config, "%29s\t%25d\n", " init ui", threads_per_block_on_init_ui); strcat(gpu_config_string, gpu_config); sprintf(gpu_config, "%29s\t%25d\n", " evolve", threads_per_block_on_evolve); strcat(gpu_config_string, gpu_config); sprintf(gpu_config, 
"%29s\t%25d\n", " fftx 1", threads_per_block_on_fftx_1); strcat(gpu_config_string, gpu_config); sprintf(gpu_config, "%29s\t%25d\n", " fftx 2", threads_per_block_on_fftx_2); strcat(gpu_config_string, gpu_config); sprintf(gpu_config, "%29s\t%25d\n", " fftx 3", threads_per_block_on_fftx_3); strcat(gpu_config_string, gpu_config); sprintf(gpu_config, "%29s\t%25d\n", " ffty 1", threads_per_block_on_ffty_1); strcat(gpu_config_string, gpu_config); sprintf(gpu_config, "%29s\t%25d\n", " ffty 2", threads_per_block_on_ffty_2); strcat(gpu_config_string, gpu_config); sprintf(gpu_config, "%29s\t%25d\n", " ffty 3", threads_per_block_on_ffty_3); strcat(gpu_config_string, gpu_config); sprintf(gpu_config, "%29s\t%25d\n", " fftz 1", threads_per_block_on_fftz_1); strcat(gpu_config_string, gpu_config); sprintf(gpu_config, "%29s\t%25d\n", " fftz 2", threads_per_block_on_fftz_2); strcat(gpu_config_string, gpu_config); sprintf(gpu_config, "%29s\t%25d\n", " fftz 3", threads_per_block_on_fftz_3); strcat(gpu_config_string, gpu_config); sprintf(gpu_config, "%29s\t%25d\n", " checksum", threads_per_block_on_checksum); strcat(gpu_config_string, gpu_config); #endif c_print_results((char*)"FT", class_npb, NX, NY, NZ, niter, total_time, mflops, (char*)" floating point", verified, (char*)NPBVERSION, (char*)COMPILETIME, (char*)COMPILERVERSION, (char*)LIBVERSION, (char*)CPU_MODEL, (char*)gpu_device_properties.name, (char*)gpu_config_string, (char*)CS1, (char*)CS2, (char*)CS3, (char*)CS4, (char*)CS5, (char*)CS6, (char*)CS7); release_gpu(); return 0; } static void cffts1_gpu(const int is, dcomplex u[], dcomplex x_in[], dcomplex x_out[], dcomplex y0[], dcomplex y1[]){ #if defined(PROFILING) timer_start(PROFILING_FFTX_1); #endif hipLaunchKernelGGL(( cffts1_gpu_kernel_1), dim3(blocks_per_grid_on_fftx_1), dim3( threads_per_block_on_fftx_1), 0, 0, x_in, y0); hipDeviceSynchronize(); #if defined(PROFILING) timer_stop(PROFILING_FFTX_1); #endif #if defined(PROFILING) timer_start(PROFILING_FFTX_2); #endif hipLaunchKernelGGL(( cffts1_gpu_kernel_2), dim3(blocks_per_grid_on_fftx_2), dim3( threads_per_block_on_fftx_2), 0, 0, is, y0, y1, u); hipDeviceSynchronize(); #if defined(PROFILING) timer_stop(PROFILING_FFTX_2); #endif #if defined(PROFILING) timer_start(PROFILING_FFTX_3); #endif hipLaunchKernelGGL(( cffts1_gpu_kernel_3), dim3(blocks_per_grid_on_fftx_3), dim3( threads_per_block_on_fftx_3), 0, 0, x_out, y0); hipDeviceSynchronize(); #if defined(PROFILING) timer_stop(PROFILING_FFTX_3); #endif } /* * ---------------------------------------------------------------------- * y0[z][x][y] = x_in[z][y][x] * * y0[y + x*NY + z*NX*NY] = x_in[x + y*NX + z*NX*NY] * ---------------------------------------------------------------------- */ __global__ void cffts1_gpu_kernel_1(dcomplex x_in[], dcomplex y0[]){ int x_y_z = blockIdx.x * blockDim.x + threadIdx.x; if(x_y_z >= (NX*NY*NZ)){ return; } int x = x_y_z % NX; int y = (x_y_z / NX) % NY; int z = x_y_z / (NX * NY); y0[y+(x*NY)+(z*NX*NY)].real = x_in[x_y_z].real; y0[y+(x*NY)+(z*NX*NY)].imag = x_in[x_y_z].imag; } /* * ---------------------------------------------------------------------- * pattern = j + variable*NY + k*NX*NY | variable is i and transforms x axis * ---------------------------------------------------------------------- */ __global__ void cffts1_gpu_kernel_2(const int is, dcomplex gty1[], dcomplex gty2[], dcomplex u_device[]){ int y_z = blockIdx.x * blockDim.x + threadIdx.x; if(y_z >= (NY*NZ)){ return; } int j, k; int l, j1, i1, k1; int n1, li, lj, lk, ku, i11, i12, i21, i22; j = y_z % NY; /* j 
= y */ k = (y_z / NY) % NZ; /* k = z */ const int logd1 = ilog2_device(NX); double uu1_real, x11_real, x21_real; double uu1_imag, x11_imag, x21_imag; double uu2_real, x12_real, x22_real; double uu2_imag, x12_imag, x22_imag; double temp_real, temp2_real; double temp_imag, temp2_imag; for(l=1; l<=logd1; l+=2){ n1 = NX / 2; lk = 1 << (l - 1); li = 1 << (logd1 - l); lj = 2 * lk; ku = li; for(i1=0; i1<=li-1; i1++){ for(k1=0; k1<=lk-1; k1++){ i11 = i1 * lk; i12 = i11 + n1; i21 = i1 * lj; i22 = i21 + lk; uu1_real = u_device[ku+i1].real; uu1_imag = is*u_device[ku+i1].imag; /* gty1[k][i11+k1][j] */ x11_real = gty1[j + (i11+k1)*NY + k*NX*NY].real; x11_imag = gty1[j + (i11+k1)*NY + k*NX*NY].imag; /* gty1[k][i12+k1][j] */ x21_real = gty1[j + (i12+k1)*NY + k*NX*NY].real; x21_imag = gty1[j + (i12+k1)*NY + k*NX*NY].imag; /* gty2[k][i21+k1][j] */ gty2[j + (i21+k1)*NY + k*NX*NY].real = x11_real + x21_real; gty2[j + (i21+k1)*NY + k*NX*NY].imag = x11_imag + x21_imag; temp_real = x11_real - x21_real; temp_imag = x11_imag - x21_imag; /* gty2[k][i22+k1][j] */ gty2[j + (i22+k1)*NY + k*NX*NY].real = (uu1_real)*(temp_real) - (uu1_imag)*(temp_imag); gty2[j + (i22+k1)*NY + k*NX*NY].imag = (uu1_real)*(temp_imag) + (uu1_imag)*(temp_real); } } if(l==logd1){ for(j1=0; j1<NX; j1++){ /* gty1[k][j1][j] */ gty1[j + j1*NY + k*NX*NY].real = gty2[j + j1*NY + k*NX*NY].real; gty1[j + j1*NY + k*NX*NY].imag = gty2[j + j1*NY + k*NX*NY].imag; } }else{ n1 = NX / 2; lk = 1 << (l+1 - 1); li = 1 << (logd1 - (l+1)); lj = 2 * lk; ku = li; for(i1=0; i1<=li-1; i1++){ for(k1=0; k1<=lk-1; k1++){ i11 = i1 * lk; i12 = i11 + n1; i21 = i1 * lj; i22 = i21 + lk; uu2_real = u_device[ku+i1].real; uu2_imag = is*u_device[ku+i1].imag; /* gty2[k][i11+k1][j] */ x12_real = gty2[j + (i11+k1)*NY + k*NX*NY].real; x12_imag = gty2[j + (i11+k1)*NY + k*NX*NY].imag; /* gty2[k][i12+k1][j] */ x22_real = gty2[j + (i12+k1)*NY + k*NX*NY].real; x22_imag = gty2[j + (i12+k1)*NY + k*NX*NY].imag; /* gty2[k][i21+k1][j] */ gty1[j + (i21+k1)*NY + k*NX*NY].real = x12_real + x22_real; gty1[j + (i21+k1)*NY + k*NX*NY].imag = x12_imag + x22_imag; temp2_real = x12_real - x22_real; temp2_imag = x12_imag - x22_imag; /* gty1[k][i22+k1][j] */ gty1[j + (i22+k1)*NY + k*NX*NY].real = (uu2_real)*(temp2_real) - (uu2_imag)*(temp2_imag); gty1[j + (i22+k1)*NY + k*NX*NY].imag = (uu2_real)*(temp2_imag) + (uu2_imag)*(temp2_real); } } } } } /* * ---------------------------------------------------------------------- * x_out[z][y][x] = y0[z][x][y] * * x_out[x + y*NX + z*NX*NY] = y0[y + x*NY + z*NX*NY] * ---------------------------------------------------------------------- */ __global__ void cffts1_gpu_kernel_3(dcomplex x_out[], dcomplex y0[]){ int x_y_z = blockIdx.x * blockDim.x + threadIdx.x; if(x_y_z >= (NX*NY*NZ)){ return; } int x = x_y_z % NX; int y = (x_y_z / NX) % NY; int z = x_y_z / (NX * NY); x_out[x_y_z].real = y0[y+(x*NY)+(z*NX*NY)].real; x_out[x_y_z].imag = y0[y+(x*NY)+(z*NX*NY)].imag; } static void cffts2_gpu(int is, dcomplex u[], dcomplex x_in[], dcomplex x_out[], dcomplex y0[], dcomplex y1[]){ #if defined(PROFILING) timer_start(PROFILING_FFTY_1); #endif hipLaunchKernelGGL(( cffts2_gpu_kernel_1), dim3(blocks_per_grid_on_ffty_1), dim3( threads_per_block_on_ffty_1), 0, 0, x_in, y0); hipDeviceSynchronize(); #if defined(PROFILING) timer_stop(PROFILING_FFTY_1); #endif #if defined(PROFILING) timer_start(PROFILING_FFTY_2); #endif hipLaunchKernelGGL(( cffts2_gpu_kernel_2), dim3(blocks_per_grid_on_ffty_2), dim3( threads_per_block_on_ffty_2), 0, 0, is, y0, y1, u); hipDeviceSynchronize(); #if 
defined(PROFILING) timer_stop(PROFILING_FFTY_2); #endif #if defined(PROFILING) timer_start(PROFILING_FFTY_3); #endif hipLaunchKernelGGL(( cffts2_gpu_kernel_3), dim3(blocks_per_grid_on_ffty_3), dim3( threads_per_block_on_ffty_3), 0, 0, x_out, y0); hipDeviceSynchronize(); #if defined(PROFILING) timer_stop(PROFILING_FFTY_3); #endif } /* * ---------------------------------------------------------------------- * y0[z][y][x] = x_in[z][y][x] * * y0[x + y*NX + z*NX*NY] = x_in[x + y*NX + z*NX*NY] * ---------------------------------------------------------------------- */ __global__ void cffts2_gpu_kernel_1(dcomplex x_in[], dcomplex y0[]){ int x_y_z = blockIdx.x * blockDim.x + threadIdx.x; if(x_y_z >= (NX*NY*NZ)){ return; } y0[x_y_z].real = x_in[x_y_z].real; y0[x_y_z].imag = x_in[x_y_z].imag; } /* * ---------------------------------------------------------------------- * pattern = i + variable*NX + k*NX*NY | variable is j and transforms y axis * ---------------------------------------------------------------------- */ __global__ void cffts2_gpu_kernel_2(const int is, dcomplex gty1[], dcomplex gty2[], dcomplex u_device[]){ int x_z = blockIdx.x * blockDim.x + threadIdx.x; if(x_z >= (NX*NZ)){ return; } int i, k; int l, j1, i1, k1; int n1, li, lj, lk, ku, i11, i12, i21, i22; i = x_z % NX; /* i = x */ k = (x_z / NX) % NZ; /* k = z */ const int logd2 = ilog2_device(NY); double uu1_real, x11_real, x21_real; double uu1_imag, x11_imag, x21_imag; double uu2_real, x12_real, x22_real; double uu2_imag, x12_imag, x22_imag; double temp_real, temp2_real; double temp_imag, temp2_imag; for(l=1; l<=logd2; l+=2){ n1 = NY / 2; lk = 1 << (l - 1); li = 1 << (logd2 - l); lj = 2 * lk; ku = li; for(i1=0; i1<=li-1; i1++){ for(k1=0; k1<=lk-1; k1++){ i11 = i1 * lk; i12 = i11 + n1; i21 = i1 * lj; i22 = i21 + lk; uu1_real = u_device[ku+i1].real; uu1_imag = is*u_device[ku+i1].imag; /* gty1[k][i11+k1][i] */ x11_real = gty1[i + (i11+k1)*NX + k*NX*NY].real; x11_imag = gty1[i + (i11+k1)*NX + k*NX*NY].imag; /* gty1[k][i12+k1][i] */ x21_real = gty1[i + (i12+k1)*NX + k*NX*NY].real; x21_imag = gty1[i + (i12+k1)*NX + k*NX*NY].imag; /* gty2[k][i21+k1][i] */ gty2[i + (i21+k1)*NX + k*NX*NY].real = x11_real + x21_real; gty2[i + (i21+k1)*NX + k*NX*NY].imag = x11_imag + x21_imag; temp_real = x11_real - x21_real; temp_imag = x11_imag - x21_imag; /* gty2[k][i22+k1][i] */ gty2[i + (i22+k1)*NX + k*NX*NY].real = (uu1_real)*(temp_real) - (uu1_imag)*(temp_imag); gty2[i + (i22+k1)*NX + k*NX*NY].imag = (uu1_real)*(temp_imag) + (uu1_imag)*(temp_real); } } if(l==logd2){ for(j1=0; j1<NY; j1++){ /* gty1[k][j1][i] */ gty1[i + j1*NX + k*NX*NY].real = gty2[i + j1*NX + k*NX*NY].real; gty1[i + j1*NX + k*NX*NY].imag = gty2[i + j1*NX + k*NX*NY].imag; } } else{ n1 = NY / 2; lk = 1 << (l+1 - 1); li = 1 << (logd2 - (l+1)); lj = 2 * lk; ku = li; for(i1=0; i1<=li-1; i1++){ for(k1=0; k1<=lk-1; k1++){ i11 = i1 * lk; i12 = i11 + n1; i21 = i1 * lj; i22 = i21 + lk; uu2_real = u_device[ku+i1].real; uu2_imag = is*u_device[ku+i1].imag; /* gty2[k][i11+k1][i] */ x12_real = gty2[i + (i11+k1)*NX + k*NX*NY].real; x12_imag = gty2[i + (i11+k1)*NX + k*NX*NY].imag; /* gty2[k][i12+k1][i] */ x22_real = gty2[i + (i12+k1)*NX + k*NX*NY].real; x22_imag = gty2[i + (i12+k1)*NX + k*NX*NY].imag; /* gty1[k][i21+k1][i] */ gty1[i + (i21+k1)*NX + k*NX*NY].real = x12_real + x22_real; gty1[i + (i21+k1)*NX + k*NX*NY].imag = x12_imag + x22_imag; temp2_real = x12_real - x22_real; temp2_imag = x12_imag - x22_imag; /* gty1[k][i22+k1][i] */ gty1[i + (i22+k1)*NX + k*NX*NY].real = (uu2_real)*(temp2_real) - 
(uu2_imag)*(temp2_imag); gty1[i + (i22+k1)*NX + k*NX*NY].imag = (uu2_real)*(temp2_imag) + (uu2_imag)*(temp2_real); } } } } } /* * ---------------------------------------------------------------------- * x_out[z][y][x] = y0[z][y][x] * * x_out[x + y*NX + z*NX*NY] = y0[x + y*NX + z*NX*NY] * ---------------------------------------------------------------------- */ __global__ void cffts2_gpu_kernel_3(dcomplex x_out[], dcomplex y0[]){ int x_y_z = blockIdx.x * blockDim.x + threadIdx.x; if(x_y_z >= (NX*NY*NZ)){ return; } x_out[x_y_z].real = y0[x_y_z].real; x_out[x_y_z].imag = y0[x_y_z].imag; } static void cffts3_gpu(int is, dcomplex u[], dcomplex x_in[], dcomplex x_out[], dcomplex y0[], dcomplex y1[]){ #if defined(PROFILING) timer_start(PROFILING_FFTZ_1); #endif hipLaunchKernelGGL(( cffts3_gpu_kernel_1), dim3(blocks_per_grid_on_fftz_1), dim3( threads_per_block_on_fftz_1), 0, 0, x_in, y0); hipDeviceSynchronize(); #if defined(PROFILING) timer_stop(PROFILING_FFTZ_1); #endif #if defined(PROFILING) timer_start(PROFILING_FFTZ_2); #endif hipLaunchKernelGGL(( cffts3_gpu_kernel_2), dim3(blocks_per_grid_on_fftz_2), dim3( threads_per_block_on_fftz_2), 0, 0, is, y0, y1, u); hipDeviceSynchronize(); #if defined(PROFILING) timer_stop(PROFILING_FFTZ_2); #endif #if defined(PROFILING) timer_start(PROFILING_FFTZ_3); #endif hipLaunchKernelGGL(( cffts3_gpu_kernel_3), dim3(blocks_per_grid_on_fftz_3), dim3( threads_per_block_on_fftz_3), 0, 0, x_out, y0); hipDeviceSynchronize(); #if defined(PROFILING) timer_stop(PROFILING_FFTZ_3); #endif } /* * ---------------------------------------------------------------------- * pattern = i + j*NX + variable*NX*NY | variable is z and transforms z axis * * index_arg = i + j*NX * * size_arg = NX*NY * ---------------------------------------------------------------------- */ __device__ void cffts3_gpu_cfftz_device(const int is, int m, int n, dcomplex x[], dcomplex y[], dcomplex u_device[], int index_arg, int size_arg){ int j,l; /* * --------------------------------------------------------------------- * perform one variant of the Stockham FFT. * --------------------------------------------------------------------- */ for(l=1; l<=m; l+=2){ cffts3_gpu_fftz2_device(is, l, m, n, u_device, x, y, index_arg, size_arg); if(l==m){break;} cffts3_gpu_fftz2_device(is, l + 1, m, n, u_device, y, x, index_arg, size_arg); } /* * --------------------------------------------------------------------- * copy Y to X. * --------------------------------------------------------------------- */ if(m%2==1){ for(j=0; j<n; j++){ x[j*size_arg+index_arg].real = y[j*size_arg+index_arg].real; x[j*size_arg+index_arg].imag = y[j*size_arg+index_arg].imag; } } } /* * ---------------------------------------------------------------------- * pattern = i + j*NX + variable*NX*NY | variable is z and transforms z axis * * index_arg = i + j*NX * * size_arg = NX*NY * ---------------------------------------------------------------------- */ __device__ void cffts3_gpu_fftz2_device(const int is, int l, int m, int n, dcomplex u[], dcomplex x[], dcomplex y[], int index_arg, int size_arg){ int k,n1,li,lj,lk,ku,i,i11,i12,i21,i22; double x11real, x11imag; double x21real, x21imag; dcomplex u1; /* * --------------------------------------------------------------------- * set initial parameters. 
* --------------------------------------------------------------------- */ n1 = n / 2; lk = 1 << (l - 1); li = 1 << (m - l); lj = 2 * lk; ku = li; for(i=0; i<li; i++){ i11 = i * lk; i12 = i11 + n1; i21 = i * lj; i22 = i21 + lk; if(is>=1){ u1.real = u[ku+i].real; u1.imag = u[ku+i].imag; }else{ u1.real = u[ku+i].real; u1.imag = -u[ku+i].imag; } for(k=0; k<lk; k++){ x11real = x[(i11+k)*size_arg+index_arg].real; x11imag = x[(i11+k)*size_arg+index_arg].imag; x21real = x[(i12+k)*size_arg+index_arg].real; x21imag = x[(i12+k)*size_arg+index_arg].imag; y[(i21+k)*size_arg+index_arg].real = x11real + x21real; y[(i21+k)*size_arg+index_arg].imag = x11imag + x21imag; y[(i22+k)*size_arg+index_arg].real = u1.real * (x11real - x21real) - u1.imag * (x11imag - x21imag); y[(i22+k)*size_arg+index_arg].imag = u1.real * (x11imag - x21imag) + u1.imag * (x11real - x21real); } } } /* * ---------------------------------------------------------------------- * y0[z][y][x] = x_in[z][y][x] * * y0[x + y*NX + z*NX*NY] = x_in[x + y*NX + z*NX*NY] * ---------------------------------------------------------------------- */ __global__ void cffts3_gpu_kernel_1(dcomplex x_in[], dcomplex y0[]){ int x_y_z = blockIdx.x * blockDim.x + threadIdx.x; if(x_y_z >= (NX*NY*NZ)){ return; } y0[x_y_z].real = x_in[x_y_z].real; y0[x_y_z].imag = x_in[x_y_z].imag; } /* * ---------------------------------------------------------------------- * pattern = i + j*NX + variable*NX*NY | variable is z and transforms z axis * ---------------------------------------------------------------------- */ __global__ void cffts3_gpu_kernel_2(const int is, dcomplex gty1[], dcomplex gty2[], dcomplex u_device[]){ int x_y = blockIdx.x * blockDim.x + threadIdx.x; if(x_y >= (NX*NY)){ return; } cffts3_gpu_cfftz_device(is, ilog2_device(NZ), NZ, gty1 , gty2, u_device, x_y /* index_arg */, NX*NY /* size_arg */); } /* * ---------------------------------------------------------------------- * x_out[z][y][x] = y0[z][y][x] * * x_out[x + y*NX + z*NX*NY] = y0[x + y*NX + z*NX*NY] * ---------------------------------------------------------------------- */ __global__ void cffts3_gpu_kernel_3(dcomplex x_out[], dcomplex y0[]){ int x_y_z = blockIdx.x * blockDim.x + threadIdx.x; if(x_y_z >= (NX*NY*NZ)){ return; } x_out[x_y_z].real = y0[x_y_z].real; x_out[x_y_z].imag = y0[x_y_z].imag; } static void checksum_gpu(int iteration, dcomplex u1[]){ #if defined(PROFILING) timer_start(PROFILING_CHECKSUM); #endif hipLaunchKernelGGL(( checksum_gpu_kernel), dim3(blocks_per_grid_on_checksum), dim3( threads_per_block_on_checksum), size_shared_data, 0, iteration, u1, sums_device); #if defined(PROFILING) timer_stop(PROFILING_CHECKSUM); #endif } __global__ void checksum_gpu_kernel(int iteration, dcomplex u1[], dcomplex sums[]){ dcomplex* share_sums = (dcomplex*)(extern_share_data); int j = (blockIdx.x * blockDim.x + threadIdx.x) + 1; int q, r, s; if(j<=CHECKSUM_TASKS){ q = j % NX; r = 3*j % NY; s = 5*j % NZ; share_sums[threadIdx.x] = u1[ q + r*NX + s*NX*NY ]; }else{ share_sums[threadIdx.x] = dcomplex_create(0.0, 0.0); } __syncthreads(); for(int i=blockDim.x/2; i>0; i>>=1){ if(threadIdx.x<i){ share_sums[threadIdx.x] = dcomplex_add(share_sums[threadIdx.x], share_sums[threadIdx.x+i]); } __syncthreads(); } if(threadIdx.x==0){ share_sums[0].real = share_sums[0].real/(double)(NTOTAL); atomicAdd(&sums[iteration].real,share_sums[0].real); share_sums[0].imag = share_sums[0].imag/(double)(NTOTAL); atomicAdd(&sums[iteration].imag,share_sums[0].imag); } } static void compute_indexmap_gpu(double twiddle[]){ #if 
defined(PROFILING) timer_start(PROFILING_INDEXMAP); #endif hipLaunchKernelGGL(( compute_indexmap_gpu_kernel), dim3(blocks_per_grid_on_compute_indexmap), dim3( threads_per_block_on_compute_indexmap), 0, 0, twiddle); #if defined(PROFILING) timer_stop(PROFILING_INDEXMAP); #endif } __global__ void compute_indexmap_gpu_kernel(double twiddle[]){ int thread_id = blockIdx.x * blockDim.x + threadIdx.x; if(thread_id>=NTOTAL){ return; } int i = thread_id % NX; int j = (thread_id / NX) % NY; int k = thread_id / (NX * NY); int kk, kk2, jj, kj2, ii; kk = ((k+NZ/2) % NZ) - NZ/2; kk2 = kk*kk; jj = ((j+NY/2) % NY) - NY/2; kj2 = jj*jj+kk2; ii = ((i+NX/2) % NX) - NX/2; twiddle[thread_id] = exp(AP*(double)(ii*ii+kj2)); } static void compute_initial_conditions_gpu(dcomplex u0[]){ #if defined(PROFILING) timer_start(PROFILING_INITIAL_CONDITIONS); #endif int z; double start, an, starts[NZ]; start = SEED; ipow46(A, 0, &an); randlc(&start, an); ipow46(A, 2*NX*NY, &an); starts[0] = start; for(z=1; z<NZ; z++){ randlc(&start, an); starts[z] = start; } hipMemcpy(starts_device, starts, size_starts_device, hipMemcpyHostToDevice); hipLaunchKernelGGL(( compute_initial_conditions_gpu_kernel), dim3(blocks_per_grid_on_compute_initial_conditions), dim3( threads_per_block_on_compute_initial_conditions), 0, 0, u0, starts_device); #if defined(PROFILING) timer_stop(PROFILING_INITIAL_CONDITIONS); #endif } __global__ void compute_initial_conditions_gpu_kernel(dcomplex u0[], double starts[]){ int z = blockIdx.x * blockDim.x + threadIdx.x; if(z>=NZ){return;} double x0 = starts[z]; for(int y=0; y<NY; y++){ vranlc_device(2*NX, &x0, A, (double*)&u0[ 0 + y*NX + z*NX*NY ]); } } static void evolve_gpu(dcomplex u0[], dcomplex u1[], double twiddle[]){ #if defined(PROFILING) timer_start(PROFILING_EVOLVE); #endif hipLaunchKernelGGL(( evolve_gpu_kernel), dim3(blocks_per_grid_on_evolve), dim3( threads_per_block_on_evolve), 0, 0, u0, u1, twiddle); hipDeviceSynchronize(); #if defined(PROFILING) timer_stop(PROFILING_EVOLVE); #endif } __global__ void evolve_gpu_kernel(dcomplex u0[], dcomplex u1[], double twiddle[]){ int thread_id = blockIdx.x * blockDim.x + threadIdx.x; if(thread_id>=(NZ*NY*NX)){ return; } u0[thread_id] = dcomplex_mul2(u0[thread_id], twiddle[thread_id]); u1[thread_id] = u0[thread_id]; } static void fft_gpu(int dir, dcomplex x1[], dcomplex x2[]){ /* * --------------------------------------------------------------------- * note: args x1, x2 must be different arrays * note: args for cfftsx are (direction, layout, xin, xout, scratch) * xin/xout may be the same and it can be somewhat faster * if they are * --------------------------------------------------------------------- */ if(dir==1){ cffts1_gpu(1, u_device, x1, x1, y0_device, y1_device); cffts2_gpu(1, u_device, x1, x1, y0_device, y1_device); cffts3_gpu(1, u_device, x1, x2, y0_device, y1_device); }else{ cffts3_gpu(-1, u_device, x1, x1, y0_device, y1_device); cffts2_gpu(-1, u_device, x1, x1, y0_device, y1_device); cffts1_gpu(-1, u_device, x1, x2, y0_device, y1_device); } } static void fft_init_gpu(int n){ #if defined(PROFILING) timer_start(PROFILING_INIT); #endif int m,ku,i,j,ln; double t, ti; /* * --------------------------------------------------------------------- * initialize the U array with sines and cosines in a manner that permits * stride one access at each FFT iteration. 
* --------------------------------------------------------------------- */ m = ilog2(n); u[0] = dcomplex_create((double)m, 0.0); ku = 2; ln = 1; for(j=1; j<=m; j++){ t = PI / ln; for(i=0; i<=ln-1; i++){ ti = i * t; u[i+ku-1] = dcomplex_create(cos(ti), sin(ti)); } ku = ku + ln; ln = 2 * ln; } hipMemcpy(u_device, u, size_u_device, hipMemcpyHostToDevice); #if defined(PROFILING) timer_stop(PROFILING_INIT); #endif } static int ilog2(int n){ int nn, lg; if(n==1){ return 0; } lg = 1; nn = 2; while(nn<n){ nn = nn << 1; lg++; } return lg; } __device__ int ilog2_device(int n){ int nn, lg; if(n==1){ return 0; } lg = 1; nn = 2; while(nn<n){ nn = nn << 1; lg++; } return lg; } static void init_ui_gpu(dcomplex u0[], dcomplex u1[], double twiddle[]){ #if defined(PROFILING) timer_start(PROFILING_INIT_UI); #endif hipLaunchKernelGGL(( init_ui_gpu_kernel), dim3(blocks_per_grid_on_init_ui), dim3( threads_per_block_on_init_ui), 0, 0, u0, u1, twiddle); hipDeviceSynchronize(); #if defined(PROFILING) timer_stop(PROFILING_INIT_UI); #endif } __global__ void init_ui_gpu_kernel(dcomplex u0[], dcomplex u1[], double twiddle[]){ int thread_id = blockIdx.x * blockDim.x + threadIdx.x; if(thread_id>=NTOTAL){ return; } u0[thread_id] = dcomplex_create(0.0, 0.0); u1[thread_id] = dcomplex_create(0.0, 0.0); twiddle[thread_id] = 0.0; } static void ipow46(double a, int exponent, double* result){ double q, r; int n, n2; /* * -------------------------------------------------------------------- * use * a^n = a^(n/2)*a^(n/2) if n even else * a^n = a*a^(n-1) if n odd * ------------------------------------------------------------------- */ *result = 1; if(exponent==0){return;} q = a; r = 1; n = exponent; while(n>1){ n2 = n/2; if(n2*2==n){ randlc(&q, q); n = n2; }else{ randlc(&r, q); n = n-1; } } randlc(&r, q); *result = r; } __device__ void ipow46_device(double a, int exponent, double* result){ double q, r; int n, n2; /* * -------------------------------------------------------------------- * use * a^n = a^(n/2)*a^(n/2) if n even else * a^n = a*a^(n-1) if n odd * ------------------------------------------------------------------- */ *result = 1; if(exponent==0){return;} q = a; r = 1; n = exponent; while(n>1){ n2 = n/2; if(n2*2==n){ randlc_device(&q, q); n = n2; }else{ randlc_device(&r, q); n = n-1; } } randlc_device(&r, q); *result = r; } __device__ double randlc_device(double* x, double a){ double t1,t2,t3,t4,a1,a2,x1,x2,z; t1 = R23 * a; a1 = (int)t1; a2 = a - T23 * a1; t1 = R23 * (*x); x1 = (int)t1; x2 = (*x) - T23 * x1; t1 = a1 * x2 + a2 * x1; t2 = (int)(R23 * t1); z = t1 - T23 * t2; t3 = T23 * z + a2 * x2; t4 = (int)(R46 * t3); (*x) = t3 - T46 * t4; return (R46 * (*x)); } static void release_gpu(){ hipFree(sums_device); hipFree(starts_device); hipFree(twiddle_device); hipFree(u_device); hipFree(u0_device); hipFree(u1_device); hipFree(y0_device); hipFree(y1_device); } static void setup(){ niter = NITER_DEFAULT; printf("\n\n NAS Parallel Benchmarks 4.1 CUDA C++ version - FT Benchmark\n\n"); printf(" Size : %4dx%4dx%4d\n", NX, NY, NZ); printf(" Iterations :%7d\n", niter); printf("\n"); } static void setup_gpu(){ /* * struct hipDeviceProp_t{ * char name[256]; * size_t totalGlobalMem; * size_t sharedMemPerBlock; * int regsPerBlock; * int warpSize; * size_t memPitch; * int maxThreadsPerBlock; * int maxThreadsDim[3]; * int maxGridSize[3]; * size_t totalConstMem; * int major; * int minor; * int clockRate; * size_t textureAlignment; * int deviceOverlap; * int multiProcessorCount; * int kernelExecTimeoutEnabled; * int integrated; * int 
canMapHostMemory; * int computeMode; * int concurrentKernels; * int ECCEnabled; * int pciBusID; * int pciDeviceID; * int tccDriver; * } */ /* amount of available devices */ hipGetDeviceCount(&total_devices); /* define gpu_device */ if(total_devices==0){ printf("\n\n\nNo Nvidia GPU found!\n\n\n"); exit(-1); }else if((GPU_DEVICE>=0)&& (GPU_DEVICE<total_devices)){ gpu_device_id = GPU_DEVICE; }else{ gpu_device_id = 0; } hipSetDevice(gpu_device_id); hipGetDeviceProperties(&gpu_device_properties, gpu_device_id); /* define threads_per_block */ if((FT_THREADS_PER_BLOCK_ON_COMPUTE_INDEXMAP>=1)&& (FT_THREADS_PER_BLOCK_ON_COMPUTE_INDEXMAP<=gpu_device_properties.maxThreadsPerBlock)){ threads_per_block_on_compute_indexmap = FT_THREADS_PER_BLOCK_ON_COMPUTE_INDEXMAP; }else{ threads_per_block_on_compute_indexmap = gpu_device_properties.warpSize; } if((FT_THREADS_PER_BLOCK_ON_COMPUTE_INITIAL_CONDITIONS>=1)&& (FT_THREADS_PER_BLOCK_ON_COMPUTE_INITIAL_CONDITIONS<=gpu_device_properties.maxThreadsPerBlock)){ threads_per_block_on_compute_initial_conditions = FT_THREADS_PER_BLOCK_ON_COMPUTE_INITIAL_CONDITIONS; }else{ threads_per_block_on_compute_initial_conditions = gpu_device_properties.warpSize; } if((FT_THREADS_PER_BLOCK_ON_INIT_UI>=1)&& (FT_THREADS_PER_BLOCK_ON_INIT_UI<=gpu_device_properties.maxThreadsPerBlock)){ threads_per_block_on_init_ui = FT_THREADS_PER_BLOCK_ON_INIT_UI; }else{ threads_per_block_on_init_ui=gpu_device_properties.warpSize; } if((FT_THREADS_PER_BLOCK_ON_EVOLVE>=1)&& (FT_THREADS_PER_BLOCK_ON_EVOLVE<=gpu_device_properties.maxThreadsPerBlock)){ threads_per_block_on_evolve = FT_THREADS_PER_BLOCK_ON_EVOLVE; }else{ threads_per_block_on_evolve=gpu_device_properties.warpSize; } if((FT_THREADS_PER_BLOCK_ON_FFTX_1>=1)&& (FT_THREADS_PER_BLOCK_ON_FFTX_1<=gpu_device_properties.maxThreadsPerBlock)){ threads_per_block_on_fftx_1 = FT_THREADS_PER_BLOCK_ON_FFTX_1; }else{ threads_per_block_on_fftx_1 = gpu_device_properties.warpSize; } if((FT_THREADS_PER_BLOCK_ON_FFTX_2>=1)&& (FT_THREADS_PER_BLOCK_ON_FFTX_2<=gpu_device_properties.maxThreadsPerBlock)){ threads_per_block_on_fftx_2 = FT_THREADS_PER_BLOCK_ON_FFTX_2; }else{ threads_per_block_on_fftx_2 = gpu_device_properties.warpSize; } if((FT_THREADS_PER_BLOCK_ON_FFTX_3>=1)&& (FT_THREADS_PER_BLOCK_ON_FFTX_3<=gpu_device_properties.maxThreadsPerBlock)){ threads_per_block_on_fftx_3 = FT_THREADS_PER_BLOCK_ON_FFTX_3; }else{ threads_per_block_on_fftx_3 = gpu_device_properties.warpSize; } if((FT_THREADS_PER_BLOCK_ON_FFTY_1>=1)&& (FT_THREADS_PER_BLOCK_ON_FFTY_1<=gpu_device_properties.maxThreadsPerBlock)){ threads_per_block_on_ffty_1 = FT_THREADS_PER_BLOCK_ON_FFTY_1; }else{ threads_per_block_on_ffty_1 = gpu_device_properties.warpSize; } if((FT_THREADS_PER_BLOCK_ON_FFTY_2>=1)&& (FT_THREADS_PER_BLOCK_ON_FFTY_2<=gpu_device_properties.maxThreadsPerBlock)){ threads_per_block_on_ffty_2 = FT_THREADS_PER_BLOCK_ON_FFTY_2; }else{ threads_per_block_on_ffty_2 = gpu_device_properties.warpSize; } if((FT_THREADS_PER_BLOCK_ON_FFTY_3>=1)&& (FT_THREADS_PER_BLOCK_ON_FFTY_3<=gpu_device_properties.maxThreadsPerBlock)){ threads_per_block_on_ffty_3 = FT_THREADS_PER_BLOCK_ON_FFTY_3; }else{ threads_per_block_on_ffty_3 = gpu_device_properties.warpSize; } if((FT_THREADS_PER_BLOCK_ON_FFTZ_1>=1)&& (FT_THREADS_PER_BLOCK_ON_FFTZ_1<=gpu_device_properties.maxThreadsPerBlock)){ threads_per_block_on_fftz_1 = FT_THREADS_PER_BLOCK_ON_FFTZ_1; }else{ threads_per_block_on_fftz_1 = gpu_device_properties.warpSize; } if((FT_THREADS_PER_BLOCK_ON_FFTZ_2>=1)&& 
(FT_THREADS_PER_BLOCK_ON_FFTZ_2<=gpu_device_properties.maxThreadsPerBlock)){ threads_per_block_on_fftz_2 = FT_THREADS_PER_BLOCK_ON_FFTZ_2; }else{ threads_per_block_on_fftz_2 = gpu_device_properties.warpSize; } if((FT_THREADS_PER_BLOCK_ON_FFTZ_3>=1)&& (FT_THREADS_PER_BLOCK_ON_FFTZ_3<=gpu_device_properties.maxThreadsPerBlock)){ threads_per_block_on_fftz_3 = FT_THREADS_PER_BLOCK_ON_FFTZ_3; }else{ threads_per_block_on_fftz_3 = gpu_device_properties.warpSize; } if((FT_THREADS_PER_BLOCK_ON_CHECKSUM>=1)&& (FT_THREADS_PER_BLOCK_ON_CHECKSUM<=gpu_device_properties.maxThreadsPerBlock)){ threads_per_block_on_checksum = FT_THREADS_PER_BLOCK_ON_CHECKSUM; }else{ threads_per_block_on_checksum = gpu_device_properties.warpSize; } blocks_per_grid_on_compute_indexmap=ceil(double(NTOTAL)/double(threads_per_block_on_compute_indexmap)); blocks_per_grid_on_compute_initial_conditions=ceil(double(NZ)/double(threads_per_block_on_compute_initial_conditions)); blocks_per_grid_on_init_ui=ceil(double(NTOTAL)/double(threads_per_block_on_init_ui)); blocks_per_grid_on_evolve=ceil(double(NTOTAL)/double(threads_per_block_on_evolve)); blocks_per_grid_on_fftx_1=ceil(double(NX*NY*NZ)/double(threads_per_block_on_fftx_1)); blocks_per_grid_on_fftx_2=ceil(double(NY*NZ)/double(threads_per_block_on_fftx_2)); blocks_per_grid_on_fftx_3=ceil(double(NX*NY*NZ)/double(threads_per_block_on_fftx_3)); blocks_per_grid_on_ffty_1=ceil(double(NX*NY*NZ)/double(threads_per_block_on_ffty_1)); blocks_per_grid_on_ffty_2=ceil(double(NX*NZ)/double(threads_per_block_on_ffty_2)); blocks_per_grid_on_ffty_3=ceil(double(NX*NY*NZ)/double(threads_per_block_on_ffty_3)); blocks_per_grid_on_fftz_1=ceil(double(NX*NY*NZ)/double(threads_per_block_on_fftz_1)); blocks_per_grid_on_fftz_2=ceil(double(NX*NY)/double(threads_per_block_on_fftz_2)); blocks_per_grid_on_fftz_3=ceil(double(NX*NY*NZ)/double(threads_per_block_on_fftz_3)); blocks_per_grid_on_checksum=ceil(double(CHECKSUM_TASKS)/double(threads_per_block_on_checksum)); size_sums_device=(NITER_DEFAULT+1)*sizeof(dcomplex); size_starts_device=NZ*sizeof(double); size_twiddle_device=NTOTAL*sizeof(double); size_u_device=MAXDIM*sizeof(dcomplex); size_u0_device=NTOTAL*sizeof(dcomplex); size_u1_device=NTOTAL*sizeof(dcomplex); size_y0_device=NTOTAL*sizeof(dcomplex); size_y1_device=NTOTAL*sizeof(dcomplex); size_shared_data=threads_per_block_on_checksum*sizeof(dcomplex); hipMalloc(&sums_device, size_sums_device); hipMalloc(&starts_device, size_starts_device); hipMalloc(&twiddle_device, size_twiddle_device); hipMalloc(&u_device, size_u_device); hipMalloc(&u0_device, size_u0_device); hipMalloc(&u1_device, size_u1_device); hipMalloc(&y0_device, size_y0_device); hipMalloc(&y1_device, size_y1_device); omp_set_num_threads(OMP_THREADS); } static void verify(int d1, int d2, int d3, int nt, boolean* verified, char* class_npb){ int i; double err, epsilon; /* * --------------------------------------------------------------------- * reference checksums * --------------------------------------------------------------------- */ dcomplex csum_ref[25+1]; *class_npb = 'U'; epsilon = 1.0e-12; *verified = false; if(d1 == 64 && d2 == 64 && d3 == 64 && nt == 6){ /* * --------------------------------------------------------------------- * sample size reference checksums * --------------------------------------------------------------------- */ *class_npb = 'S'; csum_ref[1] = dcomplex_create(5.546087004964E+02, 4.845363331978E+02); csum_ref[2] = dcomplex_create(5.546385409189E+02, 4.865304269511E+02); csum_ref[3] = dcomplex_create(5.546148406171E+02, 
4.883910722336E+02); csum_ref[4] = dcomplex_create(5.545423607415E+02, 4.901273169046E+02); csum_ref[5] = dcomplex_create(5.544255039624E+02, 4.917475857993E+02); csum_ref[6] = dcomplex_create(5.542683411902E+02, 4.932597244941E+02); }else if(d1 == 128 && d2 == 128 && d3 == 32 && nt == 6){ /* * --------------------------------------------------------------------- * class_npb W size reference checksums * --------------------------------------------------------------------- */ *class_npb = 'W'; csum_ref[1] = dcomplex_create(5.673612178944E+02, 5.293246849175E+02); csum_ref[2] = dcomplex_create(5.631436885271E+02, 5.282149986629E+02); csum_ref[3] = dcomplex_create(5.594024089970E+02, 5.270996558037E+02); csum_ref[4] = dcomplex_create(5.560698047020E+02, 5.260027904925E+02); csum_ref[5] = dcomplex_create(5.530898991250E+02, 5.249400845633E+02); csum_ref[6] = dcomplex_create(5.504159734538E+02, 5.239212247086E+02); }else if(d1 == 256 && d2 == 256 && d3 == 128 && nt == 6){ /* * --------------------------------------------------------------------- * class_npb A size reference checksums * --------------------------------------------------------------------- */ *class_npb = 'A'; csum_ref[1] = dcomplex_create(5.046735008193E+02, 5.114047905510E+02); csum_ref[2] = dcomplex_create(5.059412319734E+02, 5.098809666433E+02); csum_ref[3] = dcomplex_create(5.069376896287E+02, 5.098144042213E+02); csum_ref[4] = dcomplex_create(5.077892868474E+02, 5.101336130759E+02); csum_ref[5] = dcomplex_create(5.085233095391E+02, 5.104914655194E+02); csum_ref[6] = dcomplex_create(5.091487099959E+02, 5.107917842803E+02); }else if(d1 == 512 && d2 == 256 && d3 == 256 && nt == 20){ /* * -------------------------------------------------------------------- * class_npb B size reference checksums * --------------------------------------------------------------------- */ *class_npb = 'B'; csum_ref[1] = dcomplex_create(5.177643571579E+02, 5.077803458597E+02); csum_ref[2] = dcomplex_create(5.154521291263E+02, 5.088249431599E+02); csum_ref[3] = dcomplex_create(5.146409228649E+02, 5.096208912659E+02); csum_ref[4] = dcomplex_create(5.142378756213E+02, 5.101023387619E+02); csum_ref[5] = dcomplex_create(5.139626667737E+02, 5.103976610617E+02); csum_ref[6] = dcomplex_create(5.137423460082E+02, 5.105948019802E+02); csum_ref[7] = dcomplex_create(5.135547056878E+02, 5.107404165783E+02); csum_ref[8] = dcomplex_create(5.133910925466E+02, 5.108576573661E+02); csum_ref[9] = dcomplex_create(5.132470705390E+02, 5.109577278523E+02); csum_ref[10] = dcomplex_create(5.131197729984E+02, 5.110460304483E+02); csum_ref[11] = dcomplex_create(5.130070319283E+02, 5.111252433800E+02); csum_ref[12] = dcomplex_create(5.129070537032E+02, 5.111968077718E+02); csum_ref[13] = dcomplex_create(5.128182883502E+02, 5.112616233064E+02); csum_ref[14] = dcomplex_create(5.127393733383E+02, 5.113203605551E+02); csum_ref[15] = dcomplex_create(5.126691062020E+02, 5.113735928093E+02); csum_ref[16] = dcomplex_create(5.126064276004E+02, 5.114218460548E+02); csum_ref[17] = dcomplex_create(5.125504076570E+02, 5.114656139760E+02); csum_ref[18] = dcomplex_create(5.125002331720E+02, 5.115053595966E+02); csum_ref[19] = dcomplex_create(5.124551951846E+02, 5.115415130407E+02); csum_ref[20] = dcomplex_create(5.124146770029E+02, 5.115744692211E+02); }else if(d1 == 512 && d2 == 512 && d3 == 512 && nt == 20){ /* * --------------------------------------------------------------------- * class_npb C size reference checksums * 
--------------------------------------------------------------------- */ *class_npb = 'C'; csum_ref[1] = dcomplex_create(5.195078707457E+02, 5.149019699238E+02); csum_ref[2] = dcomplex_create(5.155422171134E+02, 5.127578201997E+02); csum_ref[3] = dcomplex_create(5.144678022222E+02, 5.122251847514E+02); csum_ref[4] = dcomplex_create(5.140150594328E+02, 5.121090289018E+02); csum_ref[5] = dcomplex_create(5.137550426810E+02, 5.121143685824E+02); csum_ref[6] = dcomplex_create(5.135811056728E+02, 5.121496764568E+02); csum_ref[7] = dcomplex_create(5.134569343165E+02, 5.121870921893E+02); csum_ref[8] = dcomplex_create(5.133651975661E+02, 5.122193250322E+02); csum_ref[9] = dcomplex_create(5.132955192805E+02, 5.122454735794E+02); csum_ref[10] = dcomplex_create(5.132410471738E+02, 5.122663649603E+02); csum_ref[11] = dcomplex_create(5.131971141679E+02, 5.122830879827E+02); csum_ref[12] = dcomplex_create(5.131605205716E+02, 5.122965869718E+02); csum_ref[13] = dcomplex_create(5.131290734194E+02, 5.123075927445E+02); csum_ref[14] = dcomplex_create(5.131012720314E+02, 5.123166486553E+02); csum_ref[15] = dcomplex_create(5.130760908195E+02, 5.123241541685E+02); csum_ref[16] = dcomplex_create(5.130528295923E+02, 5.123304037599E+02); csum_ref[17] = dcomplex_create(5.130310107773E+02, 5.123356167976E+02); csum_ref[18] = dcomplex_create(5.130103090133E+02, 5.123399592211E+02); csum_ref[19] = dcomplex_create(5.129905029333E+02, 5.123435588985E+02); csum_ref[20] = dcomplex_create(5.129714421109E+02, 5.123465164008E+02); }else if(d1 == 2048 && d2 == 1024 && d3 == 1024 && nt == 25){ /* * --------------------------------------------------------------------- * class_npb D size reference checksums * --------------------------------------------------------------------- */ *class_npb = 'D'; csum_ref[1] = dcomplex_create(5.122230065252E+02, 5.118534037109E+02); csum_ref[2] = dcomplex_create(5.120463975765E+02, 5.117061181082E+02); csum_ref[3] = dcomplex_create(5.119865766760E+02, 5.117096364601E+02); csum_ref[4] = dcomplex_create(5.119518799488E+02, 5.117373863950E+02); csum_ref[5] = dcomplex_create(5.119269088223E+02, 5.117680347632E+02); csum_ref[6] = dcomplex_create(5.119082416858E+02, 5.117967875532E+02); csum_ref[7] = dcomplex_create(5.118943814638E+02, 5.118225281841E+02); csum_ref[8] = dcomplex_create(5.118842385057E+02, 5.118451629348E+02); csum_ref[9] = dcomplex_create(5.118769435632E+02, 5.118649119387E+02); csum_ref[10] = dcomplex_create(5.118718203448E+02, 5.118820803844E+02); csum_ref[11] = dcomplex_create(5.118683569061E+02, 5.118969781011E+02); csum_ref[12] = dcomplex_create(5.118661708593E+02, 5.119098918835E+02); csum_ref[13] = dcomplex_create(5.118649768950E+02, 5.119210777066E+02); csum_ref[14] = dcomplex_create(5.118645605626E+02, 5.119307604484E+02); csum_ref[15] = dcomplex_create(5.118647586618E+02, 5.119391362671E+02); csum_ref[16] = dcomplex_create(5.118654451572E+02, 5.119463757241E+02); csum_ref[17] = dcomplex_create(5.118665212451E+02, 5.119526269238E+02); csum_ref[18] = dcomplex_create(5.118679083821E+02, 5.119580184108E+02); csum_ref[19] = dcomplex_create(5.118695433664E+02, 5.119626617538E+02); csum_ref[20] = dcomplex_create(5.118713748264E+02, 5.119666538138E+02); csum_ref[21] = dcomplex_create(5.118733606701E+02, 5.119700787219E+02); csum_ref[22] = dcomplex_create(5.118754661974E+02, 5.119730095953E+02); csum_ref[23] = dcomplex_create(5.118776626738E+02, 5.119755100241E+02); csum_ref[24] = dcomplex_create(5.118799262314E+02, 5.119776353561E+02); csum_ref[25] = 
dcomplex_create(5.118822370068E+02, 5.119794338060E+02); }else if(d1 == 4096 && d2 == 2048 && d3 == 2048 && nt == 25){ /* * --------------------------------------------------------------------- * class_npb E size reference checksums * --------------------------------------------------------------------- */ *class_npb = 'E'; csum_ref[1] = dcomplex_create(5.121601045346E+02, 5.117395998266E+02); csum_ref[2] = dcomplex_create(5.120905403678E+02, 5.118614716182E+02); csum_ref[3] = dcomplex_create(5.120623229306E+02, 5.119074203747E+02); csum_ref[4] = dcomplex_create(5.120438418997E+02, 5.119345900733E+02); csum_ref[5] = dcomplex_create(5.120311521872E+02, 5.119551325550E+02); csum_ref[6] = dcomplex_create(5.120226088809E+02, 5.119720179919E+02); csum_ref[7] = dcomplex_create(5.120169296534E+02, 5.119861371665E+02); csum_ref[8] = dcomplex_create(5.120131225172E+02, 5.119979364402E+02); csum_ref[9] = dcomplex_create(5.120104767108E+02, 5.120077674092E+02); csum_ref[10] = dcomplex_create(5.120085127969E+02, 5.120159443121E+02); csum_ref[11] = dcomplex_create(5.120069224127E+02, 5.120227453670E+02); csum_ref[12] = dcomplex_create(5.120055158164E+02, 5.120284096041E+02); csum_ref[13] = dcomplex_create(5.120041820159E+02, 5.120331373793E+02); csum_ref[14] = dcomplex_create(5.120028605402E+02, 5.120370938679E+02); csum_ref[15] = dcomplex_create(5.120015223011E+02, 5.120404138831E+02); csum_ref[16] = dcomplex_create(5.120001570022E+02, 5.120432068837E+02); csum_ref[17] = dcomplex_create(5.119987650555E+02, 5.120455615860E+02); csum_ref[18] = dcomplex_create(5.119973525091E+02, 5.120475499442E+02); csum_ref[19] = dcomplex_create(5.119959279472E+02, 5.120492304629E+02); csum_ref[20] = dcomplex_create(5.119945006558E+02, 5.120506508902E+02); csum_ref[21] = dcomplex_create(5.119930795911E+02, 5.120518503782E+02); csum_ref[22] = dcomplex_create(5.119916728462E+02, 5.120528612016E+02); csum_ref[23] = dcomplex_create(5.119902874185E+02, 5.120537101195E+02); csum_ref[24] = dcomplex_create(5.119889291565E+02, 5.120544194514E+02); csum_ref[25] = dcomplex_create(5.119876028049E+02, 5.120550079284E+02); } if(*class_npb != 'U'){ *verified = TRUE; for(i = 1; i <= nt; i++){ err = dcomplex_abs(dcomplex_div(dcomplex_sub(sums[i], csum_ref[i]), csum_ref[i])); if(!(err <= epsilon)){ *verified = FALSE; break; } } } if(*class_npb != 'U'){ if(*verified){ printf(" Result verification successful\n"); }else{ printf(" Result verification failed\n"); } } printf(" class_npb = %c\n", *class_npb); } __device__ void vranlc_device(int n, double* x_seed, double a, double y[]){ int i; double x,t1,t2,t3,t4,a1,a2,x1,x2,z; t1 = R23 * a; a1 = (int)t1; a2 = a - T23 * a1; x = *x_seed; for(i=0; i<n; i++){ t1 = R23 * x; x1 = (int)t1; x2 = x - T23 * x1; t1 = a1 * x2 + a2 * x1; t2 = (int)(R23 * t1); z = t1 - T23 * t2; t3 = T23 * z + a2 * x2; t4 = (int)(R46 * t3); x = t3 - T46 * t4; y[i] = R46 * x; } *x_seed = x; }
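The .hip source above and the .cu source that follows implement the same FT benchmark; the differences between them are mechanical: the CUDA runtime calls (cudaMalloc, cudaMemcpy, cudaDeviceSynchronize, cudaFree) appear as their hip* counterparts, and every triple-chevron kernel launch appears as a hipLaunchKernelGGL call. A minimal sketch of that launch mapping is given below; scale_kernel, launch_scale, and the 256-thread configuration are illustrative names and values, not part of the benchmark, and the HIP form is kept inside a comment so the snippet compiles as plain CUDA.

#include <cuda_runtime.h>

__global__ void scale_kernel(double* data, double factor, int n){
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if(i < n){ data[i] *= factor; }
}

void launch_scale(double* data_device, int n){
    int threads = 256;
    int blocks = (n + threads - 1) / threads;
    /* CUDA launch, as used throughout the .cu file below */
    scale_kernel<<<blocks, threads>>>(data_device, 2.0, n);
    cudaDeviceSynchronize();
    /*
     * equivalent HIP launch, as used in the .hip file above:
     * hipLaunchKernelGGL(scale_kernel, dim3(blocks), dim3(threads), 0, 0, data_device, 2.0, n);
     * hipDeviceSynchronize();
     * the two extra arguments (0, 0) are the dynamic shared-memory size and the stream,
     * which the checksum launch above sets to (size_shared_data, 0).
     */
}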
2c9baa11ac2ec2c7ee08e3103adfc1e76fa38c6c.cu
/* * ------------------------------------------------------------------------------ * * MIT License * * Copyright (c) 2021 Parallel Applications Modelling Group - GMAP * GMAP website: https://gmap.pucrs.br * * Pontifical Catholic University of Rio Grande do Sul (PUCRS) * Av. Ipiranga, 6681, Porto Alegre - Brazil, 90619-900 * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to deal * in the Software without restriction, including without limitation the rights * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell * copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in all * copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. * * ------------------------------------------------------------------------------ * * The original NPB 3.4 version was written in Fortran and belongs to: * http://www.nas.nasa.gov/Software/NPB/ * * Authors of the Fortran code: * D. Bailey * W. Saphir * * ------------------------------------------------------------------------------ * * The serial C++ version is a translation of the original NPB 3.4 * Serial C++ version: https://github.com/GMAP/NPB-CPP/tree/master/NPB-SER * * Authors of the C++ code: * Dalvan Griebler <[email protected]> * Gabriell Araujo <[email protected]> * Júnior Löff <[email protected]> * * ------------------------------------------------------------------------------ * * The CUDA version is a parallel implementation of the serial C++ version * CUDA version: https://github.com/GMAP/NPB-GPU/tree/master/CUDA * * Authors of the CUDA code: * Gabriell Araujo <[email protected]> * * ------------------------------------------------------------------------------ */ #include <omp.h> #include <cuda.h> #include "../common/npb-CPP.hpp" #include "npbparams.hpp" /* * --------------------------------------------------------------------- * u0, u1, u2 are the main arrays in the problem. * depending on the decomposition, these arrays will have different * dimensions. to accomodate all possibilities, we allocate them as * one-dimensional arrays and pass them to subroutines for different * views * - u0 contains the initial (transformed) initial condition * - u1 and u2 are working arrays * - twiddle contains exponents for the time evolution operator. * --------------------------------------------------------------------- * large arrays are in common so that they are allocated on the * heap rather than the stack. this common block is not * referenced directly anywhere else. padding is to avoid accidental * cache problems, since all array sizes are powers of two. * --------------------------------------------------------------------- * we need a bunch of logic to keep track of how * arrays are laid out. * * note: this serial version is the derived from the parallel 0D case * of the ft NPB. 
* the computation proceeds logically as * * set up initial conditions * fftx(1) * transpose (1->2) * ffty(2) * transpose (2->3) * fftz(3) * time evolution * fftz(3) * transpose (3->2) * ffty(2) * transpose (2->1) * fftx(1) * compute residual(1) * * for the 0D, 1D, 2D strategies, the layouts look like xxx * * 0D 1D 2D * 1: xyz xyz xyz * 2: xyz xyz yxz * 3: xyz zyx zxy * the array dimensions are stored in dims(coord, phase) * --------------------------------------------------------------------- * if processor array is 1x1 -> 0D grid decomposition * * cache blocking params. these values are good for most * RISC processors. * FFT parameters: * fftblock controls how many ffts are done at a time. * the default is appropriate for most cache-based machines * on vector machines, the FFT can be vectorized with vector * length equal to the block size, so the block size should * be as large as possible. this is the size of the smallest * dimension of the problem: 128 for class A, 256 for class B * and 512 for class C. * --------------------------------------------------------------------- */ #define FFTBLOCK_DEFAULT (DEFAULT_BEHAVIOR) #define FFTBLOCKPAD_DEFAULT (DEFAULT_BEHAVIOR) #define FFTBLOCK (FFTBLOCK_DEFAULT) #define FFTBLOCKPAD (FFTBLOCKPAD_DEFAULT) #define SEED (314159265.0) #define A (1220703125.0) #define PI (3.141592653589793238) #define ALPHA (1.0e-6) #define AP (-4.0*ALPHA*PI*PI) #define OMP_THREADS (3) #define TASK_INDEXMAP (0) #define TASK_INITIAL_CONDITIONS (1) #define TASK_INIT_UI (2) #define PROFILING_TOTAL_TIME (0) #define PROFILING_INDEXMAP (1) #define PROFILING_INITIAL_CONDITIONS (2) #define PROFILING_INIT_UI (3) #define PROFILING_EVOLVE (4) #define PROFILING_FFTX_1 (5) #define PROFILING_FFTX_2 (6) #define PROFILING_FFTX_3 (7) #define PROFILING_FFTY_1 (8) #define PROFILING_FFTY_2 (9) #define PROFILING_FFTY_3 (10) #define PROFILING_FFTZ_1 (11) #define PROFILING_FFTZ_2 (12) #define PROFILING_FFTZ_3 (13) #define PROFILING_CHECKSUM (14) #define PROFILING_INIT (15) #define CHECKSUM_TASKS (1024) /* global variables */ #if defined(DO_NOT_ALLOCATE_ARRAYS_WITH_DYNAMIC_MEMORY_AND_AS_SINGLE_DIMENSION) static dcomplex sums[NITER_DEFAULT+1]; static double twiddle[NTOTAL]; static dcomplex u[MAXDIM]; static dcomplex u0[NTOTAL]; static dcomplex u1[NTOTAL]; static int dims[3]; #else static dcomplex (*sums)=(dcomplex*)malloc(sizeof(dcomplex)*(NITER_DEFAULT+1)); static double (*twiddle)=(double*)malloc(sizeof(double)*(NTOTAL)); static dcomplex (*u)=(dcomplex*)malloc(sizeof(dcomplex)*(MAXDIM)); static dcomplex (*u0)=(dcomplex*)malloc(sizeof(dcomplex)*(NTOTAL)); static dcomplex (*u1)=(dcomplex*)malloc(sizeof(dcomplex)*(NTOTAL)); static int (*dims)=(int*)malloc(sizeof(int)*(3)); #endif static int niter; /* gpu variables */ double* starts_device; double* twiddle_device; dcomplex* sums_device; dcomplex* u_device; dcomplex* u0_device; dcomplex* u1_device; dcomplex* u2_device; dcomplex* y0_device; dcomplex* y1_device; size_t size_sums_device; size_t size_starts_device; size_t size_twiddle_device; size_t size_u_device; size_t size_u0_device; size_t size_u1_device; size_t size_y0_device; size_t size_y1_device; size_t size_shared_data; int blocks_per_grid_on_compute_indexmap; int blocks_per_grid_on_compute_initial_conditions; int blocks_per_grid_on_init_ui; int blocks_per_grid_on_evolve; int blocks_per_grid_on_fftx_1; int blocks_per_grid_on_fftx_2; int blocks_per_grid_on_fftx_3; int blocks_per_grid_on_ffty_1; int blocks_per_grid_on_ffty_2; int blocks_per_grid_on_ffty_3; int blocks_per_grid_on_fftz_1; int 
blocks_per_grid_on_fftz_2; int blocks_per_grid_on_fftz_3; int blocks_per_grid_on_checksum; int threads_per_block_on_compute_indexmap; int threads_per_block_on_compute_initial_conditions; int threads_per_block_on_init_ui; int threads_per_block_on_evolve; int threads_per_block_on_fftx_1; int threads_per_block_on_fftx_2; int threads_per_block_on_fftx_3; int threads_per_block_on_ffty_1; int threads_per_block_on_ffty_2; int threads_per_block_on_ffty_3; int threads_per_block_on_fftz_1; int threads_per_block_on_fftz_2; int threads_per_block_on_fftz_3; int threads_per_block_on_checksum; int gpu_device_id; int total_devices; cudaDeviceProp gpu_device_properties; extern __shared__ double extern_share_data[]; /* function declarations */ static void cffts1_gpu(const int is, dcomplex u[], dcomplex x_in[], dcomplex x_out[], dcomplex y0[], dcomplex y1[]); __global__ void cffts1_gpu_kernel_1(dcomplex x_in[], dcomplex y0[]); __global__ void cffts1_gpu_kernel_2(const int is, dcomplex y0[], dcomplex y1[], dcomplex u_device[]); __global__ void cffts1_gpu_kernel_3(dcomplex x_out[], dcomplex y0[]); static void cffts2_gpu(int is, dcomplex u[], dcomplex x_in[], dcomplex x_out[], dcomplex y0[], dcomplex y1[]); __global__ void cffts2_gpu_kernel_1(dcomplex x_in[], dcomplex y0[]); __global__ void cffts2_gpu_kernel_2(const int is, dcomplex y0[], dcomplex y1[], dcomplex u_device[]); __global__ void cffts2_gpu_kernel_3(dcomplex x_out[], dcomplex y0[]); static void cffts3_gpu(int is, dcomplex u[], dcomplex x_in[], dcomplex x_out[], dcomplex y0[], dcomplex y1[]); __device__ void cffts3_gpu_cfftz_device(const int is, int m, int n, dcomplex x[], dcomplex y[], dcomplex u_device[], int index_arg, int size_arg); __device__ void cffts3_gpu_fftz2_device(const int is, int l, int m, int n, dcomplex u[], dcomplex x[], dcomplex y[], int index_arg, int size_arg); __global__ void cffts3_gpu_kernel_1(dcomplex x_in[], dcomplex y0[]); __global__ void cffts3_gpu_kernel_2(const int is, dcomplex y0[], dcomplex y1[], dcomplex u_device[]); __global__ void cffts3_gpu_kernel_3(dcomplex x_out[], dcomplex y0[]); static void checksum_gpu(int iteration, dcomplex u1[]); __global__ void checksum_gpu_kernel(int iteration, dcomplex u1[], dcomplex sums[]); static void compute_indexmap_gpu(double twiddle[]); __global__ void compute_indexmap_gpu_kernel(double twiddle[]); static void compute_initial_conditions_gpu(dcomplex u0[]); __global__ void compute_initial_conditions_gpu_kernel(dcomplex u0[], double starts[]); static void evolve_gpu(dcomplex u0[], dcomplex u1[], double twiddle[]); __global__ void evolve_gpu_kernel(dcomplex u0[], dcomplex u1[], double twiddle[]); static void fft_gpu(int dir, dcomplex x1[], dcomplex x2[]); static void fft_init_gpu(int n); static int ilog2(int n); __device__ int ilog2_device(int n); static void init_ui_gpu(dcomplex u0[], dcomplex u1[], double twiddle[]); __global__ void init_ui_gpu_kernel(dcomplex u0[], dcomplex u1[], double twiddle[]); static void ipow46(double a, int exponent, double* result); __device__ void ipow46_device(double a, int exponent, double* result); __device__ double randlc_device(double* x, double a); static void release_gpu(); static void setup(); static void setup_gpu(); static void verify (int d1, int d2, int d3, int nt, boolean* verified, char* class_npb); __device__ void vranlc_device(int n, double* x_seed, double a, double y[]); /* ft */ int main(int argc, char** argv){ #if defined(DO_NOT_ALLOCATE_ARRAYS_WITH_DYNAMIC_MEMORY_AND_AS_SINGLE_DIMENSION) printf(" 
DO_NOT_ALLOCATE_ARRAYS_WITH_DYNAMIC_MEMORY_AND_AS_SINGLE_DIMENSION mode on\n"); #endif #if defined(PROFILING) printf(" PROFILING mode on\n"); #endif int iter=0; double total_time, mflops; boolean verified; char class_npb; /* * --------------------------------------------------------------------- * run the entire problem once to make sure all data is touched. * this reduces variable startup costs, which is important for such a * short benchmark. the other NPB 2 implementations are similar. * --------------------------------------------------------------------- */ setup(); setup_gpu(); init_ui_gpu(u0_device, u1_device, twiddle_device); #pragma omp parallel { if(omp_get_thread_num()==TASK_INDEXMAP){ compute_indexmap_gpu(twiddle_device); }else if(omp_get_thread_num()==TASK_INITIAL_CONDITIONS){ compute_initial_conditions_gpu(u1_device); }else if(omp_get_thread_num()==TASK_INIT_UI){ fft_init_gpu(MAXDIM); } }cudaDeviceSynchronize(); fft_gpu(1, u1_device, u0_device); /* * --------------------------------------------------------------------- * start over from the beginning. note that all operations must * be timed, in contrast to other benchmarks. * --------------------------------------------------------------------- */ timer_clear(PROFILING_TOTAL_TIME); #if defined(PROFILING) timer_clear(PROFILING_INDEXMAP); timer_clear(PROFILING_INITIAL_CONDITIONS); timer_clear(PROFILING_INITIAL_CONDITIONS); timer_clear(PROFILING_EVOLVE); timer_clear(PROFILING_FFTX_1); timer_clear(PROFILING_FFTX_2); timer_clear(PROFILING_FFTX_3); timer_clear(PROFILING_FFTY_1); timer_clear(PROFILING_FFTY_2); timer_clear(PROFILING_FFTY_3); timer_clear(PROFILING_FFTZ_1); timer_clear(PROFILING_FFTZ_2); timer_clear(PROFILING_FFTZ_3); timer_clear(PROFILING_CHECKSUM); #endif timer_start(PROFILING_TOTAL_TIME); #pragma omp parallel { if(omp_get_thread_num()==TASK_INDEXMAP){ compute_indexmap_gpu(twiddle_device); }else if(omp_get_thread_num()==TASK_INITIAL_CONDITIONS){ compute_initial_conditions_gpu(u1_device); }else if(omp_get_thread_num()==TASK_INIT_UI){ fft_init_gpu(MAXDIM); } }cudaDeviceSynchronize(); fft_gpu(1, u1_device, u0_device); for(iter=1; iter<=niter; iter++){ evolve_gpu(u0_device, u1_device, twiddle_device); fft_gpu(-1, u1_device, u1_device); checksum_gpu(iter, u1_device); } cudaMemcpy(sums, sums_device, size_sums_device, cudaMemcpyDeviceToHost); for(iter=1; iter<=niter; iter++){ printf("T = %5d Checksum = %22.12e %22.12e\n", iter, sums[iter].real, sums[iter].imag); } verify(NX, NY, NZ, niter, &verified, &class_npb); timer_stop(PROFILING_TOTAL_TIME); total_time = timer_read(PROFILING_TOTAL_TIME); if(total_time != 0.0){ mflops = 1.0e-6 * ((double)(NTOTAL)) * (14.8157 + 7.19641 * log((double)(NTOTAL)) + (5.23518 + 7.21113 * log((double)(NTOTAL)))*niter) / total_time; }else{ mflops = 0.0; } char gpu_config[256]; char gpu_config_string[2048]; #if defined(PROFILING) sprintf(gpu_config, "%5s\t%25s\t%25s\t%25s\n", "GPU Kernel", "Threads Per Block", "Time in Seconds", "Time in Percentage"); strcpy(gpu_config_string, gpu_config); sprintf(gpu_config, "%29s\t%25d\t%25f\t%24.2f%%\n", " indexmap", threads_per_block_on_compute_indexmap, timer_read(PROFILING_INDEXMAP), (timer_read(PROFILING_INDEXMAP)*100/timer_read(PROFILING_TOTAL_TIME))); strcat(gpu_config_string, gpu_config); sprintf(gpu_config, "%29s\t%25d\t%25f\t%24.2f%%\n", " initial conditions", threads_per_block_on_compute_initial_conditions, timer_read(PROFILING_INITIAL_CONDITIONS), (timer_read(PROFILING_INITIAL_CONDITIONS)*100/timer_read(PROFILING_TOTAL_TIME))); 
strcat(gpu_config_string, gpu_config); sprintf(gpu_config, "%29s\t%25d\t%25f\t%24.2f%%\n", " init ui", threads_per_block_on_init_ui, timer_read(PROFILING_INIT_UI), (timer_read(PROFILING_INIT_UI)*100/timer_read(PROFILING_TOTAL_TIME))); strcat(gpu_config_string, gpu_config); sprintf(gpu_config, "%29s\t%25d\t%25f\t%24.2f%%\n", " evolve", threads_per_block_on_evolve, timer_read(PROFILING_EVOLVE), (timer_read(PROFILING_EVOLVE)*100/timer_read(PROFILING_TOTAL_TIME))); strcat(gpu_config_string, gpu_config); sprintf(gpu_config, "%29s\t%25d\t%25f\t%24.2f%%\n", " fftx 1", threads_per_block_on_fftx_1, timer_read(PROFILING_FFTX_1), (timer_read(PROFILING_FFTX_1)*100/timer_read(PROFILING_TOTAL_TIME))); strcat(gpu_config_string, gpu_config); sprintf(gpu_config, "%29s\t%25d\t%25f\t%24.2f%%\n", " fftx 2", threads_per_block_on_fftx_2, timer_read(PROFILING_FFTX_2), (timer_read(PROFILING_FFTX_2)*100/timer_read(PROFILING_TOTAL_TIME))); strcat(gpu_config_string, gpu_config); sprintf(gpu_config, "%29s\t%25d\t%25f\t%24.2f%%\n", " fftx 3", threads_per_block_on_fftx_3, timer_read(PROFILING_FFTX_3), (timer_read(PROFILING_FFTX_3)*100/timer_read(PROFILING_TOTAL_TIME))); strcat(gpu_config_string, gpu_config); sprintf(gpu_config, "%29s\t%25d\t%25f\t%24.2f%%\n", " ffty 1", threads_per_block_on_ffty_1, timer_read(PROFILING_FFTY_1), (timer_read(PROFILING_FFTY_1)*100/timer_read(PROFILING_TOTAL_TIME))); strcat(gpu_config_string, gpu_config); sprintf(gpu_config, "%29s\t%25d\t%25f\t%24.2f%%\n", " ffty 2", threads_per_block_on_ffty_2, timer_read(PROFILING_FFTY_2), (timer_read(PROFILING_FFTY_2)*100/timer_read(PROFILING_TOTAL_TIME))); strcat(gpu_config_string, gpu_config); sprintf(gpu_config, "%29s\t%25d\t%25f\t%24.2f%%\n", " ffty 3", threads_per_block_on_ffty_3, timer_read(PROFILING_FFTY_3), (timer_read(PROFILING_FFTY_3)*100/timer_read(PROFILING_TOTAL_TIME))); strcat(gpu_config_string, gpu_config); sprintf(gpu_config, "%29s\t%25d\t%25f\t%24.2f%%\n", " fftz 1", threads_per_block_on_fftz_1, timer_read(PROFILING_FFTZ_1), (timer_read(PROFILING_FFTZ_1)*100/timer_read(PROFILING_TOTAL_TIME))); strcat(gpu_config_string, gpu_config); sprintf(gpu_config, "%29s\t%25d\t%25f\t%24.2f%%\n", " fftz 2", threads_per_block_on_fftz_2, timer_read(PROFILING_FFTZ_2), (timer_read(PROFILING_FFTZ_2)*100/timer_read(PROFILING_TOTAL_TIME))); strcat(gpu_config_string, gpu_config); sprintf(gpu_config, "%29s\t%25d\t%25f\t%24.2f%%\n", " fftz 3", threads_per_block_on_fftz_3, timer_read(PROFILING_FFTZ_3), (timer_read(PROFILING_FFTZ_3)*100/timer_read(PROFILING_TOTAL_TIME))); strcat(gpu_config_string, gpu_config); sprintf(gpu_config, "%29s\t%25d\t%25f\t%24.2f%%\n", " checksum", threads_per_block_on_checksum, timer_read(PROFILING_CHECKSUM), (timer_read(PROFILING_CHECKSUM)*100/timer_read(PROFILING_TOTAL_TIME))); strcat(gpu_config_string, gpu_config); #else sprintf(gpu_config, "%5s\t%25s\n", "GPU Kernel", "Threads Per Block"); strcpy(gpu_config_string, gpu_config); sprintf(gpu_config, "%29s\t%25d\n", " indexmap", threads_per_block_on_compute_indexmap); strcat(gpu_config_string, gpu_config); sprintf(gpu_config, "%29s\t%25d\n", " initial conditions", threads_per_block_on_compute_initial_conditions); strcat(gpu_config_string, gpu_config); sprintf(gpu_config, "%29s\t%25d\n", " init ui", threads_per_block_on_init_ui); strcat(gpu_config_string, gpu_config); sprintf(gpu_config, "%29s\t%25d\n", " evolve", threads_per_block_on_evolve); strcat(gpu_config_string, gpu_config); sprintf(gpu_config, "%29s\t%25d\n", " fftx 1", threads_per_block_on_fftx_1); strcat(gpu_config_string, 
gpu_config); sprintf(gpu_config, "%29s\t%25d\n", " fftx 2", threads_per_block_on_fftx_2); strcat(gpu_config_string, gpu_config); sprintf(gpu_config, "%29s\t%25d\n", " fftx 3", threads_per_block_on_fftx_3); strcat(gpu_config_string, gpu_config); sprintf(gpu_config, "%29s\t%25d\n", " ffty 1", threads_per_block_on_ffty_1); strcat(gpu_config_string, gpu_config); sprintf(gpu_config, "%29s\t%25d\n", " ffty 2", threads_per_block_on_ffty_2); strcat(gpu_config_string, gpu_config); sprintf(gpu_config, "%29s\t%25d\n", " ffty 3", threads_per_block_on_ffty_3); strcat(gpu_config_string, gpu_config); sprintf(gpu_config, "%29s\t%25d\n", " fftz 1", threads_per_block_on_fftz_1); strcat(gpu_config_string, gpu_config); sprintf(gpu_config, "%29s\t%25d\n", " fftz 2", threads_per_block_on_fftz_2); strcat(gpu_config_string, gpu_config); sprintf(gpu_config, "%29s\t%25d\n", " fftz 3", threads_per_block_on_fftz_3); strcat(gpu_config_string, gpu_config); sprintf(gpu_config, "%29s\t%25d\n", " checksum", threads_per_block_on_checksum); strcat(gpu_config_string, gpu_config); #endif c_print_results((char*)"FT", class_npb, NX, NY, NZ, niter, total_time, mflops, (char*)" floating point", verified, (char*)NPBVERSION, (char*)COMPILETIME, (char*)COMPILERVERSION, (char*)LIBVERSION, (char*)CPU_MODEL, (char*)gpu_device_properties.name, (char*)gpu_config_string, (char*)CS1, (char*)CS2, (char*)CS3, (char*)CS4, (char*)CS5, (char*)CS6, (char*)CS7); release_gpu(); return 0; } static void cffts1_gpu(const int is, dcomplex u[], dcomplex x_in[], dcomplex x_out[], dcomplex y0[], dcomplex y1[]){ #if defined(PROFILING) timer_start(PROFILING_FFTX_1); #endif cffts1_gpu_kernel_1<<<blocks_per_grid_on_fftx_1, threads_per_block_on_fftx_1>>>(x_in, y0); cudaDeviceSynchronize(); #if defined(PROFILING) timer_stop(PROFILING_FFTX_1); #endif #if defined(PROFILING) timer_start(PROFILING_FFTX_2); #endif cffts1_gpu_kernel_2<<<blocks_per_grid_on_fftx_2, threads_per_block_on_fftx_2>>>(is, y0, y1, u); cudaDeviceSynchronize(); #if defined(PROFILING) timer_stop(PROFILING_FFTX_2); #endif #if defined(PROFILING) timer_start(PROFILING_FFTX_3); #endif cffts1_gpu_kernel_3<<<blocks_per_grid_on_fftx_3, threads_per_block_on_fftx_3>>>(x_out, y0); cudaDeviceSynchronize(); #if defined(PROFILING) timer_stop(PROFILING_FFTX_3); #endif } /* * ---------------------------------------------------------------------- * y0[z][x][y] = x_in[z][y][x] * * y0[y + x*NY + z*NX*NY] = x_in[x + y*NX + z*NX*NY] * ---------------------------------------------------------------------- */ __global__ void cffts1_gpu_kernel_1(dcomplex x_in[], dcomplex y0[]){ int x_y_z = blockIdx.x * blockDim.x + threadIdx.x; if(x_y_z >= (NX*NY*NZ)){ return; } int x = x_y_z % NX; int y = (x_y_z / NX) % NY; int z = x_y_z / (NX * NY); y0[y+(x*NY)+(z*NX*NY)].real = x_in[x_y_z].real; y0[y+(x*NY)+(z*NX*NY)].imag = x_in[x_y_z].imag; } /* * ---------------------------------------------------------------------- * pattern = j + variable*NY + k*NX*NY | variable is i and transforms x axis * ---------------------------------------------------------------------- */ __global__ void cffts1_gpu_kernel_2(const int is, dcomplex gty1[], dcomplex gty2[], dcomplex u_device[]){ int y_z = blockIdx.x * blockDim.x + threadIdx.x; if(y_z >= (NY*NZ)){ return; } int j, k; int l, j1, i1, k1; int n1, li, lj, lk, ku, i11, i12, i21, i22; j = y_z % NY; /* j = y */ k = (y_z / NY) % NZ; /* k = z */ const int logd1 = ilog2_device(NX); double uu1_real, x11_real, x21_real; double uu1_imag, x11_imag, x21_imag; double uu2_real, x12_real, x22_real; double 
uu2_imag, x12_imag, x22_imag; double temp_real, temp2_real; double temp_imag, temp2_imag; for(l=1; l<=logd1; l+=2){ n1 = NX / 2; lk = 1 << (l - 1); li = 1 << (logd1 - l); lj = 2 * lk; ku = li; for(i1=0; i1<=li-1; i1++){ for(k1=0; k1<=lk-1; k1++){ i11 = i1 * lk; i12 = i11 + n1; i21 = i1 * lj; i22 = i21 + lk; uu1_real = u_device[ku+i1].real; uu1_imag = is*u_device[ku+i1].imag; /* gty1[k][i11+k1][j] */ x11_real = gty1[j + (i11+k1)*NY + k*NX*NY].real; x11_imag = gty1[j + (i11+k1)*NY + k*NX*NY].imag; /* gty1[k][i12+k1][j] */ x21_real = gty1[j + (i12+k1)*NY + k*NX*NY].real; x21_imag = gty1[j + (i12+k1)*NY + k*NX*NY].imag; /* gty2[k][i21+k1][j] */ gty2[j + (i21+k1)*NY + k*NX*NY].real = x11_real + x21_real; gty2[j + (i21+k1)*NY + k*NX*NY].imag = x11_imag + x21_imag; temp_real = x11_real - x21_real; temp_imag = x11_imag - x21_imag; /* gty2[k][i22+k1][j] */ gty2[j + (i22+k1)*NY + k*NX*NY].real = (uu1_real)*(temp_real) - (uu1_imag)*(temp_imag); gty2[j + (i22+k1)*NY + k*NX*NY].imag = (uu1_real)*(temp_imag) + (uu1_imag)*(temp_real); } } if(l==logd1){ for(j1=0; j1<NX; j1++){ /* gty1[k][j1][j] */ gty1[j + j1*NY + k*NX*NY].real = gty2[j + j1*NY + k*NX*NY].real; gty1[j + j1*NY + k*NX*NY].imag = gty2[j + j1*NY + k*NX*NY].imag; } }else{ n1 = NX / 2; lk = 1 << (l+1 - 1); li = 1 << (logd1 - (l+1)); lj = 2 * lk; ku = li; for(i1=0; i1<=li-1; i1++){ for(k1=0; k1<=lk-1; k1++){ i11 = i1 * lk; i12 = i11 + n1; i21 = i1 * lj; i22 = i21 + lk; uu2_real = u_device[ku+i1].real; uu2_imag = is*u_device[ku+i1].imag; /* gty2[k][i11+k1][j] */ x12_real = gty2[j + (i11+k1)*NY + k*NX*NY].real; x12_imag = gty2[j + (i11+k1)*NY + k*NX*NY].imag; /* gty2[k][i12+k1][j] */ x22_real = gty2[j + (i12+k1)*NY + k*NX*NY].real; x22_imag = gty2[j + (i12+k1)*NY + k*NX*NY].imag; /* gty2[k][i21+k1][j] */ gty1[j + (i21+k1)*NY + k*NX*NY].real = x12_real + x22_real; gty1[j + (i21+k1)*NY + k*NX*NY].imag = x12_imag + x22_imag; temp2_real = x12_real - x22_real; temp2_imag = x12_imag - x22_imag; /* gty1[k][i22+k1][j] */ gty1[j + (i22+k1)*NY + k*NX*NY].real = (uu2_real)*(temp2_real) - (uu2_imag)*(temp2_imag); gty1[j + (i22+k1)*NY + k*NX*NY].imag = (uu2_real)*(temp2_imag) + (uu2_imag)*(temp2_real); } } } } } /* * ---------------------------------------------------------------------- * x_out[z][y][x] = y0[z][x][y] * * x_out[x + y*NX + z*NX*NY] = y0[y + x*NY + z*NX*NY] * ---------------------------------------------------------------------- */ __global__ void cffts1_gpu_kernel_3(dcomplex x_out[], dcomplex y0[]){ int x_y_z = blockIdx.x * blockDim.x + threadIdx.x; if(x_y_z >= (NX*NY*NZ)){ return; } int x = x_y_z % NX; int y = (x_y_z / NX) % NY; int z = x_y_z / (NX * NY); x_out[x_y_z].real = y0[y+(x*NY)+(z*NX*NY)].real; x_out[x_y_z].imag = y0[y+(x*NY)+(z*NX*NY)].imag; } static void cffts2_gpu(int is, dcomplex u[], dcomplex x_in[], dcomplex x_out[], dcomplex y0[], dcomplex y1[]){ #if defined(PROFILING) timer_start(PROFILING_FFTY_1); #endif cffts2_gpu_kernel_1<<<blocks_per_grid_on_ffty_1, threads_per_block_on_ffty_1>>>(x_in, y0); cudaDeviceSynchronize(); #if defined(PROFILING) timer_stop(PROFILING_FFTY_1); #endif #if defined(PROFILING) timer_start(PROFILING_FFTY_2); #endif cffts2_gpu_kernel_2<<<blocks_per_grid_on_ffty_2, threads_per_block_on_ffty_2>>>(is, y0, y1, u); cudaDeviceSynchronize(); #if defined(PROFILING) timer_stop(PROFILING_FFTY_2); #endif #if defined(PROFILING) timer_start(PROFILING_FFTY_3); #endif cffts2_gpu_kernel_3<<<blocks_per_grid_on_ffty_3, threads_per_block_on_ffty_3>>>(x_out, y0); cudaDeviceSynchronize(); #if defined(PROFILING) 
timer_stop(PROFILING_FFTY_3); #endif } /* * ---------------------------------------------------------------------- * y0[z][y][x] = x_in[z][y][x] * * y0[x + y*NX + z*NX*NY] = x_in[x + y*NX + z*NX*NY] * ---------------------------------------------------------------------- */ __global__ void cffts2_gpu_kernel_1(dcomplex x_in[], dcomplex y0[]){ int x_y_z = blockIdx.x * blockDim.x + threadIdx.x; if(x_y_z >= (NX*NY*NZ)){ return; } y0[x_y_z].real = x_in[x_y_z].real; y0[x_y_z].imag = x_in[x_y_z].imag; } /* * ---------------------------------------------------------------------- * pattern = i + variable*NX + k*NX*NY | variable is j and transforms y axis * ---------------------------------------------------------------------- */ __global__ void cffts2_gpu_kernel_2(const int is, dcomplex gty1[], dcomplex gty2[], dcomplex u_device[]){ int x_z = blockIdx.x * blockDim.x + threadIdx.x; if(x_z >= (NX*NZ)){ return; } int i, k; int l, j1, i1, k1; int n1, li, lj, lk, ku, i11, i12, i21, i22; i = x_z % NX; /* i = x */ k = (x_z / NX) % NZ; /* k = z */ const int logd2 = ilog2_device(NY); double uu1_real, x11_real, x21_real; double uu1_imag, x11_imag, x21_imag; double uu2_real, x12_real, x22_real; double uu2_imag, x12_imag, x22_imag; double temp_real, temp2_real; double temp_imag, temp2_imag; for(l=1; l<=logd2; l+=2){ n1 = NY / 2; lk = 1 << (l - 1); li = 1 << (logd2 - l); lj = 2 * lk; ku = li; for(i1=0; i1<=li-1; i1++){ for(k1=0; k1<=lk-1; k1++){ i11 = i1 * lk; i12 = i11 + n1; i21 = i1 * lj; i22 = i21 + lk; uu1_real = u_device[ku+i1].real; uu1_imag = is*u_device[ku+i1].imag; /* gty1[k][i11+k1][i] */ x11_real = gty1[i + (i11+k1)*NX + k*NX*NY].real; x11_imag = gty1[i + (i11+k1)*NX + k*NX*NY].imag; /* gty1[k][i12+k1][i] */ x21_real = gty1[i + (i12+k1)*NX + k*NX*NY].real; x21_imag = gty1[i + (i12+k1)*NX + k*NX*NY].imag; /* gty2[k][i21+k1][i] */ gty2[i + (i21+k1)*NX + k*NX*NY].real = x11_real + x21_real; gty2[i + (i21+k1)*NX + k*NX*NY].imag = x11_imag + x21_imag; temp_real = x11_real - x21_real; temp_imag = x11_imag - x21_imag; /* gty2[k][i22+k1][i] */ gty2[i + (i22+k1)*NX + k*NX*NY].real = (uu1_real)*(temp_real) - (uu1_imag)*(temp_imag); gty2[i + (i22+k1)*NX + k*NX*NY].imag = (uu1_real)*(temp_imag) + (uu1_imag)*(temp_real); } } if(l==logd2){ for(j1=0; j1<NY; j1++){ /* gty1[k][j1][i] */ gty1[i + j1*NX + k*NX*NY].real = gty2[i + j1*NX + k*NX*NY].real; gty1[i + j1*NX + k*NX*NY].imag = gty2[i + j1*NX + k*NX*NY].imag; } } else{ n1 = NY / 2; lk = 1 << (l+1 - 1); li = 1 << (logd2 - (l+1)); lj = 2 * lk; ku = li; for(i1=0; i1<=li-1; i1++){ for(k1=0; k1<=lk-1; k1++){ i11 = i1 * lk; i12 = i11 + n1; i21 = i1 * lj; i22 = i21 + lk; uu2_real = u_device[ku+i1].real; uu2_imag = is*u_device[ku+i1].imag; /* gty2[k][i11+k1][i] */ x12_real = gty2[i + (i11+k1)*NX + k*NX*NY].real; x12_imag = gty2[i + (i11+k1)*NX + k*NX*NY].imag; /* gty2[k][i12+k1][i] */ x22_real = gty2[i + (i12+k1)*NX + k*NX*NY].real; x22_imag = gty2[i + (i12+k1)*NX + k*NX*NY].imag; /* gty1[k][i21+k1][i] */ gty1[i + (i21+k1)*NX + k*NX*NY].real = x12_real + x22_real; gty1[i + (i21+k1)*NX + k*NX*NY].imag = x12_imag + x22_imag; temp2_real = x12_real - x22_real; temp2_imag = x12_imag - x22_imag; /* gty1[k][i22+k1][i] */ gty1[i + (i22+k1)*NX + k*NX*NY].real = (uu2_real)*(temp2_real) - (uu2_imag)*(temp2_imag); gty1[i + (i22+k1)*NX + k*NX*NY].imag = (uu2_real)*(temp2_imag) + (uu2_imag)*(temp2_real); } } } } } /* * ---------------------------------------------------------------------- * x_out[z][y][x] = y0[z][y][x] * * x_out[x + y*NX + z*NX*NY] = y0[x + y*NX + z*NX*NY] * 
---------------------------------------------------------------------- */ __global__ void cffts2_gpu_kernel_3(dcomplex x_out[], dcomplex y0[]){ int x_y_z = blockIdx.x * blockDim.x + threadIdx.x; if(x_y_z >= (NX*NY*NZ)){ return; } x_out[x_y_z].real = y0[x_y_z].real; x_out[x_y_z].imag = y0[x_y_z].imag; } static void cffts3_gpu(int is, dcomplex u[], dcomplex x_in[], dcomplex x_out[], dcomplex y0[], dcomplex y1[]){ #if defined(PROFILING) timer_start(PROFILING_FFTZ_1); #endif cffts3_gpu_kernel_1<<<blocks_per_grid_on_fftz_1, threads_per_block_on_fftz_1>>>(x_in, y0); cudaDeviceSynchronize(); #if defined(PROFILING) timer_stop(PROFILING_FFTZ_1); #endif #if defined(PROFILING) timer_start(PROFILING_FFTZ_2); #endif cffts3_gpu_kernel_2<<<blocks_per_grid_on_fftz_2, threads_per_block_on_fftz_2>>>(is, y0, y1, u); cudaDeviceSynchronize(); #if defined(PROFILING) timer_stop(PROFILING_FFTZ_2); #endif #if defined(PROFILING) timer_start(PROFILING_FFTZ_3); #endif cffts3_gpu_kernel_3<<<blocks_per_grid_on_fftz_3, threads_per_block_on_fftz_3>>>(x_out, y0); cudaDeviceSynchronize(); #if defined(PROFILING) timer_stop(PROFILING_FFTZ_3); #endif } /* * ---------------------------------------------------------------------- * pattern = i + j*NX + variable*NX*NY | variable is z and transforms z axis * * index_arg = i + j*NX * * size_arg = NX*NY * ---------------------------------------------------------------------- */ __device__ void cffts3_gpu_cfftz_device(const int is, int m, int n, dcomplex x[], dcomplex y[], dcomplex u_device[], int index_arg, int size_arg){ int j,l; /* * --------------------------------------------------------------------- * perform one variant of the Stockham FFT. * --------------------------------------------------------------------- */ for(l=1; l<=m; l+=2){ cffts3_gpu_fftz2_device(is, l, m, n, u_device, x, y, index_arg, size_arg); if(l==m){break;} cffts3_gpu_fftz2_device(is, l + 1, m, n, u_device, y, x, index_arg, size_arg); } /* * --------------------------------------------------------------------- * copy Y to X. * --------------------------------------------------------------------- */ if(m%2==1){ for(j=0; j<n; j++){ x[j*size_arg+index_arg].real = y[j*size_arg+index_arg].real; x[j*size_arg+index_arg].imag = y[j*size_arg+index_arg].imag; } } } /* * ---------------------------------------------------------------------- * pattern = i + j*NX + variable*NX*NY | variable is z and transforms z axis * * index_arg = i + j*NX * * size_arg = NX*NY * ---------------------------------------------------------------------- */ __device__ void cffts3_gpu_fftz2_device(const int is, int l, int m, int n, dcomplex u[], dcomplex x[], dcomplex y[], int index_arg, int size_arg){ int k,n1,li,lj,lk,ku,i,i11,i12,i21,i22; double x11real, x11imag; double x21real, x21imag; dcomplex u1; /* * --------------------------------------------------------------------- * set initial parameters. 
* --------------------------------------------------------------------- */ n1 = n / 2; lk = 1 << (l - 1); li = 1 << (m - l); lj = 2 * lk; ku = li; for(i=0; i<li; i++){ i11 = i * lk; i12 = i11 + n1; i21 = i * lj; i22 = i21 + lk; if(is>=1){ u1.real = u[ku+i].real; u1.imag = u[ku+i].imag; }else{ u1.real = u[ku+i].real; u1.imag = -u[ku+i].imag; } for(k=0; k<lk; k++){ x11real = x[(i11+k)*size_arg+index_arg].real; x11imag = x[(i11+k)*size_arg+index_arg].imag; x21real = x[(i12+k)*size_arg+index_arg].real; x21imag = x[(i12+k)*size_arg+index_arg].imag; y[(i21+k)*size_arg+index_arg].real = x11real + x21real; y[(i21+k)*size_arg+index_arg].imag = x11imag + x21imag; y[(i22+k)*size_arg+index_arg].real = u1.real * (x11real - x21real) - u1.imag * (x11imag - x21imag); y[(i22+k)*size_arg+index_arg].imag = u1.real * (x11imag - x21imag) + u1.imag * (x11real - x21real); } } } /* * ---------------------------------------------------------------------- * y0[z][y][x] = x_in[z][y][x] * * y0[x + y*NX + z*NX*NY] = x_in[x + y*NX + z*NX*NY] * ---------------------------------------------------------------------- */ __global__ void cffts3_gpu_kernel_1(dcomplex x_in[], dcomplex y0[]){ int x_y_z = blockIdx.x * blockDim.x + threadIdx.x; if(x_y_z >= (NX*NY*NZ)){ return; } y0[x_y_z].real = x_in[x_y_z].real; y0[x_y_z].imag = x_in[x_y_z].imag; } /* * ---------------------------------------------------------------------- * pattern = i + j*NX + variable*NX*NY | variable is z and transforms z axis * ---------------------------------------------------------------------- */ __global__ void cffts3_gpu_kernel_2(const int is, dcomplex gty1[], dcomplex gty2[], dcomplex u_device[]){ int x_y = blockIdx.x * blockDim.x + threadIdx.x; if(x_y >= (NX*NY)){ return; } cffts3_gpu_cfftz_device(is, ilog2_device(NZ), NZ, gty1 , gty2, u_device, x_y /* index_arg */, NX*NY /* size_arg */); } /* * ---------------------------------------------------------------------- * x_out[z][y][x] = y0[z][y][x] * * x_out[x + y*NX + z*NX*NY] = y0[x + y*NX + z*NX*NY] * ---------------------------------------------------------------------- */ __global__ void cffts3_gpu_kernel_3(dcomplex x_out[], dcomplex y0[]){ int x_y_z = blockIdx.x * blockDim.x + threadIdx.x; if(x_y_z >= (NX*NY*NZ)){ return; } x_out[x_y_z].real = y0[x_y_z].real; x_out[x_y_z].imag = y0[x_y_z].imag; } static void checksum_gpu(int iteration, dcomplex u1[]){ #if defined(PROFILING) timer_start(PROFILING_CHECKSUM); #endif checksum_gpu_kernel<<<blocks_per_grid_on_checksum, threads_per_block_on_checksum, size_shared_data>>>(iteration, u1, sums_device); #if defined(PROFILING) timer_stop(PROFILING_CHECKSUM); #endif } __global__ void checksum_gpu_kernel(int iteration, dcomplex u1[], dcomplex sums[]){ dcomplex* share_sums = (dcomplex*)(extern_share_data); int j = (blockIdx.x * blockDim.x + threadIdx.x) + 1; int q, r, s; if(j<=CHECKSUM_TASKS){ q = j % NX; r = 3*j % NY; s = 5*j % NZ; share_sums[threadIdx.x] = u1[ q + r*NX + s*NX*NY ]; }else{ share_sums[threadIdx.x] = dcomplex_create(0.0, 0.0); } __syncthreads(); for(int i=blockDim.x/2; i>0; i>>=1){ if(threadIdx.x<i){ share_sums[threadIdx.x] = dcomplex_add(share_sums[threadIdx.x], share_sums[threadIdx.x+i]); } __syncthreads(); } if(threadIdx.x==0){ share_sums[0].real = share_sums[0].real/(double)(NTOTAL); atomicAdd(&sums[iteration].real,share_sums[0].real); share_sums[0].imag = share_sums[0].imag/(double)(NTOTAL); atomicAdd(&sums[iteration].imag,share_sums[0].imag); } } static void compute_indexmap_gpu(double twiddle[]){ #if defined(PROFILING) 
timer_start(PROFILING_INDEXMAP); #endif compute_indexmap_gpu_kernel<<<blocks_per_grid_on_compute_indexmap, threads_per_block_on_compute_indexmap>>>(twiddle); #if defined(PROFILING) timer_stop(PROFILING_INDEXMAP); #endif } __global__ void compute_indexmap_gpu_kernel(double twiddle[]){ int thread_id = blockIdx.x * blockDim.x + threadIdx.x; if(thread_id>=NTOTAL){ return; } int i = thread_id % NX; int j = (thread_id / NX) % NY; int k = thread_id / (NX * NY); int kk, kk2, jj, kj2, ii; kk = ((k+NZ/2) % NZ) - NZ/2; kk2 = kk*kk; jj = ((j+NY/2) % NY) - NY/2; kj2 = jj*jj+kk2; ii = ((i+NX/2) % NX) - NX/2; twiddle[thread_id] = exp(AP*(double)(ii*ii+kj2)); } static void compute_initial_conditions_gpu(dcomplex u0[]){ #if defined(PROFILING) timer_start(PROFILING_INITIAL_CONDITIONS); #endif int z; double start, an, starts[NZ]; start = SEED; ipow46(A, 0, &an); randlc(&start, an); ipow46(A, 2*NX*NY, &an); starts[0] = start; for(z=1; z<NZ; z++){ randlc(&start, an); starts[z] = start; } cudaMemcpy(starts_device, starts, size_starts_device, cudaMemcpyHostToDevice); compute_initial_conditions_gpu_kernel<<<blocks_per_grid_on_compute_initial_conditions, threads_per_block_on_compute_initial_conditions>>>(u0, starts_device); #if defined(PROFILING) timer_stop(PROFILING_INITIAL_CONDITIONS); #endif } __global__ void compute_initial_conditions_gpu_kernel(dcomplex u0[], double starts[]){ int z = blockIdx.x * blockDim.x + threadIdx.x; if(z>=NZ){return;} double x0 = starts[z]; for(int y=0; y<NY; y++){ vranlc_device(2*NX, &x0, A, (double*)&u0[ 0 + y*NX + z*NX*NY ]); } } static void evolve_gpu(dcomplex u0[], dcomplex u1[], double twiddle[]){ #if defined(PROFILING) timer_start(PROFILING_EVOLVE); #endif evolve_gpu_kernel<<<blocks_per_grid_on_evolve, threads_per_block_on_evolve>>>(u0, u1, twiddle); cudaDeviceSynchronize(); #if defined(PROFILING) timer_stop(PROFILING_EVOLVE); #endif } __global__ void evolve_gpu_kernel(dcomplex u0[], dcomplex u1[], double twiddle[]){ int thread_id = blockIdx.x * blockDim.x + threadIdx.x; if(thread_id>=(NZ*NY*NX)){ return; } u0[thread_id] = dcomplex_mul2(u0[thread_id], twiddle[thread_id]); u1[thread_id] = u0[thread_id]; } static void fft_gpu(int dir, dcomplex x1[], dcomplex x2[]){ /* * --------------------------------------------------------------------- * note: args x1, x2 must be different arrays * note: args for cfftsx are (direction, layout, xin, xout, scratch) * xin/xout may be the same and it can be somewhat faster * if they are * --------------------------------------------------------------------- */ if(dir==1){ cffts1_gpu(1, u_device, x1, x1, y0_device, y1_device); cffts2_gpu(1, u_device, x1, x1, y0_device, y1_device); cffts3_gpu(1, u_device, x1, x2, y0_device, y1_device); }else{ cffts3_gpu(-1, u_device, x1, x1, y0_device, y1_device); cffts2_gpu(-1, u_device, x1, x1, y0_device, y1_device); cffts1_gpu(-1, u_device, x1, x2, y0_device, y1_device); } } static void fft_init_gpu(int n){ #if defined(PROFILING) timer_start(PROFILING_INIT); #endif int m,ku,i,j,ln; double t, ti; /* * --------------------------------------------------------------------- * initialize the U array with sines and cosines in a manner that permits * stride one access at each FFT iteration. 
* --------------------------------------------------------------------- */ m = ilog2(n); u[0] = dcomplex_create((double)m, 0.0); ku = 2; ln = 1; for(j=1; j<=m; j++){ t = PI / ln; for(i=0; i<=ln-1; i++){ ti = i * t; u[i+ku-1] = dcomplex_create(cos(ti), sin(ti)); } ku = ku + ln; ln = 2 * ln; } cudaMemcpy(u_device, u, size_u_device, cudaMemcpyHostToDevice); #if defined(PROFILING) timer_stop(PROFILING_INIT); #endif } static int ilog2(int n){ int nn, lg; if(n==1){ return 0; } lg = 1; nn = 2; while(nn<n){ nn = nn << 1; lg++; } return lg; } __device__ int ilog2_device(int n){ int nn, lg; if(n==1){ return 0; } lg = 1; nn = 2; while(nn<n){ nn = nn << 1; lg++; } return lg; } static void init_ui_gpu(dcomplex u0[], dcomplex u1[], double twiddle[]){ #if defined(PROFILING) timer_start(PROFILING_INIT_UI); #endif init_ui_gpu_kernel<<<blocks_per_grid_on_init_ui, threads_per_block_on_init_ui>>>(u0, u1, twiddle); cudaDeviceSynchronize(); #if defined(PROFILING) timer_stop(PROFILING_INIT_UI); #endif } __global__ void init_ui_gpu_kernel(dcomplex u0[], dcomplex u1[], double twiddle[]){ int thread_id = blockIdx.x * blockDim.x + threadIdx.x; if(thread_id>=NTOTAL){ return; } u0[thread_id] = dcomplex_create(0.0, 0.0); u1[thread_id] = dcomplex_create(0.0, 0.0); twiddle[thread_id] = 0.0; } static void ipow46(double a, int exponent, double* result){ double q, r; int n, n2; /* * -------------------------------------------------------------------- * use * a^n = a^(n/2)*a^(n/2) if n even else * a^n = a*a^(n-1) if n odd * ------------------------------------------------------------------- */ *result = 1; if(exponent==0){return;} q = a; r = 1; n = exponent; while(n>1){ n2 = n/2; if(n2*2==n){ randlc(&q, q); n = n2; }else{ randlc(&r, q); n = n-1; } } randlc(&r, q); *result = r; } __device__ void ipow46_device(double a, int exponent, double* result){ double q, r; int n, n2; /* * -------------------------------------------------------------------- * use * a^n = a^(n/2)*a^(n/2) if n even else * a^n = a*a^(n-1) if n odd * ------------------------------------------------------------------- */ *result = 1; if(exponent==0){return;} q = a; r = 1; n = exponent; while(n>1){ n2 = n/2; if(n2*2==n){ randlc_device(&q, q); n = n2; }else{ randlc_device(&r, q); n = n-1; } } randlc_device(&r, q); *result = r; } __device__ double randlc_device(double* x, double a){ double t1,t2,t3,t4,a1,a2,x1,x2,z; t1 = R23 * a; a1 = (int)t1; a2 = a - T23 * a1; t1 = R23 * (*x); x1 = (int)t1; x2 = (*x) - T23 * x1; t1 = a1 * x2 + a2 * x1; t2 = (int)(R23 * t1); z = t1 - T23 * t2; t3 = T23 * z + a2 * x2; t4 = (int)(R46 * t3); (*x) = t3 - T46 * t4; return (R46 * (*x)); } static void release_gpu(){ cudaFree(sums_device); cudaFree(starts_device); cudaFree(twiddle_device); cudaFree(u_device); cudaFree(u0_device); cudaFree(u1_device); cudaFree(y0_device); cudaFree(y1_device); } static void setup(){ niter = NITER_DEFAULT; printf("\n\n NAS Parallel Benchmarks 4.1 CUDA C++ version - FT Benchmark\n\n"); printf(" Size : %4dx%4dx%4d\n", NX, NY, NZ); printf(" Iterations :%7d\n", niter); printf("\n"); } static void setup_gpu(){ /* * struct cudaDeviceProp{ * char name[256]; * size_t totalGlobalMem; * size_t sharedMemPerBlock; * int regsPerBlock; * int warpSize; * size_t memPitch; * int maxThreadsPerBlock; * int maxThreadsDim[3]; * int maxGridSize[3]; * size_t totalConstMem; * int major; * int minor; * int clockRate; * size_t textureAlignment; * int deviceOverlap; * int multiProcessorCount; * int kernelExecTimeoutEnabled; * int integrated; * int canMapHostMemory; * int 
computeMode; * int concurrentKernels; * int ECCEnabled; * int pciBusID; * int pciDeviceID; * int tccDriver; * } */ /* amount of available devices */ cudaGetDeviceCount(&total_devices); /* define gpu_device */ if(total_devices==0){ printf("\n\n\nNo Nvidia GPU found!\n\n\n"); exit(-1); }else if((GPU_DEVICE>=0)&& (GPU_DEVICE<total_devices)){ gpu_device_id = GPU_DEVICE; }else{ gpu_device_id = 0; } cudaSetDevice(gpu_device_id); cudaGetDeviceProperties(&gpu_device_properties, gpu_device_id); /* define threads_per_block */ if((FT_THREADS_PER_BLOCK_ON_COMPUTE_INDEXMAP>=1)&& (FT_THREADS_PER_BLOCK_ON_COMPUTE_INDEXMAP<=gpu_device_properties.maxThreadsPerBlock)){ threads_per_block_on_compute_indexmap = FT_THREADS_PER_BLOCK_ON_COMPUTE_INDEXMAP; }else{ threads_per_block_on_compute_indexmap = gpu_device_properties.warpSize; } if((FT_THREADS_PER_BLOCK_ON_COMPUTE_INITIAL_CONDITIONS>=1)&& (FT_THREADS_PER_BLOCK_ON_COMPUTE_INITIAL_CONDITIONS<=gpu_device_properties.maxThreadsPerBlock)){ threads_per_block_on_compute_initial_conditions = FT_THREADS_PER_BLOCK_ON_COMPUTE_INITIAL_CONDITIONS; }else{ threads_per_block_on_compute_initial_conditions = gpu_device_properties.warpSize; } if((FT_THREADS_PER_BLOCK_ON_INIT_UI>=1)&& (FT_THREADS_PER_BLOCK_ON_INIT_UI<=gpu_device_properties.maxThreadsPerBlock)){ threads_per_block_on_init_ui = FT_THREADS_PER_BLOCK_ON_INIT_UI; }else{ threads_per_block_on_init_ui=gpu_device_properties.warpSize; } if((FT_THREADS_PER_BLOCK_ON_EVOLVE>=1)&& (FT_THREADS_PER_BLOCK_ON_EVOLVE<=gpu_device_properties.maxThreadsPerBlock)){ threads_per_block_on_evolve = FT_THREADS_PER_BLOCK_ON_EVOLVE; }else{ threads_per_block_on_evolve=gpu_device_properties.warpSize; } if((FT_THREADS_PER_BLOCK_ON_FFTX_1>=1)&& (FT_THREADS_PER_BLOCK_ON_FFTX_1<=gpu_device_properties.maxThreadsPerBlock)){ threads_per_block_on_fftx_1 = FT_THREADS_PER_BLOCK_ON_FFTX_1; }else{ threads_per_block_on_fftx_1 = gpu_device_properties.warpSize; } if((FT_THREADS_PER_BLOCK_ON_FFTX_2>=1)&& (FT_THREADS_PER_BLOCK_ON_FFTX_2<=gpu_device_properties.maxThreadsPerBlock)){ threads_per_block_on_fftx_2 = FT_THREADS_PER_BLOCK_ON_FFTX_2; }else{ threads_per_block_on_fftx_2 = gpu_device_properties.warpSize; } if((FT_THREADS_PER_BLOCK_ON_FFTX_3>=1)&& (FT_THREADS_PER_BLOCK_ON_FFTX_3<=gpu_device_properties.maxThreadsPerBlock)){ threads_per_block_on_fftx_3 = FT_THREADS_PER_BLOCK_ON_FFTX_3; }else{ threads_per_block_on_fftx_3 = gpu_device_properties.warpSize; } if((FT_THREADS_PER_BLOCK_ON_FFTY_1>=1)&& (FT_THREADS_PER_BLOCK_ON_FFTY_1<=gpu_device_properties.maxThreadsPerBlock)){ threads_per_block_on_ffty_1 = FT_THREADS_PER_BLOCK_ON_FFTY_1; }else{ threads_per_block_on_ffty_1 = gpu_device_properties.warpSize; } if((FT_THREADS_PER_BLOCK_ON_FFTY_2>=1)&& (FT_THREADS_PER_BLOCK_ON_FFTY_2<=gpu_device_properties.maxThreadsPerBlock)){ threads_per_block_on_ffty_2 = FT_THREADS_PER_BLOCK_ON_FFTY_2; }else{ threads_per_block_on_ffty_2 = gpu_device_properties.warpSize; } if((FT_THREADS_PER_BLOCK_ON_FFTY_3>=1)&& (FT_THREADS_PER_BLOCK_ON_FFTY_3<=gpu_device_properties.maxThreadsPerBlock)){ threads_per_block_on_ffty_3 = FT_THREADS_PER_BLOCK_ON_FFTY_3; }else{ threads_per_block_on_ffty_3 = gpu_device_properties.warpSize; } if((FT_THREADS_PER_BLOCK_ON_FFTZ_1>=1)&& (FT_THREADS_PER_BLOCK_ON_FFTZ_1<=gpu_device_properties.maxThreadsPerBlock)){ threads_per_block_on_fftz_1 = FT_THREADS_PER_BLOCK_ON_FFTZ_1; }else{ threads_per_block_on_fftz_1 = gpu_device_properties.warpSize; } if((FT_THREADS_PER_BLOCK_ON_FFTZ_2>=1)&& (FT_THREADS_PER_BLOCK_ON_FFTZ_2<=gpu_device_properties.maxThreadsPerBlock)){ 
threads_per_block_on_fftz_2 = FT_THREADS_PER_BLOCK_ON_FFTZ_2; }else{ threads_per_block_on_fftz_2 = gpu_device_properties.warpSize; } if((FT_THREADS_PER_BLOCK_ON_FFTZ_3>=1)&& (FT_THREADS_PER_BLOCK_ON_FFTZ_3<=gpu_device_properties.maxThreadsPerBlock)){ threads_per_block_on_fftz_3 = FT_THREADS_PER_BLOCK_ON_FFTZ_3; }else{ threads_per_block_on_fftz_3 = gpu_device_properties.warpSize; } if((FT_THREADS_PER_BLOCK_ON_CHECKSUM>=1)&& (FT_THREADS_PER_BLOCK_ON_CHECKSUM<=gpu_device_properties.maxThreadsPerBlock)){ threads_per_block_on_checksum = FT_THREADS_PER_BLOCK_ON_CHECKSUM; }else{ threads_per_block_on_checksum = gpu_device_properties.warpSize; } blocks_per_grid_on_compute_indexmap=ceil(double(NTOTAL)/double(threads_per_block_on_compute_indexmap)); blocks_per_grid_on_compute_initial_conditions=ceil(double(NZ)/double(threads_per_block_on_compute_initial_conditions)); blocks_per_grid_on_init_ui=ceil(double(NTOTAL)/double(threads_per_block_on_init_ui)); blocks_per_grid_on_evolve=ceil(double(NTOTAL)/double(threads_per_block_on_evolve)); blocks_per_grid_on_fftx_1=ceil(double(NX*NY*NZ)/double(threads_per_block_on_fftx_1)); blocks_per_grid_on_fftx_2=ceil(double(NY*NZ)/double(threads_per_block_on_fftx_2)); blocks_per_grid_on_fftx_3=ceil(double(NX*NY*NZ)/double(threads_per_block_on_fftx_3)); blocks_per_grid_on_ffty_1=ceil(double(NX*NY*NZ)/double(threads_per_block_on_ffty_1)); blocks_per_grid_on_ffty_2=ceil(double(NX*NZ)/double(threads_per_block_on_ffty_2)); blocks_per_grid_on_ffty_3=ceil(double(NX*NY*NZ)/double(threads_per_block_on_ffty_3)); blocks_per_grid_on_fftz_1=ceil(double(NX*NY*NZ)/double(threads_per_block_on_fftz_1)); blocks_per_grid_on_fftz_2=ceil(double(NX*NY)/double(threads_per_block_on_fftz_2)); blocks_per_grid_on_fftz_3=ceil(double(NX*NY*NZ)/double(threads_per_block_on_fftz_3)); blocks_per_grid_on_checksum=ceil(double(CHECKSUM_TASKS)/double(threads_per_block_on_checksum)); size_sums_device=(NITER_DEFAULT+1)*sizeof(dcomplex); size_starts_device=NZ*sizeof(double); size_twiddle_device=NTOTAL*sizeof(double); size_u_device=MAXDIM*sizeof(dcomplex); size_u0_device=NTOTAL*sizeof(dcomplex); size_u1_device=NTOTAL*sizeof(dcomplex); size_y0_device=NTOTAL*sizeof(dcomplex); size_y1_device=NTOTAL*sizeof(dcomplex); size_shared_data=threads_per_block_on_checksum*sizeof(dcomplex); cudaMalloc(&sums_device, size_sums_device); cudaMalloc(&starts_device, size_starts_device); cudaMalloc(&twiddle_device, size_twiddle_device); cudaMalloc(&u_device, size_u_device); cudaMalloc(&u0_device, size_u0_device); cudaMalloc(&u1_device, size_u1_device); cudaMalloc(&y0_device, size_y0_device); cudaMalloc(&y1_device, size_y1_device); omp_set_num_threads(OMP_THREADS); } static void verify(int d1, int d2, int d3, int nt, boolean* verified, char* class_npb){ int i; double err, epsilon; /* * --------------------------------------------------------------------- * reference checksums * --------------------------------------------------------------------- */ dcomplex csum_ref[25+1]; *class_npb = 'U'; epsilon = 1.0e-12; *verified = false; if(d1 == 64 && d2 == 64 && d3 == 64 && nt == 6){ /* * --------------------------------------------------------------------- * sample size reference checksums * --------------------------------------------------------------------- */ *class_npb = 'S'; csum_ref[1] = dcomplex_create(5.546087004964E+02, 4.845363331978E+02); csum_ref[2] = dcomplex_create(5.546385409189E+02, 4.865304269511E+02); csum_ref[3] = dcomplex_create(5.546148406171E+02, 4.883910722336E+02); csum_ref[4] = 
dcomplex_create(5.545423607415E+02, 4.901273169046E+02); csum_ref[5] = dcomplex_create(5.544255039624E+02, 4.917475857993E+02); csum_ref[6] = dcomplex_create(5.542683411902E+02, 4.932597244941E+02); }else if(d1 == 128 && d2 == 128 && d3 == 32 && nt == 6){ /* * --------------------------------------------------------------------- * class_npb W size reference checksums * --------------------------------------------------------------------- */ *class_npb = 'W'; csum_ref[1] = dcomplex_create(5.673612178944E+02, 5.293246849175E+02); csum_ref[2] = dcomplex_create(5.631436885271E+02, 5.282149986629E+02); csum_ref[3] = dcomplex_create(5.594024089970E+02, 5.270996558037E+02); csum_ref[4] = dcomplex_create(5.560698047020E+02, 5.260027904925E+02); csum_ref[5] = dcomplex_create(5.530898991250E+02, 5.249400845633E+02); csum_ref[6] = dcomplex_create(5.504159734538E+02, 5.239212247086E+02); }else if(d1 == 256 && d2 == 256 && d3 == 128 && nt == 6){ /* * --------------------------------------------------------------------- * class_npb A size reference checksums * --------------------------------------------------------------------- */ *class_npb = 'A'; csum_ref[1] = dcomplex_create(5.046735008193E+02, 5.114047905510E+02); csum_ref[2] = dcomplex_create(5.059412319734E+02, 5.098809666433E+02); csum_ref[3] = dcomplex_create(5.069376896287E+02, 5.098144042213E+02); csum_ref[4] = dcomplex_create(5.077892868474E+02, 5.101336130759E+02); csum_ref[5] = dcomplex_create(5.085233095391E+02, 5.104914655194E+02); csum_ref[6] = dcomplex_create(5.091487099959E+02, 5.107917842803E+02); }else if(d1 == 512 && d2 == 256 && d3 == 256 && nt == 20){ /* * -------------------------------------------------------------------- * class_npb B size reference checksums * --------------------------------------------------------------------- */ *class_npb = 'B'; csum_ref[1] = dcomplex_create(5.177643571579E+02, 5.077803458597E+02); csum_ref[2] = dcomplex_create(5.154521291263E+02, 5.088249431599E+02); csum_ref[3] = dcomplex_create(5.146409228649E+02, 5.096208912659E+02); csum_ref[4] = dcomplex_create(5.142378756213E+02, 5.101023387619E+02); csum_ref[5] = dcomplex_create(5.139626667737E+02, 5.103976610617E+02); csum_ref[6] = dcomplex_create(5.137423460082E+02, 5.105948019802E+02); csum_ref[7] = dcomplex_create(5.135547056878E+02, 5.107404165783E+02); csum_ref[8] = dcomplex_create(5.133910925466E+02, 5.108576573661E+02); csum_ref[9] = dcomplex_create(5.132470705390E+02, 5.109577278523E+02); csum_ref[10] = dcomplex_create(5.131197729984E+02, 5.110460304483E+02); csum_ref[11] = dcomplex_create(5.130070319283E+02, 5.111252433800E+02); csum_ref[12] = dcomplex_create(5.129070537032E+02, 5.111968077718E+02); csum_ref[13] = dcomplex_create(5.128182883502E+02, 5.112616233064E+02); csum_ref[14] = dcomplex_create(5.127393733383E+02, 5.113203605551E+02); csum_ref[15] = dcomplex_create(5.126691062020E+02, 5.113735928093E+02); csum_ref[16] = dcomplex_create(5.126064276004E+02, 5.114218460548E+02); csum_ref[17] = dcomplex_create(5.125504076570E+02, 5.114656139760E+02); csum_ref[18] = dcomplex_create(5.125002331720E+02, 5.115053595966E+02); csum_ref[19] = dcomplex_create(5.124551951846E+02, 5.115415130407E+02); csum_ref[20] = dcomplex_create(5.124146770029E+02, 5.115744692211E+02); }else if(d1 == 512 && d2 == 512 && d3 == 512 && nt == 20){ /* * --------------------------------------------------------------------- * class_npb C size reference checksums * --------------------------------------------------------------------- */ *class_npb = 'C'; csum_ref[1] 
= dcomplex_create(5.195078707457E+02, 5.149019699238E+02); csum_ref[2] = dcomplex_create(5.155422171134E+02, 5.127578201997E+02); csum_ref[3] = dcomplex_create(5.144678022222E+02, 5.122251847514E+02); csum_ref[4] = dcomplex_create(5.140150594328E+02, 5.121090289018E+02); csum_ref[5] = dcomplex_create(5.137550426810E+02, 5.121143685824E+02); csum_ref[6] = dcomplex_create(5.135811056728E+02, 5.121496764568E+02); csum_ref[7] = dcomplex_create(5.134569343165E+02, 5.121870921893E+02); csum_ref[8] = dcomplex_create(5.133651975661E+02, 5.122193250322E+02); csum_ref[9] = dcomplex_create(5.132955192805E+02, 5.122454735794E+02); csum_ref[10] = dcomplex_create(5.132410471738E+02, 5.122663649603E+02); csum_ref[11] = dcomplex_create(5.131971141679E+02, 5.122830879827E+02); csum_ref[12] = dcomplex_create(5.131605205716E+02, 5.122965869718E+02); csum_ref[13] = dcomplex_create(5.131290734194E+02, 5.123075927445E+02); csum_ref[14] = dcomplex_create(5.131012720314E+02, 5.123166486553E+02); csum_ref[15] = dcomplex_create(5.130760908195E+02, 5.123241541685E+02); csum_ref[16] = dcomplex_create(5.130528295923E+02, 5.123304037599E+02); csum_ref[17] = dcomplex_create(5.130310107773E+02, 5.123356167976E+02); csum_ref[18] = dcomplex_create(5.130103090133E+02, 5.123399592211E+02); csum_ref[19] = dcomplex_create(5.129905029333E+02, 5.123435588985E+02); csum_ref[20] = dcomplex_create(5.129714421109E+02, 5.123465164008E+02); }else if(d1 == 2048 && d2 == 1024 && d3 == 1024 && nt == 25){ /* * --------------------------------------------------------------------- * class_npb D size reference checksums * --------------------------------------------------------------------- */ *class_npb = 'D'; csum_ref[1] = dcomplex_create(5.122230065252E+02, 5.118534037109E+02); csum_ref[2] = dcomplex_create(5.120463975765E+02, 5.117061181082E+02); csum_ref[3] = dcomplex_create(5.119865766760E+02, 5.117096364601E+02); csum_ref[4] = dcomplex_create(5.119518799488E+02, 5.117373863950E+02); csum_ref[5] = dcomplex_create(5.119269088223E+02, 5.117680347632E+02); csum_ref[6] = dcomplex_create(5.119082416858E+02, 5.117967875532E+02); csum_ref[7] = dcomplex_create(5.118943814638E+02, 5.118225281841E+02); csum_ref[8] = dcomplex_create(5.118842385057E+02, 5.118451629348E+02); csum_ref[9] = dcomplex_create(5.118769435632E+02, 5.118649119387E+02); csum_ref[10] = dcomplex_create(5.118718203448E+02, 5.118820803844E+02); csum_ref[11] = dcomplex_create(5.118683569061E+02, 5.118969781011E+02); csum_ref[12] = dcomplex_create(5.118661708593E+02, 5.119098918835E+02); csum_ref[13] = dcomplex_create(5.118649768950E+02, 5.119210777066E+02); csum_ref[14] = dcomplex_create(5.118645605626E+02, 5.119307604484E+02); csum_ref[15] = dcomplex_create(5.118647586618E+02, 5.119391362671E+02); csum_ref[16] = dcomplex_create(5.118654451572E+02, 5.119463757241E+02); csum_ref[17] = dcomplex_create(5.118665212451E+02, 5.119526269238E+02); csum_ref[18] = dcomplex_create(5.118679083821E+02, 5.119580184108E+02); csum_ref[19] = dcomplex_create(5.118695433664E+02, 5.119626617538E+02); csum_ref[20] = dcomplex_create(5.118713748264E+02, 5.119666538138E+02); csum_ref[21] = dcomplex_create(5.118733606701E+02, 5.119700787219E+02); csum_ref[22] = dcomplex_create(5.118754661974E+02, 5.119730095953E+02); csum_ref[23] = dcomplex_create(5.118776626738E+02, 5.119755100241E+02); csum_ref[24] = dcomplex_create(5.118799262314E+02, 5.119776353561E+02); csum_ref[25] = dcomplex_create(5.118822370068E+02, 5.119794338060E+02); }else if(d1 == 4096 && d2 == 2048 && d3 == 2048 && nt == 25){ /* * 
--------------------------------------------------------------------- * class_npb E size reference checksums * --------------------------------------------------------------------- */ *class_npb = 'E'; csum_ref[1] = dcomplex_create(5.121601045346E+02, 5.117395998266E+02); csum_ref[2] = dcomplex_create(5.120905403678E+02, 5.118614716182E+02); csum_ref[3] = dcomplex_create(5.120623229306E+02, 5.119074203747E+02); csum_ref[4] = dcomplex_create(5.120438418997E+02, 5.119345900733E+02); csum_ref[5] = dcomplex_create(5.120311521872E+02, 5.119551325550E+02); csum_ref[6] = dcomplex_create(5.120226088809E+02, 5.119720179919E+02); csum_ref[7] = dcomplex_create(5.120169296534E+02, 5.119861371665E+02); csum_ref[8] = dcomplex_create(5.120131225172E+02, 5.119979364402E+02); csum_ref[9] = dcomplex_create(5.120104767108E+02, 5.120077674092E+02); csum_ref[10] = dcomplex_create(5.120085127969E+02, 5.120159443121E+02); csum_ref[11] = dcomplex_create(5.120069224127E+02, 5.120227453670E+02); csum_ref[12] = dcomplex_create(5.120055158164E+02, 5.120284096041E+02); csum_ref[13] = dcomplex_create(5.120041820159E+02, 5.120331373793E+02); csum_ref[14] = dcomplex_create(5.120028605402E+02, 5.120370938679E+02); csum_ref[15] = dcomplex_create(5.120015223011E+02, 5.120404138831E+02); csum_ref[16] = dcomplex_create(5.120001570022E+02, 5.120432068837E+02); csum_ref[17] = dcomplex_create(5.119987650555E+02, 5.120455615860E+02); csum_ref[18] = dcomplex_create(5.119973525091E+02, 5.120475499442E+02); csum_ref[19] = dcomplex_create(5.119959279472E+02, 5.120492304629E+02); csum_ref[20] = dcomplex_create(5.119945006558E+02, 5.120506508902E+02); csum_ref[21] = dcomplex_create(5.119930795911E+02, 5.120518503782E+02); csum_ref[22] = dcomplex_create(5.119916728462E+02, 5.120528612016E+02); csum_ref[23] = dcomplex_create(5.119902874185E+02, 5.120537101195E+02); csum_ref[24] = dcomplex_create(5.119889291565E+02, 5.120544194514E+02); csum_ref[25] = dcomplex_create(5.119876028049E+02, 5.120550079284E+02); } if(*class_npb != 'U'){ *verified = TRUE; for(i = 1; i <= nt; i++){ err = dcomplex_abs(dcomplex_div(dcomplex_sub(sums[i], csum_ref[i]), csum_ref[i])); if(!(err <= epsilon)){ *verified = FALSE; break; } } } if(*class_npb != 'U'){ if(*verified){ printf(" Result verification successful\n"); }else{ printf(" Result verification failed\n"); } } printf(" class_npb = %c\n", *class_npb); } __device__ void vranlc_device(int n, double* x_seed, double a, double y[]){ int i; double x,t1,t2,t3,t4,a1,a2,x1,x2,z; t1 = R23 * a; a1 = (int)t1; a2 = a - T23 * a1; x = *x_seed; for(i=0; i<n; i++){ t1 = R23 * x; x1 = (int)t1; x2 = x - T23 * x1; t1 = a1 * x2 + a2 * x1; t2 = (int)(R23 * t1); z = t1 - T23 * t2; t3 = T23 * z + a2 * x2; t4 = (int)(R46 * t3); x = t3 - T46 * t4; y[i] = R46 * x; } *x_seed = x; }
bb36296fffe1f64e45ee8b854c6ed764691bbbb0.hip
// !!! This is a file automatically generated by hipify!!! /* * Implementing the FFT algorithm for general input * Input should be fp32 vectors with size equals to the power of 4 * Number of vectors is given by BATCH (B) * Recursive algorithm * Base case is fft4 */ // C includes #include <stdio.h> #include <assert.h> #include <math.h> #include <stdlib.h> #include <string.h> // CUDA includes #include <hip/hip_runtime.h> #include <rocblas.h> #include <hip/hip_fp16.h> #include "nvidia_helper/checkCudaErrors.h" // Matrix and vector #include "helper/my_vector.h" #include "helper/my_matrix.h" #include "helper/my_const.h" // Utility programs #include "util/fp32_to_fp16.h" #include "util/fourier_matrix_4.h" #include "util/fft4.h" #define PI 3.14159265 const float UPPER_BOUND = 1.0f; const int BATCH = 1; const int SIZE = 256; extern fft::MatrixH F4_re; extern fft::MatrixH F4_im; float* buffer; __global__ void multiply_twiddle(int N, int m, int n, float* matrix_re, float* matrix_im) { /* * Multifly every element of the input matrix with twiddle factor * Block and thread layout should be 2D * Re.element(i, j) [0 based] = xre * cos(2pi/N * i * j) + xim * sin(2pi/N * i * j) * Im.element(i, j) [0 based] = -xre * sin(2pi/N * i * j) + xim * cos(2pi/N * i * j) * */ // Calculate position (0 based) int j = blockIdx.x * blockDim.x + threadIdx.x; int i = blockIdx.y * blockDim.y + threadIdx.y; if (i < m && j < n){ // Per-thread local variables int index = j * m + i; float tw_re = cos(2 * PI / N * i * j); float tw_im = sin(2 * PI / N * i * j); float result_re = matrix_re[index] * tw_re + matrix_im[index] * tw_im; float result_im = -1.0f * matrix_re[index] * tw_im + matrix_im[index] * tw_re; matrix_re[index] = result_re; matrix_im[index] = result_im; } } FFT_S gfft(int N, int B, fft::MatrixF& X_re, fft::MatrixF& X_im, fft::MatrixF& FX_re, fft::MatrixF& FX_im) { FFT_S fft_status; if (N == 4) { return fft4(B, X_re, X_im, FX_re, FX_im); } // cublas variable declaration hipblasStatus_t status; hipblasHandle_t handle; // Scaling variables float alpha = 1.0f, beta = 0.0f; // Temporary variables for intermediate result swapping float* temp; // Initialize cublas status = hipblasCreate(&handle); if (status != HIPBLAS_STATUS_SUCCESS) { fprintf(stderr, "!!!! CUBLAS initialization error\n"); return FFT_FAILURE; } // Reshape the output matrix: (N -(Reshape)->4*(N/4)) * B FX_re.width = N / 4 * B; FX_re.height = 4; FX_im.width = N / 4 * B; FX_im.height = 4; // Transpose input matrix: (4*(N/4) -(Transpose)-> (N/4)*4) * B // Store temporary result first in buffer, then in FX_re.array and FX_im.array //// Real matrix for (int j = 0; j < B; j++){ status = hipblasSgeam(handle, HIPBLAS_OP_T, HIPBLAS_OP_T, N/4, 4, &alpha, X_re.array + j * N, 4, &beta, X_re.array + j * N, 4, buffer + j * N, N/4); if (status != HIPBLAS_STATUS_SUCCESS) { fprintf(stderr, "!!!! CUBLAS kernel execution error (transpose real input).\n"); return FFT_FAILURE; } } ////// Swap FX_re.array and buffer to store the transposition result in FX_re.array temp = FX_re.array; FX_re.array = buffer; buffer = temp; ////// Set dimension (Note that the transpose happens batch-wisely) FX_re.height = N / 4; FX_re.width = B * 4; //// Imaginary for (int j = 0; j < B; j++){ status = hipblasSgeam(handle, HIPBLAS_OP_T, HIPBLAS_OP_T, N/4, 4, &alpha, X_im.array + j * N, 4, &beta, X_im.array + j * N, 4, buffer + j * N, N/4); if (status != HIPBLAS_STATUS_SUCCESS) { fprintf(stderr, "!!!! 
CUBLAS kernel execution error (transpose imaginary input).\n"); return FFT_FAILURE; } } ////// Swap FX_im.array and buffer to store the transposition result in FX_im.array temp = FX_im.array; FX_im.array = buffer; buffer = temp; ////// Set dimension FX_im.height = N / 4; FX_im.width = B * 4; hipDeviceSynchronize(); // Recursively call gfft function, not! using buffer matrix //// Call gfft, store result in buffer matrix fft_status = gfft(N / 4, 4 * B, FX_re, FX_im, FX_re, FX_im); if (fft_status != FFT_SUCCESS){ fprintf(stderr, "!!!!! Execution error (recursively call gfft).\n"); return FFT_FAILURE; } // Multiplication with twiddle factors //// Set grid and block size dim3 threadsPerBlock(4, 16); dim3 numBlocks(1, (N + 63)/64); // Make sure blocks are enough //// Call kernel function for (int j = 0; j < B; j++){ hipLaunchKernelGGL(( multiply_twiddle), dim3(numBlocks), dim3(threadsPerBlock), 0, 0, N, N/4, 4, FX_re.array + j * N, FX_im.array + j * N); } hipDeviceSynchronize(); // Transpose the matrix again // Store temporary result first in buffer, then in FX_re.array and FX_im.array //// Real matrix for (int j = 0; j < B; j++){ status = hipblasSgeam(handle, HIPBLAS_OP_T, HIPBLAS_OP_T, 4, N/4, &alpha, FX_re.array + j * N, N/4, &beta, FX_re.array + j * N, N/4, buffer + j * N, 4); if (status != HIPBLAS_STATUS_SUCCESS) { fprintf(stderr, "!!!! CUBLAS kernel execution error (intermediate transpose real).\n"); return FFT_FAILURE; } } ////// Swap FX_re.array and buffer to store the transposition result in FX_re.array temp = FX_re.array; FX_re.array = buffer; buffer = temp; ////// Set dimension, note that the transpose happens per batch FX_re.height = 4; FX_re.width = N / 4 * B; //// Imaginary matrix for (int j = 0; j < B; j++){ status = hipblasSgeam(handle, HIPBLAS_OP_T, HIPBLAS_OP_T, 4, N/4, &alpha, FX_im.array + j * N, N/4, &beta, FX_im.array + j * N, N/4, buffer + j * N, 4); if (status != HIPBLAS_STATUS_SUCCESS) { fprintf(stderr, "!!!! CUBLAS kernel execution error (intermediate transpose imaginary).\n"); return FFT_FAILURE; } } ////// Swap FX_im.array and buffer to store the transposition result in FX_im.array temp = FX_im.array; FX_im.array = buffer; buffer = temp; ////// Set dimension FX_im.height = 4; FX_im.width = N / 4 * B; hipDeviceSynchronize(); // Call fft4, not! using buffer matrix //// Call fft4, store result in buffer matrix fft_status = fft4(N / 4 * B, FX_re, FX_im, FX_re, FX_im); if (fft_status != FFT_SUCCESS){ fprintf(stderr, "!!!!! Execution error (combine step calling fft4).\n"); return FFT_FAILURE; } // Do the final transpose to get the output //// Real matrix for (int j = 0; j < B; j++){ status = hipblasSgeam(handle, HIPBLAS_OP_T, HIPBLAS_OP_T, N/4, 4, &alpha, FX_re.array + j * N, 4, &beta, FX_re.array + j * N, 4, buffer + j * N, N/4); if (status != HIPBLAS_STATUS_SUCCESS) { fprintf(stderr, "!!!! CUBLAS kernel execution error (final transpose real).\n"); return FFT_FAILURE; } } ////// Swap FX_re.array and buffer to store the transposition result in FX_re.array temp = FX_re.array; FX_re.array = buffer; buffer = temp; ////// Set dimension FX_re.height = N / 4; FX_re.width = 4 * B; //// Imaginary matrix for (int j = 0; j < B; j++){ status = hipblasSgeam(handle, HIPBLAS_OP_T, HIPBLAS_OP_T, N/4, 4, &alpha, FX_im.array + j * N, 4, &beta, FX_im.array + j * N, 4, buffer + j * N, N/4); if (status != HIPBLAS_STATUS_SUCCESS) { fprintf(stderr, "!!!! 
CUBLAS kernel execution error (final transpose imaginary).\n"); return FFT_FAILURE; } } ////// Swap FX_im.array and buffer to store the transposition result in FX_im.array temp = FX_im.array; FX_im.array = buffer; buffer = temp; ////// Set dimension FX_re.height = N / 4; FX_re.width = 4 * B; hipDeviceSynchronize(); // Reshape back input and output matrix: (4*(N/4) --Reshape--> N) * B FX_re.width = B; FX_re.height = N; FX_im.width = B; FX_im.height = N; // Shutdown cublas status = hipblasDestroy(handle); if (status != HIPBLAS_STATUS_SUCCESS) { fprintf(stderr, "!!!! shutdown error (A)\n"); return FFT_FAILURE; } return FFT_SUCCESS; } int main() { int mem_size; // allocate unified memory for input matrix fft::MatrixF input_re; input_re.width = BATCH; input_re.height = SIZE; mem_size = input_re.width * input_re.height * sizeof(float); checkCudaErrors(hipMallocManaged((void **) &(input_re.array), mem_size)); fft::MatrixF input_im; input_im.width = BATCH; input_im.height = SIZE; mem_size = input_im.width * input_im.height * sizeof(float); checkCudaErrors(hipMallocManaged((void **) &(input_im.array), mem_size)); // Initialize the input matrix srand(time(NULL)); printf("The input is: \n"); for (int j = 1; j <= BATCH; j++){ printf("Vector %d: \n", j); for (int i = 1; i <= SIZE; i++){ input_re.element(i, j) = (float)rand() / (float)(RAND_MAX) * 2 * UPPER_BOUND - UPPER_BOUND; input_im.element(i, j) = (float)rand() / (float)(RAND_MAX) * 2 * UPPER_BOUND - UPPER_BOUND; printf("X[%d] = (%.10f, %.10f) \n", i, input_re.element(i, j), input_im.element(i, j)); } printf("\n"); } // allocate unified memory for output matrix fft::MatrixF output_re; output_re.width = BATCH; output_re.height = SIZE; mem_size = output_re.width * output_re.height * sizeof(float); checkCudaErrors(hipMallocManaged((void **) &(output_re.array), mem_size)); fft::MatrixF output_im; output_im.width = BATCH; output_im.height = SIZE; mem_size = output_im.width * output_im.height * sizeof(float); checkCudaErrors(hipMallocManaged((void **) &(output_im.array), mem_size)); // allocate unified memory for the buffer (array of float) mem_size = SIZE * BATCH * sizeof(float); checkCudaErrors(hipMallocManaged((void **) &buffer, mem_size)); FFT_S status; // Initialize Fourier matrix status = init_F4(); if (status != FFT_SUCCESS){ fprintf(stderr, "!!!!! Matrix initialization error (init Fourier matrix).\n"); return FFT_FAILURE; } // Call gfft function status = gfft(SIZE, BATCH, input_re, input_im, output_re, output_im); if (status != FFT_SUCCESS){ printf("Error in running fft algorithm\n"); exit(1); } printf("Result: \n"); for (int j = 1; j <= BATCH; j++){ printf("Resulting vector %d: \n", j); for (int i = 1; i <= SIZE; i++){ printf("FX[%d] = (%.10f, %.10f) \n", i, output_re.element(i, j), output_im.element(i, j)); } } checkCudaErrors(hipFree(input_re.array)); checkCudaErrors(hipFree(input_im.array)); checkCudaErrors(hipFree(output_re.array)); checkCudaErrors(hipFree(output_im.array)); return 0; }
bb36296fffe1f64e45ee8b854c6ed764691bbbb0.cu
/* * Implementing the FFT algorithm for general input * Input should be fp32 vectors with size equals to the power of 4 * Number of vectors is given by BATCH (B) * Recursive algorithm * Base case is fft4 */ // C includes #include <stdio.h> #include <assert.h> #include <math.h> #include <stdlib.h> #include <string.h> // CUDA includes #include <cuda_runtime.h> #include <cublas_v2.h> #include <cuda_fp16.h> #include "nvidia_helper/checkCudaErrors.h" // Matrix and vector #include "helper/my_vector.h" #include "helper/my_matrix.h" #include "helper/my_const.h" // Utility programs #include "util/fp32_to_fp16.h" #include "util/fourier_matrix_4.h" #include "util/fft4.h" #define PI 3.14159265 const float UPPER_BOUND = 1.0f; const int BATCH = 1; const int SIZE = 256; extern fft::MatrixH F4_re; extern fft::MatrixH F4_im; float* buffer; __global__ void multiply_twiddle(int N, int m, int n, float* matrix_re, float* matrix_im) { /* * Multifly every element of the input matrix with twiddle factor * Block and thread layout should be 2D * Re.element(i, j) [0 based] = xre * cos(2pi/N * i * j) + xim * sin(2pi/N * i * j) * Im.element(i, j) [0 based] = -xre * sin(2pi/N * i * j) + xim * cos(2pi/N * i * j) * */ // Calculate position (0 based) int j = blockIdx.x * blockDim.x + threadIdx.x; int i = blockIdx.y * blockDim.y + threadIdx.y; if (i < m && j < n){ // Per-thread local variables int index = j * m + i; float tw_re = cos(2 * PI / N * i * j); float tw_im = sin(2 * PI / N * i * j); float result_re = matrix_re[index] * tw_re + matrix_im[index] * tw_im; float result_im = -1.0f * matrix_re[index] * tw_im + matrix_im[index] * tw_re; matrix_re[index] = result_re; matrix_im[index] = result_im; } } FFT_S gfft(int N, int B, fft::MatrixF& X_re, fft::MatrixF& X_im, fft::MatrixF& FX_re, fft::MatrixF& FX_im) { FFT_S fft_status; if (N == 4) { return fft4(B, X_re, X_im, FX_re, FX_im); } // cublas variable declaration cublasStatus_t status; cublasHandle_t handle; // Scaling variables float alpha = 1.0f, beta = 0.0f; // Temporary variables for intermediate result swapping float* temp; // Initialize cublas status = cublasCreate(&handle); if (status != CUBLAS_STATUS_SUCCESS) { fprintf(stderr, "!!!! CUBLAS initialization error\n"); return FFT_FAILURE; } // Reshape the output matrix: (N -(Reshape)->4*(N/4)) * B FX_re.width = N / 4 * B; FX_re.height = 4; FX_im.width = N / 4 * B; FX_im.height = 4; // Transpose input matrix: (4*(N/4) -(Transpose)-> (N/4)*4) * B // Store temporary result first in buffer, then in FX_re.array and FX_im.array //// Real matrix for (int j = 0; j < B; j++){ status = cublasSgeam(handle, CUBLAS_OP_T, CUBLAS_OP_T, N/4, 4, &alpha, X_re.array + j * N, 4, &beta, X_re.array + j * N, 4, buffer + j * N, N/4); if (status != CUBLAS_STATUS_SUCCESS) { fprintf(stderr, "!!!! CUBLAS kernel execution error (transpose real input).\n"); return FFT_FAILURE; } } ////// Swap FX_re.array and buffer to store the transposition result in FX_re.array temp = FX_re.array; FX_re.array = buffer; buffer = temp; ////// Set dimension (Note that the transpose happens batch-wisely) FX_re.height = N / 4; FX_re.width = B * 4; //// Imaginary for (int j = 0; j < B; j++){ status = cublasSgeam(handle, CUBLAS_OP_T, CUBLAS_OP_T, N/4, 4, &alpha, X_im.array + j * N, 4, &beta, X_im.array + j * N, 4, buffer + j * N, N/4); if (status != CUBLAS_STATUS_SUCCESS) { fprintf(stderr, "!!!! 
CUBLAS kernel execution error (transpose imaginary input).\n"); return FFT_FAILURE; } } ////// Swap FX_im.array and buffer to store the transposition result in FX_im.array temp = FX_im.array; FX_im.array = buffer; buffer = temp; ////// Set dimension FX_im.height = N / 4; FX_im.width = B * 4; cudaDeviceSynchronize(); // Recursively call gfft function, not! using buffer matrix //// Call gfft, store result in buffer matrix fft_status = gfft(N / 4, 4 * B, FX_re, FX_im, FX_re, FX_im); if (fft_status != FFT_SUCCESS){ fprintf(stderr, "!!!!! Execution error (recursively call gfft).\n"); return FFT_FAILURE; } // Multiplication with twiddle factors //// Set grid and block size dim3 threadsPerBlock(4, 16); dim3 numBlocks(1, (N + 63)/64); // Make sure blocks are enough //// Call kernel function for (int j = 0; j < B; j++){ multiply_twiddle<<<numBlocks, threadsPerBlock>>>(N, N/4, 4, FX_re.array + j * N, FX_im.array + j * N); } cudaDeviceSynchronize(); // Transpose the matrix again // Store temporary result first in buffer, then in FX_re.array and FX_im.array //// Real matrix for (int j = 0; j < B; j++){ status = cublasSgeam(handle, CUBLAS_OP_T, CUBLAS_OP_T, 4, N/4, &alpha, FX_re.array + j * N, N/4, &beta, FX_re.array + j * N, N/4, buffer + j * N, 4); if (status != CUBLAS_STATUS_SUCCESS) { fprintf(stderr, "!!!! CUBLAS kernel execution error (intermediate transpose real).\n"); return FFT_FAILURE; } } ////// Swap FX_re.array and buffer to store the transposition result in FX_re.array temp = FX_re.array; FX_re.array = buffer; buffer = temp; ////// Set dimension, note that the transpose happens per batch FX_re.height = 4; FX_re.width = N / 4 * B; //// Imaginary matrix for (int j = 0; j < B; j++){ status = cublasSgeam(handle, CUBLAS_OP_T, CUBLAS_OP_T, 4, N/4, &alpha, FX_im.array + j * N, N/4, &beta, FX_im.array + j * N, N/4, buffer + j * N, 4); if (status != CUBLAS_STATUS_SUCCESS) { fprintf(stderr, "!!!! CUBLAS kernel execution error (intermediate transpose imaginary).\n"); return FFT_FAILURE; } } ////// Swap FX_im.array and buffer to store the transposition result in FX_im.array temp = FX_im.array; FX_im.array = buffer; buffer = temp; ////// Set dimension FX_im.height = 4; FX_im.width = N / 4 * B; cudaDeviceSynchronize(); // Call fft4, not! using buffer matrix //// Call fft4, store result in buffer matrix fft_status = fft4(N / 4 * B, FX_re, FX_im, FX_re, FX_im); if (fft_status != FFT_SUCCESS){ fprintf(stderr, "!!!!! Execution error (combine step calling fft4).\n"); return FFT_FAILURE; } // Do the final transpose to get the output //// Real matrix for (int j = 0; j < B; j++){ status = cublasSgeam(handle, CUBLAS_OP_T, CUBLAS_OP_T, N/4, 4, &alpha, FX_re.array + j * N, 4, &beta, FX_re.array + j * N, 4, buffer + j * N, N/4); if (status != CUBLAS_STATUS_SUCCESS) { fprintf(stderr, "!!!! CUBLAS kernel execution error (final transpose real).\n"); return FFT_FAILURE; } } ////// Swap FX_re.array and buffer to store the transposition result in FX_re.array temp = FX_re.array; FX_re.array = buffer; buffer = temp; ////// Set dimension FX_re.height = N / 4; FX_re.width = 4 * B; //// Imaginary matrix for (int j = 0; j < B; j++){ status = cublasSgeam(handle, CUBLAS_OP_T, CUBLAS_OP_T, N/4, 4, &alpha, FX_im.array + j * N, 4, &beta, FX_im.array + j * N, 4, buffer + j * N, N/4); if (status != CUBLAS_STATUS_SUCCESS) { fprintf(stderr, "!!!! 
CUBLAS kernel execution error (final transpose imaginary).\n"); return FFT_FAILURE; } } ////// Swap FX_im.array and buffer to store the transposition result in FX_im.array temp = FX_im.array; FX_im.array = buffer; buffer = temp; ////// Set dimension FX_re.height = N / 4; FX_re.width = 4 * B; cudaDeviceSynchronize(); // Reshape back input and output matrix: (4*(N/4) --Reshape--> N) * B FX_re.width = B; FX_re.height = N; FX_im.width = B; FX_im.height = N; // Shutdown cublas status = cublasDestroy(handle); if (status != CUBLAS_STATUS_SUCCESS) { fprintf(stderr, "!!!! shutdown error (A)\n"); return FFT_FAILURE; } return FFT_SUCCESS; } int main() { int mem_size; // allocate unified memory for input matrix fft::MatrixF input_re; input_re.width = BATCH; input_re.height = SIZE; mem_size = input_re.width * input_re.height * sizeof(float); checkCudaErrors(cudaMallocManaged((void **) &(input_re.array), mem_size)); fft::MatrixF input_im; input_im.width = BATCH; input_im.height = SIZE; mem_size = input_im.width * input_im.height * sizeof(float); checkCudaErrors(cudaMallocManaged((void **) &(input_im.array), mem_size)); // Initialize the input matrix srand(time(NULL)); printf("The input is: \n"); for (int j = 1; j <= BATCH; j++){ printf("Vector %d: \n", j); for (int i = 1; i <= SIZE; i++){ input_re.element(i, j) = (float)rand() / (float)(RAND_MAX) * 2 * UPPER_BOUND - UPPER_BOUND; input_im.element(i, j) = (float)rand() / (float)(RAND_MAX) * 2 * UPPER_BOUND - UPPER_BOUND; printf("X[%d] = (%.10f, %.10f) \n", i, input_re.element(i, j), input_im.element(i, j)); } printf("\n"); } // allocate unified memory for output matrix fft::MatrixF output_re; output_re.width = BATCH; output_re.height = SIZE; mem_size = output_re.width * output_re.height * sizeof(float); checkCudaErrors(cudaMallocManaged((void **) &(output_re.array), mem_size)); fft::MatrixF output_im; output_im.width = BATCH; output_im.height = SIZE; mem_size = output_im.width * output_im.height * sizeof(float); checkCudaErrors(cudaMallocManaged((void **) &(output_im.array), mem_size)); // allocate unified memory for the buffer (array of float) mem_size = SIZE * BATCH * sizeof(float); checkCudaErrors(cudaMallocManaged((void **) &buffer, mem_size)); FFT_S status; // Initialize Fourier matrix status = init_F4(); if (status != FFT_SUCCESS){ fprintf(stderr, "!!!!! Matrix initialization error (init Fourier matrix).\n"); return FFT_FAILURE; } // Call gfft function status = gfft(SIZE, BATCH, input_re, input_im, output_re, output_im); if (status != FFT_SUCCESS){ printf("Error in running fft algorithm\n"); exit(1); } printf("Result: \n"); for (int j = 1; j <= BATCH; j++){ printf("Resulting vector %d: \n", j); for (int i = 1; i <= SIZE; i++){ printf("FX[%d] = (%.10f, %.10f) \n", i, output_re.element(i, j), output_im.element(i, j)); } } checkCudaErrors(cudaFree(input_re.array)); checkCudaErrors(cudaFree(input_im.array)); checkCudaErrors(cudaFree(output_re.array)); checkCudaErrors(cudaFree(output_im.array)); return 0; }
1dde17eb756fd3cdcebbb746f1c062583db5a43d.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // @file simpooling_gpu.cu // @brief Simmetry Pooling block implementation (gpu) // @author Ivn Gonzlez Daz /* Copyright (C) 2014-16 Andrea Vedaldi and Karel Lenc. All rights reserved. This file is part of the VLFeat library and is made available under the terms of the BSD license (see the COPYING file). */ #include "simpooling.hpp" #include "../datacu.hpp" #include <assert.h> #include <float.h> #include <sm_20_atomic_functions.h> /* ---------------------------------------------------------------- */ /* sim_pooling_forward */ /* ---------------------------------------------------------------- */ template<typename T> __global__ void simpooling_kernel (T* pooled, const T* data, const int poolRings, const int poolAngles, const int poolVolume, const int rings, const int angles, const int depth, const int* idx_init) { int pooledIndex = threadIdx.x + blockIdx.x * blockDim.x; if (pooledIndex < poolVolume) { int pz = pooledIndex / (poolAngles*poolRings) ; int relLoc = pooledIndex - pz*poolAngles*poolRings; int pa = relLoc / poolRings ; T aux; data += pz * (rings*angles) ; T scale=T(1.0)/T(rings*poolAngles); int * idx= new int[2*poolAngles]; //We shift the matrix to the current pa for (int x1 = 0; x1 < angles; ++x1){ idx[x1]=idx_init[x1]+pa; if(idx[x1]>=angles) idx[x1]=idx[x1]-angles; } pooled[pooledIndex]=0; //Only the half of the angles for(int x1 = 0; x1 < poolAngles; ++x1) { //For each ring we compute the differences and accumulate for (int y1 = 0; y1 < rings; ++y1) { //Compute and accumulate the difference among the sectors aux=data[idx[2*x1]*rings + y1]-data[idx[2*x1+1]*rings + y1]; pooled[pooledIndex]+=aux*aux*scale; } } free(idx); } } /* ---------------------------------------------------------------- */ /* sim_pooling_forward */ /* ---------------------------------------------------------------- */ template<typename T> __global__ void simpooling_backward_kernel (T* derData, const T* data, const T* derPooled, const int poolRings, const int poolAngles, const int poolVolume, const int rings, const int angles, const int depth, const int* idx_init) { int pooledIndex = threadIdx.x + blockIdx.x * blockDim.x; if (pooledIndex < poolVolume) { int pz = pooledIndex / (poolAngles*poolRings) ; int relLoc = pooledIndex - pz*poolAngles*poolRings; int pa = relLoc / poolRings ; data += pz * rings * angles; derData += pz * rings * angles ; T scale=T(1.0)/T(rings*poolAngles); T sign=1; T aux; //Matrix of indexes for simmetry int * idx= new int[2*poolAngles]; //We shift the matrix to the current pa for (int x1 = 0; x1 < angles; ++x1){ idx[x1]=idx_init[x1]+pa; if(idx[x1]>=angles) idx[x1]=idx[x1]-angles; } //Only the half of the angles for(int x1 = 0; x1 < poolAngles; ++x1) { //For each ring we compute the differences and accumulate for (int y1 = 0; y1 < rings; ++y1) { //Depending on the sign of the difference => We have to change the values of the mask //In forward we simply do "abs()" => here we need to know the sign to update things aux=data[idx[2*x1]*rings + y1]-data[idx[2*x1+1]*rings + y1]; //Update the derivative of the z with respect to the data atomicAdd(derData + idx[2*x1]*rings + y1, aux*derPooled[pooledIndex]*scale) ; atomicAdd(derData + idx[2*x1+1]*rings + y1, -aux*derPooled[pooledIndex]*scale) ; } } free(idx); } } /* ---------------------------------------------------------------- */ /* Interface */ /* ---------------------------------------------------------------- */ namespace vl { namespace impl { template 
<typename type> struct simpooling<vl::VLDT_GPU, type> { static vl::ErrorCode forward(type* pooled, type const* data, size_t rings, size_t angles, size_t depth) { int poolAngles = angles/2; int poolRings = 1 ; int poolVolume = poolRings * poolAngles * depth ; //Matrix of indexes for simmetry int * idx_init,* idx_init_gpu; idx_init = (int *)malloc(2*poolAngles*sizeof(int)); hipMalloc(&idx_init_gpu, 2*poolAngles*sizeof(int)); for (int x = 0; x < poolAngles; ++x) { idx_init[2*x]=x; idx_init[2*x+1]=angles-1-x; } hipMemcpy(idx_init_gpu,idx_init,2*poolAngles*sizeof(int),hipMemcpyHostToDevice); hipLaunchKernelGGL(( simpooling_kernel<type>) , dim3(divideAndRoundUp(poolVolume, VL_CUDA_NUM_THREADS)), dim3(VL_CUDA_NUM_THREADS) , 0, 0, pooled, data, poolRings, poolAngles, poolVolume, rings, angles, depth, idx_init_gpu); free(idx_init); hipFree(idx_init_gpu); hipError_t status = hipPeekAtLastError() ; return (status == hipSuccess) ? vl::VLE_Success : vl::VLE_Cuda ; } static vl::ErrorCode backward(type* derData, type const* data, type const* derPooled, size_t rings, size_t angles, size_t depth) { int poolAngles = angles/2; int poolRings = 1 ; int poolVolume = poolRings * poolAngles * depth ; int * idx_init,* idx_init_gpu; idx_init = (int *)malloc(2*poolAngles*sizeof(int)); hipMalloc(&idx_init_gpu, 2*poolAngles*sizeof(int)); for (int x = 0; x < poolAngles; ++x) { idx_init[2*x]=x; idx_init[2*x+1]=angles-1-x; } hipMemcpy(idx_init_gpu,idx_init,2*poolAngles*sizeof(int),hipMemcpyHostToDevice); hipLaunchKernelGGL(( simpooling_backward_kernel<type>) , dim3(divideAndRoundUp(poolVolume, VL_CUDA_NUM_THREADS)), dim3(VL_CUDA_NUM_THREADS) , 0, 0, derData, data, derPooled, poolRings, poolAngles, poolVolume, rings, angles, depth,idx_init_gpu); free(idx_init); hipFree(idx_init_gpu); hipError_t status = hipPeekAtLastError() ; return (status == hipSuccess) ? vl::VLE_Success : vl::VLE_Cuda ; } } ; // simpooling } } ; // namespace vl::impl // Instantiations template struct vl::impl::simpooling<vl::VLDT_GPU, float> ; #ifdef ENABLE_DOUBLE template struct vl::impl::simpooling<vl::VLDT_GPU, double> ; #endif
1dde17eb756fd3cdcebbb746f1c062583db5a43d.cu
// @file simpooling_gpu.cu // @brief Simmetry Pooling block implementation (gpu) // @author Iván González Díaz /* Copyright (C) 2014-16 Andrea Vedaldi and Karel Lenc. All rights reserved. This file is part of the VLFeat library and is made available under the terms of the BSD license (see the COPYING file). */ #include "simpooling.hpp" #include "../datacu.hpp" #include <assert.h> #include <float.h> #include <sm_20_atomic_functions.h> /* ---------------------------------------------------------------- */ /* sim_pooling_forward */ /* ---------------------------------------------------------------- */ template<typename T> __global__ void simpooling_kernel (T* pooled, const T* data, const int poolRings, const int poolAngles, const int poolVolume, const int rings, const int angles, const int depth, const int* idx_init) { int pooledIndex = threadIdx.x + blockIdx.x * blockDim.x; if (pooledIndex < poolVolume) { int pz = pooledIndex / (poolAngles*poolRings) ; int relLoc = pooledIndex - pz*poolAngles*poolRings; int pa = relLoc / poolRings ; T aux; data += pz * (rings*angles) ; T scale=T(1.0)/T(rings*poolAngles); int * idx= new int[2*poolAngles]; //We shift the matrix to the current pa for (int x1 = 0; x1 < angles; ++x1){ idx[x1]=idx_init[x1]+pa; if(idx[x1]>=angles) idx[x1]=idx[x1]-angles; } pooled[pooledIndex]=0; //Only the half of the angles for(int x1 = 0; x1 < poolAngles; ++x1) { //For each ring we compute the differences and accumulate for (int y1 = 0; y1 < rings; ++y1) { //Compute and accumulate the difference among the sectors aux=data[idx[2*x1]*rings + y1]-data[idx[2*x1+1]*rings + y1]; pooled[pooledIndex]+=aux*aux*scale; } } free(idx); } } /* ---------------------------------------------------------------- */ /* sim_pooling_forward */ /* ---------------------------------------------------------------- */ template<typename T> __global__ void simpooling_backward_kernel (T* derData, const T* data, const T* derPooled, const int poolRings, const int poolAngles, const int poolVolume, const int rings, const int angles, const int depth, const int* idx_init) { int pooledIndex = threadIdx.x + blockIdx.x * blockDim.x; if (pooledIndex < poolVolume) { int pz = pooledIndex / (poolAngles*poolRings) ; int relLoc = pooledIndex - pz*poolAngles*poolRings; int pa = relLoc / poolRings ; data += pz * rings * angles; derData += pz * rings * angles ; T scale=T(1.0)/T(rings*poolAngles); T sign=1; T aux; //Matrix of indexes for simmetry int * idx= new int[2*poolAngles]; //We shift the matrix to the current pa for (int x1 = 0; x1 < angles; ++x1){ idx[x1]=idx_init[x1]+pa; if(idx[x1]>=angles) idx[x1]=idx[x1]-angles; } //Only the half of the angles for(int x1 = 0; x1 < poolAngles; ++x1) { //For each ring we compute the differences and accumulate for (int y1 = 0; y1 < rings; ++y1) { //Depending on the sign of the difference => We have to change the values of the mask //In forward we simply do "abs()" => here we need to know the sign to update things aux=data[idx[2*x1]*rings + y1]-data[idx[2*x1+1]*rings + y1]; //Update the derivative of the z with respect to the data atomicAdd(derData + idx[2*x1]*rings + y1, aux*derPooled[pooledIndex]*scale) ; atomicAdd(derData + idx[2*x1+1]*rings + y1, -aux*derPooled[pooledIndex]*scale) ; } } free(idx); } } /* ---------------------------------------------------------------- */ /* Interface */ /* ---------------------------------------------------------------- */ namespace vl { namespace impl { template <typename type> struct simpooling<vl::VLDT_GPU, type> { static vl::ErrorCode 
forward(type* pooled, type const* data, size_t rings, size_t angles, size_t depth) { int poolAngles = angles/2; int poolRings = 1 ; int poolVolume = poolRings * poolAngles * depth ; //Matrix of indexes for simmetry int * idx_init,* idx_init_gpu; idx_init = (int *)malloc(2*poolAngles*sizeof(int)); cudaMalloc(&idx_init_gpu, 2*poolAngles*sizeof(int)); for (int x = 0; x < poolAngles; ++x) { idx_init[2*x]=x; idx_init[2*x+1]=angles-1-x; } cudaMemcpy(idx_init_gpu,idx_init,2*poolAngles*sizeof(int),cudaMemcpyHostToDevice); simpooling_kernel<type> <<< divideAndRoundUp(poolVolume, VL_CUDA_NUM_THREADS), VL_CUDA_NUM_THREADS >>> (pooled, data, poolRings, poolAngles, poolVolume, rings, angles, depth, idx_init_gpu); free(idx_init); cudaFree(idx_init_gpu); cudaError_t status = cudaPeekAtLastError() ; return (status == cudaSuccess) ? vl::VLE_Success : vl::VLE_Cuda ; } static vl::ErrorCode backward(type* derData, type const* data, type const* derPooled, size_t rings, size_t angles, size_t depth) { int poolAngles = angles/2; int poolRings = 1 ; int poolVolume = poolRings * poolAngles * depth ; int * idx_init,* idx_init_gpu; idx_init = (int *)malloc(2*poolAngles*sizeof(int)); cudaMalloc(&idx_init_gpu, 2*poolAngles*sizeof(int)); for (int x = 0; x < poolAngles; ++x) { idx_init[2*x]=x; idx_init[2*x+1]=angles-1-x; } cudaMemcpy(idx_init_gpu,idx_init,2*poolAngles*sizeof(int),cudaMemcpyHostToDevice); simpooling_backward_kernel<type> <<< divideAndRoundUp(poolVolume, VL_CUDA_NUM_THREADS), VL_CUDA_NUM_THREADS >>> (derData, data, derPooled, poolRings, poolAngles, poolVolume, rings, angles, depth,idx_init_gpu); free(idx_init); cudaFree(idx_init_gpu); cudaError_t status = cudaPeekAtLastError() ; return (status == cudaSuccess) ? vl::VLE_Success : vl::VLE_Cuda ; } } ; // simpooling } } ; // namespace vl::impl // Instantiations template struct vl::impl::simpooling<vl::VLDT_GPU, float> ; #ifdef ENABLE_DOUBLE template struct vl::impl::simpooling<vl::VLDT_GPU, double> ; #endif
a7193496fa813517ba229f7f1bc91a136b0ab1b8.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "Prerequisites.cuh" namespace gtom { //////////////////////////// //CUDA kernel declarations// //////////////////////////// template<class T> __global__ void ReduceAddKernel(T* d_input, T* d_output, int nvectors, int vectorlength); template<class T> __global__ void ReduceMeanKernel(T* d_input, T* d_output, int nvectors, int vectorlength); template<class T> __global__ void ReduceMeanWeightedKernel(T* d_input, tfloat* d_inputweights, T* d_output, int nvectors, int vectorlength); template<class T> __global__ void ReduceOrKernel(T* d_input, T* d_output, uint nvectors, uint vectorlength, uint batch); //////////// //Addition// //////////// template<class T> void d_ReduceAdd(T* d_input, T* d_output, int vectorlength, int nvectors, int batch) { int TpB = min(NextMultipleOf(vectorlength, 32), 128); dim3 grid = dim3(min((vectorlength + TpB - 1) / TpB, 1024), batch); ReduceAddKernel<T> << <grid, TpB >> > (d_input, d_output, nvectors, vectorlength); } template void d_ReduceAdd<char>(char* d_input, char* d_output, int vectorlength, int nvectors, int batch); template void d_ReduceAdd<short>(short* d_input, short* d_output, int vectorlength, int nvectors, int batch); template void d_ReduceAdd<int>(int* d_input, int* d_output, int vectorlength, int nvectors, int batch); template void d_ReduceAdd<uint>(uint* d_input, uint* d_output, int vectorlength, int nvectors, int batch); template void d_ReduceAdd<float>(float* d_input, float* d_output, int vectorlength, int nvectors, int batch); template void d_ReduceAdd<double>(double* d_input, double* d_output, int vectorlength, int nvectors, int batch); template void d_ReduceAdd<float2>(float2* d_input, float2* d_output, int vectorlength, int nvectors, int batch); template<class T> __global__ void ReduceAddKernel(T* d_input, T* d_output, int nvectors, int vectorlength) { d_input += blockIdx.y * nvectors * vectorlength; d_output += blockIdx.y * vectorlength; for (int id = blockIdx.x * blockDim.x + threadIdx.x; id < vectorlength; id += gridDim.x * blockDim.x) { T sum = (T)0; for (int n = 0; n < nvectors; n++) sum += d_input[n * vectorlength + id]; d_output[id] = sum; } } template<> __global__ void ReduceAddKernel<float2>(float2* d_input, float2* d_output, int nvectors, int vectorlength) { d_input += blockIdx.y * nvectors * vectorlength; d_output += blockIdx.y * vectorlength; for (int id = blockIdx.x * blockDim.x + threadIdx.x; id < vectorlength; id += gridDim.x * blockDim.x) { float2 sum = make_float2(0.0f, 0.0f); for (int n = 0; n < nvectors; n++) sum += d_input[n * vectorlength + id]; d_output[id] = sum; } } //////// //Mean// //////// template<class T> void d_ReduceMean(T* d_input, T* d_output, int vectorlength, int nvectors, int batch) { int TpB = tmin(NextMultipleOf(vectorlength, 32), 256); dim3 grid = dim3(tmin((vectorlength + TpB - 1) / TpB, 2048), batch); ReduceMeanKernel<T> << <grid, TpB >> > (d_input, d_output, nvectors, vectorlength); } template void d_ReduceMean<char>(char* d_input, char* d_output, int vectorlength, int nvectors, int batch); template void d_ReduceMean<short>(short* d_input, short* d_output, int vectorlength, int nvectors, int batch); template void d_ReduceMean<int>(int* d_input, int* d_output, int vectorlength, int nvectors, int batch); template void d_ReduceMean<uint>(uint* d_input, uint* d_output, int vectorlength, int nvectors, int batch); template void d_ReduceMean<half>(half* d_input, half* d_output, int vectorlength, int nvectors, int 
batch); template void d_ReduceMean<float>(float* d_input, float* d_output, int vectorlength, int nvectors, int batch); template void d_ReduceMean<double>(double* d_input, double* d_output, int vectorlength, int nvectors, int batch); template void d_ReduceMean<float2>(float2* d_input, float2* d_output, int vectorlength, int nvectors, int batch); template<class T> __global__ void ReduceMeanKernel(T* d_input, T* d_output, int nvectors, int vectorlength) { d_input += blockIdx.y * nvectors * vectorlength; for (uint id = blockIdx.x * blockDim.x + threadIdx.x; id < vectorlength; id += gridDim.x * blockDim.x) { T sum = (T)0; for (uint n = 0; n < nvectors; n++) sum += d_input[n * vectorlength + id]; d_output[blockIdx.y * vectorlength + id] = sum / (T)nvectors; } } template<> __global__ void ReduceMeanKernel<half>(half* d_input, half* d_output, int nvectors, int vectorlength) { d_input += blockIdx.y * nvectors * vectorlength; for (uint id = blockIdx.x * blockDim.x + threadIdx.x; id < vectorlength; id += gridDim.x * blockDim.x) { float sum = 0.0f; for (uint n = 0; n < nvectors; n++) sum += __half2float(d_input[n * vectorlength + id]); d_output[blockIdx.y * vectorlength + id] = __float2half(sum / (float)nvectors); } } template<> __global__ void ReduceMeanKernel<float2>(float2* d_input, float2* d_output, int nvectors, int vectorlength) { d_input += blockIdx.y * nvectors * vectorlength; for (uint id = blockIdx.x * blockDim.x + threadIdx.x; id < vectorlength; id += gridDim.x * blockDim.x) { float2 sum = make_float2(0.0f, 0.0f); for (uint n = 0; n < nvectors; n++) sum += d_input[n * vectorlength + id]; d_output[blockIdx.y * vectorlength + id] = sum / (float)nvectors; } } ///////////////// //Mean weighted// ///////////////// template<class T> void d_ReduceMeanWeighted(T* d_input, tfloat* d_inputweights, T* d_output, int vectorlength, int nvectors, int batch) { int TpB = tmin(NextMultipleOf(vectorlength, 32), 256); dim3 grid = dim3(tmin((vectorlength + TpB - 1) / TpB, 2048), batch); ReduceMeanWeightedKernel<T> << <grid, TpB >> > (d_input, d_inputweights, d_output, nvectors, vectorlength); } template void d_ReduceMeanWeighted<char>(char* d_input, tfloat* d_inputweights, char* d_output, int vectorlength, int nvectors, int batch); template void d_ReduceMeanWeighted<short>(short* d_input, tfloat* d_inputweights, short* d_output, int vectorlength, int nvectors, int batch); template void d_ReduceMeanWeighted<int>(int* d_input, tfloat* d_inputweights, int* d_output, int vectorlength, int nvectors, int batch); template void d_ReduceMeanWeighted<uint>(uint* d_input, tfloat* d_inputweights, uint* d_output, int vectorlength, int nvectors, int batch); template void d_ReduceMeanWeighted<float>(float* d_input, tfloat* d_inputweights, float* d_output, int vectorlength, int nvectors, int batch); template void d_ReduceMeanWeighted<double>(double* d_input, tfloat* d_inputweights, double* d_output, int vectorlength, int nvectors, int batch); template<class T> __global__ void ReduceMeanWeightedKernel(T* d_input, tfloat* d_inputweights, T* d_output, int nvectors, int vectorlength) { d_input += blockIdx.y * nvectors * vectorlength; for (uint id = blockIdx.x * blockDim.x + threadIdx.x; id < vectorlength; id += gridDim.x * blockDim.x) { T sum = (T)0; tfloat weightsum = 0; for (uint n = 0; n < nvectors; n++) { tfloat weight = d_inputweights[n * vectorlength + id]; weightsum += weight; sum += d_input[n * vectorlength + id] * weight; } if (weightsum != 0) d_output[blockIdx.y * vectorlength + id] = sum / weightsum; else 
d_output[blockIdx.y * vectorlength + id] = (T)0; } } ////// //Or// ////// template<class T> void d_ReduceOr(T* d_input, T* d_output, uint vectorlength, uint nvectors, uint batch) { int TpB = min(NextMultipleOf(nvectors, 32), 256); dim3 grid = dim3(min(vectorlength, 2048), min(batch, 32768)); ReduceOrKernel<T> << <grid, TpB >> > (d_input, d_output, nvectors, vectorlength, batch); } template void d_ReduceOr<char>(char* d_input, char* d_output, uint vectorlength, uint nvectors, uint batch); template void d_ReduceOr<uchar>(uchar* d_input, uchar* d_output, uint vectorlength, uint nvectors, uint batch); template void d_ReduceOr<short>(short* d_input, short* d_output, uint vectorlength, uint nvectors, uint batch); template void d_ReduceOr<ushort>(ushort* d_input, ushort* d_output, uint vectorlength, uint nvectors, uint batch); template void d_ReduceOr<int>(int* d_input, int* d_output, uint vectorlength, uint nvectors, uint batch); template void d_ReduceOr<uint>(uint* d_input, uint* d_output, uint vectorlength, uint nvectors, uint batch); template void d_ReduceOr<bool>(bool* d_input, bool* d_output, uint vectorlength, uint nvectors, uint batch); template<class T> __global__ void ReduceOrKernel(T* d_input, T* d_output, uint nvectors, uint vectorlength, uint batch) { d_input += blockIdx.y * nvectors * vectorlength; d_output += blockIdx.y * vectorlength; for (uint b = blockIdx.y; b < batch; b += gridDim.y, d_input += gridDim.y * nvectors * vectorlength, d_output += gridDim.y * vectorlength) { for (uint id = blockIdx.x * blockDim.x + threadIdx.x; id < vectorlength; id += gridDim.x * blockDim.x) { T sum = (T)0; for (uint n = 0; n < nvectors; n++) sum |= d_input[n * vectorlength + id]; d_output[id] = sum; } } } }
a7193496fa813517ba229f7f1bc91a136b0ab1b8.cu
#include "Prerequisites.cuh" namespace gtom { //////////////////////////// //CUDA kernel declarations// //////////////////////////// template<class T> __global__ void ReduceAddKernel(T* d_input, T* d_output, int nvectors, int vectorlength); template<class T> __global__ void ReduceMeanKernel(T* d_input, T* d_output, int nvectors, int vectorlength); template<class T> __global__ void ReduceMeanWeightedKernel(T* d_input, tfloat* d_inputweights, T* d_output, int nvectors, int vectorlength); template<class T> __global__ void ReduceOrKernel(T* d_input, T* d_output, uint nvectors, uint vectorlength, uint batch); //////////// //Addition// //////////// template<class T> void d_ReduceAdd(T* d_input, T* d_output, int vectorlength, int nvectors, int batch) { int TpB = min(NextMultipleOf(vectorlength, 32), 128); dim3 grid = dim3(min((vectorlength + TpB - 1) / TpB, 1024), batch); ReduceAddKernel<T> << <grid, TpB >> > (d_input, d_output, nvectors, vectorlength); } template void d_ReduceAdd<char>(char* d_input, char* d_output, int vectorlength, int nvectors, int batch); template void d_ReduceAdd<short>(short* d_input, short* d_output, int vectorlength, int nvectors, int batch); template void d_ReduceAdd<int>(int* d_input, int* d_output, int vectorlength, int nvectors, int batch); template void d_ReduceAdd<uint>(uint* d_input, uint* d_output, int vectorlength, int nvectors, int batch); template void d_ReduceAdd<float>(float* d_input, float* d_output, int vectorlength, int nvectors, int batch); template void d_ReduceAdd<double>(double* d_input, double* d_output, int vectorlength, int nvectors, int batch); template void d_ReduceAdd<float2>(float2* d_input, float2* d_output, int vectorlength, int nvectors, int batch); template<class T> __global__ void ReduceAddKernel(T* d_input, T* d_output, int nvectors, int vectorlength) { d_input += blockIdx.y * nvectors * vectorlength; d_output += blockIdx.y * vectorlength; for (int id = blockIdx.x * blockDim.x + threadIdx.x; id < vectorlength; id += gridDim.x * blockDim.x) { T sum = (T)0; for (int n = 0; n < nvectors; n++) sum += d_input[n * vectorlength + id]; d_output[id] = sum; } } template<> __global__ void ReduceAddKernel<float2>(float2* d_input, float2* d_output, int nvectors, int vectorlength) { d_input += blockIdx.y * nvectors * vectorlength; d_output += blockIdx.y * vectorlength; for (int id = blockIdx.x * blockDim.x + threadIdx.x; id < vectorlength; id += gridDim.x * blockDim.x) { float2 sum = make_float2(0.0f, 0.0f); for (int n = 0; n < nvectors; n++) sum += d_input[n * vectorlength + id]; d_output[id] = sum; } } //////// //Mean// //////// template<class T> void d_ReduceMean(T* d_input, T* d_output, int vectorlength, int nvectors, int batch) { int TpB = tmin(NextMultipleOf(vectorlength, 32), 256); dim3 grid = dim3(tmin((vectorlength + TpB - 1) / TpB, 2048), batch); ReduceMeanKernel<T> << <grid, TpB >> > (d_input, d_output, nvectors, vectorlength); } template void d_ReduceMean<char>(char* d_input, char* d_output, int vectorlength, int nvectors, int batch); template void d_ReduceMean<short>(short* d_input, short* d_output, int vectorlength, int nvectors, int batch); template void d_ReduceMean<int>(int* d_input, int* d_output, int vectorlength, int nvectors, int batch); template void d_ReduceMean<uint>(uint* d_input, uint* d_output, int vectorlength, int nvectors, int batch); template void d_ReduceMean<half>(half* d_input, half* d_output, int vectorlength, int nvectors, int batch); template void d_ReduceMean<float>(float* d_input, float* d_output, int 
vectorlength, int nvectors, int batch); template void d_ReduceMean<double>(double* d_input, double* d_output, int vectorlength, int nvectors, int batch); template void d_ReduceMean<float2>(float2* d_input, float2* d_output, int vectorlength, int nvectors, int batch); template<class T> __global__ void ReduceMeanKernel(T* d_input, T* d_output, int nvectors, int vectorlength) { d_input += blockIdx.y * nvectors * vectorlength; for (uint id = blockIdx.x * blockDim.x + threadIdx.x; id < vectorlength; id += gridDim.x * blockDim.x) { T sum = (T)0; for (uint n = 0; n < nvectors; n++) sum += d_input[n * vectorlength + id]; d_output[blockIdx.y * vectorlength + id] = sum / (T)nvectors; } } template<> __global__ void ReduceMeanKernel<half>(half* d_input, half* d_output, int nvectors, int vectorlength) { d_input += blockIdx.y * nvectors * vectorlength; for (uint id = blockIdx.x * blockDim.x + threadIdx.x; id < vectorlength; id += gridDim.x * blockDim.x) { float sum = 0.0f; for (uint n = 0; n < nvectors; n++) sum += __half2float(d_input[n * vectorlength + id]); d_output[blockIdx.y * vectorlength + id] = __float2half(sum / (float)nvectors); } } template<> __global__ void ReduceMeanKernel<float2>(float2* d_input, float2* d_output, int nvectors, int vectorlength) { d_input += blockIdx.y * nvectors * vectorlength; for (uint id = blockIdx.x * blockDim.x + threadIdx.x; id < vectorlength; id += gridDim.x * blockDim.x) { float2 sum = make_float2(0.0f, 0.0f); for (uint n = 0; n < nvectors; n++) sum += d_input[n * vectorlength + id]; d_output[blockIdx.y * vectorlength + id] = sum / (float)nvectors; } } ///////////////// //Mean weighted// ///////////////// template<class T> void d_ReduceMeanWeighted(T* d_input, tfloat* d_inputweights, T* d_output, int vectorlength, int nvectors, int batch) { int TpB = tmin(NextMultipleOf(vectorlength, 32), 256); dim3 grid = dim3(tmin((vectorlength + TpB - 1) / TpB, 2048), batch); ReduceMeanWeightedKernel<T> << <grid, TpB >> > (d_input, d_inputweights, d_output, nvectors, vectorlength); } template void d_ReduceMeanWeighted<char>(char* d_input, tfloat* d_inputweights, char* d_output, int vectorlength, int nvectors, int batch); template void d_ReduceMeanWeighted<short>(short* d_input, tfloat* d_inputweights, short* d_output, int vectorlength, int nvectors, int batch); template void d_ReduceMeanWeighted<int>(int* d_input, tfloat* d_inputweights, int* d_output, int vectorlength, int nvectors, int batch); template void d_ReduceMeanWeighted<uint>(uint* d_input, tfloat* d_inputweights, uint* d_output, int vectorlength, int nvectors, int batch); template void d_ReduceMeanWeighted<float>(float* d_input, tfloat* d_inputweights, float* d_output, int vectorlength, int nvectors, int batch); template void d_ReduceMeanWeighted<double>(double* d_input, tfloat* d_inputweights, double* d_output, int vectorlength, int nvectors, int batch); template<class T> __global__ void ReduceMeanWeightedKernel(T* d_input, tfloat* d_inputweights, T* d_output, int nvectors, int vectorlength) { d_input += blockIdx.y * nvectors * vectorlength; for (uint id = blockIdx.x * blockDim.x + threadIdx.x; id < vectorlength; id += gridDim.x * blockDim.x) { T sum = (T)0; tfloat weightsum = 0; for (uint n = 0; n < nvectors; n++) { tfloat weight = d_inputweights[n * vectorlength + id]; weightsum += weight; sum += d_input[n * vectorlength + id] * weight; } if (weightsum != 0) d_output[blockIdx.y * vectorlength + id] = sum / weightsum; else d_output[blockIdx.y * vectorlength + id] = (T)0; } } ////// //Or// ////// template<class T> 
void d_ReduceOr(T* d_input, T* d_output, uint vectorlength, uint nvectors, uint batch) { int TpB = min(NextMultipleOf(nvectors, 32), 256); dim3 grid = dim3(min(vectorlength, 2048), min(batch, 32768)); ReduceOrKernel<T> << <grid, TpB >> > (d_input, d_output, nvectors, vectorlength, batch); } template void d_ReduceOr<char>(char* d_input, char* d_output, uint vectorlength, uint nvectors, uint batch); template void d_ReduceOr<uchar>(uchar* d_input, uchar* d_output, uint vectorlength, uint nvectors, uint batch); template void d_ReduceOr<short>(short* d_input, short* d_output, uint vectorlength, uint nvectors, uint batch); template void d_ReduceOr<ushort>(ushort* d_input, ushort* d_output, uint vectorlength, uint nvectors, uint batch); template void d_ReduceOr<int>(int* d_input, int* d_output, uint vectorlength, uint nvectors, uint batch); template void d_ReduceOr<uint>(uint* d_input, uint* d_output, uint vectorlength, uint nvectors, uint batch); template void d_ReduceOr<bool>(bool* d_input, bool* d_output, uint vectorlength, uint nvectors, uint batch); template<class T> __global__ void ReduceOrKernel(T* d_input, T* d_output, uint nvectors, uint vectorlength, uint batch) { d_input += blockIdx.y * nvectors * vectorlength; d_output += blockIdx.y * vectorlength; for (uint b = blockIdx.y; b < batch; b += gridDim.y, d_input += gridDim.y * nvectors * vectorlength, d_output += gridDim.y * vectorlength) { for (uint id = blockIdx.x * blockDim.x + threadIdx.x; id < vectorlength; id += gridDim.x * blockDim.x) { T sum = (T)0; for (uint n = 0; n < nvectors; n++) sum |= d_input[n * vectorlength + id]; d_output[id] = sum; } } } }
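// A minimal, hypothetical host-side usage sketch for the reduction routines
// above (not part of the original file). It assumes T = float, that the
// d_Reduce* declarations are visible through the gtom headers, and that the
// input is laid out as batch * nvectors * vectorlength contiguous elements,
// exactly as the kernels above index it; the sizes below are illustrative only.
#if 0 // illustration only
#include "Prerequisites.cuh"
int reduce_mean_example()
{
    const int vectorlength = 1024, nvectors = 8, batch = 4;
    float* d_in = NULL;
    float* d_out = NULL;
    cudaMalloc((void**)&d_in, (size_t)batch * nvectors * vectorlength * sizeof(float));
    cudaMalloc((void**)&d_out, (size_t)batch * vectorlength * sizeof(float));
    // ... fill d_in with batch * nvectors vectors of length vectorlength ...
    // Each of the batch output vectors receives the per-element mean over the nvectors inputs.
    gtom::d_ReduceMean<float>(d_in, d_out, vectorlength, nvectors, batch);
    cudaDeviceSynchronize();
    cudaFree(d_in);
    cudaFree(d_out);
    return 0;
}
#endif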
5d4557c3157d89ee16bac757e7f29c656bf9811e.hip
// !!! This is a file automatically generated by hipify!!! #include <stdio.h> #include <hip/hip_runtime.h> #include <sys/time.h> #define CHECK(call)\ {\ hipError_t error = call;\ if (error != hipSuccess){\ printf("Error: %s:%d, ", __FILE__, __LINE__);\ printf("code:%d, reason: %s\n", error, hipGetErrorString(error));\ exit(-10*error);\ }\ }\ double cpuSecond(){ struct timeval tp; gettimeofday(&tp, NULL); return ((double)tp.tv_sec + (double)tp.tv_usec*1e-6); } void checkResult(float *hostRef, float *gpuRef, const int N){ double epsilon = 1.0E-8; bool match = 1; for (int i = 0; i < N; ++i) { if (abs(hostRef[i] - gpuRef[i]) > epsilon){ match = 0; printf("host %f gpu %f\n", hostRef[i], gpuRef[i]); break; } } if (match){ printf("Arrays match.\n\n"); } else{ printf("Arrays do not match.\n\n"); } } void initialInt(int *ip, int size){ for (int i = 0; i < size; ++i) { ip[i] = i; } } void initialData(float *ip, const int size){ for (int i = 0; i < size; ++i) { ip[i] = (float)(rand() & 0xFF) / 10.0f; } return; } __global__ void initialDataGPU(float *ip, int nx, int ny){ unsigned int ix = threadIdx.x + blockIdx.x * blockDim.x; unsigned int iy = threadIdx.y + blockIdx.y * blockDim.y; unsigned int idx = iy * nx + ix; int size = nx * ny; if (idx < size){ ip[idx] = (float)(size * idx) / 10.0f; } } void printMatrix(int *C, const int nx, const int ny){ int *ic = C; printf("\nMatrix: (%d,%d)\n", nx, ny); for (int iy = 0; iy < ny; ++iy) { for (int ix = 0; ix < nx; ++ix) { printf("%3d", ic[ix]); } ic += nx; printf("\n"); } printf("\n"); } void sumMatrixOnHost(float *A, float *B, float *C, const int nx, const int ny){ float *ia = A; float *ib = B; float *ic = C; for (int iy = 0; iy < ny; ++iy) { for (int ix = 0; ix < nx; ++ix) { ic[ix] = ia[ix] + ib[ix]; } ia += nx; ib += nx; ic += nx; } } __global__ void sumMatrixOnGPU2D(float *MatA, float *MatB, float *MatC, int nx, int ny){ unsigned int ix = threadIdx.x + blockIdx.x * blockDim.x; unsigned int iy = threadIdx.y + blockIdx.y * blockDim.y; unsigned int idx = iy * nx + ix; if (ix < nx && iy < ny){ MatC[idx] = MatA[idx] + MatB[idx]; } } __global__ void sumMatrixOnGPU1D(float *MatA, float *MatB, float *MatC, int nx, int ny){ unsigned int ix = threadIdx.x + blockIdx.x * blockDim.x; if (ix < nx){ for (int iy = 0; iy < ny; iy++) { int idx = iy * nx + ix; MatC[idx] = MatA[idx] + MatB[idx]; } } } __global__ void sumMatrixOnGPUMix(float *MatA, float *MatB, float *MatC, int nx, int ny){ unsigned int ix = threadIdx.x + blockIdx.x * blockDim.x; unsigned int iy = blockIdx.y; if (ix < nx && iy < ny){ unsigned int idx = iy * nx + ix; MatC[idx] = MatA[idx] + MatB[idx]; } } int main(int argc, char **argv) { printf("%s Starting...\n", argv[0]); int dev = 0; struct hipDeviceProp_t deviceProp; CHECK(hipGetDeviceProperties(&deviceProp, dev)); printf("Using Device %d: %s\n", dev, deviceProp.name); CHECK(hipSetDevice(dev)); int nx = 1<<14; int ny = 1<<14; int nxy = nx * ny; int nBytes = nxy * sizeof(float); printf("Matrix size: nx %d ny %d\n",nx,ny); float *h_A, *h_B, *hostRef, *gpuRef; h_A = (float *)malloc(nBytes); h_B = (float *)malloc(nBytes); hostRef = (float *)malloc(nBytes); gpuRef = (float *)malloc(nBytes); memset(hostRef, 0, nBytes); memset(gpuRef, 0, nBytes); float *d_MatA, *d_MatB, *d_MatC; hipMalloc((void **)&d_MatA, nBytes); hipMalloc((void **)&d_MatB, nBytes); hipMalloc((void **)&d_MatC, nBytes); int dimx = 256; int dimy = 1; dim3 block(dimx, dimy); dim3 grid((nx+block.x-1)/block.x, ny); double iStart = cpuSecond(); hipLaunchKernelGGL(( initialDataGPU), dim3(nxy), 
dim3(1), 0, 0, d_MatA, nx, ny); hipLaunchKernelGGL(( initialDataGPU), dim3(nxy), dim3(1), 0, 0, d_MatB, nx, ny); double iElaps = cpuSecond() - iStart; printf("initial on GPU use %f\n", iElaps); iStart = cpuSecond(); initialData(h_A, nxy); initialData(h_B, nxy); iElaps = cpuSecond() - iStart; printf("initial use %f\n", iElaps); iStart = cpuSecond(); // sumMatrixOnGPU2D<<<grid, block>>>(d_MatA, d_MatB, d_MatC, nx, ny); // sumMatrixOnGPU1D<<<grid, block>>>(d_MatA, d_MatB, d_MatC, nx, ny); hipLaunchKernelGGL(( sumMatrixOnGPUMix), dim3(grid), dim3(block), 0, 0, d_MatA, d_MatB, d_MatC, nx, ny); hipDeviceSynchronize(); iElaps = cpuSecond() - iStart; printf("sumMatrixOnGPU2D <<<(%d,%d), (%d,%d)>>> elapsed %f sec\n", grid.x, grid.y, block.x, block.y, iElaps); hipMemcpy(h_A, d_MatA, nBytes, hipMemcpyDeviceToHost); hipMemcpy(h_B, d_MatB, nBytes, hipMemcpyDeviceToHost); hipMemcpy(gpuRef, d_MatC, nBytes, hipMemcpyDeviceToHost); iStart = cpuSecond(); sumMatrixOnHost(h_A, h_B, hostRef, nx, ny); iElaps = cpuSecond() - iStart; printf("run on host use %f\n", iElaps); checkResult(hostRef, gpuRef, nxy); hipFree(d_MatA); hipFree(d_MatB); hipFree(d_MatC); free(h_A); free(h_B); free(hostRef); free(gpuRef); hipDeviceReset(); return (0); }
5d4557c3157d89ee16bac757e7f29c656bf9811e.cu
#include <stdio.h> #include <cuda_runtime.h> #include <sys/time.h> #define CHECK(call)\ {\ cudaError_t error = call;\ if (error != cudaSuccess){\ printf("Error: %s:%d, ", __FILE__, __LINE__);\ printf("code:%d, reason: %s\n", error, cudaGetErrorString(error));\ exit(-10*error);\ }\ }\ double cpuSecond(){ struct timeval tp; gettimeofday(&tp, NULL); return ((double)tp.tv_sec + (double)tp.tv_usec*1e-6); } void checkResult(float *hostRef, float *gpuRef, const int N){ double epsilon = 1.0E-8; bool match = 1; for (int i = 0; i < N; ++i) { if (abs(hostRef[i] - gpuRef[i]) > epsilon){ match = 0; printf("host %f gpu %f\n", hostRef[i], gpuRef[i]); break; } } if (match){ printf("Arrays match.\n\n"); } else{ printf("Arrays do not match.\n\n"); } } void initialInt(int *ip, int size){ for (int i = 0; i < size; ++i) { ip[i] = i; } } void initialData(float *ip, const int size){ for (int i = 0; i < size; ++i) { ip[i] = (float)(rand() & 0xFF) / 10.0f; } return; } __global__ void initialDataGPU(float *ip, int nx, int ny){ unsigned int ix = threadIdx.x + blockIdx.x * blockDim.x; unsigned int iy = threadIdx.y + blockIdx.y * blockDim.y; unsigned int idx = iy * nx + ix; int size = nx * ny; if (idx < size){ ip[idx] = (float)(size * idx) / 10.0f; } } void printMatrix(int *C, const int nx, const int ny){ int *ic = C; printf("\nMatrix: (%d,%d)\n", nx, ny); for (int iy = 0; iy < ny; ++iy) { for (int ix = 0; ix < nx; ++ix) { printf("%3d", ic[ix]); } ic += nx; printf("\n"); } printf("\n"); } void sumMatrixOnHost(float *A, float *B, float *C, const int nx, const int ny){ float *ia = A; float *ib = B; float *ic = C; for (int iy = 0; iy < ny; ++iy) { for (int ix = 0; ix < nx; ++ix) { ic[ix] = ia[ix] + ib[ix]; } ia += nx; ib += nx; ic += nx; } } __global__ void sumMatrixOnGPU2D(float *MatA, float *MatB, float *MatC, int nx, int ny){ unsigned int ix = threadIdx.x + blockIdx.x * blockDim.x; unsigned int iy = threadIdx.y + blockIdx.y * blockDim.y; unsigned int idx = iy * nx + ix; if (ix < nx && iy < ny){ MatC[idx] = MatA[idx] + MatB[idx]; } } __global__ void sumMatrixOnGPU1D(float *MatA, float *MatB, float *MatC, int nx, int ny){ unsigned int ix = threadIdx.x + blockIdx.x * blockDim.x; if (ix < nx){ for (int iy = 0; iy < ny; iy++) { int idx = iy * nx + ix; MatC[idx] = MatA[idx] + MatB[idx]; } } } __global__ void sumMatrixOnGPUMix(float *MatA, float *MatB, float *MatC, int nx, int ny){ unsigned int ix = threadIdx.x + blockIdx.x * blockDim.x; unsigned int iy = blockIdx.y; if (ix < nx && iy < ny){ unsigned int idx = iy * nx + ix; MatC[idx] = MatA[idx] + MatB[idx]; } } int main(int argc, char **argv) { printf("%s Starting...\n", argv[0]); int dev = 0; struct cudaDeviceProp deviceProp; CHECK(cudaGetDeviceProperties(&deviceProp, dev)); printf("Using Device %d: %s\n", dev, deviceProp.name); CHECK(cudaSetDevice(dev)); int nx = 1<<14; int ny = 1<<14; int nxy = nx * ny; int nBytes = nxy * sizeof(float); printf("Matrix size: nx %d ny %d\n",nx,ny); float *h_A, *h_B, *hostRef, *gpuRef; h_A = (float *)malloc(nBytes); h_B = (float *)malloc(nBytes); hostRef = (float *)malloc(nBytes); gpuRef = (float *)malloc(nBytes); memset(hostRef, 0, nBytes); memset(gpuRef, 0, nBytes); float *d_MatA, *d_MatB, *d_MatC; cudaMalloc((void **)&d_MatA, nBytes); cudaMalloc((void **)&d_MatB, nBytes); cudaMalloc((void **)&d_MatC, nBytes); int dimx = 256; int dimy = 1; dim3 block(dimx, dimy); dim3 grid((nx+block.x-1)/block.x, ny); double iStart = cpuSecond(); initialDataGPU<<<nxy, 1>>>(d_MatA, nx, ny); initialDataGPU<<<nxy, 1>>>(d_MatB, nx, ny); double iElaps = 
cpuSecond() - iStart; printf("initial on GPU use %f\n", iElaps); iStart = cpuSecond(); initialData(h_A, nxy); initialData(h_B, nxy); iElaps = cpuSecond() - iStart; printf("initial use %f\n", iElaps); iStart = cpuSecond(); // sumMatrixOnGPU2D<<<grid, block>>>(d_MatA, d_MatB, d_MatC, nx, ny); // sumMatrixOnGPU1D<<<grid, block>>>(d_MatA, d_MatB, d_MatC, nx, ny); sumMatrixOnGPUMix<<<grid, block>>>(d_MatA, d_MatB, d_MatC, nx, ny); cudaDeviceSynchronize(); iElaps = cpuSecond() - iStart; printf("sumMatrixOnGPU2D <<<(%d,%d), (%d,%d)>>> elapsed %f sec\n", grid.x, grid.y, block.x, block.y, iElaps); cudaMemcpy(h_A, d_MatA, nBytes, cudaMemcpyDeviceToHost); cudaMemcpy(h_B, d_MatB, nBytes, cudaMemcpyDeviceToHost); cudaMemcpy(gpuRef, d_MatC, nBytes, cudaMemcpyDeviceToHost); iStart = cpuSecond(); sumMatrixOnHost(h_A, h_B, hostRef, nx, ny); iElaps = cpuSecond() - iStart; printf("run on host use %f\n", iElaps); checkResult(hostRef, gpuRef, nxy); cudaFree(d_MatA); cudaFree(d_MatB); cudaFree(d_MatC); free(h_A); free(h_B); free(hostRef); free(gpuRef); cudaDeviceReset(); return (0); }
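// A minimal sketch (not part of the original file) of how the CHECK macro
// defined above could also wrap the allocations, copies and kernel launches,
// and of launching initialDataGPU with the same 2-D grid/block as the sum
// kernel, since that kernel derives ix/iy from blockIdx.x and blockIdx.y.
// Variable names reuse the ones from main() above; illustration only.
#if 0 // illustration only
CHECK(cudaMalloc((void **)&d_MatA, nBytes));
CHECK(cudaMalloc((void **)&d_MatB, nBytes));
CHECK(cudaMalloc((void **)&d_MatC, nBytes));
initialDataGPU<<<grid, block>>>(d_MatA, nx, ny); // 2-D grid covers every row, one thread per element
initialDataGPU<<<grid, block>>>(d_MatB, nx, ny);
sumMatrixOnGPUMix<<<grid, block>>>(d_MatA, d_MatB, d_MatC, nx, ny);
CHECK(cudaGetLastError());      // launch/configuration errors
CHECK(cudaDeviceSynchronize()); // asynchronous execution errors
CHECK(cudaMemcpy(gpuRef, d_MatC, nBytes, cudaMemcpyDeviceToHost));
#endif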
cb78008f3382d02e001bd52f96838ecb6f9aff6b.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "device_launch_parameters.h" #include <iostream> #include <stdio.h> #include <stdlib.h> #include <math.h> #include <string.h> #include <sys/timeb.h> #include "config.h" #include "util.h" __device__ void set_pixel(unsigned char* image, int width, int x, int y, unsigned char* c) { image[4 * width * y + 4 * x + 0] = c[0]; image[4 * width * y + 4 * x + 1] = c[1]; image[4 * width * y + 4 * x + 2] = c[2]; image[4 * width * y + 4 * x + 3] = 255; } __global__ void kernel(unsigned char* image, unsigned char* colormap) { int row, col, index, iteration; double c_re, c_im, x, y, x_new; int width = WIDTH; int height = HEIGHT; int maxIterations = MAX_ITERATION; index = blockIdx.x * blockDim.x + threadIdx.x; row = index / width; col = index % width; if (row >= height || col >= width || index >= LENGTH) { return; } c_re = (col - width / 2.0) * 4.0 / width; c_im = (row - height / 2.0) * 4.0 / width; x = 0, y = 0; iteration = 0; while (x * x + y * y <= 4 && iteration < maxIterations) { x_new = x * x - y * y + c_re; y = 2 * x * y + c_im; x = x_new; iteration++; } if (iteration > maxIterations) { iteration = maxIterations; } set_pixel(image, width, col, row, &colormap[iteration * 3]); } void onFail(const hipError_t& err, const char message[]) { if (err != hipSuccess) { fprintf(stderr, "%s, Error: %s\n" ,message ,hipGetErrorString(err)); exit(EXIT_FAILURE); } } int main(void) { unsigned char* colormapCuda, *imageCuda; double times[REPEAT]; struct timeb start, end; int i, r; char path[255]; int colormapSize = (MAX_ITERATION + 1) * 3; int imageSize = LENGTH * 4; unsigned char* colormap = (unsigned char*)malloc(colormapSize); unsigned char* image = (unsigned char*)malloc(imageSize); init_colormap(MAX_ITERATION, colormap); hipError_t err = hipSuccess; err = hipSetDevice(0); onFail(err, "Failed to set device!"); //int ndev; //hipDeviceProp_t p; //err = hipGetDeviceCount(&ndev); //onFail(err, "Failed to hipGetDeviceCount!"); //for (i = 0; i < ndev; i++) //{ // err = hipGetDeviceProperties(&p, i); // onFail(err, "Failed hipGetDeviceProperties!"); // printf("Name: %s\n", p.name); // printf("Compute capability: %d.%d\n", p.major, p.minor); // printf("Max threads/block: %d\n", p.maxThreadsPerBlock); // printf("Max block size: %d x %d x %d\n", p.maxThreadsDim[0], p.maxThreadsDim[1], p.maxThreadsDim[2]); // printf("Max grid size: %d x %d x %d\n", p.maxGridSize[0], p.maxGridSize[1], p.maxGridSize[2]); // printf("Warp size: %d\n", p.warpSize); //} for (r = 0; r < REPEAT; r++) { memset(image, 0, imageSize); ftime(&start); err = hipMalloc(&colormapCuda, colormapSize * sizeof(unsigned char)); onFail(err, "Failed to hipMalloc colormapCuda!"); err = hipMalloc(&imageCuda, imageSize * sizeof(unsigned char)); onFail(err, "Failed to hipMalloc imageCuda!"); err = hipMemcpy(colormapCuda, colormap, colormapSize * sizeof(unsigned char), hipMemcpyHostToDevice); onFail(err, "Failed to hipMemcpy hostToDevice colormapCuda!"); err = hipMemcpy(imageCuda, image, imageSize * sizeof(unsigned char), hipMemcpyHostToDevice); onFail(err, "Failed to hipMemcpy hostToDevice imageCuda!"); std::cout << "kernel " << GRID_SIZE << ", " << BLOCK_SIZE << " LEN: " << LENGTH <<"\n"; kernel << <GRID_SIZE, BLOCK_SIZE >> > (imageCuda, colormapCuda); err = hipGetLastError(); onFail(err, "Failed to launch kernel!"); err = hipDeviceSynchronize(); onFail(err, "Failed to synchronize"); err = hipMemcpy(image, imageCuda, imageSize * sizeof(unsigned char), 
hipMemcpyDeviceToHost); onFail(err, "Failed to hipMemcpy deviceToHost imageCuda!"); hipFree(colormapCuda); hipFree(imageCuda); ftime(&end); times[r] = end.time - start.time + ((double)end.millitm - (double)start.millitm) / 1000.0; sprintf(path, IMAGE, "gpu", r); save_image(path, image, WIDTH, HEIGHT); progress("gpu", r, times[r]); } report("gpu", times); err = hipDeviceReset(); onFail(err, "Failed to hipDeviceReset!"); free(image); free(colormap); hipFree(colormapCuda); hipFree(imageCuda); return 0; }
cb78008f3382d02e001bd52f96838ecb6f9aff6b.cu
#include "cuda_runtime.h" #include "device_launch_parameters.h" #include <iostream> #include <stdio.h> #include <stdlib.h> #include <math.h> #include <string.h> #include <sys/timeb.h> #include "config.h" #include "util.h" __device__ void set_pixel(unsigned char* image, int width, int x, int y, unsigned char* c) { image[4 * width * y + 4 * x + 0] = c[0]; image[4 * width * y + 4 * x + 1] = c[1]; image[4 * width * y + 4 * x + 2] = c[2]; image[4 * width * y + 4 * x + 3] = 255; } __global__ void kernel(unsigned char* image, unsigned char* colormap) { int row, col, index, iteration; double c_re, c_im, x, y, x_new; int width = WIDTH; int height = HEIGHT; int maxIterations = MAX_ITERATION; index = blockIdx.x * blockDim.x + threadIdx.x; row = index / width; col = index % width; if (row >= height || col >= width || index >= LENGTH) { return; } c_re = (col - width / 2.0) * 4.0 / width; c_im = (row - height / 2.0) * 4.0 / width; x = 0, y = 0; iteration = 0; while (x * x + y * y <= 4 && iteration < maxIterations) { x_new = x * x - y * y + c_re; y = 2 * x * y + c_im; x = x_new; iteration++; } if (iteration > maxIterations) { iteration = maxIterations; } set_pixel(image, width, col, row, &colormap[iteration * 3]); } void onFail(const cudaError_t& err, const char message[]) { if (err != cudaSuccess) { fprintf(stderr, "%s, Error: %s\n" ,message ,cudaGetErrorString(err)); exit(EXIT_FAILURE); } } int main(void) { unsigned char* colormapCuda, *imageCuda; double times[REPEAT]; struct timeb start, end; int i, r; char path[255]; int colormapSize = (MAX_ITERATION + 1) * 3; int imageSize = LENGTH * 4; unsigned char* colormap = (unsigned char*)malloc(colormapSize); unsigned char* image = (unsigned char*)malloc(imageSize); init_colormap(MAX_ITERATION, colormap); cudaError_t err = cudaSuccess; err = cudaSetDevice(0); onFail(err, "Failed to set device!"); //int ndev; //cudaDeviceProp p; //err = cudaGetDeviceCount(&ndev); //onFail(err, "Failed to cudaGetDeviceCount!"); //for (i = 0; i < ndev; i++) //{ // err = cudaGetDeviceProperties(&p, i); // onFail(err, "Failed cudaGetDeviceProperties!"); // printf("Name: %s\n", p.name); // printf("Compute capability: %d.%d\n", p.major, p.minor); // printf("Max threads/block: %d\n", p.maxThreadsPerBlock); // printf("Max block size: %d x %d x %d\n", p.maxThreadsDim[0], p.maxThreadsDim[1], p.maxThreadsDim[2]); // printf("Max grid size: %d x %d x %d\n", p.maxGridSize[0], p.maxGridSize[1], p.maxGridSize[2]); // printf("Warp size: %d\n", p.warpSize); //} for (r = 0; r < REPEAT; r++) { memset(image, 0, imageSize); ftime(&start); err = cudaMalloc(&colormapCuda, colormapSize * sizeof(unsigned char)); onFail(err, "Failed to cudaMalloc colormapCuda!"); err = cudaMalloc(&imageCuda, imageSize * sizeof(unsigned char)); onFail(err, "Failed to cudaMalloc imageCuda!"); err = cudaMemcpy(colormapCuda, colormap, colormapSize * sizeof(unsigned char), cudaMemcpyHostToDevice); onFail(err, "Failed to cudaMemcpy hostToDevice colormapCuda!"); err = cudaMemcpy(imageCuda, image, imageSize * sizeof(unsigned char), cudaMemcpyHostToDevice); onFail(err, "Failed to cudaMemcpy hostToDevice imageCuda!"); std::cout << "kernel " << GRID_SIZE << ", " << BLOCK_SIZE << " LEN: " << LENGTH <<"\n"; kernel << <GRID_SIZE, BLOCK_SIZE >> > (imageCuda, colormapCuda); err = cudaGetLastError(); onFail(err, "Failed to launch kernel!"); err = cudaDeviceSynchronize(); onFail(err, "Failed to synchronize"); err = cudaMemcpy(image, imageCuda, imageSize * sizeof(unsigned char), cudaMemcpyDeviceToHost); onFail(err, "Failed to cudaMemcpy 
deviceToHost imageCuda!"); cudaFree(colormapCuda); cudaFree(imageCuda); ftime(&end); times[r] = end.time - start.time + ((double)end.millitm - (double)start.millitm) / 1000.0; sprintf(path, IMAGE, "gpu", r); save_image(path, image, WIDTH, HEIGHT); progress("gpu", r, times[r]); } report("gpu", times); err = cudaDeviceReset(); onFail(err, "Failed to cudaDeviceReset!"); free(image); free(colormap); cudaFree(colormapCuda); cudaFree(imageCuda); return 0; }
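// config.h is not part of this dump; the values below are a hypothetical,
// self-consistent choice (not the author's) based on how the kernel above
// uses the symbols: one thread per pixel, a flat index guarded by
// index >= LENGTH, and a 4-byte RGBA pixel buffer of size LENGTH * 4.
#if 0 // illustration only; the real config.h may differ
#define WIDTH         1920
#define HEIGHT        1080
#define LENGTH        (WIDTH * HEIGHT)                          // one thread per pixel
#define MAX_ITERATION 256
#define REPEAT        5
#define IMAGE         "fractal_%s_%d.png"                       // pattern used by sprintf(path, IMAGE, "gpu", r)
#define BLOCK_SIZE    256
#define GRID_SIZE     ((LENGTH + BLOCK_SIZE - 1) / BLOCK_SIZE)  // ceiling division
#endif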
ebeb62577ba9ef375389056abc05067201fc508b.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // cf. http://stackoverflow.com/questions/19287461/compiling-code-containing-dynamic-parallelism-fails /** Compilation tip: * nvcc -arch=sm_35 -rdc=true cdpSimplePrint_00.cu -o cdpSimplePrint_00.exe * * Explanation * without the -arch=sm_35 (architecture, SM 3.5) flag, I obtain this error: * error: calling a __global__ function("cdp_kernel") from a __global__ function("cdp_kernel") is only allowed on the compute_35 architecture or above * * On my "GeForce GTX 980 Ti" it says * CUDA Capability Major/Minor version number: 5.2 * from running * cs344/Lesson Code Snippets/Lesson 5 Code Snippets/deviceQuery_simplified.exe * * so flag -arch=sm_52 works, i.e. * nvcc -arch=sm_52 -rdc=true cdpSimplePrint_00.cu -o cdpSimplePrint_00.exe * * without -rdc=true flag, I obtain error: * error: kernel launch from __device__ or __global__ functions requires separate compilation mode * rdc is --relocatable-device-code=true * from CUDA Toolkit Documentation: * Enable (disable) the generation of relocatable device code. * If disabled, executable device code is generated. * Relocatable device code must be linked before it can be executed. * * Allowed values for this option: true, false. * Default value: false * * */ #include <iostream> #include <cstdio> // stderr, fprintf, printf #include "utils.h" // checkCudaErrors //////////////////////////////////////////////////////////////////////////////// // Variable on the GPU used to generate unique identifiers of blocks. //////////////////////////////////////////////////////////////////////////////// __device__ int g_uids = 0; //////////////////////////////////////////////////////////////////////////////// // Print a simple message to signal the block which is currently executing. //////////////////////////////////////////////////////////////////////////////// __device__ void print_info(int depth, int thread, int uid, int parent_uid) { if (threadIdx.x == 0) { if (depth == 0) printf("BLOCK %d launched by the host\n", uid); else { char buffer[32]; for (int i = 0 ; i < depth ; ++i) { buffer[3*i+0] = '|'; buffer[3*i+1] = ' '; buffer[3*i+2] = ' '; } buffer[3*depth] = '\0'; // printf("%sBLOCK %d launched by thread %d of block %d\n", buffer, uid, thread, parent_uid); printf("%s thread %d launches BLOCK (i.e. uid) %d of block (parent_uid) %d \n", buffer, thread, uid, parent_uid) ; } } __syncthreads(); } //////////////////////////////////////////////////////////////////////////////// // The kernel using CUDA dynamic parallelism. // // It generates a unique identifier for each block. Prints the information // about that block. Finally, if the 'max_depth' has not been reached, the // block launches new blocks directly from the GPU. //////////////////////////////////////////////////////////////////////////////// __global__ void cdp_kernel(int max_depth, int depth, int thread, int parent_uid) { // We create a unique ID per block. Thread 0 does that and shares the value with the other threads. __shared__ int s_uid; if (threadIdx.x == 0) { s_uid = atomicAdd(&g_uids, 1); } __syncthreads(); // We print the ID of the block and information about its parent. print_info(depth, thread, s_uid, parent_uid); // We launch new blocks if we haven't reached the max_depth yet. 
if (++depth >= max_depth) { return; } hipLaunchKernelGGL(( cdp_kernel), dim3(gridDim.x), dim3(blockDim.x), 0, 0, max_depth, depth, threadIdx.x, s_uid); } //////////////////////////////////////////////////////////////////////////////// // Variable on the GPU used to generate unique identifiers of blocks, for // my version of the kernels //////////////////////////////////////////////////////////////////////////////// __device__ int g_uids_2 = 0; //////////////////////////////////////////////////////////////////////////////// // Print a simple message to signal the block which is currently executing; // this is my version //////////////////////////////////////////////////////////////////////////////// __device__ void print_info_2(int depth, int thread, int uid, int parent_uid) { if (threadIdx.x == 0) { if (depth == 0) printf("BLOCK %d launched by the host\n", uid); else { char buffer[32]; for (int i = 0 ; i < depth ; ++i) { buffer[3*i+0] = '|'; buffer[3*i+1] = ' '; buffer[3*i+2] = ' '; } buffer[3*depth] = '\0'; // '\0' is termination of the string or char array // printf("%s thread %d launches BLOCK (i.e. uid) %d of block (parent_uid) %d \n", // buffer, thread, uid, parent_uid) ; printf("%s depth %d thread %d launches BLOCK (i.e. uid) %d of block (parent_uid) %d \n", buffer, depth, thread, uid, parent_uid) ; } } __syncthreads(); } //////////////////////////////////////////////////////////////////////////////// // My version of cdp_kernel, cdp_kernel_2 //////////////////////////////////////////////////////////////////////////////// __global__ void cdp_kernel_2(int max_depth, int depth, int thread, int parent_uid) { // We create a unique ID per block. Thread 0 does that and shares the value with the other threads. __shared__ int s_uid_2; if (threadIdx.x == 0) { s_uid_2 = atomicAdd(&g_uids_2, 1); } __syncthreads(); // We print the ID of the block and information about its parent. print_info_2(depth, thread, s_uid_2, parent_uid); ++depth; if (depth >= max_depth) { return; } hipLaunchKernelGGL(( cdp_kernel_2), dim3(gridDim.x),dim3(blockDim.x), 0, 0, max_depth,depth, threadIdx.x, s_uid_2); } //////////////////////////////////////////////////////////////////////////////// // Variable on the GPU used to generate unique identifiers of blocks, for // my version of the kernels, with blockIdx.x print out //////////////////////////////////////////////////////////////////////////////// __device__ int g_uids_3 = 0; //////////////////////////////////////////////////////////////////////////////// // Print a simple message to signal the block which is currently executing; // this is my version, with blcokIdx.x print out //////////////////////////////////////////////////////////////////////////////// __device__ void print_info_3(int depth, int thread, int uid, int parent_uid, int blockIndex) { if (threadIdx.x == 0) { if (depth == 0) printf("BLOCK %d launched by the host\n", uid); else { char buffer[32]; for (int i = 0 ; i < depth ; ++i) { buffer[3*i+0] = '|'; buffer[3*i+1] = ' '; buffer[3*i+2] = ' '; } buffer[3*depth] = '\0'; // '\0' is termination of the string or char array // printf("%s thread %d launches BLOCK (i.e. uid) %d of block (parent_uid) %d \n", // buffer, thread, uid, parent_uid) ; printf("%s thread %d launches BLOCK (i.e. 
uid) %d of block (parent_uid) %d for threadIdx.x = %d , blockIdx.x = %d , called from blockIndex = %d \n", buffer, thread, uid, parent_uid, threadIdx.x, blockIdx.x, blockIndex) ; } } __syncthreads(); } //////////////////////////////////////////////////////////////////////////////// // My version of cdp_kernel, cdp_kernel_2 //////////////////////////////////////////////////////////////////////////////// __global__ void cdp_kernel_3(int max_depth, int depth, int thread, int parent_uid, int blockIndex) { // We create a unique ID per block. Thread 0 does that and shares the value with the other threads. __shared__ int s_uid; if (threadIdx.x == 0) { s_uid = atomicAdd(&g_uids_3, 1); } __syncthreads(); // We print the ID of the block and information about its parent. print_info_3(depth, thread, s_uid, parent_uid, blockIndex); ++depth; if (depth >= max_depth) { return; } hipLaunchKernelGGL(( cdp_kernel_3), dim3(gridDim.x),dim3(blockDim.x), 0, 0, max_depth,depth, threadIdx.x, s_uid, blockIdx.x); } //////////////////////////////////////////////////////////////////////////////// // Main entry point. //////////////////////////////////////////////////////////////////////////////// int main(int argc, char **argv) { printf("starting Simple Print (CUDA Dynamic Parallelism)\n"); // Parse a few command-line arguments. int max_depth = 2; // originally 2 // Print a message describing what the sample does. printf("***************************************************************************\n"); printf("The CPU launches 2 blocks of 2 threads each. On the device each thread will\n"); printf("launch 2 blocks of 2 threads each. The GPU we will do that recursively\n"); printf("until it reaches max_depth=%d\n\n", max_depth); printf("In total 2"); int num_blocks = 2, sum = 2; for (int i = 1 ; i < max_depth ; ++i) { num_blocks *= 4; printf("+%d", num_blocks); sum += num_blocks; } printf("=%d blocks are launched!!! (%d from the GPU)\n", sum, sum-2); printf("***************************************************************************\n\n"); // We set the recursion limit for CDP to max_depth. hipDeviceSetLimit(hipLimitDevRuntimeSyncDepth, max_depth); // Launch the kernel from the CPU. printf("Launching cdp_kernel() with CUDA Dynamic Parallelism:\n\n"); hipLaunchKernelGGL(( cdp_kernel), dim3(2), dim3(2), 0, 0, max_depth, 0, 0, -1); checkCudaErrors(hipGetLastError()); // Finalize. checkCudaErrors(hipDeviceSynchronize()); /////////////////////////////////////////////////////////////////// // MY VERSION/modifications that I made to teach myself about // DYNAMIC PARALLELISM ////////////////////////////////////////////////////////////////// printf("experimenting (playing) with Simple Print (CUDA Dynamic Parallelism)\n"); std::cout << "Input in the new max_depth (maximum depth)" << std::endl; int max_depth_2 = 2; std::cin >> max_depth_2; // We set the recursion limit for CDP to max_depth. hipDeviceSetLimit(hipLimitDevRuntimeSyncDepth, max_depth_2); // Launch the kernel from the CPU. printf("Launching cdp_kernel_2() with CUDA Dynamic Parallelism:\n\n"); hipLaunchKernelGGL(( cdp_kernel_2), dim3(2), dim3(2), 0, 0, max_depth_2, 0, 0, -1); checkCudaErrors(hipGetLastError()); // Finalize. 
checkCudaErrors(hipDeviceSynchronize()); /////////////////////////////////////////////////////////////////// // MY VERSION/modifications that I made to teach myself about // DYNAMIC PARALLELISM; I also print out blockIdx ////////////////////////////////////////////////////////////////// printf("experimenting (playing) with Simple Print (CUDA Dynamic Parallelism); print out blockIdx as well \n"); std::cout << "Input in the new max_depth (maximum depth)" << std::endl; int max_depth_3 = 2; std::cin >> max_depth_3; // We set the recursion limit for CDP to max_depth. hipDeviceSetLimit(hipLimitDevRuntimeSyncDepth, max_depth_3); // Launch the kernel from the CPU. printf("Launching cdp_kernel_3() with CUDA Dynamic Parallelism:\n\n"); hipLaunchKernelGGL(( cdp_kernel_3), dim3(2), dim3(2), 0, 0, max_depth_3, 0, 0, -1,0); checkCudaErrors(hipGetLastError()); // Finalize. checkCudaErrors(hipDeviceSynchronize()); exit(EXIT_SUCCESS); }
ebeb62577ba9ef375389056abc05067201fc508b.cu
// cf. http://stackoverflow.com/questions/19287461/compiling-code-containing-dynamic-parallelism-fails /** Compilation tip: * nvcc -arch=sm_35 -rdc=true cdpSimplePrint_00.cu -o cdpSimplePrint_00.exe * * Explanation * without the -arch=sm_35 (architecture, SM 3.5) flag, I obtain this error: * error: calling a __global__ function("cdp_kernel") from a __global__ function("cdp_kernel") is only allowed on the compute_35 architecture or above * * On my "GeForce GTX 980 Ti" it says * CUDA Capability Major/Minor version number: 5.2 * from running * cs344/Lesson Code Snippets/Lesson 5 Code Snippets/deviceQuery_simplified.exe * * so flag -arch=sm_52 works, i.e. * nvcc -arch=sm_52 -rdc=true cdpSimplePrint_00.cu -o cdpSimplePrint_00.exe * * without -rdc=true flag, I obtain error: * error: kernel launch from __device__ or __global__ functions requires separate compilation mode * rdc is --relocatable-device-code=true * from CUDA Toolkit Documentation: * Enable (disable) the generation of relocatable device code. * If disabled, executable device code is generated. * Relocatable device code must be linked before it can be executed. * * Allowed values for this option: true, false. * Default value: false * * */ #include <iostream> #include <cstdio> // stderr, fprintf, printf #include "utils.h" // checkCudaErrors //////////////////////////////////////////////////////////////////////////////// // Variable on the GPU used to generate unique identifiers of blocks. //////////////////////////////////////////////////////////////////////////////// __device__ int g_uids = 0; //////////////////////////////////////////////////////////////////////////////// // Print a simple message to signal the block which is currently executing. //////////////////////////////////////////////////////////////////////////////// __device__ void print_info(int depth, int thread, int uid, int parent_uid) { if (threadIdx.x == 0) { if (depth == 0) printf("BLOCK %d launched by the host\n", uid); else { char buffer[32]; for (int i = 0 ; i < depth ; ++i) { buffer[3*i+0] = '|'; buffer[3*i+1] = ' '; buffer[3*i+2] = ' '; } buffer[3*depth] = '\0'; // printf("%sBLOCK %d launched by thread %d of block %d\n", buffer, uid, thread, parent_uid); printf("%s thread %d launches BLOCK (i.e. uid) %d of block (parent_uid) %d \n", buffer, thread, uid, parent_uid) ; } } __syncthreads(); } //////////////////////////////////////////////////////////////////////////////// // The kernel using CUDA dynamic parallelism. // // It generates a unique identifier for each block. Prints the information // about that block. Finally, if the 'max_depth' has not been reached, the // block launches new blocks directly from the GPU. //////////////////////////////////////////////////////////////////////////////// __global__ void cdp_kernel(int max_depth, int depth, int thread, int parent_uid) { // We create a unique ID per block. Thread 0 does that and shares the value with the other threads. __shared__ int s_uid; if (threadIdx.x == 0) { s_uid = atomicAdd(&g_uids, 1); } __syncthreads(); // We print the ID of the block and information about its parent. print_info(depth, thread, s_uid, parent_uid); // We launch new blocks if we haven't reached the max_depth yet. 
if (++depth >= max_depth) { return; } cdp_kernel<<<gridDim.x, blockDim.x>>>(max_depth, depth, threadIdx.x, s_uid); } //////////////////////////////////////////////////////////////////////////////// // Variable on the GPU used to generate unique identifiers of blocks, for // my version of the kernels //////////////////////////////////////////////////////////////////////////////// __device__ int g_uids_2 = 0; //////////////////////////////////////////////////////////////////////////////// // Print a simple message to signal the block which is currently executing; // this is my version //////////////////////////////////////////////////////////////////////////////// __device__ void print_info_2(int depth, int thread, int uid, int parent_uid) { if (threadIdx.x == 0) { if (depth == 0) printf("BLOCK %d launched by the host\n", uid); else { char buffer[32]; for (int i = 0 ; i < depth ; ++i) { buffer[3*i+0] = '|'; buffer[3*i+1] = ' '; buffer[3*i+2] = ' '; } buffer[3*depth] = '\0'; // '\0' is termination of the string or char array // printf("%s thread %d launches BLOCK (i.e. uid) %d of block (parent_uid) %d \n", // buffer, thread, uid, parent_uid) ; printf("%s depth %d thread %d launches BLOCK (i.e. uid) %d of block (parent_uid) %d \n", buffer, depth, thread, uid, parent_uid) ; } } __syncthreads(); } //////////////////////////////////////////////////////////////////////////////// // My version of cdp_kernel, cdp_kernel_2 //////////////////////////////////////////////////////////////////////////////// __global__ void cdp_kernel_2(int max_depth, int depth, int thread, int parent_uid) { // We create a unique ID per block. Thread 0 does that and shares the value with the other threads. __shared__ int s_uid_2; if (threadIdx.x == 0) { s_uid_2 = atomicAdd(&g_uids_2, 1); } __syncthreads(); // We print the ID of the block and information about its parent. print_info_2(depth, thread, s_uid_2, parent_uid); ++depth; if (depth >= max_depth) { return; } cdp_kernel_2<<<gridDim.x,blockDim.x>>>(max_depth,depth, threadIdx.x, s_uid_2); } //////////////////////////////////////////////////////////////////////////////// // Variable on the GPU used to generate unique identifiers of blocks, for // my version of the kernels, with blockIdx.x print out //////////////////////////////////////////////////////////////////////////////// __device__ int g_uids_3 = 0; //////////////////////////////////////////////////////////////////////////////// // Print a simple message to signal the block which is currently executing; // this is my version, with blcokIdx.x print out //////////////////////////////////////////////////////////////////////////////// __device__ void print_info_3(int depth, int thread, int uid, int parent_uid, int blockIndex) { if (threadIdx.x == 0) { if (depth == 0) printf("BLOCK %d launched by the host\n", uid); else { char buffer[32]; for (int i = 0 ; i < depth ; ++i) { buffer[3*i+0] = '|'; buffer[3*i+1] = ' '; buffer[3*i+2] = ' '; } buffer[3*depth] = '\0'; // '\0' is termination of the string or char array // printf("%s thread %d launches BLOCK (i.e. uid) %d of block (parent_uid) %d \n", // buffer, thread, uid, parent_uid) ; printf("%s thread %d launches BLOCK (i.e. 
uid) %d of block (parent_uid) %d for threadIdx.x = %d , blockIdx.x = %d , called from blockIndex = %d \n", buffer, thread, uid, parent_uid, threadIdx.x, blockIdx.x, blockIndex) ; } } __syncthreads(); } //////////////////////////////////////////////////////////////////////////////// // My version of cdp_kernel, cdp_kernel_2 //////////////////////////////////////////////////////////////////////////////// __global__ void cdp_kernel_3(int max_depth, int depth, int thread, int parent_uid, int blockIndex) { // We create a unique ID per block. Thread 0 does that and shares the value with the other threads. __shared__ int s_uid; if (threadIdx.x == 0) { s_uid = atomicAdd(&g_uids_3, 1); } __syncthreads(); // We print the ID of the block and information about its parent. print_info_3(depth, thread, s_uid, parent_uid, blockIndex); ++depth; if (depth >= max_depth) { return; } cdp_kernel_3<<<gridDim.x,blockDim.x>>>(max_depth,depth, threadIdx.x, s_uid, blockIdx.x); } //////////////////////////////////////////////////////////////////////////////// // Main entry point. //////////////////////////////////////////////////////////////////////////////// int main(int argc, char **argv) { printf("starting Simple Print (CUDA Dynamic Parallelism)\n"); // Parse a few command-line arguments. int max_depth = 2; // originally 2 // Print a message describing what the sample does. printf("***************************************************************************\n"); printf("The CPU launches 2 blocks of 2 threads each. On the device each thread will\n"); printf("launch 2 blocks of 2 threads each. The GPU we will do that recursively\n"); printf("until it reaches max_depth=%d\n\n", max_depth); printf("In total 2"); int num_blocks = 2, sum = 2; for (int i = 1 ; i < max_depth ; ++i) { num_blocks *= 4; printf("+%d", num_blocks); sum += num_blocks; } printf("=%d blocks are launched!!! (%d from the GPU)\n", sum, sum-2); printf("***************************************************************************\n\n"); // We set the recursion limit for CDP to max_depth. cudaDeviceSetLimit(cudaLimitDevRuntimeSyncDepth, max_depth); // Launch the kernel from the CPU. printf("Launching cdp_kernel() with CUDA Dynamic Parallelism:\n\n"); cdp_kernel<<<2, 2>>>(max_depth, 0, 0, -1); checkCudaErrors(cudaGetLastError()); // Finalize. checkCudaErrors(cudaDeviceSynchronize()); /////////////////////////////////////////////////////////////////// // MY VERSION/modifications that I made to teach myself about // DYNAMIC PARALLELISM ////////////////////////////////////////////////////////////////// printf("experimenting (playing) with Simple Print (CUDA Dynamic Parallelism)\n"); std::cout << "Input in the new max_depth (maximum depth)" << std::endl; int max_depth_2 = 2; std::cin >> max_depth_2; // We set the recursion limit for CDP to max_depth. cudaDeviceSetLimit(cudaLimitDevRuntimeSyncDepth, max_depth_2); // Launch the kernel from the CPU. printf("Launching cdp_kernel_2() with CUDA Dynamic Parallelism:\n\n"); cdp_kernel_2<<<2, 2>>>(max_depth_2, 0, 0, -1); checkCudaErrors(cudaGetLastError()); // Finalize. 
checkCudaErrors(cudaDeviceSynchronize()); /////////////////////////////////////////////////////////////////// // MY VERSION/modifications that I made to teach myself about // DYNAMIC PARALLELISM; I also print out blockIdx ////////////////////////////////////////////////////////////////// printf("experimenting (playing) with Simple Print (CUDA Dynamic Parallelism); print out blockIdx as well \n"); std::cout << "Input in the new max_depth (maximum depth)" << std::endl; int max_depth_3 = 2; std::cin >> max_depth_3; // We set the recursion limit for CDP to max_depth. cudaDeviceSetLimit(cudaLimitDevRuntimeSyncDepth, max_depth_3); // Launch the kernel from the CPU. printf("Launching cdp_kernel_3() with CUDA Dynamic Parallelism:\n\n"); cdp_kernel_3<<<2, 2>>>(max_depth_3, 0, 0, -1,0); checkCudaErrors(cudaGetLastError()); // Finalize. checkCudaErrors(cudaDeviceSynchronize()); exit(EXIT_SUCCESS); }
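// A small sketch (not from the original file) that reproduces the block-count
// arithmetic printed by main() above: the host launches 2 blocks of 2 threads,
// and at each further depth every running thread launches gridDim.x = 2 child
// blocks, so each level contains 4x as many blocks as the previous one
// (2 threads per block * 2 child blocks per thread).
#if 0 // illustration only
static int total_blocks_launched(int max_depth)
{
    int num_blocks = 2, sum = 2;  // depth 0: the two host-launched blocks
    for (int i = 1; i < max_depth; ++i)
    {
        num_blocks *= 4;          // each block spawns 4 blocks at the next depth
        sum += num_blocks;
    }
    return sum;                   // max_depth = 2 -> 10, max_depth = 3 -> 42 (sum - 2 launched from the GPU)
}
#endif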
58777f24f568e86db87b9f006a711615daaea440.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <stdio.h> #include <iostream> //#include <hip/hip_cooperative_groups.h> #include <math.h> #include <string.h> #include <sstream> #include <fstream> //#include <bits/stdc++.h> //#include <stdlib.h> //#include <time.h> using namespace std; //using namespace cooperative_groups; /***DEFINING THE DEFINES FOR THE ARRAY INDICES****************************/ //#define N 32 #define C 96 #define H 31 #define W 31 #define R 5 #define S 5 #define M 256 #define E 27 #define F 27 #define U 1 __global__ void red_ch(float* d_r, float* d_o, int num_ch, int num_img, int num_wt) { //printf("gpu2 started\n"); float red_sum = 0; int row = threadIdx.y; int col = threadIdx.x; for(int i=0; i<num_ch; i++) { red_sum += d_o[i*(num_wt*num_img*blockDim.x*blockDim.y)+blockIdx.x*num_wt*blockDim.x*blockDim.y+blockIdx.y*blockDim.x*blockDim.y+row*blockDim.x+col] ; } d_r[blockIdx.x*num_wt*blockDim.x*blockDim.y+blockIdx.y*blockDim.x*blockDim.y+row*blockDim.x+col] = red_sum; } __global__ void ew_gpu_mmul(float* d_o, float* d_i, float* d_w, int width, int height, int stride, int ip_height, int wt_width, int num_wt,int num_img, int num_ch) {//printf("gpu started\n"); __shared__ float s_w[R*S]; __shared__ float s_i[H*W]; int row = threadIdx.y; int col = threadIdx.x; if(row*width+col<R*S) { s_w[row*width+col] = d_w[blockIdx.y*num_ch*wt_width*wt_width+blockIdx.z*wt_width*wt_width+(row*width+col)]; } { int s_i_idx = row*blockDim.x+col; s_i[s_i_idx] = d_i[blockIdx.x*num_ch*ip_height*ip_height+blockIdx.z*ip_height*ip_height+s_i_idx]; if(s_i_idx+729 < H*W) s_i[s_i_idx+729]= d_i[blockIdx.x*num_ch*ip_height*ip_height+blockIdx.z*ip_height*ip_height+s_i_idx+729]; } __syncthreads(); float prod = 0; if((row<height) && (col<width))//earlier it was num_wt*height & num_img*width { for (int i=0; i<wt_width; i++){ float3 ip1 = *((float3*)(s_i+(stride*row+i)*ip_height+stride*col)); float3 wt1 = *((float3*)(s_w+i*wt_width)); float3 ip2 = *((float3*)(s_i+(stride*row+i)*ip_height+stride*col+3));float3 wt2 = *((float3*)(s_w+i*wt_width+3)); prod += ip1.x*wt1.x+ip1.y*wt1.y+ip1.z*wt1.z+ip2.x*wt2.x+ip2.y*wt2.y; __syncthreads(); } if(prod>=0) d_o[blockIdx.z*(num_wt*num_img*blockDim.x*blockDim.y)+blockIdx.x*num_wt*blockDim.x*blockDim.y+blockIdx.y*blockDim.x*blockDim.y+row*blockDim.x+col] = prod; if(row*width+col<R*S){ s_w[(row*width+col)] = 0; __syncthreads(); } } } void element_wise_mmul(float* output, float* input, float* weight, int batch_size) { int x,y,i,j,m,n,k; for(n=0; n<batch_size; n++){ for (m=0 ; m<M; m++){ for (x=0; x<F; x++){ for(y=0; y<E; y++){ // OP[x][y] = 0; // adding bias to output for (i=0; i<R; i++){ for (j=0; j<S; j++){ for(k=0; k<C; k++){ float ip = input[n*C*H*W+k*H*W+(U*x+i)*H+(U*y+j)]; float wt = weight[m*C*R*S+k*R*S+i*S+j]; float prod = ip*wt; if(prod>=0) output[n*E*F*M+m*E*F+x*E+y] += prod; //OP[x][y] += IP[U*x+i][U*y+j]*WT[i][j]; }} } } } } } } int main(int argc, char* argv[]) { int batch_size = atoi(argv[1]); /*************INITALIZING MATRICES*********************************/ float *IP = (float*) malloc(batch_size*C*H*W*sizeof(float)); //float IP[H][W]; float *OP = (float*) malloc(batch_size*M*F*E*sizeof(float)); //float OP[F][E]; float *OPG = (float*) malloc(batch_size*M*F*E*sizeof(float)); float *WT = (float*) malloc(M*C*R*S*sizeof(float)); //float WT[R][S]; float* d_o; float* d_i; float* d_w; float* d_r; //clock_t cpu_start, gpu_start, cpu_end, gpu_end; //int a,b,c,d; int c,d,m,n,k; /*INITIALIZING WEIGHT MATRIX*/ for (m=0; m<M; 
m++){ for(k=0;k<C;k++){ for (c=0; c<R; c++){ for(d=0; d<S; d++){ //WT[c][d] = 2.0; //WT[m*C*R*S+k*R*S+c*S+d] = (int)k+1; WT[m*C*R*S+k*R*S+c*S+d] = (float)rand()/(float)(RAND_MAX+1.0); } } } } /*INITIALIZING OUTPUT MATRIX*/ for (n=0; n<batch_size;n++){ for (m=0; m<M; m++){ for (c=0; c<F; c++){ for(d=0; d<E; d++){ //OP[c][d] = 0; OP[n*M*F*E+m*F*E+c*E+d] = 0; } } } } /*INITIALIZING INPUT MATRIX*/ for (n=0; n<batch_size; n++){ for(k=0;k<C;k++){ for (c=0; c<H; c++){ for(d=0; d<W; d++){ if ((c<=1) || (d<=1) || (c>=29) || (d>=29)) IP[n*C*H*W+k*H*W+c*W+d] = 0; else IP[n*C*H*W+k*H*W+c*W+d] = (float)rand()/(RAND_MAX+1.0); // IP[c][d] = (a+b+c+d); //IP[n*C*H*W+k*H*W+c*W+d] = (float)(c+d)/255; } } } } if(hipSuccess != hipMalloc((void**) &d_i,batch_size*C*H*W*sizeof(float))) { printf("error in d_i malloc\n"); } hipMemcpy(d_i, IP, batch_size*C*H*W*sizeof(float), hipMemcpyHostToDevice); if(hipSuccess != hipMalloc((void**) &d_w, M*C*R*S*sizeof(float))) { printf("error in d_w malloc\n"); } hipMemcpy(d_w, WT, M*C*R*S*sizeof(float), hipMemcpyHostToDevice); if(hipSuccess != hipMalloc((void**) &d_o,(long int)C*batch_size*M*E*F*sizeof(float))) { printf("error in d_o malloc\n"); } if(hipSuccess != hipMalloc((void**) &d_r,batch_size*M*E*F*sizeof(float))) { printf("error in d_r malloc\n"); } //cpu_start = clock(); //element_wise_mmul(OP, IP, WT,batch_size); printf("cpu done\n"); //cpu_end = clock(); dim3 dimGrid(batch_size,256,96); dim3 dimBlock(27,27,1); dim3 dimGridRed(batch_size,256,1); dim3 dimBlockRed(27,27,1); //int op_height = 3; int op_width = 3; int stride = 1; int ip_height = 4;int wt_height = 2; int num_wt = 96; int num_img = 1; int num_ch = 384; //gpu_start = clock(); //hipFuncSetSharedMemConfig(ew_gpu_mmul,hipSharedMemBankSizeEightByte);hipLaunchKernelGGL(( ew_gpu_mmul), dim3(dimGrid), dim3(dimBlock), 0, 0, d_o,d_i,d_w,27,27,1,31,5,256,batch_size,96); hipDeviceSynchronize();hipLaunchKernelGGL(( red_ch), dim3(dimGridRed), dim3(dimBlockRed), 0, 0, d_r,d_o,96,batch_size,256); //gpu_end = clock(); //void *kernelArgs[] = {(void *)&d_o, (void *)&d_i, (void *)&d_w,(void *)&op_height, (void *)&op_width, (void *)&stride, (void *)&ip_height,(void *)&wt_height, (void *)&num_wt, (void *)&num_img, (void *)&num_ch }; //hipLaunchCooperativeKernel((void*)ew_gpu_mmul,dimGrid,dimBlock,kernelArgs,0,NULL); //hipDeviceSynchronize(); hipMemcpy(OPG,d_r,batch_size*M*E*F*sizeof(float), hipMemcpyDeviceToHost); /**print outputs**/ //int e,f,g,h; int g,h,s,u; float max_error = 0; string filename = "layer_2_"+to_string(batch_size); ifstream fin(filename.c_str()); string line ; //for (t=0;t<C;t++){ for (u=0;u<batch_size;u++){ for (s=0;s<M;s++){ for (g=0; g<F; g++){ for(h=0; h<E; h++){ getline(fin,line); float error = abs(OPG[u*M*F*E+s*E*F+g*E+h]-atof(line.c_str())); //float error = abs(OPG[u*M*F*E+s*E*F+g*E+h]-OP[u*M*F*E+s*E*F+g*E+h]); if(error > max_error) max_error = error; // printf("the output is %f for index %d, %d,%d,%d.\n",OP[u*M*F*E+s*E*F+g*E+h],u,s,g,h); // printf("diff CPU and GPU is %f for index %d,%d,%d,%d.\n", OPG[u*M*F*E+s*E*F+g*E+h]-OP[u*M*F*E+s*E*F+g*E+h],u,s,g,h); // printf("the output from GPU is %f for index,%d,%d,%d,%d.\n",OPG[u*M*F*E+s*E*F+g*E+h],u,s,g,h); } } } } fin.close(); printf("max error is %f\n", max_error); //} //cout<<"time taken by cpu call is "<<((double)(cpu_end-cpu_start))/CLOCKS_PER_SEC<<"secs"<<endl; //cout<<"time taken by gpu call is "<<((double)(gpu_end-gpu_start))/CLOCKS_PER_SEC<<"secs"<<endl; hipFree(d_o); hipFree(d_i); hipFree(d_w); hipFree(d_r); free(OPG); free(IP); free(WT); free(OP); 
return 0; }
58777f24f568e86db87b9f006a711615daaea440.cu
#include <stdio.h> #include <iostream> //#include <cooperative_groups.h> #include <math.h> #include <string.h> #include <sstream> #include <fstream> //#include <bits/stdc++.h> //#include <stdlib.h> //#include <time.h> using namespace std; //using namespace cooperative_groups; /***DEFINING THE DEFINES FOR THE ARRAY INDICES****************************/ //#define N 32 #define C 96 #define H 31 #define W 31 #define R 5 #define S 5 #define M 256 #define E 27 #define F 27 #define U 1 __global__ void red_ch(float* d_r, float* d_o, int num_ch, int num_img, int num_wt) { //printf("gpu2 started\n"); float red_sum = 0; int row = threadIdx.y; int col = threadIdx.x; for(int i=0; i<num_ch; i++) { red_sum += d_o[i*(num_wt*num_img*blockDim.x*blockDim.y)+blockIdx.x*num_wt*blockDim.x*blockDim.y+blockIdx.y*blockDim.x*blockDim.y+row*blockDim.x+col] ; } d_r[blockIdx.x*num_wt*blockDim.x*blockDim.y+blockIdx.y*blockDim.x*blockDim.y+row*blockDim.x+col] = red_sum; } __global__ void ew_gpu_mmul(float* d_o, float* d_i, float* d_w, int width, int height, int stride, int ip_height, int wt_width, int num_wt,int num_img, int num_ch) {//printf("gpu started\n"); __shared__ float s_w[R*S]; __shared__ float s_i[H*W]; int row = threadIdx.y; int col = threadIdx.x; if(row*width+col<R*S) { s_w[row*width+col] = d_w[blockIdx.y*num_ch*wt_width*wt_width+blockIdx.z*wt_width*wt_width+(row*width+col)]; } { int s_i_idx = row*blockDim.x+col; s_i[s_i_idx] = d_i[blockIdx.x*num_ch*ip_height*ip_height+blockIdx.z*ip_height*ip_height+s_i_idx]; if(s_i_idx+729 < H*W) s_i[s_i_idx+729]= d_i[blockIdx.x*num_ch*ip_height*ip_height+blockIdx.z*ip_height*ip_height+s_i_idx+729]; } __syncthreads(); float prod = 0; if((row<height) && (col<width))//earlier it was num_wt*height & num_img*width { for (int i=0; i<wt_width; i++){ float3 ip1 = *((float3*)(s_i+(stride*row+i)*ip_height+stride*col)); float3 wt1 = *((float3*)(s_w+i*wt_width)); float3 ip2 = *((float3*)(s_i+(stride*row+i)*ip_height+stride*col+3));float3 wt2 = *((float3*)(s_w+i*wt_width+3)); prod += ip1.x*wt1.x+ip1.y*wt1.y+ip1.z*wt1.z+ip2.x*wt2.x+ip2.y*wt2.y; __syncthreads(); } if(prod>=0) d_o[blockIdx.z*(num_wt*num_img*blockDim.x*blockDim.y)+blockIdx.x*num_wt*blockDim.x*blockDim.y+blockIdx.y*blockDim.x*blockDim.y+row*blockDim.x+col] = prod; if(row*width+col<R*S){ s_w[(row*width+col)] = 0; __syncthreads(); } } } void element_wise_mmul(float* output, float* input, float* weight, int batch_size) { int x,y,i,j,m,n,k; for(n=0; n<batch_size; n++){ for (m=0 ; m<M; m++){ for (x=0; x<F; x++){ for(y=0; y<E; y++){ // OP[x][y] = 0; // adding bias to output for (i=0; i<R; i++){ for (j=0; j<S; j++){ for(k=0; k<C; k++){ float ip = input[n*C*H*W+k*H*W+(U*x+i)*H+(U*y+j)]; float wt = weight[m*C*R*S+k*R*S+i*S+j]; float prod = ip*wt; if(prod>=0) output[n*E*F*M+m*E*F+x*E+y] += prod; //OP[x][y] += IP[U*x+i][U*y+j]*WT[i][j]; }} } } } } } } int main(int argc, char* argv[]) { int batch_size = atoi(argv[1]); /*************INITALIZING MATRICES*********************************/ float *IP = (float*) malloc(batch_size*C*H*W*sizeof(float)); //float IP[H][W]; float *OP = (float*) malloc(batch_size*M*F*E*sizeof(float)); //float OP[F][E]; float *OPG = (float*) malloc(batch_size*M*F*E*sizeof(float)); float *WT = (float*) malloc(M*C*R*S*sizeof(float)); //float WT[R][S]; float* d_o; float* d_i; float* d_w; float* d_r; //clock_t cpu_start, gpu_start, cpu_end, gpu_end; //int a,b,c,d; int c,d,m,n,k; /*INITIALIZING WEIGHT MATRIX*/ for (m=0; m<M; m++){ for(k=0;k<C;k++){ for (c=0; c<R; c++){ for(d=0; d<S; d++){ //WT[c][d] = 2.0; 
//WT[m*C*R*S+k*R*S+c*S+d] = (int)k+1; WT[m*C*R*S+k*R*S+c*S+d] = (float)rand()/(float)(RAND_MAX+1.0); } } } } /*INITIALIZING OUTPUT MATRIX*/ for (n=0; n<batch_size;n++){ for (m=0; m<M; m++){ for (c=0; c<F; c++){ for(d=0; d<E; d++){ //OP[c][d] = 0; OP[n*M*F*E+m*F*E+c*E+d] = 0; } } } } /*INITIALIZING INPUT MATRIX*/ for (n=0; n<batch_size; n++){ for(k=0;k<C;k++){ for (c=0; c<H; c++){ for(d=0; d<W; d++){ if ((c<=1) || (d<=1) || (c>=29) || (d>=29)) IP[n*C*H*W+k*H*W+c*W+d] = 0; else IP[n*C*H*W+k*H*W+c*W+d] = (float)rand()/(RAND_MAX+1.0); // IP[c][d] = (a+b+c+d); //IP[n*C*H*W+k*H*W+c*W+d] = (float)(c+d)/255; } } } } if(cudaSuccess != cudaMalloc((void**) &d_i,batch_size*C*H*W*sizeof(float))) { printf("error in d_i malloc\n"); } cudaMemcpy(d_i, IP, batch_size*C*H*W*sizeof(float), cudaMemcpyHostToDevice); if(cudaSuccess != cudaMalloc((void**) &d_w, M*C*R*S*sizeof(float))) { printf("error in d_w malloc\n"); } cudaMemcpy(d_w, WT, M*C*R*S*sizeof(float), cudaMemcpyHostToDevice); if(cudaSuccess != cudaMalloc((void**) &d_o,(long int)C*batch_size*M*E*F*sizeof(float))) { printf("error in d_o malloc\n"); } if(cudaSuccess != cudaMalloc((void**) &d_r,batch_size*M*E*F*sizeof(float))) { printf("error in d_r malloc\n"); } //cpu_start = clock(); //element_wise_mmul(OP, IP, WT,batch_size); printf("cpu done\n"); //cpu_end = clock(); dim3 dimGrid(batch_size,256,96); dim3 dimBlock(27,27,1); dim3 dimGridRed(batch_size,256,1); dim3 dimBlockRed(27,27,1); //int op_height = 3; int op_width = 3; int stride = 1; int ip_height = 4;int wt_height = 2; int num_wt = 96; int num_img = 1; int num_ch = 384; //gpu_start = clock(); //cudaFuncSetSharedMemConfig(ew_gpu_mmul,cudaSharedMemBankSizeEightByte); ew_gpu_mmul<<<dimGrid, dimBlock>>>(d_o,d_i,d_w,27,27,1,31,5,256,batch_size,96); cudaDeviceSynchronize(); red_ch<<<dimGridRed, dimBlockRed>>>(d_r,d_o,96,batch_size,256); //gpu_end = clock(); //void *kernelArgs[] = {(void *)&d_o, (void *)&d_i, (void *)&d_w,(void *)&op_height, (void *)&op_width, (void *)&stride, (void *)&ip_height,(void *)&wt_height, (void *)&num_wt, (void *)&num_img, (void *)&num_ch }; //cudaLaunchCooperativeKernel((void*)ew_gpu_mmul,dimGrid,dimBlock,kernelArgs,0,NULL); //cudaDeviceSynchronize(); cudaMemcpy(OPG,d_r,batch_size*M*E*F*sizeof(float), cudaMemcpyDeviceToHost); /**print outputs**/ //int e,f,g,h; int g,h,s,u; float max_error = 0; string filename = "layer_2_"+to_string(batch_size); ifstream fin(filename.c_str()); string line ; //for (t=0;t<C;t++){ for (u=0;u<batch_size;u++){ for (s=0;s<M;s++){ for (g=0; g<F; g++){ for(h=0; h<E; h++){ getline(fin,line); float error = abs(OPG[u*M*F*E+s*E*F+g*E+h]-atof(line.c_str())); //float error = abs(OPG[u*M*F*E+s*E*F+g*E+h]-OP[u*M*F*E+s*E*F+g*E+h]); if(error > max_error) max_error = error; // printf("the output is %f for index %d, %d,%d,%d.\n",OP[u*M*F*E+s*E*F+g*E+h],u,s,g,h); // printf("diff CPU and GPU is %f for index %d,%d,%d,%d.\n", OPG[u*M*F*E+s*E*F+g*E+h]-OP[u*M*F*E+s*E*F+g*E+h],u,s,g,h); // printf("the output from GPU is %f for index,%d,%d,%d,%d.\n",OPG[u*M*F*E+s*E*F+g*E+h],u,s,g,h); } } } } fin.close(); printf("max error is %f\n", max_error); //} //cout<<"time taken by cpu call is "<<((double)(cpu_end-cpu_start))/CLOCKS_PER_SEC<<"secs"<<endl; //cout<<"time taken by gpu call is "<<((double)(gpu_end-gpu_start))/CLOCKS_PER_SEC<<"secs"<<endl; cudaFree(d_o); cudaFree(d_i); cudaFree(d_w); cudaFree(d_r); free(OPG); free(IP); free(WT); free(OP); return 0; }
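Apart from the hipify banner comment and the added hip/hip_runtime.h include, the .hip and .cu files in the pair above differ only in the runtime API spelling: cudaMalloc/cudaMemcpy/cudaFree become hipMalloc/hipMemcpy/hipFree, cudaDeviceSynchronize becomes hipDeviceSynchronize, and the kernel<<<grid, block>>>(...) launch becomes the hipLaunchKernelGGL(kernel, grid, block, sharedMemBytes, stream, ...) macro; the device code itself is unchanged. The standalone sketch below is not taken from either file (the kernel name, sizes, and values are made up for illustration); it mirrors the same allocate / copy / launch / synchronize / free sequence using the HIP runtime API and the same error-check style as the files above.

#include <hip/hip_runtime.h>
#include <cstdio>

// Trivial kernel standing in for ew_gpu_mmul/red_ch: scales n floats by a.
__global__ void scale(float* d, int n, float a) {
  int i = blockIdx.x * blockDim.x + threadIdx.x;
  if (i < n) d[i] *= a;
}

int main() {
  const int n = 1024;
  float h[1024];
  for (int i = 0; i < n; ++i) h[i] = 1.0f;
  float* d = nullptr;
  // Same error-check pattern as the files above, with the HIP spellings.
  if (hipSuccess != hipMalloc((void**)&d, n * sizeof(float))) {
    printf("error in hipMalloc\n");
    return 1;
  }
  hipMemcpy(d, h, n * sizeof(float), hipMemcpyHostToDevice);
  // hipLaunchKernelGGL replaces the <<<grid, block>>> syntax of the .cu file.
  hipLaunchKernelGGL(scale, dim3(n / 256), dim3(256), 0, 0, d, n, 2.0f);
  hipDeviceSynchronize();
  hipMemcpy(h, d, n * sizeof(float), hipMemcpyDeviceToHost);
  printf("h[0] = %f\n", h[0]);  // expect 2.000000
  hipFree(d);
  return 0;
}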
c105be1c83addfea784674e276d762723dc15fdd.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "star3d2r-32x32-3-128_kernel.hu" __device__ double __sbref_wrap(double *sb, size_t index) { return sb[index]; } __global__ void kernel0_3(double *A, int dimsize, int timestep, int c0) { #ifndef AN5D_TYPE #define AN5D_TYPE unsigned #endif const AN5D_TYPE __c0Len = (timestep - 0); const AN5D_TYPE __c0Pad = (0); #define __c0 c0 const AN5D_TYPE __c1Len = (dimsize - 2 - 2); const AN5D_TYPE __c1Pad = (2); #define __c1 c1 const AN5D_TYPE __c2Len = (dimsize - 2 - 2); const AN5D_TYPE __c2Pad = (2); #define __c2 c2 const AN5D_TYPE __c3Len = (dimsize - 2 - 2); const AN5D_TYPE __c3Pad = (2); #define __c3 c3 const AN5D_TYPE __halo1 = 2; const AN5D_TYPE __halo2 = 2; const AN5D_TYPE __halo3 = 2; const AN5D_TYPE __side0Len = 3; const AN5D_TYPE __side1Len = 128; const AN5D_TYPE __side2Len = 20; const AN5D_TYPE __side3Len = 20; const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len); const AN5D_TYPE __OlLen3 = (__halo3 * __side0Len); const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2); const AN5D_TYPE __side3LenOl = (__side3Len + 2 * __OlLen3); const AN5D_TYPE __blockSize = 1 * __side2LenOl * __side3LenOl; const AN5D_TYPE __side1Num = (__c1Len + __side1Len - 1) / __side1Len; const AN5D_TYPE __side2Num = (__c2Len + __side2Len - 1) / __side2Len; const AN5D_TYPE __side3Num = (__c3Len + __side3Len - 1) / __side3Len; const AN5D_TYPE __tid = threadIdx.y * blockDim.x + threadIdx.x; const AN5D_TYPE __local_c2 = __tid / __side3LenOl; const AN5D_TYPE __local_c3 = __tid % __side3LenOl; const AN5D_TYPE __c1Id = blockIdx.x / __side2Num / __side3Num; const AN5D_TYPE __c2 = (blockIdx.x / __side3Num % __side2Num) * __side2Len + __local_c2 + __c2Pad - __OlLen2; const AN5D_TYPE __c3 = (blockIdx.x % __side3Num) * __side3Len + __local_c3 + __c3Pad - __OlLen3; double __reg_0_0; double __reg_0_1; double __reg_0_2; double __reg_0_3; double __reg_0_4; double __reg_1_0; double __reg_1_1; double __reg_1_2; double __reg_1_3; double __reg_1_4; double __reg_2_0; double __reg_2_1; double __reg_2_2; double __reg_2_3; double __reg_2_4; __shared__ double __c_sb_double[__blockSize * 2]; double *__c_sb = __c_sb_double; const AN5D_TYPE __loadValid = 1 && __c2 >= __c2Pad - __halo2 && __c2 < __c2Pad + __c2Len + __halo2 && __c3 >= __c3Pad - __halo3 && __c3 < __c3Pad + __c3Len + __halo3; const AN5D_TYPE __updateValid = 1 && __c2 >= __c2Pad && __c2 < __c2Pad + __c2Len && __c3 >= __c3Pad && __c3 < __c3Pad + __c3Len; const AN5D_TYPE __writeValid1 = __updateValid && __local_c2 >= (__halo2 * 1) && __local_c2 < __side2LenOl - (__halo2 * 1) && __local_c3 >= (__halo3 * 1) && __local_c3 < __side3LenOl - (__halo3 * 1); const AN5D_TYPE __writeValid2 = __updateValid && __local_c2 >= (__halo2 * 2) && __local_c2 < __side2LenOl - (__halo2 * 2) && __local_c3 >= (__halo3 * 2) && __local_c3 < __side3LenOl - (__halo3 * 2); const AN5D_TYPE __writeValid3 = __updateValid && __local_c2 >= (__halo2 * 3) && __local_c2 < __side2LenOl - (__halo2 * 3) && __local_c3 >= (__halo3 * 3) && __local_c3 < __side3LenOl - (__halo3 * 3); const AN5D_TYPE __storeValid = __writeValid3; AN5D_TYPE __c1; AN5D_TYPE __h; const AN5D_TYPE __c1Pad2 = __c1Pad + __side1Len * __c1Id; #define __LOAD(reg, h) do { if (__loadValid) { __c1 = __c1Pad2 - __halo1 + h; reg = A[(((__c0 % 2) * dimsize + __c1) * dimsize + __c2) * dimsize + __c3]; }} while (0) #define __DEST (A[((((c0 + 1) % 2) * dimsize + 
c1) * dimsize + c2) * dimsize + c3]) #define __REGREF(reg, i2, i3) reg #define __SBREF(sb, i2, i3) __sbref_wrap(sb, (int)__tid + i2 * (int)__side3LenOl + i3) #define __CALCEXPR(__rn0, __a, __b, __c, __d, __e) do { __rn0 = (((((((((((((0.2500f * (__REGREF(__c, 0, 0))) + (0.0620f * (__REGREF(__b, 0, 0)))) + (0.0621f * (__REGREF(__d, 0, 0)))) + (0.0622f * (__SBREF(__c_sb, -1, 0)))) + (0.0623f * (__SBREF(__c_sb, 1, 0)))) + (0.0624f * (__SBREF(__c_sb, 0, -1)))) + (0.06245f * (__SBREF(__c_sb, 0, 1)))) + (0.06255f * (__REGREF(__a, 0, 0)))) + (0.0626f * (__REGREF(__e, 0, 0)))) + (0.0627f * (__SBREF(__c_sb, -2, 0)))) + (0.0628f * (__SBREF(__c_sb, 2, 0)))) + (0.0629f * (__SBREF(__c_sb, 0, -2)))) + (0.0630f * (__SBREF(__c_sb, 0, 2)))); } while (0) #define __DB_SWITCH() do { __c_sb = &__c_sb_double[(__c_sb == __c_sb_double) ? __blockSize : 0]; } while (0) #define __CALCSETUP(a, b, c, d, e) do { __DB_SWITCH(); __c_sb[__tid] = c; __syncthreads(); } while (0) #define __CALC1(out, reg0, reg1, reg2, reg3, reg4) do { __CALCSETUP(reg0, reg1, reg2, reg3, reg4); if (__writeValid1) __CALCEXPR(out, reg0, reg1, reg2, reg3, reg4); else out = reg2; } while (0) #define __CALC2(out, reg0, reg1, reg2, reg3, reg4) do { __CALCSETUP(reg0, reg1, reg2, reg3, reg4); if (__writeValid2) __CALCEXPR(out, reg0, reg1, reg2, reg3, reg4); else out = reg2; } while (0) #define __STORE(h, reg0, reg1, reg2, reg3, reg4) do { __CALCSETUP(reg0, reg1, reg2, reg3, reg4); if (__storeValid) { __c1 = __c1Pad2 - __halo1 + h; __CALCEXPR(__DEST, reg0, reg1, reg2, reg3, reg4); } } while (0) if (__c1Id == 0) { __LOAD(__reg_2_0, 0); __LOAD(__reg_2_1, 1); __LOAD(__reg_0_2, 2); __LOAD(__reg_0_3, 3); __LOAD(__reg_0_4, 4); __CALC1(__reg_1_2, __reg_2_0, __reg_2_1, __reg_0_2, __reg_0_3, __reg_0_4); __LOAD(__reg_0_0, 5); __CALC1(__reg_1_3, __reg_2_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0); __LOAD(__reg_0_1, 6); __CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_2_0, __reg_2_1, __reg_1_2, __reg_1_3, __reg_1_4); __LOAD(__reg_0_2, 7); __CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_3, __reg_2_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0); __LOAD(__reg_0_3, 8); __CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3); __CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1); __STORE(2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4); __LOAD(__reg_0_4, 9); __CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4); __CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2); __STORE(3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0); __LOAD(__reg_0_0, 10); __CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0); __CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3); __STORE(4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1); __LOAD(__reg_0_1, 11); __CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4); __STORE(5, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2); __LOAD(__reg_0_2, 12); __CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0); __STORE(6, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3); } else { __LOAD(__reg_0_0, 0); __LOAD(__reg_0_1, 1); __LOAD(__reg_0_2, 2); __LOAD(__reg_0_3, 3); __LOAD(__reg_0_4, 4); __CALC1(__reg_1_2, __reg_0_0, 
__reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4); __LOAD(__reg_0_0, 5); __CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0); __LOAD(__reg_0_1, 6); __CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1); __LOAD(__reg_0_2, 7); __CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2); __LOAD(__reg_0_3, 8); __CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3); __CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1); __LOAD(__reg_0_4, 9); __CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4); __CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2); __LOAD(__reg_0_0, 10); __CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0); __CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3); __LOAD(__reg_0_1, 11); __CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4); __LOAD(__reg_0_2, 12); __CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0); __STORE(6, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3); } __c_sb = __c_sb_double + __blockSize * 1; if (__c1Id == __side1Num - 1) { for (__h = 13; __h <= __c1Len - __side1Len * __c1Id + __halo1 * 2 - 5;) { __LOAD(__reg_0_3, __h); __CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3); __CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1); __STORE(__h - 6, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4); __h++; __LOAD(__reg_0_4, __h); __CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4); __CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2); __STORE(__h - 6, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0); __h++; __LOAD(__reg_0_0, __h); __CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0); __CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3); __STORE(__h - 6, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1); __h++; __LOAD(__reg_0_1, __h); __CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4); __STORE(__h - 6, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2); __h++; __LOAD(__reg_0_2, __h); __CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0); __STORE(__h - 6, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3); __h++; __DB_SWITCH(); __syncthreads(); } if (0) {} else if (__h + 0 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_0_1); __STORE(__h - 6, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4); __CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_0_1, __reg_0_2); __STORE(__h - 5, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0); __STORE(__h - 4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_0_1); __STORE(__h - 3, __reg_2_3, __reg_2_4, __reg_2_0, __reg_0_1, __reg_0_2); } else if (__h + 1 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __LOAD(__reg_0_3, __h + 0); __CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3); __CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1); __STORE(__h - 6, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, 
__reg_2_4); __CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_0_2); __STORE(__h - 5, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0); __CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_0_2, __reg_0_3); __STORE(__h - 4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1); __STORE(__h - 3, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_0_2); __STORE(__h - 2, __reg_2_4, __reg_2_0, __reg_2_1, __reg_0_2, __reg_0_3); } else if (__h + 2 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __LOAD(__reg_0_3, __h + 0); __CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3); __CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1); __STORE(__h - 6, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4); __LOAD(__reg_0_4, __h + 1); __CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4); __CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2); __STORE(__h - 5, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0); __CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_0_3); __STORE(__h - 4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1); __CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_0_3, __reg_0_4); __STORE(__h - 3, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2); __STORE(__h - 2, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_0_3); __STORE(__h - 1, __reg_2_0, __reg_2_1, __reg_2_2, __reg_0_3, __reg_0_4); } else if (__h + 3 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __LOAD(__reg_0_3, __h + 0); __CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3); __CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1); __STORE(__h - 6, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4); __LOAD(__reg_0_4, __h + 1); __CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4); __CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2); __STORE(__h - 5, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0); __LOAD(__reg_0_0, __h + 2); __CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0); __CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3); __STORE(__h - 4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1); __CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_0_4); __STORE(__h - 3, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2); __CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_0_4, __reg_0_0); __STORE(__h - 2, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3); __STORE(__h - 1, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_0_4); __STORE(__h + 0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_0_4, __reg_0_0); } else if (__h + 4 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __LOAD(__reg_0_3, __h + 0); __CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3); __CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1); __STORE(__h - 6, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4); __LOAD(__reg_0_4, __h + 1); __CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4); __CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2); __STORE(__h - 5, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0); __LOAD(__reg_0_0, __h + 2); __CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0); __CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3); __STORE(__h - 4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1); 
__LOAD(__reg_0_1, __h + 3); __CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4); __STORE(__h - 3, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2); __CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_0_0); __STORE(__h - 2, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3); __CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_0_0, __reg_0_1); __STORE(__h - 1, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4); __STORE(__h + 0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_0_0); __STORE(__h + 1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_0_0, __reg_0_1); } } else { for (__h = 13; __h <= __side1LenOl - 5;) { __LOAD(__reg_0_3, __h); __CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3); __CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1); __STORE(__h - 6, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4); __h++; __LOAD(__reg_0_4, __h); __CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4); __CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2); __STORE(__h - 6, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0); __h++; __LOAD(__reg_0_0, __h); __CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0); __CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3); __STORE(__h - 6, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1); __h++; __LOAD(__reg_0_1, __h); __CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4); __STORE(__h - 6, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2); __h++; __LOAD(__reg_0_2, __h); __CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0); __STORE(__h - 6, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3); __h++; __DB_SWITCH(); __syncthreads(); } if (__h == __side1LenOl) return; __LOAD(__reg_0_3, __h); __CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3); __CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1); __STORE(__h - 6, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4); __h++; if (__h == __side1LenOl) return; __LOAD(__reg_0_4, __h); __CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4); __CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2); __STORE(__h - 6, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0); __h++; if (__h == __side1LenOl) return; __LOAD(__reg_0_0, __h); __CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0); __CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3); __STORE(__h - 6, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1); __h++; if (__h == __side1LenOl) return; __LOAD(__reg_0_1, __h); __CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4); __STORE(__h - 6, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2); __h++; if (__h == __side1LenOl) return; __LOAD(__reg_0_2, __h); __CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0); __STORE(__h - 6, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3); __h++; } } __global__ void kernel0_2(double *A, int dimsize, int timestep, 
int c0) { #ifndef AN5D_TYPE #define AN5D_TYPE unsigned #endif const AN5D_TYPE __c0Len = (timestep - 0); const AN5D_TYPE __c0Pad = (0); #define __c0 c0 const AN5D_TYPE __c1Len = (dimsize - 2 - 2); const AN5D_TYPE __c1Pad = (2); #define __c1 c1 const AN5D_TYPE __c2Len = (dimsize - 2 - 2); const AN5D_TYPE __c2Pad = (2); #define __c2 c2 const AN5D_TYPE __c3Len = (dimsize - 2 - 2); const AN5D_TYPE __c3Pad = (2); #define __c3 c3 const AN5D_TYPE __halo1 = 2; const AN5D_TYPE __halo2 = 2; const AN5D_TYPE __halo3 = 2; const AN5D_TYPE __side0Len = 2; const AN5D_TYPE __side1Len = 128; const AN5D_TYPE __side2Len = 24; const AN5D_TYPE __side3Len = 24; const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len); const AN5D_TYPE __OlLen3 = (__halo3 * __side0Len); const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2); const AN5D_TYPE __side3LenOl = (__side3Len + 2 * __OlLen3); const AN5D_TYPE __blockSize = 1 * __side2LenOl * __side3LenOl; const AN5D_TYPE __side1Num = (__c1Len + __side1Len - 1) / __side1Len; const AN5D_TYPE __side2Num = (__c2Len + __side2Len - 1) / __side2Len; const AN5D_TYPE __side3Num = (__c3Len + __side3Len - 1) / __side3Len; const AN5D_TYPE __tid = threadIdx.y * blockDim.x + threadIdx.x; const AN5D_TYPE __local_c2 = __tid / __side3LenOl; const AN5D_TYPE __local_c3 = __tid % __side3LenOl; const AN5D_TYPE __c1Id = blockIdx.x / __side2Num / __side3Num; const AN5D_TYPE __c2 = (blockIdx.x / __side3Num % __side2Num) * __side2Len + __local_c2 + __c2Pad - __OlLen2; const AN5D_TYPE __c3 = (blockIdx.x % __side3Num) * __side3Len + __local_c3 + __c3Pad - __OlLen3; double __reg_0_0; double __reg_0_1; double __reg_0_2; double __reg_0_3; double __reg_0_4; double __reg_1_0; double __reg_1_1; double __reg_1_2; double __reg_1_3; double __reg_1_4; __shared__ double __c_sb_double[__blockSize * 2]; double *__c_sb = __c_sb_double; const AN5D_TYPE __loadValid = 1 && __c2 >= __c2Pad - __halo2 && __c2 < __c2Pad + __c2Len + __halo2 && __c3 >= __c3Pad - __halo3 && __c3 < __c3Pad + __c3Len + __halo3; const AN5D_TYPE __updateValid = 1 && __c2 >= __c2Pad && __c2 < __c2Pad + __c2Len && __c3 >= __c3Pad && __c3 < __c3Pad + __c3Len; const AN5D_TYPE __writeValid1 = __updateValid && __local_c2 >= (__halo2 * 1) && __local_c2 < __side2LenOl - (__halo2 * 1) && __local_c3 >= (__halo3 * 1) && __local_c3 < __side3LenOl - (__halo3 * 1); const AN5D_TYPE __writeValid2 = __updateValid && __local_c2 >= (__halo2 * 2) && __local_c2 < __side2LenOl - (__halo2 * 2) && __local_c3 >= (__halo3 * 2) && __local_c3 < __side3LenOl - (__halo3 * 2); const AN5D_TYPE __storeValid = __writeValid2; AN5D_TYPE __c1; AN5D_TYPE __h; const AN5D_TYPE __c1Pad2 = __c1Pad + __side1Len * __c1Id; #define __LOAD(reg, h) do { if (__loadValid) { __c1 = __c1Pad2 - __halo1 + h; reg = A[(((__c0 % 2) * dimsize + __c1) * dimsize + __c2) * dimsize + __c3]; }} while (0) #define __DEST (A[((((c0 + 1) % 2) * dimsize + c1) * dimsize + c2) * dimsize + c3]) #define __REGREF(reg, i2, i3) reg #define __SBREF(sb, i2, i3) __sbref_wrap(sb, (int)__tid + i2 * (int)__side3LenOl + i3) #define __CALCEXPR(__rn0, __a, __b, __c, __d, __e) do { __rn0 = (((((((((((((0.2500f * (__REGREF(__c, 0, 0))) + (0.0620f * (__REGREF(__b, 0, 0)))) + (0.0621f * (__REGREF(__d, 0, 0)))) + (0.0622f * (__SBREF(__c_sb, -1, 0)))) + (0.0623f * (__SBREF(__c_sb, 1, 0)))) + (0.0624f * (__SBREF(__c_sb, 0, -1)))) + (0.06245f * (__SBREF(__c_sb, 0, 1)))) + (0.06255f * (__REGREF(__a, 0, 0)))) + (0.0626f * 
(__REGREF(__e, 0, 0)))) + (0.0627f * (__SBREF(__c_sb, -2, 0)))) + (0.0628f * (__SBREF(__c_sb, 2, 0)))) + (0.0629f * (__SBREF(__c_sb, 0, -2)))) + (0.0630f * (__SBREF(__c_sb, 0, 2)))); } while (0) #define __DB_SWITCH() do { __c_sb = &__c_sb_double[(__c_sb == __c_sb_double) ? __blockSize : 0]; } while (0) #define __CALCSETUP(a, b, c, d, e) do { __DB_SWITCH(); __c_sb[__tid] = c; __syncthreads(); } while (0) #define __CALC1(out, reg0, reg1, reg2, reg3, reg4) do { __CALCSETUP(reg0, reg1, reg2, reg3, reg4); if (__writeValid1) __CALCEXPR(out, reg0, reg1, reg2, reg3, reg4); else out = reg2; } while (0) #define __STORE(h, reg0, reg1, reg2, reg3, reg4) do { __CALCSETUP(reg0, reg1, reg2, reg3, reg4); if (__storeValid) { __c1 = __c1Pad2 - __halo1 + h; __CALCEXPR(__DEST, reg0, reg1, reg2, reg3, reg4); } } while (0) if (__c1Id == 0) { __LOAD(__reg_1_0, 0); __LOAD(__reg_1_1, 1); __LOAD(__reg_0_2, 2); __LOAD(__reg_0_3, 3); __LOAD(__reg_0_4, 4); __CALC1(__reg_1_2, __reg_1_0, __reg_1_1, __reg_0_2, __reg_0_3, __reg_0_4); __LOAD(__reg_0_0, 5); __CALC1(__reg_1_3, __reg_1_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0); __LOAD(__reg_0_1, 6); __CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1); __STORE(2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4); __LOAD(__reg_0_2, 7); __CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2); __STORE(3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0); __LOAD(__reg_0_3, 8); __CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3); __STORE(4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1); } else { __LOAD(__reg_0_0, 0); __LOAD(__reg_0_1, 1); __LOAD(__reg_0_2, 2); __LOAD(__reg_0_3, 3); __LOAD(__reg_0_4, 4); __CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4); __LOAD(__reg_0_0, 5); __CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0); __LOAD(__reg_0_1, 6); __CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1); __LOAD(__reg_0_2, 7); __CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2); __LOAD(__reg_0_3, 8); __CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3); __STORE(4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1); } __c_sb = __c_sb_double + __blockSize * 0; if (__c1Id == __side1Num - 1) { for (__h = 9; __h <= __c1Len - __side1Len * __c1Id + __halo1 * 2 - 5;) { __LOAD(__reg_0_4, __h); __CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4); __STORE(__h - 4, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2); __h++; __LOAD(__reg_0_0, __h); __CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0); __STORE(__h - 4, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3); __h++; __LOAD(__reg_0_1, __h); __CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1); __STORE(__h - 4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4); __h++; __LOAD(__reg_0_2, __h); __CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2); __STORE(__h - 4, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0); __h++; __LOAD(__reg_0_3, __h); __CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3); __STORE(__h - 4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1); __h++; } if (0) {} else if (__h + 0 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __STORE(__h - 4, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_0_2); __STORE(__h - 3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_0_2, __reg_0_3); } else if (__h + 1 == __c1Len 
- __side1Len * __c1Id + __halo1 * 2) { __LOAD(__reg_0_4, __h + 0); __CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4); __STORE(__h - 4, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2); __STORE(__h - 3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_0_3); __STORE(__h - 2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_0_3, __reg_0_4); } else if (__h + 2 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __LOAD(__reg_0_4, __h + 0); __CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4); __STORE(__h - 4, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2); __LOAD(__reg_0_0, __h + 1); __CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0); __STORE(__h - 3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3); __STORE(__h - 2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_0_4); __STORE(__h - 1, __reg_1_1, __reg_1_2, __reg_1_3, __reg_0_4, __reg_0_0); } else if (__h + 3 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __LOAD(__reg_0_4, __h + 0); __CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4); __STORE(__h - 4, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2); __LOAD(__reg_0_0, __h + 1); __CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0); __STORE(__h - 3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3); __LOAD(__reg_0_1, __h + 2); __CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1); __STORE(__h - 2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4); __STORE(__h - 1, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_0_0); __STORE(__h + 0, __reg_1_2, __reg_1_3, __reg_1_4, __reg_0_0, __reg_0_1); } else if (__h + 4 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __LOAD(__reg_0_4, __h + 0); __CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4); __STORE(__h - 4, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2); __LOAD(__reg_0_0, __h + 1); __CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0); __STORE(__h - 3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3); __LOAD(__reg_0_1, __h + 2); __CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1); __STORE(__h - 2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4); __LOAD(__reg_0_2, __h + 3); __CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2); __STORE(__h - 1, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0); __STORE(__h + 0, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_0_1); __STORE(__h + 1, __reg_1_3, __reg_1_4, __reg_1_0, __reg_0_1, __reg_0_2); } } else { for (__h = 9; __h <= __side1LenOl - 5;) { __LOAD(__reg_0_4, __h); __CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4); __STORE(__h - 4, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2); __h++; __LOAD(__reg_0_0, __h); __CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0); __STORE(__h - 4, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3); __h++; __LOAD(__reg_0_1, __h); __CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1); __STORE(__h - 4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4); __h++; __LOAD(__reg_0_2, __h); __CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2); __STORE(__h - 4, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0); __h++; __LOAD(__reg_0_3, __h); __CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3); __STORE(__h - 4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1); __h++; } if (__h == 
__side1LenOl) return; __LOAD(__reg_0_4, __h); __CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4); __STORE(__h - 4, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2); __h++; if (__h == __side1LenOl) return; __LOAD(__reg_0_0, __h); __CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0); __STORE(__h - 4, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3); __h++; if (__h == __side1LenOl) return; __LOAD(__reg_0_1, __h); __CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1); __STORE(__h - 4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4); __h++; if (__h == __side1LenOl) return; __LOAD(__reg_0_2, __h); __CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2); __STORE(__h - 4, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0); __h++; if (__h == __side1LenOl) return; __LOAD(__reg_0_3, __h); __CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3); __STORE(__h - 4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1); __h++; } } __global__ void kernel0_1(double *A, int dimsize, int timestep, int c0) { #ifndef AN5D_TYPE #define AN5D_TYPE unsigned #endif const AN5D_TYPE __c0Len = (timestep - 0); const AN5D_TYPE __c0Pad = (0); #define __c0 c0 const AN5D_TYPE __c1Len = (dimsize - 2 - 2); const AN5D_TYPE __c1Pad = (2); #define __c1 c1 const AN5D_TYPE __c2Len = (dimsize - 2 - 2); const AN5D_TYPE __c2Pad = (2); #define __c2 c2 const AN5D_TYPE __c3Len = (dimsize - 2 - 2); const AN5D_TYPE __c3Pad = (2); #define __c3 c3 const AN5D_TYPE __halo1 = 2; const AN5D_TYPE __halo2 = 2; const AN5D_TYPE __halo3 = 2; const AN5D_TYPE __side0Len = 1; const AN5D_TYPE __side1Len = 128; const AN5D_TYPE __side2Len = 28; const AN5D_TYPE __side3Len = 28; const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len); const AN5D_TYPE __OlLen3 = (__halo3 * __side0Len); const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2); const AN5D_TYPE __side3LenOl = (__side3Len + 2 * __OlLen3); const AN5D_TYPE __blockSize = 1 * __side2LenOl * __side3LenOl; const AN5D_TYPE __side1Num = (__c1Len + __side1Len - 1) / __side1Len; const AN5D_TYPE __side2Num = (__c2Len + __side2Len - 1) / __side2Len; const AN5D_TYPE __side3Num = (__c3Len + __side3Len - 1) / __side3Len; const AN5D_TYPE __tid = threadIdx.y * blockDim.x + threadIdx.x; const AN5D_TYPE __local_c2 = __tid / __side3LenOl; const AN5D_TYPE __local_c3 = __tid % __side3LenOl; const AN5D_TYPE __c1Id = blockIdx.x / __side2Num / __side3Num; const AN5D_TYPE __c2 = (blockIdx.x / __side3Num % __side2Num) * __side2Len + __local_c2 + __c2Pad - __OlLen2; const AN5D_TYPE __c3 = (blockIdx.x % __side3Num) * __side3Len + __local_c3 + __c3Pad - __OlLen3; double __reg_0_0; double __reg_0_1; double __reg_0_2; double __reg_0_3; double __reg_0_4; __shared__ double __c_sb_double[__blockSize * 2]; double *__c_sb = __c_sb_double; const AN5D_TYPE __loadValid = 1 && __c2 >= __c2Pad - __halo2 && __c2 < __c2Pad + __c2Len + __halo2 && __c3 >= __c3Pad - __halo3 && __c3 < __c3Pad + __c3Len + __halo3; const AN5D_TYPE __updateValid = 1 && __c2 >= __c2Pad && __c2 < __c2Pad + __c2Len && __c3 >= __c3Pad && __c3 < __c3Pad + __c3Len; const AN5D_TYPE __writeValid1 = __updateValid && __local_c2 >= (__halo2 * 1) && __local_c2 < __side2LenOl - (__halo2 * 1) && __local_c3 >= (__halo3 * 1) && __local_c3 < __side3LenOl - (__halo3 * 1); const AN5D_TYPE __storeValid = __writeValid1; AN5D_TYPE __c1; AN5D_TYPE __h; const 
AN5D_TYPE __c1Pad2 = __c1Pad + __side1Len * __c1Id; #define __LOAD(reg, h) do { if (__loadValid) { __c1 = __c1Pad2 - __halo1 + h; reg = A[(((__c0 % 2) * dimsize + __c1) * dimsize + __c2) * dimsize + __c3]; }} while (0) #define __DEST (A[((((c0 + 1) % 2) * dimsize + c1) * dimsize + c2) * dimsize + c3]) #define __REGREF(reg, i2, i3) reg #define __SBREF(sb, i2, i3) __sbref_wrap(sb, (int)__tid + i2 * (int)__side3LenOl + i3) #define __CALCEXPR(__rn0, __a, __b, __c, __d, __e) do { __rn0 = (((((((((((((0.2500f * (__REGREF(__c, 0, 0))) + (0.0620f * (__REGREF(__b, 0, 0)))) + (0.0621f * (__REGREF(__d, 0, 0)))) + (0.0622f * (__SBREF(__c_sb, -1, 0)))) + (0.0623f * (__SBREF(__c_sb, 1, 0)))) + (0.0624f * (__SBREF(__c_sb, 0, -1)))) + (0.06245f * (__SBREF(__c_sb, 0, 1)))) + (0.06255f * (__REGREF(__a, 0, 0)))) + (0.0626f * (__REGREF(__e, 0, 0)))) + (0.0627f * (__SBREF(__c_sb, -2, 0)))) + (0.0628f * (__SBREF(__c_sb, 2, 0)))) + (0.0629f * (__SBREF(__c_sb, 0, -2)))) + (0.0630f * (__SBREF(__c_sb, 0, 2)))); } while (0) #define __DB_SWITCH() do { __c_sb = &__c_sb_double[(__c_sb == __c_sb_double) ? __blockSize : 0]; } while (0) #define __CALCSETUP(a, b, c, d, e) do { __DB_SWITCH(); __c_sb[__tid] = c; __syncthreads(); } while (0) #define __STORE(h, reg0, reg1, reg2, reg3, reg4) do { __CALCSETUP(reg0, reg1, reg2, reg3, reg4); if (__storeValid) { __c1 = __c1Pad2 - __halo1 + h; __CALCEXPR(__DEST, reg0, reg1, reg2, reg3, reg4); } } while (0) if (__c1Id == 0) { __LOAD(__reg_0_0, 0); __LOAD(__reg_0_1, 1); __LOAD(__reg_0_2, 2); __LOAD(__reg_0_3, 3); __LOAD(__reg_0_4, 4); __STORE(2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4); } else { __LOAD(__reg_0_0, 0); __LOAD(__reg_0_1, 1); __LOAD(__reg_0_2, 2); __LOAD(__reg_0_3, 3); __LOAD(__reg_0_4, 4); __STORE(2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4); } __c_sb = __c_sb_double + __blockSize * 1; if (__c1Id == __side1Num - 1) { for (__h = 5; __h <= __c1Len - __side1Len * __c1Id + __halo1 * 2 - 5;) { __LOAD(__reg_0_0, __h); __STORE(__h - 2, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0); __h++; __LOAD(__reg_0_1, __h); __STORE(__h - 2, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1); __h++; __LOAD(__reg_0_2, __h); __STORE(__h - 2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2); __h++; __LOAD(__reg_0_3, __h); __STORE(__h - 2, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3); __h++; __LOAD(__reg_0_4, __h); __STORE(__h - 2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4); __h++; __DB_SWITCH(); __syncthreads(); } if (0) {} else if (__h + 0 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { } else if (__h + 1 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __LOAD(__reg_0_0, __h + 0); __STORE(__h - 2, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0); } else if (__h + 2 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __LOAD(__reg_0_0, __h + 0); __STORE(__h - 2, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0); __LOAD(__reg_0_1, __h + 1); __STORE(__h - 1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1); } else if (__h + 3 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __LOAD(__reg_0_0, __h + 0); __STORE(__h - 2, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0); __LOAD(__reg_0_1, __h + 1); __STORE(__h - 1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1); __LOAD(__reg_0_2, __h + 2); __STORE(__h + 0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2); } else if (__h + 4 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __LOAD(__reg_0_0, __h + 0); __STORE(__h - 2, __reg_0_1, __reg_0_2, 
__reg_0_3, __reg_0_4, __reg_0_0); __LOAD(__reg_0_1, __h + 1); __STORE(__h - 1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1); __LOAD(__reg_0_2, __h + 2); __STORE(__h + 0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2); __LOAD(__reg_0_3, __h + 3); __STORE(__h + 1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3); } } else { for (__h = 5; __h <= __side1LenOl - 5;) { __LOAD(__reg_0_0, __h); __STORE(__h - 2, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0); __h++; __LOAD(__reg_0_1, __h); __STORE(__h - 2, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1); __h++; __LOAD(__reg_0_2, __h); __STORE(__h - 2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2); __h++; __LOAD(__reg_0_3, __h); __STORE(__h - 2, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3); __h++; __LOAD(__reg_0_4, __h); __STORE(__h - 2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4); __h++; __DB_SWITCH(); __syncthreads(); } if (__h == __side1LenOl) return; __LOAD(__reg_0_0, __h); __STORE(__h - 2, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0); __h++; if (__h == __side1LenOl) return; __LOAD(__reg_0_1, __h); __STORE(__h - 2, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1); __h++; if (__h == __side1LenOl) return; __LOAD(__reg_0_2, __h); __STORE(__h - 2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2); __h++; if (__h == __side1LenOl) return; __LOAD(__reg_0_3, __h); __STORE(__h - 2, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3); __h++; if (__h == __side1LenOl) return; __LOAD(__reg_0_4, __h); __STORE(__h - 2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4); __h++; } }
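The generated kernels above (kernel0_3, kernel0_2, kernel0_1) all apply the same radius-2 star stencil and differ only in how many fused time steps (__side0Len = 3, 2, 1) each launch computes, with the tile sizes shrunk accordingly. The optimization is to stream the c1 dimension through a rotating set of five registers while the current c2/c3 plane sits in a double-buffered shared-memory tile (__c_sb_double with __DB_SWITCH). For reference, the sketch below is not part of the generated file: it writes the same update naively, one interior point per thread, with the weights copied from __CALCEXPR (written there as float literals), and it splits the time-parity indexing of A (read plane c0 % 2, write plane (c0 + 1) % 2 in __LOAD and __DEST) into explicit in/out pointers for clarity.

#include <hip/hip_runtime.h>

// Naive, unoptimized reference for the star-2 3D stencil computed above.
// Example launch (HIP): hipLaunchKernelGGL(star3d2r_naive,
//   dim3((dimsize + 7) / 8, (dimsize + 7) / 8, (dimsize + 7) / 8),
//   dim3(8, 8, 8), 0, 0, d_in, d_out, dimsize);
__global__ void star3d2r_naive(const double* in, double* out, int dimsize) {
  // One thread per grid point; c1/c2/c3 follow the naming in the file above.
  int c3 = blockIdx.x * blockDim.x + threadIdx.x;
  int c2 = blockIdx.y * blockDim.y + threadIdx.y;
  int c1 = blockIdx.z * blockDim.z + threadIdx.z;
  // Only interior points (halo of 2 in every dimension) are updated.
  if (c1 < 2 || c1 >= dimsize - 2 || c2 < 2 || c2 >= dimsize - 2 ||
      c3 < 2 || c3 >= dimsize - 2)
    return;
#define IDX(i, j, k) ((((i) * dimsize) + (j)) * dimsize + (k))
  out[IDX(c1, c2, c3)] =
      0.2500  * in[IDX(c1,     c2,     c3    )]   // center (__c)
    + 0.0620  * in[IDX(c1 - 1, c2,     c3    )]   // __b
    + 0.0621  * in[IDX(c1 + 1, c2,     c3    )]   // __d
    + 0.0622  * in[IDX(c1,     c2 - 1, c3    )]
    + 0.0623  * in[IDX(c1,     c2 + 1, c3    )]
    + 0.0624  * in[IDX(c1,     c2,     c3 - 1)]
    + 0.06245 * in[IDX(c1,     c2,     c3 + 1)]
    + 0.06255 * in[IDX(c1 - 2, c2,     c3    )]   // __a
    + 0.0626  * in[IDX(c1 + 2, c2,     c3    )]   // __e
    + 0.0627  * in[IDX(c1,     c2 - 2, c3    )]
    + 0.0628  * in[IDX(c1,     c2 + 2, c3    )]
    + 0.0629  * in[IDX(c1,     c2,     c3 - 2)]
    + 0.0630  * in[IDX(c1,     c2,     c3 + 2)];
#undef IDX
}

A host loop would launch this once per time step over the interior points, swapping the read and write planes each step; the fused kernels above avoid exactly that by computing __side0Len steps per pass at the cost of wider halos per tile.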
c105be1c83addfea784674e276d762723dc15fdd.cu
#include "star3d2r-32x32-3-128_kernel.hu" __device__ double __sbref_wrap(double *sb, size_t index) { return sb[index]; } __global__ void kernel0_3(double *A, int dimsize, int timestep, int c0) { #ifndef AN5D_TYPE #define AN5D_TYPE unsigned #endif const AN5D_TYPE __c0Len = (timestep - 0); const AN5D_TYPE __c0Pad = (0); #define __c0 c0 const AN5D_TYPE __c1Len = (dimsize - 2 - 2); const AN5D_TYPE __c1Pad = (2); #define __c1 c1 const AN5D_TYPE __c2Len = (dimsize - 2 - 2); const AN5D_TYPE __c2Pad = (2); #define __c2 c2 const AN5D_TYPE __c3Len = (dimsize - 2 - 2); const AN5D_TYPE __c3Pad = (2); #define __c3 c3 const AN5D_TYPE __halo1 = 2; const AN5D_TYPE __halo2 = 2; const AN5D_TYPE __halo3 = 2; const AN5D_TYPE __side0Len = 3; const AN5D_TYPE __side1Len = 128; const AN5D_TYPE __side2Len = 20; const AN5D_TYPE __side3Len = 20; const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len); const AN5D_TYPE __OlLen3 = (__halo3 * __side0Len); const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2); const AN5D_TYPE __side3LenOl = (__side3Len + 2 * __OlLen3); const AN5D_TYPE __blockSize = 1 * __side2LenOl * __side3LenOl; const AN5D_TYPE __side1Num = (__c1Len + __side1Len - 1) / __side1Len; const AN5D_TYPE __side2Num = (__c2Len + __side2Len - 1) / __side2Len; const AN5D_TYPE __side3Num = (__c3Len + __side3Len - 1) / __side3Len; const AN5D_TYPE __tid = threadIdx.y * blockDim.x + threadIdx.x; const AN5D_TYPE __local_c2 = __tid / __side3LenOl; const AN5D_TYPE __local_c3 = __tid % __side3LenOl; const AN5D_TYPE __c1Id = blockIdx.x / __side2Num / __side3Num; const AN5D_TYPE __c2 = (blockIdx.x / __side3Num % __side2Num) * __side2Len + __local_c2 + __c2Pad - __OlLen2; const AN5D_TYPE __c3 = (blockIdx.x % __side3Num) * __side3Len + __local_c3 + __c3Pad - __OlLen3; double __reg_0_0; double __reg_0_1; double __reg_0_2; double __reg_0_3; double __reg_0_4; double __reg_1_0; double __reg_1_1; double __reg_1_2; double __reg_1_3; double __reg_1_4; double __reg_2_0; double __reg_2_1; double __reg_2_2; double __reg_2_3; double __reg_2_4; __shared__ double __c_sb_double[__blockSize * 2]; double *__c_sb = __c_sb_double; const AN5D_TYPE __loadValid = 1 && __c2 >= __c2Pad - __halo2 && __c2 < __c2Pad + __c2Len + __halo2 && __c3 >= __c3Pad - __halo3 && __c3 < __c3Pad + __c3Len + __halo3; const AN5D_TYPE __updateValid = 1 && __c2 >= __c2Pad && __c2 < __c2Pad + __c2Len && __c3 >= __c3Pad && __c3 < __c3Pad + __c3Len; const AN5D_TYPE __writeValid1 = __updateValid && __local_c2 >= (__halo2 * 1) && __local_c2 < __side2LenOl - (__halo2 * 1) && __local_c3 >= (__halo3 * 1) && __local_c3 < __side3LenOl - (__halo3 * 1); const AN5D_TYPE __writeValid2 = __updateValid && __local_c2 >= (__halo2 * 2) && __local_c2 < __side2LenOl - (__halo2 * 2) && __local_c3 >= (__halo3 * 2) && __local_c3 < __side3LenOl - (__halo3 * 2); const AN5D_TYPE __writeValid3 = __updateValid && __local_c2 >= (__halo2 * 3) && __local_c2 < __side2LenOl - (__halo2 * 3) && __local_c3 >= (__halo3 * 3) && __local_c3 < __side3LenOl - (__halo3 * 3); const AN5D_TYPE __storeValid = __writeValid3; AN5D_TYPE __c1; AN5D_TYPE __h; const AN5D_TYPE __c1Pad2 = __c1Pad + __side1Len * __c1Id; #define __LOAD(reg, h) do { if (__loadValid) { __c1 = __c1Pad2 - __halo1 + h; reg = A[(((__c0 % 2) * dimsize + __c1) * dimsize + __c2) * dimsize + __c3]; }} while (0) #define __DEST (A[((((c0 + 1) % 2) * dimsize + c1) * dimsize + c2) * dimsize + c3]) #define __REGREF(reg, i2, i3) reg #define 
__SBREF(sb, i2, i3) __sbref_wrap(sb, (int)__tid + i2 * (int)__side3LenOl + i3) #define __CALCEXPR(__rn0, __a, __b, __c, __d, __e) do { __rn0 = (((((((((((((0.2500f * (__REGREF(__c, 0, 0))) + (0.0620f * (__REGREF(__b, 0, 0)))) + (0.0621f * (__REGREF(__d, 0, 0)))) + (0.0622f * (__SBREF(__c_sb, -1, 0)))) + (0.0623f * (__SBREF(__c_sb, 1, 0)))) + (0.0624f * (__SBREF(__c_sb, 0, -1)))) + (0.06245f * (__SBREF(__c_sb, 0, 1)))) + (0.06255f * (__REGREF(__a, 0, 0)))) + (0.0626f * (__REGREF(__e, 0, 0)))) + (0.0627f * (__SBREF(__c_sb, -2, 0)))) + (0.0628f * (__SBREF(__c_sb, 2, 0)))) + (0.0629f * (__SBREF(__c_sb, 0, -2)))) + (0.0630f * (__SBREF(__c_sb, 0, 2)))); } while (0) #define __DB_SWITCH() do { __c_sb = &__c_sb_double[(__c_sb == __c_sb_double) ? __blockSize : 0]; } while (0) #define __CALCSETUP(a, b, c, d, e) do { __DB_SWITCH(); __c_sb[__tid] = c; __syncthreads(); } while (0) #define __CALC1(out, reg0, reg1, reg2, reg3, reg4) do { __CALCSETUP(reg0, reg1, reg2, reg3, reg4); if (__writeValid1) __CALCEXPR(out, reg0, reg1, reg2, reg3, reg4); else out = reg2; } while (0) #define __CALC2(out, reg0, reg1, reg2, reg3, reg4) do { __CALCSETUP(reg0, reg1, reg2, reg3, reg4); if (__writeValid2) __CALCEXPR(out, reg0, reg1, reg2, reg3, reg4); else out = reg2; } while (0) #define __STORE(h, reg0, reg1, reg2, reg3, reg4) do { __CALCSETUP(reg0, reg1, reg2, reg3, reg4); if (__storeValid) { __c1 = __c1Pad2 - __halo1 + h; __CALCEXPR(__DEST, reg0, reg1, reg2, reg3, reg4); } } while (0) if (__c1Id == 0) { __LOAD(__reg_2_0, 0); __LOAD(__reg_2_1, 1); __LOAD(__reg_0_2, 2); __LOAD(__reg_0_3, 3); __LOAD(__reg_0_4, 4); __CALC1(__reg_1_2, __reg_2_0, __reg_2_1, __reg_0_2, __reg_0_3, __reg_0_4); __LOAD(__reg_0_0, 5); __CALC1(__reg_1_3, __reg_2_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0); __LOAD(__reg_0_1, 6); __CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_2_0, __reg_2_1, __reg_1_2, __reg_1_3, __reg_1_4); __LOAD(__reg_0_2, 7); __CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_3, __reg_2_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0); __LOAD(__reg_0_3, 8); __CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3); __CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1); __STORE(2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4); __LOAD(__reg_0_4, 9); __CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4); __CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2); __STORE(3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0); __LOAD(__reg_0_0, 10); __CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0); __CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3); __STORE(4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1); __LOAD(__reg_0_1, 11); __CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4); __STORE(5, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2); __LOAD(__reg_0_2, 12); __CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0); __STORE(6, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3); } else { __LOAD(__reg_0_0, 0); __LOAD(__reg_0_1, 1); __LOAD(__reg_0_2, 2); __LOAD(__reg_0_3, 3); __LOAD(__reg_0_4, 4); __CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4); __LOAD(__reg_0_0, 5); 
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0); __LOAD(__reg_0_1, 6); __CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1); __LOAD(__reg_0_2, 7); __CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2); __LOAD(__reg_0_3, 8); __CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3); __CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1); __LOAD(__reg_0_4, 9); __CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4); __CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2); __LOAD(__reg_0_0, 10); __CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0); __CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3); __LOAD(__reg_0_1, 11); __CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4); __LOAD(__reg_0_2, 12); __CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0); __STORE(6, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3); } __c_sb = __c_sb_double + __blockSize * 1; if (__c1Id == __side1Num - 1) { for (__h = 13; __h <= __c1Len - __side1Len * __c1Id + __halo1 * 2 - 5;) { __LOAD(__reg_0_3, __h); __CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3); __CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1); __STORE(__h - 6, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4); __h++; __LOAD(__reg_0_4, __h); __CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4); __CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2); __STORE(__h - 6, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0); __h++; __LOAD(__reg_0_0, __h); __CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0); __CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3); __STORE(__h - 6, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1); __h++; __LOAD(__reg_0_1, __h); __CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4); __STORE(__h - 6, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2); __h++; __LOAD(__reg_0_2, __h); __CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0); __STORE(__h - 6, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3); __h++; __DB_SWITCH(); __syncthreads(); } if (0) {} else if (__h + 0 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_0_1); __STORE(__h - 6, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4); __CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_0_1, __reg_0_2); __STORE(__h - 5, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0); __STORE(__h - 4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_0_1); __STORE(__h - 3, __reg_2_3, __reg_2_4, __reg_2_0, __reg_0_1, __reg_0_2); } else if (__h + 1 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __LOAD(__reg_0_3, __h + 0); __CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3); __CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1); __STORE(__h - 6, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4); __CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, 
__reg_0_2); __STORE(__h - 5, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0); __CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_0_2, __reg_0_3); __STORE(__h - 4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1); __STORE(__h - 3, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_0_2); __STORE(__h - 2, __reg_2_4, __reg_2_0, __reg_2_1, __reg_0_2, __reg_0_3); } else if (__h + 2 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __LOAD(__reg_0_3, __h + 0); __CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3); __CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1); __STORE(__h - 6, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4); __LOAD(__reg_0_4, __h + 1); __CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4); __CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2); __STORE(__h - 5, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0); __CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_0_3); __STORE(__h - 4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1); __CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_0_3, __reg_0_4); __STORE(__h - 3, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2); __STORE(__h - 2, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_0_3); __STORE(__h - 1, __reg_2_0, __reg_2_1, __reg_2_2, __reg_0_3, __reg_0_4); } else if (__h + 3 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __LOAD(__reg_0_3, __h + 0); __CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3); __CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1); __STORE(__h - 6, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4); __LOAD(__reg_0_4, __h + 1); __CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4); __CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2); __STORE(__h - 5, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0); __LOAD(__reg_0_0, __h + 2); __CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0); __CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3); __STORE(__h - 4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1); __CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_0_4); __STORE(__h - 3, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2); __CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_0_4, __reg_0_0); __STORE(__h - 2, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3); __STORE(__h - 1, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_0_4); __STORE(__h + 0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_0_4, __reg_0_0); } else if (__h + 4 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __LOAD(__reg_0_3, __h + 0); __CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3); __CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1); __STORE(__h - 6, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4); __LOAD(__reg_0_4, __h + 1); __CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4); __CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2); __STORE(__h - 5, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0); __LOAD(__reg_0_0, __h + 2); __CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0); __CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3); __STORE(__h - 4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1); __LOAD(__reg_0_1, __h + 3); __CALC1(__reg_1_4, __reg_0_2, __reg_0_3, 
__reg_0_4, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4); __STORE(__h - 3, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2); __CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_0_0); __STORE(__h - 2, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3); __CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_0_0, __reg_0_1); __STORE(__h - 1, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4); __STORE(__h + 0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_0_0); __STORE(__h + 1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_0_0, __reg_0_1); } } else { for (__h = 13; __h <= __side1LenOl - 5;) { __LOAD(__reg_0_3, __h); __CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3); __CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1); __STORE(__h - 6, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4); __h++; __LOAD(__reg_0_4, __h); __CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4); __CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2); __STORE(__h - 6, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0); __h++; __LOAD(__reg_0_0, __h); __CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0); __CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3); __STORE(__h - 6, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1); __h++; __LOAD(__reg_0_1, __h); __CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4); __STORE(__h - 6, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2); __h++; __LOAD(__reg_0_2, __h); __CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0); __STORE(__h - 6, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3); __h++; __DB_SWITCH(); __syncthreads(); } if (__h == __side1LenOl) return; __LOAD(__reg_0_3, __h); __CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3); __CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1); __STORE(__h - 6, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4); __h++; if (__h == __side1LenOl) return; __LOAD(__reg_0_4, __h); __CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4); __CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2); __STORE(__h - 6, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0); __h++; if (__h == __side1LenOl) return; __LOAD(__reg_0_0, __h); __CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0); __CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3); __STORE(__h - 6, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1); __h++; if (__h == __side1LenOl) return; __LOAD(__reg_0_1, __h); __CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1); __CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4); __STORE(__h - 6, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2); __h++; if (__h == __side1LenOl) return; __LOAD(__reg_0_2, __h); __CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2); __CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0); __STORE(__h - 6, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3); __h++; } } __global__ void kernel0_2(double *A, int dimsize, int timestep, int c0) { #ifndef AN5D_TYPE #define AN5D_TYPE unsigned #endif const 
AN5D_TYPE __c0Len = (timestep - 0); const AN5D_TYPE __c0Pad = (0); #define __c0 c0 const AN5D_TYPE __c1Len = (dimsize - 2 - 2); const AN5D_TYPE __c1Pad = (2); #define __c1 c1 const AN5D_TYPE __c2Len = (dimsize - 2 - 2); const AN5D_TYPE __c2Pad = (2); #define __c2 c2 const AN5D_TYPE __c3Len = (dimsize - 2 - 2); const AN5D_TYPE __c3Pad = (2); #define __c3 c3 const AN5D_TYPE __halo1 = 2; const AN5D_TYPE __halo2 = 2; const AN5D_TYPE __halo3 = 2; const AN5D_TYPE __side0Len = 2; const AN5D_TYPE __side1Len = 128; const AN5D_TYPE __side2Len = 24; const AN5D_TYPE __side3Len = 24; const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len); const AN5D_TYPE __OlLen3 = (__halo3 * __side0Len); const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2); const AN5D_TYPE __side3LenOl = (__side3Len + 2 * __OlLen3); const AN5D_TYPE __blockSize = 1 * __side2LenOl * __side3LenOl; const AN5D_TYPE __side1Num = (__c1Len + __side1Len - 1) / __side1Len; const AN5D_TYPE __side2Num = (__c2Len + __side2Len - 1) / __side2Len; const AN5D_TYPE __side3Num = (__c3Len + __side3Len - 1) / __side3Len; const AN5D_TYPE __tid = threadIdx.y * blockDim.x + threadIdx.x; const AN5D_TYPE __local_c2 = __tid / __side3LenOl; const AN5D_TYPE __local_c3 = __tid % __side3LenOl; const AN5D_TYPE __c1Id = blockIdx.x / __side2Num / __side3Num; const AN5D_TYPE __c2 = (blockIdx.x / __side3Num % __side2Num) * __side2Len + __local_c2 + __c2Pad - __OlLen2; const AN5D_TYPE __c3 = (blockIdx.x % __side3Num) * __side3Len + __local_c3 + __c3Pad - __OlLen3; double __reg_0_0; double __reg_0_1; double __reg_0_2; double __reg_0_3; double __reg_0_4; double __reg_1_0; double __reg_1_1; double __reg_1_2; double __reg_1_3; double __reg_1_4; __shared__ double __c_sb_double[__blockSize * 2]; double *__c_sb = __c_sb_double; const AN5D_TYPE __loadValid = 1 && __c2 >= __c2Pad - __halo2 && __c2 < __c2Pad + __c2Len + __halo2 && __c3 >= __c3Pad - __halo3 && __c3 < __c3Pad + __c3Len + __halo3; const AN5D_TYPE __updateValid = 1 && __c2 >= __c2Pad && __c2 < __c2Pad + __c2Len && __c3 >= __c3Pad && __c3 < __c3Pad + __c3Len; const AN5D_TYPE __writeValid1 = __updateValid && __local_c2 >= (__halo2 * 1) && __local_c2 < __side2LenOl - (__halo2 * 1) && __local_c3 >= (__halo3 * 1) && __local_c3 < __side3LenOl - (__halo3 * 1); const AN5D_TYPE __writeValid2 = __updateValid && __local_c2 >= (__halo2 * 2) && __local_c2 < __side2LenOl - (__halo2 * 2) && __local_c3 >= (__halo3 * 2) && __local_c3 < __side3LenOl - (__halo3 * 2); const AN5D_TYPE __storeValid = __writeValid2; AN5D_TYPE __c1; AN5D_TYPE __h; const AN5D_TYPE __c1Pad2 = __c1Pad + __side1Len * __c1Id; #define __LOAD(reg, h) do { if (__loadValid) { __c1 = __c1Pad2 - __halo1 + h; reg = A[(((__c0 % 2) * dimsize + __c1) * dimsize + __c2) * dimsize + __c3]; }} while (0) #define __DEST (A[((((c0 + 1) % 2) * dimsize + c1) * dimsize + c2) * dimsize + c3]) #define __REGREF(reg, i2, i3) reg #define __SBREF(sb, i2, i3) __sbref_wrap(sb, (int)__tid + i2 * (int)__side3LenOl + i3) #define __CALCEXPR(__rn0, __a, __b, __c, __d, __e) do { __rn0 = (((((((((((((0.2500f * (__REGREF(__c, 0, 0))) + (0.0620f * (__REGREF(__b, 0, 0)))) + (0.0621f * (__REGREF(__d, 0, 0)))) + (0.0622f * (__SBREF(__c_sb, -1, 0)))) + (0.0623f * (__SBREF(__c_sb, 1, 0)))) + (0.0624f * (__SBREF(__c_sb, 0, -1)))) + (0.06245f * (__SBREF(__c_sb, 0, 1)))) + (0.06255f * (__REGREF(__a, 0, 0)))) + (0.0626f * (__REGREF(__e, 0, 0)))) + (0.0627f * (__SBREF(__c_sb, -2, 0)))) + (0.0628f * 
(__SBREF(__c_sb, 2, 0)))) + (0.0629f * (__SBREF(__c_sb, 0, -2)))) + (0.0630f * (__SBREF(__c_sb, 0, 2)))); } while (0) #define __DB_SWITCH() do { __c_sb = &__c_sb_double[(__c_sb == __c_sb_double) ? __blockSize : 0]; } while (0) #define __CALCSETUP(a, b, c, d, e) do { __DB_SWITCH(); __c_sb[__tid] = c; __syncthreads(); } while (0) #define __CALC1(out, reg0, reg1, reg2, reg3, reg4) do { __CALCSETUP(reg0, reg1, reg2, reg3, reg4); if (__writeValid1) __CALCEXPR(out, reg0, reg1, reg2, reg3, reg4); else out = reg2; } while (0) #define __STORE(h, reg0, reg1, reg2, reg3, reg4) do { __CALCSETUP(reg0, reg1, reg2, reg3, reg4); if (__storeValid) { __c1 = __c1Pad2 - __halo1 + h; __CALCEXPR(__DEST, reg0, reg1, reg2, reg3, reg4); } } while (0) if (__c1Id == 0) { __LOAD(__reg_1_0, 0); __LOAD(__reg_1_1, 1); __LOAD(__reg_0_2, 2); __LOAD(__reg_0_3, 3); __LOAD(__reg_0_4, 4); __CALC1(__reg_1_2, __reg_1_0, __reg_1_1, __reg_0_2, __reg_0_3, __reg_0_4); __LOAD(__reg_0_0, 5); __CALC1(__reg_1_3, __reg_1_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0); __LOAD(__reg_0_1, 6); __CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1); __STORE(2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4); __LOAD(__reg_0_2, 7); __CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2); __STORE(3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0); __LOAD(__reg_0_3, 8); __CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3); __STORE(4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1); } else { __LOAD(__reg_0_0, 0); __LOAD(__reg_0_1, 1); __LOAD(__reg_0_2, 2); __LOAD(__reg_0_3, 3); __LOAD(__reg_0_4, 4); __CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4); __LOAD(__reg_0_0, 5); __CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0); __LOAD(__reg_0_1, 6); __CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1); __LOAD(__reg_0_2, 7); __CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2); __LOAD(__reg_0_3, 8); __CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3); __STORE(4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1); } __c_sb = __c_sb_double + __blockSize * 0; if (__c1Id == __side1Num - 1) { for (__h = 9; __h <= __c1Len - __side1Len * __c1Id + __halo1 * 2 - 5;) { __LOAD(__reg_0_4, __h); __CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4); __STORE(__h - 4, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2); __h++; __LOAD(__reg_0_0, __h); __CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0); __STORE(__h - 4, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3); __h++; __LOAD(__reg_0_1, __h); __CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1); __STORE(__h - 4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4); __h++; __LOAD(__reg_0_2, __h); __CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2); __STORE(__h - 4, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0); __h++; __LOAD(__reg_0_3, __h); __CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3); __STORE(__h - 4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1); __h++; } if (0) {} else if (__h + 0 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __STORE(__h - 4, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_0_2); __STORE(__h - 3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_0_2, __reg_0_3); } else if (__h + 1 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __LOAD(__reg_0_4, __h + 0); 
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4); __STORE(__h - 4, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2); __STORE(__h - 3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_0_3); __STORE(__h - 2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_0_3, __reg_0_4); } else if (__h + 2 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __LOAD(__reg_0_4, __h + 0); __CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4); __STORE(__h - 4, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2); __LOAD(__reg_0_0, __h + 1); __CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0); __STORE(__h - 3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3); __STORE(__h - 2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_0_4); __STORE(__h - 1, __reg_1_1, __reg_1_2, __reg_1_3, __reg_0_4, __reg_0_0); } else if (__h + 3 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __LOAD(__reg_0_4, __h + 0); __CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4); __STORE(__h - 4, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2); __LOAD(__reg_0_0, __h + 1); __CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0); __STORE(__h - 3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3); __LOAD(__reg_0_1, __h + 2); __CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1); __STORE(__h - 2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4); __STORE(__h - 1, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_0_0); __STORE(__h + 0, __reg_1_2, __reg_1_3, __reg_1_4, __reg_0_0, __reg_0_1); } else if (__h + 4 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __LOAD(__reg_0_4, __h + 0); __CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4); __STORE(__h - 4, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2); __LOAD(__reg_0_0, __h + 1); __CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0); __STORE(__h - 3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3); __LOAD(__reg_0_1, __h + 2); __CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1); __STORE(__h - 2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4); __LOAD(__reg_0_2, __h + 3); __CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2); __STORE(__h - 1, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0); __STORE(__h + 0, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_0_1); __STORE(__h + 1, __reg_1_3, __reg_1_4, __reg_1_0, __reg_0_1, __reg_0_2); } } else { for (__h = 9; __h <= __side1LenOl - 5;) { __LOAD(__reg_0_4, __h); __CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4); __STORE(__h - 4, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2); __h++; __LOAD(__reg_0_0, __h); __CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0); __STORE(__h - 4, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3); __h++; __LOAD(__reg_0_1, __h); __CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1); __STORE(__h - 4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4); __h++; __LOAD(__reg_0_2, __h); __CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2); __STORE(__h - 4, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0); __h++; __LOAD(__reg_0_3, __h); __CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3); __STORE(__h - 4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1); __h++; } if (__h == __side1LenOl) return; __LOAD(__reg_0_4, __h); __CALC1(__reg_1_2, 
__reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4); __STORE(__h - 4, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2); __h++; if (__h == __side1LenOl) return; __LOAD(__reg_0_0, __h); __CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0); __STORE(__h - 4, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3); __h++; if (__h == __side1LenOl) return; __LOAD(__reg_0_1, __h); __CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1); __STORE(__h - 4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4); __h++; if (__h == __side1LenOl) return; __LOAD(__reg_0_2, __h); __CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2); __STORE(__h - 4, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0); __h++; if (__h == __side1LenOl) return; __LOAD(__reg_0_3, __h); __CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3); __STORE(__h - 4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1); __h++; } } __global__ void kernel0_1(double *A, int dimsize, int timestep, int c0) { #ifndef AN5D_TYPE #define AN5D_TYPE unsigned #endif const AN5D_TYPE __c0Len = (timestep - 0); const AN5D_TYPE __c0Pad = (0); #define __c0 c0 const AN5D_TYPE __c1Len = (dimsize - 2 - 2); const AN5D_TYPE __c1Pad = (2); #define __c1 c1 const AN5D_TYPE __c2Len = (dimsize - 2 - 2); const AN5D_TYPE __c2Pad = (2); #define __c2 c2 const AN5D_TYPE __c3Len = (dimsize - 2 - 2); const AN5D_TYPE __c3Pad = (2); #define __c3 c3 const AN5D_TYPE __halo1 = 2; const AN5D_TYPE __halo2 = 2; const AN5D_TYPE __halo3 = 2; const AN5D_TYPE __side0Len = 1; const AN5D_TYPE __side1Len = 128; const AN5D_TYPE __side2Len = 28; const AN5D_TYPE __side3Len = 28; const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len); const AN5D_TYPE __OlLen3 = (__halo3 * __side0Len); const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2); const AN5D_TYPE __side3LenOl = (__side3Len + 2 * __OlLen3); const AN5D_TYPE __blockSize = 1 * __side2LenOl * __side3LenOl; const AN5D_TYPE __side1Num = (__c1Len + __side1Len - 1) / __side1Len; const AN5D_TYPE __side2Num = (__c2Len + __side2Len - 1) / __side2Len; const AN5D_TYPE __side3Num = (__c3Len + __side3Len - 1) / __side3Len; const AN5D_TYPE __tid = threadIdx.y * blockDim.x + threadIdx.x; const AN5D_TYPE __local_c2 = __tid / __side3LenOl; const AN5D_TYPE __local_c3 = __tid % __side3LenOl; const AN5D_TYPE __c1Id = blockIdx.x / __side2Num / __side3Num; const AN5D_TYPE __c2 = (blockIdx.x / __side3Num % __side2Num) * __side2Len + __local_c2 + __c2Pad - __OlLen2; const AN5D_TYPE __c3 = (blockIdx.x % __side3Num) * __side3Len + __local_c3 + __c3Pad - __OlLen3; double __reg_0_0; double __reg_0_1; double __reg_0_2; double __reg_0_3; double __reg_0_4; __shared__ double __c_sb_double[__blockSize * 2]; double *__c_sb = __c_sb_double; const AN5D_TYPE __loadValid = 1 && __c2 >= __c2Pad - __halo2 && __c2 < __c2Pad + __c2Len + __halo2 && __c3 >= __c3Pad - __halo3 && __c3 < __c3Pad + __c3Len + __halo3; const AN5D_TYPE __updateValid = 1 && __c2 >= __c2Pad && __c2 < __c2Pad + __c2Len && __c3 >= __c3Pad && __c3 < __c3Pad + __c3Len; const AN5D_TYPE __writeValid1 = __updateValid && __local_c2 >= (__halo2 * 1) && __local_c2 < __side2LenOl - (__halo2 * 1) && __local_c3 >= (__halo3 * 1) && __local_c3 < __side3LenOl - (__halo3 * 1); const AN5D_TYPE __storeValid = __writeValid1; AN5D_TYPE __c1; AN5D_TYPE __h; const AN5D_TYPE __c1Pad2 = __c1Pad + __side1Len * __c1Id; #define 
__LOAD(reg, h) do { if (__loadValid) { __c1 = __c1Pad2 - __halo1 + h; reg = A[(((__c0 % 2) * dimsize + __c1) * dimsize + __c2) * dimsize + __c3]; }} while (0) #define __DEST (A[((((c0 + 1) % 2) * dimsize + c1) * dimsize + c2) * dimsize + c3]) #define __REGREF(reg, i2, i3) reg #define __SBREF(sb, i2, i3) __sbref_wrap(sb, (int)__tid + i2 * (int)__side3LenOl + i3) #define __CALCEXPR(__rn0, __a, __b, __c, __d, __e) do { __rn0 = (((((((((((((0.2500f * (__REGREF(__c, 0, 0))) + (0.0620f * (__REGREF(__b, 0, 0)))) + (0.0621f * (__REGREF(__d, 0, 0)))) + (0.0622f * (__SBREF(__c_sb, -1, 0)))) + (0.0623f * (__SBREF(__c_sb, 1, 0)))) + (0.0624f * (__SBREF(__c_sb, 0, -1)))) + (0.06245f * (__SBREF(__c_sb, 0, 1)))) + (0.06255f * (__REGREF(__a, 0, 0)))) + (0.0626f * (__REGREF(__e, 0, 0)))) + (0.0627f * (__SBREF(__c_sb, -2, 0)))) + (0.0628f * (__SBREF(__c_sb, 2, 0)))) + (0.0629f * (__SBREF(__c_sb, 0, -2)))) + (0.0630f * (__SBREF(__c_sb, 0, 2)))); } while (0) #define __DB_SWITCH() do { __c_sb = &__c_sb_double[(__c_sb == __c_sb_double) ? __blockSize : 0]; } while (0) #define __CALCSETUP(a, b, c, d, e) do { __DB_SWITCH(); __c_sb[__tid] = c; __syncthreads(); } while (0) #define __STORE(h, reg0, reg1, reg2, reg3, reg4) do { __CALCSETUP(reg0, reg1, reg2, reg3, reg4); if (__storeValid) { __c1 = __c1Pad2 - __halo1 + h; __CALCEXPR(__DEST, reg0, reg1, reg2, reg3, reg4); } } while (0) if (__c1Id == 0) { __LOAD(__reg_0_0, 0); __LOAD(__reg_0_1, 1); __LOAD(__reg_0_2, 2); __LOAD(__reg_0_3, 3); __LOAD(__reg_0_4, 4); __STORE(2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4); } else { __LOAD(__reg_0_0, 0); __LOAD(__reg_0_1, 1); __LOAD(__reg_0_2, 2); __LOAD(__reg_0_3, 3); __LOAD(__reg_0_4, 4); __STORE(2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4); } __c_sb = __c_sb_double + __blockSize * 1; if (__c1Id == __side1Num - 1) { for (__h = 5; __h <= __c1Len - __side1Len * __c1Id + __halo1 * 2 - 5;) { __LOAD(__reg_0_0, __h); __STORE(__h - 2, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0); __h++; __LOAD(__reg_0_1, __h); __STORE(__h - 2, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1); __h++; __LOAD(__reg_0_2, __h); __STORE(__h - 2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2); __h++; __LOAD(__reg_0_3, __h); __STORE(__h - 2, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3); __h++; __LOAD(__reg_0_4, __h); __STORE(__h - 2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4); __h++; __DB_SWITCH(); __syncthreads(); } if (0) {} else if (__h + 0 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { } else if (__h + 1 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __LOAD(__reg_0_0, __h + 0); __STORE(__h - 2, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0); } else if (__h + 2 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __LOAD(__reg_0_0, __h + 0); __STORE(__h - 2, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0); __LOAD(__reg_0_1, __h + 1); __STORE(__h - 1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1); } else if (__h + 3 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __LOAD(__reg_0_0, __h + 0); __STORE(__h - 2, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0); __LOAD(__reg_0_1, __h + 1); __STORE(__h - 1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1); __LOAD(__reg_0_2, __h + 2); __STORE(__h + 0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2); } else if (__h + 4 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __LOAD(__reg_0_0, __h + 0); __STORE(__h - 2, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0); __LOAD(__reg_0_1, __h + 1); 
__STORE(__h - 1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1); __LOAD(__reg_0_2, __h + 2); __STORE(__h + 0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2); __LOAD(__reg_0_3, __h + 3); __STORE(__h + 1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3); } } else { for (__h = 5; __h <= __side1LenOl - 5;) { __LOAD(__reg_0_0, __h); __STORE(__h - 2, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0); __h++; __LOAD(__reg_0_1, __h); __STORE(__h - 2, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1); __h++; __LOAD(__reg_0_2, __h); __STORE(__h - 2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2); __h++; __LOAD(__reg_0_3, __h); __STORE(__h - 2, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3); __h++; __LOAD(__reg_0_4, __h); __STORE(__h - 2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4); __h++; __DB_SWITCH(); __syncthreads(); } if (__h == __side1LenOl) return; __LOAD(__reg_0_0, __h); __STORE(__h - 2, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0); __h++; if (__h == __side1LenOl) return; __LOAD(__reg_0_1, __h); __STORE(__h - 2, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1); __h++; if (__h == __side1LenOl) return; __LOAD(__reg_0_2, __h); __STORE(__h - 2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2); __h++; if (__h == __side1LenOl) return; __LOAD(__reg_0_3, __h); __STORE(__h - 2, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3); __h++; if (__h == __side1LenOl) return; __LOAD(__reg_0_4, __h); __STORE(__h - 2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4); __h++; } }
022b82dcb060e08b24a7e1935eb835a4e5b9d482.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. #include <algorithm> #include <unordered_map> #include <utility> #include <vector> #include "paddle/fluid/framework/convert_utils.h" #include "paddle/fluid/framework/details/nan_inf_utils.h" #include "paddle/fluid/framework/details/nan_inf_utils_detail.h" #include "paddle/fluid/framework/scope.h" namespace paddle { namespace framework { namespace details { static std::once_flag init_multi_gpu_op_var_map_flag; // lazy init static std::vector<std::unordered_map<std::string, memory::AllocationPtr>>& multi_op_var2gpu_str() { static std::vector<std::unordered_map<std::string, memory::AllocationPtr>> _multi_op_var2gpu_str; return _multi_op_var2gpu_str; } static std::vector<std::mutex>& multi_op_var2gpu_str_mutex() { static std::vector<std::mutex> _multi_op_var2gpu_str_mutex; return _multi_op_var2gpu_str_mutex; } static void InitMultiGPUOpVarMap() { int dev_count = platform::GetGPUDeviceCount(); PADDLE_ENFORCE_GT(dev_count, 0, platform::errors::NotFound( "cuda device must > 0, now dev_count=%d", dev_count)); // https://stackoverflow.com/questions/16465633/how-can-i-use-something-like-stdvectorstdmutex std::vector<std::unordered_map<std::string, memory::AllocationPtr>> tmp_multi( dev_count); std::vector<std::mutex> tmp_multi_mutex(dev_count); multi_op_var2gpu_str().swap(tmp_multi); multi_op_var2gpu_str_mutex().swap(tmp_multi_mutex); } template <typename T> __device__ __forceinline__ void PrintNanInfKernel(const T* value, const size_t numel, int print_num, char* debug_info) { const size_t tid = threadIdx.x + blockIdx.x * blockDim.x; __shared__ unsigned int nan_count, inf_count, num_count; if (threadIdx.x == 0) nan_count = inf_count = num_count = 0; __syncthreads; for (size_t i = tid; i < numel; i += blockDim.x * gridDim.x) { unsigned int count = 0; if (isnan(value[i])) { count = atomicAdd(&nan_count, 1); } else if (isinf(value[i])) { count = atomicAdd(&inf_count, 1); } else { count = atomicAdd(&num_count, 1); } // for cuda, print in every block if (count < print_num) { printf("numel:%lu idx:%lu value:%f\n", static_cast<uint64_t>(numel), static_cast<uint64_t>(i), static_cast<float>(value[i])); } } __syncthreads; #ifdef __HIPCC__ if (true && hipThreadIdx_x == 0) { printf("In block %d, there has %u,%u,%u nan,inf,num\n", hipBlockIdx_x, nan_count, inf_count, num_count); #else if (true && threadIdx.x == 0) { printf("In block %d, there has %u,%u,%u nan,inf,num\n", blockIdx.x, nan_count, inf_count, num_count); #endif PADDLE_ENFORCE(false, "===ERROR: in %s find nan or inf===", debug_info); } } // Resnet 2gpus speed test, no check 270 images/s, this check 229 images/s template <typename T> __global__ void CheckNanInfKernel(const T* value, const size_t numel, int print_num, char* debug_info) { /// step 1, judge wheater has nan or inf __shared__ volatile int has_nan_inf; if (threadIdx.x == 0) has_nan_inf = false; 
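// Note: each thread accumulates (value[i] - value[i]); that per-thread sum becomes NaN/Inf exactly when some element is non-finite, which is what sets the shared has_nan_inf flag below.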
__syncthreads(); const size_t tid = threadIdx.x + blockIdx.x * blockDim.x; T sum = static_cast<T>(0.0); // Todo(wangxi). simd speed up for (size_t i = tid; i < numel; i += blockDim.x * gridDim.x) { sum += (value[i] - value[i]); } if (isnan(sum) || isinf(sum)) has_nan_inf = true; __syncthreads(); /// Note. different blocks may behave differently if (!has_nan_inf) return; PrintNanInfKernel(value, numel, print_num, debug_info); } template <> template <typename T> void TensorCheckerVisitor<phi::GPUContext>::apply( typename std::enable_if< std::is_floating_point<T>::value || std::is_same<T, ::paddle::platform::complex<float>>::value || std::is_same<T, ::paddle::platform::complex<double>>::value>::type*) const { int print_num = 3; auto* dev_ctx = reinterpret_cast<phi::GPUContext*>( platform::DeviceContextPool::Instance().Get(tensor_.place())); int dev_id = tensor_.place().device; PADDLE_ENFORCE_EQ( (dev_id >= 0 && dev_id < multi_op_var2gpu_str_mutex().size()), true, platform::errors::OutOfRange("GPU dev_id must >=0 and < dev_count=%d", multi_op_var2gpu_str_mutex().size())); std::string op_var = "[op=" + op_type_ + "] [tensor=" + var_name_ + "]"; char* gpu_str_ptr = NULL; { auto& op_var2gpu_str_mutex = multi_op_var2gpu_str_mutex().at(dev_id); auto& op_var2gpu_str = multi_op_var2gpu_str().at(dev_id); std::lock_guard<std::mutex> guard(op_var2gpu_str_mutex); if (op_var2gpu_str.find(op_var) == op_var2gpu_str.end()) { // insert auto gpu_str_tensor = paddle::memory::Alloc( dev_ctx->GetPlace(), op_var.length() + 1, phi::Stream(reinterpret_cast<phi::StreamId>(dev_ctx->stream()))); gpu_str_ptr = reinterpret_cast<char*>(gpu_str_tensor->ptr()); op_var2gpu_str.emplace(op_var, std::move(gpu_str_tensor)); auto iter = op_var2gpu_str.find(op_var); PADDLE_ENFORCE_EQ(iter != op_var2gpu_str.end(), true, platform::errors::PreconditionNotMet( "op_var=%s should successed insert into " "op_var2gpu_str, but now failed", op_var)); #ifdef __HIPCC__ PADDLE_ENFORCE_GPU_SUCCESS(hipMemcpyAsync(gpu_str_ptr, iter->first.c_str(), op_var.length() + 1, hipMemcpyHostToDevice, dev_ctx->stream())); #else PADDLE_ENFORCE_GPU_SUCCESS(hipMemcpyAsync(gpu_str_ptr, iter->first.c_str(), op_var.length() + 1, hipMemcpyHostToDevice, dev_ctx->stream())); #endif } else { // get auto iter = op_var2gpu_str.find(op_var); PADDLE_ENFORCE_EQ(iter != op_var2gpu_str.end(), true, platform::errors::PreconditionNotMet( "op_var=%s should be in the op_var2gpu_str, but " "now can't find it", op_var)); gpu_str_ptr = reinterpret_cast<char*>(iter->second->ptr()); } } #ifdef __HIPCC__ // HIP will throw GPU memory access fault if threads > 256 const size_t threads = 256; #else const size_t threads = 1024; #endif size_t blocks = ::min(static_cast<size_t>(128), static_cast<size_t>((tensor_.numel() + threads - 1) / threads)); #ifdef __HIPCC__ hipLaunchKernelGGL(CheckNanInfKernel, dim3(blocks), dim3(threads), 0, dev_ctx->stream(), tensor_.data<T>(), tensor_.numel(), print_num, gpu_str_ptr); #else hipLaunchKernelGGL(( CheckNanInfKernel), dim3(blocks), dim3(threads), 0, dev_ctx->stream(), tensor_.data<T>(), tensor_.numel(), print_num, gpu_str_ptr); #endif } template <> void tensor_check<phi::GPUContext>(const std::string& op_type, const std::string& var_name, const phi::DenseTensor& tensor, const platform::Place& place) { std::call_once(init_multi_gpu_op_var_map_flag, InitMultiGPUOpVarMap); TensorCheckerVisitor<phi::GPUContext> vistor( op_type, var_name, tensor, place); VisitDataType(framework::TransToProtoVarType(tensor.dtype()), vistor); } } // namespace details } // 
namespace framework } // namespace paddle
022b82dcb060e08b24a7e1935eb835a4e5b9d482.cu
// Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. #include <algorithm> #include <unordered_map> #include <utility> #include <vector> #include "paddle/fluid/framework/convert_utils.h" #include "paddle/fluid/framework/details/nan_inf_utils.h" #include "paddle/fluid/framework/details/nan_inf_utils_detail.h" #include "paddle/fluid/framework/scope.h" namespace paddle { namespace framework { namespace details { static std::once_flag init_multi_gpu_op_var_map_flag; // lazy init static std::vector<std::unordered_map<std::string, memory::AllocationPtr>>& multi_op_var2gpu_str() { static std::vector<std::unordered_map<std::string, memory::AllocationPtr>> _multi_op_var2gpu_str; return _multi_op_var2gpu_str; } static std::vector<std::mutex>& multi_op_var2gpu_str_mutex() { static std::vector<std::mutex> _multi_op_var2gpu_str_mutex; return _multi_op_var2gpu_str_mutex; } static void InitMultiGPUOpVarMap() { int dev_count = platform::GetGPUDeviceCount(); PADDLE_ENFORCE_GT(dev_count, 0, platform::errors::NotFound( "cuda device must > 0, now dev_count=%d", dev_count)); // https://stackoverflow.com/questions/16465633/how-can-i-use-something-like-stdvectorstdmutex std::vector<std::unordered_map<std::string, memory::AllocationPtr>> tmp_multi( dev_count); std::vector<std::mutex> tmp_multi_mutex(dev_count); multi_op_var2gpu_str().swap(tmp_multi); multi_op_var2gpu_str_mutex().swap(tmp_multi_mutex); } template <typename T> __device__ __forceinline__ void PrintNanInfKernel(const T* value, const size_t numel, int print_num, char* debug_info) { const size_t tid = threadIdx.x + blockIdx.x * blockDim.x; __shared__ unsigned int nan_count, inf_count, num_count; if (threadIdx.x == 0) nan_count = inf_count = num_count = 0; __syncthreads; for (size_t i = tid; i < numel; i += blockDim.x * gridDim.x) { unsigned int count = 0; if (isnan(value[i])) { count = atomicAdd(&nan_count, 1); } else if (isinf(value[i])) { count = atomicAdd(&inf_count, 1); } else { count = atomicAdd(&num_count, 1); } // for cuda, print in every block if (count < print_num) { printf("numel:%lu idx:%lu value:%f\n", static_cast<uint64_t>(numel), static_cast<uint64_t>(i), static_cast<float>(value[i])); } } __syncthreads; #ifdef __HIPCC__ if (true && hipThreadIdx_x == 0) { printf("In block %d, there has %u,%u,%u nan,inf,num\n", hipBlockIdx_x, nan_count, inf_count, num_count); #else if (true && threadIdx.x == 0) { printf("In block %d, there has %u,%u,%u nan,inf,num\n", blockIdx.x, nan_count, inf_count, num_count); #endif PADDLE_ENFORCE(false, "===ERROR: in %s find nan or inf===", debug_info); } } // Resnet 2gpus speed test, no check 270 images/s, this check 229 images/s template <typename T> __global__ void CheckNanInfKernel(const T* value, const size_t numel, int print_num, char* debug_info) { /// step 1, judge wheater has nan or inf __shared__ volatile int has_nan_inf; if (threadIdx.x == 0) has_nan_inf = false; __syncthreads(); const size_t tid = threadIdx.x + blockIdx.x * blockDim.x; T sum = 
static_cast<T>(0.0); // Todo(wangxi). simd speed up for (size_t i = tid; i < numel; i += blockDim.x * gridDim.x) { sum += (value[i] - value[i]); } if (isnan(sum) || isinf(sum)) has_nan_inf = true; __syncthreads(); /// Note. different blocks may behave differently if (!has_nan_inf) return; PrintNanInfKernel(value, numel, print_num, debug_info); } template <> template <typename T> void TensorCheckerVisitor<phi::GPUContext>::apply( typename std::enable_if< std::is_floating_point<T>::value || std::is_same<T, ::paddle::platform::complex<float>>::value || std::is_same<T, ::paddle::platform::complex<double>>::value>::type*) const { int print_num = 3; auto* dev_ctx = reinterpret_cast<phi::GPUContext*>( platform::DeviceContextPool::Instance().Get(tensor_.place())); int dev_id = tensor_.place().device; PADDLE_ENFORCE_EQ( (dev_id >= 0 && dev_id < multi_op_var2gpu_str_mutex().size()), true, platform::errors::OutOfRange("GPU dev_id must >=0 and < dev_count=%d", multi_op_var2gpu_str_mutex().size())); std::string op_var = "[op=" + op_type_ + "] [tensor=" + var_name_ + "]"; char* gpu_str_ptr = NULL; { auto& op_var2gpu_str_mutex = multi_op_var2gpu_str_mutex().at(dev_id); auto& op_var2gpu_str = multi_op_var2gpu_str().at(dev_id); std::lock_guard<std::mutex> guard(op_var2gpu_str_mutex); if (op_var2gpu_str.find(op_var) == op_var2gpu_str.end()) { // insert auto gpu_str_tensor = paddle::memory::Alloc( dev_ctx->GetPlace(), op_var.length() + 1, phi::Stream(reinterpret_cast<phi::StreamId>(dev_ctx->stream()))); gpu_str_ptr = reinterpret_cast<char*>(gpu_str_tensor->ptr()); op_var2gpu_str.emplace(op_var, std::move(gpu_str_tensor)); auto iter = op_var2gpu_str.find(op_var); PADDLE_ENFORCE_EQ(iter != op_var2gpu_str.end(), true, platform::errors::PreconditionNotMet( "op_var=%s should successed insert into " "op_var2gpu_str, but now failed", op_var)); #ifdef __HIPCC__ PADDLE_ENFORCE_GPU_SUCCESS(hipMemcpyAsync(gpu_str_ptr, iter->first.c_str(), op_var.length() + 1, hipMemcpyHostToDevice, dev_ctx->stream())); #else PADDLE_ENFORCE_GPU_SUCCESS(cudaMemcpyAsync(gpu_str_ptr, iter->first.c_str(), op_var.length() + 1, cudaMemcpyHostToDevice, dev_ctx->stream())); #endif } else { // get auto iter = op_var2gpu_str.find(op_var); PADDLE_ENFORCE_EQ(iter != op_var2gpu_str.end(), true, platform::errors::PreconditionNotMet( "op_var=%s should be in the op_var2gpu_str, but " "now can't find it", op_var)); gpu_str_ptr = reinterpret_cast<char*>(iter->second->ptr()); } } #ifdef __HIPCC__ // HIP will throw GPU memory access fault if threads > 256 const size_t threads = 256; #else const size_t threads = 1024; #endif size_t blocks = std::min(static_cast<size_t>(128), static_cast<size_t>((tensor_.numel() + threads - 1) / threads)); #ifdef __HIPCC__ hipLaunchKernelGGL(CheckNanInfKernel, dim3(blocks), dim3(threads), 0, dev_ctx->stream(), tensor_.data<T>(), tensor_.numel(), print_num, gpu_str_ptr); #else CheckNanInfKernel<<<blocks, threads, 0, dev_ctx->stream()>>>( tensor_.data<T>(), tensor_.numel(), print_num, gpu_str_ptr); #endif } template <> void tensor_check<phi::GPUContext>(const std::string& op_type, const std::string& var_name, const phi::DenseTensor& tensor, const platform::Place& place) { std::call_once(init_multi_gpu_op_var_map_flag, InitMultiGPUOpVarMap); TensorCheckerVisitor<phi::GPUContext> vistor( op_type, var_name, tensor, place); VisitDataType(framework::TransToProtoVarType(tensor.dtype()), vistor); } } // namespace details } // namespace framework } // namespace paddle
625b2acdce24d0edb8b1a3a3e16e13fe35ab8867.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <thrust/host_vector.h> #include <thrust/device_vector.h> #include <thrust/reduce.h> #include <thrust/extrema.h> #include <thrust/inner_product.h> #include <thrust/transform.h> #include <thrust/functional.h> #include <thrust/device_malloc.h> #include <thrust/device_free.h> #include "fem.h" //#define NeoHooken #define __NTHREADS 256 void FEMFrame::cuda_memory_alloc() { assert(allocmultiple == 1); assert(_NP > 0); gpuErrchk(hipMalloc( &_d_map23, 2 * sizeof(int)) ); gpuErrchk( hipMalloc( &_d_SR, _NP*sizeof(G_Vector3)) ); gpuErrchk( hipMalloc( &_d_SRref, _NP*sizeof(G_Vector3)) ); gpuErrchk( hipMalloc( &_d_Rref, _NP*sizeof(G_Vector3)) ); gpuErrchk( hipMalloc( &_d_F, _NP*sizeof(G_Vector3)) ); gpuErrchk( hipMalloc( &_d_fixed, _NP*sizeof(int)) ); gpuErrchk( hipMalloc( &_d_EPOT_IND, _NP*sizeof(double)) ); gpuErrchk( hipMalloc( &_d_EPOT_RMV, _NP*sizeof(double)) ); gpuErrchk(hipMalloc(&_d_H_element, 3*3*sizeof(double))); gpuErrchk( hipMalloc( &_d_inv_elements, _NP*_MAX_NELEM_SHARE_NODE*sizeof(int)) ); #ifdef DEBUG_USECUDA Realloc( _h_d_map23, int, 2); Realloc( _h_d_SR, G_Vector3, _NP); Realloc( _h_d_SRref, G_Vector3, _NP); Realloc( _h_d_Rref, G_Vector3, _NP); Realloc( _h_d_F, G_Vector3, _NP); Realloc( _h_d_fixed, int, _NP); Realloc( _h_d_EPOT_IND, double, _NP); Realloc( _h_d_EPOT_RMV, double, _NP); Realloc( _h_d_EPOT, double, _NP); Realloc( _h_d_H_element, double, 3*3); Realloc( _h_d_inv_elements,int, _NP*_MAX_NELEM_SHARE_NODE); //>>>>>> #endif } void FEMFrame::cuda_memory_alloc_elements() { assert(_NELE > 0); gpuErrchk( hipMalloc( &_d_EPOT, _NELE*sizeof(double)) ); gpuErrchk( hipMalloc( &_d_elements, _NELE*_NNODE_PER_ELEMENT*sizeof(int)) ); #ifdef DEBUG_USECUDA Realloc( _h_d_EPOT, double, _NELE); Realloc( _h_d_elements, int, _NELE*_NNODE_PER_ELEMENT); #endif } void FEMFrame::cuda_memory_alloc_element_coeff() { assert(_NDIM > 0 && _NELE > 0 && _NNODE_PER_ELEMENT > 0 && _NINT_PER_ELEMENT > 0); int size = _NINT_PER_ELEMENT*_NDIM*_NDIM*_NDIM*_NNODE_PER_ELEMENT*_NELE; gpuErrchk( hipMalloc( &_d_gauss_weight, _NINT_PER_ELEMENT*sizeof(double)) ); gpuErrchk( hipMalloc( &_d_dFdu, size*sizeof(double)) ); gpuErrchk( hipMalloc( &_d_F_padding, _NELE*_NNODE_PER_ELEMENT*sizeof(G_Vector3)) ); #ifdef DEBUG_USECUDA Realloc( _h_d_gauss_weight,double, _NINT_PER_ELEMENT); Realloc( _h_d_dFdu, double, size); Realloc( _h_d_F_padding, G_Vector3, _NELE*_NNODE_PER_ELEMENT); Realloc( _h_EPOT, double, _NELE); #endif } void FEMFrame::cuda_memcpy_all() { assert(sizeof(G_Vector3) == sizeof(Vector3)); assert(sizeof(G_Matrix33) == sizeof(Matrix33)); assert(_NELE > 0); assert(_NNODE_PER_ELEMENT>0);assert(_NINT_PER_ELEMENT>0); assert(_H[0][0]>0 && _H[1][1]>0 && _H[2][2]>0); int size1 = _NELE*_NNODE_PER_ELEMENT; int size2 = _NP*_MAX_NELEM_SHARE_NODE; int size3 = _NINT_PER_ELEMENT*_NDIM*_NDIM*_NDIM*_NNODE_PER_ELEMENT*_NELE; gpuErrchk( hipMemcpy( _d_map23, map23, 2*sizeof(int), hipMemcpyHostToDevice) ); gpuErrchk( hipMemcpy( _d_SR, _SR, _NP*sizeof(G_Vector3), hipMemcpyHostToDevice) ); gpuErrchk( hipMemcpy( _d_SRref, _SRref, _NP*sizeof(G_Vector3), hipMemcpyHostToDevice) ); gpuErrchk( hipMemcpy( _d_Rref, _Rref, _NP*sizeof(G_Vector3), hipMemcpyHostToDevice) ); gpuErrchk( hipMemcpy( _d_F, _F, _NP*sizeof(G_Vector3), hipMemcpyHostToDevice) ); gpuErrchk( hipMemcpy( _d_fixed, fixed, _NP*sizeof(int), hipMemcpyHostToDevice) ); gpuErrchk( hipMemcpy( _d_H_element, _H.element, 3*3*sizeof(double), hipMemcpyHostToDevice) ); gpuErrchk( hipMemcpy(_d_elements, 
elements, size1*sizeof(int),hipMemcpyHostToDevice) ); create_inverse_connectivities_matrix(); gpuErrchk( hipMemcpy(_d_inv_elements,inv_elements,size2*sizeof(int),hipMemcpyHostToDevice) ); gpuErrchk( hipMemcpy(_d_gauss_weight,gauss_weight,_NINT_PER_ELEMENT*sizeof(double),hipMemcpyHostToDevice) ); gpuErrchk( hipMemcpy(_d_dFdu, dFdu, size3*sizeof(double),hipMemcpyHostToDevice) ); } __device__ G_Matrix33 getEigenF(G_Vector3 p, G_Matrix33 Fdef, double y_eigen_zbound_max, double y_eigen_zbound_min, double x_eigen_zbound_max, double x_eigen_zbound_min, double y_eigen_strain, double x_eigen_strain ) { G_Matrix33 I; I.eye(); if (p[2] <= y_eigen_zbound_max && p[2] >= y_eigen_zbound_min ){ I[1][1] = y_eigen_strain; } if (p[2] <= x_eigen_zbound_max && p[2] >= x_eigen_zbound_min ){ I[0][0] = x_eigen_strain; } return I; } __global__ void kernel_beam_fem_energy_force(int _NDIM, int _NELE, int _NINT_PER_ELEMENT, int _NNODE_PER_ELEMENT, int *_d_elements, int *_d_inv_elements, int *_d_map23, int *_d_fixed, double *_d_gauss_weight, double *_d_dFdu, double *_d_EPOT, G_Vector3 *_d_SR, G_Vector3 *_d_SRref, G_Vector3 *_d_Rref, G_Vector3 *_d_F, G_Vector3 *_d_F_padding, double* _d_H_element, double __V0 ) { for (int iele = blockDim.x * blockIdx.x + threadIdx.x;iele < _NELE; iele+= blockDim.x * gridDim.x) { int i,j,jpt,iA, in, ip, iq, ind; G_Vector3 dsj, drj, elem_center; G_Matrix33 Fe, Fdef, B, C, Cinv, FCinvtran, dEdF, hinv; G_Matrix33 eigF, invEigF; double Eele, Eint, Jet, trace, detB, J2; G_Matrix33 E, E2, I, pk2, temp1, temp2; G_Matrix33 _d_H(_d_H_element); I.eye(); /* energy of this element */ hinv = _d_H.inv(); Eele = 0; /* center of the element */ elem_center.clear();elem_center[0] = elem_center[1]=elem_center[2]= 0; for(j=0;j<_NNODE_PER_ELEMENT;j++) { jpt=_d_elements[iele*_NNODE_PER_ELEMENT+j]; for (i = 0;i<_NDIM;i++) { elem_center[i] += 1.0/_NNODE_PER_ELEMENT *_d_Rref[jpt][i]; } } for(j=0;j<_NNODE_PER_ELEMENT;j++) { jpt=iele*_NNODE_PER_ELEMENT+j; _d_F_padding[jpt].clear(); } for(iA=0;iA<_NINT_PER_ELEMENT;iA++) { /* energy of this Gauss integration point */ Eint = 0; /* deformation gradient */ Fdef.clear(); Fdef[0][0] = Fdef[1][1] = Fdef[2][2] = 1.0; for(j=0;j<_NNODE_PER_ELEMENT;j++) { jpt=_d_elements[iele*_NNODE_PER_ELEMENT+j]; _d_SRref[jpt ] = hinv*_d_Rref[jpt]; dsj = _d_SR[jpt] - _d_SRref[jpt]; dsj.subint(); _d_H.multiply(dsj, drj); /* Add contribution from drj to F using dFdu */ for(ip=0;ip<_NDIM;ip++) { for(iq=0;iq<_NDIM;iq++) { for(in=0;in<_NDIM;in++) { ind=(((iA*_NDIM+ip)*_NDIM+iq)*_NDIM+in)*_NNODE_PER_ELEMENT+j; if (_NDIM == 2) Fdef[ip][iq] += _d_dFdu[ind]*drj[_d_map23[in]]; else //_NDIM == 3 Fdef[ip][iq] += _d_dFdu[ind]*drj[in]; } } } } E = Fdef.tran()*Fdef-I; B = Fdef*Fdef.tran(); C = Fdef.tran()*Fdef; Cinv = C.inv(); FCinvtran = Fdef*Cinv.tran(); Jet = Fdef.det(); J2 = Jet*Jet; detB = B.det(); Eint = 0; /* Add contribution from F to Eint */ if (_NDIM == 2) { double MU = 1; trace = B[0][0] + B[1][1]; Eint = 0.5*(trace + 1.0/detB - 3); /* multiply MU and V0 later */ dEdF = FCinvtran*(-1.0/J2) + Fdef; /* multiply MU later */ Eint *= MU; dEdF *= MU; /* Add contribution from drj to F using dFdu */ for(j=0;j<_NNODE_PER_ELEMENT;j++) { jpt=iele*_NNODE_PER_ELEMENT+j; for(ip=0;ip<_NDIM;ip++) { for(iq=0;iq<_NDIM;iq++) { for(in=0;in<_NDIM;in++) { ind=(((iA*_NDIM+ip)*_NDIM+iq)*_NDIM+in)*_NNODE_PER_ELEMENT+j; if (1) {// _d_fixed[jpt] == 0) { _d_F_padding[jpt][_d_map23[in] ] -= _d_gauss_weight[iA] *dEdF[ip][iq]*_d_dFdu[ind] * __V0; // printf("_d_Fpadding[%d][%d] = %g\n", jpt, _d_map23[in], 
_d_F_padding[jpt][_d_map23[in] ]); } } } } } } Eele += _d_gauss_weight[iA]*Eint *__V0; } _d_EPOT[iele] = Eele; } } __global__ void kernel_snap_fem_energy_force(int _NDIM, int _NELE, int _NINT_PER_ELEMENT, int _NNODE_PER_ELEMENT, int *_d_elements, int *_d_inv_elements, int *_d_map23, int *_d_fixed, double *_d_gauss_weight, double *_d_dFdu, double *_d_EPOT, G_Vector3 *_d_SR, G_Vector3 *_d_SRref, G_Vector3 *_d_Rref, G_Vector3 *_d_F, G_Vector3 *_d_F_padding, double* _d_H_element, double y_eigen_zbound_max, double y_eigen_zbound_min, double x_eigen_zbound_max, double x_eigen_zbound_min, double y_eigen_strain, double x_eigen_strain, double __V0 ) { for (int iele = blockDim.x * blockIdx.x + threadIdx.x;iele < _NELE; iele+= blockDim.x * gridDim.x) { int i,j,jpt,iA, in, ip, iq, ind, p, q, r; G_Vector3 dsj, drj, elem_center; G_Matrix33 Fe, Fdef, B, C, Cinv, FCinvtran, dEdF, hinv; G_Matrix33 eigF, invEigF; double Eele, Eint, Jet, trace, detB, J2; double lambda, mu, temp; G_Matrix33 E, E2, I, pk2, temp1, temp2; mu = 1; lambda = 1.95; G_Matrix33 _d_H(_d_H_element); I.eye(); /* energy of this element */ hinv = _d_H.inv(); Eele = 0; /* center of the element */ elem_center.clear();elem_center[0] = elem_center[1]=elem_center[2]= 0; for(j=0;j<_NNODE_PER_ELEMENT;j++) { jpt=_d_elements[iele*_NNODE_PER_ELEMENT+j]; for (i = 0;i<_NDIM;i++) { elem_center[i] += 1.0/_NNODE_PER_ELEMENT *_d_Rref[jpt][i]; } } for(j=0;j<_NNODE_PER_ELEMENT;j++) { jpt=iele*_NNODE_PER_ELEMENT+j; _d_F_padding[jpt].clear(); } for(iA=0;iA<_NINT_PER_ELEMENT;iA++) { /* energy of this Gauss integration point */ Eint = 0; /* deformation gradient */ Fdef.clear(); Fdef[0][0] = Fdef[1][1] = Fdef[2][2] = 1.0; for(j=0;j<_NNODE_PER_ELEMENT;j++) { jpt=_d_elements[iele*_NNODE_PER_ELEMENT+j]; _d_SRref[jpt ] = hinv*_d_Rref[jpt]; dsj = _d_SR[jpt] - _d_SRref[jpt]; dsj.subint(); _d_H.multiply(dsj, drj); /* Add contribution from drj to F using dFdu */ for(ip=0;ip<_NDIM;ip++) { for(iq=0;iq<_NDIM;iq++) { for(in=0;in<_NDIM;in++) { ind=(((iA*_NDIM+ip)*_NDIM+iq)*_NDIM+in)*_NNODE_PER_ELEMENT+j; if (_NDIM == 2) Fdef[ip][iq] += _d_dFdu[ind]*drj[_d_map23[in]]; else //_NDIM == 3 Fdef[ip][iq] += _d_dFdu[ind]*drj[in]; } } } } eigF = getEigenF(elem_center, Fdef, y_eigen_zbound_max, y_eigen_zbound_min, x_eigen_zbound_max, x_eigen_zbound_min, y_eigen_strain, x_eigen_strain ); invEigF = eigF.inv(); Fe = Fdef*invEigF; E = Fe.tran()*Fe-I; B = Fe*Fe.tran(); C = Fe.tran()*Fe; Cinv = C.inv(); Jet = Fdef.det(); J2 = Jet*Jet; detB = B.det(); Eint = 0; /* Add contribution from F to Eint */ if (_NDIM == 2) { trace = B[0][0] + B[1][1]; Eint = 0.5*(trace + 1.0/detB - 3); /* multiply MU and V0 later */ dEdF = FCinvtran*(-1.0/J2) + Fdef; /* multiply MU later */ /* Add contribution from drj to F using dFdu */ for(j=0;j<_NNODE_PER_ELEMENT;j++) { jpt=iele*_NNODE_PER_ELEMENT+j; for(ip=0;ip<_NDIM;ip++) { for(iq=0;iq<_NDIM;iq++) { for(in=0;in<_NDIM;in++) { ind=(((iA*_NDIM+ip)*_NDIM+iq)*_NDIM+in)*_NNODE_PER_ELEMENT+j; _d_F_padding[jpt][_d_map23[in] ] -= _d_gauss_weight[iA] * dEdF[ip][iq]*_d_dFdu[ind] * __V0; } } } } } /* Add contribution from F to Eint */ if (_NDIM == 3) { #if defined NeoHooken double C10 = 1; double D = 1e-1; double jet23 = pow(Jet, -2.0/3.0); double Ibar = B.trace() * jet23; Eint = C10*(Ibar-3) + 1.0/D *(Jet-1)*(Jet-1); dEdF = (Fdef*(invEigF*(invEigF.tran())) * 2.0*jet23 - ((Fdef.inv()).tran())*2.0/3.0 * Ibar)*C10 + ((Fdef.inv()).tran())*2.0/D *(Jet-1)*Jet; #else E2 = E*E; dEdF.clear(); for (i = 0;i<_NDIM;i++) for (j = 0;j<_NDIM;j++) for (p = 0;p<_NDIM;p++) for (r = 
0;r<_NDIM;r++) { temp = 0; for (q = 0;q<_NDIM;q++) temp += 2*mu*invEigF[j][p]*Fdef[i][r]*invEigF[r][q]*E[p][q] + 2*mu*invEigF[r][p]*Fdef[i][r]*invEigF[j][q]*E[p][q]; dEdF[i][j] += 0.5*(2*lambda*E.trace()*invEigF[j][p]*Fdef[i][r] * invEigF[r][p] + temp); } Eint = 0.5*lambda*(E.trace())*(E.trace()) + mu*(E2.trace()); #endif /* Add contribution from drj to F using dFdu */ for(j=0;j<_NNODE_PER_ELEMENT;j++) { jpt=iele*_NNODE_PER_ELEMENT+j; for(ip=0;ip<_NDIM;ip++) { for(iq=0;iq<_NDIM;iq++) { for(in=0;in<_NDIM;in++) { ind=(((iA*_NDIM+ip)*_NDIM+iq)*_NDIM+in)*_NNODE_PER_ELEMENT+j; _d_F_padding[jpt][in ] -= _d_gauss_weight[iA] * dEdF[ip][iq]*_d_dFdu[ind] * __V0; } } } } } /* Add contribution from Eint to Eele */ Eele += _d_gauss_weight[iA]*Eint *__V0; } _d_EPOT[iele] = Eele; } } __global__ void kernel_assemble_back_force(int _NP, int *_d_fixed, int *_d_inv_elements, G_Vector3 *_d_F, G_Vector3 *_d_F_padding) { for(int ind = blockDim.x * blockIdx.x + threadIdx.x; ind<_NP;ind+=blockDim.x*gridDim.x) { _d_F[ind].clear(); for (int k = 0; k<_MAX_NELEM_SHARE_NODE; k++) { int indice = ind *_MAX_NELEM_SHARE_NODE+k; if (_d_inv_elements[ indice ] >= 0 && _d_fixed[ind] == 0) { _d_F[ind] += _d_F_padding[ _d_inv_elements[indice] ]; } } } } void FEMFrame::cuda_beam_fem_energy_force() { DUMP("FEM"); gpuErrchk( hipMemcpy( _d_SR, _SR, _NP*sizeof(G_Vector3), hipMemcpyHostToDevice)); gpuErrchk( hipMemcpy( _d_H_element, _H.element, 3*3*sizeof(double), hipMemcpyHostToDevice)); gpuErrchk( hipMemcpy( _d_fixed, fixed, _NP*sizeof(int), hipMemcpyHostToDevice)); _EPOT=0; //put this back for(int i=0;i<_NP;i++) { _EPOT_IND[i]=0; _EPOT_RMV[i]=0; _VIRIAL_IND[i].clear(); } _VIRIAL.clear(); #ifdef DEBUG_USECUDA assert(check_host_device_memory_transfer() == 0); #endif hipLaunchKernelGGL(( kernel_beam_fem_energy_force), dim3((_NELE+255)/256),dim3(__NTHREADS), 0, 0, _NDIM, _NELE, _NINT_PER_ELEMENT, _NNODE_PER_ELEMENT, _d_elements, _d_inv_elements, _d_map23, _d_fixed, _d_gauss_weight, _d_dFdu, _d_EPOT, _d_SR, _d_SRref, _d_Rref, _d_F, _d_F_padding, _d_H_element, __V0); //hipDeviceSynchronize(); #ifdef DEBUG_USECUDA assert(check_host_device_memory_transfer() == 0); #endif hipLaunchKernelGGL(( kernel_assemble_back_force), dim3((_NP+255)/256),dim3(__NTHREADS), 0, 0, _NP, _d_fixed, _d_inv_elements, _d_F, _d_F_padding); // hipDeviceSynchronize(); #ifdef DEBUG_USECUDA gpuErrchk(hipMemcpy(_h_EPOT,_d_EPOT, _NELE*sizeof(double), hipMemcpyDeviceToHost)); for (int i = 0; i<_NELE; i++) { printf("_h_EPOT[%d]= %g, _NELE = %d\n", i,_h_EPOT[i],_NELE); _EPOT += _h_EPOT[i]; } #else /* Reduce potential energy to CPU for relax function to call */ thrust::device_ptr<double> t_EPOT = thrust::device_pointer_cast(_d_EPOT); _EPOT = thrust::reduce(t_EPOT,t_EPOT+_NELE); #endif /* Copy force (Vector3 *) back to CPU for relax function to call */ hipMemcpy( _F, _d_F, _NP*sizeof(G_Vector3), hipMemcpyDeviceToHost); } void FEMFrame::cuda_snap_fem_energy_force() { DUMP("FEM"); gpuErrchk( hipMemcpy( _d_SR, _SR, _NP*sizeof(G_Vector3), hipMemcpyHostToDevice)); gpuErrchk( hipMemcpy( _d_H_element, _H.element, 3*3*sizeof(double), hipMemcpyHostToDevice)); gpuErrchk( hipMemcpy( _d_fixed, fixed, _NP*sizeof(int), hipMemcpyHostToDevice)); _EPOT=0; //put this back for(int i=0;i<_NP;i++) { _EPOT_IND[i]=0; _EPOT_RMV[i]=0; _VIRIAL_IND[i].clear(); } _VIRIAL.clear(); #ifdef DEBUG_USECUDA assert(check_host_device_memory_transfer() == 0); #endif hipLaunchKernelGGL(( kernel_snap_fem_energy_force), dim3((_NELE+255)/256),dim3(__NTHREADS), 0, 0, _NDIM, _NELE, _NINT_PER_ELEMENT, 
_NNODE_PER_ELEMENT, _d_elements, _d_inv_elements, _d_map23, _d_fixed, _d_gauss_weight, _d_dFdu, _d_EPOT, _d_SR, _d_SRref, _d_Rref, _d_F, _d_F_padding, _d_H_element,y_eigen_zbound_max, y_eigen_zbound_min, x_eigen_zbound_max, x_eigen_zbound_min, y_eigen_strain, x_eigen_strain , __V0); //hipDeviceSynchronize(); #ifdef DEBUG_USECUDA assert(check_host_device_memory_transfer() == 0); #endif hipLaunchKernelGGL(( kernel_assemble_back_force), dim3((_NP+255)/256),dim3(__NTHREADS), 0, 0, _NP, _d_fixed, _d_inv_elements, _d_F, _d_F_padding); // hipDeviceSynchronize(); #ifdef DEBUG_USECUDA gpuErrchk(hipMemcpy(_h_EPOT,_d_EPOT, _NELE*sizeof(double), hipMemcpyDeviceToHost)); for (int i = 0; i<_NELE; i++) { printf("_h_EPOT[%d]= %g, _NELE = %d\n", i,_h_EPOT[i],_NELE); _EPOT += _h_EPOT[i]; } #else /* Reduce potential energy to CPU for relax function to call */ thrust::device_ptr<double> t_EPOT = thrust::device_pointer_cast(_d_EPOT); _EPOT = thrust::reduce(t_EPOT,t_EPOT+_NELE); #endif /* Copy force (Vector3 *) back to CPU for relax function to call */ hipMemcpy( _F, _d_F, _NP*sizeof(G_Vector3), hipMemcpyDeviceToHost); } void FEMFrame::free_device_ptr() { hipFree(_d_elements); hipFree(_d_inv_elements); hipFree(_d_map23); hipFree(_d_fixed); hipFree(_d_colorids); hipFree(_d_gauss_weight); hipFree(_d_dFdu); hipFree(_d_EPOT); hipFree(_d_EPOT_IND); hipFree(_d_EPOT_RMV); hipFree(_d_SR); hipFree(_d_SRref); hipFree(_d_Rref); hipFree(_d_F); hipFree(_d_F_padding); } /* This is a simple test for GPU. run the function to see if maxErro == 0. If not, GPU device is not set correctly */ __global__ void saxpy(int n, float a, const float *x, float *y) { int i = blockIdx.x*blockDim.x + threadIdx.x; if (i < n) y[i] = a*x[i] + y[i]; } int FEMFrame::test_saxpy(void) { int N = 1<<20; float *x, *y, *d_x, *d_y; x = (float*)malloc(N*sizeof(float)); y = (float*)malloc(N*sizeof(float)); hipMalloc(&d_x, N*sizeof(float)); hipMalloc(&d_y, N*sizeof(float)); for (int i = 0; i < N; i++) { x[i] = 1.0f; y[i] = 2.0f; } hipMemcpy(d_x, x, N*sizeof(float), hipMemcpyHostToDevice); hipMemcpy(d_y, y, N*sizeof(float), hipMemcpyHostToDevice); // Perform SAXPY on 1M elements hipLaunchKernelGGL(( saxpy), dim3((N+255)/256), dim3(__NTHREADS), 0, 0, N, 2.0f, d_x, d_y); hipMemcpy(y, d_y, N*sizeof(float), hipMemcpyDeviceToHost); float maxError = 0.0f; for (int i = 0; i < N; i++) maxError = max(maxError, abs(y[i]-4.0f)); INFO_Printf("Max error: %f\n", maxError); hipFree(d_x); hipFree(d_y); free(x); free(y); return 0; } #ifdef DEBUG_USECUDA int FEMFrame::check_host_device_memory_transfer() { INFO_Printf("I am in check_host_device memory transfer\n"); assert(sizeof(G_Vector3) == sizeof(Vector3)); assert(sizeof(G_Matrix33) == sizeof(Matrix33)); assert(_NP>0); assert(_NELE > 0); assert(_NNODE_PER_ELEMENT>0);assert(_NINT_PER_ELEMENT>0); assert(_H[0][0]>0 && _H[1][1]>0 && _H[2][2]>0); int size1 = _NELE*_NNODE_PER_ELEMENT; int size2 = _NP*_MAX_NELEM_SHARE_NODE; gpuErrchk( hipMemcpy( _h_d_map23, _d_map23, 2*sizeof(int), hipMemcpyDeviceToHost)); gpuErrchk( hipMemcpy( _h_d_SR, _d_SR, _NP*sizeof(G_Vector3), hipMemcpyDeviceToHost)); gpuErrchk( hipMemcpy( _h_d_SRref, _d_SRref, _NP*sizeof(G_Vector3), hipMemcpyDeviceToHost)); gpuErrchk( hipMemcpy( _h_d_Rref, _d_Rref, _NP*sizeof(G_Vector3), hipMemcpyDeviceToHost)); gpuErrchk( hipMemcpy( _h_d_F, _d_F, _NP*sizeof(G_Vector3), hipMemcpyDeviceToHost)); gpuErrchk( hipMemcpy( _h_d_fixed, _d_fixed, _NP*sizeof(int), hipMemcpyDeviceToHost)); gpuErrchk( hipMemcpy( _h_d_H_element,_d_H_element, 3*3*sizeof(double), hipMemcpyDeviceToHost)); 
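// Copy the remaining device arrays back into the host-side shadow buffers so they can be compared element by element against the CPU copies below.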
gpuErrchk( hipMemcpy(_h_d_elements, _d_elements, size1*sizeof(int), hipMemcpyDeviceToHost)); gpuErrchk( hipMemcpy(_h_d_inv_elements, _d_inv_elements,size2*sizeof(int), hipMemcpyDeviceToHost)); gpuErrchk( hipMemcpy(_h_d_gauss_weight, _d_gauss_weight,_NINT_PER_ELEMENT*sizeof(double),hipMemcpyDeviceToHost)); gpuErrchk( hipMemcpy(_h_EPOT, _d_EPOT,_NELE*sizeof(double),hipMemcpyDeviceToHost)); for (int i = 0;i<2;i++) assert(map23[i]==_h_d_map23[i]); for (int i = 0;i<_NP;i++){ // printf("_SR[i]=%g,%g,%g, _h_d_SR[i]=%g,%g,%g, diff = %g,%g,%g\n", _SR[i][0], _SR[i][1], _SR[i][2], _h_d_SR[i][0], _h_d_SR[i][1], _h_d_SR[i][2], _SR[i][0]-_h_d_SR[i][0], _SR[i][1]-_h_d_SR[i][1], _SR[i][2]-_h_d_SR[i][2]); assert((G_Vector3(_SR[i])==G_Vector3(_h_d_SR[i]))); } for (int i = 0;i<_NP;i++) assert((G_Vector3(_SRref[i])==G_Vector3(_h_d_SRref[i]))); for (int i = 0;i<_NP;i++) assert((G_Vector3(_Rref[i])==G_Vector3(_h_d_Rref[i]))); for (int i = 0;i<_NP;i++) assert((G_Vector3(_F[i])==G_Vector3(_h_d_F[i]))); for (int i = 0;i<_NP;i++) assert(fixed[i]==_h_d_fixed[i]); for (int i = 0;i<size1;i++) assert(elements[i]==_h_d_elements[i]); for (int i = 0;i<size2;i++) assert(inv_elements[i]==_h_d_inv_elements[i]); for (int i = 0;i<_NINT_PER_ELEMENT;i++) assert(fabs(gauss_weight[i]-_h_d_gauss_weight[i])<1e-15); INFO_Printf("I am about to get out of check_host_device memory transfer\n"); return 0; } #endif
625b2acdce24d0edb8b1a3a3e16e13fe35ab8867.cu
#include <thrust/host_vector.h> #include <thrust/device_vector.h> #include <thrust/reduce.h> #include <thrust/extrema.h> #include <thrust/inner_product.h> #include <thrust/transform.h> #include <thrust/functional.h> #include <thrust/device_malloc.h> #include <thrust/device_free.h> #include "fem.h" //#define NeoHooken #define __NTHREADS 256 void FEMFrame::cuda_memory_alloc() { assert(allocmultiple == 1); assert(_NP > 0); gpuErrchk(cudaMalloc( &_d_map23, 2 * sizeof(int)) ); gpuErrchk( cudaMalloc( &_d_SR, _NP*sizeof(G_Vector3)) ); gpuErrchk( cudaMalloc( &_d_SRref, _NP*sizeof(G_Vector3)) ); gpuErrchk( cudaMalloc( &_d_Rref, _NP*sizeof(G_Vector3)) ); gpuErrchk( cudaMalloc( &_d_F, _NP*sizeof(G_Vector3)) ); gpuErrchk( cudaMalloc( &_d_fixed, _NP*sizeof(int)) ); gpuErrchk( cudaMalloc( &_d_EPOT_IND, _NP*sizeof(double)) ); gpuErrchk( cudaMalloc( &_d_EPOT_RMV, _NP*sizeof(double)) ); gpuErrchk(cudaMalloc(&_d_H_element, 3*3*sizeof(double))); gpuErrchk( cudaMalloc( &_d_inv_elements, _NP*_MAX_NELEM_SHARE_NODE*sizeof(int)) ); #ifdef DEBUG_USECUDA Realloc( _h_d_map23, int, 2); Realloc( _h_d_SR, G_Vector3, _NP); Realloc( _h_d_SRref, G_Vector3, _NP); Realloc( _h_d_Rref, G_Vector3, _NP); Realloc( _h_d_F, G_Vector3, _NP); Realloc( _h_d_fixed, int, _NP); Realloc( _h_d_EPOT_IND, double, _NP); Realloc( _h_d_EPOT_RMV, double, _NP); Realloc( _h_d_EPOT, double, _NP); Realloc( _h_d_H_element, double, 3*3); Realloc( _h_d_inv_elements,int, _NP*_MAX_NELEM_SHARE_NODE); //>>>>>> #endif } void FEMFrame::cuda_memory_alloc_elements() { assert(_NELE > 0); gpuErrchk( cudaMalloc( &_d_EPOT, _NELE*sizeof(double)) ); gpuErrchk( cudaMalloc( &_d_elements, _NELE*_NNODE_PER_ELEMENT*sizeof(int)) ); #ifdef DEBUG_USECUDA Realloc( _h_d_EPOT, double, _NELE); Realloc( _h_d_elements, int, _NELE*_NNODE_PER_ELEMENT); #endif } void FEMFrame::cuda_memory_alloc_element_coeff() { assert(_NDIM > 0 && _NELE > 0 && _NNODE_PER_ELEMENT > 0 && _NINT_PER_ELEMENT > 0); int size = _NINT_PER_ELEMENT*_NDIM*_NDIM*_NDIM*_NNODE_PER_ELEMENT*_NELE; gpuErrchk( cudaMalloc( &_d_gauss_weight, _NINT_PER_ELEMENT*sizeof(double)) ); gpuErrchk( cudaMalloc( &_d_dFdu, size*sizeof(double)) ); gpuErrchk( cudaMalloc( &_d_F_padding, _NELE*_NNODE_PER_ELEMENT*sizeof(G_Vector3)) ); #ifdef DEBUG_USECUDA Realloc( _h_d_gauss_weight,double, _NINT_PER_ELEMENT); Realloc( _h_d_dFdu, double, size); Realloc( _h_d_F_padding, G_Vector3, _NELE*_NNODE_PER_ELEMENT); Realloc( _h_EPOT, double, _NELE); #endif } void FEMFrame::cuda_memcpy_all() { assert(sizeof(G_Vector3) == sizeof(Vector3)); assert(sizeof(G_Matrix33) == sizeof(Matrix33)); assert(_NELE > 0); assert(_NNODE_PER_ELEMENT>0);assert(_NINT_PER_ELEMENT>0); assert(_H[0][0]>0 && _H[1][1]>0 && _H[2][2]>0); int size1 = _NELE*_NNODE_PER_ELEMENT; int size2 = _NP*_MAX_NELEM_SHARE_NODE; int size3 = _NINT_PER_ELEMENT*_NDIM*_NDIM*_NDIM*_NNODE_PER_ELEMENT*_NELE; gpuErrchk( cudaMemcpy( _d_map23, map23, 2*sizeof(int), cudaMemcpyHostToDevice) ); gpuErrchk( cudaMemcpy( _d_SR, _SR, _NP*sizeof(G_Vector3), cudaMemcpyHostToDevice) ); gpuErrchk( cudaMemcpy( _d_SRref, _SRref, _NP*sizeof(G_Vector3), cudaMemcpyHostToDevice) ); gpuErrchk( cudaMemcpy( _d_Rref, _Rref, _NP*sizeof(G_Vector3), cudaMemcpyHostToDevice) ); gpuErrchk( cudaMemcpy( _d_F, _F, _NP*sizeof(G_Vector3), cudaMemcpyHostToDevice) ); gpuErrchk( cudaMemcpy( _d_fixed, fixed, _NP*sizeof(int), cudaMemcpyHostToDevice) ); gpuErrchk( cudaMemcpy( _d_H_element, _H.element, 3*3*sizeof(double), cudaMemcpyHostToDevice) ); gpuErrchk( cudaMemcpy(_d_elements, elements, size1*sizeof(int),cudaMemcpyHostToDevice) ); 
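// Build the node-to-element (inverse connectivity) table on the host before uploading it to the device.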
create_inverse_connectivities_matrix(); gpuErrchk( cudaMemcpy(_d_inv_elements,inv_elements,size2*sizeof(int),cudaMemcpyHostToDevice) ); gpuErrchk( cudaMemcpy(_d_gauss_weight,gauss_weight,_NINT_PER_ELEMENT*sizeof(double),cudaMemcpyHostToDevice) ); gpuErrchk( cudaMemcpy(_d_dFdu, dFdu, size3*sizeof(double),cudaMemcpyHostToDevice) ); } __device__ G_Matrix33 getEigenF(G_Vector3 p, G_Matrix33 Fdef, double y_eigen_zbound_max, double y_eigen_zbound_min, double x_eigen_zbound_max, double x_eigen_zbound_min, double y_eigen_strain, double x_eigen_strain ) { G_Matrix33 I; I.eye(); if (p[2] <= y_eigen_zbound_max && p[2] >= y_eigen_zbound_min ){ I[1][1] = y_eigen_strain; } if (p[2] <= x_eigen_zbound_max && p[2] >= x_eigen_zbound_min ){ I[0][0] = x_eigen_strain; } return I; } __global__ void kernel_beam_fem_energy_force(int _NDIM, int _NELE, int _NINT_PER_ELEMENT, int _NNODE_PER_ELEMENT, int *_d_elements, int *_d_inv_elements, int *_d_map23, int *_d_fixed, double *_d_gauss_weight, double *_d_dFdu, double *_d_EPOT, G_Vector3 *_d_SR, G_Vector3 *_d_SRref, G_Vector3 *_d_Rref, G_Vector3 *_d_F, G_Vector3 *_d_F_padding, double* _d_H_element, double __V0 ) { for (int iele = blockDim.x * blockIdx.x + threadIdx.x;iele < _NELE; iele+= blockDim.x * gridDim.x) { int i,j,jpt,iA, in, ip, iq, ind; G_Vector3 dsj, drj, elem_center; G_Matrix33 Fe, Fdef, B, C, Cinv, FCinvtran, dEdF, hinv; G_Matrix33 eigF, invEigF; double Eele, Eint, Jet, trace, detB, J2; G_Matrix33 E, E2, I, pk2, temp1, temp2; G_Matrix33 _d_H(_d_H_element); I.eye(); /* energy of this element */ hinv = _d_H.inv(); Eele = 0; /* center of the element */ elem_center.clear();elem_center[0] = elem_center[1]=elem_center[2]= 0; for(j=0;j<_NNODE_PER_ELEMENT;j++) { jpt=_d_elements[iele*_NNODE_PER_ELEMENT+j]; for (i = 0;i<_NDIM;i++) { elem_center[i] += 1.0/_NNODE_PER_ELEMENT *_d_Rref[jpt][i]; } } for(j=0;j<_NNODE_PER_ELEMENT;j++) { jpt=iele*_NNODE_PER_ELEMENT+j; _d_F_padding[jpt].clear(); } for(iA=0;iA<_NINT_PER_ELEMENT;iA++) { /* energy of this Gauss integration point */ Eint = 0; /* deformation gradient */ Fdef.clear(); Fdef[0][0] = Fdef[1][1] = Fdef[2][2] = 1.0; for(j=0;j<_NNODE_PER_ELEMENT;j++) { jpt=_d_elements[iele*_NNODE_PER_ELEMENT+j]; _d_SRref[jpt ] = hinv*_d_Rref[jpt]; dsj = _d_SR[jpt] - _d_SRref[jpt]; dsj.subint(); _d_H.multiply(dsj, drj); /* Add contribution from drj to F using dFdu */ for(ip=0;ip<_NDIM;ip++) { for(iq=0;iq<_NDIM;iq++) { for(in=0;in<_NDIM;in++) { ind=(((iA*_NDIM+ip)*_NDIM+iq)*_NDIM+in)*_NNODE_PER_ELEMENT+j; if (_NDIM == 2) Fdef[ip][iq] += _d_dFdu[ind]*drj[_d_map23[in]]; else //_NDIM == 3 Fdef[ip][iq] += _d_dFdu[ind]*drj[in]; } } } } E = Fdef.tran()*Fdef-I; B = Fdef*Fdef.tran(); C = Fdef.tran()*Fdef; Cinv = C.inv(); FCinvtran = Fdef*Cinv.tran(); Jet = Fdef.det(); J2 = Jet*Jet; detB = B.det(); Eint = 0; /* Add contribution from F to Eint */ if (_NDIM == 2) { double MU = 1; trace = B[0][0] + B[1][1]; Eint = 0.5*(trace + 1.0/detB - 3); /* multiply MU and V0 later */ dEdF = FCinvtran*(-1.0/J2) + Fdef; /* multiply MU later */ Eint *= MU; dEdF *= MU; /* Add contribution from drj to F using dFdu */ for(j=0;j<_NNODE_PER_ELEMENT;j++) { jpt=iele*_NNODE_PER_ELEMENT+j; for(ip=0;ip<_NDIM;ip++) { for(iq=0;iq<_NDIM;iq++) { for(in=0;in<_NDIM;in++) { ind=(((iA*_NDIM+ip)*_NDIM+iq)*_NDIM+in)*_NNODE_PER_ELEMENT+j; if (1) {// _d_fixed[jpt] == 0) { _d_F_padding[jpt][_d_map23[in] ] -= _d_gauss_weight[iA] *dEdF[ip][iq]*_d_dFdu[ind] * __V0; // printf("_d_Fpadding[%d][%d] = %g\n", jpt, _d_map23[in], _d_F_padding[jpt][_d_map23[in] ]); } } } } } } Eele += 
_d_gauss_weight[iA]*Eint *__V0; } _d_EPOT[iele] = Eele; } } __global__ void kernel_snap_fem_energy_force(int _NDIM, int _NELE, int _NINT_PER_ELEMENT, int _NNODE_PER_ELEMENT, int *_d_elements, int *_d_inv_elements, int *_d_map23, int *_d_fixed, double *_d_gauss_weight, double *_d_dFdu, double *_d_EPOT, G_Vector3 *_d_SR, G_Vector3 *_d_SRref, G_Vector3 *_d_Rref, G_Vector3 *_d_F, G_Vector3 *_d_F_padding, double* _d_H_element, double y_eigen_zbound_max, double y_eigen_zbound_min, double x_eigen_zbound_max, double x_eigen_zbound_min, double y_eigen_strain, double x_eigen_strain, double __V0 ) { for (int iele = blockDim.x * blockIdx.x + threadIdx.x;iele < _NELE; iele+= blockDim.x * gridDim.x) { int i,j,jpt,iA, in, ip, iq, ind, p, q, r; G_Vector3 dsj, drj, elem_center; G_Matrix33 Fe, Fdef, B, C, Cinv, FCinvtran, dEdF, hinv; G_Matrix33 eigF, invEigF; double Eele, Eint, Jet, trace, detB, J2; double lambda, mu, temp; G_Matrix33 E, E2, I, pk2, temp1, temp2; mu = 1; lambda = 1.95; G_Matrix33 _d_H(_d_H_element); I.eye(); /* energy of this element */ hinv = _d_H.inv(); Eele = 0; /* center of the element */ elem_center.clear();elem_center[0] = elem_center[1]=elem_center[2]= 0; for(j=0;j<_NNODE_PER_ELEMENT;j++) { jpt=_d_elements[iele*_NNODE_PER_ELEMENT+j]; for (i = 0;i<_NDIM;i++) { elem_center[i] += 1.0/_NNODE_PER_ELEMENT *_d_Rref[jpt][i]; } } for(j=0;j<_NNODE_PER_ELEMENT;j++) { jpt=iele*_NNODE_PER_ELEMENT+j; _d_F_padding[jpt].clear(); } for(iA=0;iA<_NINT_PER_ELEMENT;iA++) { /* energy of this Gauss integration point */ Eint = 0; /* deformation gradient */ Fdef.clear(); Fdef[0][0] = Fdef[1][1] = Fdef[2][2] = 1.0; for(j=0;j<_NNODE_PER_ELEMENT;j++) { jpt=_d_elements[iele*_NNODE_PER_ELEMENT+j]; _d_SRref[jpt ] = hinv*_d_Rref[jpt]; dsj = _d_SR[jpt] - _d_SRref[jpt]; dsj.subint(); _d_H.multiply(dsj, drj); /* Add contribution from drj to F using dFdu */ for(ip=0;ip<_NDIM;ip++) { for(iq=0;iq<_NDIM;iq++) { for(in=0;in<_NDIM;in++) { ind=(((iA*_NDIM+ip)*_NDIM+iq)*_NDIM+in)*_NNODE_PER_ELEMENT+j; if (_NDIM == 2) Fdef[ip][iq] += _d_dFdu[ind]*drj[_d_map23[in]]; else //_NDIM == 3 Fdef[ip][iq] += _d_dFdu[ind]*drj[in]; } } } } eigF = getEigenF(elem_center, Fdef, y_eigen_zbound_max, y_eigen_zbound_min, x_eigen_zbound_max, x_eigen_zbound_min, y_eigen_strain, x_eigen_strain ); invEigF = eigF.inv(); Fe = Fdef*invEigF; E = Fe.tran()*Fe-I; B = Fe*Fe.tran(); C = Fe.tran()*Fe; Cinv = C.inv(); Jet = Fdef.det(); J2 = Jet*Jet; detB = B.det(); Eint = 0; /* Add contribution from F to Eint */ if (_NDIM == 2) { trace = B[0][0] + B[1][1]; Eint = 0.5*(trace + 1.0/detB - 3); /* multiply MU and V0 later */ dEdF = FCinvtran*(-1.0/J2) + Fdef; /* multiply MU later */ /* Add contribution from drj to F using dFdu */ for(j=0;j<_NNODE_PER_ELEMENT;j++) { jpt=iele*_NNODE_PER_ELEMENT+j; for(ip=0;ip<_NDIM;ip++) { for(iq=0;iq<_NDIM;iq++) { for(in=0;in<_NDIM;in++) { ind=(((iA*_NDIM+ip)*_NDIM+iq)*_NDIM+in)*_NNODE_PER_ELEMENT+j; _d_F_padding[jpt][_d_map23[in] ] -= _d_gauss_weight[iA] * dEdF[ip][iq]*_d_dFdu[ind] * __V0; } } } } } /* Add contribution from F to Eint */ if (_NDIM == 3) { #if defined NeoHooken double C10 = 1; double D = 1e-1; double jet23 = pow(Jet, -2.0/3.0); double Ibar = B.trace() * jet23; Eint = C10*(Ibar-3) + 1.0/D *(Jet-1)*(Jet-1); dEdF = (Fdef*(invEigF*(invEigF.tran())) * 2.0*jet23 - ((Fdef.inv()).tran())*2.0/3.0 * Ibar)*C10 + ((Fdef.inv()).tran())*2.0/D *(Jet-1)*Jet; #else E2 = E*E; dEdF.clear(); for (i = 0;i<_NDIM;i++) for (j = 0;j<_NDIM;j++) for (p = 0;p<_NDIM;p++) for (r = 0;r<_NDIM;r++) { temp = 0; for (q = 0;q<_NDIM;q++) temp += 
2*mu*invEigF[j][p]*Fdef[i][r]*invEigF[r][q]*E[p][q] + 2*mu*invEigF[r][p]*Fdef[i][r]*invEigF[j][q]*E[p][q]; dEdF[i][j] += 0.5*(2*lambda*E.trace()*invEigF[j][p]*Fdef[i][r] * invEigF[r][p] + temp); } Eint = 0.5*lambda*(E.trace())*(E.trace()) + mu*(E2.trace()); #endif /* Add contribution from drj to F using dFdu */ for(j=0;j<_NNODE_PER_ELEMENT;j++) { jpt=iele*_NNODE_PER_ELEMENT+j; for(ip=0;ip<_NDIM;ip++) { for(iq=0;iq<_NDIM;iq++) { for(in=0;in<_NDIM;in++) { ind=(((iA*_NDIM+ip)*_NDIM+iq)*_NDIM+in)*_NNODE_PER_ELEMENT+j; _d_F_padding[jpt][in ] -= _d_gauss_weight[iA] * dEdF[ip][iq]*_d_dFdu[ind] * __V0; } } } } } /* Add contribution from Eint to Eele */ Eele += _d_gauss_weight[iA]*Eint *__V0; } _d_EPOT[iele] = Eele; } } __global__ void kernel_assemble_back_force(int _NP, int *_d_fixed, int *_d_inv_elements, G_Vector3 *_d_F, G_Vector3 *_d_F_padding) { for(int ind = blockDim.x * blockIdx.x + threadIdx.x; ind<_NP;ind+=blockDim.x*gridDim.x) { _d_F[ind].clear(); for (int k = 0; k<_MAX_NELEM_SHARE_NODE; k++) { int indice = ind *_MAX_NELEM_SHARE_NODE+k; if (_d_inv_elements[ indice ] >= 0 && _d_fixed[ind] == 0) { _d_F[ind] += _d_F_padding[ _d_inv_elements[indice] ]; } } } } void FEMFrame::cuda_beam_fem_energy_force() { DUMP("FEM"); gpuErrchk( cudaMemcpy( _d_SR, _SR, _NP*sizeof(G_Vector3), cudaMemcpyHostToDevice)); gpuErrchk( cudaMemcpy( _d_H_element, _H.element, 3*3*sizeof(double), cudaMemcpyHostToDevice)); gpuErrchk( cudaMemcpy( _d_fixed, fixed, _NP*sizeof(int), cudaMemcpyHostToDevice)); _EPOT=0; //put this back for(int i=0;i<_NP;i++) { _EPOT_IND[i]=0; _EPOT_RMV[i]=0; _VIRIAL_IND[i].clear(); } _VIRIAL.clear(); #ifdef DEBUG_USECUDA assert(check_host_device_memory_transfer() == 0); #endif kernel_beam_fem_energy_force<<<(_NELE+255)/256,__NTHREADS>>>(_NDIM, _NELE, _NINT_PER_ELEMENT, _NNODE_PER_ELEMENT, _d_elements, _d_inv_elements, _d_map23, _d_fixed, _d_gauss_weight, _d_dFdu, _d_EPOT, _d_SR, _d_SRref, _d_Rref, _d_F, _d_F_padding, _d_H_element, __V0); //cudaDeviceSynchronize(); #ifdef DEBUG_USECUDA assert(check_host_device_memory_transfer() == 0); #endif kernel_assemble_back_force<<<(_NP+255)/256,__NTHREADS>>>(_NP, _d_fixed, _d_inv_elements, _d_F, _d_F_padding); // cudaDeviceSynchronize(); #ifdef DEBUG_USECUDA gpuErrchk(cudaMemcpy(_h_EPOT,_d_EPOT, _NELE*sizeof(double), cudaMemcpyDeviceToHost)); for (int i = 0; i<_NELE; i++) { printf("_h_EPOT[%d]= %g, _NELE = %d\n", i,_h_EPOT[i],_NELE); _EPOT += _h_EPOT[i]; } #else /* Reduce potential energy to CPU for relax function to call */ thrust::device_ptr<double> t_EPOT = thrust::device_pointer_cast(_d_EPOT); _EPOT = thrust::reduce(t_EPOT,t_EPOT+_NELE); #endif /* Copy force (Vector3 *) back to CPU for relax function to call */ cudaMemcpy( _F, _d_F, _NP*sizeof(G_Vector3), cudaMemcpyDeviceToHost); } void FEMFrame::cuda_snap_fem_energy_force() { DUMP("FEM"); gpuErrchk( cudaMemcpy( _d_SR, _SR, _NP*sizeof(G_Vector3), cudaMemcpyHostToDevice)); gpuErrchk( cudaMemcpy( _d_H_element, _H.element, 3*3*sizeof(double), cudaMemcpyHostToDevice)); gpuErrchk( cudaMemcpy( _d_fixed, fixed, _NP*sizeof(int), cudaMemcpyHostToDevice)); _EPOT=0; //put this back for(int i=0;i<_NP;i++) { _EPOT_IND[i]=0; _EPOT_RMV[i]=0; _VIRIAL_IND[i].clear(); } _VIRIAL.clear(); #ifdef DEBUG_USECUDA assert(check_host_device_memory_transfer() == 0); #endif kernel_snap_fem_energy_force<<<(_NELE+255)/256,__NTHREADS>>>(_NDIM, _NELE, _NINT_PER_ELEMENT, _NNODE_PER_ELEMENT, _d_elements, _d_inv_elements, _d_map23, _d_fixed, _d_gauss_weight, _d_dFdu, _d_EPOT, _d_SR, _d_SRref, _d_Rref, _d_F, _d_F_padding, 
_d_H_element,y_eigen_zbound_max, y_eigen_zbound_min, x_eigen_zbound_max, x_eigen_zbound_min, y_eigen_strain, x_eigen_strain , __V0); //cudaDeviceSynchronize(); #ifdef DEBUG_USECUDA assert(check_host_device_memory_transfer() == 0); #endif kernel_assemble_back_force<<<(_NP+255)/256,__NTHREADS>>>(_NP, _d_fixed, _d_inv_elements, _d_F, _d_F_padding); // cudaDeviceSynchronize(); #ifdef DEBUG_USECUDA gpuErrchk(cudaMemcpy(_h_EPOT,_d_EPOT, _NELE*sizeof(double), cudaMemcpyDeviceToHost)); for (int i = 0; i<_NELE; i++) { printf("_h_EPOT[%d]= %g, _NELE = %d\n", i,_h_EPOT[i],_NELE); _EPOT += _h_EPOT[i]; } #else /* Reduce potential energy to CPU for relax function to call */ thrust::device_ptr<double> t_EPOT = thrust::device_pointer_cast(_d_EPOT); _EPOT = thrust::reduce(t_EPOT,t_EPOT+_NELE); #endif /* Copy force (Vector3 *) back to CPU for relax function to call */ cudaMemcpy( _F, _d_F, _NP*sizeof(G_Vector3), cudaMemcpyDeviceToHost); } void FEMFrame::free_device_ptr() { cudaFree(_d_elements); cudaFree(_d_inv_elements); cudaFree(_d_map23); cudaFree(_d_fixed); cudaFree(_d_colorids); cudaFree(_d_gauss_weight); cudaFree(_d_dFdu); cudaFree(_d_EPOT); cudaFree(_d_EPOT_IND); cudaFree(_d_EPOT_RMV); cudaFree(_d_SR); cudaFree(_d_SRref); cudaFree(_d_Rref); cudaFree(_d_F); cudaFree(_d_F_padding); } /* This is a simple test for GPU. run the function to see if maxErro == 0. If not, GPU device is not set correctly */ __global__ void saxpy(int n, float a, const float *x, float *y) { int i = blockIdx.x*blockDim.x + threadIdx.x; if (i < n) y[i] = a*x[i] + y[i]; } int FEMFrame::test_saxpy(void) { int N = 1<<20; float *x, *y, *d_x, *d_y; x = (float*)malloc(N*sizeof(float)); y = (float*)malloc(N*sizeof(float)); cudaMalloc(&d_x, N*sizeof(float)); cudaMalloc(&d_y, N*sizeof(float)); for (int i = 0; i < N; i++) { x[i] = 1.0f; y[i] = 2.0f; } cudaMemcpy(d_x, x, N*sizeof(float), cudaMemcpyHostToDevice); cudaMemcpy(d_y, y, N*sizeof(float), cudaMemcpyHostToDevice); // Perform SAXPY on 1M elements saxpy<<<(N+255)/256, __NTHREADS>>>(N, 2.0f, d_x, d_y); cudaMemcpy(y, d_y, N*sizeof(float), cudaMemcpyDeviceToHost); float maxError = 0.0f; for (int i = 0; i < N; i++) maxError = max(maxError, abs(y[i]-4.0f)); INFO_Printf("Max error: %f\n", maxError); cudaFree(d_x); cudaFree(d_y); free(x); free(y); return 0; } #ifdef DEBUG_USECUDA int FEMFrame::check_host_device_memory_transfer() { INFO_Printf("I am in check_host_device memory transfer\n"); assert(sizeof(G_Vector3) == sizeof(Vector3)); assert(sizeof(G_Matrix33) == sizeof(Matrix33)); assert(_NP>0); assert(_NELE > 0); assert(_NNODE_PER_ELEMENT>0);assert(_NINT_PER_ELEMENT>0); assert(_H[0][0]>0 && _H[1][1]>0 && _H[2][2]>0); int size1 = _NELE*_NNODE_PER_ELEMENT; int size2 = _NP*_MAX_NELEM_SHARE_NODE; gpuErrchk( cudaMemcpy( _h_d_map23, _d_map23, 2*sizeof(int), cudaMemcpyDeviceToHost)); gpuErrchk( cudaMemcpy( _h_d_SR, _d_SR, _NP*sizeof(G_Vector3), cudaMemcpyDeviceToHost)); gpuErrchk( cudaMemcpy( _h_d_SRref, _d_SRref, _NP*sizeof(G_Vector3), cudaMemcpyDeviceToHost)); gpuErrchk( cudaMemcpy( _h_d_Rref, _d_Rref, _NP*sizeof(G_Vector3), cudaMemcpyDeviceToHost)); gpuErrchk( cudaMemcpy( _h_d_F, _d_F, _NP*sizeof(G_Vector3), cudaMemcpyDeviceToHost)); gpuErrchk( cudaMemcpy( _h_d_fixed, _d_fixed, _NP*sizeof(int), cudaMemcpyDeviceToHost)); gpuErrchk( cudaMemcpy( _h_d_H_element,_d_H_element, 3*3*sizeof(double), cudaMemcpyDeviceToHost)); gpuErrchk( cudaMemcpy(_h_d_elements, _d_elements, size1*sizeof(int), cudaMemcpyDeviceToHost)); gpuErrchk( cudaMemcpy(_h_d_inv_elements, _d_inv_elements,size2*sizeof(int), 
cudaMemcpyDeviceToHost)); gpuErrchk( cudaMemcpy(_h_d_gauss_weight, _d_gauss_weight,_NINT_PER_ELEMENT*sizeof(double),cudaMemcpyDeviceToHost)); gpuErrchk( cudaMemcpy(_h_EPOT, _d_EPOT,_NELE*sizeof(double),cudaMemcpyDeviceToHost)); for (int i = 0;i<2;i++) assert(map23[i]==_h_d_map23[i]); for (int i = 0;i<_NP;i++){ // printf("_SR[i]=%g,%g,%g, _h_d_SR[i]=%g,%g,%g, diff = %g,%g,%g\n", _SR[i][0], _SR[i][1], _SR[i][2], _h_d_SR[i][0], _h_d_SR[i][1], _h_d_SR[i][2], _SR[i][0]-_h_d_SR[i][0], _SR[i][1]-_h_d_SR[i][1], _SR[i][2]-_h_d_SR[i][2]); assert((G_Vector3(_SR[i])==G_Vector3(_h_d_SR[i]))); } for (int i = 0;i<_NP;i++) assert((G_Vector3(_SRref[i])==G_Vector3(_h_d_SRref[i]))); for (int i = 0;i<_NP;i++) assert((G_Vector3(_Rref[i])==G_Vector3(_h_d_Rref[i]))); for (int i = 0;i<_NP;i++) assert((G_Vector3(_F[i])==G_Vector3(_h_d_F[i]))); for (int i = 0;i<_NP;i++) assert(fixed[i]==_h_d_fixed[i]); for (int i = 0;i<size1;i++) assert(elements[i]==_h_d_elements[i]); for (int i = 0;i<size2;i++) assert(inv_elements[i]==_h_d_inv_elements[i]); for (int i = 0;i<_NINT_PER_ELEMENT;i++) assert(fabs(gauss_weight[i]-_h_d_gauss_weight[i])<1e-15); INFO_Printf("I am about to get out of check_host_device memory transfer\n"); return 0; } #endif
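Both cuda_beam_fem_energy_force and cuda_snap_fem_energy_force above sum the per-element energies on the device by wrapping the raw _d_EPOT pointer in a thrust::device_ptr and calling thrust::reduce, so only the scalar total crosses back to the host. A minimal stand-alone sketch of that pattern (the array name and element count here are illustrative, not taken from the file):

#include <thrust/device_ptr.h>
#include <thrust/reduce.h>
#include <cuda_runtime.h>
#include <cstdio>

int main() {
    const int n = 1024;                                   // illustrative element count
    double *d_epot = nullptr;
    cudaMalloc(&d_epot, n * sizeof(double));
    cudaMemset(d_epot, 0, n * sizeof(double));            // kernels would normally fill one energy per element
    thrust::device_ptr<double> t = thrust::device_pointer_cast(d_epot);
    double total = thrust::reduce(t, t + n);              // reduction runs on the GPU, scalar returned to the host
    printf("EPOT = %g\n", total);
    cudaFree(d_epot);
    return 0;
}

The alternative taken in the DEBUG_USECUDA branch, copying all _NELE energies back and summing on the CPU, gives the same result but moves _NELE doubles instead of one.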
8d5c8e67c1e6dbee6049892d2029d8be390b6fa2.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #define N 512 #define NUM_BLOCKS 16 #define NUM_THREADS 48 //Do not change above three lines. //Submission should be named as <RollNumber>_Prog.cu //Upload just this cu file and nothing else. If you upload it as a zip, it will not be evaluated. /*Remember the following guidelines to avoid losing marks This exercise is quite simple. The only tricky part is that total number of threads (NUM_BLOCKS*NUM_THREADS) may be different (higher or lower) from N. Index of an array should not exceed the array size. No output array-element should be computed more than once No marks will be given if the program does not compile or run (TAs will not debug your program at all) Do not change the name of any variable that we have introduced. */ #include <stdio.h> //TODO: WRITE GPU KERNEL. It should not be called repeatedly from the host, but just once. Each time it is called, it may process more than array-element or not process any array-element at all. __global__ void add(int *a, int *b, int *c) { long int total_set = ((N*N) / (NUM_BLOCKS*NUM_THREADS)) + 1; long int set = NUM_BLOCKS * NUM_THREADS; long int index_of_thread = threadIdx.x + (blockIdx.x * blockDim.x); for( long int i = 0; i < total_set; i++ ) { long int index = index_of_thread + (i*set); if( index < N*N ) { c[index] = a[index] + b[index]; } } } int main (int argc, char **argv) { int A[N][N], B[N][N], C[N][N]; int *d_A, *d_B, *d_C; // These are the copies of A, B and C on the GPU int *h_C; // This is a host copy of the output of B from the GPU int i, j; for(i=0;i<N;i++) { for(j=0;j<N;j++) { A[i][j] = i+j; B[i][j]= 2*j-1; } } // sequential implementation of main computation for(i=0;i<N;i++) { for(j=0;j<N;j++) { C[i][j] = A[i][j]+B[i][j]; } } int size = N*N * sizeof(int); // TODO: ALLOCATE MEMORY FOR GPU COPIES OF d_A, d_B and d_C hipMalloc((void **) &d_A, size); hipMalloc((void **) &d_B, size); hipMalloc((void **) &d_C, size); // TODO: COPY A TO d_A hipMemcpy(d_A, A, size, hipMemcpyHostToDevice); // TODO: COPY B TO d_B hipMemcpy(d_B, B, size, hipMemcpyHostToDevice); // TODO: CREATE BLOCKS with THREADS AND INVOKE GPU KERNEL //Use NUM_BLOCKS blocks, each with NUM_THREADS threads hipLaunchKernelGGL(( add), dim3(NUM_BLOCKS),dim3(NUM_THREADS), 0, 0, d_A, d_B, d_C); // TODO: COPY d_C BACK FROM GPU to CPU in variable h_C h_C = (int *)malloc(size); hipMemcpy(h_C, d_C, size, hipMemcpyDeviceToHost); // TODO: Verify result is correct by comparing for(i=0;i<N;i++) { for(j=0;j<N;j++) { //TODO: compare each element of h_C and C by subtracting them //print only those elements for which the above subtraction is non-zero int value = h_C[(i*N) +j] - C[i][j]; if( value != 0 ) { printf("%d %d %d\n", value, i, j); } } } //IF even one element of h_C and C differ, report an error. //Otherwise, there is no error. //If your program is correct, no error should occur. free(h_C); hipFree(d_A); hipFree(d_B); hipFree(d_C); }
8d5c8e67c1e6dbee6049892d2029d8be390b6fa2.cu
#define N 512 #define NUM_BLOCKS 16 #define NUM_THREADS 48 //Do not change above three lines. //Submission should be named as <RollNumber>_Prog.cu //Upload just this cu file and nothing else. If you upload it as a zip, it will not be evaluated. /*Remember the following guidelines to avoid losing marks This exercise is quite simple. The only tricky part is that total number of threads (NUM_BLOCKS*NUM_THREADS) may be different (higher or lower) from N. Index of an array should not exceed the array size. No output array-element should be computed more than once No marks will be given if the program does not compile or run (TAs will not debug your program at all) Do not change the name of any variable that we have introduced. */ #include <stdio.h> //TODO: WRITE GPU KERNEL. It should not be called repeatedly from the host, but just once. Each time it is called, it may process more than array-element or not process any array-element at all. __global__ void add(int *a, int *b, int *c) { long int total_set = ((N*N) / (NUM_BLOCKS*NUM_THREADS)) + 1; long int set = NUM_BLOCKS * NUM_THREADS; long int index_of_thread = threadIdx.x + (blockIdx.x * blockDim.x); for( long int i = 0; i < total_set; i++ ) { long int index = index_of_thread + (i*set); if( index < N*N ) { c[index] = a[index] + b[index]; } } } int main (int argc, char **argv) { int A[N][N], B[N][N], C[N][N]; int *d_A, *d_B, *d_C; // These are the copies of A, B and C on the GPU int *h_C; // This is a host copy of the output of B from the GPU int i, j; for(i=0;i<N;i++) { for(j=0;j<N;j++) { A[i][j] = i+j; B[i][j]= 2*j-1; } } // sequential implementation of main computation for(i=0;i<N;i++) { for(j=0;j<N;j++) { C[i][j] = A[i][j]+B[i][j]; } } int size = N*N * sizeof(int); // TODO: ALLOCATE MEMORY FOR GPU COPIES OF d_A, d_B and d_C cudaMalloc((void **) &d_A, size); cudaMalloc((void **) &d_B, size); cudaMalloc((void **) &d_C, size); // TODO: COPY A TO d_A cudaMemcpy(d_A, A, size, cudaMemcpyHostToDevice); // TODO: COPY B TO d_B cudaMemcpy(d_B, B, size, cudaMemcpyHostToDevice); // TODO: CREATE BLOCKS with THREADS AND INVOKE GPU KERNEL //Use NUM_BLOCKS blocks, each with NUM_THREADS threads add<<<NUM_BLOCKS,NUM_THREADS>>>(d_A, d_B, d_C); // TODO: COPY d_C BACK FROM GPU to CPU in variable h_C h_C = (int *)malloc(size); cudaMemcpy(h_C, d_C, size, cudaMemcpyDeviceToHost); // TODO: Verify result is correct by comparing for(i=0;i<N;i++) { for(j=0;j<N;j++) { //TODO: compare each element of h_C and C by subtracting them //print only those elements for which the above subtraction is non-zero int value = h_C[(i*N) +j] - C[i][j]; if( value != 0 ) { printf("%d %d %d\n", value, i, j); } } } //IF even one element of h_C and C differ, report an error. //Otherwise, there is no error. //If your program is correct, no error should occur. free(h_C); cudaFree(d_A); cudaFree(d_B); cudaFree(d_C); }
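The assignment's constraint, a single kernel launch in which each thread may handle several elements or none because NUM_BLOCKS*NUM_THREADS need not match N*N, is met above with a precomputed pass count. The same requirement is commonly written as a grid-stride loop; a sketch of that equivalent formulation (an alternative, not the submission's code):

__global__ void add_grid_stride(const int *a, const int *b, int *c, long n) {
    long stride = (long)blockDim.x * gridDim.x;              // total threads launched
    for (long i = blockIdx.x * blockDim.x + threadIdx.x;     // each thread starts at its global id
         i < n;
         i += stride)                                        // and advances by the whole grid
        c[i] = a[i] + b[i];                                  // every element written exactly once
}
// launched once, e.g.: add_grid_stride<<<NUM_BLOCKS, NUM_THREADS>>>(d_A, d_B, d_C, (long)N * N);

With 16 blocks of 48 threads this makes 768 threads cover the 262144 elements in ceil(262144 / 768) = 342 passes, matching the total_set count computed in the submission's kernel.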
684bb05fe1448efc9b3d7bf7979f7fa47959411c.hip
// !!! This is a file automatically generated by hipify!!! #include <stdio.h> #include <stdlib.h> #include <hip/hip_runtime.h> #include "compare.h" #include "gputimer.h" // Reference __global__ void smooth(float * v_new, const float * v) { // int myIdx = threadIdx.x * gridDim.x + blockIdx.x; int myIdx = threadIdx.x + blockIdx.x * blockDim.x; int numThreads = blockDim.x * gridDim.x; int myLeftIdx = (myIdx == 0) ? 0 : myIdx - 1; int myRightIdx = (myIdx == (numThreads - 1)) ? numThreads - 1 : myIdx + 1; float myElt = v[myIdx]; float myLeftElt = v[myLeftIdx]; float myRightElt = v[myRightIdx]; v_new[myIdx] = 0.25f * myLeftElt + 0.5f * myElt + 0.25f * myRightElt; } // Your code __global__ void smooth_shared(float * v_new, const float * v) { extern __shared__ float s[]; // TODO: Complete the rest of this function int myIdx = threadIdx.x + blockIdx.x * blockDim.x; int numThreads = blockDim.x * gridDim.x; int tid = threadIdx.x; if (tid == 0) { s[0] = (myIdx == 0) ? v[0] : v[myIdx - 1]; } if (tid == blockDim.x - 1) { s[blockDim.x + 1] = (myIdx == (numThreads - 1)) ? v[numThreads - 1] : v[myIdx + 1]; } s[tid+1] = v[myIdx]; __syncthreads(); v_new[myIdx] = 0.25f * s[tid] + 0.5f * s[tid+1] + 0.25f * s[tid+2]; } int main(int argc, char **argv) { const int ARRAY_SIZE = 4096; const int BLOCK_SIZE = 256; const int ARRAY_BYTES = ARRAY_SIZE * sizeof(float); // generate the input array on the host float h_in[ARRAY_SIZE]; float h_cmp[ARRAY_SIZE]; float h_out[ARRAY_SIZE]; float h_out_shared[ARRAY_SIZE]; for(int i = 0; i < ARRAY_SIZE; i++) { // generate random float in [0, 1] h_in[i] = (float)random()/(float)RAND_MAX; } for(int i = 0; i < ARRAY_SIZE; i++) { h_cmp[i] = (0.25f * h_in[(i == 0) ? 0 : i-1] + 0.50f * h_in[i] + 0.25f * h_in[(i == (ARRAY_SIZE - 1)) ? ARRAY_SIZE - 1 : i+1]); } // declare GPU memory pointers float * d_in, * d_out, * d_out_shared; // allocate GPU memory hipMalloc((void **) &d_in, ARRAY_BYTES); hipMalloc((void **) &d_out, ARRAY_BYTES); hipMalloc((void **) &d_out_shared, ARRAY_BYTES); // transfer the input array to the GPU hipMemcpy(d_in, h_in, ARRAY_BYTES, hipMemcpyHostToDevice); // hipEvent_t start, stop; // hipEventCreate(&start); // hipEventCreate(&stop); // launch the kernel hipLaunchKernelGGL(( smooth), dim3(ARRAY_SIZE / BLOCK_SIZE), dim3(BLOCK_SIZE), 0, 0, d_out, d_in); GpuTimer timer; timer.Start(); hipLaunchKernelGGL(( smooth_shared), dim3(ARRAY_SIZE / BLOCK_SIZE), dim3(BLOCK_SIZE), (BLOCK_SIZE + 2) * sizeof(float), 0, d_out_shared, d_in); timer.Stop(); printf("Your code executed in %g ms\n", timer.Elapsed()); // hipEventSynchronize(stop); // float elapsedTime; // hipEventElapsedTime(&elapsedTime, start, stop); // copy back the result from GPU hipMemcpy(h_out, d_out, ARRAY_BYTES, hipMemcpyDeviceToHost); hipMemcpy(h_out_shared, d_out_shared, ARRAY_BYTES, hipMemcpyDeviceToHost); // testing for correctness compare(h_in, h_out, h_out_shared, h_cmp, ARRAY_SIZE); // free GPU memory allocation hipFree(d_in); hipFree(d_out); hipFree(d_out_shared); return 0; }
684bb05fe1448efc9b3d7bf7979f7fa47959411c.cu
#include <stdio.h> #include <stdlib.h> #include <cuda_runtime.h> #include "compare.h" #include "gputimer.h" // Reference __global__ void smooth(float * v_new, const float * v) { // int myIdx = threadIdx.x * gridDim.x + blockIdx.x; int myIdx = threadIdx.x + blockIdx.x * blockDim.x; int numThreads = blockDim.x * gridDim.x; int myLeftIdx = (myIdx == 0) ? 0 : myIdx - 1; int myRightIdx = (myIdx == (numThreads - 1)) ? numThreads - 1 : myIdx + 1; float myElt = v[myIdx]; float myLeftElt = v[myLeftIdx]; float myRightElt = v[myRightIdx]; v_new[myIdx] = 0.25f * myLeftElt + 0.5f * myElt + 0.25f * myRightElt; } // Your code __global__ void smooth_shared(float * v_new, const float * v) { extern __shared__ float s[]; // TODO: Complete the rest of this function int myIdx = threadIdx.x + blockIdx.x * blockDim.x; int numThreads = blockDim.x * gridDim.x; int tid = threadIdx.x; if (tid == 0) { s[0] = (myIdx == 0) ? v[0] : v[myIdx - 1]; } if (tid == blockDim.x - 1) { s[blockDim.x + 1] = (myIdx == (numThreads - 1)) ? v[numThreads - 1] : v[myIdx + 1]; } s[tid+1] = v[myIdx]; __syncthreads(); v_new[myIdx] = 0.25f * s[tid] + 0.5f * s[tid+1] + 0.25f * s[tid+2]; } int main(int argc, char **argv) { const int ARRAY_SIZE = 4096; const int BLOCK_SIZE = 256; const int ARRAY_BYTES = ARRAY_SIZE * sizeof(float); // generate the input array on the host float h_in[ARRAY_SIZE]; float h_cmp[ARRAY_SIZE]; float h_out[ARRAY_SIZE]; float h_out_shared[ARRAY_SIZE]; for(int i = 0; i < ARRAY_SIZE; i++) { // generate random float in [0, 1] h_in[i] = (float)random()/(float)RAND_MAX; } for(int i = 0; i < ARRAY_SIZE; i++) { h_cmp[i] = (0.25f * h_in[(i == 0) ? 0 : i-1] + 0.50f * h_in[i] + 0.25f * h_in[(i == (ARRAY_SIZE - 1)) ? ARRAY_SIZE - 1 : i+1]); } // declare GPU memory pointers float * d_in, * d_out, * d_out_shared; // allocate GPU memory cudaMalloc((void **) &d_in, ARRAY_BYTES); cudaMalloc((void **) &d_out, ARRAY_BYTES); cudaMalloc((void **) &d_out_shared, ARRAY_BYTES); // transfer the input array to the GPU cudaMemcpy(d_in, h_in, ARRAY_BYTES, cudaMemcpyHostToDevice); // cudaEvent_t start, stop; // cudaEventCreate(&start); // cudaEventCreate(&stop); // launch the kernel smooth<<<ARRAY_SIZE / BLOCK_SIZE, BLOCK_SIZE>>>(d_out, d_in); GpuTimer timer; timer.Start(); smooth_shared<<<ARRAY_SIZE / BLOCK_SIZE, BLOCK_SIZE, (BLOCK_SIZE + 2) * sizeof(float)>>>(d_out_shared, d_in); timer.Stop(); printf("Your code executed in %g ms\n", timer.Elapsed()); // cudaEventSynchronize(stop); // float elapsedTime; // cudaEventElapsedTime(&elapsedTime, start, stop); // copy back the result from GPU cudaMemcpy(h_out, d_out, ARRAY_BYTES, cudaMemcpyDeviceToHost); cudaMemcpy(h_out_shared, d_out_shared, ARRAY_BYTES, cudaMemcpyDeviceToHost); // testing for correctness compare(h_in, h_out, h_out_shared, h_cmp, ARRAY_SIZE); // free GPU memory allocation cudaFree(d_in); cudaFree(d_out); cudaFree(d_out_shared); return 0; }
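Besides the shared-memory halo, this pair shows how the optional launch parameters travel through hipify: the .cu launch smooth_shared<<<ARRAY_SIZE / BLOCK_SIZE, BLOCK_SIZE, (BLOCK_SIZE + 2) * sizeof(float)>>>(d_out_shared, d_in) becomes hipLaunchKernelGGL(( smooth_shared), dim3(...), dim3(...), (BLOCK_SIZE + 2) * sizeof(float), 0, d_out_shared, d_in), with the dynamic shared-memory byte count and the stream as explicit macro arguments. A small self-contained CUDA illustration of that shared-bytes slot (kernel name and sizes are illustrative only):

#include <cuda_runtime.h>
#include <cstdio>

__global__ void fill_from_shared(float *out) {
    extern __shared__ float s[];                      // sized by the launch; one float per thread here
    s[threadIdx.x] = (float)threadIdx.x;
    __syncthreads();
    out[blockIdx.x * blockDim.x + threadIdx.x] = s[threadIdx.x];
}

int main() {
    const int block = 256, grid = 4;
    float *d_out = nullptr;
    cudaMalloc(&d_out, grid * block * sizeof(float));
    // third <<<...>>> argument = dynamic shared memory in bytes
    fill_from_shared<<<grid, block, block * sizeof(float)>>>(d_out);
    cudaDeviceSynchronize();
    float h = 0.0f;
    cudaMemcpy(&h, d_out + 5, sizeof(float), cudaMemcpyDeviceToHost);
    printf("%g\n", h);                                // prints 5
    cudaFree(d_out);
    return 0;
}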
c15fdef183f9e12847d4d8a473eda596125e362a.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "stdio.h" __global__ void MyKernel(int *array, int arrayCount) { int idx = threadIdx.x + blockIdx.x * blockDim.x; if (idx < arrayCount) { array[idx] *= array[idx]; } } int main (void) { int arraySize = 1024*1024; int blockSize, minGridSize, gridSize, maxActiveBlocks; hipOccupancyMaxPotentialBlockSize( &minGridSize, &blockSize, MyKernel, 0, 0); // Round up according to array size gridSize = (arraySize + blockSize - 1) / blockSize; //MyKernel<<< gridSize, blockSize >>>(array, arrayCount); // calculate theoretical occupancy hipOccupancyMaxActiveBlocksPerMultiprocessor( &maxActiveBlocks, MyKernel, blockSize, 0); // get device properties int device; hipDeviceProp_t props; hipGetDevice(&device); hipGetDeviceProperties(&props, device); // calculate theoretical occupancy float occupancy = (maxActiveBlocks * blockSize / props.warpSize) / (float)(props.maxThreadsPerMultiProcessor / props.warpSize); printf("Thread-blocks of size %d with gridSize %d. Theoretical occupancy: %f\n", blockSize, gridSize,occupancy); }
c15fdef183f9e12847d4d8a473eda596125e362a.cu
#include "stdio.h" __global__ void MyKernel(int *array, int arrayCount) { int idx = threadIdx.x + blockIdx.x * blockDim.x; if (idx < arrayCount) { array[idx] *= array[idx]; } } int main (void) { int arraySize = 1024*1024; int blockSize, minGridSize, gridSize, maxActiveBlocks; cudaOccupancyMaxPotentialBlockSize( &minGridSize, &blockSize, MyKernel, 0, 0); // Round up according to array size gridSize = (arraySize + blockSize - 1) / blockSize; //MyKernel<<< gridSize, blockSize >>>(array, arrayCount); // calculate theoretical occupancy cudaOccupancyMaxActiveBlocksPerMultiprocessor( &maxActiveBlocks, MyKernel, blockSize, 0); // get device properties int device; cudaDeviceProp props; cudaGetDevice(&device); cudaGetDeviceProperties(&props, device); // calculate theoretical occupancy float occupancy = (maxActiveBlocks * blockSize / props.warpSize) / (float)(props.maxThreadsPerMultiProcessor / props.warpSize); printf("Thread-blocks of size %d with gridSize %d. Theoretical occupancy: %f\n", blockSize, gridSize,occupancy); }
fbc04f0266e795d5b66dab86adf37a340f945106.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* ============================================================================ Name : test0.cu Author : Niklas Version : Copyright : Your copyright notice Description : CUDU Reversi possibilities calculator and applicator ============================================================================ */ #include <iostream> #include <numeric> #include <stdlib.h> #include "data.cuh" static void CheckCudaErrorAux (const char *, unsigned, const char *, hipError_t); #define CUDA_CHECK_RETURN(value) CheckCudaErrorAux(__FILE__,__LINE__, #value, value) #define PG_SIZE 64; /** * CUDA kernel that computes the possibilities on the given playgrounds */ __global__ void possibilityKernel(short *pg, bool *result, short *player) { // calculate the pos on the field unsigned idx = threadIdx.x; unsigned idy = threadIdx.y; unsigned idz = threadIdx.z; unsigned id = idx + 8 * idy; // how many pgs we have unsigned pgPos = blockIdx.x * 64; if(pg[pgPos + id] == player[blockIdx.x]) { // calculate the dirs short dirX = idz / 3 == 2 ? 0 : idz / 3 * 2 - 1; short dirY = (idz % 3) == 2 ? 0 : (idz % 3) * 2 - 1; // printf("pg(%d) id(%d %d) dir(%d %d)\n", pgPos, idx, idy, dirX, dirY); bool found = false; for(int i = 1; i < 8; i++) { short posX = i * dirX + idx; short posY = i * dirY + idy; // iterate until the pos is out of the field or // until we know that it is a position we care or not care about if(posX >= 0 && posX < 8 && posY >= 0 && posY < 8) { // look if we care about the current id // if we find the other player on the pos its great because we can flip if(pg[pgPos + posX + posY * 8] == -player[blockIdx.x]) found = true; // if we find it empty and we haven't found an other player yet this dir is useless else if(pg[pgPos + posX + posY * 8] == 0 && !found) return; //if we find an empty spot and we have seen the other player before thats awesome else if(pg[pgPos + posX + posY * 8] == 0 && found) { result[pgPos + posX + posY * 8] = true; //printf("pg(%d) id(%d %d) dir(%d %d) pos(%d %d)\n", pgPos, idx, idy, dirX, dirY, posX, posY); return; // if we get to ourself somehow stop searching } else if(pg[pgPos + posX + posY * 8] == player[blockIdx.x]) return; } } } } /** * CUDA kernel that creates the new playground from the touched pos and the old one */ __global__ void changeKernel(short2 *poss, short *result, short* player) { // get the poss and the dir we are on on the field unsigned idx = blockIdx.x; unsigned dir = threadIdx.x; // how many p we have unsigned pgPos = idx * 64; // calculate the dirs short dirX = dir / 3 == 2 ? 0 : dir / 3 * 2 - 1; short dirY = (dir % 3) == 2 ? 
0 : (dir % 3) * 2 - 1; bool found = false; bool dirIsRight = false; for(int i = 1; i < 8; i++) { short posX = i * dirX + poss[idx].x; short posY = i * dirY + poss[idx].y; short field = result[pgPos + posX + posY * 8]; // iterate until the pos is out of the field or // until we know that it is a position we care or not care about if(posX >= 0 && posX < 8 && posY >= 0 && posY < 8) { // look if we care about the current id // if we find the other player on the pos its great because we can flip if(field == -(player[idx])) { found = true; } // if we find it empty and we haven't found an other player yet this dir is useless else if(field == 0) { return; } // if we get to ourself somehow and we found the enemy befor its great else if(field == player[idx] && found) { dirIsRight = true; break; } else { } } } if(dirIsRight) { printf("dirX: %d dirY %d \n", dirX, dirY); for(int i = 1; i < 8; i++) { short posX = i * dirX + poss[idx].x; short posY = i * dirY + poss[idx].y; bool end = result[posX + posY * 8] == 0; result[posX + posY * 8] = player[idx]; if(end) return; } } } /** * Host function that copies the data and launches the work on GPU */ bool *gpuPoss(int size, short *pg, bool *out, short *player) { bool *cpuOut= new bool[64 * size]; short *gpuPG; short *gpuPlayer; bool *gpuOut; // allocate the mem printf("Allocating... \n \n"); CUDA_CHECK_RETURN(hipMalloc((void **)&gpuPG, sizeof(short) * 64 * size)); CUDA_CHECK_RETURN(hipMalloc((void **)&gpuOut, sizeof(bool) * 64 * size)); CUDA_CHECK_RETURN(hipMalloc((void **)&gpuPlayer, sizeof(short) * size)); // copy the initial values CUDA_CHECK_RETURN(hipMemcpy(gpuPG, pg, sizeof(short) * 64 * size, hipMemcpyHostToDevice)); CUDA_CHECK_RETURN(hipMemcpy(gpuOut, out, sizeof(bool) * 64 * size, hipMemcpyHostToDevice)); CUDA_CHECK_RETURN(hipMemcpy(gpuPlayer, player, sizeof(short) * size, hipMemcpyHostToDevice)); const int blockCount = size; const dim3 BLOCK_SIZE(8, 8, 8); hipLaunchKernelGGL(( possibilityKernel), dim3(blockCount), dim3(BLOCK_SIZE), 0, 0, gpuPG, gpuOut, gpuPlayer); // Wait for GPU to finish before accessing on host hipDeviceSynchronize(); CUDA_CHECK_RETURN(hipMemcpy(cpuOut, gpuOut, sizeof(bool) * 64 * size, hipMemcpyDeviceToHost)); CUDA_CHECK_RETURN(hipFree(gpuPG)); CUDA_CHECK_RETURN(hipFree(gpuOut)); CUDA_CHECK_RETURN(hipFree(gpuPlayer)); return cpuOut; } /** * Host function that copies the data and launches the work on GPU */ short *gpuPG(int size, short2 *poss, short *pg, short *player) { short *cpuPG= new short[64 * size]; short *gpuPG; short *gpuPlayer; short2 *gpuPoss; // allocate the mem printf("Allocating... 
\n \n"); CUDA_CHECK_RETURN(hipMalloc((void **)&gpuPoss, sizeof(short2) * size)); CUDA_CHECK_RETURN(hipMalloc((void **)&gpuPG, sizeof(short) * 64 * size)); CUDA_CHECK_RETURN(hipMalloc((void **)&gpuPlayer, sizeof(short) * size)); // copy the initial values CUDA_CHECK_RETURN(hipMemcpy(gpuPoss, poss, sizeof(short2) * size, hipMemcpyHostToDevice)); CUDA_CHECK_RETURN(hipMemcpy(gpuPG, pg, sizeof(short) * 64 * size, hipMemcpyHostToDevice)); CUDA_CHECK_RETURN(hipMemcpy(gpuPlayer, player, sizeof(short) * size, hipMemcpyHostToDevice)); const int blockCount = size; const int blockSize = 8; cout << "Running with " << size << " pgs" << endl; hipLaunchKernelGGL(( changeKernel), dim3(blockCount), dim3(blockSize), 0, 0, gpuPoss, gpuPG, gpuPlayer); // Wait for GPU to finish before accessing on host hipError_t cudaerr = hipDeviceSynchronize(); if (cudaerr != hipSuccess) { printf("kernel launch failed with error \"%s\".\n", hipGetErrorString(cudaerr)); } CUDA_CHECK_RETURN(hipMemcpy(cpuPG, gpuPG, sizeof(short) * 64 * size, hipMemcpyDeviceToHost)); CUDA_CHECK_RETURN(hipFree(gpuPG)); CUDA_CHECK_RETURN(hipFree(gpuPoss)); CUDA_CHECK_RETURN(hipFree(gpuPlayer)); return cpuPG; } void initialize_pg(result r, short *data, short* player, int size){ for(int i = 0; i < size; ++i) { row row = r[i]; player[i] = (row[1].as<short>() % 2) * 2 -1; auto arr = row[3].as_array(); for(int j = 0; j < 64; j++) { string s = arr.get_next().second; if(s != "") { int content = std::stoi(s); data[i * 64 + j] = static_cast<short>(content); } else { j--; } } } } void initialize_poss(result r, short *data, short *player, short2 *poss, short* round,int *last_pg, int size) { for(int i = 0; i < size; i++) { row row = r[i]; poss[i] = make_short2(row[1].as<short>(), row[2].as<short>()); player[i] = (row[3].as<short>() % 2) * 2 -1; round[i] = row[3].as<short>(); last_pg[i] = row[0].as<int>(); // DATA auto arr = row[4].as_array(); for(int j = 0; j < 64; j++) { string s = arr.get_next().second; if(s != "") { int content = std::stoi(s); data[i * 64 + j] = static_cast<short>(content); } else { j--; } } } } void calculate_poss(pg pg) { // set the max size int size = 1; result r = pg.get_open_pg(size); // if we get less resize it size = r.size(); if(size == 0) { cout << "No results for poss" << endl; return; } short *data = new short[64 * size]; bool *out = (bool*) malloc(64 * size); short *player = new short(size); initialize_pg(r, data, player, size); bool *poss = gpuPoss(size, data, out, player); for(int i = 0; i < size; i++) { for(int j = 0; j < 64; j++) { if(poss[i * 64 + j]) { // TODO: add possibility int y = j / 8; int x = j % 8; int id = r[i][0].as<int>(); pg.insertPoss(id, x, y); } } } short gpuSum = std::accumulate (poss, poss + 64 * size, 0); /* Verify the results */ std::cout << "gpuSum = " << gpuSum; /* Free memory */ delete[] data; delete[] poss; } void calculate_pg(pg pg) { int size = 1; result r = pg.get_open_poss(size); size = r.size(); if(size == 0) { cout << "No results for pg" << endl; return; } short *data = new short[64 * size]; short *player = new short[size]; short2 *poss = new short2[size]; short *round = new short[size]; int *last_pg = new int[size]; initialize_poss(r, data, player, poss, round, last_pg, size); cout << "Poss(" << poss[0].x << " " << poss[0].y << ")" << endl; cout << "Player: " << player[0] << endl; short *gpu_pg = gpuPG(size, poss, data, player); pg.insertPlayground(gpu_pg, round, last_pg, poss, size); } int main(void) { pg pg; pg.connect(); //calculate_poss(pg); calculate_pg(pg); return 0; } /** * Check the 
return value of the HIP runtime API call and exit * the application if the call has failed. */ static void CheckCudaErrorAux (const char *file, unsigned line, const char *statement, hipError_t err) { if (err == hipSuccess) return; std::cerr << statement<<" returned " << hipGetErrorString(err) << "("<<err<< ") at "<<file<<":"<<line << std::endl; exit (1); }

fbc04f0266e795d5b66dab86adf37a340f945106.cu
/* ============================================================================ Name : test0.cu Author : Niklas Version : Copyright : Your copyright notice Description : CUDU Reversi possibilities calculator and applicator ============================================================================ */ #include <iostream> #include <numeric> #include <stdlib.h> #include "data.cuh" static void CheckCudaErrorAux (const char *, unsigned, const char *, cudaError_t); #define CUDA_CHECK_RETURN(value) CheckCudaErrorAux(__FILE__,__LINE__, #value, value) #define PG_SIZE 64; /** * CUDA kernel that computes the possibilities on the given playgrounds */ __global__ void possibilityKernel(short *pg, bool *result, short *player) { // calculate the pos on the field unsigned idx = threadIdx.x; unsigned idy = threadIdx.y; unsigned idz = threadIdx.z; unsigned id = idx + 8 * idy; // how many pgs we have unsigned pgPos = blockIdx.x * 64; if(pg[pgPos + id] == player[blockIdx.x]) { // calculate the dirs short dirX = idz / 3 == 2 ? 0 : idz / 3 * 2 - 1; short dirY = (idz % 3) == 2 ? 0 : (idz % 3) * 2 - 1; // printf("pg(%d) id(%d %d) dir(%d %d)\n", pgPos, idx, idy, dirX, dirY); bool found = false; for(int i = 1; i < 8; i++) { short posX = i * dirX + idx; short posY = i * dirY + idy; // iterate until the pos is out of the field or // until we know that it is a position we care or not care about if(posX >= 0 && posX < 8 && posY >= 0 && posY < 8) { // look if we care about the current id // if we find the other player on the pos its great because we can flip if(pg[pgPos + posX + posY * 8] == -player[blockIdx.x]) found = true; // if we find it empty and we haven't found an other player yet this dir is useless else if(pg[pgPos + posX + posY * 8] == 0 && !found) return; //if we find an empty spot and we have seen the other player before thats awesome else if(pg[pgPos + posX + posY * 8] == 0 && found) { result[pgPos + posX + posY * 8] = true; //printf("pg(%d) id(%d %d) dir(%d %d) pos(%d %d)\n", pgPos, idx, idy, dirX, dirY, posX, posY); return; // if we get to ourself somehow stop searching } else if(pg[pgPos + posX + posY * 8] == player[blockIdx.x]) return; } } } } /** * CUDA kernel that creates the new playground from the touched pos and the old one */ __global__ void changeKernel(short2 *poss, short *result, short* player) { // get the poss and the dir we are on on the field unsigned idx = blockIdx.x; unsigned dir = threadIdx.x; // how many p we have unsigned pgPos = idx * 64; // calculate the dirs short dirX = dir / 3 == 2 ? 0 : dir / 3 * 2 - 1; short dirY = (dir % 3) == 2 ? 
0 : (dir % 3) * 2 - 1; bool found = false; bool dirIsRight = false; for(int i = 1; i < 8; i++) { short posX = i * dirX + poss[idx].x; short posY = i * dirY + poss[idx].y; short field = result[pgPos + posX + posY * 8]; // iterate until the pos is out of the field or // until we know that it is a position we care or not care about if(posX >= 0 && posX < 8 && posY >= 0 && posY < 8) { // look if we care about the current id // if we find the other player on the pos its great because we can flip if(field == -(player[idx])) { found = true; } // if we find it empty and we haven't found an other player yet this dir is useless else if(field == 0) { return; } // if we get to ourself somehow and we found the enemy befor its great else if(field == player[idx] && found) { dirIsRight = true; break; } else { } } } if(dirIsRight) { printf("dirX: %d dirY %d \n", dirX, dirY); for(int i = 1; i < 8; i++) { short posX = i * dirX + poss[idx].x; short posY = i * dirY + poss[idx].y; bool end = result[posX + posY * 8] == 0; result[posX + posY * 8] = player[idx]; if(end) return; } } } /** * Host function that copies the data and launches the work on GPU */ bool *gpuPoss(int size, short *pg, bool *out, short *player) { bool *cpuOut= new bool[64 * size]; short *gpuPG; short *gpuPlayer; bool *gpuOut; // allocate the mem printf("Allocating... \n \n"); CUDA_CHECK_RETURN(cudaMalloc((void **)&gpuPG, sizeof(short) * 64 * size)); CUDA_CHECK_RETURN(cudaMalloc((void **)&gpuOut, sizeof(bool) * 64 * size)); CUDA_CHECK_RETURN(cudaMalloc((void **)&gpuPlayer, sizeof(short) * size)); // copy the initial values CUDA_CHECK_RETURN(cudaMemcpy(gpuPG, pg, sizeof(short) * 64 * size, cudaMemcpyHostToDevice)); CUDA_CHECK_RETURN(cudaMemcpy(gpuOut, out, sizeof(bool) * 64 * size, cudaMemcpyHostToDevice)); CUDA_CHECK_RETURN(cudaMemcpy(gpuPlayer, player, sizeof(short) * size, cudaMemcpyHostToDevice)); const int blockCount = size; const dim3 BLOCK_SIZE(8, 8, 8); possibilityKernel<<<blockCount, BLOCK_SIZE>>> (gpuPG, gpuOut, gpuPlayer); // Wait for GPU to finish before accessing on host cudaDeviceSynchronize(); CUDA_CHECK_RETURN(cudaMemcpy(cpuOut, gpuOut, sizeof(bool) * 64 * size, cudaMemcpyDeviceToHost)); CUDA_CHECK_RETURN(cudaFree(gpuPG)); CUDA_CHECK_RETURN(cudaFree(gpuOut)); CUDA_CHECK_RETURN(cudaFree(gpuPlayer)); return cpuOut; } /** * Host function that copies the data and launches the work on GPU */ short *gpuPG(int size, short2 *poss, short *pg, short *player) { short *cpuPG= new short[64 * size]; short *gpuPG; short *gpuPlayer; short2 *gpuPoss; // allocate the mem printf("Allocating... 
\n \n"); CUDA_CHECK_RETURN(cudaMalloc((void **)&gpuPoss, sizeof(short2) * size)); CUDA_CHECK_RETURN(cudaMalloc((void **)&gpuPG, sizeof(short) * 64 * size)); CUDA_CHECK_RETURN(cudaMalloc((void **)&gpuPlayer, sizeof(short) * size)); // copy the initial values CUDA_CHECK_RETURN(cudaMemcpy(gpuPoss, poss, sizeof(short2) * size, cudaMemcpyHostToDevice)); CUDA_CHECK_RETURN(cudaMemcpy(gpuPG, pg, sizeof(short) * 64 * size, cudaMemcpyHostToDevice)); CUDA_CHECK_RETURN(cudaMemcpy(gpuPlayer, player, sizeof(short) * size, cudaMemcpyHostToDevice)); const int blockCount = size; const int blockSize = 8; cout << "Running with " << size << " pgs" << endl; changeKernel<<<blockCount, blockSize>>> (gpuPoss, gpuPG, gpuPlayer); // Wait for GPU to finish before accessing on host cudaError_t cudaerr = cudaDeviceSynchronize(); if (cudaerr != cudaSuccess) { printf("kernel launch failed with error \"%s\".\n", cudaGetErrorString(cudaerr)); } CUDA_CHECK_RETURN(cudaMemcpy(cpuPG, gpuPG, sizeof(short) * 64 * size, cudaMemcpyDeviceToHost)); CUDA_CHECK_RETURN(cudaFree(gpuPG)); CUDA_CHECK_RETURN(cudaFree(gpuPoss)); CUDA_CHECK_RETURN(cudaFree(gpuPlayer)); return cpuPG; } void initialize_pg(result r, short *data, short* player, int size){ for(int i = 0; i < size; ++i) { row row = r[i]; player[i] = (row[1].as<short>() % 2) * 2 -1; auto arr = row[3].as_array(); for(int j = 0; j < 64; j++) { string s = arr.get_next().second; if(s != "") { int content = std::stoi(s); data[i * 64 + j] = static_cast<short>(content); } else { j--; } } } } void initialize_poss(result r, short *data, short *player, short2 *poss, short* round,int *last_pg, int size) { for(int i = 0; i < size; i++) { row row = r[i]; poss[i] = make_short2(row[1].as<short>(), row[2].as<short>()); player[i] = (row[3].as<short>() % 2) * 2 -1; round[i] = row[3].as<short>(); last_pg[i] = row[0].as<int>(); // DATA auto arr = row[4].as_array(); for(int j = 0; j < 64; j++) { string s = arr.get_next().second; if(s != "") { int content = std::stoi(s); data[i * 64 + j] = static_cast<short>(content); } else { j--; } } } } void calculate_poss(pg pg) { // set the max size int size = 1; result r = pg.get_open_pg(size); // if we get less resize it size = r.size(); if(size == 0) { cout << "No results for poss" << endl; return; } short *data = new short[64 * size]; bool *out = (bool*) malloc(64 * size); short *player = new short(size); initialize_pg(r, data, player, size); bool *poss = gpuPoss(size, data, out, player); for(int i = 0; i < size; i++) { for(int j = 0; j < 64; j++) { if(poss[i * 64 + j]) { // TODO: add possibility int y = j / 8; int x = j % 8; int id = r[i][0].as<int>(); pg.insertPoss(id, x, y); } } } short gpuSum = std::accumulate (poss, poss + 64 * size, 0); /* Verify the results */ std::cout << "gpuSum = " << gpuSum; /* Free memory */ delete[] data; delete[] poss; } void calculate_pg(pg pg) { int size = 1; result r = pg.get_open_poss(size); size = r.size(); if(size == 0) { cout << "No results for pg" << endl; return; } short *data = new short[64 * size]; short *player = new short[size]; short2 *poss = new short2[size]; short *round = new short[size]; int *last_pg = new int[size]; initialize_poss(r, data, player, poss, round, last_pg, size); cout << "Poss(" << poss[0].x << " " << poss[0].y << ")" << endl; cout << "Player: " << player[0] << endl; short *gpu_pg = gpuPG(size, poss, data, player); pg.insertPlayground(gpu_pg, round, last_pg, poss, size); } int main(void) { pg pg; pg.connect(); //calculate_poss(pg); calculate_pg(pg); return 0; } /** * Check the return value of the 
CUDA runtime API call and exit * the application if the call has failed. */ static void CheckCudaErrorAux (const char *file, unsigned line, const char *statement, cudaError_t err) { if (err == cudaSuccess) return; std::cerr << statement<<" returned " << cudaGetErrorString(err) << "("<<err<< ") at "<<file<<":"<<line << std::endl; exit (1); }
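Both kernels in this pair encode the search direction in a single index d (threadIdx.z in possibilityKernel, threadIdx.x in changeKernel): dirX = d/3 == 2 ? 0 : d/3*2 - 1 and dirY = (d%3) == 2 ? 0 : (d%3)*2 - 1, so quotient and remainder of 3 are remapped from {0,1,2} to {-1,+1,0}. Enumerating d = 0..7 (the launches use exactly 8 threads in that dimension) confirms this covers the eight non-null Reversi directions, while d = 8 would have produced the unused (0,0):

d = 0 -> (-1,-1)    d = 1 -> (-1,+1)    d = 2 -> (-1, 0)    d = 3 -> (+1,-1)
d = 4 -> (+1,+1)    d = 5 -> (+1, 0)    d = 6 -> ( 0,-1)    d = 7 -> ( 0,+1)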
e84b87498d2737693a69d5b935df8b05a92477be.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "cuATrousWavelet.h" #include "complext.h" //#include "setup_grid.h" #include "cuNDArray_math.h" #include "cudaDeviceManager.h" using namespace Gadgetron; static inline void setup_grid( unsigned int number_of_elements, dim3 *blockDim, dim3* gridDim, unsigned int num_batches = 1 ) { int cur_device = cudaDeviceManager::Instance()->getCurrentDevice(); //int maxGridDim = cudaDeviceManager::Instance()->max_griddim(cur_device); int maxBlockDim = cudaDeviceManager::Instance()->max_blockdim(cur_device); int maxGridDim = 65535; // The default one-dimensional block dimension is... *blockDim = dim3(256); *gridDim = dim3((number_of_elements+blockDim->x-1)/blockDim->x, num_batches); // Extend block/grid dimensions if we exceeded the maximum grid dimension if( gridDim->x > maxGridDim){ blockDim->x = maxBlockDim; gridDim->x = (number_of_elements+blockDim->x-1)/blockDim->x; } if( gridDim->x > maxGridDim ){ gridDim->x = (unsigned int)::floor(std::sqrt(float(number_of_elements)/float(blockDim->x))); unsigned int num_elements_1d = blockDim->x*gridDim->x; gridDim->y *= ((number_of_elements+num_elements_1d-1)/num_elements_1d); } if( gridDim->x > maxGridDim || gridDim->y > maxGridDim){ // If this ever becomes an issue, there is an additional grid dimension to explore for compute models >= 2.0. throw cuda_error("setup_grid(): too many elements requested."); } } static __device__ void atomicAdd(float_complext * ptr, float_complext val){ atomicAdd((float*) ptr, real(val)); atomicAdd(((float*)ptr)+1,imag(val)); } template<class T> __global__ void aTrous_kernel(const T* __restrict__ image, T* __restrict__ out, int stepsize, int stride, int dim_length, typename realType<T>::Type * kernel, int kernel_length, int tot_elements){ const int idx = blockIdx.y*gridDim.x*blockDim.x + blockIdx.x*blockDim.x + threadIdx.x; if (idx < tot_elements){ T result = 0; const int dim_pos = (idx/stride)%dim_length; const int offset = idx-dim_pos*stride; for (int i = -kernel_length/2; i <= kernel_length/2; i++){ int pos = (dim_pos+i*stepsize+dim_length)%dim_length; result += image[pos*stride+offset]*kernel[i+kernel_length/2]; } atomicAdd(out+idx,result); } } template<class T> void Gadgetron::aTrousWavelet(cuNDArray<T>* in, cuNDArray<T>* out, thrust::device_vector<typename realType<T>::Type>* kernel, int stepsize,int dim, bool accumulate){ dim3 dimGrid,dimBlock; setup_grid(in->get_number_of_elements(),&dimBlock,&dimGrid,1); if (dim >= in->get_number_of_dimensions()) throw std::runtime_error("aTrousWavelet: input array has insufficient number of dimensions"); int max_grid = cudaDeviceManager::Instance()->max_griddim(); int stride = 1; for (int i = 0; i < dim; i++) stride *= in->get_size(i); if (!accumulate) clear(out); hipLaunchKernelGGL(( aTrous_kernel), dim3(dimGrid),dim3(dimBlock), 0, 0, in->get_data_ptr(), out->get_data_ptr(),stepsize,stride,in->get_size(dim),thrust::raw_pointer_cast(kernel->data()),kernel->size(),in->get_number_of_elements()); } template void Gadgetron::aTrousWavelet<float>(cuNDArray<float>*, cuNDArray<float>*, thrust::device_vector<float>*, int, int, bool); template void Gadgetron::aTrousWavelet<float_complext>(cuNDArray<float_complext>*, cuNDArray<float_complext>*, thrust::device_vector<float>*, int, int, bool);
e84b87498d2737693a69d5b935df8b05a92477be.cu
#include "cuATrousWavelet.h" #include "complext.h" //#include "setup_grid.h" #include "cuNDArray_math.h" #include "cudaDeviceManager.h" using namespace Gadgetron; static inline void setup_grid( unsigned int number_of_elements, dim3 *blockDim, dim3* gridDim, unsigned int num_batches = 1 ) { int cur_device = cudaDeviceManager::Instance()->getCurrentDevice(); //int maxGridDim = cudaDeviceManager::Instance()->max_griddim(cur_device); int maxBlockDim = cudaDeviceManager::Instance()->max_blockdim(cur_device); int maxGridDim = 65535; // The default one-dimensional block dimension is... *blockDim = dim3(256); *gridDim = dim3((number_of_elements+blockDim->x-1)/blockDim->x, num_batches); // Extend block/grid dimensions if we exceeded the maximum grid dimension if( gridDim->x > maxGridDim){ blockDim->x = maxBlockDim; gridDim->x = (number_of_elements+blockDim->x-1)/blockDim->x; } if( gridDim->x > maxGridDim ){ gridDim->x = (unsigned int)std::floor(std::sqrt(float(number_of_elements)/float(blockDim->x))); unsigned int num_elements_1d = blockDim->x*gridDim->x; gridDim->y *= ((number_of_elements+num_elements_1d-1)/num_elements_1d); } if( gridDim->x > maxGridDim || gridDim->y > maxGridDim){ // If this ever becomes an issue, there is an additional grid dimension to explore for compute models >= 2.0. throw cuda_error("setup_grid(): too many elements requested."); } } static __device__ void atomicAdd(float_complext * ptr, float_complext val){ atomicAdd((float*) ptr, real(val)); atomicAdd(((float*)ptr)+1,imag(val)); } template<class T> __global__ void aTrous_kernel(const T* __restrict__ image, T* __restrict__ out, int stepsize, int stride, int dim_length, typename realType<T>::Type * kernel, int kernel_length, int tot_elements){ const int idx = blockIdx.y*gridDim.x*blockDim.x + blockIdx.x*blockDim.x + threadIdx.x; if (idx < tot_elements){ T result = 0; const int dim_pos = (idx/stride)%dim_length; const int offset = idx-dim_pos*stride; for (int i = -kernel_length/2; i <= kernel_length/2; i++){ int pos = (dim_pos+i*stepsize+dim_length)%dim_length; result += image[pos*stride+offset]*kernel[i+kernel_length/2]; } atomicAdd(out+idx,result); } } template<class T> void Gadgetron::aTrousWavelet(cuNDArray<T>* in, cuNDArray<T>* out, thrust::device_vector<typename realType<T>::Type>* kernel, int stepsize,int dim, bool accumulate){ dim3 dimGrid,dimBlock; setup_grid(in->get_number_of_elements(),&dimBlock,&dimGrid,1); if (dim >= in->get_number_of_dimensions()) throw std::runtime_error("aTrousWavelet: input array has insufficient number of dimensions"); int max_grid = cudaDeviceManager::Instance()->max_griddim(); int stride = 1; for (int i = 0; i < dim; i++) stride *= in->get_size(i); if (!accumulate) clear(out); aTrous_kernel<<<dimGrid,dimBlock>>>(in->get_data_ptr(), out->get_data_ptr(),stepsize,stride,in->get_size(dim),thrust::raw_pointer_cast(kernel->data()),kernel->size(),in->get_number_of_elements()); } template void Gadgetron::aTrousWavelet<float>(cuNDArray<float>*, cuNDArray<float>*, thrust::device_vector<float>*, int, int, bool); template void Gadgetron::aTrousWavelet<float_complext>(cuNDArray<float_complext>*, cuNDArray<float_complext>*, thrust::device_vector<float>*, int, int, bool);