Columns (string lengths, min–max):
  hip_filename   string, 5–84
  hip_content    string, 79–9.69M
  cuda_filename  string, 4–83
  cuda_content   string, 19–9.69M
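A minimal sketch of consuming these paired records, assuming the rows below have been exported as a JSON-Lines file with the four string fields from the schema; the file name "pairs.jsonl" and the helper iter_pairs are hypothetical, not part of the dataset itself.

```python
# Sketch only: assumes one JSON object per line with the four schema fields.
import json

def iter_pairs(path):
    """Yield (hip_filename, hip_content, cuda_filename, cuda_content) tuples."""
    with open(path, encoding="utf-8") as fh:
        for line in fh:
            row = json.loads(line)
            yield (row["hip_filename"], row["hip_content"],
                   row["cuda_filename"], row["cuda_content"])

if __name__ == "__main__":
    for hip_name, hip_src, cuda_name, cuda_src in iter_pairs("pairs.jsonl"):
        # In the rows shown here the paired .hip and .cu files share the same
        # hash stem (e.g. 32c11c1f....hip <-> 32c11c1f....cu).
        assert hip_name.rsplit(".", 1)[0] == cuda_name.rsplit(".", 1)[0]
        print(f"{hip_name}: {len(hip_src)} chars; {cuda_name}: {len(cuda_src)} chars")
```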
32c11c1f3be6569871eef9780a15ed915533fa9d.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <vector> #include "gtest/gtest.h" #include "caffe/blob.hpp" #include "caffe/common.hpp" #include "caffe/filler.hpp" #include "caffe/layers/im2col_layer.hpp" #include "caffe/util/im2col.hpp" #include "caffe/test/test_caffe_main.hpp" namespace caffe { // Forward declare kernel functions template <typename Dtype> __global__ void im2col_gpu_kernel(const int n, const Dtype* data_im, const int height, const int width, const int kernel_h, const int kernel_w, const int pad_h, const int pad_w, const int stride_h, const int stride_w, const int dilation_h, const int dilation_w, const int height_col, const int width_col, Dtype* data_col); template <typename Dtype, int num_axes> __global__ void im2col_nd_gpu_kernel(const int n, const Dtype* data_im, const int* im_shape, const int* col_shape, const int* kernel_shape, const int* pad, const int* stride, const int* dilation, Dtype* data_col); extern hipDeviceProp_t CAFFE_TEST_CUDA_PROP; template <typename Dtype> class Im2colKernelTest : public GPUDeviceTest<Dtype> { protected: Im2colKernelTest() // big so launches > 1024 threads : blob_bottom_(new Blob<Dtype>(5, 500, 15, 15)), blob_kernel_shape_(new Blob<int>()), blob_stride_(new Blob<int>()), blob_pad_(new Blob<int>()), blob_dilation_(new Blob<int>()), blob_top_(new Blob<Dtype>()), blob_top_cpu_(new Blob<Dtype>()) { FillerParameter filler_param; GaussianFiller<Dtype> filler(filler_param); filler.Fill(this->blob_bottom_); vector<int> dim_blob_shape(1, 2); blob_kernel_shape_->Reshape(dim_blob_shape); blob_stride_->Reshape(dim_blob_shape); blob_pad_->Reshape(dim_blob_shape); blob_dilation_->Reshape(dim_blob_shape); height_ = blob_bottom_->height(); width_ = blob_bottom_->width(); channels_ = blob_bottom_->channels(); pad_ = 0; stride_ = 2; dilation_ = 3; kernel_size_ = 3; height_col_ = (height_ + 2 * pad_ - (dilation_ * (kernel_size_ - 1) + 1)) / stride_ + 1; width_col_ = (width_ + 2 * pad_ - (dilation_ * (kernel_size_ - 1) + 1)) / stride_ + 1; for (int i = 0; i < 2; ++i) { blob_kernel_shape_->mutable_cpu_data()[i] = kernel_size_; blob_stride_->mutable_cpu_data()[i] = stride_; blob_pad_->mutable_cpu_data()[i] = pad_; blob_dilation_->mutable_cpu_data()[i] = dilation_; } } virtual ~Im2colKernelTest() { delete blob_bottom_; delete blob_top_; delete blob_top_cpu_; delete blob_kernel_shape_; delete blob_stride_; delete blob_pad_; delete blob_dilation_; } Blob<int>* const blob_kernel_shape_; Blob<int>* const blob_stride_; Blob<int>* const blob_pad_; Blob<int>* const blob_dilation_; Blob<Dtype>* const blob_bottom_; Blob<Dtype>* const blob_top_; Blob<Dtype>* const blob_top_cpu_; int height_; int width_; int channels_; int pad_; int stride_; int dilation_; int kernel_size_; int height_col_; int width_col_; }; TYPED_TEST_CASE(Im2colKernelTest, TestDtypes); TYPED_TEST(Im2colKernelTest, Test2D) { // Reshape the blobs to correct size for im2col output this->blob_top_->Reshape(this->blob_bottom_->num(), this->channels_ * this->kernel_size_ * this->kernel_size_, this->height_col_, this->width_col_); this->blob_top_cpu_->Reshape(this->blob_bottom_->num(), this->channels_ * this->kernel_size_ * this->kernel_size_, this->height_col_, this->width_col_); const TypeParam* bottom_data = this->blob_bottom_->gpu_data(); TypeParam* top_data = this->blob_top_->mutable_gpu_data(); TypeParam* cpu_data = this->blob_top_cpu_->mutable_cpu_data(); // CPU Version for (int n = 0; n < this->blob_bottom_->num(); ++n) { 
im2col_cpu(this->blob_bottom_->cpu_data() + this->blob_bottom_->offset(n), this->channels_, this->height_, this->width_, this->kernel_size_, this->kernel_size_, this->pad_, this->pad_, this->stride_, this->stride_, this->dilation_, this->dilation_, cpu_data + this->blob_top_cpu_->offset(n)); } // GPU version int num_kernels = this->channels_ * this->height_col_ * this->width_col_; int default_grid_dim = CAFFE_GET_BLOCKS(num_kernels); // Launch with different grid sizes for (int grid_div = 2; grid_div <= 4; grid_div++) { for (int n = 0; n < this->blob_bottom_->num(); ++n) { int grid_dim = default_grid_dim/grid_div; LOG(INFO) << "grid_dim: " << grid_dim; // NOLINT_NEXT_LINE(whitespace/operators) hipLaunchKernelGGL(( im2col_gpu_kernel<TypeParam>), dim3(grid_dim), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, num_kernels, bottom_data + this->blob_bottom_->offset(n), this->height_, this->width_, this->kernel_size_, this->kernel_size_, this->pad_, this->pad_, this->stride_, this->stride_, this->dilation_, this->dilation_, this->height_col_, this->width_col_, top_data + this->blob_top_->offset(n)); CUDA_POST_KERNEL_CHECK; } // Compare results against CPU version for (int i = 0; i < this->blob_top_->count(); ++i) { TypeParam cpuval = cpu_data[i]; TypeParam gpuval = this->blob_top_->cpu_data()[i]; EXPECT_EQ(cpuval, gpuval); if (cpuval != gpuval) { break; } } } } TYPED_TEST(Im2colKernelTest, TestND) { // Reshape the blobs to correct size for im2col output this->blob_top_->Reshape(this->blob_bottom_->num(), this->channels_ * this->kernel_size_ * this->kernel_size_, this->height_col_, this->width_col_); this->blob_top_cpu_->ReshapeLike(*this->blob_top_); const TypeParam* bottom_data_cpu = this->blob_bottom_->cpu_data(); TypeParam* top_data_cpu = this->blob_top_cpu_->mutable_cpu_data(); // CPU Version for (int n = 0; n < this->blob_bottom_->num(); ++n) { im2col_nd_cpu(bottom_data_cpu + this->blob_bottom_->offset(n), 2, this->blob_bottom_->shape().data() + 1, this->blob_top_cpu_->shape().data() + 1, this->blob_kernel_shape_->cpu_data(), this->blob_pad_->cpu_data(), this->blob_stride_->cpu_data(), this->blob_dilation_->cpu_data(), top_data_cpu + this->blob_top_cpu_->offset(n)); } // GPU version int num_kernels = this->channels_ * this->height_col_ * this->width_col_; int default_grid_dim = CAFFE_GET_BLOCKS(num_kernels); const TypeParam* bottom_data_gpu = this->blob_bottom_->gpu_data(); // Launch with different grid sizes for (int grid_div = 2; grid_div <= 8; grid_div++) { for (int n = 0; n < this->blob_bottom_->num(); ++n) { const int grid_dim = default_grid_dim / grid_div; TypeParam* top_data_gpu = this->blob_top_->mutable_gpu_data(); // NOLINT_NEXT_LINE(whitespace/operators) hipLaunchKernelGGL(( im2col_nd_gpu_kernel<TypeParam, 2>), dim3(grid_dim), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, num_kernels, bottom_data_gpu + this->blob_bottom_->offset(n), this->blob_bottom_->gpu_shape() + 1, this->blob_top_->gpu_shape() + 1, this->blob_kernel_shape_->gpu_data(), this->blob_pad_->gpu_data(), this->blob_stride_->gpu_data(), this->blob_dilation_->gpu_data(), top_data_gpu + this->blob_top_->offset(n)); CUDA_POST_KERNEL_CHECK; } // Compare results against CPU version for (int i = 0; i < this->blob_top_->count(); ++i) { TypeParam cpuval = top_data_cpu[i]; TypeParam gpuval = this->blob_top_->cpu_data()[i]; EXPECT_EQ(cpuval, gpuval); if (cpuval != gpuval) { break; } } } } } // namespace caffe
32c11c1f3be6569871eef9780a15ed915533fa9d.cu
#include <vector> #include "gtest/gtest.h" #include "caffe/blob.hpp" #include "caffe/common.hpp" #include "caffe/filler.hpp" #include "caffe/layers/im2col_layer.hpp" #include "caffe/util/im2col.hpp" #include "caffe/test/test_caffe_main.hpp" namespace caffe { // Forward declare kernel functions template <typename Dtype> __global__ void im2col_gpu_kernel(const int n, const Dtype* data_im, const int height, const int width, const int kernel_h, const int kernel_w, const int pad_h, const int pad_w, const int stride_h, const int stride_w, const int dilation_h, const int dilation_w, const int height_col, const int width_col, Dtype* data_col); template <typename Dtype, int num_axes> __global__ void im2col_nd_gpu_kernel(const int n, const Dtype* data_im, const int* im_shape, const int* col_shape, const int* kernel_shape, const int* pad, const int* stride, const int* dilation, Dtype* data_col); extern cudaDeviceProp CAFFE_TEST_CUDA_PROP; template <typename Dtype> class Im2colKernelTest : public GPUDeviceTest<Dtype> { protected: Im2colKernelTest() // big so launches > 1024 threads : blob_bottom_(new Blob<Dtype>(5, 500, 15, 15)), blob_kernel_shape_(new Blob<int>()), blob_stride_(new Blob<int>()), blob_pad_(new Blob<int>()), blob_dilation_(new Blob<int>()), blob_top_(new Blob<Dtype>()), blob_top_cpu_(new Blob<Dtype>()) { FillerParameter filler_param; GaussianFiller<Dtype> filler(filler_param); filler.Fill(this->blob_bottom_); vector<int> dim_blob_shape(1, 2); blob_kernel_shape_->Reshape(dim_blob_shape); blob_stride_->Reshape(dim_blob_shape); blob_pad_->Reshape(dim_blob_shape); blob_dilation_->Reshape(dim_blob_shape); height_ = blob_bottom_->height(); width_ = blob_bottom_->width(); channels_ = blob_bottom_->channels(); pad_ = 0; stride_ = 2; dilation_ = 3; kernel_size_ = 3; height_col_ = (height_ + 2 * pad_ - (dilation_ * (kernel_size_ - 1) + 1)) / stride_ + 1; width_col_ = (width_ + 2 * pad_ - (dilation_ * (kernel_size_ - 1) + 1)) / stride_ + 1; for (int i = 0; i < 2; ++i) { blob_kernel_shape_->mutable_cpu_data()[i] = kernel_size_; blob_stride_->mutable_cpu_data()[i] = stride_; blob_pad_->mutable_cpu_data()[i] = pad_; blob_dilation_->mutable_cpu_data()[i] = dilation_; } } virtual ~Im2colKernelTest() { delete blob_bottom_; delete blob_top_; delete blob_top_cpu_; delete blob_kernel_shape_; delete blob_stride_; delete blob_pad_; delete blob_dilation_; } Blob<int>* const blob_kernel_shape_; Blob<int>* const blob_stride_; Blob<int>* const blob_pad_; Blob<int>* const blob_dilation_; Blob<Dtype>* const blob_bottom_; Blob<Dtype>* const blob_top_; Blob<Dtype>* const blob_top_cpu_; int height_; int width_; int channels_; int pad_; int stride_; int dilation_; int kernel_size_; int height_col_; int width_col_; }; TYPED_TEST_CASE(Im2colKernelTest, TestDtypes); TYPED_TEST(Im2colKernelTest, Test2D) { // Reshape the blobs to correct size for im2col output this->blob_top_->Reshape(this->blob_bottom_->num(), this->channels_ * this->kernel_size_ * this->kernel_size_, this->height_col_, this->width_col_); this->blob_top_cpu_->Reshape(this->blob_bottom_->num(), this->channels_ * this->kernel_size_ * this->kernel_size_, this->height_col_, this->width_col_); const TypeParam* bottom_data = this->blob_bottom_->gpu_data(); TypeParam* top_data = this->blob_top_->mutable_gpu_data(); TypeParam* cpu_data = this->blob_top_cpu_->mutable_cpu_data(); // CPU Version for (int n = 0; n < this->blob_bottom_->num(); ++n) { im2col_cpu(this->blob_bottom_->cpu_data() + this->blob_bottom_->offset(n), this->channels_, this->height_, 
this->width_, this->kernel_size_, this->kernel_size_, this->pad_, this->pad_, this->stride_, this->stride_, this->dilation_, this->dilation_, cpu_data + this->blob_top_cpu_->offset(n)); } // GPU version int num_kernels = this->channels_ * this->height_col_ * this->width_col_; int default_grid_dim = CAFFE_GET_BLOCKS(num_kernels); // Launch with different grid sizes for (int grid_div = 2; grid_div <= 4; grid_div++) { for (int n = 0; n < this->blob_bottom_->num(); ++n) { int grid_dim = default_grid_dim/grid_div; LOG(INFO) << "grid_dim: " << grid_dim; // NOLINT_NEXT_LINE(whitespace/operators) im2col_gpu_kernel<TypeParam><<<grid_dim, CAFFE_CUDA_NUM_THREADS>>>( num_kernels, bottom_data + this->blob_bottom_->offset(n), this->height_, this->width_, this->kernel_size_, this->kernel_size_, this->pad_, this->pad_, this->stride_, this->stride_, this->dilation_, this->dilation_, this->height_col_, this->width_col_, top_data + this->blob_top_->offset(n)); CUDA_POST_KERNEL_CHECK; } // Compare results against CPU version for (int i = 0; i < this->blob_top_->count(); ++i) { TypeParam cpuval = cpu_data[i]; TypeParam gpuval = this->blob_top_->cpu_data()[i]; EXPECT_EQ(cpuval, gpuval); if (cpuval != gpuval) { break; } } } } TYPED_TEST(Im2colKernelTest, TestND) { // Reshape the blobs to correct size for im2col output this->blob_top_->Reshape(this->blob_bottom_->num(), this->channels_ * this->kernel_size_ * this->kernel_size_, this->height_col_, this->width_col_); this->blob_top_cpu_->ReshapeLike(*this->blob_top_); const TypeParam* bottom_data_cpu = this->blob_bottom_->cpu_data(); TypeParam* top_data_cpu = this->blob_top_cpu_->mutable_cpu_data(); // CPU Version for (int n = 0; n < this->blob_bottom_->num(); ++n) { im2col_nd_cpu(bottom_data_cpu + this->blob_bottom_->offset(n), 2, this->blob_bottom_->shape().data() + 1, this->blob_top_cpu_->shape().data() + 1, this->blob_kernel_shape_->cpu_data(), this->blob_pad_->cpu_data(), this->blob_stride_->cpu_data(), this->blob_dilation_->cpu_data(), top_data_cpu + this->blob_top_cpu_->offset(n)); } // GPU version int num_kernels = this->channels_ * this->height_col_ * this->width_col_; int default_grid_dim = CAFFE_GET_BLOCKS(num_kernels); const TypeParam* bottom_data_gpu = this->blob_bottom_->gpu_data(); // Launch with different grid sizes for (int grid_div = 2; grid_div <= 8; grid_div++) { for (int n = 0; n < this->blob_bottom_->num(); ++n) { const int grid_dim = default_grid_dim / grid_div; TypeParam* top_data_gpu = this->blob_top_->mutable_gpu_data(); // NOLINT_NEXT_LINE(whitespace/operators) im2col_nd_gpu_kernel<TypeParam, 2><<<grid_dim, CAFFE_CUDA_NUM_THREADS>>>( num_kernels, bottom_data_gpu + this->blob_bottom_->offset(n), this->blob_bottom_->gpu_shape() + 1, this->blob_top_->gpu_shape() + 1, this->blob_kernel_shape_->gpu_data(), this->blob_pad_->gpu_data(), this->blob_stride_->gpu_data(), this->blob_dilation_->gpu_data(), top_data_gpu + this->blob_top_->offset(n)); CUDA_POST_KERNEL_CHECK; } // Compare results against CPU version for (int i = 0; i < this->blob_top_->count(); ++i) { TypeParam cpuval = top_data_cpu[i]; TypeParam gpuval = this->blob_top_->cpu_data()[i]; EXPECT_EQ(cpuval, gpuval); if (cpuval != gpuval) { break; } } } } } // namespace caffe
cae02937ab9f3f56bb1dd45f9adba2fa9cf5fdaa.hip
// !!! This is a file automatically generated by hipify!!! /* * * Copyright 1993-2012 NVIDIA Corporation. All rights reserved. * * Please refer to the NVIDIA end user license agreement (EULA) associated * with this source code for terms and conditions that govern your use of * this software. Any use, reproduction, disclosure, or distribution of * this software and related documentation outside the terms of the EULA * is strictly prohibited. */ #include <hip/hip_runtime.h> #include <hip/hip_runtime.h> #include <stdio.h> #include <stdlib.h> #include "common.h" #include "ppm.h" #include "convolution_gpu_tmem.h" #define BLOCK_SIZE 32 texture<unsigned int, hipTextureType1D, hipReadModeElementType> texRefImg; __global__ void ConvolveHGPUTMem(unsigned int *dst, const float *kernel, int kernelSize, int w, int h) { int bx = blockIdx.x; int by = blockIdx.y; int tx = threadIdx.x; int ty = threadIdx.y; int row = by * blockDim.y + ty; int col = bx * blockDim.x + tx; float finalRed = 0.0f; float finalGreen = 0.0f; float finalBlue = 0.0f; for (int i = 0; i < kernelSize; i++) { int px = col + (i - kernelSize/2); // Clamp to [0, w-1] px = MIN(px, w-1); px = MAX(px, 0); //unsigned int pixel = src[row * w + px]; unsigned int pixel = tex1Dfetch(texRefImg, row * w + px); unsigned char r = pixel & 0x000000ff; unsigned char g = (pixel & 0x0000ff00) >> 8; unsigned char b = (pixel & 0x00ff0000) >> 16; finalRed += r * kernel[i]; finalGreen += g * kernel[i]; finalBlue += b * kernel[i]; } unsigned char finalRed_uc = roundf(finalRed); unsigned char finalGreen_uc = roundf(finalGreen); unsigned char finalBlue_uc = roundf(finalBlue); unsigned int finalPixel = finalRed_uc | (finalGreen_uc << 8) | (finalBlue_uc << 16); dst[row * w + col] = finalPixel; } __global__ void ConvolveVGPUTMem(unsigned int *dst, const float *kernel, int kernelSize, int w, int h) { int bx = blockIdx.x; int by = blockIdx.y; int tx = threadIdx.x; int ty = threadIdx.y; int row = by * blockDim.y + ty; int col = bx * blockDim.x + tx; float finalRed = 0.0f; float finalGreen = 0.0f; float finalBlue = 0.0f; for (int i = 0; i < kernelSize; i++) { int py = row + (i - kernelSize/2); // Clamp to [0, h-1] py = MIN(py, h-1); py = MAX(py, 0); unsigned int pixel = tex1Dfetch(texRefImg, py * w + col); unsigned char r = pixel & 0x000000ff; unsigned char g = (pixel & 0x0000ff00) >> 8; unsigned char b = (pixel & 0x00ff0000) >> 16; finalRed += r * kernel[i]; finalGreen += g * kernel[i]; finalBlue += b * kernel[i]; } unsigned char finalRed_uc = roundf(finalRed); unsigned char finalGreen_uc = roundf(finalGreen); unsigned char finalBlue_uc = roundf(finalBlue); unsigned int finalPixel = finalRed_uc | (finalGreen_uc << 8) | (finalBlue_uc << 16); dst[row * w + col] = finalPixel; } void ApplyFilterGPUTMem(PPMImage &destImg, PPMImage &srcImg, const float * kernel, unsigned int kernelSize) { CUDA_SUCCEEDED(hipBindTexture(0, texRefImg, srcImg.data, srcImg.height*srcImg.width*sizeof(unsigned int))); dim3 dimBlock(BLOCK_SIZE, BLOCK_SIZE); dim3 dimGrid(divUp(srcImg.width,BLOCK_SIZE),divUp(srcImg.height,BLOCK_SIZE)); hipLaunchKernelGGL(( ConvolveHGPUTMem), dim3(dimGrid), dim3(dimBlock), 0, 0, destImg.data, kernel, kernelSize, srcImg.width, srcImg.height); hipError_t error = hipGetLastError(); if(error != hipSuccess) { // print the CUDA error message and exit printf("CUDA error: %s\n", hipGetErrorString(error)); } CUDA_SUCCEEDED(hipUnbindTexture(texRefImg)); unsigned int * bk; bk = srcImg.data; srcImg.data = destImg.data; destImg.data =bk; CUDA_SUCCEEDED(hipBindTexture(0, texRefImg, 
srcImg.data, destImg.height*destImg.width*sizeof(unsigned int))); hipLaunchKernelGGL(( ConvolveVGPUTMem), dim3(dimGrid), dim3(dimBlock), 0, 0, destImg.data, kernel, kernelSize, srcImg.width, srcImg.height); error = hipGetLastError(); if(error != hipSuccess) { // print the CUDA error message and exit printf("CUDA error: %s\n", hipGetErrorString(error)); } CUDA_SUCCEEDED(hipUnbindTexture(texRefImg)); }
cae02937ab9f3f56bb1dd45f9adba2fa9cf5fdaa.cu
/* * * Copyright 1993-2012 NVIDIA Corporation. All rights reserved. * * Please refer to the NVIDIA end user license agreement (EULA) associated * with this source code for terms and conditions that govern your use of * this software. Any use, reproduction, disclosure, or distribution of * this software and related documentation outside the terms of the EULA * is strictly prohibited. */ #include <cuda_runtime.h> #include <cuda.h> #include <stdio.h> #include <stdlib.h> #include "common.h" #include "ppm.h" #include "convolution_gpu_tmem.h" #define BLOCK_SIZE 32 texture<unsigned int, cudaTextureType1D, cudaReadModeElementType> texRefImg; __global__ void ConvolveHGPUTMem(unsigned int *dst, const float *kernel, int kernelSize, int w, int h) { int bx = blockIdx.x; int by = blockIdx.y; int tx = threadIdx.x; int ty = threadIdx.y; int row = by * blockDim.y + ty; int col = bx * blockDim.x + tx; float finalRed = 0.0f; float finalGreen = 0.0f; float finalBlue = 0.0f; for (int i = 0; i < kernelSize; i++) { int px = col + (i - kernelSize/2); // Clamp to [0, w-1] px = MIN(px, w-1); px = MAX(px, 0); //unsigned int pixel = src[row * w + px]; unsigned int pixel = tex1Dfetch(texRefImg, row * w + px); unsigned char r = pixel & 0x000000ff; unsigned char g = (pixel & 0x0000ff00) >> 8; unsigned char b = (pixel & 0x00ff0000) >> 16; finalRed += r * kernel[i]; finalGreen += g * kernel[i]; finalBlue += b * kernel[i]; } unsigned char finalRed_uc = roundf(finalRed); unsigned char finalGreen_uc = roundf(finalGreen); unsigned char finalBlue_uc = roundf(finalBlue); unsigned int finalPixel = finalRed_uc | (finalGreen_uc << 8) | (finalBlue_uc << 16); dst[row * w + col] = finalPixel; } __global__ void ConvolveVGPUTMem(unsigned int *dst, const float *kernel, int kernelSize, int w, int h) { int bx = blockIdx.x; int by = blockIdx.y; int tx = threadIdx.x; int ty = threadIdx.y; int row = by * blockDim.y + ty; int col = bx * blockDim.x + tx; float finalRed = 0.0f; float finalGreen = 0.0f; float finalBlue = 0.0f; for (int i = 0; i < kernelSize; i++) { int py = row + (i - kernelSize/2); // Clamp to [0, h-1] py = MIN(py, h-1); py = MAX(py, 0); unsigned int pixel = tex1Dfetch(texRefImg, py * w + col); unsigned char r = pixel & 0x000000ff; unsigned char g = (pixel & 0x0000ff00) >> 8; unsigned char b = (pixel & 0x00ff0000) >> 16; finalRed += r * kernel[i]; finalGreen += g * kernel[i]; finalBlue += b * kernel[i]; } unsigned char finalRed_uc = roundf(finalRed); unsigned char finalGreen_uc = roundf(finalGreen); unsigned char finalBlue_uc = roundf(finalBlue); unsigned int finalPixel = finalRed_uc | (finalGreen_uc << 8) | (finalBlue_uc << 16); dst[row * w + col] = finalPixel; } void ApplyFilterGPUTMem(PPMImage &destImg, PPMImage &srcImg, const float * kernel, unsigned int kernelSize) { CUDA_SUCCEEDED(cudaBindTexture(0, texRefImg, srcImg.data, srcImg.height*srcImg.width*sizeof(unsigned int))); dim3 dimBlock(BLOCK_SIZE, BLOCK_SIZE); dim3 dimGrid(divUp(srcImg.width,BLOCK_SIZE),divUp(srcImg.height,BLOCK_SIZE)); ConvolveHGPUTMem<<<dimGrid, dimBlock>>>(destImg.data, kernel, kernelSize, srcImg.width, srcImg.height); cudaError_t error = cudaGetLastError(); if(error != cudaSuccess) { // print the CUDA error message and exit printf("CUDA error: %s\n", cudaGetErrorString(error)); } CUDA_SUCCEEDED(cudaUnbindTexture(texRefImg)); unsigned int * bk; bk = srcImg.data; srcImg.data = destImg.data; destImg.data =bk; CUDA_SUCCEEDED(cudaBindTexture(0, texRefImg, srcImg.data, destImg.height*destImg.width*sizeof(unsigned int))); ConvolveVGPUTMem<<<dimGrid, 
dimBlock>>>(destImg.data, kernel, kernelSize, srcImg.width, srcImg.height); error = cudaGetLastError(); if(error != cudaSuccess) { // print the CUDA error message and exit printf("CUDA error: %s\n", cudaGetErrorString(error)); } CUDA_SUCCEEDED(cudaUnbindTexture(texRefImg)); }
f524489ff36cdf8d3e59501ce5fdbf25162d26ea.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <wb.h> #define wbCheck(stmt) \ do { \ hipError_t err = stmt; \ if (err != hipSuccess) { \ wbLog(ERROR, "Failed to run stmt ", #stmt); \ wbLog(ERROR, "Got CUDA error ... ", hipGetErrorString(err)); \ return -1; \ } \ } while (0) #define TILE_WIDTH 16 // Compute C = A * B __global__ void matrixMultiplyShared(float *A, float *B, float *C, int numARows, int numAColumns, int numBRows, int numBColumns, int numCRows, int numCColumns) { __shared__ float subTileM[TILE_WIDTH][TILE_WIDTH]; __shared__ float subTileN[TILE_WIDTH][TILE_WIDTH]; int bx = blockIdx.x; int by = blockIdx.y; int tx = threadIdx.x; int ty = threadIdx.y; int Row = by * TILE_WIDTH + ty; int Col = bx * TILE_WIDTH + tx; float Pvalue = 0; for(int m=0; m < (numAColumns-1)/TILE_WIDTH + 1; ++m) { if(Row < numARows && m*TILE_WIDTH+tx < numAColumns) { subTileM[ty][tx] = A[Row*numAColumns + m*TILE_WIDTH + tx]; } else { subTileM[ty][tx] = 0; } if(m*TILE_WIDTH+ty < numBRows && Col < numBColumns ) { subTileN[ty][tx] = B[(m*TILE_WIDTH+ty)*numBColumns + Col]; } else { subTileN[ty][tx] = 0; } __syncthreads(); if(Row < numARows && Col < numBColumns) { for(int k=0; k<TILE_WIDTH; k++) Pvalue += subTileM[ty][k] * subTileN[k][tx]; } __syncthreads(); } if (Row < numCRows && Col < numCColumns) C[Row*numCColumns + Col] = Pvalue; } int main(int argc, char **argv) { wbArg_t args; float *hostA; // The A matrix float *hostB; // The B matrix float *hostC; // The output C matrix float *deviceA; float *deviceB; float *deviceC; int numARows; // number of rows in the matrix A int numAColumns; // number of columns in the matrix A int numBRows; // number of rows in the matrix B int numBColumns; // number of columns in the matrix B int numCRows; // number of rows in the matrix C (you have to set this) int numCColumns; // number of columns in the matrix C (you have to set // this) args = wbArg_read(argc, argv); wbTime_start(Generic, "Importing data and creating memory on host"); hostA = (float *)wbImport(wbArg_getInputFile(args, 0), &numARows, &numAColumns); hostB = (float *)wbImport(wbArg_getInputFile(args, 1), &numBRows, &numBColumns); //@@ Set numCRows and numCColumns numCRows = numARows; numCColumns = numBColumns; //@@ Allocate the hostC matrix hostC = (float *)malloc(numCRows * numCColumns * sizeof(float)); wbTime_stop(Generic, "Importing data and creating memory on host"); wbLog(TRACE, "The dimensions of A are ", numARows, " x ", numAColumns); wbLog(TRACE, "The dimensions of B are ", numBRows, " x ", numBColumns); int sizeA = numARows * numAColumns * sizeof(float); int sizeB = numBRows * numBColumns * sizeof(float); int sizeC = numCRows * numCColumns * sizeof(float); wbTime_start(GPU, "Allocating GPU memory."); //@@ Allocate GPU memory here hipMalloc((void **) &deviceA, sizeA); hipMalloc((void **) &deviceB, sizeB); hipMalloc((void **) &deviceC, sizeC); wbTime_stop(GPU, "Allocating GPU memory."); wbTime_start(GPU, "Copying input memory to the GPU."); //@@ Copy memory to the GPU here hipMemcpy(deviceA, hostA, sizeA, hipMemcpyHostToDevice); hipMemcpy(deviceB, hostB, sizeB, hipMemcpyHostToDevice); wbTime_stop(GPU, "Copying input memory to the GPU."); //@@ Initialize the grid and block dimensions here dim3 dimGrid(numCColumns/TILE_WIDTH, numCRows/TILE_WIDTH, 1); if(numCColumns%TILE_WIDTH) dimGrid.x++; if(numCRows%TILE_WIDTH) dimGrid.y++; dim3 dimBlock(TILE_WIDTH, TILE_WIDTH, 1); wbTime_start(Compute, "Performing CUDA computation"); //@@ Launch the GPU Kernel here 
hipLaunchKernelGGL(( matrixMultiplyShared), dim3(dimGrid), dim3(dimBlock), 0, 0, deviceA, deviceB, deviceC, numARows, numAColumns, numBRows, numBColumns, numCRows, numCColumns); hipDeviceSynchronize(); wbTime_stop(Compute, "Performing CUDA computation"); wbTime_start(Copy, "Copying output memory to the CPU"); //@@ Copy the GPU memory back to the CPU here hipMemcpy(hostC, deviceC, sizeC, hipMemcpyDeviceToHost); wbTime_stop(Copy, "Copying output memory to the CPU"); wbTime_start(GPU, "Freeing GPU Memory"); //@@ Free the GPU memory here hipFree(deviceA); hipFree(deviceB); hipFree(deviceC); wbTime_stop(GPU, "Freeing GPU Memory"); wbSolution(args, hostC, numCRows, numCColumns); free(hostA); free(hostB); free(hostC); return 0; }
f524489ff36cdf8d3e59501ce5fdbf25162d26ea.cu
#include <wb.h> #define wbCheck(stmt) \ do { \ cudaError_t err = stmt; \ if (err != cudaSuccess) { \ wbLog(ERROR, "Failed to run stmt ", #stmt); \ wbLog(ERROR, "Got CUDA error ... ", cudaGetErrorString(err)); \ return -1; \ } \ } while (0) #define TILE_WIDTH 16 // Compute C = A * B __global__ void matrixMultiplyShared(float *A, float *B, float *C, int numARows, int numAColumns, int numBRows, int numBColumns, int numCRows, int numCColumns) { __shared__ float subTileM[TILE_WIDTH][TILE_WIDTH]; __shared__ float subTileN[TILE_WIDTH][TILE_WIDTH]; int bx = blockIdx.x; int by = blockIdx.y; int tx = threadIdx.x; int ty = threadIdx.y; int Row = by * TILE_WIDTH + ty; int Col = bx * TILE_WIDTH + tx; float Pvalue = 0; for(int m=0; m < (numAColumns-1)/TILE_WIDTH + 1; ++m) { if(Row < numARows && m*TILE_WIDTH+tx < numAColumns) { subTileM[ty][tx] = A[Row*numAColumns + m*TILE_WIDTH + tx]; } else { subTileM[ty][tx] = 0; } if(m*TILE_WIDTH+ty < numBRows && Col < numBColumns ) { subTileN[ty][tx] = B[(m*TILE_WIDTH+ty)*numBColumns + Col]; } else { subTileN[ty][tx] = 0; } __syncthreads(); if(Row < numARows && Col < numBColumns) { for(int k=0; k<TILE_WIDTH; k++) Pvalue += subTileM[ty][k] * subTileN[k][tx]; } __syncthreads(); } if (Row < numCRows && Col < numCColumns) C[Row*numCColumns + Col] = Pvalue; } int main(int argc, char **argv) { wbArg_t args; float *hostA; // The A matrix float *hostB; // The B matrix float *hostC; // The output C matrix float *deviceA; float *deviceB; float *deviceC; int numARows; // number of rows in the matrix A int numAColumns; // number of columns in the matrix A int numBRows; // number of rows in the matrix B int numBColumns; // number of columns in the matrix B int numCRows; // number of rows in the matrix C (you have to set this) int numCColumns; // number of columns in the matrix C (you have to set // this) args = wbArg_read(argc, argv); wbTime_start(Generic, "Importing data and creating memory on host"); hostA = (float *)wbImport(wbArg_getInputFile(args, 0), &numARows, &numAColumns); hostB = (float *)wbImport(wbArg_getInputFile(args, 1), &numBRows, &numBColumns); //@@ Set numCRows and numCColumns numCRows = numARows; numCColumns = numBColumns; //@@ Allocate the hostC matrix hostC = (float *)malloc(numCRows * numCColumns * sizeof(float)); wbTime_stop(Generic, "Importing data and creating memory on host"); wbLog(TRACE, "The dimensions of A are ", numARows, " x ", numAColumns); wbLog(TRACE, "The dimensions of B are ", numBRows, " x ", numBColumns); int sizeA = numARows * numAColumns * sizeof(float); int sizeB = numBRows * numBColumns * sizeof(float); int sizeC = numCRows * numCColumns * sizeof(float); wbTime_start(GPU, "Allocating GPU memory."); //@@ Allocate GPU memory here cudaMalloc((void **) &deviceA, sizeA); cudaMalloc((void **) &deviceB, sizeB); cudaMalloc((void **) &deviceC, sizeC); wbTime_stop(GPU, "Allocating GPU memory."); wbTime_start(GPU, "Copying input memory to the GPU."); //@@ Copy memory to the GPU here cudaMemcpy(deviceA, hostA, sizeA, cudaMemcpyHostToDevice); cudaMemcpy(deviceB, hostB, sizeB, cudaMemcpyHostToDevice); wbTime_stop(GPU, "Copying input memory to the GPU."); //@@ Initialize the grid and block dimensions here dim3 dimGrid(numCColumns/TILE_WIDTH, numCRows/TILE_WIDTH, 1); if(numCColumns%TILE_WIDTH) dimGrid.x++; if(numCRows%TILE_WIDTH) dimGrid.y++; dim3 dimBlock(TILE_WIDTH, TILE_WIDTH, 1); wbTime_start(Compute, "Performing CUDA computation"); //@@ Launch the GPU Kernel here matrixMultiplyShared<<<dimGrid, dimBlock>>>(deviceA, deviceB, deviceC, numARows, 
numAColumns, numBRows, numBColumns, numCRows, numCColumns); cudaDeviceSynchronize(); wbTime_stop(Compute, "Performing CUDA computation"); wbTime_start(Copy, "Copying output memory to the CPU"); //@@ Copy the GPU memory back to the CPU here cudaMemcpy(hostC, deviceC, sizeC, cudaMemcpyDeviceToHost); wbTime_stop(Copy, "Copying output memory to the CPU"); wbTime_start(GPU, "Freeing GPU Memory"); //@@ Free the GPU memory here cudaFree(deviceA); cudaFree(deviceB); cudaFree(deviceC); wbTime_stop(GPU, "Freeing GPU Memory"); wbSolution(args, hostC, numCRows, numCColumns); free(hostA); free(hostB); free(hostC); return 0; }
5849bd9a13f9363a7bba43b5b96a01cc7df341d5.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /******************************************************************************* * Copyright (c) 2015-2018 Skymind, Inc. * * This program and the accompanying materials are made available under the * terms of the Apache License, Version 2.0 which is available at * https://www.apache.org/licenses/LICENSE-2.0. * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the * License for the specific language governing permissions and limitations * under the License. * * SPDX-License-Identifier: Apache-2.0 ******************************************************************************/ // // @author GS <[email protected]> // #include <ops/declarable/helpers/segment.h> #include <ops/declarable/helpers/segment_common.h> #include <array/NDArrayFactory.h> #include <helpers/ShapeUtils.h> #include <helpers/TAD.h> #include <exceptions/cuda_exception.h> #include <helpers/PointersManager.h> #include <helpers/ConstantTadHelper.h> namespace sd { namespace ops { namespace helpers { // -------------------------------------------------------------------------------------------------------------- // // Segment ops linear kernels // -------------------------------------------------------------------------------------------------------------- // template<typename T, typename I> static __global__ void segmentSumLinearKernel(void *input, Nd4jLong *inputShape, int *starts, int *lengths, Nd4jLong numOfClasses, void *output, Nd4jLong *outputShape) { __shared__ T *val; __shared__ Nd4jLong xLen, zLen, segment, zIndex; __shared__ T *x; __shared__ T *z; __shared__ int threadsPerSegment, start, finish; if (threadIdx.x == 0) { threadsPerSegment = (gridDim.x + numOfClasses - 1) / numOfClasses; segment = blockIdx.x / threadsPerSegment; x = reinterpret_cast<T *>(input); z = reinterpret_cast<T *>(output); xLen = shape::length(inputShape); zLen = shape::length(outputShape); if (segment < numOfClasses) { zIndex = shape::getIndexOffset(segment, outputShape); start = starts[segment]; finish = start + lengths[segment]; //val[segment] = ; z[zIndex] = x[shape::getIndexOffset(start, inputShape)]; } } __syncthreads(); for (auto e = start + threadIdx.x + 1; e < finish; e += blockDim.x) { auto xIndex = shape::getIndexOffset(e, inputShape); sd::math::atomics::nd4j_atomicAdd(&z[zIndex], x[xIndex]); } } // -------------------------------------------------------------------------------------------------------------- // template<typename T, typename I> static __global__ void unsortedSegmentSumLinearKernel(void *input, Nd4jLong *inputShape, void *indices, Nd4jLong *indicesShape, int *starts, int *lengths, Nd4jLong numOfClasses, void *output, Nd4jLong *outputShape) { __shared__ T *val; __shared__ Nd4jLong xLen, zLen, segment, zIndex; __shared__ T *x; __shared__ T *z; __shared__ I *y; //int threadsPerSegment, start, finish; if (threadIdx.x == 0) { segment = blockIdx.x; x = reinterpret_cast<T *>(input); z = reinterpret_cast<T *>(output); y = reinterpret_cast<I *>(indices); xLen = shape::length(inputShape); zLen = shape::length(outputShape); zIndex = shape::getIndexOffset(segment, outputShape); if (lengths[segment] > 0) z[zIndex] = x[shape::getIndexOffset(starts[segment], inputShape)]; else z[zIndex] = 0; //DataTypeUtils::max<T>(); } __syncthreads(); if (lengths[segment] > 0) for (auto e = threadIdx.x; 
e < xLen; e += blockDim.x) { auto xIndex = shape::getIndexOffset(e, inputShape); auto yIndex = shape::getIndexOffset(e, indicesShape); if (y[yIndex] == segment && e != starts[segment]) { sd::math::atomics::nd4j_atomicAdd(&z[zIndex], x[xIndex]); } } } // -------------------------------------------------------------------------------------------------------------- // // SegmentSum kernel template <typename T, typename I> static __global__ void segmentSumTadKernel(void* inputBuf, Nd4jLong* inputShape, Nd4jLong* inputTads, Nd4jLong* inputTadOffsets, I* indices, int* starts, int* lengths, Nd4jLong numOfClasses, void* outputBuf, Nd4jLong* outputShape, Nd4jLong* outputTads, Nd4jLong* outputTadOffsets) { __shared__ T* val; __shared__ Nd4jLong len, zIndex, total; __shared__ T* z; __shared__ int start, finish; if (threadIdx.x == 0) { auto segment = indices[blockIdx.x]; // / threadsPerSegment; z = reinterpret_cast<T*>(outputBuf) + outputTadOffsets[segment]; len = shape::length(inputTads); start = starts[segment]; finish = start + lengths[segment]; total = shape::sizeAt(inputShape, 0); } __syncthreads(); auto idx = blockIdx.x; if (blockIdx.x <= total) { auto x = reinterpret_cast<T *>(inputBuf) + inputTadOffsets[idx]; if (blockIdx.x == start) { for (auto e = threadIdx.x; e < len; e += blockDim.x) { auto xIndex = shape::getIndexOffset(e, inputTads); auto zIndex = shape::getIndexOffset(e, outputTads); sd::math::atomics::nd4j_atomicAdd(&z[zIndex], x[xIndex]); } } else { for (auto e = threadIdx.x; e < len; e += blockDim.x) { auto xIndex = shape::getIndexOffset(e, inputTads); auto zIndex = shape::getIndexOffset(e, outputTads); if (lengths[indices[idx]]) sd::math::atomics::nd4j_atomicAdd(&z[zIndex], x[xIndex]); } } } } // -------------------------------------------------------------------------------------------------------------- // template <typename T, typename I> static void segmentSumFunctor_(sd::LaunchContext* context, NDArray* input, NDArray* indices, NDArray* output) { auto stream = context->getCudaStream(); Nd4jLong numClasses = indices->e<Nd4jLong>(indices->lengthOf() - 1) + 1; NDArray classesRangesLens = NDArrayFactory::create<int>('c', {numClasses}, context); NDArray classesRangesBegs = NDArrayFactory::create<int>('c', {numClasses}, context); classesRangesBegs.assign(indices->lengthOf()); classesRangesLens.assign(0); dim3 dims(numClasses, indices->lengthOf(), numClasses * 32 + 32); fillUpSegments(indices, numClasses, classesRangesBegs, classesRangesLens); int* begins = reinterpret_cast<int*>(classesRangesBegs.specialBuffer()); int* lengths = reinterpret_cast<int*>(classesRangesLens.specialBuffer()); if (input->isVector()) { hipLaunchKernelGGL(( segmentSumLinearKernel<T,I>), dim3(numClasses), dim3(input->lengthOf()), numClasses * 32 + 32, *stream, input->specialBuffer(), input->specialShapeInfo(), begins, lengths, numClasses, output->specialBuffer(), output->specialShapeInfo()); } else { std::vector<int> dimensions = ShapeUtils::evalDimsToExclude(input->rankOf(), {0}); auto packX = sd::ConstantTadHelper::getInstance()->tadForDimensions(input->getShapeInfo(), dimensions); auto packZ = sd::ConstantTadHelper::getInstance()->tadForDimensions(output->getShapeInfo(), dimensions); Nd4jLong* inputTads = packX.specialShapeInfo(); Nd4jLong* inputTadOffsets = packX.specialOffsets(); Nd4jLong* outputTads = packZ.specialShapeInfo(); Nd4jLong* outputTadOffsets = packZ.specialOffsets(); hipLaunchKernelGGL(( segmentSumTadKernel<T,I>), dim3(input->sizeAt(0)), dim3(512), 2048, *stream, input->specialBuffer(), 
input->specialShapeInfo(), inputTads, inputTadOffsets, reinterpret_cast<I*>(indices->specialBuffer()), begins, lengths, numClasses, output->specialBuffer(), output->specialShapeInfo(), outputTads, outputTadOffsets); } } // -------------------------------------------------------------------------------------------------------------- // void segmentSumFunctor(sd::LaunchContext* context , NDArray* input, NDArray* indices, NDArray* output) { NDArray::prepareSpecialUse({output}, {input, indices}); output->nullify(); BUILD_DOUBLE_SELECTOR(input->dataType(), indices->dataType(), segmentSumFunctor_, (context, input, indices, output), NUMERIC_TYPES, INDEXING_TYPES); NDArray::registerSpecialUse({output}, {input, indices}); } // -------------------------------------------------------------------------------------------------------------- // template <typename T, typename I> static void unsortedSegmentSumFunctor_(sd::LaunchContext* context, NDArray* input, NDArray* indices, Nd4jLong numOfClasses, NDArray* output) { auto stream = context->getCudaStream(); // NDArray classes = NDArrayFactory::create<int>('c', {numOfClasses, 2}); NDArray classesRangesBegs = NDArrayFactory::create<int>('c', {numOfClasses}, context); NDArray classesRangesLens = NDArrayFactory::create<int>('c', {numOfClasses}, context); // NDArray row = NDArrayFactory::create<int>('c', {1, 2}, {(int)indices->lengthOf(), (int)0}); // classes.applyTrueBroadcast(sd::BroadcastOpsTuple::Assign(), &row, &classes); classesRangesBegs.assign(indices->lengthOf()); classesRangesLens.assign(0); dim3 dims(numOfClasses, indices->lengthOf(), (numOfClasses + 1) * 64); // int* classesBuf = reinterpret_cast<int*>(classes.specialBuffer()); fillUpSegments(indices, numOfClasses, classesRangesBegs, classesRangesLens); int* begins = reinterpret_cast<int*>(classesRangesBegs.specialBuffer()); int* lengths = reinterpret_cast<int*>(classesRangesLens.specialBuffer()); if (input->isVector()) { hipLaunchKernelGGL(( unsortedSegmentSumLinearKernel<T,I>), dim3(dims.x), dim3(dims.y), dims.z, *stream, input->specialBuffer(), input->specialShapeInfo(), indices->specialBuffer(), indices->specialShapeInfo(), begins, lengths, numOfClasses, output->specialBuffer(), output->specialShapeInfo()); } else { output->assign(0); std::vector<int> dimensions = ShapeUtils::evalDimsToExclude(input->rankOf(), {0}); auto packX = sd::ConstantTadHelper::getInstance()->tadForDimensions(input->getShapeInfo(), dimensions); auto packZ = sd::ConstantTadHelper::getInstance()->tadForDimensions(output->getShapeInfo(), dimensions); Nd4jLong* inputTads = packX.specialShapeInfo(); Nd4jLong* inputTadOffsets = packX.specialOffsets(); Nd4jLong* outputTads = packZ.specialShapeInfo(); Nd4jLong* outputTadOffsets = packZ.specialOffsets(); dims.x = input->sizeAt(0); hipLaunchKernelGGL(( segmentSumTadKernel<T,I>), dim3(dims.x), dim3(dims.y), dims.z, *stream, input->specialBuffer(), input->specialShapeInfo(), inputTads, inputTadOffsets, reinterpret_cast<I*>(indices->specialBuffer()), begins, lengths, numOfClasses, output->specialBuffer(), output->specialShapeInfo(), outputTads, outputTadOffsets); } } // -------------------------------------------------------------------------------------------------------------- // void unsortedSegmentSumFunctor(sd::LaunchContext* context , NDArray* input, NDArray* indices, Nd4jLong numOfClasses, NDArray* output) { NDArray::prepareSpecialUse({output}, {input, indices}); output->nullify(); BUILD_DOUBLE_SELECTOR(input->dataType(), indices->dataType(), unsortedSegmentSumFunctor_, 
(context, input, indices, numOfClasses, output), NUMERIC_TYPES, INDEXING_TYPES); NDArray::registerSpecialUse({output}, {input, indices}); } // -------------------------------------------------------------------------------------------------------------- // // Backpropagate ops // -------------------------------------------------------------------------------------------------------------- // // Sorted sum backpropagate template <typename T, typename I> static __global__ void segmentSumBPLinearKernel(void* inputBuf, Nd4jLong* inputShape, void* eps, Nd4jLong* epsShape, void* indicesBuf, Nd4jLong* indicesShape, void* outputBuf, Nd4jLong* outputShape) { __shared__ T* x; __shared__ T* gradIn; __shared__ T* gradOut; __shared__ I* y; __shared__ T* z; __shared__ Nd4jLong xLen, gradLen; if (threadIdx.x == 0) { xLen = shape::length(inputShape); x = reinterpret_cast<T*>(inputBuf); y = reinterpret_cast<I*>(indicesBuf); z = reinterpret_cast<T*>(outputBuf); gradOut = reinterpret_cast<T*>(eps); gradLen = shape::length(epsShape); } __syncthreads(); auto start = blockIdx.x * blockDim.x + threadIdx.x; auto step = gridDim.x * blockDim.x; for (auto e = start; e < xLen; e += step) { auto zOffset = shape::getIndexOffset(e, outputShape); auto xOffset = shape::getIndexOffset(e, inputShape); auto yOffset = shape::getIndexOffset(e, indicesShape); auto classIndex = y[yOffset]; auto gradOffsetO = shape::getIndexOffset(classIndex, epsShape); z[zOffset] = gradOut[gradOffsetO]; } } // -------------------------------------------------------------------------------------------------------------- // template <typename T, typename I> static __global__ void segmentSumBPTadKernel(void* inputBuf, Nd4jLong* inputShape, void* eps, Nd4jLong* epsShape, void* indicesBuf, Nd4jLong* indicesShape, void* outputBuf, Nd4jLong* outputShape, Nd4jLong* inputTad, Nd4jLong* inputOffsets, Nd4jLong* gradOutTad, Nd4jLong* gradOutOffsets, Nd4jLong* outTad, Nd4jLong* outOffsets) { __shared__ T* x; __shared__ T* gradOut; __shared__ I* y; __shared__ T* z; __shared__ Nd4jLong xLen, yLen, gradLen, currentLen; if (threadIdx.x == 0) { xLen = shape::length(inputShape); x = reinterpret_cast<T*>(inputBuf); y = reinterpret_cast<I*>(indicesBuf); z = reinterpret_cast<T*>(outputBuf); yLen = shape::length(indicesShape); gradOut = reinterpret_cast<T*>(eps); gradLen = shape::length(epsShape); currentLen = shape::length(outTad); } __syncthreads(); for (auto i = blockIdx.x; i < yLen; i += gridDim.x) { auto yIndex = shape::getIndexOffset(i, indicesShape); auto segment = y[yIndex]; T* currentOut = z + outOffsets[i]; T* outGrad = gradOut + gradOutOffsets[segment]; for (auto e = threadIdx.x; e < currentLen; e += blockDim.x) { currentOut[e] = outGrad[e]; } } } // -------------------------------------------------------------------------------------------------------------- // template <typename T, typename I> int segmentSumFunctorBP_(sd::LaunchContext* context , NDArray* input, NDArray* indices, NDArray* gradOut, NDArray* output) { auto stream = context->getCudaStream(); NDArray::prepareSpecialUse({output}, {input, indices, gradOut}); if (input->isVector()) { Nd4jLong loop_size = input->lengthOf(); auto numOfClasses = gradOut->lengthOf(); //indices->e<Nd4jLong>(loop_size - 1); hipLaunchKernelGGL(( segmentSumBPLinearKernel<T,I>), dim3(gradOut->lengthOf()), dim3(input->lengthOf()), 256, *stream, input->specialBuffer(), input->specialShapeInfo(), gradOut->specialBuffer(), gradOut->specialShapeInfo(), indices->specialBuffer(), indices->specialShapeInfo(), 
output->specialBuffer(), output->specialShapeInfo()); } else { std::vector<int> dimensions = ShapeUtils::evalDimsToExclude(input->rankOf(), {0}); auto packX = sd::ConstantTadHelper::getInstance()->tadForDimensions(input->getShapeInfo(), dimensions); auto packZ = sd::ConstantTadHelper::getInstance()->tadForDimensions(output->getShapeInfo(), dimensions); auto packGradOut = sd::ConstantTadHelper::getInstance()->tadForDimensions(gradOut->getShapeInfo(), dimensions); Nd4jLong* inputTads = packX.specialShapeInfo(); Nd4jLong* inputTadOffsets = packX.specialOffsets(); Nd4jLong* outputTads = packZ.specialShapeInfo(); Nd4jLong* outputTadOffsets = packZ.specialOffsets(); Nd4jLong* gradOutTads = packGradOut.specialShapeInfo(); Nd4jLong* gradOutTadOffsets = packGradOut.specialOffsets(); hipLaunchKernelGGL(( segmentSumBPTadKernel<T,I>), dim3(gradOut->lengthOf()), dim3(input->lengthOf()), 256, *stream, input->specialBuffer(), input->specialShapeInfo(), gradOut->specialBuffer(), gradOut->specialShapeInfo(), indices->specialBuffer(), indices->specialShapeInfo(), output->specialBuffer(), output->specialShapeInfo(), inputTads, inputTadOffsets, gradOutTads, gradOutTadOffsets, outputTads, outputTadOffsets); } NDArray::registerSpecialUse({output}, {input, indices, gradOut}); return Status::OK(); } // -------------------------------------------------------------------------------------------------------------- // int segmentSumFunctorBP(sd::LaunchContext* context , NDArray* input, NDArray* indices, NDArray* gradOut, NDArray* output) { NDArray::prepareSpecialUse({output}, {input, indices, gradOut}); BUILD_DOUBLE_SELECTOR(output->dataType(), indices->dataType(), return segmentSumFunctorBP_, (context, input, indices, gradOut, output), FLOAT_TYPES, INDEXING_TYPES); NDArray::registerSpecialUse({output}, {input, indices, gradOut}); } template <typename T, typename I> static int unsortedSegmentSumFunctorBP_(sd::LaunchContext* context , NDArray* input, NDArray* indices, NDArray* gradOut, Nd4jLong numOfClasses, NDArray* output) { auto stream = context->getCudaStream(); NDArray::prepareSpecialUse({output}, {input, indices, gradOut}); if (input->isVector()) { Nd4jLong loop_size = input->lengthOf(); auto numOfClasses = gradOut->lengthOf(); //indices->e<Nd4jLong>(loop_size - 1); hipLaunchKernelGGL(( segmentSumBPLinearKernel<T,I>), dim3(gradOut->lengthOf()), dim3(input->lengthOf()), 256, *stream, input->specialBuffer(), input->specialShapeInfo(), gradOut->specialBuffer(), gradOut->specialShapeInfo(), indices->specialBuffer(), indices->specialShapeInfo(), output->specialBuffer(), output->specialShapeInfo()); } else { std::vector<int> dimensions = ShapeUtils::evalDimsToExclude(input->rankOf(), {0}); auto packX = sd::ConstantTadHelper::getInstance()->tadForDimensions(input->getShapeInfo(), dimensions); auto packZ = sd::ConstantTadHelper::getInstance()->tadForDimensions(output->getShapeInfo(), dimensions); auto packGradOut = sd::ConstantTadHelper::getInstance()->tadForDimensions(gradOut->getShapeInfo(), dimensions); Nd4jLong* inputTads = packX.specialShapeInfo(); Nd4jLong* inputTadOffsets = packX.specialOffsets(); Nd4jLong* outputTads = packZ.specialShapeInfo(); Nd4jLong* outputTadOffsets = packZ.specialOffsets(); Nd4jLong* gradOutTads = packGradOut.specialShapeInfo(); Nd4jLong* gradOutTadOffsets = packGradOut.specialOffsets(); hipLaunchKernelGGL(( segmentSumBPTadKernel<T,I>), dim3(gradOut->lengthOf()), dim3(input->lengthOf()), 256, *stream, input->specialBuffer(), input->specialShapeInfo(), gradOut->specialBuffer(), 
gradOut->specialShapeInfo(), indices->specialBuffer(), indices->specialShapeInfo(), output->specialBuffer(), output->specialShapeInfo(), inputTads, inputTadOffsets, gradOutTads, gradOutTadOffsets, outputTads, outputTadOffsets); } NDArray::registerSpecialUse({output}, {input, indices, gradOut}); return Status::OK(); } // -------------------------------------------------------------------------------------------------------------- // int unsortedSegmentSumFunctorBP(sd::LaunchContext* context , NDArray* input, NDArray* indices, NDArray* gradOut, Nd4jLong numOfClasses, NDArray* output) { NDArray::prepareSpecialUse({output}, {input, indices, gradOut}); BUILD_DOUBLE_SELECTOR(output->dataType(), indices->dataType(), return unsortedSegmentSumFunctorBP_, (context, input, indices, gradOut, numOfClasses, output), FLOAT_TYPES, INDEXING_TYPES); NDArray::registerSpecialUse({output}, {input, indices, gradOut}); } } } }
5849bd9a13f9363a7bba43b5b96a01cc7df341d5.cu
/******************************************************************************* * Copyright (c) 2015-2018 Skymind, Inc. * * This program and the accompanying materials are made available under the * terms of the Apache License, Version 2.0 which is available at * https://www.apache.org/licenses/LICENSE-2.0. * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the * License for the specific language governing permissions and limitations * under the License. * * SPDX-License-Identifier: Apache-2.0 ******************************************************************************/ // // @author GS <[email protected]> // #include <ops/declarable/helpers/segment.h> #include <ops/declarable/helpers/segment_common.h> #include <array/NDArrayFactory.h> #include <helpers/ShapeUtils.h> #include <helpers/TAD.h> #include <exceptions/cuda_exception.h> #include <helpers/PointersManager.h> #include <helpers/ConstantTadHelper.h> namespace sd { namespace ops { namespace helpers { // -------------------------------------------------------------------------------------------------------------- // // Segment ops linear kernels // -------------------------------------------------------------------------------------------------------------- // template<typename T, typename I> static __global__ void segmentSumLinearKernel(void *input, Nd4jLong *inputShape, int *starts, int *lengths, Nd4jLong numOfClasses, void *output, Nd4jLong *outputShape) { __shared__ T *val; __shared__ Nd4jLong xLen, zLen, segment, zIndex; __shared__ T *x; __shared__ T *z; __shared__ int threadsPerSegment, start, finish; if (threadIdx.x == 0) { threadsPerSegment = (gridDim.x + numOfClasses - 1) / numOfClasses; segment = blockIdx.x / threadsPerSegment; x = reinterpret_cast<T *>(input); z = reinterpret_cast<T *>(output); xLen = shape::length(inputShape); zLen = shape::length(outputShape); if (segment < numOfClasses) { zIndex = shape::getIndexOffset(segment, outputShape); start = starts[segment]; finish = start + lengths[segment]; //val[segment] = ; z[zIndex] = x[shape::getIndexOffset(start, inputShape)]; } } __syncthreads(); for (auto e = start + threadIdx.x + 1; e < finish; e += blockDim.x) { auto xIndex = shape::getIndexOffset(e, inputShape); sd::math::atomics::nd4j_atomicAdd(&z[zIndex], x[xIndex]); } } // -------------------------------------------------------------------------------------------------------------- // template<typename T, typename I> static __global__ void unsortedSegmentSumLinearKernel(void *input, Nd4jLong *inputShape, void *indices, Nd4jLong *indicesShape, int *starts, int *lengths, Nd4jLong numOfClasses, void *output, Nd4jLong *outputShape) { __shared__ T *val; __shared__ Nd4jLong xLen, zLen, segment, zIndex; __shared__ T *x; __shared__ T *z; __shared__ I *y; //int threadsPerSegment, start, finish; if (threadIdx.x == 0) { segment = blockIdx.x; x = reinterpret_cast<T *>(input); z = reinterpret_cast<T *>(output); y = reinterpret_cast<I *>(indices); xLen = shape::length(inputShape); zLen = shape::length(outputShape); zIndex = shape::getIndexOffset(segment, outputShape); if (lengths[segment] > 0) z[zIndex] = x[shape::getIndexOffset(starts[segment], inputShape)]; else z[zIndex] = 0; //DataTypeUtils::max<T>(); } __syncthreads(); if (lengths[segment] > 0) for (auto e = threadIdx.x; e < xLen; e += blockDim.x) { auto xIndex = shape::getIndexOffset(e, inputShape); auto 
yIndex = shape::getIndexOffset(e, indicesShape); if (y[yIndex] == segment && e != starts[segment]) { sd::math::atomics::nd4j_atomicAdd(&z[zIndex], x[xIndex]); } } } // -------------------------------------------------------------------------------------------------------------- // // SegmentSum kernel template <typename T, typename I> static __global__ void segmentSumTadKernel(void* inputBuf, Nd4jLong* inputShape, Nd4jLong* inputTads, Nd4jLong* inputTadOffsets, I* indices, int* starts, int* lengths, Nd4jLong numOfClasses, void* outputBuf, Nd4jLong* outputShape, Nd4jLong* outputTads, Nd4jLong* outputTadOffsets) { __shared__ T* val; __shared__ Nd4jLong len, zIndex, total; __shared__ T* z; __shared__ int start, finish; if (threadIdx.x == 0) { auto segment = indices[blockIdx.x]; // / threadsPerSegment; z = reinterpret_cast<T*>(outputBuf) + outputTadOffsets[segment]; len = shape::length(inputTads); start = starts[segment]; finish = start + lengths[segment]; total = shape::sizeAt(inputShape, 0); } __syncthreads(); auto idx = blockIdx.x; if (blockIdx.x <= total) { auto x = reinterpret_cast<T *>(inputBuf) + inputTadOffsets[idx]; if (blockIdx.x == start) { for (auto e = threadIdx.x; e < len; e += blockDim.x) { auto xIndex = shape::getIndexOffset(e, inputTads); auto zIndex = shape::getIndexOffset(e, outputTads); sd::math::atomics::nd4j_atomicAdd(&z[zIndex], x[xIndex]); } } else { for (auto e = threadIdx.x; e < len; e += blockDim.x) { auto xIndex = shape::getIndexOffset(e, inputTads); auto zIndex = shape::getIndexOffset(e, outputTads); if (lengths[indices[idx]]) sd::math::atomics::nd4j_atomicAdd(&z[zIndex], x[xIndex]); } } } } // -------------------------------------------------------------------------------------------------------------- // template <typename T, typename I> static void segmentSumFunctor_(sd::LaunchContext* context, NDArray* input, NDArray* indices, NDArray* output) { auto stream = context->getCudaStream(); Nd4jLong numClasses = indices->e<Nd4jLong>(indices->lengthOf() - 1) + 1; NDArray classesRangesLens = NDArrayFactory::create<int>('c', {numClasses}, context); NDArray classesRangesBegs = NDArrayFactory::create<int>('c', {numClasses}, context); classesRangesBegs.assign(indices->lengthOf()); classesRangesLens.assign(0); dim3 dims(numClasses, indices->lengthOf(), numClasses * 32 + 32); fillUpSegments(indices, numClasses, classesRangesBegs, classesRangesLens); int* begins = reinterpret_cast<int*>(classesRangesBegs.specialBuffer()); int* lengths = reinterpret_cast<int*>(classesRangesLens.specialBuffer()); if (input->isVector()) { segmentSumLinearKernel<T,I><<<numClasses, input->lengthOf(), numClasses * 32 + 32, *stream>>>(input->specialBuffer(), input->specialShapeInfo(), begins, lengths, numClasses, output->specialBuffer(), output->specialShapeInfo()); } else { std::vector<int> dimensions = ShapeUtils::evalDimsToExclude(input->rankOf(), {0}); auto packX = sd::ConstantTadHelper::getInstance()->tadForDimensions(input->getShapeInfo(), dimensions); auto packZ = sd::ConstantTadHelper::getInstance()->tadForDimensions(output->getShapeInfo(), dimensions); Nd4jLong* inputTads = packX.specialShapeInfo(); Nd4jLong* inputTadOffsets = packX.specialOffsets(); Nd4jLong* outputTads = packZ.specialShapeInfo(); Nd4jLong* outputTadOffsets = packZ.specialOffsets(); segmentSumTadKernel<T,I><<<input->sizeAt(0), 512, 2048, *stream>>>(input->specialBuffer(), input->specialShapeInfo(), inputTads, inputTadOffsets, reinterpret_cast<I*>(indices->specialBuffer()), begins, lengths, numClasses, 
output->specialBuffer(), output->specialShapeInfo(), outputTads, outputTadOffsets); } } // -------------------------------------------------------------------------------------------------------------- // void segmentSumFunctor(sd::LaunchContext* context , NDArray* input, NDArray* indices, NDArray* output) { NDArray::prepareSpecialUse({output}, {input, indices}); output->nullify(); BUILD_DOUBLE_SELECTOR(input->dataType(), indices->dataType(), segmentSumFunctor_, (context, input, indices, output), NUMERIC_TYPES, INDEXING_TYPES); NDArray::registerSpecialUse({output}, {input, indices}); } // -------------------------------------------------------------------------------------------------------------- // template <typename T, typename I> static void unsortedSegmentSumFunctor_(sd::LaunchContext* context, NDArray* input, NDArray* indices, Nd4jLong numOfClasses, NDArray* output) { auto stream = context->getCudaStream(); // NDArray classes = NDArrayFactory::create<int>('c', {numOfClasses, 2}); NDArray classesRangesBegs = NDArrayFactory::create<int>('c', {numOfClasses}, context); NDArray classesRangesLens = NDArrayFactory::create<int>('c', {numOfClasses}, context); // NDArray row = NDArrayFactory::create<int>('c', {1, 2}, {(int)indices->lengthOf(), (int)0}); // classes.applyTrueBroadcast(sd::BroadcastOpsTuple::Assign(), &row, &classes); classesRangesBegs.assign(indices->lengthOf()); classesRangesLens.assign(0); dim3 dims(numOfClasses, indices->lengthOf(), (numOfClasses + 1) * 64); // int* classesBuf = reinterpret_cast<int*>(classes.specialBuffer()); fillUpSegments(indices, numOfClasses, classesRangesBegs, classesRangesLens); int* begins = reinterpret_cast<int*>(classesRangesBegs.specialBuffer()); int* lengths = reinterpret_cast<int*>(classesRangesLens.specialBuffer()); if (input->isVector()) { unsortedSegmentSumLinearKernel<T,I><<<dims.x, dims.y, dims.z, *stream>>>(input->specialBuffer(), input->specialShapeInfo(), indices->specialBuffer(), indices->specialShapeInfo(), begins, lengths, numOfClasses, output->specialBuffer(), output->specialShapeInfo()); } else { output->assign(0); std::vector<int> dimensions = ShapeUtils::evalDimsToExclude(input->rankOf(), {0}); auto packX = sd::ConstantTadHelper::getInstance()->tadForDimensions(input->getShapeInfo(), dimensions); auto packZ = sd::ConstantTadHelper::getInstance()->tadForDimensions(output->getShapeInfo(), dimensions); Nd4jLong* inputTads = packX.specialShapeInfo(); Nd4jLong* inputTadOffsets = packX.specialOffsets(); Nd4jLong* outputTads = packZ.specialShapeInfo(); Nd4jLong* outputTadOffsets = packZ.specialOffsets(); dims.x = input->sizeAt(0); segmentSumTadKernel<T,I><<<dims.x, dims.y, dims.z, *stream>>>(input->specialBuffer(), input->specialShapeInfo(), inputTads, inputTadOffsets, reinterpret_cast<I*>(indices->specialBuffer()), begins, lengths, numOfClasses, output->specialBuffer(), output->specialShapeInfo(), outputTads, outputTadOffsets); } } // -------------------------------------------------------------------------------------------------------------- // void unsortedSegmentSumFunctor(sd::LaunchContext* context , NDArray* input, NDArray* indices, Nd4jLong numOfClasses, NDArray* output) { NDArray::prepareSpecialUse({output}, {input, indices}); output->nullify(); BUILD_DOUBLE_SELECTOR(input->dataType(), indices->dataType(), unsortedSegmentSumFunctor_, (context, input, indices, numOfClasses, output), NUMERIC_TYPES, INDEXING_TYPES); NDArray::registerSpecialUse({output}, {input, indices}); } // 
-------------------------------------------------------------------------------------------------------------- // // Backpropagate ops // -------------------------------------------------------------------------------------------------------------- // // Sorted sum backpropagate template <typename T, typename I> static __global__ void segmentSumBPLinearKernel(void* inputBuf, Nd4jLong* inputShape, void* eps, Nd4jLong* epsShape, void* indicesBuf, Nd4jLong* indicesShape, void* outputBuf, Nd4jLong* outputShape) { __shared__ T* x; __shared__ T* gradIn; __shared__ T* gradOut; __shared__ I* y; __shared__ T* z; __shared__ Nd4jLong xLen, gradLen; if (threadIdx.x == 0) { xLen = shape::length(inputShape); x = reinterpret_cast<T*>(inputBuf); y = reinterpret_cast<I*>(indicesBuf); z = reinterpret_cast<T*>(outputBuf); gradOut = reinterpret_cast<T*>(eps); gradLen = shape::length(epsShape); } __syncthreads(); auto start = blockIdx.x * blockDim.x + threadIdx.x; auto step = gridDim.x * blockDim.x; for (auto e = start; e < xLen; e += step) { auto zOffset = shape::getIndexOffset(e, outputShape); auto xOffset = shape::getIndexOffset(e, inputShape); auto yOffset = shape::getIndexOffset(e, indicesShape); auto classIndex = y[yOffset]; auto gradOffsetO = shape::getIndexOffset(classIndex, epsShape); z[zOffset] = gradOut[gradOffsetO]; } } // -------------------------------------------------------------------------------------------------------------- // template <typename T, typename I> static __global__ void segmentSumBPTadKernel(void* inputBuf, Nd4jLong* inputShape, void* eps, Nd4jLong* epsShape, void* indicesBuf, Nd4jLong* indicesShape, void* outputBuf, Nd4jLong* outputShape, Nd4jLong* inputTad, Nd4jLong* inputOffsets, Nd4jLong* gradOutTad, Nd4jLong* gradOutOffsets, Nd4jLong* outTad, Nd4jLong* outOffsets) { __shared__ T* x; __shared__ T* gradOut; __shared__ I* y; __shared__ T* z; __shared__ Nd4jLong xLen, yLen, gradLen, currentLen; if (threadIdx.x == 0) { xLen = shape::length(inputShape); x = reinterpret_cast<T*>(inputBuf); y = reinterpret_cast<I*>(indicesBuf); z = reinterpret_cast<T*>(outputBuf); yLen = shape::length(indicesShape); gradOut = reinterpret_cast<T*>(eps); gradLen = shape::length(epsShape); currentLen = shape::length(outTad); } __syncthreads(); for (auto i = blockIdx.x; i < yLen; i += gridDim.x) { auto yIndex = shape::getIndexOffset(i, indicesShape); auto segment = y[yIndex]; T* currentOut = z + outOffsets[i]; T* outGrad = gradOut + gradOutOffsets[segment]; for (auto e = threadIdx.x; e < currentLen; e += blockDim.x) { currentOut[e] = outGrad[e]; } } } // -------------------------------------------------------------------------------------------------------------- // template <typename T, typename I> int segmentSumFunctorBP_(sd::LaunchContext* context , NDArray* input, NDArray* indices, NDArray* gradOut, NDArray* output) { auto stream = context->getCudaStream(); NDArray::prepareSpecialUse({output}, {input, indices, gradOut}); if (input->isVector()) { Nd4jLong loop_size = input->lengthOf(); auto numOfClasses = gradOut->lengthOf(); //indices->e<Nd4jLong>(loop_size - 1); segmentSumBPLinearKernel<T,I><<<gradOut->lengthOf(), input->lengthOf(), 256, *stream>>>(input->specialBuffer(), input->specialShapeInfo(), gradOut->specialBuffer(), gradOut->specialShapeInfo(), indices->specialBuffer(), indices->specialShapeInfo(), output->specialBuffer(), output->specialShapeInfo()); } else { std::vector<int> dimensions = ShapeUtils::evalDimsToExclude(input->rankOf(), {0}); auto packX = 
sd::ConstantTadHelper::getInstance()->tadForDimensions(input->getShapeInfo(), dimensions); auto packZ = sd::ConstantTadHelper::getInstance()->tadForDimensions(output->getShapeInfo(), dimensions); auto packGradOut = sd::ConstantTadHelper::getInstance()->tadForDimensions(gradOut->getShapeInfo(), dimensions); Nd4jLong* inputTads = packX.specialShapeInfo(); Nd4jLong* inputTadOffsets = packX.specialOffsets(); Nd4jLong* outputTads = packZ.specialShapeInfo(); Nd4jLong* outputTadOffsets = packZ.specialOffsets(); Nd4jLong* gradOutTads = packGradOut.specialShapeInfo(); Nd4jLong* gradOutTadOffsets = packGradOut.specialOffsets(); segmentSumBPTadKernel<T,I><<<gradOut->lengthOf(), input->lengthOf(), 256, *stream>>>(input->specialBuffer(), input->specialShapeInfo(), gradOut->specialBuffer(), gradOut->specialShapeInfo(), indices->specialBuffer(), indices->specialShapeInfo(), output->specialBuffer(), output->specialShapeInfo(), inputTads, inputTadOffsets, gradOutTads, gradOutTadOffsets, outputTads, outputTadOffsets); } NDArray::registerSpecialUse({output}, {input, indices, gradOut}); return Status::OK(); } // -------------------------------------------------------------------------------------------------------------- // int segmentSumFunctorBP(sd::LaunchContext* context , NDArray* input, NDArray* indices, NDArray* gradOut, NDArray* output) { NDArray::prepareSpecialUse({output}, {input, indices, gradOut}); BUILD_DOUBLE_SELECTOR(output->dataType(), indices->dataType(), return segmentSumFunctorBP_, (context, input, indices, gradOut, output), FLOAT_TYPES, INDEXING_TYPES); NDArray::registerSpecialUse({output}, {input, indices, gradOut}); } template <typename T, typename I> static int unsortedSegmentSumFunctorBP_(sd::LaunchContext* context , NDArray* input, NDArray* indices, NDArray* gradOut, Nd4jLong numOfClasses, NDArray* output) { auto stream = context->getCudaStream(); NDArray::prepareSpecialUse({output}, {input, indices, gradOut}); if (input->isVector()) { Nd4jLong loop_size = input->lengthOf(); auto numOfClasses = gradOut->lengthOf(); //indices->e<Nd4jLong>(loop_size - 1); segmentSumBPLinearKernel<T,I><<<gradOut->lengthOf(), input->lengthOf(), 256, *stream>>>(input->specialBuffer(), input->specialShapeInfo(), gradOut->specialBuffer(), gradOut->specialShapeInfo(), indices->specialBuffer(), indices->specialShapeInfo(), output->specialBuffer(), output->specialShapeInfo()); } else { std::vector<int> dimensions = ShapeUtils::evalDimsToExclude(input->rankOf(), {0}); auto packX = sd::ConstantTadHelper::getInstance()->tadForDimensions(input->getShapeInfo(), dimensions); auto packZ = sd::ConstantTadHelper::getInstance()->tadForDimensions(output->getShapeInfo(), dimensions); auto packGradOut = sd::ConstantTadHelper::getInstance()->tadForDimensions(gradOut->getShapeInfo(), dimensions); Nd4jLong* inputTads = packX.specialShapeInfo(); Nd4jLong* inputTadOffsets = packX.specialOffsets(); Nd4jLong* outputTads = packZ.specialShapeInfo(); Nd4jLong* outputTadOffsets = packZ.specialOffsets(); Nd4jLong* gradOutTads = packGradOut.specialShapeInfo(); Nd4jLong* gradOutTadOffsets = packGradOut.specialOffsets(); segmentSumBPTadKernel<T,I><<<gradOut->lengthOf(), input->lengthOf(), 256, *stream>>>(input->specialBuffer(), input->specialShapeInfo(), gradOut->specialBuffer(), gradOut->specialShapeInfo(), indices->specialBuffer(), indices->specialShapeInfo(), output->specialBuffer(), output->specialShapeInfo(), inputTads, inputTadOffsets, gradOutTads, gradOutTadOffsets, outputTads, outputTadOffsets); } 
NDArray::registerSpecialUse({output}, {input, indices, gradOut}); return Status::OK(); } // -------------------------------------------------------------------------------------------------------------- // int unsortedSegmentSumFunctorBP(sd::LaunchContext* context , NDArray* input, NDArray* indices, NDArray* gradOut, Nd4jLong numOfClasses, NDArray* output) { NDArray::prepareSpecialUse({output}, {input, indices, gradOut}); BUILD_DOUBLE_SELECTOR(output->dataType(), indices->dataType(), return unsortedSegmentSumFunctorBP_, (context, input, indices, gradOut, numOfClasses, output), FLOAT_TYPES, INDEXING_TYPES); NDArray::registerSpecialUse({output}, {input, indices, gradOut}); } } } }
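The linear and TAD kernels above all reduce with the same pattern: each thread owns a slice of the input and atomically adds its values into the output row selected by the segment index, while the backward kernels simply broadcast the upstream gradient back over every element of a segment. Below is a minimal, self-contained CUDA sketch of the forward idea for a 1-D input with sorted indices; the names and launch configuration are illustrative and are not the libnd4j API.

#include <cstdio>
#include <cuda_runtime.h>

// One thread per input element: each thread adds its value into the output
// slot of its segment. Segment ids are assumed to lie in [0, numSegments).
__global__ void segmentSumNaive(const float* x, const int* seg, float* out, int n) {
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n) atomicAdd(&out[seg[i]], x[i]);
}

int main() {
    const int n = 8, numSegments = 3;
    float hx[n] = {1, 2, 3, 4, 5, 6, 7, 8};
    int hs[n] = {0, 0, 1, 1, 1, 2, 2, 2};  // sorted segment ids
    float *dx, *dout;
    int *ds;
    cudaMalloc(&dx, n * sizeof(float));
    cudaMalloc(&ds, n * sizeof(int));
    cudaMalloc(&dout, numSegments * sizeof(float));
    cudaMemcpy(dx, hx, n * sizeof(float), cudaMemcpyHostToDevice);
    cudaMemcpy(ds, hs, n * sizeof(int), cudaMemcpyHostToDevice);
    cudaMemset(dout, 0, numSegments * sizeof(float));
    segmentSumNaive<<<(n + 255) / 256, 256>>>(dx, ds, dout, n);
    float hout[numSegments];
    cudaMemcpy(hout, dout, numSegments * sizeof(float), cudaMemcpyDeviceToHost);
    for (int s = 0; s < numSegments; ++s) printf("segment %d: %f\n", s, hout[s]);  // 3, 12, 21
    cudaFree(dx); cudaFree(ds); cudaFree(dout);
    return 0;
}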
235522de4070db684091850158ccc90d6d111c5f.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "simple_implement.h" __global__ void fillImage_kernel(float4 * dstPix, uint maxInd) { unsigned ind = blockIdx.x*blockDim.x + threadIdx.x; int i; for(i=0; i < 4; i++) { if(ind * 4 + i >= maxInd) return; float r = (float)threadIdx.x/(float)blockDim.x; dstPix[ind * 4 + i] = make_float4(r, 1.f - r, 0.f, 1.f); } } extern "C" { void fillImage(float4 * dstPix, uint numPix) { dim3 block(512, 1, 1); unsigned nblk = iDivUp(numPix/4, 512); dim3 grid(nblk, 1, 1); hipLaunchKernelGGL(( fillImage_kernel), dim3(grid), dim3(block) , 0, 0, dstPix, numPix); } }
235522de4070db684091850158ccc90d6d111c5f.cu
#include "simple_implement.h" __global__ void fillImage_kernel(float4 * dstPix, uint maxInd) { unsigned ind = blockIdx.x*blockDim.x + threadIdx.x; int i; for(i=0; i < 4; i++) { if(ind * 4 + i >= maxInd) return; float r = (float)threadIdx.x/(float)blockDim.x; dstPix[ind * 4 + i] = make_float4(r, 1.f - r, 0.f, 1.f); } } extern "C" { void fillImage(float4 * dstPix, uint numPix) { dim3 block(512, 1, 1); unsigned nblk = iDivUp(numPix/4, 512); dim3 grid(nblk, 1, 1); fillImage_kernel<<< grid, block >>>(dstPix, numPix); } }
259544a840732c4bdb2d88d1a8519153c8239c4d.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include <C:/Program Files (x86)/Microsoft Visual Studio 14.0/VC/include/common/book.h>
#define N 64

// Each thread adds one element; the stride loop is kept from the original,
// fixed to use blockDim/gridDim (with blocks(N/16, N/16) and threads(16, 16)
// the grid covers the matrix exactly, so the body runs once per thread).
__global__ void add(int (*a)[N], int (*b)[N], int (*c)[N]) {
    int x = threadIdx.x + blockIdx.x * blockDim.x;
    int y = threadIdx.y + blockIdx.y * blockDim.y;
    while (x < N && y < N) {
        c[y][x] = a[y][x] + b[y][x];
        x += blockDim.x * gridDim.x;
        y += blockDim.y * gridDim.y;
    }
}

int main(void) {
    int a[N][N], b[N][N], c[N][N];
    int (*dev_a)[N], (*dev_b)[N], (*dev_c)[N];
    int i, j;
    dim3 blocks(N / 16, N / 16);
    dim3 threads(16, 16);

    // allocate the memory on the GPU
    HANDLE_ERROR(hipMalloc((void**)&dev_a, N * N * sizeof(int)));
    HANDLE_ERROR(hipMalloc((void**)&dev_b, N * N * sizeof(int)));
    HANDLE_ERROR(hipMalloc((void**)&dev_c, N * N * sizeof(int)));

    // initialize the matrices that will be added
    for (i = 0; i < N; i++) {
        for (j = 0; j < N; j++) {
            a[i][j] = i * N + j;
            b[i][j] = 2 * (i * N + j);
        }
    }

    // copy the initialized matrices a and b to the GPU
    HANDLE_ERROR(hipMemcpy(dev_a, a, N * N * sizeof(int), hipMemcpyHostToDevice));
    HANDLE_ERROR(hipMemcpy(dev_b, b, N * N * sizeof(int), hipMemcpyHostToDevice));

    // launch the addition kernel on the GPU
    hipLaunchKernelGGL(add, blocks, threads, 0, 0, dev_a, dev_b, dev_c);

    // copy the sum of the two matrices back to the CPU
    HANDLE_ERROR(hipMemcpy(c, dev_c, N * N * sizeof(int), hipMemcpyDeviceToHost));

    // display the result of the addition
    for (i = 0; i < N; i++) {
        for (j = 0; j < N; j++) {
            printf("%d+%d=%d\n", a[i][j], b[i][j], c[i][j]);
        }
    }

    hipFree(dev_a);
    hipFree(dev_b);
    hipFree(dev_c);
    return 0;
}
259544a840732c4bdb2d88d1a8519153c8239c4d.cu
#include <stdio.h>
#include <C:/Program Files (x86)/Microsoft Visual Studio 14.0/VC/include/common/book.h>
#define N 64

// Each thread adds one element; the stride loop is kept from the original,
// fixed to use blockDim/gridDim (with blocks(N/16, N/16) and threads(16, 16)
// the grid covers the matrix exactly, so the body runs once per thread).
__global__ void add(int (*a)[N], int (*b)[N], int (*c)[N]) {
    int x = threadIdx.x + blockIdx.x * blockDim.x;
    int y = threadIdx.y + blockIdx.y * blockDim.y;
    while (x < N && y < N) {
        c[y][x] = a[y][x] + b[y][x];
        x += blockDim.x * gridDim.x;
        y += blockDim.y * gridDim.y;
    }
}

int main(void) {
    int a[N][N], b[N][N], c[N][N];
    int (*dev_a)[N], (*dev_b)[N], (*dev_c)[N];
    int i, j;
    dim3 blocks(N / 16, N / 16);
    dim3 threads(16, 16);

    // allocate the memory on the GPU
    HANDLE_ERROR(cudaMalloc((void**)&dev_a, N * N * sizeof(int)));
    HANDLE_ERROR(cudaMalloc((void**)&dev_b, N * N * sizeof(int)));
    HANDLE_ERROR(cudaMalloc((void**)&dev_c, N * N * sizeof(int)));

    // initialize the matrices that will be added
    for (i = 0; i < N; i++) {
        for (j = 0; j < N; j++) {
            a[i][j] = i * N + j;
            b[i][j] = 2 * (i * N + j);
        }
    }

    // copy the initialized matrices a and b to the GPU
    HANDLE_ERROR(cudaMemcpy(dev_a, a, N * N * sizeof(int), cudaMemcpyHostToDevice));
    HANDLE_ERROR(cudaMemcpy(dev_b, b, N * N * sizeof(int), cudaMemcpyHostToDevice));

    // launch the addition kernel on the GPU
    add<<<blocks, threads>>>(dev_a, dev_b, dev_c);

    // copy the sum of the two matrices back to the CPU
    HANDLE_ERROR(cudaMemcpy(c, dev_c, N * N * sizeof(int), cudaMemcpyDeviceToHost));

    // display the result of the addition
    for (i = 0; i < N; i++) {
        for (j = 0; j < N; j++) {
            printf("%d+%d=%d\n", a[i][j], b[i][j], c[i][j]);
        }
    }

    cudaFree(dev_a);
    cudaFree(dev_b);
    cudaFree(dev_c);
    return 0;
}
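With blocks(N/16, N/16) and threads(16, 16) the grid above covers the N x N matrix exactly, so the stride loop executes once per thread. When a deliberately smaller grid has to cover a larger domain, the usual shape is a nested grid-stride loop over both axes; the kernel below is a generic sketch of that pattern, not part of the file above.

// Grid-stride traversal of an n x n domain stored row-major in flat arrays.
__global__ void addGridStride(const int* a, const int* b, int* c, int n) {
    for (int y = blockIdx.y * blockDim.y + threadIdx.y; y < n;
         y += blockDim.y * gridDim.y) {
        for (int x = blockIdx.x * blockDim.x + threadIdx.x; x < n;
             x += blockDim.x * gridDim.x) {
            c[y * n + x] = a[y * n + x] + b[y * n + x];
        }
    }
}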
f7b410719f5fb36f146cc621aa6b4649865ccb94.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // Copyright (c) Facebook, Inc. and its affiliates. All rights reserved. #include <ATen/ATen.h> #include <torch/extension.h> // Kernel for inputs_packed of shape (F, D), where D > 1 template <typename scalar_t> __global__ void PackedToPaddedKernel( const scalar_t* __restrict__ inputs_packed, const int64_t* __restrict__ first_idxs, scalar_t* __restrict__ inputs_padded, const size_t batch_size, const size_t max_size, const size_t num_inputs, const size_t D) { // Batch elements split evenly across blocks (num blocks = batch_size) and // values for each element split across threads in the block. Each thread adds // the values of its respective input elements to the global inputs_padded // tensor. const size_t tid = threadIdx.x; const size_t batch_idx = blockIdx.x; const int64_t start = first_idxs[batch_idx]; const int64_t end = batch_idx + 1 < batch_size ? first_idxs[batch_idx + 1] : num_inputs; const int num = end - start; for (size_t f = tid; f < num; f += blockDim.x) { for (size_t j = 0; j < D; ++j) { inputs_padded[batch_idx * max_size * D + f * D + j] = inputs_packed[(start + f) * D + j]; } } } // Kernel for inputs of shape (F, 1) template <typename scalar_t> __global__ void PackedToPaddedKernelD1( const scalar_t* __restrict__ inputs_packed, const int64_t* __restrict__ first_idxs, scalar_t* __restrict__ inputs_padded, const size_t batch_size, const size_t max_size, const size_t num_inputs) { // Batch elements split evenly across blocks (num blocks = batch_size) and // values for each element split across threads in the block. Each thread adds // the values of its respective input elements to the global inputs_padded // tensor. const size_t tid = threadIdx.x; const size_t batch_idx = blockIdx.x; const int64_t start = first_idxs[batch_idx]; const int64_t end = batch_idx + 1 < batch_size ? first_idxs[batch_idx + 1] : num_inputs; const int num = end - start; for (size_t f = tid; f < num; f += blockDim.x) { inputs_padded[batch_idx * max_size + f] = inputs_packed[start + f]; } } // Kernel for inputs_padded of shape (B, F, D), where D > 1 template <typename scalar_t> __global__ void PaddedToPackedKernel( const scalar_t* __restrict__ inputs_padded, const int64_t* __restrict__ first_idxs, scalar_t* __restrict__ inputs_packed, const size_t batch_size, const size_t max_size, const size_t num_inputs, const size_t D) { // Batch elements split evenly across blocks (num blocks = batch_size) and // values for each element split across threads in the block. Each thread adds // the values of its respective input elements to the global inputs_packed // tensor. const size_t tid = threadIdx.x; const size_t batch_idx = blockIdx.x; const int64_t start = first_idxs[batch_idx]; const int64_t end = batch_idx + 1 < batch_size ? first_idxs[batch_idx + 1] : num_inputs; const int num = end - start; for (size_t f = tid; f < num; f += blockDim.x) { for (size_t j = 0; j < D; ++j) { inputs_packed[(start + f) * D + j] = inputs_padded[batch_idx * max_size * D + f * D + j]; } } } // Kernel for inputs_padded of shape (B, F, 1) template <typename scalar_t> __global__ void PaddedToPackedKernelD1( const scalar_t* __restrict__ inputs_padded, const int64_t* __restrict__ first_idxs, scalar_t* __restrict__ inputs_packed, const size_t batch_size, const size_t max_size, const size_t num_inputs) { // Batch elements split evenly across blocks (num blocks = batch_size) and // values for each element split across threads in the block. 
Each thread adds // the values of its respective input elements to the global inputs_packed // tensor. const size_t tid = threadIdx.x; const size_t batch_idx = blockIdx.x; const int64_t start = first_idxs[batch_idx]; const int64_t end = batch_idx + 1 < batch_size ? first_idxs[batch_idx + 1] : num_inputs; const int num = end - start; for (size_t f = tid; f < num; f += blockDim.x) { inputs_packed[start + f] = inputs_padded[batch_idx * max_size + f]; } } at::Tensor PackedToPaddedCuda( const at::Tensor inputs_packed, const at::Tensor first_idxs, const int64_t max_size) { const int64_t num_inputs = inputs_packed.size(0); const int64_t batch_size = first_idxs.size(0); AT_ASSERTM( inputs_packed.dim() == 2, "inputs_packed must be a 2-dimensional tensor"); const int64_t D = inputs_packed.size(1); at::Tensor inputs_padded = at::zeros({batch_size, max_size, D}, inputs_packed.options()); const int threads = 512; const int blocks = batch_size; if (D == 1) { AT_DISPATCH_FLOATING_TYPES( inputs_packed.type(), "packed_to_padded_d1_kernel", ([&] { hipLaunchKernelGGL(( PackedToPaddedKernelD1<scalar_t>), dim3(blocks), dim3(threads), 0, 0, inputs_packed.data_ptr<scalar_t>(), first_idxs.data_ptr<int64_t>(), inputs_padded.data_ptr<scalar_t>(), batch_size, max_size, num_inputs); })); } else { AT_DISPATCH_FLOATING_TYPES( inputs_packed.type(), "packed_to_padded_kernel", ([&] { hipLaunchKernelGGL(( PackedToPaddedKernel<scalar_t>), dim3(blocks), dim3(threads), 0, 0, inputs_packed.data_ptr<scalar_t>(), first_idxs.data_ptr<int64_t>(), inputs_padded.data_ptr<scalar_t>(), batch_size, max_size, num_inputs, D); })); } return inputs_padded; } at::Tensor PaddedToPackedCuda( const at::Tensor inputs_padded, const at::Tensor first_idxs, const int64_t num_inputs) { const int64_t batch_size = inputs_padded.size(0); const int64_t max_size = inputs_padded.size(1); AT_ASSERTM(batch_size == first_idxs.size(0), "sizes mismatch"); AT_ASSERTM( inputs_padded.dim() == 3, "inputs_padded must be a 3-dimensional tensor"); const int64_t D = inputs_padded.size(2); at::Tensor inputs_packed = at::zeros({num_inputs, D}, inputs_padded.options()); const int threads = 512; const int blocks = batch_size; if (D == 1) { AT_DISPATCH_FLOATING_TYPES( inputs_padded.type(), "padded_to_packed_d1_kernel", ([&] { hipLaunchKernelGGL(( PaddedToPackedKernelD1<scalar_t>), dim3(blocks), dim3(threads), 0, 0, inputs_padded.data_ptr<scalar_t>(), first_idxs.data_ptr<int64_t>(), inputs_packed.data_ptr<scalar_t>(), batch_size, max_size, num_inputs); })); } else { AT_DISPATCH_FLOATING_TYPES( inputs_padded.type(), "padded_to_packed_kernel", ([&] { hipLaunchKernelGGL(( PaddedToPackedKernel<scalar_t>), dim3(blocks), dim3(threads), 0, 0, inputs_padded.data_ptr<scalar_t>(), first_idxs.data_ptr<int64_t>(), inputs_packed.data_ptr<scalar_t>(), batch_size, max_size, num_inputs, D); })); } return inputs_packed; }
f7b410719f5fb36f146cc621aa6b4649865ccb94.cu
// Copyright (c) Facebook, Inc. and its affiliates. All rights reserved. #include <ATen/ATen.h> #include <torch/extension.h> // Kernel for inputs_packed of shape (F, D), where D > 1 template <typename scalar_t> __global__ void PackedToPaddedKernel( const scalar_t* __restrict__ inputs_packed, const int64_t* __restrict__ first_idxs, scalar_t* __restrict__ inputs_padded, const size_t batch_size, const size_t max_size, const size_t num_inputs, const size_t D) { // Batch elements split evenly across blocks (num blocks = batch_size) and // values for each element split across threads in the block. Each thread adds // the values of its respective input elements to the global inputs_padded // tensor. const size_t tid = threadIdx.x; const size_t batch_idx = blockIdx.x; const int64_t start = first_idxs[batch_idx]; const int64_t end = batch_idx + 1 < batch_size ? first_idxs[batch_idx + 1] : num_inputs; const int num = end - start; for (size_t f = tid; f < num; f += blockDim.x) { for (size_t j = 0; j < D; ++j) { inputs_padded[batch_idx * max_size * D + f * D + j] = inputs_packed[(start + f) * D + j]; } } } // Kernel for inputs of shape (F, 1) template <typename scalar_t> __global__ void PackedToPaddedKernelD1( const scalar_t* __restrict__ inputs_packed, const int64_t* __restrict__ first_idxs, scalar_t* __restrict__ inputs_padded, const size_t batch_size, const size_t max_size, const size_t num_inputs) { // Batch elements split evenly across blocks (num blocks = batch_size) and // values for each element split across threads in the block. Each thread adds // the values of its respective input elements to the global inputs_padded // tensor. const size_t tid = threadIdx.x; const size_t batch_idx = blockIdx.x; const int64_t start = first_idxs[batch_idx]; const int64_t end = batch_idx + 1 < batch_size ? first_idxs[batch_idx + 1] : num_inputs; const int num = end - start; for (size_t f = tid; f < num; f += blockDim.x) { inputs_padded[batch_idx * max_size + f] = inputs_packed[start + f]; } } // Kernel for inputs_padded of shape (B, F, D), where D > 1 template <typename scalar_t> __global__ void PaddedToPackedKernel( const scalar_t* __restrict__ inputs_padded, const int64_t* __restrict__ first_idxs, scalar_t* __restrict__ inputs_packed, const size_t batch_size, const size_t max_size, const size_t num_inputs, const size_t D) { // Batch elements split evenly across blocks (num blocks = batch_size) and // values for each element split across threads in the block. Each thread adds // the values of its respective input elements to the global inputs_packed // tensor. const size_t tid = threadIdx.x; const size_t batch_idx = blockIdx.x; const int64_t start = first_idxs[batch_idx]; const int64_t end = batch_idx + 1 < batch_size ? first_idxs[batch_idx + 1] : num_inputs; const int num = end - start; for (size_t f = tid; f < num; f += blockDim.x) { for (size_t j = 0; j < D; ++j) { inputs_packed[(start + f) * D + j] = inputs_padded[batch_idx * max_size * D + f * D + j]; } } } // Kernel for inputs_padded of shape (B, F, 1) template <typename scalar_t> __global__ void PaddedToPackedKernelD1( const scalar_t* __restrict__ inputs_padded, const int64_t* __restrict__ first_idxs, scalar_t* __restrict__ inputs_packed, const size_t batch_size, const size_t max_size, const size_t num_inputs) { // Batch elements split evenly across blocks (num blocks = batch_size) and // values for each element split across threads in the block. Each thread adds // the values of its respective input elements to the global inputs_packed // tensor. 
const size_t tid = threadIdx.x; const size_t batch_idx = blockIdx.x; const int64_t start = first_idxs[batch_idx]; const int64_t end = batch_idx + 1 < batch_size ? first_idxs[batch_idx + 1] : num_inputs; const int num = end - start; for (size_t f = tid; f < num; f += blockDim.x) { inputs_packed[start + f] = inputs_padded[batch_idx * max_size + f]; } } at::Tensor PackedToPaddedCuda( const at::Tensor inputs_packed, const at::Tensor first_idxs, const int64_t max_size) { const int64_t num_inputs = inputs_packed.size(0); const int64_t batch_size = first_idxs.size(0); AT_ASSERTM( inputs_packed.dim() == 2, "inputs_packed must be a 2-dimensional tensor"); const int64_t D = inputs_packed.size(1); at::Tensor inputs_padded = at::zeros({batch_size, max_size, D}, inputs_packed.options()); const int threads = 512; const int blocks = batch_size; if (D == 1) { AT_DISPATCH_FLOATING_TYPES( inputs_packed.type(), "packed_to_padded_d1_kernel", ([&] { PackedToPaddedKernelD1<scalar_t><<<blocks, threads>>>( inputs_packed.data_ptr<scalar_t>(), first_idxs.data_ptr<int64_t>(), inputs_padded.data_ptr<scalar_t>(), batch_size, max_size, num_inputs); })); } else { AT_DISPATCH_FLOATING_TYPES( inputs_packed.type(), "packed_to_padded_kernel", ([&] { PackedToPaddedKernel<scalar_t><<<blocks, threads>>>( inputs_packed.data_ptr<scalar_t>(), first_idxs.data_ptr<int64_t>(), inputs_padded.data_ptr<scalar_t>(), batch_size, max_size, num_inputs, D); })); } return inputs_padded; } at::Tensor PaddedToPackedCuda( const at::Tensor inputs_padded, const at::Tensor first_idxs, const int64_t num_inputs) { const int64_t batch_size = inputs_padded.size(0); const int64_t max_size = inputs_padded.size(1); AT_ASSERTM(batch_size == first_idxs.size(0), "sizes mismatch"); AT_ASSERTM( inputs_padded.dim() == 3, "inputs_padded must be a 3-dimensional tensor"); const int64_t D = inputs_padded.size(2); at::Tensor inputs_packed = at::zeros({num_inputs, D}, inputs_padded.options()); const int threads = 512; const int blocks = batch_size; if (D == 1) { AT_DISPATCH_FLOATING_TYPES( inputs_padded.type(), "padded_to_packed_d1_kernel", ([&] { PaddedToPackedKernelD1<scalar_t><<<blocks, threads>>>( inputs_padded.data_ptr<scalar_t>(), first_idxs.data_ptr<int64_t>(), inputs_packed.data_ptr<scalar_t>(), batch_size, max_size, num_inputs); })); } else { AT_DISPATCH_FLOATING_TYPES( inputs_padded.type(), "padded_to_packed_kernel", ([&] { PaddedToPackedKernel<scalar_t><<<blocks, threads>>>( inputs_padded.data_ptr<scalar_t>(), first_idxs.data_ptr<int64_t>(), inputs_packed.data_ptr<scalar_t>(), batch_size, max_size, num_inputs, D); })); } return inputs_packed; }
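The two kernel families above convert between a packed (F, D) layout, in which the rows of all batch elements are stacked and first_idxs[b] marks where element b starts, and a zero-filled padded (B, max_size, D) layout. The host-only sketch below reproduces the same index arithmetic for D = 1; the sizes and values are made up for illustration.

#include <cstdio>
#include <vector>

int main() {
    std::vector<float> packed = {1, 2, 3, 4, 5, 6};  // F = 6, D = 1
    std::vector<long> first_idxs = {0, 2, 5};        // per-element lengths 2, 3, 1
    const long B = 3, max_size = 3, F = (long)packed.size();
    std::vector<float> padded(B * max_size, 0.f);
    for (long b = 0; b < B; ++b) {
        long start = first_idxs[b];
        long end = (b + 1 < B) ? first_idxs[b + 1] : F;  // same convention as the kernels
        for (long f = 0; f < end - start; ++f)
            padded[b * max_size + f] = packed[start + f];
    }
    for (long b = 0; b < B; ++b) {
        for (long m = 0; m < max_size; ++m) printf("%4.1f ", padded[b * max_size + m]);
        printf("\n");  // prints 1 2 0 / 3 4 5 / 6 0 0
    }
    return 0;
}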
10cc711018b5460389ff3e349c281746c30054a1.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "device_launch_parameters.h" #include"iostream" #include <stdio.h> #include "../common/book.h" #include"time.h" #define SIZE (100*1024*1024) int main( void ) { unsigned char *buffer = (unsigned char*)big_random_block( SIZE ); // capture the start time clock_t start, stop; start = clock(); unsigned int histo[256]; for (int i=0; i<256; i++) histo[i] = 0; for (int i=0; i<SIZE; i++) histo[buffer[i]]++; stop = clock(); float elapsedTime = (float)(stop - start) / (float)CLOCKS_PER_SEC * 1000.0f; printf( "Time to generate: %3.1f ms\n", elapsedTime ); long histoCount = 0; for (int i=0; i<256; i++) { histoCount += histo[i]; } printf( "Histogram Sum: %ld\n", histoCount ); free( buffer ); getchar(); return 0; }
10cc711018b5460389ff3e349c281746c30054a1.cu
#include "cuda_runtime.h" #include "device_launch_parameters.h" #include"iostream" #include <stdio.h> #include "../common/book.h" #include"time.h" #define SIZE (100*1024*1024) int main( void ) { unsigned char *buffer = (unsigned char*)big_random_block( SIZE ); // capture the start time clock_t start, stop; start = clock(); unsigned int histo[256]; for (int i=0; i<256; i++) histo[i] = 0; for (int i=0; i<SIZE; i++) histo[buffer[i]]++; stop = clock(); float elapsedTime = (float)(stop - start) / (float)CLOCKS_PER_SEC * 1000.0f; printf( "Time to generate: %3.1f ms\n", elapsedTime ); long histoCount = 0; for (int i=0; i<256; i++) { histoCount += histo[i]; } printf( "Histogram Sum: %ld\n", histoCount ); free( buffer ); getchar(); return 0; }
6d73599bd205963b0b4246d7c7beeaacbb562ec1.hip
// !!! This is a file automatically generated by hipify!!! #include <bits/stdc++.h> #include <hip/hip_runtime.h> #include "parallel_utility.cu" #include "matrix_utility.cu" #define DATA_DIM 3072 #define TRAIN_SAMPLE 500 #define TEST_SAMPLE 100 #define OUTPUT_LAYER_NODES 1 #define HIDDEN_LAYER_NODES 5 #define EPOCH 200 #define LEARNING_RATE_IP_HIDDEN 1 #define LEARNING_RATE_HIDDEN_OP 1 #define IFOR(v, s, e) for(int v = s; v < e; ++v) #define UFOR(v, s, e) for(unsigned v = s; v < e; v++) using namespace std; ParallelUtility pu = ParallelUtility(); MatrixUtility mu = MatrixUtility(); class Initializer { public: void load_data(double **(&data), double *labels, int row, int col, const char *filename){ float ch; FILE *fp = fopen(filename, "r"); if (!fp) return; fscanf(fp, "%f", &ch); double **dataset = NULL; mu.init_2D_mat(dataset, row, col); int i = 0, j = 0; for (int ct = 0; ct < (row * col); ct++) dataset[i][j++] = ch; if (j == col) { j = 0; i++; } fscanf(fp, "%f", &ch); } fclose(fp); fp = NULL IFOR(k, 0, row) { labels[k] = dataset[k][col-1]; for (j = 0; j < col - 1; ++j) data[k][j] = dataset[k][j]; } } void init_weights(double **(&w), int row, int col) { IFOR(i, 0, row) IFOR(j, 0, col) w[i][j] = ((double)rand() / (double)RAND_MAX); } void init_biases(double *(&b), int row) { IFOR(i, 0, row) b[i] = ((double)rand() / (double)RAND_MAX); } }; class NeuralNetwork { public: double **dsigmoid(double **a, int r, int c) { double **one_minus_sigmoid_a = NULL; double **sigmoid_a = sigmoid(a, r, c); mu.init_2D_mat(one_minus_sigmoid_a, r, c); IFOR(i, 0, r) IFOR(j, 0, c) one_minus_sigmoid_a[i][j] = 1 - sigmoid_a[i][j]; return pu.cu_mat_elementwise_multiply_helper(sigmoid_a, one_minus_sigmoid_a, r, c); } double **sigmoid(double **mat, int r, int c) { double **s; mu.init_2D_mat(s, r, c); UFOR(i, 0, r) { UFOR(j, 0, c) { s[i][j] = 1 / (1 + exp(-mat[i][j])); } } return s; } void back_prop(double **(&X), double *(&Y), double **(&W1), double **(&W2), double **(&A1), double **(&A2), double **(&dW1), double **(&dW2), double **(&dA1), double **(&dA2), double *(&db1), double *(&db2)) { double **one; mu.init_2D_mat(one, TRAIN_SAMPLE, HIDDEN_LAYER_NODES); UFOR(i, 0, TRAIN_SAMPLE) UFOR(j, 0, HIDDEN_LAYER_NODES) one[i][j] = 1; double **dZ2 = mu.diff_2D_mat_1D_mat(A2, Y, TRAIN_SAMPLE, OUTPUT_LAYER_NODES); double **dZ2_trans = pu.cuda_mat_transpose_helper(dZ2, TRAIN_SAMPLE, OUTPUT_LAYER_NODES); dW2 = pu.cuda_mat_multiply_helper(dZ2, A1, OUTPUT_LAYER_NODES, TRAIN_SAMPLE, TRAIN_SAMPLE, HIDDEN_LAYER_NODES); dW2 = pu.cu_mat_scalar_multiply_helper(dW2, 1/TRAIN_SAMPLE, OUTPUT_LAYER_NODES, HIDDEN_LAYER_NODES); db2 = mu.sum_across_2nd_dim(dZ2_trans, OUTPUT_LAYER_NODES, TRAIN_SAMPLE); db2 = pu.cu_vec_scalar_multiply_helper(db2, 1/TRAIN_SAMPLE, OUTPUT_LAYER_NODES); double **A1_square = pu.cu_mat_elementwise_multiply_helper(A1, A1, TRAIN_SAMPLE, HIDDEN_LAYER_NODES); A1_square = pu.cu_mat_scalar_multiply_helper(A1_square, -1, TRAIN_SAMPLE, HIDDEN_LAYER_NODES); A1_square = pu.cu_addition_helper(one, A1_square, TRAIN_SAMPLE, HIDDEN_LAYER_NODES); double **W2xdZ2 = pu.cuda_mat_multiply_helper(dZ2, W2, TRAIN_SAMPLE, OUTPUT_LAYER_NODES, OUTPUT_LAYER_NODES, HIDDEN_LAYER_NODES); double **derivative_Z1 = dsigmoid(A1, TRAIN_SAMPLE, HIDDEN_LAYER_NODES); double **dZ1 = pu.cu_mat_elementwise_multiply_helper(derivative_Z1, W2xdZ2, TRAIN_SAMPLE, HIDDEN_LAYER_NODES); double **dZ1_trans = pu.cuda_mat_transpose_helper(dZ1, TRAIN_SAMPLE, HIDDEN_LAYER_NODES); dW1 = pu.cuda_mat_multiply_helper(dZ1_trans, X, HIDDEN_LAYER_NODES, TRAIN_SAMPLE, TRAIN_SAMPLE, 
DATA_DIM); dW1 = pu.cu_mat_scalar_multiply_helper(dW1, 1/TRAIN_SAMPLE, HIDDEN_LAYER_NODES, DATA_DIM); db1 = mu.sum_across_2nd_dim(dZ1_trans, HIDDEN_LAYER_NODES, TRAIN_SAMPLE); db1 = pu.cu_vec_scalar_multiply_helper(db1, 1/TRAIN_SAMPLE, HIDDEN_LAYER_NODES); } void forward_prop(double **(&X), double **(&W1), double **(&W2), double *(&b1), double *(&b2), double **(&Z1), double **(&Z2), double **(&A1), double **(&A2), int examples) { double **W1_trans = NULL; W1_trans = pu.cuda_mat_transpose_helper(W1, HIDDEN_LAYER_NODES, DATA_DIM); Z1 = pu.cuda_mat_multiply_helper(X, W1_trans, examples, DATA_DIM, DATA_DIM, HIDDEN_LAYER_NODES); A1 = sigmoid(Z1, examples, HIDDEN_LAYER_NODES); A1 = mu.add_2D_mat_1D_mat(A1, b1, examples, HIDDEN_LAYER_NODES); double **W2_trans = pu.cuda_mat_transpose_helper(W2, OUTPUT_LAYER_NODES, HIDDEN_LAYER_NODES); Z2 = pu.cuda_mat_multiply_helper(A1, W2_trans, examples, HIDDEN_LAYER_NODES, HIDDEN_LAYER_NODES, OUTPUT_LAYER_NODES); A2 = sigmoid(Z2, examples, OUTPUT_LAYER_NODES); A2 = mu.add_2D_mat_1D_mat(A2, b2, examples, OUTPUT_LAYER_NODES); } void update_parameter(double **(&W1), double **(&W2), double *(&b1), double *(&b2), double **(&dW1), double **(&dW2), double *(&db1), double *(&db2)) { dW2 = pu.cu_mat_scalar_multiply_helper(dW2, (-1 * LEARNING_RATE_HIDDEN_OP), OUTPUT_LAYER_NODES, HIDDEN_LAYER_NODES); dW1 = pu.cu_mat_scalar_multiply_helper(dW1, (-1 * LEARNING_RATE_IP_HIDDEN), HIDDEN_LAYER_NODES, DATA_DIM); W1 = pu.cu_addition_helper(W1, dW1, HIDDEN_LAYER_NODES, DATA_DIM); W2 = pu.cu_addition_helper (W2, dW2, OUTPUT_LAYER_NODES, HIDDEN_LAYER_NODES); db1 = pu.cu_vec_scalar_multiply_helper(db1, (-1 * LEARNING_RATE_IP_HIDDEN), HIDDEN_LAYER_NODES); db2 = pu.cu_vec_scalar_multiply_helper(db2, (-1 * LEARNING_RATE_HIDDEN_OP), OUTPUT_LAYER_NODES); b1 = pu.cu_vec_addition_helper(b1, db1, HIDDEN_LAYER_NODES); b2 = pu.cu_vec_addition_helper(b2, db2, OUTPUT_LAYER_NODES); } };
6d73599bd205963b0b4246d7c7beeaacbb562ec1.cu
#include <bits/stdc++.h> #include <cuda.h> #include "parallel_utility.cu" #include "matrix_utility.cu" #define DATA_DIM 3072 #define TRAIN_SAMPLE 500 #define TEST_SAMPLE 100 #define OUTPUT_LAYER_NODES 1 #define HIDDEN_LAYER_NODES 5 #define EPOCH 200 #define LEARNING_RATE_IP_HIDDEN 1 #define LEARNING_RATE_HIDDEN_OP 1 #define IFOR(v, s, e) for(int v = s; v < e; ++v) #define UFOR(v, s, e) for(unsigned v = s; v < e; v++) using namespace std; ParallelUtility pu = ParallelUtility(); MatrixUtility mu = MatrixUtility(); class Initializer { public: void load_data(double **(&data), double *labels, int row, int col, const char *filename){ float ch; FILE *fp = fopen(filename, "r"); if (!fp) return; fscanf(fp, "%f", &ch); double **dataset = NULL; mu.init_2D_mat(dataset, row, col); int i = 0, j = 0; for (int ct = 0; ct < (row * col); ct++) dataset[i][j++] = ch; if (j == col) { j = 0; i++; } fscanf(fp, "%f", &ch); } fclose(fp); fp = NULL IFOR(k, 0, row) { labels[k] = dataset[k][col-1]; for (j = 0; j < col - 1; ++j) data[k][j] = dataset[k][j]; } } void init_weights(double **(&w), int row, int col) { IFOR(i, 0, row) IFOR(j, 0, col) w[i][j] = ((double)rand() / (double)RAND_MAX); } void init_biases(double *(&b), int row) { IFOR(i, 0, row) b[i] = ((double)rand() / (double)RAND_MAX); } }; class NeuralNetwork { public: double **dsigmoid(double **a, int r, int c) { double **one_minus_sigmoid_a = NULL; double **sigmoid_a = sigmoid(a, r, c); mu.init_2D_mat(one_minus_sigmoid_a, r, c); IFOR(i, 0, r) IFOR(j, 0, c) one_minus_sigmoid_a[i][j] = 1 - sigmoid_a[i][j]; return pu.cu_mat_elementwise_multiply_helper(sigmoid_a, one_minus_sigmoid_a, r, c); } double **sigmoid(double **mat, int r, int c) { double **s; mu.init_2D_mat(s, r, c); UFOR(i, 0, r) { UFOR(j, 0, c) { s[i][j] = 1 / (1 + exp(-mat[i][j])); } } return s; } void back_prop(double **(&X), double *(&Y), double **(&W1), double **(&W2), double **(&A1), double **(&A2), double **(&dW1), double **(&dW2), double **(&dA1), double **(&dA2), double *(&db1), double *(&db2)) { double **one; mu.init_2D_mat(one, TRAIN_SAMPLE, HIDDEN_LAYER_NODES); UFOR(i, 0, TRAIN_SAMPLE) UFOR(j, 0, HIDDEN_LAYER_NODES) one[i][j] = 1; double **dZ2 = mu.diff_2D_mat_1D_mat(A2, Y, TRAIN_SAMPLE, OUTPUT_LAYER_NODES); double **dZ2_trans = pu.cuda_mat_transpose_helper(dZ2, TRAIN_SAMPLE, OUTPUT_LAYER_NODES); dW2 = pu.cuda_mat_multiply_helper(dZ2, A1, OUTPUT_LAYER_NODES, TRAIN_SAMPLE, TRAIN_SAMPLE, HIDDEN_LAYER_NODES); dW2 = pu.cu_mat_scalar_multiply_helper(dW2, 1/TRAIN_SAMPLE, OUTPUT_LAYER_NODES, HIDDEN_LAYER_NODES); db2 = mu.sum_across_2nd_dim(dZ2_trans, OUTPUT_LAYER_NODES, TRAIN_SAMPLE); db2 = pu.cu_vec_scalar_multiply_helper(db2, 1/TRAIN_SAMPLE, OUTPUT_LAYER_NODES); double **A1_square = pu.cu_mat_elementwise_multiply_helper(A1, A1, TRAIN_SAMPLE, HIDDEN_LAYER_NODES); A1_square = pu.cu_mat_scalar_multiply_helper(A1_square, -1, TRAIN_SAMPLE, HIDDEN_LAYER_NODES); A1_square = pu.cu_addition_helper(one, A1_square, TRAIN_SAMPLE, HIDDEN_LAYER_NODES); double **W2xdZ2 = pu.cuda_mat_multiply_helper(dZ2, W2, TRAIN_SAMPLE, OUTPUT_LAYER_NODES, OUTPUT_LAYER_NODES, HIDDEN_LAYER_NODES); double **derivative_Z1 = dsigmoid(A1, TRAIN_SAMPLE, HIDDEN_LAYER_NODES); double **dZ1 = pu.cu_mat_elementwise_multiply_helper(derivative_Z1, W2xdZ2, TRAIN_SAMPLE, HIDDEN_LAYER_NODES); double **dZ1_trans = pu.cuda_mat_transpose_helper(dZ1, TRAIN_SAMPLE, HIDDEN_LAYER_NODES); dW1 = pu.cuda_mat_multiply_helper(dZ1_trans, X, HIDDEN_LAYER_NODES, TRAIN_SAMPLE, TRAIN_SAMPLE, DATA_DIM); dW1 = pu.cu_mat_scalar_multiply_helper(dW1, 
1/TRAIN_SAMPLE, HIDDEN_LAYER_NODES, DATA_DIM); db1 = mu.sum_across_2nd_dim(dZ1_trans, HIDDEN_LAYER_NODES, TRAIN_SAMPLE); db1 = pu.cu_vec_scalar_multiply_helper(db1, 1/TRAIN_SAMPLE, HIDDEN_LAYER_NODES); } void forward_prop(double **(&X), double **(&W1), double **(&W2), double *(&b1), double *(&b2), double **(&Z1), double **(&Z2), double **(&A1), double **(&A2), int examples) { double **W1_trans = NULL; W1_trans = pu.cuda_mat_transpose_helper(W1, HIDDEN_LAYER_NODES, DATA_DIM); Z1 = pu.cuda_mat_multiply_helper(X, W1_trans, examples, DATA_DIM, DATA_DIM, HIDDEN_LAYER_NODES); A1 = sigmoid(Z1, examples, HIDDEN_LAYER_NODES); A1 = mu.add_2D_mat_1D_mat(A1, b1, examples, HIDDEN_LAYER_NODES); double **W2_trans = pu.cuda_mat_transpose_helper(W2, OUTPUT_LAYER_NODES, HIDDEN_LAYER_NODES); Z2 = pu.cuda_mat_multiply_helper(A1, W2_trans, examples, HIDDEN_LAYER_NODES, HIDDEN_LAYER_NODES, OUTPUT_LAYER_NODES); A2 = sigmoid(Z2, examples, OUTPUT_LAYER_NODES); A2 = mu.add_2D_mat_1D_mat(A2, b2, examples, OUTPUT_LAYER_NODES); } void update_parameter(double **(&W1), double **(&W2), double *(&b1), double *(&b2), double **(&dW1), double **(&dW2), double *(&db1), double *(&db2)) { dW2 = pu.cu_mat_scalar_multiply_helper(dW2, (-1 * LEARNING_RATE_HIDDEN_OP), OUTPUT_LAYER_NODES, HIDDEN_LAYER_NODES); dW1 = pu.cu_mat_scalar_multiply_helper(dW1, (-1 * LEARNING_RATE_IP_HIDDEN), HIDDEN_LAYER_NODES, DATA_DIM); W1 = pu.cu_addition_helper(W1, dW1, HIDDEN_LAYER_NODES, DATA_DIM); W2 = pu.cu_addition_helper (W2, dW2, OUTPUT_LAYER_NODES, HIDDEN_LAYER_NODES); db1 = pu.cu_vec_scalar_multiply_helper(db1, (-1 * LEARNING_RATE_IP_HIDDEN), HIDDEN_LAYER_NODES); db2 = pu.cu_vec_scalar_multiply_helper(db2, (-1 * LEARNING_RATE_HIDDEN_OP), OUTPUT_LAYER_NODES); b1 = pu.cu_vec_addition_helper(b1, db1, HIDDEN_LAYER_NODES); b2 = pu.cu_vec_addition_helper(b2, db2, OUTPUT_LAYER_NODES); } };
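Two details of the training code above are worth spelling out. First, 1/TRAIN_SAMPLE in the gradient-scaling calls is integer division of two int constants and evaluates to 0, so 1.0/TRAIN_SAMPLE is presumably what was intended. Second, dsigmoid relies on the identity sigmoid'(z) = sigmoid(z) * (1 - sigmoid(z)); the short host-only check below confirms it numerically and is purely illustrative.

#include <cmath>
#include <cstdio>

int main() {
    for (double z = -2.0; z <= 2.0; z += 1.0) {
        double s = 1.0 / (1.0 + std::exp(-z));
        double analytic = s * (1.0 - s);   // sigmoid(z) * (1 - sigmoid(z))
        double h = 1e-6;                   // central finite difference
        double numeric = ((1.0 / (1.0 + std::exp(-(z + h)))) -
                          (1.0 / (1.0 + std::exp(-(z - h))))) / (2.0 * h);
        printf("z=%+.1f  analytic=%.6f  numeric=%.6f\n", z, analytic, numeric);
    }
    return 0;
}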
7fa5c11c164b34208b30855f6b1d9777483d76f1.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <vector> #include "gtest/gtest.h" #include "caffe/blob.hpp" #include "caffe/common.hpp" #include "caffe/filler.hpp" #include "caffe/layers/im2col_layer.hpp" #include "caffe/util/im2col.hpp" #include "caffe/test/test_caffe_main.hpp" namespace caffe9 { // Forward declare kernel functions template <typename Dtype> __global__ void im2col_gpu_kernel(const int n, const Dtype* data_im, const int height, const int width, const int kernel_h, const int kernel_w, const int pad_h, const int pad_w, const int stride_h, const int stride_w, const int dilation_h, const int dilation_w, const int height_col, const int width_col, Dtype* data_col); template <typename Dtype, int num_axes> __global__ void im2col_nd_gpu_kernel(const int n, const Dtype* data_im, const int* im_shape, const int* col_shape, const int* kernel_shape, const int* pad, const int* stride, const int* dilation, Dtype* data_col); template <typename Dtype> class Im2colKernelTest : public GPUDeviceTest<Dtype> { protected: Im2colKernelTest() // big so launches > 1024 threads : blob_bottom_(new Blob<Dtype>(5, 500, 15, 15)), blob_kernel_shape_(new Blob<int>()), blob_stride_(new Blob<int>()), blob_pad_(new Blob<int>()), blob_dilation_(new Blob<int>()), blob_top_(new Blob<Dtype>()), blob_top_cpu_(new Blob<Dtype>()) { FillerParameter filler_param; GaussianFiller<Dtype> filler(filler_param); filler.Fill(this->blob_bottom_); vector<int> dim_blob_shape(1, 2); blob_kernel_shape_->Reshape(dim_blob_shape); blob_stride_->Reshape(dim_blob_shape); blob_pad_->Reshape(dim_blob_shape); blob_dilation_->Reshape(dim_blob_shape); height_ = blob_bottom_->height(); width_ = blob_bottom_->width(); channels_ = blob_bottom_->channels(); pad_ = 0; stride_ = 2; dilation_ = 3; kernel_size_ = 3; height_col_ = (height_ + 2 * pad_ - (dilation_ * (kernel_size_ - 1) + 1)) / stride_ + 1; width_col_ = (width_ + 2 * pad_ - (dilation_ * (kernel_size_ - 1) + 1)) / stride_ + 1; for (int i = 0; i < 2; ++i) { blob_kernel_shape_->mutable_cpu_data()[i] = kernel_size_; blob_stride_->mutable_cpu_data()[i] = stride_; blob_pad_->mutable_cpu_data()[i] = pad_; blob_dilation_->mutable_cpu_data()[i] = dilation_; } } virtual ~Im2colKernelTest() { delete blob_bottom_; delete blob_top_; delete blob_top_cpu_; delete blob_kernel_shape_; delete blob_stride_; delete blob_pad_; delete blob_dilation_; } Blob<int>* const blob_kernel_shape_; Blob<int>* const blob_stride_; Blob<int>* const blob_pad_; Blob<int>* const blob_dilation_; Blob<Dtype>* const blob_bottom_; Blob<Dtype>* const blob_top_; Blob<Dtype>* const blob_top_cpu_; int height_; int width_; int channels_; int pad_; int stride_; int dilation_; int kernel_size_; int height_col_; int width_col_; }; TYPED_TEST_CASE(Im2colKernelTest, TestDtypes); TYPED_TEST(Im2colKernelTest, Test2D) { // Reshape the blobs to correct size for im2col output this->blob_top_->Reshape(this->blob_bottom_->num(), this->channels_ * this->kernel_size_ * this->kernel_size_, this->height_col_, this->width_col_); this->blob_top_cpu_->Reshape(this->blob_bottom_->num(), this->channels_ * this->kernel_size_ * this->kernel_size_, this->height_col_, this->width_col_); const TypeParam* bottom_data = this->blob_bottom_->gpu_data(); TypeParam* top_data = this->blob_top_->mutable_gpu_data(); TypeParam* cpu_data = this->blob_top_cpu_->mutable_cpu_data(); // CPU Version for (int n = 0; n < this->blob_bottom_->num(); ++n) { im2col_cpu(this->blob_bottom_->cpu_data() + 
this->blob_bottom_->offset(n), this->channels_, this->height_, this->width_, this->kernel_size_, this->kernel_size_, this->pad_, this->pad_, this->stride_, this->stride_, this->dilation_, this->dilation_, cpu_data + this->blob_top_cpu_->offset(n)); } // GPU version int num_kernels = this->channels_ * this->height_col_ * this->width_col_; int default_grid_dim = CAFFE_GET_BLOCKS(num_kernels); // Launch with different grid sizes for (int grid_div = 2; grid_div <= 8; grid_div++) { for (int n = 0; n < this->blob_bottom_->num(); ++n) { int grid_dim = default_grid_dim/grid_div; // NOLINT_NEXT_LINE(whitespace/operators) hipLaunchKernelGGL(( im2col_gpu_kernel<TypeParam>), dim3(grid_dim), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, num_kernels, bottom_data + this->blob_bottom_->offset(n), this->height_, this->width_, this->kernel_size_, this->kernel_size_, this->pad_, this->pad_, this->stride_, this->stride_, this->dilation_, this->dilation_, this->height_col_, this->width_col_, top_data + this->blob_top_->offset(n)); CUDA_POST_KERNEL_CHECK; } // Compare results against CPU version for (int i = 0; i < this->blob_top_->count(); ++i) { TypeParam cpuval = cpu_data[i]; TypeParam gpuval = this->blob_top_->cpu_data()[i]; EXPECT_EQ(cpuval, gpuval); if (cpuval != gpuval) { break; } } } } TYPED_TEST(Im2colKernelTest, TestND) { // Reshape the blobs to correct size for im2col output this->blob_top_->Reshape(this->blob_bottom_->num(), this->channels_ * this->kernel_size_ * this->kernel_size_, this->height_col_, this->width_col_); this->blob_top_cpu_->ReshapeLike(*this->blob_top_); const TypeParam* bottom_data_cpu = this->blob_bottom_->cpu_data(); TypeParam* top_data_cpu = this->blob_top_cpu_->mutable_cpu_data(); // CPU Version for (int n = 0; n < this->blob_bottom_->num(); ++n) { im2col_nd_cpu(bottom_data_cpu + this->blob_bottom_->offset(n), 2, this->blob_bottom_->shape().data() + 1, this->blob_top_cpu_->shape().data() + 1, this->blob_kernel_shape_->cpu_data(), this->blob_pad_->cpu_data(), this->blob_stride_->cpu_data(), this->blob_dilation_->cpu_data(), top_data_cpu + this->blob_top_cpu_->offset(n)); } // GPU version int num_kernels = this->channels_ * this->height_col_ * this->width_col_; int default_grid_dim = CAFFE_GET_BLOCKS(num_kernels); const TypeParam* bottom_data_gpu = this->blob_bottom_->gpu_data(); // Launch with different grid sizes for (int grid_div = 2; grid_div <= 8; grid_div++) { for (int n = 0; n < this->blob_bottom_->num(); ++n) { const int grid_dim = default_grid_dim / grid_div; TypeParam* top_data_gpu = this->blob_top_->mutable_gpu_data(); // NOLINT_NEXT_LINE(whitespace/operators) hipLaunchKernelGGL(( im2col_nd_gpu_kernel<TypeParam, 2>), dim3(grid_dim), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, num_kernels, bottom_data_gpu + this->blob_bottom_->offset(n), this->blob_bottom_->gpu_shape() + 1, this->blob_top_->gpu_shape() + 1, this->blob_kernel_shape_->gpu_data(), this->blob_pad_->gpu_data(), this->blob_stride_->gpu_data(), this->blob_dilation_->gpu_data(), top_data_gpu + this->blob_top_->offset(n)); CUDA_POST_KERNEL_CHECK; } // Compare results against CPU version for (int i = 0; i < this->blob_top_->count(); ++i) { TypeParam cpuval = top_data_cpu[i]; TypeParam gpuval = this->blob_top_->cpu_data()[i]; EXPECT_EQ(cpuval, gpuval); if (cpuval != gpuval) { break; } } } } } // namespace caffe9
7fa5c11c164b34208b30855f6b1d9777483d76f1.cu
#include <vector> #include "gtest/gtest.h" #include "caffe/blob.hpp" #include "caffe/common.hpp" #include "caffe/filler.hpp" #include "caffe/layers/im2col_layer.hpp" #include "caffe/util/im2col.hpp" #include "caffe/test/test_caffe_main.hpp" namespace caffe9 { // Forward declare kernel functions template <typename Dtype> __global__ void im2col_gpu_kernel(const int n, const Dtype* data_im, const int height, const int width, const int kernel_h, const int kernel_w, const int pad_h, const int pad_w, const int stride_h, const int stride_w, const int dilation_h, const int dilation_w, const int height_col, const int width_col, Dtype* data_col); template <typename Dtype, int num_axes> __global__ void im2col_nd_gpu_kernel(const int n, const Dtype* data_im, const int* im_shape, const int* col_shape, const int* kernel_shape, const int* pad, const int* stride, const int* dilation, Dtype* data_col); template <typename Dtype> class Im2colKernelTest : public GPUDeviceTest<Dtype> { protected: Im2colKernelTest() // big so launches > 1024 threads : blob_bottom_(new Blob<Dtype>(5, 500, 15, 15)), blob_kernel_shape_(new Blob<int>()), blob_stride_(new Blob<int>()), blob_pad_(new Blob<int>()), blob_dilation_(new Blob<int>()), blob_top_(new Blob<Dtype>()), blob_top_cpu_(new Blob<Dtype>()) { FillerParameter filler_param; GaussianFiller<Dtype> filler(filler_param); filler.Fill(this->blob_bottom_); vector<int> dim_blob_shape(1, 2); blob_kernel_shape_->Reshape(dim_blob_shape); blob_stride_->Reshape(dim_blob_shape); blob_pad_->Reshape(dim_blob_shape); blob_dilation_->Reshape(dim_blob_shape); height_ = blob_bottom_->height(); width_ = blob_bottom_->width(); channels_ = blob_bottom_->channels(); pad_ = 0; stride_ = 2; dilation_ = 3; kernel_size_ = 3; height_col_ = (height_ + 2 * pad_ - (dilation_ * (kernel_size_ - 1) + 1)) / stride_ + 1; width_col_ = (width_ + 2 * pad_ - (dilation_ * (kernel_size_ - 1) + 1)) / stride_ + 1; for (int i = 0; i < 2; ++i) { blob_kernel_shape_->mutable_cpu_data()[i] = kernel_size_; blob_stride_->mutable_cpu_data()[i] = stride_; blob_pad_->mutable_cpu_data()[i] = pad_; blob_dilation_->mutable_cpu_data()[i] = dilation_; } } virtual ~Im2colKernelTest() { delete blob_bottom_; delete blob_top_; delete blob_top_cpu_; delete blob_kernel_shape_; delete blob_stride_; delete blob_pad_; delete blob_dilation_; } Blob<int>* const blob_kernel_shape_; Blob<int>* const blob_stride_; Blob<int>* const blob_pad_; Blob<int>* const blob_dilation_; Blob<Dtype>* const blob_bottom_; Blob<Dtype>* const blob_top_; Blob<Dtype>* const blob_top_cpu_; int height_; int width_; int channels_; int pad_; int stride_; int dilation_; int kernel_size_; int height_col_; int width_col_; }; TYPED_TEST_CASE(Im2colKernelTest, TestDtypes); TYPED_TEST(Im2colKernelTest, Test2D) { // Reshape the blobs to correct size for im2col output this->blob_top_->Reshape(this->blob_bottom_->num(), this->channels_ * this->kernel_size_ * this->kernel_size_, this->height_col_, this->width_col_); this->blob_top_cpu_->Reshape(this->blob_bottom_->num(), this->channels_ * this->kernel_size_ * this->kernel_size_, this->height_col_, this->width_col_); const TypeParam* bottom_data = this->blob_bottom_->gpu_data(); TypeParam* top_data = this->blob_top_->mutable_gpu_data(); TypeParam* cpu_data = this->blob_top_cpu_->mutable_cpu_data(); // CPU Version for (int n = 0; n < this->blob_bottom_->num(); ++n) { im2col_cpu(this->blob_bottom_->cpu_data() + this->blob_bottom_->offset(n), this->channels_, this->height_, this->width_, this->kernel_size_, this->kernel_size_, 
this->pad_, this->pad_, this->stride_, this->stride_, this->dilation_, this->dilation_, cpu_data + this->blob_top_cpu_->offset(n)); } // GPU version int num_kernels = this->channels_ * this->height_col_ * this->width_col_; int default_grid_dim = CAFFE_GET_BLOCKS(num_kernels); // Launch with different grid sizes for (int grid_div = 2; grid_div <= 8; grid_div++) { for (int n = 0; n < this->blob_bottom_->num(); ++n) { int grid_dim = default_grid_dim/grid_div; // NOLINT_NEXT_LINE(whitespace/operators) im2col_gpu_kernel<TypeParam><<<grid_dim, CAFFE_CUDA_NUM_THREADS>>>( num_kernels, bottom_data + this->blob_bottom_->offset(n), this->height_, this->width_, this->kernel_size_, this->kernel_size_, this->pad_, this->pad_, this->stride_, this->stride_, this->dilation_, this->dilation_, this->height_col_, this->width_col_, top_data + this->blob_top_->offset(n)); CUDA_POST_KERNEL_CHECK; } // Compare results against CPU version for (int i = 0; i < this->blob_top_->count(); ++i) { TypeParam cpuval = cpu_data[i]; TypeParam gpuval = this->blob_top_->cpu_data()[i]; EXPECT_EQ(cpuval, gpuval); if (cpuval != gpuval) { break; } } } } TYPED_TEST(Im2colKernelTest, TestND) { // Reshape the blobs to correct size for im2col output this->blob_top_->Reshape(this->blob_bottom_->num(), this->channels_ * this->kernel_size_ * this->kernel_size_, this->height_col_, this->width_col_); this->blob_top_cpu_->ReshapeLike(*this->blob_top_); const TypeParam* bottom_data_cpu = this->blob_bottom_->cpu_data(); TypeParam* top_data_cpu = this->blob_top_cpu_->mutable_cpu_data(); // CPU Version for (int n = 0; n < this->blob_bottom_->num(); ++n) { im2col_nd_cpu(bottom_data_cpu + this->blob_bottom_->offset(n), 2, this->blob_bottom_->shape().data() + 1, this->blob_top_cpu_->shape().data() + 1, this->blob_kernel_shape_->cpu_data(), this->blob_pad_->cpu_data(), this->blob_stride_->cpu_data(), this->blob_dilation_->cpu_data(), top_data_cpu + this->blob_top_cpu_->offset(n)); } // GPU version int num_kernels = this->channels_ * this->height_col_ * this->width_col_; int default_grid_dim = CAFFE_GET_BLOCKS(num_kernels); const TypeParam* bottom_data_gpu = this->blob_bottom_->gpu_data(); // Launch with different grid sizes for (int grid_div = 2; grid_div <= 8; grid_div++) { for (int n = 0; n < this->blob_bottom_->num(); ++n) { const int grid_dim = default_grid_dim / grid_div; TypeParam* top_data_gpu = this->blob_top_->mutable_gpu_data(); // NOLINT_NEXT_LINE(whitespace/operators) im2col_nd_gpu_kernel<TypeParam, 2><<<grid_dim, CAFFE_CUDA_NUM_THREADS>>>( num_kernels, bottom_data_gpu + this->blob_bottom_->offset(n), this->blob_bottom_->gpu_shape() + 1, this->blob_top_->gpu_shape() + 1, this->blob_kernel_shape_->gpu_data(), this->blob_pad_->gpu_data(), this->blob_stride_->gpu_data(), this->blob_dilation_->gpu_data(), top_data_gpu + this->blob_top_->offset(n)); CUDA_POST_KERNEL_CHECK; } // Compare results against CPU version for (int i = 0; i < this->blob_top_->count(); ++i) { TypeParam cpuval = top_data_cpu[i]; TypeParam gpuval = this->blob_top_->cpu_data()[i]; EXPECT_EQ(cpuval, gpuval); if (cpuval != gpuval) { break; } } } } } // namespace caffe9
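The col-buffer height and width computed in the test fixtures above follow the standard dilated-convolution output formula, output = (input + 2*pad - (dilation*(kernel-1)+1)) / stride + 1. With the fixture's settings (15x15 input, pad 0, stride 2, dilation 3, kernel 3) the effective kernel extent is 7 and height_col_ = width_col_ = 5, as the short check below works out.

#include <cstdio>
int main() {
    int height = 15, pad = 0, stride = 2, dilation = 3, kernel = 3;
    int eff = dilation * (kernel - 1) + 1;                   // 3 * 2 + 1 = 7
    int height_col = (height + 2 * pad - eff) / stride + 1;  // (15 - 7) / 2 + 1 = 5
    printf("effective kernel = %d, height_col = %d\n", eff, height_col);
    return 0;
}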
256084fa61dcd90e9e327f46228dc18bd3779ac0.hip
// !!! This is a file automatically generated by hipify!!! #include <stdlib.h> #include <stdio.h> #include <time.h> #include <hip/hip_runtime.h> #define maxThreads 512 /* This code was developed and tested on cuda3 */ __global__ void getmaxcu(unsigned int num[], unsigned int size){ unsigned int tid = threadIdx.x; unsigned int gloid = blockIdx.x*blockDim.x+threadIdx.x; __shared__ int sdata[maxThreads]; // shared data sdata[tid]=num[gloid]; if(gloid>=size){ sdata[threadIdx.x]=0; } /*if(n<size){ int tSize = size/n; if(tid<(size%n) tSize++; for(int i; i<tSize; i++) if(sdata[tid]<num[glo } */ __syncthreads(); //get a block max by performing a tree-structured //reduction akin to that depicted in slide 18 of //the lecture 8 pp for (int stride = blockDim.x / 2; stride > 0; stride = stride / 2) { if (tid < stride) { if (sdata[tid] < sdata[tid + stride]) { sdata[tid] = sdata[tid + stride]; } } __syncthreads(); } if(tid==0){//store the block maxes in global memory num[blockIdx.x]=sdata[0]; } } int main(int argc, char *argv[]) { hipDeviceProp_t prop; hipError_t propErr = hipGetDeviceProperties(&prop, 0); if (propErr != hipSuccess) { printf("unable to get device properties\n"); } unsigned int size = 0; // The size of the array unsigned int i; // loop index unsigned int * numbers; //pointer to the array unsigned int* cudaNumbers; unsigned int thread; unsigned int block; if(argc !=2) { printf("usage: maxseq num\n"); printf("num = size of the array\n"); exit(1); } size = atol(argv[1]); numbers = (unsigned int *)malloc(size * sizeof(unsigned int)); if( !numbers ) { printf("Unable to allocate mem for an array of size %u\n", size); exit(1); } srand(time(NULL)); // setting a seed for the random number generator // Fill-up the array with random numbers from 0 to size-1 for( i = 0; i < size; i++) numbers[i] = rand() % size; hipMalloc((void**)&cudaNumbers, (size * sizeof(unsigned int))); hipMemcpy(cudaNumbers, numbers, (size * sizeof(unsigned int)), hipMemcpyHostToDevice); if (size%maxThreads != 0) { size = (size/maxThreads+1)*maxThreads; } unsigned int cudaSize=size; thread = maxThreads; block = size/thread; while(block>1){ hipLaunchKernelGGL(( getmaxcu), dim3(block), dim3(maxThreads), 0, 0, cudaNumbers, cudaSize); cudaSize=cudaSize/maxThreads; block = cudaSize/maxThreads; } hipLaunchKernelGGL(( getmaxcu), dim3(1), dim3(block), 0, 0, cudaNumbers, block); hipMemcpy(numbers, cudaNumbers, sizeof(unsigned int), hipMemcpyDeviceToHost);//only copies back the max, which should be in the first element of the array printf(" The maximum number in the array is: %u\n", numbers[0]); free(numbers); hipFree(cudaNumbers); exit(0); } /* input: pointer to an array of long int number of elements in the array output: the maximum number of the array */
256084fa61dcd90e9e327f46228dc18bd3779ac0.cu
#include <stdlib.h> #include <stdio.h> #include <time.h> #include <cuda.h> #define maxThreads 512 /* This code was developed and tested on cuda3 */ __global__ void getmaxcu(unsigned int num[], unsigned int size){ unsigned int tid = threadIdx.x; unsigned int gloid = blockIdx.x*blockDim.x+threadIdx.x; __shared__ int sdata[maxThreads]; // shared data sdata[tid]=num[gloid]; if(gloid>=size){ sdata[threadIdx.x]=0; } /*if(n<size){ int tSize = size/n; if(tid<(size%n) tSize++; for(int i; i<tSize; i++) if(sdata[tid]<num[glo } */ __syncthreads(); //get a block max by performing a tree-structured //reduction akin to that depicted in slide 18 of //the lecture 8 pp for (int stride = blockDim.x / 2; stride > 0; stride = stride / 2) { if (tid < stride) { if (sdata[tid] < sdata[tid + stride]) { sdata[tid] = sdata[tid + stride]; } } __syncthreads(); } if(tid==0){//store the block maxes in global memory num[blockIdx.x]=sdata[0]; } } int main(int argc, char *argv[]) { cudaDeviceProp prop; cudaError_t propErr = cudaGetDeviceProperties(&prop, 0); if (propErr != cudaSuccess) { printf("unable to get device properties\n"); } unsigned int size = 0; // The size of the array unsigned int i; // loop index unsigned int * numbers; //pointer to the array unsigned int* cudaNumbers; unsigned int thread; unsigned int block; if(argc !=2) { printf("usage: maxseq num\n"); printf("num = size of the array\n"); exit(1); } size = atol(argv[1]); numbers = (unsigned int *)malloc(size * sizeof(unsigned int)); if( !numbers ) { printf("Unable to allocate mem for an array of size %u\n", size); exit(1); } srand(time(NULL)); // setting a seed for the random number generator // Fill-up the array with random numbers from 0 to size-1 for( i = 0; i < size; i++) numbers[i] = rand() % size; cudaMalloc((void**)&cudaNumbers, (size * sizeof(unsigned int))); cudaMemcpy(cudaNumbers, numbers, (size * sizeof(unsigned int)), cudaMemcpyHostToDevice); if (size%maxThreads != 0) { size = (size/maxThreads+1)*maxThreads; } unsigned int cudaSize=size; thread = maxThreads; block = size/thread; while(block>1){ getmaxcu<<<block, maxThreads>>>(cudaNumbers, cudaSize); cudaSize=cudaSize/maxThreads; block = cudaSize/maxThreads; } getmaxcu<<<1, block>>>(cudaNumbers, block); cudaMemcpy(numbers, cudaNumbers, sizeof(unsigned int), cudaMemcpyDeviceToHost);//only copies back the max, which should be in the first element of the array printf(" The maximum number in the array is: %u\n", numbers[0]); free(numbers); cudaFree(cudaNumbers); exit(0); } /* input: pointer to an array of long int number of elements in the array output: the maximum number of the array */
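getmaxcu above copies num[gloid] into shared memory before testing gloid >= size, and the host rounds size up to a multiple of maxThreads only after the allocation, so the last block can read past the end of the device buffer. The kernel below is a hedged sketch of the same block-level max reduction with the load guarded; the names are illustrative and it writes block results to a separate array rather than in place.

// Assumes a power-of-two block size of at most 512 threads.
__global__ void blockMaxSketch(const unsigned int* num, unsigned int* blockResults, unsigned int size) {
    __shared__ unsigned int sdata[512];
    unsigned int tid = threadIdx.x;
    unsigned int gloid = blockIdx.x * blockDim.x + threadIdx.x;
    sdata[tid] = (gloid < size) ? num[gloid] : 0u;  // guarded load, zero padding
    __syncthreads();
    for (unsigned int stride = blockDim.x / 2; stride > 0; stride >>= 1) {
        if (tid < stride && sdata[tid + stride] > sdata[tid])
            sdata[tid] = sdata[tid + stride];
        __syncthreads();
    }
    if (tid == 0) blockResults[blockIdx.x] = sdata[0];
}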
811f6871f700881d6b90aa0bb375a2646b64dc44.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <hipcub/hipcub.hpp> #include "caffe2/core/context_gpu.h" #include "caffe2/operators/rmac_regions_op.h" namespace cub { template <typename KeyT, typename ValueT> inline __host__ __device__ bool operator<( const hipcub::KeyValuePair<KeyT, ValueT>& kv1, const hipcub::KeyValuePair<KeyT, ValueT>& kv2) { return (kv1.value < kv2.value) || (kv1.value == kv2.value && kv2.key < kv1.key); } } // namespace cub namespace caffe2 { namespace { __global__ void NumRMACRegionsKernel( const int W, const int H, const int min_step, const int max_step, const float overlap, const int scales, int* num_rois_data) { // steps(idx) regions for long dimension typedef hipcub::KeyValuePair<int, float> KeyValuePair; // <step, value> KeyValuePair kv, min_kv; min_kv.value = FLT_MAX; // Local reduction int minW = min(H, W); int diff = max(H, W) - minW; CUDA_1D_KERNEL_LOOP(index, max_step - min_step + 1) { kv.key = min_step + index; float b = diff / (1.0 * kv.key); kv.value = fabsf((minW * minW - minW * b) / (minW * minW) - overlap); if (kv < min_kv) { min_kv = kv; } } // Block-wise arg-min reduction to find step int step; { typedef hipcub::BlockReduce<KeyValuePair, CAFFE_CUDA_NUM_THREADS> BlockReduce; __shared__ typename BlockReduce::TempStorage temp_storage; min_kv = BlockReduce(temp_storage).Reduce(min_kv, hipcub::Min()); __shared__ int step_shared; if (threadIdx.x == 0) { step_shared = min_kv.key; } __syncthreads(); step = step_shared; } // Region overplus per dimension int Wd = (W > H) ? step : 0; int Hd = (H > W) ? step : 0; // Local reduction to compute the total number of rois at all scales int num_rois = 0; CUDA_1D_KERNEL_LOOP(index, scales) { int l = index + 1; int region_size = 2 * minW / (l + 1); num_rois += (region_size > 0) ? ((l + Wd) * (l + Hd)) : 0; } // Block-wise sum reduction to compute num_rois at all scales { typedef hipcub::BlockReduce<int, CAFFE_CUDA_NUM_THREADS> BlockReduce; __shared__ typename BlockReduce::TempStorage temp_storage; num_rois = BlockReduce(temp_storage).Sum(num_rois); } if (threadIdx.x == 0) { num_rois_data[0] = num_rois; num_rois_data[1] = Wd; num_rois_data[2] = Hd; } } __global__ void RMACRegionsKernel( const int W, const int H, const int N, const int* num_rois_data, float* output) { int num_rois = num_rois_data[0]; int Wd = num_rois_data[1]; int Hd = num_rois_data[2]; // Block-wide temp shared storage for intermediate ROI results to avoid // uncoalesced writes to global mem __shared__ float output_shared[CAFFE_CUDA_NUM_THREADS * 5]; CUDA_1D_KERNEL_LOOP(index, N) { int batch_id = index / num_rois; int roi_id = index % num_rois; int roi[5]; roi[0] = batch_id; // Find the scale corresponding to this index and the roi_id relative // to the scale. int l = 0; int num_rois_at_scale = 0; do { roi_id -= num_rois_at_scale; l++; num_rois_at_scale = (l + Wd) * (l + Hd); } while (roi_id - num_rois_at_scale >= 0); int region_size = 2 * min(H, W) / (l + 1); float bw = (l + Wd - 1 > 0) ? ((W - region_size) / (1.0 * (l + Wd - 1))) : 0; float bh = (l + Hd - 1 > 0) ? 
((H - region_size) / (1.0 * (l + Hd - 1))) : 0; int i = roi_id / (l + Hd); int j = roi_id % (l + Hd); roi[1] = bw * i; roi[2] = bh * j; // Careful with the borders if (roi[1] + region_size > W) { roi[1] -= (roi[1] + region_size - W); } if (roi[2] + region_size > H) { roi[2] -= (roi[2] + region_size - H); } roi[3] = roi[1] + region_size - 1; roi[4] = roi[2] + region_size - 1; // Writing directly to output (global memory) will result in uncoalesced // writes. Write output to shared mem first and then write ROI results to // global output in a coalesced manner. __syncthreads(); // Since output_shared is reused across loop iterations for (int i = 0; i < 5; ++i) { output_shared[threadIdx.x * 5 + i] = roi[i]; } __syncthreads(); int offset = index - threadIdx.x; float* output_offset = output + offset * 5; int num_threads = min(blockDim.x, N - offset); // Active threads in block for (int i = 0; i < 5; ++i) { output_offset[num_threads * i + threadIdx.x] = output_shared[num_threads * i + threadIdx.x]; } } } } // namespace template <> bool RMACRegionsOp<CUDAContext>::RunOnDevice() { const auto& X = Input(0); // Input tensor // RoIs if (X.size() == 0) { return true; } int batch_size = X.dim32(0); int H = X.dim32(2); int W = X.dim32(3); // Compute number of regions int min_step = 1; int max_step = 6; ReinitializeTensor(&num_rois_, {3}, at::dtype<int>().device(CUDA)); // num_rois, Wd, Hd hipLaunchKernelGGL(( NumRMACRegionsKernel), dim3(1), dim3(CAFFE_CUDA_NUM_THREADS), 0, context_.cuda_stream(), W, H, min_step, max_step, overlap_, scales_, num_rois_.mutable_data<int>()); // Bit awkward, but the size of the output tensor depends on the output of // NumRMACRegionsKernel (number of RoIs), so need to copy that to CPU // to Resize() output appropriately. int num_rois = 0; context_.CopyBytesToCPU(sizeof(int), num_rois_.data<int>(), &num_rois); int N = batch_size * num_rois; auto* output = Output(0, {N, 5}, at::dtype<float>()); // [batch_id x1 y1 x2 y2] // Compute region coordinates hipLaunchKernelGGL(( RMACRegionsKernel), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, context_.cuda_stream(), W, H, N, num_rois_.data<int>(), output->template mutable_data<float>()); return true; } REGISTER_CUDA_OPERATOR(RMACRegions, RMACRegionsOp<CUDAContext>); } // namespace caffe2
811f6871f700881d6b90aa0bb375a2646b64dc44.cu
#include <cub/block/block_reduce.cuh> #include "caffe2/core/context_gpu.h" #include "caffe2/operators/rmac_regions_op.h" namespace cub { template <typename KeyT, typename ValueT> inline __host__ __device__ bool operator<( const cub::KeyValuePair<KeyT, ValueT>& kv1, const cub::KeyValuePair<KeyT, ValueT>& kv2) { return (kv1.value < kv2.value) || (kv1.value == kv2.value && kv2.key < kv1.key); } } // namespace cub namespace caffe2 { namespace { __global__ void NumRMACRegionsKernel( const int W, const int H, const int min_step, const int max_step, const float overlap, const int scales, int* num_rois_data) { // steps(idx) regions for long dimension typedef cub::KeyValuePair<int, float> KeyValuePair; // <step, value> KeyValuePair kv, min_kv; min_kv.value = FLT_MAX; // Local reduction int minW = min(H, W); int diff = max(H, W) - minW; CUDA_1D_KERNEL_LOOP(index, max_step - min_step + 1) { kv.key = min_step + index; float b = diff / (1.0 * kv.key); kv.value = fabsf((minW * minW - minW * b) / (minW * minW) - overlap); if (kv < min_kv) { min_kv = kv; } } // Block-wise arg-min reduction to find step int step; { typedef cub::BlockReduce<KeyValuePair, CAFFE_CUDA_NUM_THREADS> BlockReduce; __shared__ typename BlockReduce::TempStorage temp_storage; min_kv = BlockReduce(temp_storage).Reduce(min_kv, cub::Min()); __shared__ int step_shared; if (threadIdx.x == 0) { step_shared = min_kv.key; } __syncthreads(); step = step_shared; } // Region overplus per dimension int Wd = (W > H) ? step : 0; int Hd = (H > W) ? step : 0; // Local reduction to compute the total number of rois at all scales int num_rois = 0; CUDA_1D_KERNEL_LOOP(index, scales) { int l = index + 1; int region_size = 2 * minW / (l + 1); num_rois += (region_size > 0) ? ((l + Wd) * (l + Hd)) : 0; } // Block-wise sum reduction to compute num_rois at all scales { typedef cub::BlockReduce<int, CAFFE_CUDA_NUM_THREADS> BlockReduce; __shared__ typename BlockReduce::TempStorage temp_storage; num_rois = BlockReduce(temp_storage).Sum(num_rois); } if (threadIdx.x == 0) { num_rois_data[0] = num_rois; num_rois_data[1] = Wd; num_rois_data[2] = Hd; } } __global__ void RMACRegionsKernel( const int W, const int H, const int N, const int* num_rois_data, float* output) { int num_rois = num_rois_data[0]; int Wd = num_rois_data[1]; int Hd = num_rois_data[2]; // Block-wide temp shared storage for intermediate ROI results to avoid // uncoalesced writes to global mem __shared__ float output_shared[CAFFE_CUDA_NUM_THREADS * 5]; CUDA_1D_KERNEL_LOOP(index, N) { int batch_id = index / num_rois; int roi_id = index % num_rois; int roi[5]; roi[0] = batch_id; // Find the scale corresponding to this index and the roi_id relative // to the scale. int l = 0; int num_rois_at_scale = 0; do { roi_id -= num_rois_at_scale; l++; num_rois_at_scale = (l + Wd) * (l + Hd); } while (roi_id - num_rois_at_scale >= 0); int region_size = 2 * min(H, W) / (l + 1); float bw = (l + Wd - 1 > 0) ? ((W - region_size) / (1.0 * (l + Wd - 1))) : 0; float bh = (l + Hd - 1 > 0) ? ((H - region_size) / (1.0 * (l + Hd - 1))) : 0; int i = roi_id / (l + Hd); int j = roi_id % (l + Hd); roi[1] = bw * i; roi[2] = bh * j; // Careful with the borders if (roi[1] + region_size > W) { roi[1] -= (roi[1] + region_size - W); } if (roi[2] + region_size > H) { roi[2] -= (roi[2] + region_size - H); } roi[3] = roi[1] + region_size - 1; roi[4] = roi[2] + region_size - 1; // Writing directly to output (global memory) will result in uncoalesced // writes. 
Write output to shared mem first and then write ROI results to // global output in a coalesced manner. __syncthreads(); // Since output_shared is reused across loop iterations for (int i = 0; i < 5; ++i) { output_shared[threadIdx.x * 5 + i] = roi[i]; } __syncthreads(); int offset = index - threadIdx.x; float* output_offset = output + offset * 5; int num_threads = min(blockDim.x, N - offset); // Active threads in block for (int i = 0; i < 5; ++i) { output_offset[num_threads * i + threadIdx.x] = output_shared[num_threads * i + threadIdx.x]; } } } } // namespace template <> bool RMACRegionsOp<CUDAContext>::RunOnDevice() { const auto& X = Input(0); // Input tensor // RoIs if (X.size() == 0) { return true; } int batch_size = X.dim32(0); int H = X.dim32(2); int W = X.dim32(3); // Compute number of regions int min_step = 1; int max_step = 6; ReinitializeTensor(&num_rois_, {3}, at::dtype<int>().device(CUDA)); // num_rois, Wd, Hd NumRMACRegionsKernel<<< 1, CAFFE_CUDA_NUM_THREADS, 0, context_.cuda_stream()>>>( W, H, min_step, max_step, overlap_, scales_, num_rois_.mutable_data<int>()); // Bit awkward, but the size of the output tensor depends on the output of // NumRMACRegionsKernel (number of RoIs), so need to copy that to CPU // to Resize() output appropriately. int num_rois = 0; context_.CopyBytesToCPU(sizeof(int), num_rois_.data<int>(), &num_rois); int N = batch_size * num_rois; auto* output = Output(0, {N, 5}, at::dtype<float>()); // [batch_id x1 y1 x2 y2] // Compute region coordinates RMACRegionsKernel<<< CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS, 0, context_.cuda_stream()>>>( W, H, N, num_rois_.data<int>(), output->template mutable_data<float>()); return true; } REGISTER_CUDA_OPERATOR(RMACRegions, RMACRegionsOp<CUDAContext>); } // namespace caffe2
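/*
 * Both kernels above rely on cub::BlockReduce for their block-wide
 * reductions (an arg-min over <step, value> pairs and a sum of per-thread
 * ROI counts). The standalone kernel below is a minimal sketch of that same
 * pattern with plain integers; the kernel name, block size, and buffers are
 * illustrative and are not taken from the Caffe2 operator.
 */
#include <cub/block/block_reduce.cuh>

template <int BLOCK_THREADS>
__global__ void BlockSumSketch(const int* in, int* block_sums, int n) {
  typedef cub::BlockReduce<int, BLOCK_THREADS> BlockReduce;
  __shared__ typename BlockReduce::TempStorage temp_storage;

  int idx = blockIdx.x * BLOCK_THREADS + threadIdx.x;
  int val = (idx < n) ? in[idx] : 0;

  // The aggregate returned by Sum() is only valid in thread 0, which is why
  // the kernels above broadcast the arg-min key through shared memory before
  // other threads consume it.
  int aggregate = BlockReduce(temp_storage).Sum(val);
  if (threadIdx.x == 0) {
    block_sums[blockIdx.x] = aggregate;
  }
}
/* A launch such as BlockSumSketch<256><<<num_blocks, 256>>>(in, out, n) has
   the same shape as the reductions in NumRMACRegionsKernel; that kernel
   additionally injects an operator< into namespace cub so cub::Min() can
   order its key/value pairs. */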
2bfd95c1fcfa3d1dffab6b45df115daed2f9283e.hip
// !!! This is a file automatically generated by hipify!!! /****************************************************************************** * * MIT License * * Copyright (c) 2020 OrthogonalHawk * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to * deal in the Software without restriction, including without limitation the * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or * sell copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS * IN THE SOFTWARE. * *****************************************************************************/ /****************************************************************************** * * @file falcon_dsp_cuda_utils.cu * @author OrthogonalHawk * @date 27-Jan-2020 * * @brief CUDA implementation of general purpose Digital Signal Processing * utility functions. * * @section DESCRIPTION * * Implements CUDA versions of general purpose Digital Signal Processing utility * functions. * * @section HISTORY * * 27-Jan-2020 OrthogonalHawk File created. * *****************************************************************************/ /****************************************************************************** * INCLUDE_FILES *****************************************************************************/ #include <iostream> #include <stdint.h> #include "utilities/falcon_dsp_cuda_utils.h" /****************************************************************************** * CONSTANTS *****************************************************************************/ /****************************************************************************** * ENUMS & TYPEDEFS *****************************************************************************/ /****************************************************************************** * MACROS *****************************************************************************/ namespace falcon_dsp { /****************************************************************************** * FUNCTION IMPLEMENTATION *****************************************************************************/ /* @brief CUDA error checking function * @description Checks for errors after invoking a CUDA API function, prints the * result to stderr and optionally aborts execution. 
* @param code - error code to check * @param file - source file where the error check is occurring * @param line - source file line where the error check is occurring * @param abort - boolean indicating whether to abort program execution on error * @return None * * @note Implementation from: https://stackoverflow.com/questions/14038589/what-is-the-canonical-way-to-check-for-errors-using-the-cuda-runtime-api */ void gpuAssert(hipError_t code, const char *file, int line, bool abort) { if (code != hipSuccess) { fprintf(stderr,"CUDA ERROR: %s %s:%d\n", hipGetErrorString(code), file, line); if (abort) { exit(code); } } } /****************************************************************************** * CLASS IMPLEMENTATION *****************************************************************************/ }
2bfd95c1fcfa3d1dffab6b45df115daed2f9283e.cu
/****************************************************************************** * * MIT License * * Copyright (c) 2020 OrthogonalHawk * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to * deal in the Software without restriction, including without limitation the * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or * sell copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS * IN THE SOFTWARE. * *****************************************************************************/ /****************************************************************************** * * @file falcon_dsp_cuda_utils.cu * @author OrthogonalHawk * @date 27-Jan-2020 * * @brief CUDA implementation of general purpose Digital Signal Processing * utility functions. * * @section DESCRIPTION * * Implements CUDA versions of general purpose Digital Signal Processing utility * functions. * * @section HISTORY * * 27-Jan-2020 OrthogonalHawk File created. * *****************************************************************************/ /****************************************************************************** * INCLUDE_FILES *****************************************************************************/ #include <iostream> #include <stdint.h> #include "utilities/falcon_dsp_cuda_utils.h" /****************************************************************************** * CONSTANTS *****************************************************************************/ /****************************************************************************** * ENUMS & TYPEDEFS *****************************************************************************/ /****************************************************************************** * MACROS *****************************************************************************/ namespace falcon_dsp { /****************************************************************************** * FUNCTION IMPLEMENTATION *****************************************************************************/ /* @brief CUDA error checking function * @description Checks for errors after invoking a CUDA API function, prints the * result to stderr and optionally aborts execution. 
* @param code - error code to check * @param file - source file where the error check is occurring * @param line - source file line where the error check is occurring * @param abort - boolean indicating whether to abort program execution on error * @return None * * @note Implementation from: https://stackoverflow.com/questions/14038589/what-is-the-canonical-way-to-check-for-errors-using-the-cuda-runtime-api */ void gpuAssert(cudaError_t code, const char *file, int line, bool abort) { if (code != cudaSuccess) { fprintf(stderr,"CUDA ERROR: %s %s:%d\n", cudaGetErrorString(code), file, line); if (abort) { exit(code); } } } /****************************************************************************** * CLASS IMPLEMENTATION *****************************************************************************/ }
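/*
 * gpuAssert follows the error-checking idiom from the Stack Overflow answer
 * cited above: call sites wrap CUDA runtime calls in a macro so that
 * __FILE__ and __LINE__ identify where the failing call was made. The code
 * below is only a sketch of that usage; the macro name CUDA_CHECK is
 * illustrative, and it is assumed (not shown here) that
 * falcon_dsp_cuda_utils.h declares falcon_dsp::gpuAssert.
 */
#include <cuda_runtime.h>
#include "utilities/falcon_dsp_cuda_utils.h"

#define CUDA_CHECK(ans) { falcon_dsp::gpuAssert((ans), __FILE__, __LINE__, true); }

void example_usage(void)
{
    float *dev_buf = nullptr;

    // Each runtime call is checked at the call site; on failure gpuAssert
    // prints "CUDA ERROR: <message> <file>:<line>" and, because abort is
    // passed as true, terminates the process with the error code.
    CUDA_CHECK(cudaMalloc(&dev_buf, 1024 * sizeof(float)));
    CUDA_CHECK(cudaMemset(dev_buf, 0, 1024 * sizeof(float)));
    CUDA_CHECK(cudaFree(dev_buf));
}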
f7c599eb5e551ab141b8625c1f72a2bd51d3d7f7.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"

__global__ void ChangeInputWeightsKernel(
    float *inputWeights,
    float *inputWeightDeltas,
    float *outputWeights,
    float *outputDeltas,
    float *inputWeightRTRLDerivatives,
    float trainingRate,
    float momentum
)
{
    int weightId = blockDim.x*blockIdx.y*gridDim.x // rows preceding current row in grid
        + blockDim.x*blockIdx.x                    // blocks preceding current block
        + threadIdx.x;

    if (weightId < D_HIDDEN_UNITS * D_INPUT_UNITS)
    {
        float gradient = 0;
        for (int i = 0; i < D_OUTPUT_UNITS; i++)
        {
            float sum = 0;
            for (int j = 0; j < D_HIDDEN_UNITS; j++)
            {
                sum += outputWeights[i * D_HIDDEN_UNITS + j]
                     * inputWeightRTRLDerivatives[j * D_HIDDEN_UNITS * D_INPUT_UNITS + weightId];
            }
            gradient += outputDeltas[i] * sum;
        }

        float weightDelta = trainingRate * gradient + momentum * inputWeightDeltas[weightId];
        inputWeightDeltas[weightId] = weightDelta;
        inputWeights[weightId] += weightDelta;
    }
}
f7c599eb5e551ab141b8625c1f72a2bd51d3d7f7.cu
#include "includes.h" __global__ void ChangeInputWeightsKernel( float *inputWeights, float *inputWeightDeltas, float *outputWeights, float *outputDeltas, float *inputWeightRTRLDerivatives, float trainingRate, float momentum ) { int weightId = blockDim.x*blockIdx.y*gridDim.x //rows preceeding current row in grid + blockDim.x*blockIdx.x //blocks preceeding current block + threadIdx.x; if (weightId < D_HIDDEN_UNITS * D_INPUT_UNITS) { float gradient = 0; for (int i = 0; i < D_OUTPUT_UNITS; i++) { float sum = 0; for (int j = 0; j < D_HIDDEN_UNITS; j++) { sum += outputWeights[i * D_HIDDEN_UNITS + j] * inputWeightRTRLDerivatives[j * D_HIDDEN_UNITS * D_INPUT_UNITS + weightId]; } gradient += outputDeltas[i] * sum; } float weightDelta = trainingRate * gradient + momentum * inputWeightDeltas[weightId]; inputWeightDeltas[weightId] = weightDelta; inputWeights[weightId] += weightDelta; } }
582ecfcecca15138b8f9184ae09b9e6f2125bc25.hip
// !!! This is a file automatically generated by hipify!!! #include <stdio.h> #include <stdlib.h> #include <cutil.h> #include <math.h> // Includes #include <stdio.h> #include "../include/ContAcq-IntClk.h" //#include "REPEATL.h" #include "../include/REPEATR.h" // includes, project #include "../include/sdkHelper.h" // helper for shared functions common to CUDA SDK samples //#include <shrQATest.h> //#include <shrUtils.h> // includes CUDA #include <hip/hip_runtime.h> #define THREADS_PER_BLOCK 256 #define NUM_OF_BLOCKS 60 #define max_tid THREADS_PER_BLOCK*NUM_OF_BLOCKS #define LINE_SIZE 8 #define SETS 64 #define ASSOC 6 #define SIMD_WIDTH 32 #define ITERATIONS REPLACE_ITERATIONS // Variables int* h_A; int* h_B; int* h_C; int* d_A; int* d_B; int* d_C; bool noprompt = false; unsigned int my_timer; // Functions void CleanupResources(void); void RandomInit(int*, int); void ParseArguments(int, char**); //////////////////////////////////////////////////////////////////////////////// // These are CUDA Helper functions // This will output the proper CUDA error strings in the event that a CUDA host call returns an error #define checkCudaErrors(err) __checkCudaErrors (err, __FILE__, __LINE__) inline void __checkCudaErrors(hipError_t err, const char *file, const int line ){ if(hipSuccess != err){ fprintf(stderr, "%s(%i) : CUDA Runtime API error %d: %s.\n",file, line, (int)err, hipGetErrorString( err ) ); exit(-1); } } // This will output the proper error string when calling hipGetLastError #define getLastCudaError(msg) __getLastCudaError (msg, __FILE__, __LINE__) inline void __getLastCudaError(const char *errorMessage, const char *file, const int line ){ hipError_t err = hipGetLastError(); if (hipSuccess != err){ fprintf(stderr, "%s(%i) : getLastCudaError() CUDA error : %s : (%d) %s.\n",file, line, errorMessage, (int)err, hipGetErrorString( err ) ); exit(-1); } } // end of CUDA Helper Functions // Device code __global__ void PowerKernal(int* A, int* C, int N){ int tid = blockDim.x * blockIdx.x + threadIdx.x; //Do Some Computation int size = (400*max_tid*LINE_SIZE)/sizeof(int); unsigned j=0, k=0; int sum=0; // Fill the L1 cache, Miss on every iteration for (int i=0; i<ITERATIONS ; i++){ REPEAT_L6(0); //REPLACE_ITERATIONS } /* // Fill the L1 cache, Miss on first LD, Hit on subsequent LDs for(k=0; k<ITERATIONS; ++k){ for(j=0; j<(size/2); j+=THREADS_PER_BLOCK){ C[tid+j] = A[tid+j]; } } */ C[0]=sum; __syncthreads(); } // Host code int main(){ printf("Power Microbenchmarks\n"); int N = (400*max_tid*LINE_SIZE); size_t size = N * sizeof(int) ; // Allocate input vectors h_A and h_B in host memory h_A = (int*)malloc(size); if (h_A == 0) CleanupResources(); //h_B = (float*)malloc(size); //if (h_B == 0) CleanupResources(); h_C = (int*)malloc(size); if (h_C == 0) CleanupResources(); // Initialize input vectors RandomInit(h_A, N); //RandomInit(h_B, N); // Allocate vectors in device memory checkCudaErrors( hipMalloc((void**)&d_A, size) ); //checkCudaErrors( hipMalloc((void**)&d_B, size) ); checkCudaErrors( hipMalloc((void**)&d_C, size) ); // Copy vectors from host memory to device memory checkCudaErrors( hipMemcpy(d_A, h_A, size, hipMemcpyHostToDevice) ); //checkCudaErrors( hipMemcpy(d_B, h_B, size, hipMemcpyHostToDevice) ); //VecAdd<<<blocksPerGrid, threadsPerBlock>>>(d_A, d_B, d_C, N); dim3 dimGrid(NUM_OF_BLOCKS,1); dim3 dimBlock(THREADS_PER_BLOCK,1); CUT_SAFE_CALL(cutCreateTimer(&my_timer)); TaskHandle taskhandle = LaunchDAQ(); CUT_SAFE_CALL(cutStartTimer(my_timer)); hipLaunchKernelGGL(( PowerKernal), 
dim3(dimGrid),dim3(dimBlock), 0, 0, d_A, d_C, N); CUDA_SAFE_CALL( hipDeviceSynchronize() ); printf("execution time = %f\n", cutGetTimerValue(my_timer)); TurnOffDAQ(taskhandle, cutGetTimerValue(my_timer)); CUT_SAFE_CALL(cutStopTimer(my_timer)); CUT_SAFE_CALL(cutDeleteTimer(my_timer)); getLastCudaError("kernel launch failure"); #ifdef _DEBUG checkCudaErrors( hipDeviceSynchronize() ); #endif // Copy result from device memory to host memory // h_C contains the result in host memory checkCudaErrors( hipMemcpy(h_C, d_C, size, hipMemcpyDeviceToHost) ); CleanupResources(); return 0; } void CleanupResources(void){ // Free device memory if (d_A) hipFree(d_A); //if (d_B) // hipFree(d_B); if (d_C) hipFree(d_C); // Free host memory if (h_A) free(h_A); // if (h_B) // free(h_B); if (h_C) free(h_C); } // Allocates an array with random float entries. void RandomInit(int* data, int n){ for (int i = 0; i < n; ++i) data[i] = (int)(rand() / RAND_MAX); }
582ecfcecca15138b8f9184ae09b9e6f2125bc25.cu
#include <stdio.h> #include <stdlib.h> #include <cutil.h> #include <math.h> // Includes #include <stdio.h> #include "../include/ContAcq-IntClk.h" //#include "REPEATL.h" #include "../include/REPEATR.h" // includes, project #include "../include/sdkHelper.h" // helper for shared functions common to CUDA SDK samples //#include <shrQATest.h> //#include <shrUtils.h> // includes CUDA #include <cuda_runtime.h> #define THREADS_PER_BLOCK 256 #define NUM_OF_BLOCKS 60 #define max_tid THREADS_PER_BLOCK*NUM_OF_BLOCKS #define LINE_SIZE 8 #define SETS 64 #define ASSOC 6 #define SIMD_WIDTH 32 #define ITERATIONS REPLACE_ITERATIONS // Variables int* h_A; int* h_B; int* h_C; int* d_A; int* d_B; int* d_C; bool noprompt = false; unsigned int my_timer; // Functions void CleanupResources(void); void RandomInit(int*, int); void ParseArguments(int, char**); //////////////////////////////////////////////////////////////////////////////// // These are CUDA Helper functions // This will output the proper CUDA error strings in the event that a CUDA host call returns an error #define checkCudaErrors(err) __checkCudaErrors (err, __FILE__, __LINE__) inline void __checkCudaErrors(cudaError err, const char *file, const int line ){ if(cudaSuccess != err){ fprintf(stderr, "%s(%i) : CUDA Runtime API error %d: %s.\n",file, line, (int)err, cudaGetErrorString( err ) ); exit(-1); } } // This will output the proper error string when calling cudaGetLastError #define getLastCudaError(msg) __getLastCudaError (msg, __FILE__, __LINE__) inline void __getLastCudaError(const char *errorMessage, const char *file, const int line ){ cudaError_t err = cudaGetLastError(); if (cudaSuccess != err){ fprintf(stderr, "%s(%i) : getLastCudaError() CUDA error : %s : (%d) %s.\n",file, line, errorMessage, (int)err, cudaGetErrorString( err ) ); exit(-1); } } // end of CUDA Helper Functions // Device code __global__ void PowerKernal(int* A, int* C, int N){ int tid = blockDim.x * blockIdx.x + threadIdx.x; //Do Some Computation int size = (400*max_tid*LINE_SIZE)/sizeof(int); unsigned j=0, k=0; int sum=0; // Fill the L1 cache, Miss on every iteration for (int i=0; i<ITERATIONS ; i++){ REPEAT_L6(0); //REPLACE_ITERATIONS } /* // Fill the L1 cache, Miss on first LD, Hit on subsequent LDs for(k=0; k<ITERATIONS; ++k){ for(j=0; j<(size/2); j+=THREADS_PER_BLOCK){ C[tid+j] = A[tid+j]; } } */ C[0]=sum; __syncthreads(); } // Host code int main(){ printf("Power Microbenchmarks\n"); int N = (400*max_tid*LINE_SIZE); size_t size = N * sizeof(int) ; // Allocate input vectors h_A and h_B in host memory h_A = (int*)malloc(size); if (h_A == 0) CleanupResources(); //h_B = (float*)malloc(size); //if (h_B == 0) CleanupResources(); h_C = (int*)malloc(size); if (h_C == 0) CleanupResources(); // Initialize input vectors RandomInit(h_A, N); //RandomInit(h_B, N); // Allocate vectors in device memory checkCudaErrors( cudaMalloc((void**)&d_A, size) ); //checkCudaErrors( cudaMalloc((void**)&d_B, size) ); checkCudaErrors( cudaMalloc((void**)&d_C, size) ); // Copy vectors from host memory to device memory checkCudaErrors( cudaMemcpy(d_A, h_A, size, cudaMemcpyHostToDevice) ); //checkCudaErrors( cudaMemcpy(d_B, h_B, size, cudaMemcpyHostToDevice) ); //VecAdd<<<blocksPerGrid, threadsPerBlock>>>(d_A, d_B, d_C, N); dim3 dimGrid(NUM_OF_BLOCKS,1); dim3 dimBlock(THREADS_PER_BLOCK,1); CUT_SAFE_CALL(cutCreateTimer(&my_timer)); TaskHandle taskhandle = LaunchDAQ(); CUT_SAFE_CALL(cutStartTimer(my_timer)); PowerKernal<<<dimGrid,dimBlock>>>(d_A, d_C, N); CUDA_SAFE_CALL( cudaThreadSynchronize() ); 
printf("execution time = %f\n", cutGetTimerValue(my_timer)); TurnOffDAQ(taskhandle, cutGetTimerValue(my_timer)); CUT_SAFE_CALL(cutStopTimer(my_timer)); CUT_SAFE_CALL(cutDeleteTimer(my_timer)); getLastCudaError("kernel launch failure"); #ifdef _DEBUG checkCudaErrors( cudaDeviceSynchronize() ); #endif // Copy result from device memory to host memory // h_C contains the result in host memory checkCudaErrors( cudaMemcpy(h_C, d_C, size, cudaMemcpyDeviceToHost) ); CleanupResources(); return 0; } void CleanupResources(void){ // Free device memory if (d_A) cudaFree(d_A); //if (d_B) // cudaFree(d_B); if (d_C) cudaFree(d_C); // Free host memory if (h_A) free(h_A); // if (h_B) // free(h_B); if (h_C) free(h_C); } // Allocates an array with random float entries. void RandomInit(int* data, int n){ for (int i = 0; i < n; ++i) data[i] = (int)(rand() / RAND_MAX); }
c61f791bbf4d6afd424afeba7faec4cbb63718e8.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ #include <algorithm> #include <string> #include "paddle/fluid/operators/interpolate_op.h" #include "paddle/fluid/platform/cuda_primitives.h" #include "paddle/fluid/platform/gpu_launch_config.h" namespace paddle { namespace operators { using framework::Tensor; using DataLayout = framework::DataLayout; template <typename T> __global__ void KeNearestNeighborInterpFw( const T* in, const size_t in_img_h, const size_t in_img_w, const size_t input_h, const size_t input_w, T* out, const size_t out_img_h, const size_t out_img_w, const size_t output_h, const size_t output_w, const size_t num_channels, const float ratio_h, const float ratio_w, const bool align_corners, const DataLayout data_layout) { int nthreads = output_h * output_w; int tid = blockIdx.x * blockDim.x + threadIdx.x; int stride = blockDim.x * gridDim.x; for (; tid < nthreads; tid += stride) { int out_id_h = tid / output_w; int out_id_w = tid % output_w; int in_img_size = input_w / num_channels; int out_img_size = output_w / num_channels; int channel_id, out_img_idy, out_img_idx; if (data_layout == DataLayout::kNCHW) { channel_id = out_id_w / out_img_size; out_img_idy = (out_id_w % out_img_size) / out_img_w; out_img_idx = tid % out_img_w; } else { out_img_idy = out_id_w / (out_img_w * num_channels); out_img_idx = out_id_w % (out_img_w * num_channels) / num_channels; channel_id = tid % num_channels; } int in_img_idy = (align_corners) ? static_cast<int>(ratio_h * out_img_idy + 0.5) : static_cast<int>(ratio_h * out_img_idy); int in_img_idx = (align_corners) ? 
static_cast<int>(ratio_w * out_img_idx + 0.5) : static_cast<int>(ratio_w * out_img_idx); if (data_layout == DataLayout::kNCHW) { out[tid] = in[out_id_h * input_w + channel_id * in_img_size + in_img_idy * in_img_w + in_img_idx]; } else { out[tid] = in[out_id_h * input_w + in_img_idy * in_img_w * num_channels + in_img_idx * num_channels + channel_id]; } } } template <typename T> __global__ void KeNearestNeighborInterpBw( T* in, const size_t in_img_h, const size_t in_img_w, const size_t input_h, const size_t input_w, const T* out, const size_t out_img_h, const size_t out_img_w, const size_t output_h, const size_t output_w, const size_t num_channels, const float ratio_h, const float ratio_w, const bool align_corners, const DataLayout data_layout) { int nthreads = output_h * output_w; int tid = blockIdx.x * blockDim.x + threadIdx.x; int stride = blockDim.x * gridDim.x; for (; tid < nthreads; tid += stride) { int out_id_h = tid / output_w; int out_id_w = tid % output_w; int in_img_size = input_w / num_channels; int out_img_size = output_w / num_channels; int channel_id, out_img_idy, out_img_idx; if (data_layout == DataLayout::kNCHW) { channel_id = out_id_w / out_img_size; out_img_idy = (out_id_w % out_img_size) / out_img_w; out_img_idx = tid % out_img_w; } else { out_img_idy = out_id_w / (out_img_w * num_channels); out_img_idx = out_id_w % (out_img_w * num_channels) / num_channels; channel_id = tid % num_channels; } int in_img_idy = (align_corners) ? static_cast<int>(ratio_h * out_img_idy + 0.5) : static_cast<int>(ratio_h * out_img_idy); int in_img_idx = (align_corners) ? static_cast<int>(ratio_w * out_img_idx + 0.5) : static_cast<int>(ratio_w * out_img_idx); T* in_pos; if (data_layout == DataLayout::kNCHW) { in_pos = &in[out_id_h * input_w + channel_id * in_img_size + in_img_idy * in_img_w + in_img_idx]; } else { in_pos = &in[out_id_h * input_w + in_img_idy * in_img_w * num_channels + in_img_idx * num_channels + channel_id]; } const T out_pos = out[out_id_h * output_w + out_id_w]; platform::CudaAtomicAdd(in_pos, out_pos); } } template <typename T> __global__ void KeLinearInterpFw(const T* in, const size_t in_img_w, const size_t input_w, T* out, const size_t out_img_w, const size_t output_h, const size_t output_w, const size_t num_channels, const float ratio_w, const bool align_corners, const int align_mode, const DataLayout data_layout) { int nthreads = output_h * output_w; int tid = blockIdx.x * blockDim.x + threadIdx.x; int stride = blockDim.x * gridDim.x; bool align_flag = (align_mode == 0 && !align_corners); for (; tid < nthreads; tid += stride) { int out_id_h = tid / output_w; int out_id_w = tid % output_w; int in_img_size = input_w / num_channels; int out_img_size = output_w / num_channels; int channel_id, out_img_idy, out_img_idx; if (data_layout == DataLayout::kNCHW) { channel_id = out_id_w / out_img_size; out_img_idx = tid % out_img_w; } else { out_img_idx = out_id_w % (out_img_w * num_channels) / num_channels; channel_id = tid % num_channels; } int in_img_idx = align_flag ? static_cast<int>(ratio_w * (out_img_idx + 0.5) - 0.5) : static_cast<int>(ratio_w * out_img_idx); in_img_idx = (in_img_idx > 0) ? in_img_idx : 0; // w int w_id = (in_img_idx < in_img_w - 1) ? 1 : 0; // w_id T src_w = ratio_w * (out_img_idx + 0.5) - 0.5; src_w = (src_w > 0) ? src_w : 0; T w1lambda = align_flag ? 
src_w - in_img_idx : ratio_w * out_img_idx - in_img_idx; T w2lambda = 1.f - w1lambda; if (data_layout == DataLayout::kNCHW) { const T* in_pos = &in[out_id_h * out_id_w + channel_id * in_img_size + in_img_idx]; // linear interpolation out[out_id_h * output_w + out_id_w] = w2lambda * in_pos[0] + w1lambda * in_pos[w_id]; } else { const T* in_pos = &in[out_id_h * input_w + in_img_idx * num_channels + channel_id]; // linear interpolation out[out_id_h * output_w + out_id_w] = w2lambda * in_pos[0] + w1lambda * in_pos[w_id * num_channels]; } } } template <typename T> __global__ void KeLinearInterpBw(T* in, const size_t in_img_w, const size_t input_w, const T* out, const size_t out_img_w, const size_t output_h, const size_t output_w, const size_t num_channels, const T ratio_w, const bool align_corners, const int align_mode, const DataLayout data_layout) { int nthreads = output_h * output_w; int tid = blockIdx.x * blockDim.x + threadIdx.x; int stride = blockDim.x * gridDim.x; bool align_flag = (align_mode == 0 && !align_corners); for (; tid < nthreads; tid += stride) { int out_id_h = tid / output_w; int out_id_w = tid % output_w; int in_img_size = input_w / num_channels; int out_img_size = output_w / num_channels; int channel_id, out_img_idx; if (data_layout == DataLayout::kNCHW) { channel_id = out_id_w / out_img_size; out_img_idx = tid % out_img_w; } else { out_img_idx = out_id_w % (out_img_w * num_channels) / num_channels; channel_id = tid % num_channels; } int in_img_idx = align_flag ? ratio_w * (out_img_idx + 0.5) - 0.5 : ratio_w * out_img_idx; in_img_idx = (in_img_idx > 0) ? in_img_idx : 0; // w int w_id = (in_img_idx < in_img_w - 1) ? 1 : 0; // w_id T src_w = ratio_w * (out_img_idx + 0.5) - 0.5; src_w = (src_w > 0) ? src_w : 0; T w1lambda = align_flag ? 
src_w - in_img_idx : ratio_w * out_img_idx - in_img_idx; T w2lambda = 1.f - w1lambda; T* in_pos; if (data_layout == DataLayout::kNCHW) { in_pos = &in[out_id_h * input_w + channel_id * in_img_size + in_img_idx]; } else { in_pos = &in[out_id_h * input_w + in_img_idx * num_channels + channel_id]; } const T* out_pos = &out[out_id_w]; if (data_layout == DataLayout::kNCHW) { platform::CudaAtomicAdd(&in_pos[0], w2lambda * out_pos[0]); platform::CudaAtomicAdd(&in_pos[w_id], w1lambda * out_pos[0]); } else { platform::CudaAtomicAdd(&in_pos[0], w2lambda * out_pos[0]); platform::CudaAtomicAdd(&in_pos[w_id * num_channels], w1lambda * out_pos[0]); } } } template <typename T> __global__ void KeBilinearInterpFw( const T* in, const size_t in_img_h, const size_t in_img_w, const size_t input_h, const size_t input_w, T* out, const size_t out_img_h, const size_t out_img_w, const size_t output_h, const size_t output_w, const size_t num_channels, const float ratio_h, const float ratio_w, const bool align_corners, const int align_mode, const DataLayout data_layout) { int nthreads = output_h * output_w; int tid = blockIdx.x * blockDim.x + threadIdx.x; int stride = blockDim.x * gridDim.x; bool align_flag = (align_mode == 0 && !align_corners); for (; tid < nthreads; tid += stride) { int out_id_h = tid / output_w; int out_id_w = tid % output_w; int in_img_size = input_w / num_channels; int out_img_size = output_w / num_channels; int channel_id, out_img_idy, out_img_idx; if (data_layout == DataLayout::kNCHW) { channel_id = out_id_w / out_img_size; out_img_idy = (out_id_w % out_img_size) / out_img_w; out_img_idx = tid % out_img_w; } else { out_img_idy = out_id_w / (out_img_w * num_channels); out_img_idx = out_id_w % (out_img_w * num_channels) / num_channels; channel_id = tid % num_channels; } int in_img_idy = align_flag ? static_cast<int>(ratio_h * (out_img_idy + 0.5) - 0.5) : static_cast<int>(ratio_h * out_img_idy); in_img_idy = (in_img_idy > 0) ? in_img_idy : 0; int h_id = (in_img_idy < in_img_h - 1) ? 1 : 0; T src_h = ratio_h * (out_img_idy + 0.5) - 0.5; src_h = (src_h > 0) ? src_h : 0; T h1lambda = align_flag ? src_h - in_img_idy : ratio_h * out_img_idy - in_img_idy; T h2lambda = 1.f - h1lambda; int in_img_idx = align_flag ? static_cast<int>(ratio_w * (out_img_idx + 0.5) - 0.5) : static_cast<int>(ratio_w * out_img_idx); in_img_idx = (in_img_idx > 0) ? in_img_idx : 0; int w_id = (in_img_idx < in_img_w - 1) ? 1 : 0; T src_w = ratio_w * (out_img_idx + 0.5) - 0.5; src_w = (src_w > 0) ? src_w : 0; T w1lambda = align_flag ? 
src_w - in_img_idx : ratio_w * out_img_idx - in_img_idx; T w2lambda = 1.f - w1lambda; if (data_layout == DataLayout::kNCHW) { const T* in_pos = &in[out_id_h * input_w + channel_id * in_img_size + in_img_idy * in_img_w + in_img_idx]; // bilinear interpolation out[out_id_h * output_w + out_id_w] = h2lambda * (w2lambda * in_pos[0] + w1lambda * in_pos[w_id]) + h1lambda * (w2lambda * in_pos[h_id * in_img_w] + w1lambda * in_pos[h_id * in_img_w + w_id]); } else { const T* in_pos = &in[out_id_h * input_w + in_img_idy * in_img_w * num_channels + in_img_idx * num_channels + channel_id]; // bilinear interpolation out[out_id_h * output_w + out_id_w] = h2lambda * (w2lambda * in_pos[0] + w1lambda * in_pos[w_id * num_channels]) + h1lambda * (w2lambda * in_pos[h_id * in_img_w * num_channels] + w1lambda * in_pos[h_id * in_img_w * num_channels + w_id * num_channels]); } } } template <typename T> __global__ void KeBilinearInterpBw( T* in, const size_t in_img_h, const size_t in_img_w, const size_t input_h, const size_t input_w, const T* out, const size_t out_img_h, const size_t out_img_w, const size_t output_h, const size_t output_w, const size_t num_channels, const T ratio_h, const T ratio_w, const bool align_corners, const int align_mode, const DataLayout data_layout) { int nthreads = output_h * output_w; int tid = blockIdx.x * blockDim.x + threadIdx.x; int stride = blockDim.x * gridDim.x; bool align_flag = (align_mode == 0 && !align_corners); for (; tid < nthreads; tid += stride) { int out_id_h = tid / output_w; int out_id_w = tid % output_w; int in_img_size = input_w / num_channels; int out_img_size = output_w / num_channels; int channel_id, out_img_idy, out_img_idx; if (data_layout == DataLayout::kNCHW) { channel_id = out_id_w / out_img_size; out_img_idy = (out_id_w % out_img_size) / out_img_w; out_img_idx = tid % out_img_w; } else { out_img_idy = out_id_w / (out_img_w * num_channels); out_img_idx = out_id_w % (out_img_w * num_channels) / num_channels; channel_id = tid % num_channels; } int in_img_idy = align_flag ? ratio_h * (out_img_idy + 0.5) - 0.5 : ratio_h * out_img_idy; in_img_idy = (in_img_idy > 0) ? in_img_idy : 0; int h_id = (in_img_idy < in_img_h - 1) ? 1 : 0; T src_h = ratio_h * (out_img_idy + 0.5) - 0.5; src_h = (src_h > 0) ? src_h : 0; T h1lambda = align_flag ? src_h - in_img_idy : ratio_h * out_img_idy - in_img_idy; T h2lambda = 1.f - h1lambda; int in_img_idx = align_flag ? ratio_w * (out_img_idx + 0.5) - 0.5 : ratio_w * out_img_idx; in_img_idx = (in_img_idx > 0) ? in_img_idx : 0; int w_id = (in_img_idx < in_img_w - 1) ? 1 : 0; T src_w = ratio_w * (out_img_idx + 0.5) - 0.5; src_w = (src_w > 0) ? src_w : 0; T w1lambda = align_flag ? 
src_w - in_img_idx : ratio_w * out_img_idx - in_img_idx; T w2lambda = 1.f - w1lambda; T* in_pos; if (data_layout == DataLayout::kNCHW) { in_pos = &in[out_id_h * input_w + channel_id * in_img_size + in_img_idy * in_img_w + in_img_idx]; } else { in_pos = &in[out_id_h * input_w + in_img_idy * in_img_w * num_channels + in_img_idx * num_channels + channel_id]; } const T* out_pos = &out[out_id_h * output_w + out_id_w]; if (data_layout == DataLayout::kNCHW) { platform::CudaAtomicAdd(&in_pos[0], h2lambda * w2lambda * out_pos[0]); platform::CudaAtomicAdd(&in_pos[w_id], h2lambda * w1lambda * out_pos[0]); platform::CudaAtomicAdd(&in_pos[h_id * in_img_w], h1lambda * w2lambda * out_pos[0]); platform::CudaAtomicAdd(&in_pos[h_id * in_img_w + w_id], h1lambda * w1lambda * out_pos[0]); } else { platform::CudaAtomicAdd(&in_pos[0], h2lambda * w2lambda * out_pos[0]); platform::CudaAtomicAdd(&in_pos[w_id * num_channels], h2lambda * w1lambda * out_pos[0]); platform::CudaAtomicAdd(&in_pos[h_id * in_img_w * num_channels], h1lambda * w2lambda * out_pos[0]); platform::CudaAtomicAdd( &in_pos[h_id * in_img_w * num_channels + w_id * num_channels], h1lambda * w1lambda * out_pos[0]); } } } template <typename T> __global__ void KeTrilinearInterpFw( const T* in, const size_t in_img_d, const size_t in_img_h, const size_t in_img_w, const size_t input_h, const size_t input_w, T* out, const size_t out_img_d, const size_t out_img_h, const size_t out_img_w, const size_t output_h, const size_t output_w, const size_t num_channels, const float ratio_d, const float ratio_h, const float ratio_w, const bool align_corners, const int align_mode, const DataLayout data_layout) { int nthreads = output_h * output_w; int tid = blockIdx.x * blockDim.x + threadIdx.x; int stride = blockDim.x * gridDim.x; bool align_flag = (align_mode == 0 && !align_corners); for (; tid < nthreads; tid += stride) { int out_id_h = tid / output_w; int out_id_w = tid % output_w; int in_img_size = input_w / num_channels; int out_img_size = output_w / num_channels; int channel_id, out_img_idt, out_img_idy, out_img_idx; if (data_layout == DataLayout::kNCHW) { channel_id = out_id_w / out_img_size; out_img_idt = (out_id_w % out_img_size) / out_img_h / out_img_w; out_img_idy = ((out_id_w % out_img_size) / out_img_w) % out_img_h; out_img_idx = tid % out_img_w; } else { out_img_idt = out_id_w / (out_img_h * out_img_w * num_channels); out_img_idy = out_id_w % (out_img_h * out_img_w * num_channels) / (out_img_w * num_channels); out_img_idx = out_id_w % (out_img_w * num_channels) / num_channels; channel_id = tid % num_channels; } int in_img_idt = align_flag ? static_cast<int>(ratio_d * (out_img_idt + 0.5) - 0.5) : static_cast<int>(ratio_d * out_img_idt); in_img_idt = (in_img_idt > 0) ? in_img_idt : 0; int d_id = (in_img_idt < in_img_d - 1) ? 1 : 0; T src_d = ratio_d * (out_img_idt + 0.5) - 0.5; src_d = (src_d > 0) ? src_d : 0; T d1lambda = align_flag ? src_d - in_img_idt : ratio_d * out_img_idt - in_img_idt; T d2lambda = 1.f - d1lambda; int in_img_idy = align_flag ? static_cast<int>(ratio_h * (out_img_idy + 0.5) - 0.5) : static_cast<int>(ratio_h * out_img_idy); in_img_idy = (in_img_idy > 0) ? in_img_idy : 0; int h_id = (in_img_idy < in_img_h - 1) ? 1 : 0; T src_h = ratio_h * (out_img_idy + 0.5) - 0.5; src_h = (src_h > 0) ? src_h : 0; T h1lambda = align_flag ? src_h - in_img_idy : ratio_h * out_img_idy - in_img_idy; T h2lambda = 1.f - h1lambda; int in_img_idx = align_flag ? 
static_cast<int>(ratio_w * (out_img_idx + 0.5) - 0.5) : static_cast<int>(ratio_w * out_img_idx); in_img_idx = (in_img_idx > 0) ? in_img_idx : 0; int w_id = (in_img_idx < in_img_w - 1) ? 1 : 0; T src_w = ratio_w * (out_img_idx + 0.5) - 0.5; src_w = (src_w > 0) ? src_w : 0; T w1lambda = align_flag ? src_w - in_img_idx : ratio_w * out_img_idx - in_img_idx; T w2lambda = 1.f - w1lambda; if (data_layout == DataLayout::kNCHW) { int in_pos1_idx = out_id_h * input_w + channel_id * in_img_size + (in_img_idt * in_img_h + in_img_idy) * in_img_w + in_img_idx; const T* in_pos1 = &in[in_pos1_idx]; int in_pos2_idx = in_pos1_idx + d_id * in_img_h * in_img_w; const T* in_pos2 = &in[in_pos2_idx]; // trilinear interpolation out[out_id_h * output_w + out_id_w] = d2lambda * (h2lambda * (w2lambda * in_pos1[0] + w1lambda * in_pos1[w_id]) + h1lambda * (w2lambda * in_pos1[h_id * in_img_w] + w1lambda * in_pos1[h_id * in_img_w + w_id])) + d1lambda * (h2lambda * (w2lambda * in_pos2[0] + w1lambda * in_pos2[w_id]) + h1lambda * (w2lambda * in_pos2[h_id * in_img_w] + w1lambda * in_pos2[h_id * in_img_w + w_id])); } else { int in_pos1_idx = out_id_h * input_w + in_img_idt * in_img_h * in_img_w * num_channels + in_img_idy * in_img_w * num_channels + in_img_idx * num_channels + channel_id; const T* in_pos1 = &in[in_pos1_idx]; int in_pos2_idx = in_pos1_idx + d_id * in_img_h * in_img_w * num_channels; const T* in_pos2 = &in[in_pos2_idx]; // trilinear interpolation out[out_id_h * output_w + out_id_w] = d2lambda * (h2lambda * (w2lambda * in_pos1[0] + w1lambda * in_pos1[w_id * num_channels]) + h1lambda * (w2lambda * in_pos1[h_id * in_img_w * num_channels] + w1lambda * in_pos1[h_id * in_img_w * num_channels + w_id * num_channels])) + d1lambda * (h2lambda * (w2lambda * in_pos2[0] + w1lambda * in_pos2[w_id * num_channels]) + h1lambda * (w2lambda * in_pos2[h_id * in_img_w * num_channels] + w1lambda * in_pos2[h_id * in_img_w * num_channels + w_id * num_channels])); } } } template <typename T> __global__ void KeTrilinearInterpBw( T* in, const size_t in_img_d, const size_t in_img_h, const size_t in_img_w, const size_t input_h, const size_t input_w, const T* out, const size_t out_img_d, const size_t out_img_h, const size_t out_img_w, const size_t output_h, const size_t output_w, const size_t num_channels, const T ratio_d, const T ratio_h, const T ratio_w, const bool align_corners, const int align_mode, const DataLayout data_layout) { int nthreads = output_h * output_w; int tid = blockIdx.x * blockDim.x + threadIdx.x; int stride = blockDim.x * gridDim.x; bool align_flag = (align_mode == 0 && !align_corners); for (; tid < nthreads; tid += stride) { int out_id_h = tid / output_w; int out_id_w = tid % output_w; int in_img_size = input_w / num_channels; int out_img_size = output_w / num_channels; int channel_id, out_img_idt, out_img_idy, out_img_idx; if (data_layout == DataLayout::kNCHW) { channel_id = out_id_w / out_img_size; out_img_idt = (out_id_w % out_img_size) / out_img_h / out_img_w; out_img_idy = ((out_id_w % out_img_size) / out_img_w) % out_img_h; out_img_idx = tid % out_img_w; } else { out_img_idt = out_id_w / (out_img_h * out_img_w * num_channels); out_img_idy = out_id_w % (out_img_h * out_img_w * num_channels) / (out_img_w * num_channels); out_img_idx = out_id_w % (out_img_w * num_channels) / num_channels; channel_id = tid % num_channels; } int in_img_idt = align_flag ? static_cast<int>(ratio_d * (out_img_idt + 0.5) - 0.5) : static_cast<int>(ratio_d * out_img_idt); in_img_idt = (in_img_idt > 0) ? 
in_img_idt : 0; int d_id = (in_img_idt < in_img_d - 1) ? 1 : 0; T src_d = ratio_d * (out_img_idt + 0.5) - 0.5; src_d = (src_d > 0) ? src_d : 0; T d1lambda = align_flag ? src_d - in_img_idt : ratio_d * out_img_idt - in_img_idt; T d2lambda = 1.f - d1lambda; int in_img_idy = align_flag ? static_cast<int>(ratio_h * (out_img_idy + 0.5) - 0.5) : static_cast<int>(ratio_h * out_img_idy); in_img_idy = (in_img_idy > 0) ? in_img_idy : 0; int h_id = (in_img_idy < in_img_h - 1) ? 1 : 0; T src_h = ratio_h * (out_img_idy + 0.5) - 0.5; src_h = (src_h > 0) ? src_h : 0; T h1lambda = align_flag ? src_h - in_img_idy : ratio_h * out_img_idy - in_img_idy; T h2lambda = 1.f - h1lambda; int in_img_idx = align_flag ? static_cast<int>(ratio_w * (out_img_idx + 0.5) - 0.5) : static_cast<int>(ratio_w * out_img_idx); in_img_idx = (in_img_idx > 0) ? in_img_idx : 0; int w_id = (in_img_idx < in_img_w - 1) ? 1 : 0; T src_w = ratio_w * (out_img_idx + 0.5) - 0.5; src_w = (src_w > 0) ? src_w : 0; T w1lambda = align_flag ? src_w - in_img_idx : ratio_w * out_img_idx - in_img_idx; T w2lambda = 1.f - w1lambda; if (data_layout == DataLayout::kNCHW) { int in_pos1_idx = out_id_h * input_w + channel_id * in_img_size + (in_img_idt * in_img_h + in_img_idy) * in_img_w + in_img_idx; T* in_pos1 = &in[in_pos1_idx]; int in_pos2_idx = in_pos1_idx + d_id * in_img_h * in_img_w; T* in_pos2 = &in[in_pos2_idx]; const T* out_pos = &out[out_id_h * output_w + out_id_w]; // trilinear interpolation grad platform::CudaAtomicAdd(&in_pos1[0], d2lambda * h2lambda * w2lambda * out_pos[0]); platform::CudaAtomicAdd(&in_pos1[w_id], d2lambda * h2lambda * w1lambda * out_pos[0]); platform::CudaAtomicAdd(&in_pos1[h_id * in_img_w], d2lambda * h1lambda * w2lambda * out_pos[0]); platform::CudaAtomicAdd(&in_pos1[h_id * in_img_w + w_id], d2lambda * h1lambda * w1lambda * out_pos[0]); platform::CudaAtomicAdd(&in_pos2[0], d1lambda * h2lambda * w2lambda * out_pos[0]); platform::CudaAtomicAdd(&in_pos2[w_id], d1lambda * h2lambda * w1lambda * out_pos[0]); platform::CudaAtomicAdd(&in_pos2[h_id * in_img_w], d1lambda * h1lambda * w2lambda * out_pos[0]); platform::CudaAtomicAdd(&in_pos2[h_id * in_img_w + w_id], d1lambda * h1lambda * w1lambda * out_pos[0]); } else { int in_pos1_idx = out_id_h * input_w + in_img_idt * in_img_h * in_img_w * num_channels + in_img_idy * in_img_w * num_channels + in_img_idx * num_channels + channel_id; T* in_pos1 = &in[in_pos1_idx]; int in_pos2_idx = in_pos1_idx + d_id * in_img_h * in_img_w * num_channels; T* in_pos2 = &in[in_pos2_idx]; const T* out_pos = &out[out_id_h * output_w + out_id_w]; // trilinear interpolation grad platform::CudaAtomicAdd(&in_pos1[0], d2lambda * h2lambda * w2lambda * out_pos[0]); platform::CudaAtomicAdd(&in_pos1[w_id * num_channels], d2lambda * h2lambda * w1lambda * out_pos[0]); platform::CudaAtomicAdd(&in_pos1[h_id * in_img_w * num_channels], d2lambda * h1lambda * w2lambda * out_pos[0]); platform::CudaAtomicAdd( &in_pos1[h_id * in_img_w * num_channels + w_id * num_channels], d2lambda * h1lambda * w1lambda * out_pos[0]); platform::CudaAtomicAdd(&in_pos2[0], d1lambda * h2lambda * w2lambda * out_pos[0]); platform::CudaAtomicAdd(&in_pos2[w_id * num_channels], d1lambda * h2lambda * w1lambda * out_pos[0]); platform::CudaAtomicAdd(&in_pos2[h_id * in_img_w * num_channels], d1lambda * h1lambda * w2lambda * out_pos[0]); platform::CudaAtomicAdd( &in_pos2[h_id * in_img_w * num_channels + w_id * num_channels], d1lambda * h1lambda * w1lambda * out_pos[0]); } } } template <typename T> __device__ __forceinline__ static T Kecubic_interp(const 
T x0, const T x1, const T x2, const T x3, T t) { T coeffs[4]; T a = -0.75; T x_1 = t; T x_2 = 1.0 - t; coeffs[0] = cubic_convolution2<T>(x_1 + 1.0, a); coeffs[1] = cubic_convolution1<T>(x_1, a); coeffs[2] = cubic_convolution1<T>(x_2, a); coeffs[3] = cubic_convolution2<T>(x_2 + 1.0, a); return x0 * coeffs[0] + x1 * coeffs[1] + x2 * coeffs[2] + x3 * coeffs[3]; } template <typename T> __global__ void KeBicubicInterpFw( const T* in, const size_t in_img_h, const size_t in_img_w, const size_t input_h, const size_t input_w, T* out, const size_t out_img_h, const size_t out_img_w, const size_t output_h, const size_t output_w, const size_t num_channels, const float ratio_h, const float ratio_w, const bool align_corners, const DataLayout data_layout) { int nthreads = output_h * output_w; int tid = blockIdx.x * blockDim.x + threadIdx.x; int stride = blockDim.x * gridDim.x; for (; tid < nthreads; tid += stride) { int out_id_h = tid / output_w; int out_id_w = tid % output_w; int in_img_size = input_w / num_channels; int out_img_size = output_w / num_channels; int channel_id, out_img_idy, out_img_idx; if (data_layout == DataLayout::kNCHW) { channel_id = out_id_w / out_img_size; out_img_idy = (out_id_w % out_img_size) / out_img_w; out_img_idx = tid % out_img_w; } else { out_img_idy = out_id_w / (out_img_w * num_channels); out_img_idx = out_id_w % (out_img_w * num_channels) / num_channels; channel_id = tid % num_channels; } T in_img_idy = align_corners ? static_cast<T>(ratio_h * out_img_idy) : static_cast<T>(ratio_h * (out_img_idy + 0.5) - 0.5); int input_y = floorf(in_img_idy); const T y_t = in_img_idy - input_y; T in_img_idx = align_corners ? static_cast<T>(ratio_w * out_img_idx) : static_cast<T>(ratio_w * (out_img_idx + 0.5) - 0.5); int input_x = floorf(in_img_idx); const T x_t = in_img_idx - input_x; T coefficients[4]; const T* in_pos_0; const T* in_pos_1; const T* in_pos_2; const T* in_pos_3; int access_x_0; if (data_layout == DataLayout::kNCHW) { for (int k = 0; k < 4; k++) { int access_y = max(min(input_y - 1 + k, static_cast<int>(in_img_h - 1)), 0); access_x_0 = max(min(input_x - 1, static_cast<int>(in_img_w - 1)), 0); int access_x_1 = max(min(input_x + 0, static_cast<int>(in_img_w - 1)), 0); int access_x_2 = max(min(input_x + 1, static_cast<int>(in_img_w - 1)), 0); int access_x_3 = max(min(input_x + 2, static_cast<int>(in_img_w - 1)), 0); in_pos_0 = &in[out_id_h * input_w + channel_id * in_img_size + access_y * in_img_w + access_x_0]; in_pos_1 = &in[out_id_h * input_w + channel_id * in_img_size + access_y * in_img_w + access_x_1]; in_pos_2 = &in[out_id_h * input_w + channel_id * in_img_size + access_y * in_img_w + access_x_2]; in_pos_3 = &in[out_id_h * input_w + channel_id * in_img_size + access_y * in_img_w + access_x_3]; coefficients[k] = Kecubic_interp<T>(in_pos_0[0], in_pos_1[0], in_pos_2[0], in_pos_3[0], x_t); } out[out_id_h * output_w + out_id_w] = Kecubic_interp<T>(coefficients[0], coefficients[1], coefficients[2], coefficients[3], y_t); } else { for (int k = 0; k < 4; k++) { int access_y = max(min(input_y - 1 + k, static_cast<int>((in_img_h - 1))), 0); int access_x_0 = max(min(input_x - 1, static_cast<int>((in_img_w - 1))), 0); int access_x_1 = max(min(input_x + 0, static_cast<int>((in_img_w - 1))), 0); int access_x_2 = max(min(input_x + 1, static_cast<int>((in_img_w - 1))), 0); int access_x_3 = max(min(input_x + 2, static_cast<int>((in_img_w - 1))), 0); const T* in_pos_0 = &in[out_id_h * input_w + access_y * in_img_w * num_channels + access_x_0 * num_channels + channel_id]; const T* 
in_pos_1 = &in[out_id_h * input_w + access_y * in_img_w * num_channels + access_x_1 * num_channels + channel_id]; const T* in_pos_2 = &in[out_id_h * input_w + access_y * in_img_w * num_channels + access_x_2 * num_channels + channel_id]; const T* in_pos_3 = &in[out_id_h * input_w + access_y * in_img_w * num_channels + access_x_3 * num_channels + channel_id]; coefficients[k] = Kecubic_interp(in_pos_0[0], in_pos_1[0], in_pos_2[0], in_pos_3[0], x_t); } out[out_id_h * output_w + out_id_w] = static_cast<T>(Kecubic_interp(coefficients[0], coefficients[1], coefficients[2], coefficients[3], y_t)); } } } template <typename T> __global__ void KeBicubicInterpBw( T* in, const size_t in_img_h, const size_t in_img_w, const size_t input_h, const size_t input_w, const T* out, const size_t out_img_h, const size_t out_img_w, const size_t output_h, const size_t output_w, const size_t num_channels, const float ratio_h, const float ratio_w, const bool align_corners, const DataLayout data_layout) { int nthreads = output_h * output_w; int tid = blockIdx.x * blockDim.x + threadIdx.x; int stride = blockDim.x * gridDim.x; for (; tid < nthreads; tid += stride) { int out_id_h = tid / output_w; int out_id_w = tid % output_w; int in_img_size = input_w / num_channels; int out_img_size = output_w / num_channels; int channel_id, out_img_idy, out_img_idx; if (data_layout == DataLayout::kNCHW) { channel_id = out_id_w / out_img_size; out_img_idy = (out_id_w % out_img_size) / out_img_w; out_img_idx = tid % out_img_w; } else { out_img_idy = out_id_w / (out_img_w * num_channels); out_img_idx = out_id_w % (out_img_w * num_channels) / num_channels; channel_id = tid % num_channels; } T in_img_idy = align_corners ? static_cast<T>(ratio_h * out_img_idy) : static_cast<T>(ratio_h * (out_img_idy + 0.5) - 0.5); int input_y = floorf(in_img_idy); const T y_t = in_img_idy - input_y; T in_img_idx = align_corners ? 
static_cast<T>(ratio_w * out_img_idx) : static_cast<T>(ratio_w * (out_img_idx + 0.5) - 0.5); int input_x = floorf(in_img_idx); const T x_t = in_img_idx - input_x; T x_coeffs[4]; T y_coeffs[4]; get_cubic_upsample_coefficients(x_coeffs, x_t); get_cubic_upsample_coefficients(y_coeffs, y_t); const T* out_pos = &out[out_id_h * output_w + out_id_w]; T* in_pos; for (int i = 0; i < 4; i++) { for (int j = 0; j < 4; j++) { int access_y = max(min(static_cast<int>(input_y - 1 + j), static_cast<int>(in_img_h - 1)), 0); int access_x = max(min(static_cast<int>(input_x - 1 + i), static_cast<int>(in_img_w - 1)), 0); if (data_layout == DataLayout::kNCHW) { in_pos = &in[out_id_h * input_w + channel_id * in_img_size + access_y * in_img_w + access_x]; } else { in_pos = &in[out_id_h * input_w + access_y * in_img_w * num_channels + access_x * num_channels + channel_id]; } platform::CudaAtomicAdd(&in_pos[0], (out_pos[0] * y_coeffs[j] * x_coeffs[i])); } } } } template <typename T> static void Interpolate1DCUDAFwd(const framework::ExecutionContext& ctx, const Tensor& input, Tensor* output) { auto* input_data = input.data<T>(); const std::string data_layout_str = ctx.Attr<std::string>("data_layout"); const DataLayout data_layout = framework::StringToDataLayout(data_layout_str); int n, c, in_d, in_h, in_w; ExtractNCDWH(input.dims(), data_layout, &n, &c, &in_d, &in_h, &in_w); auto interp_method = ctx.Attr<std::string>("interp_method"); bool align_corners = ctx.Attr<bool>("align_corners"); int align_mode = ctx.Attr<int>("align_mode"); int out_w = ctx.Attr<int>("out_w"); auto list_new_shape_tensor = ctx.MultiInput<framework::Tensor>("SizeTensor"); if (list_new_shape_tensor.size() > 0) { // have size tensor auto new_size = get_new_shape(list_new_shape_tensor); out_w = new_size[0]; } else { float scale; auto scale_tensor = ctx.Input<Tensor>("Scale"); if (scale_tensor != nullptr) { auto scale_data = get_new_data_from_tensor<float>(scale_tensor); scale = scale_data[0]; } else { scale = ctx.Attr<float>("scale"); } if (scale > 0) { out_w = static_cast<int>(in_w * scale); } auto out_size = ctx.Input<Tensor>("OutSize"); if (out_size != nullptr) { Tensor sizes; framework::TensorCopySync(*out_size, platform::CPUPlace(), &sizes); auto size_data = sizes.data<int>(); out_w = size_data[0]; } } PADDLE_ENFORCE_GT(out_w, 0, platform::errors::InvalidArgument( "out_w in Attr(out_shape) of Op(interpolate) " "should be greater than 0.")); framework::DDim dim_out; if (data_layout == DataLayout::kNCHW) { dim_out = {n, c, out_w}; } else { dim_out = {n, out_w, c}; } auto output_data = output->mutable_data<T>(dim_out, ctx.GetPlace()); if (in_w == out_w) { framework::TensorCopy(input, ctx.GetPlace(), output); return; } float ratio_w = 0.f; if (out_w > 1) { ratio_w = (align_corners) ? 
static_cast<float>(in_w - 1.0) / (out_w - 1.0) : static_cast<float>(in_w) / out_w; } int in_cw = c * in_w; int out_cw = c * out_w; int pixelNum = n * out_cw; platform::GpuLaunchConfig config = platform::GetGpuLaunchConfig1D(ctx.cuda_device_context(), pixelNum); if ("linear" == interp_method) { hipLaunchKernelGGL(( KeLinearInterpFw<T>), dim3(config.block_per_grid), dim3(config.thread_per_block), 0, ctx.cuda_device_context().stream(), input_data, in_w, in_cw, output_data, out_w, n, out_cw, c, ratio_w, align_corners, align_mode, data_layout); } } template <typename T> static void Interpolate2DCUDAFwd(const framework::ExecutionContext& ctx, const Tensor& input, Tensor* output) { auto* input_data = input.data<T>(); const std::string data_layout_str = ctx.Attr<std::string>("data_layout"); const DataLayout data_layout = framework::StringToDataLayout(data_layout_str); int n, c, in_d, in_h, in_w; ExtractNCDWH(input.dims(), data_layout, &n, &c, &in_d, &in_h, &in_w); auto interp_method = ctx.Attr<std::string>("interp_method"); bool align_corners = ctx.Attr<bool>("align_corners"); int align_mode = ctx.Attr<int>("align_mode"); int out_h = ctx.Attr<int>("out_h"); int out_w = ctx.Attr<int>("out_w"); auto list_new_shape_tensor = ctx.MultiInput<framework::Tensor>("SizeTensor"); if (list_new_shape_tensor.size() > 0) { // have size tensor auto new_size = get_new_shape(list_new_shape_tensor); out_h = new_size[0]; out_w = new_size[1]; } else { float scale; auto scale_tensor = ctx.Input<Tensor>("Scale"); if (scale_tensor != nullptr) { auto scale_data = get_new_data_from_tensor<float>(scale_tensor); scale = scale_data[0]; } else { scale = ctx.Attr<float>("scale"); } if (scale > 0) { out_h = static_cast<int>(in_h * scale); out_w = static_cast<int>(in_w * scale); } auto out_size = ctx.Input<Tensor>("OutSize"); if (out_size != nullptr) { Tensor sizes; framework::TensorCopySync(*out_size, platform::CPUPlace(), &sizes); auto size_data = sizes.data<int>(); out_h = size_data[0]; out_w = size_data[1]; } } PADDLE_ENFORCE_GT(out_h, 0, platform::errors::InvalidArgument( "out_h in Attr(out_shape) of Op(interpolate) " "should be greater than 0.")); PADDLE_ENFORCE_GT(out_w, 0, platform::errors::InvalidArgument( "out_w in Attr(out_shape) of Op(interpolate) " "should be greater than 0.")); framework::DDim dim_out; if (data_layout == DataLayout::kNCHW) { dim_out = {n, c, out_h, out_w}; } else { dim_out = {n, out_h, out_w, c}; } auto output_data = output->mutable_data<T>(dim_out, ctx.GetPlace()); if (in_h == out_h && in_w == out_w) { framework::TensorCopy(input, ctx.GetPlace(), output); return; } float ratio_h = 0.f; float ratio_w = 0.f; if (out_h > 1) { ratio_h = (align_corners) ? static_cast<float>(in_h - 1) / (out_h - 1) : static_cast<float>(in_h) / out_h; } if (out_w > 1) { ratio_w = (align_corners) ? 
static_cast<float>(in_w - 1) / (out_w - 1) : static_cast<float>(in_w) / out_w; } int in_hw = in_h * in_w; int out_hw = out_h * out_w; int in_chw = c * in_hw; int out_chw = c * out_hw; int pixelNum = n * out_chw; platform::GpuLaunchConfig config = platform::GetGpuLaunchConfig1D(ctx.cuda_device_context(), pixelNum); if ("nearest" == interp_method) { hipLaunchKernelGGL(( KeNearestNeighborInterpFw< T>), dim3(config.block_per_grid), dim3(config.thread_per_block), 0, ctx.cuda_device_context().stream(), input_data, in_h, in_w, n, in_chw, output_data, out_h, out_w, n, out_chw, c, ratio_h, ratio_w, align_corners, data_layout); } else if ("bilinear" == interp_method) { hipLaunchKernelGGL(( KeBilinearInterpFw<T>), dim3(config.block_per_grid), dim3(config.thread_per_block), 0, ctx.cuda_device_context().stream(), input_data, in_h, in_w, n, in_chw, output_data, out_h, out_w, n, out_chw, c, ratio_h, ratio_w, align_corners, align_mode, data_layout); } else if ("bicubic" == interp_method) { hipLaunchKernelGGL(( KeBicubicInterpFw<T>), dim3(config.block_per_grid), dim3(512), 0, ctx.cuda_device_context().stream(), input_data, in_h, in_w, n, in_chw, output_data, out_h, out_w, n, out_chw, c, ratio_h, ratio_w, align_corners, data_layout); } } template <typename T> static void Interpolate3DCUDAFwd(const framework::ExecutionContext& ctx, const Tensor& input, Tensor* output) { auto* input_data = input.data<T>(); const std::string data_layout_str = ctx.Attr<std::string>("data_layout"); const DataLayout data_layout = framework::StringToDataLayout(data_layout_str); int n, c, in_d, in_h, in_w; ExtractNCDWH(input.dims(), data_layout, &n, &c, &in_d, &in_h, &in_w); auto interp_method = ctx.Attr<std::string>("interp_method"); bool align_corners = ctx.Attr<bool>("align_corners"); int align_mode = ctx.Attr<int>("align_mode"); int out_d = ctx.Attr<int>("out_d"); int out_h = ctx.Attr<int>("out_h"); int out_w = ctx.Attr<int>("out_w"); auto list_new_shape_tensor = ctx.MultiInput<framework::Tensor>("SizeTensor"); if (list_new_shape_tensor.size() > 0) { // have size tensor auto new_size = get_new_shape(list_new_shape_tensor); out_d = new_size[0]; out_h = new_size[1]; out_w = new_size[2]; } else { float scale; auto scale_tensor = ctx.Input<Tensor>("Scale"); if (scale_tensor != nullptr) { auto scale_data = get_new_data_from_tensor<float>(scale_tensor); scale = scale_data[0]; } else { scale = ctx.Attr<float>("scale"); } if (scale > 0) { out_d = static_cast<int>(in_d * scale); out_h = static_cast<int>(in_h * scale); out_w = static_cast<int>(in_w * scale); } auto out_size = ctx.Input<Tensor>("OutSize"); if (out_size != nullptr) { Tensor sizes; framework::TensorCopySync(*out_size, platform::CPUPlace(), &sizes); auto size_data = sizes.data<int>(); out_d = size_data[0]; out_h = size_data[1]; out_w = size_data[2]; } } PADDLE_ENFORCE_GT(out_d, 0, platform::errors::InvalidArgument( "out_d in Attr(out_shape) of Op(interpolate) " "should be greater than 0.")); PADDLE_ENFORCE_GT(out_h, 0, platform::errors::InvalidArgument( "out_h in Attr(out_shape) of Op(interpolate) " "should be greater than 0.")); PADDLE_ENFORCE_GT(out_w, 0, platform::errors::InvalidArgument( "out_w in Attr(out_shape) of Op(interpolate) " "should be greater than 0.")); framework::DDim dim_out; if (data_layout == DataLayout::kNCHW) { dim_out = {n, c, out_d, out_h, out_w}; } else { dim_out = {n, out_d, out_h, out_w, c}; } auto output_data = output->mutable_data<T>(dim_out, ctx.GetPlace()); if (in_d == out_d && in_h == out_h && in_w == out_w) { framework::TensorCopy(input, 
ctx.GetPlace(), output); return; } float ratio_d = 0.f; float ratio_h = 0.f; float ratio_w = 0.f; if (out_d > 1) { ratio_d = (align_corners) ? static_cast<float>(in_d - 1) / (out_d - 1) : static_cast<float>(in_d) / out_d; } if (out_h > 1) { ratio_h = (align_corners) ? static_cast<float>(in_h - 1) / (out_h - 1) : static_cast<float>(in_h) / out_h; } if (out_w > 1) { ratio_w = (align_corners) ? static_cast<float>(in_w - 1) / (out_w - 1) : static_cast<float>(in_w) / out_w; } int in_dhw = in_d * in_h * in_w; int out_dhw = out_d * out_h * out_w; int in_cdhw = c * in_dhw; int out_cdhw = c * out_dhw; int pixelNum = n * out_cdhw; platform::GpuLaunchConfig config = platform::GetGpuLaunchConfig1D(ctx.cuda_device_context(), pixelNum); if ("trilinear" == interp_method) { hipLaunchKernelGGL(( KeTrilinearInterpFw<T>), dim3(config.block_per_grid), dim3(config.thread_per_block), 0, ctx.cuda_device_context().stream(), input_data, in_d, in_h, in_w, n, in_cdhw, output_data, out_d, out_h, out_w, n, out_cdhw, c, ratio_d, ratio_h, ratio_w, align_corners, align_mode, data_layout); } } template <typename T> static void Interpolate1DCUDABwd(const framework::ExecutionContext& ctx, Tensor* input_grad, const Tensor output_grad) { auto* input = ctx.Input<Tensor>("X"); const std::string data_layout_str = ctx.Attr<std::string>("data_layout"); const DataLayout data_layout = framework::StringToDataLayout(data_layout_str); int n, c, in_d, in_h, in_w; ExtractNCDWH(input->dims(), data_layout, &n, &c, &in_d, &in_h, &in_w); auto interp_method = ctx.Attr<std::string>("interp_method"); bool align_corners = ctx.Attr<bool>("align_corners"); int align_mode = ctx.Attr<int>("align_mode"); int out_w = ctx.Attr<int>("out_w"); float scale; auto scale_tensor = ctx.Input<Tensor>("Scale"); if (scale_tensor != nullptr) { auto scale_data = get_new_data_from_tensor<float>(scale_tensor); scale = scale_data[0]; } else { scale = ctx.Attr<float>("scale"); } if (scale > 0) { out_w = static_cast<int>(in_w * scale); } auto out_size = ctx.Input<Tensor>("OutSize"); if (out_size != nullptr) { Tensor sizes; framework::TensorCopySync(*out_size, platform::CPUPlace(), &sizes); auto size_data = sizes.data<int>(); out_w = size_data[0]; } auto list_new_size_tensor = ctx.MultiInput<framework::Tensor>("SizeTensor"); if (list_new_size_tensor.size() > 0) { // have size tensor auto new_size = get_new_shape(list_new_size_tensor); out_w = new_size[0]; } auto* output_grad_data = output_grad.data<T>(); framework::DDim dim_grad; if (data_layout == DataLayout::kNCHW) { dim_grad = {n, c, in_w}; } else { dim_grad = {n, in_w, c}; } input_grad->mutable_data<T>(dim_grad, ctx.GetPlace()); auto* input_grad_data = input_grad->mutable_data<T>(dim_grad, ctx.GetPlace()); auto& device_ctx = ctx.template device_context<platform::CUDADeviceContext>(); math::SetConstant<platform::CUDADeviceContext, T> zero; zero(device_ctx, input_grad, static_cast<T>(0.0)); if (in_w == out_w) { framework::TensorCopy(output_grad, ctx.GetPlace(), input_grad); return; } float ratio_w = 0.f; if (out_w > 1) { ratio_w = (align_corners) ? 
static_cast<float>(in_w - 1) / (out_w - 1) : static_cast<float>(in_w) / out_w; } int in_cw = c * in_w; int out_cw = c * out_w; int pixelNum = n * out_cw; platform::GpuLaunchConfig config = platform::GetGpuLaunchConfig1D(ctx.cuda_device_context(), pixelNum); if ("linear" == interp_method) { hipLaunchKernelGGL(( KeLinearInterpBw<T>), dim3(config.block_per_grid), dim3(config.thread_per_block), 0, ctx.cuda_device_context().stream(), input_grad_data, in_w, in_cw, output_grad_data, out_w, n, out_cw, c, ratio_w, align_corners, align_mode, data_layout); } } template <typename T> static void Interpolate2DCUDABwd(const framework::ExecutionContext& ctx, Tensor* input_grad, const Tensor output_grad) { auto* input = ctx.Input<Tensor>("X"); const std::string data_layout_str = ctx.Attr<std::string>("data_layout"); const DataLayout data_layout = framework::StringToDataLayout(data_layout_str); int n, c, in_d, in_h, in_w; ExtractNCDWH(input->dims(), data_layout, &n, &c, &in_d, &in_h, &in_w); auto interp_method = ctx.Attr<std::string>("interp_method"); bool align_corners = ctx.Attr<bool>("align_corners"); int align_mode = ctx.Attr<int>("align_mode"); int out_h = ctx.Attr<int>("out_h"); int out_w = ctx.Attr<int>("out_w"); float scale; auto scale_tensor = ctx.Input<Tensor>("Scale"); if (scale_tensor != nullptr) { auto scale_data = get_new_data_from_tensor<float>(scale_tensor); scale = scale_data[0]; } else { scale = ctx.Attr<float>("scale"); } if (scale > 0) { out_h = static_cast<int>(in_h * scale); out_w = static_cast<int>(in_w * scale); } auto out_size = ctx.Input<Tensor>("OutSize"); if (out_size != nullptr) { Tensor sizes; framework::TensorCopySync(*out_size, platform::CPUPlace(), &sizes); auto size_data = sizes.data<int>(); out_h = size_data[0]; out_w = size_data[1]; } auto list_new_size_tensor = ctx.MultiInput<framework::Tensor>("SizeTensor"); if (list_new_size_tensor.size() > 0) { // have size tensor auto new_size = get_new_shape(list_new_size_tensor); out_h = new_size[0]; out_w = new_size[1]; } auto* output_grad_data = output_grad.data<T>(); framework::DDim dim_grad; if (data_layout == DataLayout::kNCHW) { dim_grad = {n, c, in_h, in_w}; } else { dim_grad = {n, in_h, in_w, c}; } input_grad->mutable_data<T>(dim_grad, ctx.GetPlace()); auto* input_grad_data = input_grad->mutable_data<T>(dim_grad, ctx.GetPlace()); auto& device_ctx = ctx.template device_context<platform::CUDADeviceContext>(); math::SetConstant<platform::CUDADeviceContext, T> zero; zero(device_ctx, input_grad, static_cast<T>(0.0)); if (in_h == out_h && in_w == out_w) { framework::TensorCopy(output_grad, ctx.GetPlace(), input_grad); return; } float ratio_h = 0.f; float ratio_w = 0.f; if (out_h > 1) { ratio_h = (align_corners) ? static_cast<float>(in_h - 1) / (out_h - 1) : static_cast<float>(in_h) / out_h; } if (out_w > 1) { ratio_w = (align_corners) ? 
static_cast<float>(in_w - 1) / (out_w - 1) : static_cast<float>(in_w) / out_w; } int in_hw = in_h * in_w; int out_hw = out_h * out_w; int in_chw = c * in_hw; int out_chw = c * out_hw; int pixelNum = n * out_chw; platform::GpuLaunchConfig config = platform::GetGpuLaunchConfig1D(ctx.cuda_device_context(), pixelNum); if ("nearest" == interp_method) { hipLaunchKernelGGL(( KeNearestNeighborInterpBw< T>), dim3(config.block_per_grid), dim3(config.thread_per_block), 0, ctx.cuda_device_context().stream(), input_grad_data, in_h, in_w, n, in_chw, output_grad_data, out_h, out_w, n, out_chw, c, ratio_h, ratio_w, align_corners, data_layout); } else if ("bilinear" == interp_method) { hipLaunchKernelGGL(( KeBilinearInterpBw<T>), dim3(config.block_per_grid), dim3(config.thread_per_block), 0, ctx.cuda_device_context().stream(), input_grad_data, in_h, in_w, n, in_chw, output_grad_data, out_h, out_w, n, out_chw, c, ratio_h, ratio_w, align_corners, align_mode, data_layout); } else if ("bicubic" == interp_method) { hipLaunchKernelGGL(( KeBicubicInterpBw<T>), dim3(config.block_per_grid), dim3(512), 0, ctx.cuda_device_context().stream(), input_grad_data, in_h, in_w, n, in_chw, output_grad_data, out_h, out_w, n, out_chw, c, ratio_h, ratio_w, align_corners, data_layout); } } template <typename T> static void Interpolate3DCUDABwd(const framework::ExecutionContext& ctx, Tensor* input_grad, const Tensor& output_grad) { auto* input = ctx.Input<Tensor>("X"); const std::string data_layout_str = ctx.Attr<std::string>("data_layout"); const DataLayout data_layout = framework::StringToDataLayout(data_layout_str); int n, c, in_d, in_h, in_w; ExtractNCDWH(input->dims(), data_layout, &n, &c, &in_d, &in_h, &in_w); auto interp_method = ctx.Attr<std::string>("interp_method"); bool align_corners = ctx.Attr<bool>("align_corners"); int align_mode = ctx.Attr<int>("align_mode"); int out_d = ctx.Attr<int>("out_d"); int out_h = ctx.Attr<int>("out_h"); int out_w = ctx.Attr<int>("out_w"); float scale; auto scale_tensor = ctx.Input<Tensor>("Scale"); if (scale_tensor != nullptr) { auto scale_data = get_new_data_from_tensor<float>(scale_tensor); scale = scale_data[0]; } else { scale = ctx.Attr<float>("scale"); } if (scale > 0) { out_d = static_cast<int>(in_d * scale); out_h = static_cast<int>(in_h * scale); out_w = static_cast<int>(in_w * scale); } auto out_size = ctx.Input<Tensor>("OutSize"); if (out_size != nullptr) { Tensor sizes; framework::TensorCopySync(*out_size, platform::CPUPlace(), &sizes); auto size_data = sizes.data<int>(); out_d = size_data[0]; out_h = size_data[1]; out_w = size_data[2]; } auto list_new_size_tensor = ctx.MultiInput<framework::Tensor>("SizeTensor"); if (list_new_size_tensor.size() > 0) { // have size tensor auto new_size = get_new_shape(list_new_size_tensor); out_d = new_size[0]; out_h = new_size[1]; out_w = new_size[2]; } auto* output_grad_data = output_grad.data<T>(); framework::DDim dim_grad; if (data_layout == DataLayout::kNCHW) { dim_grad = {n, c, in_d, in_h, in_w}; } else { dim_grad = {n, in_d, in_h, in_w, c}; } auto* input_grad_data = input_grad->mutable_data<T>(dim_grad, ctx.GetPlace()); auto& device_ctx = ctx.template device_context<platform::CUDADeviceContext>(); math::SetConstant<platform::CUDADeviceContext, T> zero; zero(device_ctx, input_grad, static_cast<T>(0.0)); if (in_d == out_d && in_h == out_h && in_w == out_w) { framework::TensorCopy(output_grad, ctx.GetPlace(), input_grad); return; } float ratio_d = 0.f; float ratio_h = 0.f; float ratio_w = 0.f; if (out_d > 1) { ratio_d = (align_corners) ? 
static_cast<float>(in_d - 1) / (out_d - 1) : static_cast<float>(in_d) / out_d; } if (out_h > 1) { ratio_h = (align_corners) ? static_cast<float>(in_h - 1) / (out_h - 1) : static_cast<float>(in_h) / out_h; } if (out_w > 1) { ratio_w = (align_corners) ? static_cast<float>(in_w - 1) / (out_w - 1) : static_cast<float>(in_w) / out_w; } int in_dhw = in_d * in_h * in_w; int out_dhw = out_d * out_h * out_w; int in_cdhw = c * in_dhw; int out_cdhw = c * out_dhw; int pixelNum = n * out_cdhw; platform::GpuLaunchConfig config = platform::GetGpuLaunchConfig1D(ctx.cuda_device_context(), pixelNum); if ("trilinear" == interp_method) { hipLaunchKernelGGL(( KeTrilinearInterpBw<T>), dim3(config.block_per_grid), dim3(config.thread_per_block), 0, ctx.cuda_device_context().stream(), input_grad_data, in_d, in_h, in_w, n, in_cdhw, output_grad_data, out_d, out_h, out_w, n, out_cdhw, c, ratio_d, ratio_h, ratio_w, align_corners, align_mode, data_layout); } } template <typename T> class InterpolateOpCUDAKernel : public framework::OpKernel<T> { public: void Compute(const framework::ExecutionContext& ctx) const override { PADDLE_ENFORCE_EQ( platform::is_gpu_place(ctx.GetPlace()), true, platform::errors::NotFound("This kernel only runs on GPU device.")); auto* input = ctx.Input<Tensor>("X"); auto* output = ctx.Output<Tensor>("Out"); auto input_dims = input->dims(); if (input_dims.size() == 3) { // 1D interpolation Interpolate1DCUDAFwd<T>(ctx, *input, output); } else if (input_dims.size() == 4) { // 2D interpolation Interpolate2DCUDAFwd<T>(ctx, *input, output); } else if (input_dims.size() == 5) { // 3D interpolation Interpolate3DCUDAFwd<T>(ctx, *input, output); } } }; template <typename T> class InterpolateGradOpCUDAKernel : public framework::OpKernel<T> { public: void Compute(const framework::ExecutionContext& ctx) const override { PADDLE_ENFORCE_EQ( platform::is_gpu_place(ctx.GetPlace()), true, platform::errors::NotFound("This kernel only runs on GPU device.")); auto* input_grad = ctx.Output<Tensor>(framework::GradVarName("X")); auto* output_grad = ctx.Input<Tensor>(framework::GradVarName("Out")); auto output_grad_dims = output_grad->dims(); if (output_grad_dims.size() == 3) { // 1D interpolation Interpolate1DCUDABwd<T>(ctx, input_grad, *output_grad); } else if (output_grad_dims.size() == 4) { // 2D interpolation Interpolate2DCUDABwd<T>(ctx, input_grad, *output_grad); } else if (output_grad_dims.size() == 5) { // 3D interpolation Interpolate3DCUDABwd<T>(ctx, input_grad, *output_grad); } } }; } // namespace operators } // namespace paddle namespace ops = paddle::operators; REGISTER_OP_CUDA_KERNEL(bilinear_interp, ops::InterpolateOpCUDAKernel<float>, ops::InterpolateOpCUDAKernel<double>, ops::InterpolateOpCUDAKernel<int>); REGISTER_OP_CUDA_KERNEL(bilinear_interp_grad, ops::InterpolateGradOpCUDAKernel<float>, ops::InterpolateGradOpCUDAKernel<double>); REGISTER_OP_CUDA_KERNEL(nearest_interp, ops::InterpolateOpCUDAKernel<float>, ops::InterpolateOpCUDAKernel<double>, ops::InterpolateOpCUDAKernel<int>); REGISTER_OP_CUDA_KERNEL(nearest_interp_grad, ops::InterpolateGradOpCUDAKernel<float>, ops::InterpolateGradOpCUDAKernel<double>); REGISTER_OP_CUDA_KERNEL(trilinear_interp, ops::InterpolateOpCUDAKernel<float>, ops::InterpolateOpCUDAKernel<double>, ops::InterpolateOpCUDAKernel<int>); REGISTER_OP_CUDA_KERNEL(trilinear_interp_grad, ops::InterpolateGradOpCUDAKernel<float>, ops::InterpolateGradOpCUDAKernel<double>); REGISTER_OP_CUDA_KERNEL(linear_interp, ops::InterpolateOpCUDAKernel<float>, ops::InterpolateOpCUDAKernel<double>, 
ops::InterpolateOpCUDAKernel<int>); REGISTER_OP_CUDA_KERNEL(linear_interp_grad, ops::InterpolateGradOpCUDAKernel<float>, ops::InterpolateGradOpCUDAKernel<double>); REGISTER_OP_CUDA_KERNEL(bicubic_interp, ops::InterpolateOpCUDAKernel<float>, ops::InterpolateOpCUDAKernel<double>, ops::InterpolateOpCUDAKernel<int>); REGISTER_OP_CUDA_KERNEL(bicubic_interp_grad, ops::InterpolateGradOpCUDAKernel<float>, ops::InterpolateGradOpCUDAKernel<double>);
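// ---------------------------------------------------------------------------
// Illustrative sketch (editor's addition; not part of either source file).
// The HIP listing above was generated from the CUDA file that follows, and the
// only systematic difference between the two copies is the kernel-launch
// syntax. Every interpolation kernel in this pair also shares the same
// grid-stride loop, sized on the host by platform::GetGpuLaunchConfig1D.
// The toy kernel and launcher below are assumptions made only to show that
// pattern and the CUDA <-> HIP launch correspondence; ScaleKernel and
// demo_launch do not exist in the original sources.
#include <cuda_runtime.h>

template <typename T>
__global__ void ScaleKernel(const T* in, T* out, int n, T alpha) {
  // Same grid-stride loop shape used by KeNearestNeighborInterpFw and friends.
  int tid = blockIdx.x * blockDim.x + threadIdx.x;
  int stride = blockDim.x * gridDim.x;
  for (; tid < n; tid += stride) {
    out[tid] = alpha * in[tid];
  }
}

inline void demo_launch(const float* d_in, float* d_out, int n,
                        cudaStream_t stream) {
  int threads = 512;  // the bicubic launches above also hard-code 512 threads
  int blocks = (n + threads - 1) / threads;
  // CUDA form, as written in the .cu file below:
  ScaleKernel<float><<<blocks, threads, 0, stream>>>(d_in, d_out, n, 1.f);
  // hipify rewrites that launch into the form seen in the listing above:
  //   hipLaunchKernelGGL((ScaleKernel<float>), dim3(blocks), dim3(threads), 0,
  //                      stream, d_in, d_out, n, 1.f);
}
// ---------------------------------------------------------------------------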
c61f791bbf4d6afd424afeba7faec4cbb63718e8.cu
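// ---------------------------------------------------------------------------
// Illustrative sketch (editor's addition; not part of the .cu file that
// follows). The forward and backward wrappers in both copies derive the
// resize ratio as (in - 1) / (out - 1) when align_corners is set and
// in / out otherwise, and the kernels map an output index back to a source
// coordinate as ratio * x (align_corners) or ratio * (x + 0.5) - 0.5.
// The host-only helpers below restate that mapping so the two conventions are
// easy to compare; resize_ratio and src_coord are names invented for this
// sketch.
#include <cstdio>

static float resize_ratio(int in_w, int out_w, bool align_corners) {
  if (out_w <= 1) return 0.f;  // the wrappers above guard with out_w > 1
  return align_corners ? static_cast<float>(in_w - 1) / (out_w - 1)
                       : static_cast<float>(in_w) / out_w;
}

// Source x as used by the bicubic kernels; the linear/bilinear kernels use the
// same formula when align_mode == 0 and additionally clamp the result at zero.
static float src_coord(int out_x, float ratio, bool align_corners) {
  return align_corners ? ratio * out_x : ratio * (out_x + 0.5f) - 0.5f;
}

int main() {
  // Upsampling a 4-pixel row to 8 pixels under both conventions.
  for (int flag = 0; flag <= 1; ++flag) {
    bool ac = flag != 0;
    float r = resize_ratio(4, 8, ac);
    printf("align_corners=%d ratio=%.3f src(0)=%.3f src(1)=%.3f\n",
           flag, r, src_coord(0, r, ac), src_coord(1, r, ac));
  }
  return 0;
}
// ---------------------------------------------------------------------------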
/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ #include <algorithm> #include <string> #include "paddle/fluid/operators/interpolate_op.h" #include "paddle/fluid/platform/cuda_primitives.h" #include "paddle/fluid/platform/gpu_launch_config.h" namespace paddle { namespace operators { using framework::Tensor; using DataLayout = framework::DataLayout; template <typename T> __global__ void KeNearestNeighborInterpFw( const T* in, const size_t in_img_h, const size_t in_img_w, const size_t input_h, const size_t input_w, T* out, const size_t out_img_h, const size_t out_img_w, const size_t output_h, const size_t output_w, const size_t num_channels, const float ratio_h, const float ratio_w, const bool align_corners, const DataLayout data_layout) { int nthreads = output_h * output_w; int tid = blockIdx.x * blockDim.x + threadIdx.x; int stride = blockDim.x * gridDim.x; for (; tid < nthreads; tid += stride) { int out_id_h = tid / output_w; int out_id_w = tid % output_w; int in_img_size = input_w / num_channels; int out_img_size = output_w / num_channels; int channel_id, out_img_idy, out_img_idx; if (data_layout == DataLayout::kNCHW) { channel_id = out_id_w / out_img_size; out_img_idy = (out_id_w % out_img_size) / out_img_w; out_img_idx = tid % out_img_w; } else { out_img_idy = out_id_w / (out_img_w * num_channels); out_img_idx = out_id_w % (out_img_w * num_channels) / num_channels; channel_id = tid % num_channels; } int in_img_idy = (align_corners) ? static_cast<int>(ratio_h * out_img_idy + 0.5) : static_cast<int>(ratio_h * out_img_idy); int in_img_idx = (align_corners) ? 
static_cast<int>(ratio_w * out_img_idx + 0.5) : static_cast<int>(ratio_w * out_img_idx); if (data_layout == DataLayout::kNCHW) { out[tid] = in[out_id_h * input_w + channel_id * in_img_size + in_img_idy * in_img_w + in_img_idx]; } else { out[tid] = in[out_id_h * input_w + in_img_idy * in_img_w * num_channels + in_img_idx * num_channels + channel_id]; } } } template <typename T> __global__ void KeNearestNeighborInterpBw( T* in, const size_t in_img_h, const size_t in_img_w, const size_t input_h, const size_t input_w, const T* out, const size_t out_img_h, const size_t out_img_w, const size_t output_h, const size_t output_w, const size_t num_channels, const float ratio_h, const float ratio_w, const bool align_corners, const DataLayout data_layout) { int nthreads = output_h * output_w; int tid = blockIdx.x * blockDim.x + threadIdx.x; int stride = blockDim.x * gridDim.x; for (; tid < nthreads; tid += stride) { int out_id_h = tid / output_w; int out_id_w = tid % output_w; int in_img_size = input_w / num_channels; int out_img_size = output_w / num_channels; int channel_id, out_img_idy, out_img_idx; if (data_layout == DataLayout::kNCHW) { channel_id = out_id_w / out_img_size; out_img_idy = (out_id_w % out_img_size) / out_img_w; out_img_idx = tid % out_img_w; } else { out_img_idy = out_id_w / (out_img_w * num_channels); out_img_idx = out_id_w % (out_img_w * num_channels) / num_channels; channel_id = tid % num_channels; } int in_img_idy = (align_corners) ? static_cast<int>(ratio_h * out_img_idy + 0.5) : static_cast<int>(ratio_h * out_img_idy); int in_img_idx = (align_corners) ? static_cast<int>(ratio_w * out_img_idx + 0.5) : static_cast<int>(ratio_w * out_img_idx); T* in_pos; if (data_layout == DataLayout::kNCHW) { in_pos = &in[out_id_h * input_w + channel_id * in_img_size + in_img_idy * in_img_w + in_img_idx]; } else { in_pos = &in[out_id_h * input_w + in_img_idy * in_img_w * num_channels + in_img_idx * num_channels + channel_id]; } const T out_pos = out[out_id_h * output_w + out_id_w]; platform::CudaAtomicAdd(in_pos, out_pos); } } template <typename T> __global__ void KeLinearInterpFw(const T* in, const size_t in_img_w, const size_t input_w, T* out, const size_t out_img_w, const size_t output_h, const size_t output_w, const size_t num_channels, const float ratio_w, const bool align_corners, const int align_mode, const DataLayout data_layout) { int nthreads = output_h * output_w; int tid = blockIdx.x * blockDim.x + threadIdx.x; int stride = blockDim.x * gridDim.x; bool align_flag = (align_mode == 0 && !align_corners); for (; tid < nthreads; tid += stride) { int out_id_h = tid / output_w; int out_id_w = tid % output_w; int in_img_size = input_w / num_channels; int out_img_size = output_w / num_channels; int channel_id, out_img_idy, out_img_idx; if (data_layout == DataLayout::kNCHW) { channel_id = out_id_w / out_img_size; out_img_idx = tid % out_img_w; } else { out_img_idx = out_id_w % (out_img_w * num_channels) / num_channels; channel_id = tid % num_channels; } int in_img_idx = align_flag ? static_cast<int>(ratio_w * (out_img_idx + 0.5) - 0.5) : static_cast<int>(ratio_w * out_img_idx); in_img_idx = (in_img_idx > 0) ? in_img_idx : 0; // w int w_id = (in_img_idx < in_img_w - 1) ? 1 : 0; // w_id T src_w = ratio_w * (out_img_idx + 0.5) - 0.5; src_w = (src_w > 0) ? src_w : 0; T w1lambda = align_flag ? 
src_w - in_img_idx : ratio_w * out_img_idx - in_img_idx; T w2lambda = 1.f - w1lambda; if (data_layout == DataLayout::kNCHW) { const T* in_pos = &in[out_id_h * out_id_w + channel_id * in_img_size + in_img_idx]; // linear interpolation out[out_id_h * output_w + out_id_w] = w2lambda * in_pos[0] + w1lambda * in_pos[w_id]; } else { const T* in_pos = &in[out_id_h * input_w + in_img_idx * num_channels + channel_id]; // linear interpolation out[out_id_h * output_w + out_id_w] = w2lambda * in_pos[0] + w1lambda * in_pos[w_id * num_channels]; } } } template <typename T> __global__ void KeLinearInterpBw(T* in, const size_t in_img_w, const size_t input_w, const T* out, const size_t out_img_w, const size_t output_h, const size_t output_w, const size_t num_channels, const T ratio_w, const bool align_corners, const int align_mode, const DataLayout data_layout) { int nthreads = output_h * output_w; int tid = blockIdx.x * blockDim.x + threadIdx.x; int stride = blockDim.x * gridDim.x; bool align_flag = (align_mode == 0 && !align_corners); for (; tid < nthreads; tid += stride) { int out_id_h = tid / output_w; int out_id_w = tid % output_w; int in_img_size = input_w / num_channels; int out_img_size = output_w / num_channels; int channel_id, out_img_idx; if (data_layout == DataLayout::kNCHW) { channel_id = out_id_w / out_img_size; out_img_idx = tid % out_img_w; } else { out_img_idx = out_id_w % (out_img_w * num_channels) / num_channels; channel_id = tid % num_channels; } int in_img_idx = align_flag ? ratio_w * (out_img_idx + 0.5) - 0.5 : ratio_w * out_img_idx; in_img_idx = (in_img_idx > 0) ? in_img_idx : 0; // w int w_id = (in_img_idx < in_img_w - 1) ? 1 : 0; // w_id T src_w = ratio_w * (out_img_idx + 0.5) - 0.5; src_w = (src_w > 0) ? src_w : 0; T w1lambda = align_flag ? 
src_w - in_img_idx : ratio_w * out_img_idx - in_img_idx; T w2lambda = 1.f - w1lambda; T* in_pos; if (data_layout == DataLayout::kNCHW) { in_pos = &in[out_id_h * input_w + channel_id * in_img_size + in_img_idx]; } else { in_pos = &in[out_id_h * input_w + in_img_idx * num_channels + channel_id]; } const T* out_pos = &out[out_id_w]; if (data_layout == DataLayout::kNCHW) { platform::CudaAtomicAdd(&in_pos[0], w2lambda * out_pos[0]); platform::CudaAtomicAdd(&in_pos[w_id], w1lambda * out_pos[0]); } else { platform::CudaAtomicAdd(&in_pos[0], w2lambda * out_pos[0]); platform::CudaAtomicAdd(&in_pos[w_id * num_channels], w1lambda * out_pos[0]); } } } template <typename T> __global__ void KeBilinearInterpFw( const T* in, const size_t in_img_h, const size_t in_img_w, const size_t input_h, const size_t input_w, T* out, const size_t out_img_h, const size_t out_img_w, const size_t output_h, const size_t output_w, const size_t num_channels, const float ratio_h, const float ratio_w, const bool align_corners, const int align_mode, const DataLayout data_layout) { int nthreads = output_h * output_w; int tid = blockIdx.x * blockDim.x + threadIdx.x; int stride = blockDim.x * gridDim.x; bool align_flag = (align_mode == 0 && !align_corners); for (; tid < nthreads; tid += stride) { int out_id_h = tid / output_w; int out_id_w = tid % output_w; int in_img_size = input_w / num_channels; int out_img_size = output_w / num_channels; int channel_id, out_img_idy, out_img_idx; if (data_layout == DataLayout::kNCHW) { channel_id = out_id_w / out_img_size; out_img_idy = (out_id_w % out_img_size) / out_img_w; out_img_idx = tid % out_img_w; } else { out_img_idy = out_id_w / (out_img_w * num_channels); out_img_idx = out_id_w % (out_img_w * num_channels) / num_channels; channel_id = tid % num_channels; } int in_img_idy = align_flag ? static_cast<int>(ratio_h * (out_img_idy + 0.5) - 0.5) : static_cast<int>(ratio_h * out_img_idy); in_img_idy = (in_img_idy > 0) ? in_img_idy : 0; int h_id = (in_img_idy < in_img_h - 1) ? 1 : 0; T src_h = ratio_h * (out_img_idy + 0.5) - 0.5; src_h = (src_h > 0) ? src_h : 0; T h1lambda = align_flag ? src_h - in_img_idy : ratio_h * out_img_idy - in_img_idy; T h2lambda = 1.f - h1lambda; int in_img_idx = align_flag ? static_cast<int>(ratio_w * (out_img_idx + 0.5) - 0.5) : static_cast<int>(ratio_w * out_img_idx); in_img_idx = (in_img_idx > 0) ? in_img_idx : 0; int w_id = (in_img_idx < in_img_w - 1) ? 1 : 0; T src_w = ratio_w * (out_img_idx + 0.5) - 0.5; src_w = (src_w > 0) ? src_w : 0; T w1lambda = align_flag ? 
src_w - in_img_idx : ratio_w * out_img_idx - in_img_idx; T w2lambda = 1.f - w1lambda; if (data_layout == DataLayout::kNCHW) { const T* in_pos = &in[out_id_h * input_w + channel_id * in_img_size + in_img_idy * in_img_w + in_img_idx]; // bilinear interpolation out[out_id_h * output_w + out_id_w] = h2lambda * (w2lambda * in_pos[0] + w1lambda * in_pos[w_id]) + h1lambda * (w2lambda * in_pos[h_id * in_img_w] + w1lambda * in_pos[h_id * in_img_w + w_id]); } else { const T* in_pos = &in[out_id_h * input_w + in_img_idy * in_img_w * num_channels + in_img_idx * num_channels + channel_id]; // bilinear interpolation out[out_id_h * output_w + out_id_w] = h2lambda * (w2lambda * in_pos[0] + w1lambda * in_pos[w_id * num_channels]) + h1lambda * (w2lambda * in_pos[h_id * in_img_w * num_channels] + w1lambda * in_pos[h_id * in_img_w * num_channels + w_id * num_channels]); } } } template <typename T> __global__ void KeBilinearInterpBw( T* in, const size_t in_img_h, const size_t in_img_w, const size_t input_h, const size_t input_w, const T* out, const size_t out_img_h, const size_t out_img_w, const size_t output_h, const size_t output_w, const size_t num_channels, const T ratio_h, const T ratio_w, const bool align_corners, const int align_mode, const DataLayout data_layout) { int nthreads = output_h * output_w; int tid = blockIdx.x * blockDim.x + threadIdx.x; int stride = blockDim.x * gridDim.x; bool align_flag = (align_mode == 0 && !align_corners); for (; tid < nthreads; tid += stride) { int out_id_h = tid / output_w; int out_id_w = tid % output_w; int in_img_size = input_w / num_channels; int out_img_size = output_w / num_channels; int channel_id, out_img_idy, out_img_idx; if (data_layout == DataLayout::kNCHW) { channel_id = out_id_w / out_img_size; out_img_idy = (out_id_w % out_img_size) / out_img_w; out_img_idx = tid % out_img_w; } else { out_img_idy = out_id_w / (out_img_w * num_channels); out_img_idx = out_id_w % (out_img_w * num_channels) / num_channels; channel_id = tid % num_channels; } int in_img_idy = align_flag ? ratio_h * (out_img_idy + 0.5) - 0.5 : ratio_h * out_img_idy; in_img_idy = (in_img_idy > 0) ? in_img_idy : 0; int h_id = (in_img_idy < in_img_h - 1) ? 1 : 0; T src_h = ratio_h * (out_img_idy + 0.5) - 0.5; src_h = (src_h > 0) ? src_h : 0; T h1lambda = align_flag ? src_h - in_img_idy : ratio_h * out_img_idy - in_img_idy; T h2lambda = 1.f - h1lambda; int in_img_idx = align_flag ? ratio_w * (out_img_idx + 0.5) - 0.5 : ratio_w * out_img_idx; in_img_idx = (in_img_idx > 0) ? in_img_idx : 0; int w_id = (in_img_idx < in_img_w - 1) ? 1 : 0; T src_w = ratio_w * (out_img_idx + 0.5) - 0.5; src_w = (src_w > 0) ? src_w : 0; T w1lambda = align_flag ? 
src_w - in_img_idx : ratio_w * out_img_idx - in_img_idx; T w2lambda = 1.f - w1lambda; T* in_pos; if (data_layout == DataLayout::kNCHW) { in_pos = &in[out_id_h * input_w + channel_id * in_img_size + in_img_idy * in_img_w + in_img_idx]; } else { in_pos = &in[out_id_h * input_w + in_img_idy * in_img_w * num_channels + in_img_idx * num_channels + channel_id]; } const T* out_pos = &out[out_id_h * output_w + out_id_w]; if (data_layout == DataLayout::kNCHW) { platform::CudaAtomicAdd(&in_pos[0], h2lambda * w2lambda * out_pos[0]); platform::CudaAtomicAdd(&in_pos[w_id], h2lambda * w1lambda * out_pos[0]); platform::CudaAtomicAdd(&in_pos[h_id * in_img_w], h1lambda * w2lambda * out_pos[0]); platform::CudaAtomicAdd(&in_pos[h_id * in_img_w + w_id], h1lambda * w1lambda * out_pos[0]); } else { platform::CudaAtomicAdd(&in_pos[0], h2lambda * w2lambda * out_pos[0]); platform::CudaAtomicAdd(&in_pos[w_id * num_channels], h2lambda * w1lambda * out_pos[0]); platform::CudaAtomicAdd(&in_pos[h_id * in_img_w * num_channels], h1lambda * w2lambda * out_pos[0]); platform::CudaAtomicAdd( &in_pos[h_id * in_img_w * num_channels + w_id * num_channels], h1lambda * w1lambda * out_pos[0]); } } } template <typename T> __global__ void KeTrilinearInterpFw( const T* in, const size_t in_img_d, const size_t in_img_h, const size_t in_img_w, const size_t input_h, const size_t input_w, T* out, const size_t out_img_d, const size_t out_img_h, const size_t out_img_w, const size_t output_h, const size_t output_w, const size_t num_channels, const float ratio_d, const float ratio_h, const float ratio_w, const bool align_corners, const int align_mode, const DataLayout data_layout) { int nthreads = output_h * output_w; int tid = blockIdx.x * blockDim.x + threadIdx.x; int stride = blockDim.x * gridDim.x; bool align_flag = (align_mode == 0 && !align_corners); for (; tid < nthreads; tid += stride) { int out_id_h = tid / output_w; int out_id_w = tid % output_w; int in_img_size = input_w / num_channels; int out_img_size = output_w / num_channels; int channel_id, out_img_idt, out_img_idy, out_img_idx; if (data_layout == DataLayout::kNCHW) { channel_id = out_id_w / out_img_size; out_img_idt = (out_id_w % out_img_size) / out_img_h / out_img_w; out_img_idy = ((out_id_w % out_img_size) / out_img_w) % out_img_h; out_img_idx = tid % out_img_w; } else { out_img_idt = out_id_w / (out_img_h * out_img_w * num_channels); out_img_idy = out_id_w % (out_img_h * out_img_w * num_channels) / (out_img_w * num_channels); out_img_idx = out_id_w % (out_img_w * num_channels) / num_channels; channel_id = tid % num_channels; } int in_img_idt = align_flag ? static_cast<int>(ratio_d * (out_img_idt + 0.5) - 0.5) : static_cast<int>(ratio_d * out_img_idt); in_img_idt = (in_img_idt > 0) ? in_img_idt : 0; int d_id = (in_img_idt < in_img_d - 1) ? 1 : 0; T src_d = ratio_d * (out_img_idt + 0.5) - 0.5; src_d = (src_d > 0) ? src_d : 0; T d1lambda = align_flag ? src_d - in_img_idt : ratio_d * out_img_idt - in_img_idt; T d2lambda = 1.f - d1lambda; int in_img_idy = align_flag ? static_cast<int>(ratio_h * (out_img_idy + 0.5) - 0.5) : static_cast<int>(ratio_h * out_img_idy); in_img_idy = (in_img_idy > 0) ? in_img_idy : 0; int h_id = (in_img_idy < in_img_h - 1) ? 1 : 0; T src_h = ratio_h * (out_img_idy + 0.5) - 0.5; src_h = (src_h > 0) ? src_h : 0; T h1lambda = align_flag ? src_h - in_img_idy : ratio_h * out_img_idy - in_img_idy; T h2lambda = 1.f - h1lambda; int in_img_idx = align_flag ? 
static_cast<int>(ratio_w * (out_img_idx + 0.5) - 0.5) : static_cast<int>(ratio_w * out_img_idx); in_img_idx = (in_img_idx > 0) ? in_img_idx : 0; int w_id = (in_img_idx < in_img_w - 1) ? 1 : 0; T src_w = ratio_w * (out_img_idx + 0.5) - 0.5; src_w = (src_w > 0) ? src_w : 0; T w1lambda = align_flag ? src_w - in_img_idx : ratio_w * out_img_idx - in_img_idx; T w2lambda = 1.f - w1lambda; if (data_layout == DataLayout::kNCHW) { int in_pos1_idx = out_id_h * input_w + channel_id * in_img_size + (in_img_idt * in_img_h + in_img_idy) * in_img_w + in_img_idx; const T* in_pos1 = &in[in_pos1_idx]; int in_pos2_idx = in_pos1_idx + d_id * in_img_h * in_img_w; const T* in_pos2 = &in[in_pos2_idx]; // trilinear interpolation out[out_id_h * output_w + out_id_w] = d2lambda * (h2lambda * (w2lambda * in_pos1[0] + w1lambda * in_pos1[w_id]) + h1lambda * (w2lambda * in_pos1[h_id * in_img_w] + w1lambda * in_pos1[h_id * in_img_w + w_id])) + d1lambda * (h2lambda * (w2lambda * in_pos2[0] + w1lambda * in_pos2[w_id]) + h1lambda * (w2lambda * in_pos2[h_id * in_img_w] + w1lambda * in_pos2[h_id * in_img_w + w_id])); } else { int in_pos1_idx = out_id_h * input_w + in_img_idt * in_img_h * in_img_w * num_channels + in_img_idy * in_img_w * num_channels + in_img_idx * num_channels + channel_id; const T* in_pos1 = &in[in_pos1_idx]; int in_pos2_idx = in_pos1_idx + d_id * in_img_h * in_img_w * num_channels; const T* in_pos2 = &in[in_pos2_idx]; // trilinear interpolation out[out_id_h * output_w + out_id_w] = d2lambda * (h2lambda * (w2lambda * in_pos1[0] + w1lambda * in_pos1[w_id * num_channels]) + h1lambda * (w2lambda * in_pos1[h_id * in_img_w * num_channels] + w1lambda * in_pos1[h_id * in_img_w * num_channels + w_id * num_channels])) + d1lambda * (h2lambda * (w2lambda * in_pos2[0] + w1lambda * in_pos2[w_id * num_channels]) + h1lambda * (w2lambda * in_pos2[h_id * in_img_w * num_channels] + w1lambda * in_pos2[h_id * in_img_w * num_channels + w_id * num_channels])); } } } template <typename T> __global__ void KeTrilinearInterpBw( T* in, const size_t in_img_d, const size_t in_img_h, const size_t in_img_w, const size_t input_h, const size_t input_w, const T* out, const size_t out_img_d, const size_t out_img_h, const size_t out_img_w, const size_t output_h, const size_t output_w, const size_t num_channels, const T ratio_d, const T ratio_h, const T ratio_w, const bool align_corners, const int align_mode, const DataLayout data_layout) { int nthreads = output_h * output_w; int tid = blockIdx.x * blockDim.x + threadIdx.x; int stride = blockDim.x * gridDim.x; bool align_flag = (align_mode == 0 && !align_corners); for (; tid < nthreads; tid += stride) { int out_id_h = tid / output_w; int out_id_w = tid % output_w; int in_img_size = input_w / num_channels; int out_img_size = output_w / num_channels; int channel_id, out_img_idt, out_img_idy, out_img_idx; if (data_layout == DataLayout::kNCHW) { channel_id = out_id_w / out_img_size; out_img_idt = (out_id_w % out_img_size) / out_img_h / out_img_w; out_img_idy = ((out_id_w % out_img_size) / out_img_w) % out_img_h; out_img_idx = tid % out_img_w; } else { out_img_idt = out_id_w / (out_img_h * out_img_w * num_channels); out_img_idy = out_id_w % (out_img_h * out_img_w * num_channels) / (out_img_w * num_channels); out_img_idx = out_id_w % (out_img_w * num_channels) / num_channels; channel_id = tid % num_channels; } int in_img_idt = align_flag ? static_cast<int>(ratio_d * (out_img_idt + 0.5) - 0.5) : static_cast<int>(ratio_d * out_img_idt); in_img_idt = (in_img_idt > 0) ? 
in_img_idt : 0; int d_id = (in_img_idt < in_img_d - 1) ? 1 : 0; T src_d = ratio_d * (out_img_idt + 0.5) - 0.5; src_d = (src_d > 0) ? src_d : 0; T d1lambda = align_flag ? src_d - in_img_idt : ratio_d * out_img_idt - in_img_idt; T d2lambda = 1.f - d1lambda; int in_img_idy = align_flag ? static_cast<int>(ratio_h * (out_img_idy + 0.5) - 0.5) : static_cast<int>(ratio_h * out_img_idy); in_img_idy = (in_img_idy > 0) ? in_img_idy : 0; int h_id = (in_img_idy < in_img_h - 1) ? 1 : 0; T src_h = ratio_h * (out_img_idy + 0.5) - 0.5; src_h = (src_h > 0) ? src_h : 0; T h1lambda = align_flag ? src_h - in_img_idy : ratio_h * out_img_idy - in_img_idy; T h2lambda = 1.f - h1lambda; int in_img_idx = align_flag ? static_cast<int>(ratio_w * (out_img_idx + 0.5) - 0.5) : static_cast<int>(ratio_w * out_img_idx); in_img_idx = (in_img_idx > 0) ? in_img_idx : 0; int w_id = (in_img_idx < in_img_w - 1) ? 1 : 0; T src_w = ratio_w * (out_img_idx + 0.5) - 0.5; src_w = (src_w > 0) ? src_w : 0; T w1lambda = align_flag ? src_w - in_img_idx : ratio_w * out_img_idx - in_img_idx; T w2lambda = 1.f - w1lambda; if (data_layout == DataLayout::kNCHW) { int in_pos1_idx = out_id_h * input_w + channel_id * in_img_size + (in_img_idt * in_img_h + in_img_idy) * in_img_w + in_img_idx; T* in_pos1 = &in[in_pos1_idx]; int in_pos2_idx = in_pos1_idx + d_id * in_img_h * in_img_w; T* in_pos2 = &in[in_pos2_idx]; const T* out_pos = &out[out_id_h * output_w + out_id_w]; // trilinear interpolation grad platform::CudaAtomicAdd(&in_pos1[0], d2lambda * h2lambda * w2lambda * out_pos[0]); platform::CudaAtomicAdd(&in_pos1[w_id], d2lambda * h2lambda * w1lambda * out_pos[0]); platform::CudaAtomicAdd(&in_pos1[h_id * in_img_w], d2lambda * h1lambda * w2lambda * out_pos[0]); platform::CudaAtomicAdd(&in_pos1[h_id * in_img_w + w_id], d2lambda * h1lambda * w1lambda * out_pos[0]); platform::CudaAtomicAdd(&in_pos2[0], d1lambda * h2lambda * w2lambda * out_pos[0]); platform::CudaAtomicAdd(&in_pos2[w_id], d1lambda * h2lambda * w1lambda * out_pos[0]); platform::CudaAtomicAdd(&in_pos2[h_id * in_img_w], d1lambda * h1lambda * w2lambda * out_pos[0]); platform::CudaAtomicAdd(&in_pos2[h_id * in_img_w + w_id], d1lambda * h1lambda * w1lambda * out_pos[0]); } else { int in_pos1_idx = out_id_h * input_w + in_img_idt * in_img_h * in_img_w * num_channels + in_img_idy * in_img_w * num_channels + in_img_idx * num_channels + channel_id; T* in_pos1 = &in[in_pos1_idx]; int in_pos2_idx = in_pos1_idx + d_id * in_img_h * in_img_w * num_channels; T* in_pos2 = &in[in_pos2_idx]; const T* out_pos = &out[out_id_h * output_w + out_id_w]; // trilinear interpolation grad platform::CudaAtomicAdd(&in_pos1[0], d2lambda * h2lambda * w2lambda * out_pos[0]); platform::CudaAtomicAdd(&in_pos1[w_id * num_channels], d2lambda * h2lambda * w1lambda * out_pos[0]); platform::CudaAtomicAdd(&in_pos1[h_id * in_img_w * num_channels], d2lambda * h1lambda * w2lambda * out_pos[0]); platform::CudaAtomicAdd( &in_pos1[h_id * in_img_w * num_channels + w_id * num_channels], d2lambda * h1lambda * w1lambda * out_pos[0]); platform::CudaAtomicAdd(&in_pos2[0], d1lambda * h2lambda * w2lambda * out_pos[0]); platform::CudaAtomicAdd(&in_pos2[w_id * num_channels], d1lambda * h2lambda * w1lambda * out_pos[0]); platform::CudaAtomicAdd(&in_pos2[h_id * in_img_w * num_channels], d1lambda * h1lambda * w2lambda * out_pos[0]); platform::CudaAtomicAdd( &in_pos2[h_id * in_img_w * num_channels + w_id * num_channels], d1lambda * h1lambda * w1lambda * out_pos[0]); } } } template <typename T> __device__ __forceinline__ static T Kecubic_interp(const 
T x0, const T x1, const T x2, const T x3, T t) { T coeffs[4]; T a = -0.75; T x_1 = t; T x_2 = 1.0 - t; coeffs[0] = cubic_convolution2<T>(x_1 + 1.0, a); coeffs[1] = cubic_convolution1<T>(x_1, a); coeffs[2] = cubic_convolution1<T>(x_2, a); coeffs[3] = cubic_convolution2<T>(x_2 + 1.0, a); return x0 * coeffs[0] + x1 * coeffs[1] + x2 * coeffs[2] + x3 * coeffs[3]; } template <typename T> __global__ void KeBicubicInterpFw( const T* in, const size_t in_img_h, const size_t in_img_w, const size_t input_h, const size_t input_w, T* out, const size_t out_img_h, const size_t out_img_w, const size_t output_h, const size_t output_w, const size_t num_channels, const float ratio_h, const float ratio_w, const bool align_corners, const DataLayout data_layout) { int nthreads = output_h * output_w; int tid = blockIdx.x * blockDim.x + threadIdx.x; int stride = blockDim.x * gridDim.x; for (; tid < nthreads; tid += stride) { int out_id_h = tid / output_w; int out_id_w = tid % output_w; int in_img_size = input_w / num_channels; int out_img_size = output_w / num_channels; int channel_id, out_img_idy, out_img_idx; if (data_layout == DataLayout::kNCHW) { channel_id = out_id_w / out_img_size; out_img_idy = (out_id_w % out_img_size) / out_img_w; out_img_idx = tid % out_img_w; } else { out_img_idy = out_id_w / (out_img_w * num_channels); out_img_idx = out_id_w % (out_img_w * num_channels) / num_channels; channel_id = tid % num_channels; } T in_img_idy = align_corners ? static_cast<T>(ratio_h * out_img_idy) : static_cast<T>(ratio_h * (out_img_idy + 0.5) - 0.5); int input_y = floorf(in_img_idy); const T y_t = in_img_idy - input_y; T in_img_idx = align_corners ? static_cast<T>(ratio_w * out_img_idx) : static_cast<T>(ratio_w * (out_img_idx + 0.5) - 0.5); int input_x = floorf(in_img_idx); const T x_t = in_img_idx - input_x; T coefficients[4]; const T* in_pos_0; const T* in_pos_1; const T* in_pos_2; const T* in_pos_3; int access_x_0; if (data_layout == DataLayout::kNCHW) { for (int k = 0; k < 4; k++) { int access_y = max(min(input_y - 1 + k, static_cast<int>(in_img_h - 1)), 0); access_x_0 = max(min(input_x - 1, static_cast<int>(in_img_w - 1)), 0); int access_x_1 = max(min(input_x + 0, static_cast<int>(in_img_w - 1)), 0); int access_x_2 = max(min(input_x + 1, static_cast<int>(in_img_w - 1)), 0); int access_x_3 = max(min(input_x + 2, static_cast<int>(in_img_w - 1)), 0); in_pos_0 = &in[out_id_h * input_w + channel_id * in_img_size + access_y * in_img_w + access_x_0]; in_pos_1 = &in[out_id_h * input_w + channel_id * in_img_size + access_y * in_img_w + access_x_1]; in_pos_2 = &in[out_id_h * input_w + channel_id * in_img_size + access_y * in_img_w + access_x_2]; in_pos_3 = &in[out_id_h * input_w + channel_id * in_img_size + access_y * in_img_w + access_x_3]; coefficients[k] = Kecubic_interp<T>(in_pos_0[0], in_pos_1[0], in_pos_2[0], in_pos_3[0], x_t); } out[out_id_h * output_w + out_id_w] = Kecubic_interp<T>(coefficients[0], coefficients[1], coefficients[2], coefficients[3], y_t); } else { for (int k = 0; k < 4; k++) { int access_y = max(min(input_y - 1 + k, static_cast<int>((in_img_h - 1))), 0); int access_x_0 = max(min(input_x - 1, static_cast<int>((in_img_w - 1))), 0); int access_x_1 = max(min(input_x + 0, static_cast<int>((in_img_w - 1))), 0); int access_x_2 = max(min(input_x + 1, static_cast<int>((in_img_w - 1))), 0); int access_x_3 = max(min(input_x + 2, static_cast<int>((in_img_w - 1))), 0); const T* in_pos_0 = &in[out_id_h * input_w + access_y * in_img_w * num_channels + access_x_0 * num_channels + channel_id]; const T* 
in_pos_1 = &in[out_id_h * input_w + access_y * in_img_w * num_channels + access_x_1 * num_channels + channel_id]; const T* in_pos_2 = &in[out_id_h * input_w + access_y * in_img_w * num_channels + access_x_2 * num_channels + channel_id]; const T* in_pos_3 = &in[out_id_h * input_w + access_y * in_img_w * num_channels + access_x_3 * num_channels + channel_id]; coefficients[k] = Kecubic_interp(in_pos_0[0], in_pos_1[0], in_pos_2[0], in_pos_3[0], x_t); } out[out_id_h * output_w + out_id_w] = static_cast<T>(Kecubic_interp(coefficients[0], coefficients[1], coefficients[2], coefficients[3], y_t)); } } } template <typename T> __global__ void KeBicubicInterpBw( T* in, const size_t in_img_h, const size_t in_img_w, const size_t input_h, const size_t input_w, const T* out, const size_t out_img_h, const size_t out_img_w, const size_t output_h, const size_t output_w, const size_t num_channels, const float ratio_h, const float ratio_w, const bool align_corners, const DataLayout data_layout) { int nthreads = output_h * output_w; int tid = blockIdx.x * blockDim.x + threadIdx.x; int stride = blockDim.x * gridDim.x; for (; tid < nthreads; tid += stride) { int out_id_h = tid / output_w; int out_id_w = tid % output_w; int in_img_size = input_w / num_channels; int out_img_size = output_w / num_channels; int channel_id, out_img_idy, out_img_idx; if (data_layout == DataLayout::kNCHW) { channel_id = out_id_w / out_img_size; out_img_idy = (out_id_w % out_img_size) / out_img_w; out_img_idx = tid % out_img_w; } else { out_img_idy = out_id_w / (out_img_w * num_channels); out_img_idx = out_id_w % (out_img_w * num_channels) / num_channels; channel_id = tid % num_channels; } T in_img_idy = align_corners ? static_cast<T>(ratio_h * out_img_idy) : static_cast<T>(ratio_h * (out_img_idy + 0.5) - 0.5); int input_y = floorf(in_img_idy); const T y_t = in_img_idy - input_y; T in_img_idx = align_corners ? 
static_cast<T>(ratio_w * out_img_idx) : static_cast<T>(ratio_w * (out_img_idx + 0.5) - 0.5); int input_x = floorf(in_img_idx); const T x_t = in_img_idx - input_x; T x_coeffs[4]; T y_coeffs[4]; get_cubic_upsample_coefficients(x_coeffs, x_t); get_cubic_upsample_coefficients(y_coeffs, y_t); const T* out_pos = &out[out_id_h * output_w + out_id_w]; T* in_pos; for (int i = 0; i < 4; i++) { for (int j = 0; j < 4; j++) { int access_y = max(min(static_cast<int>(input_y - 1 + j), static_cast<int>(in_img_h - 1)), 0); int access_x = max(min(static_cast<int>(input_x - 1 + i), static_cast<int>(in_img_w - 1)), 0); if (data_layout == DataLayout::kNCHW) { in_pos = &in[out_id_h * input_w + channel_id * in_img_size + access_y * in_img_w + access_x]; } else { in_pos = &in[out_id_h * input_w + access_y * in_img_w * num_channels + access_x * num_channels + channel_id]; } platform::CudaAtomicAdd(&in_pos[0], (out_pos[0] * y_coeffs[j] * x_coeffs[i])); } } } } template <typename T> static void Interpolate1DCUDAFwd(const framework::ExecutionContext& ctx, const Tensor& input, Tensor* output) { auto* input_data = input.data<T>(); const std::string data_layout_str = ctx.Attr<std::string>("data_layout"); const DataLayout data_layout = framework::StringToDataLayout(data_layout_str); int n, c, in_d, in_h, in_w; ExtractNCDWH(input.dims(), data_layout, &n, &c, &in_d, &in_h, &in_w); auto interp_method = ctx.Attr<std::string>("interp_method"); bool align_corners = ctx.Attr<bool>("align_corners"); int align_mode = ctx.Attr<int>("align_mode"); int out_w = ctx.Attr<int>("out_w"); auto list_new_shape_tensor = ctx.MultiInput<framework::Tensor>("SizeTensor"); if (list_new_shape_tensor.size() > 0) { // have size tensor auto new_size = get_new_shape(list_new_shape_tensor); out_w = new_size[0]; } else { float scale; auto scale_tensor = ctx.Input<Tensor>("Scale"); if (scale_tensor != nullptr) { auto scale_data = get_new_data_from_tensor<float>(scale_tensor); scale = scale_data[0]; } else { scale = ctx.Attr<float>("scale"); } if (scale > 0) { out_w = static_cast<int>(in_w * scale); } auto out_size = ctx.Input<Tensor>("OutSize"); if (out_size != nullptr) { Tensor sizes; framework::TensorCopySync(*out_size, platform::CPUPlace(), &sizes); auto size_data = sizes.data<int>(); out_w = size_data[0]; } } PADDLE_ENFORCE_GT(out_w, 0, platform::errors::InvalidArgument( "out_w in Attr(out_shape) of Op(interpolate) " "should be greater than 0.")); framework::DDim dim_out; if (data_layout == DataLayout::kNCHW) { dim_out = {n, c, out_w}; } else { dim_out = {n, out_w, c}; } auto output_data = output->mutable_data<T>(dim_out, ctx.GetPlace()); if (in_w == out_w) { framework::TensorCopy(input, ctx.GetPlace(), output); return; } float ratio_w = 0.f; if (out_w > 1) { ratio_w = (align_corners) ? 
static_cast<float>(in_w - 1.0) / (out_w - 1.0) : static_cast<float>(in_w) / out_w; } int in_cw = c * in_w; int out_cw = c * out_w; int pixelNum = n * out_cw; platform::GpuLaunchConfig config = platform::GetGpuLaunchConfig1D(ctx.cuda_device_context(), pixelNum); if ("linear" == interp_method) { KeLinearInterpFw<T><<<config.block_per_grid, config.thread_per_block, 0, ctx.cuda_device_context().stream()>>>( input_data, in_w, in_cw, output_data, out_w, n, out_cw, c, ratio_w, align_corners, align_mode, data_layout); } } template <typename T> static void Interpolate2DCUDAFwd(const framework::ExecutionContext& ctx, const Tensor& input, Tensor* output) { auto* input_data = input.data<T>(); const std::string data_layout_str = ctx.Attr<std::string>("data_layout"); const DataLayout data_layout = framework::StringToDataLayout(data_layout_str); int n, c, in_d, in_h, in_w; ExtractNCDWH(input.dims(), data_layout, &n, &c, &in_d, &in_h, &in_w); auto interp_method = ctx.Attr<std::string>("interp_method"); bool align_corners = ctx.Attr<bool>("align_corners"); int align_mode = ctx.Attr<int>("align_mode"); int out_h = ctx.Attr<int>("out_h"); int out_w = ctx.Attr<int>("out_w"); auto list_new_shape_tensor = ctx.MultiInput<framework::Tensor>("SizeTensor"); if (list_new_shape_tensor.size() > 0) { // have size tensor auto new_size = get_new_shape(list_new_shape_tensor); out_h = new_size[0]; out_w = new_size[1]; } else { float scale; auto scale_tensor = ctx.Input<Tensor>("Scale"); if (scale_tensor != nullptr) { auto scale_data = get_new_data_from_tensor<float>(scale_tensor); scale = scale_data[0]; } else { scale = ctx.Attr<float>("scale"); } if (scale > 0) { out_h = static_cast<int>(in_h * scale); out_w = static_cast<int>(in_w * scale); } auto out_size = ctx.Input<Tensor>("OutSize"); if (out_size != nullptr) { Tensor sizes; framework::TensorCopySync(*out_size, platform::CPUPlace(), &sizes); auto size_data = sizes.data<int>(); out_h = size_data[0]; out_w = size_data[1]; } } PADDLE_ENFORCE_GT(out_h, 0, platform::errors::InvalidArgument( "out_h in Attr(out_shape) of Op(interpolate) " "should be greater than 0.")); PADDLE_ENFORCE_GT(out_w, 0, platform::errors::InvalidArgument( "out_w in Attr(out_shape) of Op(interpolate) " "should be greater than 0.")); framework::DDim dim_out; if (data_layout == DataLayout::kNCHW) { dim_out = {n, c, out_h, out_w}; } else { dim_out = {n, out_h, out_w, c}; } auto output_data = output->mutable_data<T>(dim_out, ctx.GetPlace()); if (in_h == out_h && in_w == out_w) { framework::TensorCopy(input, ctx.GetPlace(), output); return; } float ratio_h = 0.f; float ratio_w = 0.f; if (out_h > 1) { ratio_h = (align_corners) ? static_cast<float>(in_h - 1) / (out_h - 1) : static_cast<float>(in_h) / out_h; } if (out_w > 1) { ratio_w = (align_corners) ? 
static_cast<float>(in_w - 1) / (out_w - 1) : static_cast<float>(in_w) / out_w; } int in_hw = in_h * in_w; int out_hw = out_h * out_w; int in_chw = c * in_hw; int out_chw = c * out_hw; int pixelNum = n * out_chw; platform::GpuLaunchConfig config = platform::GetGpuLaunchConfig1D(ctx.cuda_device_context(), pixelNum); if ("nearest" == interp_method) { KeNearestNeighborInterpFw< T><<<config.block_per_grid, config.thread_per_block, 0, ctx.cuda_device_context().stream()>>>( input_data, in_h, in_w, n, in_chw, output_data, out_h, out_w, n, out_chw, c, ratio_h, ratio_w, align_corners, data_layout); } else if ("bilinear" == interp_method) { KeBilinearInterpFw<T><<<config.block_per_grid, config.thread_per_block, 0, ctx.cuda_device_context().stream()>>>( input_data, in_h, in_w, n, in_chw, output_data, out_h, out_w, n, out_chw, c, ratio_h, ratio_w, align_corners, align_mode, data_layout); } else if ("bicubic" == interp_method) { KeBicubicInterpFw<T><<<config.block_per_grid, 512, 0, ctx.cuda_device_context().stream()>>>( input_data, in_h, in_w, n, in_chw, output_data, out_h, out_w, n, out_chw, c, ratio_h, ratio_w, align_corners, data_layout); } } template <typename T> static void Interpolate3DCUDAFwd(const framework::ExecutionContext& ctx, const Tensor& input, Tensor* output) { auto* input_data = input.data<T>(); const std::string data_layout_str = ctx.Attr<std::string>("data_layout"); const DataLayout data_layout = framework::StringToDataLayout(data_layout_str); int n, c, in_d, in_h, in_w; ExtractNCDWH(input.dims(), data_layout, &n, &c, &in_d, &in_h, &in_w); auto interp_method = ctx.Attr<std::string>("interp_method"); bool align_corners = ctx.Attr<bool>("align_corners"); int align_mode = ctx.Attr<int>("align_mode"); int out_d = ctx.Attr<int>("out_d"); int out_h = ctx.Attr<int>("out_h"); int out_w = ctx.Attr<int>("out_w"); auto list_new_shape_tensor = ctx.MultiInput<framework::Tensor>("SizeTensor"); if (list_new_shape_tensor.size() > 0) { // have size tensor auto new_size = get_new_shape(list_new_shape_tensor); out_d = new_size[0]; out_h = new_size[1]; out_w = new_size[2]; } else { float scale; auto scale_tensor = ctx.Input<Tensor>("Scale"); if (scale_tensor != nullptr) { auto scale_data = get_new_data_from_tensor<float>(scale_tensor); scale = scale_data[0]; } else { scale = ctx.Attr<float>("scale"); } if (scale > 0) { out_d = static_cast<int>(in_d * scale); out_h = static_cast<int>(in_h * scale); out_w = static_cast<int>(in_w * scale); } auto out_size = ctx.Input<Tensor>("OutSize"); if (out_size != nullptr) { Tensor sizes; framework::TensorCopySync(*out_size, platform::CPUPlace(), &sizes); auto size_data = sizes.data<int>(); out_d = size_data[0]; out_h = size_data[1]; out_w = size_data[2]; } } PADDLE_ENFORCE_GT(out_d, 0, platform::errors::InvalidArgument( "out_d in Attr(out_shape) of Op(interpolate) " "should be greater than 0.")); PADDLE_ENFORCE_GT(out_h, 0, platform::errors::InvalidArgument( "out_h in Attr(out_shape) of Op(interpolate) " "should be greater than 0.")); PADDLE_ENFORCE_GT(out_w, 0, platform::errors::InvalidArgument( "out_w in Attr(out_shape) of Op(interpolate) " "should be greater than 0.")); framework::DDim dim_out; if (data_layout == DataLayout::kNCHW) { dim_out = {n, c, out_d, out_h, out_w}; } else { dim_out = {n, out_d, out_h, out_w, c}; } auto output_data = output->mutable_data<T>(dim_out, ctx.GetPlace()); if (in_d == out_d && in_h == out_h && in_w == out_w) { framework::TensorCopy(input, ctx.GetPlace(), output); return; } float ratio_d = 0.f; float ratio_h = 0.f; float ratio_w = 
0.f; if (out_d > 1) { ratio_d = (align_corners) ? static_cast<float>(in_d - 1) / (out_d - 1) : static_cast<float>(in_d) / out_d; } if (out_h > 1) { ratio_h = (align_corners) ? static_cast<float>(in_h - 1) / (out_h - 1) : static_cast<float>(in_h) / out_h; } if (out_w > 1) { ratio_w = (align_corners) ? static_cast<float>(in_w - 1) / (out_w - 1) : static_cast<float>(in_w) / out_w; } int in_dhw = in_d * in_h * in_w; int out_dhw = out_d * out_h * out_w; int in_cdhw = c * in_dhw; int out_cdhw = c * out_dhw; int pixelNum = n * out_cdhw; platform::GpuLaunchConfig config = platform::GetGpuLaunchConfig1D(ctx.cuda_device_context(), pixelNum); if ("trilinear" == interp_method) { KeTrilinearInterpFw<T><<<config.block_per_grid, config.thread_per_block, 0, ctx.cuda_device_context().stream()>>>( input_data, in_d, in_h, in_w, n, in_cdhw, output_data, out_d, out_h, out_w, n, out_cdhw, c, ratio_d, ratio_h, ratio_w, align_corners, align_mode, data_layout); } } template <typename T> static void Interpolate1DCUDABwd(const framework::ExecutionContext& ctx, Tensor* input_grad, const Tensor output_grad) { auto* input = ctx.Input<Tensor>("X"); const std::string data_layout_str = ctx.Attr<std::string>("data_layout"); const DataLayout data_layout = framework::StringToDataLayout(data_layout_str); int n, c, in_d, in_h, in_w; ExtractNCDWH(input->dims(), data_layout, &n, &c, &in_d, &in_h, &in_w); auto interp_method = ctx.Attr<std::string>("interp_method"); bool align_corners = ctx.Attr<bool>("align_corners"); int align_mode = ctx.Attr<int>("align_mode"); int out_w = ctx.Attr<int>("out_w"); float scale; auto scale_tensor = ctx.Input<Tensor>("Scale"); if (scale_tensor != nullptr) { auto scale_data = get_new_data_from_tensor<float>(scale_tensor); scale = scale_data[0]; } else { scale = ctx.Attr<float>("scale"); } if (scale > 0) { out_w = static_cast<int>(in_w * scale); } auto out_size = ctx.Input<Tensor>("OutSize"); if (out_size != nullptr) { Tensor sizes; framework::TensorCopySync(*out_size, platform::CPUPlace(), &sizes); auto size_data = sizes.data<int>(); out_w = size_data[0]; } auto list_new_size_tensor = ctx.MultiInput<framework::Tensor>("SizeTensor"); if (list_new_size_tensor.size() > 0) { // have size tensor auto new_size = get_new_shape(list_new_size_tensor); out_w = new_size[0]; } auto* output_grad_data = output_grad.data<T>(); framework::DDim dim_grad; if (data_layout == DataLayout::kNCHW) { dim_grad = {n, c, in_w}; } else { dim_grad = {n, in_w, c}; } input_grad->mutable_data<T>(dim_grad, ctx.GetPlace()); auto* input_grad_data = input_grad->mutable_data<T>(dim_grad, ctx.GetPlace()); auto& device_ctx = ctx.template device_context<platform::CUDADeviceContext>(); math::SetConstant<platform::CUDADeviceContext, T> zero; zero(device_ctx, input_grad, static_cast<T>(0.0)); if (in_w == out_w) { framework::TensorCopy(output_grad, ctx.GetPlace(), input_grad); return; } float ratio_w = 0.f; if (out_w > 1) { ratio_w = (align_corners) ? 
static_cast<float>(in_w - 1) / (out_w - 1) : static_cast<float>(in_w) / out_w; } int in_cw = c * in_w; int out_cw = c * out_w; int pixelNum = n * out_cw; platform::GpuLaunchConfig config = platform::GetGpuLaunchConfig1D(ctx.cuda_device_context(), pixelNum); if ("linear" == interp_method) { KeLinearInterpBw<T><<<config.block_per_grid, config.thread_per_block, 0, ctx.cuda_device_context().stream()>>>( input_grad_data, in_w, in_cw, output_grad_data, out_w, n, out_cw, c, ratio_w, align_corners, align_mode, data_layout); } } template <typename T> static void Interpolate2DCUDABwd(const framework::ExecutionContext& ctx, Tensor* input_grad, const Tensor output_grad) { auto* input = ctx.Input<Tensor>("X"); const std::string data_layout_str = ctx.Attr<std::string>("data_layout"); const DataLayout data_layout = framework::StringToDataLayout(data_layout_str); int n, c, in_d, in_h, in_w; ExtractNCDWH(input->dims(), data_layout, &n, &c, &in_d, &in_h, &in_w); auto interp_method = ctx.Attr<std::string>("interp_method"); bool align_corners = ctx.Attr<bool>("align_corners"); int align_mode = ctx.Attr<int>("align_mode"); int out_h = ctx.Attr<int>("out_h"); int out_w = ctx.Attr<int>("out_w"); float scale; auto scale_tensor = ctx.Input<Tensor>("Scale"); if (scale_tensor != nullptr) { auto scale_data = get_new_data_from_tensor<float>(scale_tensor); scale = scale_data[0]; } else { scale = ctx.Attr<float>("scale"); } if (scale > 0) { out_h = static_cast<int>(in_h * scale); out_w = static_cast<int>(in_w * scale); } auto out_size = ctx.Input<Tensor>("OutSize"); if (out_size != nullptr) { Tensor sizes; framework::TensorCopySync(*out_size, platform::CPUPlace(), &sizes); auto size_data = sizes.data<int>(); out_h = size_data[0]; out_w = size_data[1]; } auto list_new_size_tensor = ctx.MultiInput<framework::Tensor>("SizeTensor"); if (list_new_size_tensor.size() > 0) { // have size tensor auto new_size = get_new_shape(list_new_size_tensor); out_h = new_size[0]; out_w = new_size[1]; } auto* output_grad_data = output_grad.data<T>(); framework::DDim dim_grad; if (data_layout == DataLayout::kNCHW) { dim_grad = {n, c, in_h, in_w}; } else { dim_grad = {n, in_h, in_w, c}; } input_grad->mutable_data<T>(dim_grad, ctx.GetPlace()); auto* input_grad_data = input_grad->mutable_data<T>(dim_grad, ctx.GetPlace()); auto& device_ctx = ctx.template device_context<platform::CUDADeviceContext>(); math::SetConstant<platform::CUDADeviceContext, T> zero; zero(device_ctx, input_grad, static_cast<T>(0.0)); if (in_h == out_h && in_w == out_w) { framework::TensorCopy(output_grad, ctx.GetPlace(), input_grad); return; } float ratio_h = 0.f; float ratio_w = 0.f; if (out_h > 1) { ratio_h = (align_corners) ? static_cast<float>(in_h - 1) / (out_h - 1) : static_cast<float>(in_h) / out_h; } if (out_w > 1) { ratio_w = (align_corners) ? 
static_cast<float>(in_w - 1) / (out_w - 1) : static_cast<float>(in_w) / out_w; } int in_hw = in_h * in_w; int out_hw = out_h * out_w; int in_chw = c * in_hw; int out_chw = c * out_hw; int pixelNum = n * out_chw; platform::GpuLaunchConfig config = platform::GetGpuLaunchConfig1D(ctx.cuda_device_context(), pixelNum); if ("nearest" == interp_method) { KeNearestNeighborInterpBw< T><<<config.block_per_grid, config.thread_per_block, 0, ctx.cuda_device_context().stream()>>>( input_grad_data, in_h, in_w, n, in_chw, output_grad_data, out_h, out_w, n, out_chw, c, ratio_h, ratio_w, align_corners, data_layout); } else if ("bilinear" == interp_method) { KeBilinearInterpBw<T><<<config.block_per_grid, config.thread_per_block, 0, ctx.cuda_device_context().stream()>>>( input_grad_data, in_h, in_w, n, in_chw, output_grad_data, out_h, out_w, n, out_chw, c, ratio_h, ratio_w, align_corners, align_mode, data_layout); } else if ("bicubic" == interp_method) { KeBicubicInterpBw<T><<<config.block_per_grid, 512, 0, ctx.cuda_device_context().stream()>>>( input_grad_data, in_h, in_w, n, in_chw, output_grad_data, out_h, out_w, n, out_chw, c, ratio_h, ratio_w, align_corners, data_layout); } } template <typename T> static void Interpolate3DCUDABwd(const framework::ExecutionContext& ctx, Tensor* input_grad, const Tensor& output_grad) { auto* input = ctx.Input<Tensor>("X"); const std::string data_layout_str = ctx.Attr<std::string>("data_layout"); const DataLayout data_layout = framework::StringToDataLayout(data_layout_str); int n, c, in_d, in_h, in_w; ExtractNCDWH(input->dims(), data_layout, &n, &c, &in_d, &in_h, &in_w); auto interp_method = ctx.Attr<std::string>("interp_method"); bool align_corners = ctx.Attr<bool>("align_corners"); int align_mode = ctx.Attr<int>("align_mode"); int out_d = ctx.Attr<int>("out_d"); int out_h = ctx.Attr<int>("out_h"); int out_w = ctx.Attr<int>("out_w"); float scale; auto scale_tensor = ctx.Input<Tensor>("Scale"); if (scale_tensor != nullptr) { auto scale_data = get_new_data_from_tensor<float>(scale_tensor); scale = scale_data[0]; } else { scale = ctx.Attr<float>("scale"); } if (scale > 0) { out_d = static_cast<int>(in_d * scale); out_h = static_cast<int>(in_h * scale); out_w = static_cast<int>(in_w * scale); } auto out_size = ctx.Input<Tensor>("OutSize"); if (out_size != nullptr) { Tensor sizes; framework::TensorCopySync(*out_size, platform::CPUPlace(), &sizes); auto size_data = sizes.data<int>(); out_d = size_data[0]; out_h = size_data[1]; out_w = size_data[2]; } auto list_new_size_tensor = ctx.MultiInput<framework::Tensor>("SizeTensor"); if (list_new_size_tensor.size() > 0) { // have size tensor auto new_size = get_new_shape(list_new_size_tensor); out_d = new_size[0]; out_h = new_size[1]; out_w = new_size[2]; } auto* output_grad_data = output_grad.data<T>(); framework::DDim dim_grad; if (data_layout == DataLayout::kNCHW) { dim_grad = {n, c, in_d, in_h, in_w}; } else { dim_grad = {n, in_d, in_h, in_w, c}; } auto* input_grad_data = input_grad->mutable_data<T>(dim_grad, ctx.GetPlace()); auto& device_ctx = ctx.template device_context<platform::CUDADeviceContext>(); math::SetConstant<platform::CUDADeviceContext, T> zero; zero(device_ctx, input_grad, static_cast<T>(0.0)); if (in_d == out_d && in_h == out_h && in_w == out_w) { framework::TensorCopy(output_grad, ctx.GetPlace(), input_grad); return; } float ratio_d = 0.f; float ratio_h = 0.f; float ratio_w = 0.f; if (out_d > 1) { ratio_d = (align_corners) ? 
static_cast<float>(in_d - 1) / (out_d - 1) : static_cast<float>(in_d) / out_d; } if (out_h > 1) { ratio_h = (align_corners) ? static_cast<float>(in_h - 1) / (out_h - 1) : static_cast<float>(in_h) / out_h; } if (out_w > 1) { ratio_w = (align_corners) ? static_cast<float>(in_w - 1) / (out_w - 1) : static_cast<float>(in_w) / out_w; } int in_dhw = in_d * in_h * in_w; int out_dhw = out_d * out_h * out_w; int in_cdhw = c * in_dhw; int out_cdhw = c * out_dhw; int pixelNum = n * out_cdhw; platform::GpuLaunchConfig config = platform::GetGpuLaunchConfig1D(ctx.cuda_device_context(), pixelNum); if ("trilinear" == interp_method) { KeTrilinearInterpBw<T><<<config.block_per_grid, config.thread_per_block, 0, ctx.cuda_device_context().stream()>>>( input_grad_data, in_d, in_h, in_w, n, in_cdhw, output_grad_data, out_d, out_h, out_w, n, out_cdhw, c, ratio_d, ratio_h, ratio_w, align_corners, align_mode, data_layout); } } template <typename T> class InterpolateOpCUDAKernel : public framework::OpKernel<T> { public: void Compute(const framework::ExecutionContext& ctx) const override { PADDLE_ENFORCE_EQ( platform::is_gpu_place(ctx.GetPlace()), true, platform::errors::NotFound("This kernel only runs on GPU device.")); auto* input = ctx.Input<Tensor>("X"); auto* output = ctx.Output<Tensor>("Out"); auto input_dims = input->dims(); if (input_dims.size() == 3) { // 1D interpolation Interpolate1DCUDAFwd<T>(ctx, *input, output); } else if (input_dims.size() == 4) { // 2D interpolation Interpolate2DCUDAFwd<T>(ctx, *input, output); } else if (input_dims.size() == 5) { // 3D interpolation Interpolate3DCUDAFwd<T>(ctx, *input, output); } } }; template <typename T> class InterpolateGradOpCUDAKernel : public framework::OpKernel<T> { public: void Compute(const framework::ExecutionContext& ctx) const override { PADDLE_ENFORCE_EQ( platform::is_gpu_place(ctx.GetPlace()), true, platform::errors::NotFound("This kernel only runs on GPU device.")); auto* input_grad = ctx.Output<Tensor>(framework::GradVarName("X")); auto* output_grad = ctx.Input<Tensor>(framework::GradVarName("Out")); auto output_grad_dims = output_grad->dims(); if (output_grad_dims.size() == 3) { // 1D interpolation Interpolate1DCUDABwd<T>(ctx, input_grad, *output_grad); } else if (output_grad_dims.size() == 4) { // 2D interpolation Interpolate2DCUDABwd<T>(ctx, input_grad, *output_grad); } else if (output_grad_dims.size() == 5) { // 3D interpolation Interpolate3DCUDABwd<T>(ctx, input_grad, *output_grad); } } }; } // namespace operators } // namespace paddle namespace ops = paddle::operators; REGISTER_OP_CUDA_KERNEL(bilinear_interp, ops::InterpolateOpCUDAKernel<float>, ops::InterpolateOpCUDAKernel<double>, ops::InterpolateOpCUDAKernel<int>); REGISTER_OP_CUDA_KERNEL(bilinear_interp_grad, ops::InterpolateGradOpCUDAKernel<float>, ops::InterpolateGradOpCUDAKernel<double>); REGISTER_OP_CUDA_KERNEL(nearest_interp, ops::InterpolateOpCUDAKernel<float>, ops::InterpolateOpCUDAKernel<double>, ops::InterpolateOpCUDAKernel<int>); REGISTER_OP_CUDA_KERNEL(nearest_interp_grad, ops::InterpolateGradOpCUDAKernel<float>, ops::InterpolateGradOpCUDAKernel<double>); REGISTER_OP_CUDA_KERNEL(trilinear_interp, ops::InterpolateOpCUDAKernel<float>, ops::InterpolateOpCUDAKernel<double>, ops::InterpolateOpCUDAKernel<int>); REGISTER_OP_CUDA_KERNEL(trilinear_interp_grad, ops::InterpolateGradOpCUDAKernel<float>, ops::InterpolateGradOpCUDAKernel<double>); REGISTER_OP_CUDA_KERNEL(linear_interp, ops::InterpolateOpCUDAKernel<float>, ops::InterpolateOpCUDAKernel<double>, ops::InterpolateOpCUDAKernel<int>); 
REGISTER_OP_CUDA_KERNEL(linear_interp_grad, ops::InterpolateGradOpCUDAKernel<float>, ops::InterpolateGradOpCUDAKernel<double>); REGISTER_OP_CUDA_KERNEL(bicubic_interp, ops::InterpolateOpCUDAKernel<float>, ops::InterpolateOpCUDAKernel<double>, ops::InterpolateOpCUDAKernel<int>); REGISTER_OP_CUDA_KERNEL(bicubic_interp_grad, ops::InterpolateGradOpCUDAKernel<float>, ops::InterpolateGradOpCUDAKernel<double>);
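The interpolate kernels registered above all reduce resampling to one scale factor per axis, ratio = (in - 1) / (out - 1) when align_corners is set and in / out otherwise, and hand it to a flat 1-D launch sized by GetGpuLaunchConfig1D. The following is a self-contained sketch of how such a ratio is typically consumed on the device; it is not Paddle's kernel, and the name ResizeNearest1D and the rounding policy are illustrative assumptions.

#include <cuda_runtime.h>
#include <cstdio>

// Illustrative 1-D nearest-neighbour resize; only the ratio arithmetic
// mirrors the host code above, the rounding choice is an assumption.
__global__ void ResizeNearest1D(const float* in, float* out, int in_w,
                                int out_w, float ratio_w, bool align_corners) {
  int i = blockIdx.x * blockDim.x + threadIdx.x;
  if (i >= out_w) return;
  int src = align_corners ? static_cast<int>(ratio_w * i + 0.5f)
                          : static_cast<int>(ratio_w * i);
  if (src > in_w - 1) src = in_w - 1;
  out[i] = in[src];
}

int main() {
  const int in_w = 4, out_w = 8;
  const bool align_corners = false;
  float h_in[in_w] = {0.f, 1.f, 2.f, 3.f};
  float h_out[out_w];
  float ratio_w = 0.f;
  if (out_w > 1) {
    ratio_w = align_corners ? static_cast<float>(in_w - 1) / (out_w - 1)
                            : static_cast<float>(in_w) / out_w;
  }
  float *d_in, *d_out;
  cudaMalloc((void**)&d_in, in_w * sizeof(float));
  cudaMalloc((void**)&d_out, out_w * sizeof(float));
  cudaMemcpy(d_in, h_in, in_w * sizeof(float), cudaMemcpyHostToDevice);
  ResizeNearest1D<<<1, 64>>>(d_in, d_out, in_w, out_w, ratio_w, align_corners);
  cudaMemcpy(h_out, d_out, out_w * sizeof(float), cudaMemcpyDeviceToHost);
  for (int i = 0; i < out_w; ++i) printf("%g ", h_out[i]);
  printf("\n");
  cudaFree(d_in);
  cudaFree(d_out);
  return 0;
}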
c6ef06e3bebdfd6301710a1f26b808b61fcb5f50.hip
// !!! This is a file automatically generated by hipify!!! #include <stdbool.h> #include <stdio.h> #include <string.h> #include <getopt.h> #include <hiprand/hiprand_kernel.h> #include <stdlib.h> #include <hip/hip_runtime.h> #include <sys/time.h> #include "_kernel_clearsuma_cuda.cu" #include<chrono> #include<iostream> using namespace std; using namespace std::chrono; int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}}; int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}}; int main(int argc, char **argv) { hipSetDevice(0); char* p;int matrix_len=strtol(argv[1], &p, 10); for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){ for(int block_looper=0;block_looper<20;block_looper++){ int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1]; int num_grid = 1; float *dev_suma = NULL; hipMalloc(&dev_suma, XSIZE*YSIZE); int iXSIZE= XSIZE; int iYSIZE= YSIZE; while(iXSIZE%BLOCKX!=0) { iXSIZE++; } while(iYSIZE%BLOCKY!=0) { iYSIZE++; } dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY); dim3 threadBlock(BLOCKX, BLOCKY); hipFree(0);hipLaunchKernelGGL(( _kernel_clearsuma_cuda), dim3(gridBlock),dim3(threadBlock), 0, 0, num_grid,dev_suma); hipDeviceSynchronize(); for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {hipLaunchKernelGGL(( _kernel_clearsuma_cuda), dim3(gridBlock),dim3(threadBlock), 0, 0, num_grid,dev_suma); } auto start = steady_clock::now(); for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {hipLaunchKernelGGL(( _kernel_clearsuma_cuda), dim3(gridBlock),dim3(threadBlock), 0, 0, num_grid,dev_suma); } auto end = steady_clock::now(); auto usecs = duration_cast<duration<float, microseconds::period> >(end - start); cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl; } }}
c6ef06e3bebdfd6301710a1f26b808b61fcb5f50.cu
#include <stdbool.h> #include <stdio.h> #include <string.h> #include <getopt.h> #include <curand_kernel.h> #include <stdlib.h> #include <cuda.h> #include <sys/time.h> #include "_kernel_clearsuma_cuda.cu" #include<chrono> #include<iostream> using namespace std; using namespace std::chrono; int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}}; int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}}; int main(int argc, char **argv) { cudaSetDevice(0); char* p;int matrix_len=strtol(argv[1], &p, 10); for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){ for(int block_looper=0;block_looper<20;block_looper++){ int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1]; int num_grid = 1; float *dev_suma = NULL; cudaMalloc(&dev_suma, XSIZE*YSIZE); int iXSIZE= XSIZE; int iYSIZE= YSIZE; while(iXSIZE%BLOCKX!=0) { iXSIZE++; } while(iYSIZE%BLOCKY!=0) { iYSIZE++; } dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY); dim3 threadBlock(BLOCKX, BLOCKY); cudaFree(0); _kernel_clearsuma_cuda<<<gridBlock,threadBlock>>>(num_grid,dev_suma); cudaDeviceSynchronize(); for (int loop_counter = 0; loop_counter < 10; ++loop_counter) { _kernel_clearsuma_cuda<<<gridBlock,threadBlock>>>(num_grid,dev_suma); } auto start = steady_clock::now(); for (int loop_counter = 0; loop_counter < 1000; loop_counter++) { _kernel_clearsuma_cuda<<<gridBlock,threadBlock>>>(num_grid,dev_suma); } auto end = steady_clock::now(); auto usecs = duration_cast<duration<float, microseconds::period> >(end - start); cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl; } }}
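The sweep harness above times 1000 launches with steady_clock but never synchronizes before taking the end timestamp, so the interval it prints mostly reflects launch enqueue cost rather than kernel execution time. A minimal sketch of event-based timing for the same kind of loop follows; dummy_kernel and the sizes are placeholders, not the harness's _kernel_clearsuma_cuda.

#include <cuda_runtime.h>
#include <cstdio>

// Stand-in kernel; the point here is the timing pattern, not the work.
__global__ void dummy_kernel(float* p, int n) {
  int i = blockIdx.x * blockDim.x + threadIdx.x;
  if (i < n) p[i] = 0.f;
}

int main() {
  const int n = 1024, iters = 1000;
  float* d;
  cudaMalloc((void**)&d, n * sizeof(float));
  cudaEvent_t start, stop;
  cudaEventCreate(&start);
  cudaEventCreate(&stop);
  cudaEventRecord(start);
  for (int i = 0; i < iters; ++i) {
    dummy_kernel<<<(n + 255) / 256, 256>>>(d, n);
  }
  cudaEventRecord(stop);
  cudaEventSynchronize(stop);  // wait until every queued launch has finished
  float ms = 0.f;
  cudaEventElapsedTime(&ms, start, stop);
  printf("avg per launch: %f us\n", ms * 1000.f / iters);
  cudaEventDestroy(start);
  cudaEventDestroy(stop);
  cudaFree(d);
  return 0;
}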
3b486c2a38ae2607677529b2da9d7e3d5b70bb17.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include <stdlib.h>
#include <math.h>

__global__ void doubling(int n, float *a)
{
  int tid = blockDim.x*blockIdx.x + threadIdx.x;
  int i;
  // Grid-stride loop: 65535*256 threads cover all n elements.
  while(tid < n) {
    a[tid] *= 2;
    for(i=0; i<1000; i++) a[tid] *= 1;
    tid += blockDim.x * gridDim.x;
  }
}

int main()
{
  int i;
  int n=65535*3500;
  float *a, *a_dev;
  a = (float *)malloc(n*sizeof(float));
  hipMalloc((void**)&a_dev, n*sizeof(float));
  for(i=0; i<n; i++) a[i] = 1.;
  hipMemcpy(a_dev, a, n*sizeof(float), hipMemcpyHostToDevice);
  hipLaunchKernelGGL(doubling, dim3(65535), dim3(256), 0, 0, n, a_dev);
  hipMemcpy(a, a_dev, n*sizeof(float), hipMemcpyDeviceToHost);
  for(i=0; i<n; i++) {
    if(fabs(a[i] - 2.) > 1e-5) printf("a[%d] = %g\n", i, a[i]); /* fabs keeps the check in floating point */
  }
  free(a);
  hipFree(a_dev);
  return 0;
}
3b486c2a38ae2607677529b2da9d7e3d5b70bb17.cu
#include <stdio.h>
#include <stdlib.h>
#include <math.h>

__global__ void doubling(int n, float *a)
{
  int tid = blockDim.x*blockIdx.x + threadIdx.x;
  int i;
  // Grid-stride loop: 65535*256 threads cover all n elements.
  while(tid < n) {
    a[tid] *= 2;
    for(i=0; i<1000; i++) a[tid] *= 1;
    tid += blockDim.x * gridDim.x;
  }
}

int main()
{
  int i;
  int n=65535*3500;
  float *a, *a_dev;
  a = (float *)malloc(n*sizeof(float));
  cudaMalloc((void**)&a_dev, n*sizeof(float));
  for(i=0; i<n; i++) a[i] = 1.;
  cudaMemcpy(a_dev, a, n*sizeof(float), cudaMemcpyHostToDevice);
  doubling<<<65535, 256>>>(n, a_dev);
  cudaMemcpy(a, a_dev, n*sizeof(float), cudaMemcpyDeviceToHost);
  for(i=0; i<n; i++) {
    if(fabs(a[i] - 2.) > 1e-5) printf("a[%d] = %g\n", i, a[i]); /* fabs keeps the check in floating point */
  }
  free(a);
  cudaFree(a_dev);
  return 0;
}
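The doubling pair above calls hipMalloc/cudaMalloc, the memcpys, and the launch without inspecting any return codes. A minimal sketch of the usual check macro follows; CUDA_CHECK is our name for it, while the power microbenchmark later in this section ships its own checkCudaErrors helper.

#include <cuda_runtime.h>
#include <cstdio>
#include <cstdlib>

// CUDA_CHECK aborts with file/line and the runtime's error string on failure.
#define CUDA_CHECK(call)                                              \
  do {                                                                \
    cudaError_t err_ = (call);                                        \
    if (err_ != cudaSuccess) {                                        \
      fprintf(stderr, "%s:%d: %s\n", __FILE__, __LINE__,              \
              cudaGetErrorString(err_));                              \
      exit(EXIT_FAILURE);                                             \
    }                                                                 \
  } while (0)

int main() {
  float* d = NULL;
  CUDA_CHECK(cudaMalloc((void**)&d, 1024 * sizeof(float)));
  CUDA_CHECK(cudaMemset(d, 0, 1024 * sizeof(float)));
  CUDA_CHECK(cudaGetLastError());        // reports bad launch configurations
  CUDA_CHECK(cudaDeviceSynchronize());   // surfaces asynchronous kernel errors
  CUDA_CHECK(cudaFree(d));
  return 0;
}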
c4eb7e572448b74d9d424d46c6af54278ac5c9ce.hip
// !!! This is a file automatically generated by hipify!!! // Includes #include <stdio.h> #include <stdlib.h> // includes from project // includes from CUDA #include <hip/hip_runtime.h> //#include <helper_math.h> #define THREADS_PER_BLOCK 256 #define NUM_OF_BLOCKS 640 // Variables unsigned* h_A; unsigned* h_B; unsigned* h_C; unsigned* d_A; unsigned* d_B; unsigned* d_C; // Functions void CleanupResources(void); void RandomInit(unsigned*, int); //////////////////////////////////////////////////////////////////////////////// // These are CUDA Helper functions // This will output the proper CUDA error strings in the event that a CUDA host call returns an error #define checkCudaErrors(err) __checkCudaErrors (err, __FILE__, __LINE__) inline void __checkCudaErrors(hipError_t err, const char *file, const int line ) { if(hipSuccess != err){ fprintf(stderr, "%s(%i) : CUDA Runtime API error %d: %s.\n",file, line, (int)err, hipGetErrorString( err ) ); exit(-1); } } // This will output the proper error string when calling hipGetLastError #define getLastCudaError(msg) __getLastCudaError (msg, __FILE__, __LINE__) inline void __getLastCudaError(const char *errorMessage, const char *file, const int line ) { hipError_t err = hipGetLastError(); if (hipSuccess != err){ fprintf(stderr, "%s(%i) : getLastCudaError() CUDA error : %s : (%d) %s.\n",file, line, errorMessage, (int)err, hipGetErrorString( err ) ); exit(-1); } } // end of CUDA Helper Functions __global__ void PowerKernal2(const unsigned* A, const unsigned* B, unsigned* C, int iterations) { int i = blockDim.x * blockIdx.x + threadIdx.x; //Do Some Computation unsigned Value1=1; unsigned Value2=A[i]; unsigned Value3=B[i]; unsigned Value; unsigned I1=A[i]; unsigned I2=B[i]; // Excessive INT addition access if((i%2)==0){ #pragma unroll 100 for(unsigned k=0; k<iterations;k++) { Value2= I1*Value1; Value3=I2*Value3; Value1*=Value2; Value3*=Value1; Value2*=Value3; Value1*=Value3; } } __syncthreads(); Value=Value1; C[i]=Value; __syncthreads(); } int main(int argc, char** argv) { int iterations; if (argc != 2){ fprintf(stderr,"usage: %s #iterations\n",argv[0]); exit(1); } else{ iterations = atoi(argv[1]); } printf("Power Microbenchmark with %d iterations\n",iterations); int N = THREADS_PER_BLOCK*NUM_OF_BLOCKS; size_t size = N * sizeof(unsigned); // Allocate input vectors h_A and h_B in host memory h_A = (unsigned*)malloc(size); if (h_A == 0) CleanupResources(); h_B = (unsigned*)malloc(size); if (h_B == 0) CleanupResources(); h_C = (unsigned*)malloc(size); if (h_C == 0) CleanupResources(); // Initialize input vectors RandomInit(h_A, N); RandomInit(h_B, N); // Allocate vectors in device memory printf("before\n"); checkCudaErrors( hipMalloc((void**)&d_A, size) ); checkCudaErrors( hipMalloc((void**)&d_B, size) ); checkCudaErrors( hipMalloc((void**)&d_C, size) ); printf("after\n"); hipEvent_t start, stop; float elapsedTime = 0; checkCudaErrors(hipEventCreate(&start)); checkCudaErrors(hipEventCreate(&stop)); // Copy vectors from host memory to device memory checkCudaErrors( hipMemcpy(d_A, h_A, size, hipMemcpyHostToDevice) ); checkCudaErrors( hipMemcpy(d_B, h_B, size, hipMemcpyHostToDevice) ); dim3 dimGrid(NUM_OF_BLOCKS,1); dim3 dimBlock(THREADS_PER_BLOCK,1); dim3 dimGrid2(1,1); dim3 dimBlock2(1,1); checkCudaErrors(hipEventRecord(start)); hipLaunchKernelGGL(( PowerKernal2), dim3(dimGrid),dim3(dimBlock), 0, 0, d_A, d_B, d_C, iterations); checkCudaErrors(hipEventRecord(stop)); checkCudaErrors(hipEventSynchronize(stop)); checkCudaErrors(hipEventElapsedTime(&elapsedTime, 
start, stop)); printf("gpu execution time = %.2f s\n", elapsedTime/1000); getLastCudaError("kernel launch failure"); hipDeviceSynchronize(); // Copy result from device memory to host memory // h_C contains the result in host memory checkCudaErrors( hipMemcpy(h_C, d_C, size, hipMemcpyDeviceToHost) ); checkCudaErrors(hipEventDestroy(start)); checkCudaErrors(hipEventDestroy(stop)); CleanupResources(); return 0; } void CleanupResources(void) { // Free device memory if (d_A) hipFree(d_A); if (d_B) hipFree(d_B); if (d_C) hipFree(d_C); // Free host memory if (h_A) free(h_A); if (h_B) free(h_B); if (h_C) free(h_C); } // Allocates an array with random unsigned entries. void RandomInit(unsigned* data, int n) { for (int i = 0; i < n; ++i){ srand((unsigned)time(0)); data[i] = rand() / RAND_MAX; } }
c4eb7e572448b74d9d424d46c6af54278ac5c9ce.cu
// Includes #include <stdio.h> #include <stdlib.h> // includes from project // includes from CUDA #include <cuda_runtime.h> //#include <helper_math.h> #define THREADS_PER_BLOCK 256 #define NUM_OF_BLOCKS 640 // Variables unsigned* h_A; unsigned* h_B; unsigned* h_C; unsigned* d_A; unsigned* d_B; unsigned* d_C; // Functions void CleanupResources(void); void RandomInit(unsigned*, int); //////////////////////////////////////////////////////////////////////////////// // These are CUDA Helper functions // This will output the proper CUDA error strings in the event that a CUDA host call returns an error #define checkCudaErrors(err) __checkCudaErrors (err, __FILE__, __LINE__) inline void __checkCudaErrors(cudaError err, const char *file, const int line ) { if(cudaSuccess != err){ fprintf(stderr, "%s(%i) : CUDA Runtime API error %d: %s.\n",file, line, (int)err, cudaGetErrorString( err ) ); exit(-1); } } // This will output the proper error string when calling cudaGetLastError #define getLastCudaError(msg) __getLastCudaError (msg, __FILE__, __LINE__) inline void __getLastCudaError(const char *errorMessage, const char *file, const int line ) { cudaError_t err = cudaGetLastError(); if (cudaSuccess != err){ fprintf(stderr, "%s(%i) : getLastCudaError() CUDA error : %s : (%d) %s.\n",file, line, errorMessage, (int)err, cudaGetErrorString( err ) ); exit(-1); } } // end of CUDA Helper Functions __global__ void PowerKernal2(const unsigned* A, const unsigned* B, unsigned* C, int iterations) { int i = blockDim.x * blockIdx.x + threadIdx.x; //Do Some Computation unsigned Value1=1; unsigned Value2=A[i]; unsigned Value3=B[i]; unsigned Value; unsigned I1=A[i]; unsigned I2=B[i]; // Excessive INT addition access if((i%2)==0){ #pragma unroll 100 for(unsigned k=0; k<iterations;k++) { Value2= I1*Value1; Value3=I2*Value3; Value1*=Value2; Value3*=Value1; Value2*=Value3; Value1*=Value3; } } __syncthreads(); Value=Value1; C[i]=Value; __syncthreads(); } int main(int argc, char** argv) { int iterations; if (argc != 2){ fprintf(stderr,"usage: %s #iterations\n",argv[0]); exit(1); } else{ iterations = atoi(argv[1]); } printf("Power Microbenchmark with %d iterations\n",iterations); int N = THREADS_PER_BLOCK*NUM_OF_BLOCKS; size_t size = N * sizeof(unsigned); // Allocate input vectors h_A and h_B in host memory h_A = (unsigned*)malloc(size); if (h_A == 0) CleanupResources(); h_B = (unsigned*)malloc(size); if (h_B == 0) CleanupResources(); h_C = (unsigned*)malloc(size); if (h_C == 0) CleanupResources(); // Initialize input vectors RandomInit(h_A, N); RandomInit(h_B, N); // Allocate vectors in device memory printf("before\n"); checkCudaErrors( cudaMalloc((void**)&d_A, size) ); checkCudaErrors( cudaMalloc((void**)&d_B, size) ); checkCudaErrors( cudaMalloc((void**)&d_C, size) ); printf("after\n"); cudaEvent_t start, stop; float elapsedTime = 0; checkCudaErrors(cudaEventCreate(&start)); checkCudaErrors(cudaEventCreate(&stop)); // Copy vectors from host memory to device memory checkCudaErrors( cudaMemcpy(d_A, h_A, size, cudaMemcpyHostToDevice) ); checkCudaErrors( cudaMemcpy(d_B, h_B, size, cudaMemcpyHostToDevice) ); dim3 dimGrid(NUM_OF_BLOCKS,1); dim3 dimBlock(THREADS_PER_BLOCK,1); dim3 dimGrid2(1,1); dim3 dimBlock2(1,1); checkCudaErrors(cudaEventRecord(start)); PowerKernal2<<<dimGrid,dimBlock>>>(d_A, d_B, d_C, iterations); checkCudaErrors(cudaEventRecord(stop)); checkCudaErrors(cudaEventSynchronize(stop)); checkCudaErrors(cudaEventElapsedTime(&elapsedTime, start, stop)); printf("gpu execution time = %.2f s\n", elapsedTime/1000); 
getLastCudaError("kernel launch failure"); cudaThreadSynchronize(); // Copy result from device memory to host memory // h_C contains the result in host memory checkCudaErrors( cudaMemcpy(h_C, d_C, size, cudaMemcpyDeviceToHost) ); checkCudaErrors(cudaEventDestroy(start)); checkCudaErrors(cudaEventDestroy(stop)); CleanupResources(); return 0; } void CleanupResources(void) { // Free device memory if (d_A) cudaFree(d_A); if (d_B) cudaFree(d_B); if (d_C) cudaFree(d_C); // Free host memory if (h_A) free(h_A); if (h_B) free(h_B); if (h_C) free(h_C); } // Allocates an array with random unsigned entries. void RandomInit(unsigned* data, int n) { for (int i = 0; i < n; ++i){ srand((unsigned)time(0)); data[i] = rand() / RAND_MAX; } }
f450c2b6aa21051b404c72a08892c6aa2ec3041e.hip
// !!! This is a file automatically generated by hipify!!! #include "../common/book.h" #include "hip/hip_runtime.h" #include <stdio.h> #include <stdlib.h> #include <hip/hip_runtime.h> #include "device_launch_parameters.h" #include <conio.h> #include <Windows.h> #include <math.h> #include <time.h> //Autores: Sanz Sacristn, Sergio y Zamorano Ortega, lvaro FILE *doc; FILE *leer; int columnas; int filas; char dificultad; char modo; int punt_record; char pedirModo(); char pedirDificultad(); int pedirFilasTablero(); int pedirColumnasTablero(); int* generarMatriz(); void imprimirMatriz(int *matriz, int *numVidas, int *puntos); bool imprimirEspacios(int x, int* matriz); int contNum(int num); int maxMatriz(int *matriz); bool rellenarMatriz(int* matriz); int recuento(int* matriz); char comprobarPropiedades(); void jugar(int *matriz, int* numVidas, int* puntos); void cargarPartida(); void guardarPartida(int* matriz, int* numVidas, int* puntos); int sumaPuntAux(int tamao, int* punt); void guardarRecord(); void cargarRecord(); int hilosBloque(int size); int mcm(int a, int b); int main(void) { int *matriz; char cargarP; //Funciones printf("Quieres cargar una partida anterior o empezar de nuevo ? (s: si | n : no)\n"); fflush(stdin); scanf("%c", &cargarP); while (cargarP != 's' && cargarP != 'n') { printf("Introduce un valor valido para iniciar el juego\n"); scanf("%c", &cargarP); } //Si no cargamos partida if (cargarP == 'n') { int vida = 5; int *numVidas; numVidas = &vida; int numPuntos = 0; int *puntos; puntos = &numPuntos; modo = pedirModo(); //Pedimos modo dificultad = pedirDificultad(); //Pedimos dificultad filas = pedirFilasTablero(); //Pedimos filas columnas = pedirColumnasTablero(); //Pedimos columnas printf("\nLos datos introducidos por el usuario son: %c %c %d %d\n", modo, dificultad, filas, columnas); char error = comprobarPropiedades(); if (error == 'T') { //Si al comprobar propiedades nos da error goto Error; } matriz = generarMatriz(); //Generamos la matriz del tablero rellenarMatriz(matriz); //La rellenamos de semillas cargarRecord(); //Cargamos la puntuacion record getchar(); getchar(); system("cls"); //Procedimiento para jugar al juego jugar(matriz, numVidas, puntos); //Al terminar de jugar, guardamos el record guardarRecord(); printf("\n - - - - - - - - - - - - - - - - - - - - -"); printf("\n - - - - - - JUEGO TERMINADO - - - - - - "); printf("\n - - - - - - - - - - - - - - - - - - - - - "); } else { //cargamos la partida cargarPartida(); guardarRecord(); printf("\n - - - - - - - - - - - - - - - - - - - - -"); printf("\n - - - - - - JUEGO TERMINADO - - - - - - "); printf("\n - - - - - - - - - - - - - - - - - - - - - "); } Error: getchar(); } //Funcion que devuelve un error si las dimensiones de la martiz son demasiado grandes para la grafica char comprobarPropiedades() { hipDeviceProp_t prop; char error = 'F'; int count; size_t globalMem; HANDLE_ERROR(hipGetDeviceCount(&count)); for (int i = 0; i < count; i++) { HANDLE_ERROR(hipGetDeviceProperties(&prop, i)); globalMem = prop.totalGlobalMem; //Si el tamao de la matriz supera las limitaciones de capacidad if ((filas*columnas * sizeof(int)) >= globalMem) { printf("La matriz solicitada ocupa %zd y excede la capacidad de memoria global de tu tarjeta grfica que es %zd \n", filas*columnas * sizeof(int), globalMem); error = 'T'; } } return error; } // GENERAR FUNCIONES BASICAS PARA EL TABLERO //Generar matriz a 0 int *generarMatriz() { int* matriz = (int*)malloc(filas*columnas * sizeof(int)); for (int i = 0; i < filas*columnas; i++) { 
matriz[i] = 0; } return matriz; } //Rellenar la matriz bool rellenarMatriz(int* matriz) { bool terminado = false; int numSemillas; int numAleatorio; int random; time_t t; //Iniciamos el modo aleatorio srand((unsigned)time(&t)); //Si la dificultad es facil if (dificultad == 'F') { numSemillas = 15; //Introducimos 15 semillas if (recuento(matriz) < numSemillas) { terminado = true; } else { int posiblesNum[] = { 2, 4,8 }; //Seleccionamos uno de estos nmeros en cada semilla numAleatorio = 3; while (numSemillas > 0 && !terminado) { random = rand() % (filas*columnas); if (matriz[random] == 0) { matriz[random] = posiblesNum[rand() % numAleatorio]; //Aadimos la nueva semilla en una posicion aleatoria numSemillas = numSemillas - 1; } } //Si hay menos espacios libres que 15 game over if (recuento(matriz) < 15) { terminado = true; } } } //Si la dificultad es dificil else { numSemillas = 8; //Introducimos 8 semillas if (recuento(matriz) < numSemillas) { terminado = true; } else { int posiblesNum[] = { 2, 4 }; //Seleccionamos uno de estos numeros en cada semilla numAleatorio = 2; while (numSemillas > 0 && !terminado) { random = rand() % (filas*columnas); if (matriz[random] == 0) { matriz[random] = posiblesNum[rand() % numAleatorio]; //Aadimos la nueva semilla en una posicion aleatoria numSemillas = numSemillas - 1; } } //Si hay menos espacios libres que 8 game over if (recuento(matriz) < 8) { terminado = true; } } } return terminado; } //Cuenta el n de 0s en la matriz int recuento(int* matriz) { int recuento = 0; for (int i = 0; i < filas*columnas; i++) { if (matriz[i] == 0) { recuento = recuento + 1; } } return recuento; } //Metodo que solicita al usuario el modo char pedirModo() { char modo = ' '; getchar(); while (modo != 'M' && modo != 'A') { printf("Que modo desea para el juego? Automatico (A), Manual (M)\n"); fflush(stdin); scanf("%c", &modo); if (modo != 'M' && modo != 'A') { printf("Usted ha introducido un modo que no existe: -%c.\n", modo); printf("Por favor, introduzca uno de los siguientes dmodos que se le presentan por pantalla.\n\n"); scanf("%c", &modo); } } return modo; } //Metodo que solicita al usuario la dificultad char pedirDificultad() { char dificultad = ' '; getchar(); while (dificultad != 'F' && dificultad != 'D') { printf("Que dificultad desea para el juego? 
Facil (F), Dificil (D)\n"); fflush(stdin); scanf("%c", &dificultad); if (dificultad != 'F' && dificultad != 'D') { printf("Usted ha introducido una dificutad que no existe: -%c.\n", dificultad); printf("Por favor, introduzca uno de las siguientes dificultades que se le presentan por pantalla.\n\n"); scanf("%c", &dificultad); } } return dificultad; } //Metodo que solicita al usuario el numero de filas del tablero int pedirFilasTablero() { int filas; do { printf("\nIntroduzca las filas que tendra el tablero: "); fflush(stdin); scanf("%d", &filas); if (filas < 1) { printf("Introduzca un numero de filas correcto\n"); } } while (filas < 1); //El numero de filas tiene que ser un numero entero positivo return filas; } //Metodo para solicitar al usuario el numero de columnas del tablero int pedirColumnasTablero() { hipDeviceProp_t prop; hipGetDeviceProperties(&prop, 0); int columnas; do { printf("\nIntroduzca las columnas que tendra el tablero: "); fflush(stdin); scanf("%d", &columnas); if (columnas < 1) { printf("Introduzca un numero de columnas correcto\n"); } } while (columnas < 1); //El numero de filas tiene que ser un numero entero positivo return columnas; } // FUNCIONES DE IMPRESIN DEL TABLERO void imprimirMatriz(int *matriz, int *numVidas, int *puntos) { bool impar = false; printf("\nVIDAS: %d\n", *numVidas); printf("\nPUNTOS: %d", *puntos); printf(" RECORD: %d\n", punt_record); printf("\n\t|"); for (int i = 0; i < filas*columnas; i++) { if ((i + 1) % columnas == 0) { if (matriz[i] == 0) { impar = imprimirEspacios(matriz[i], matriz); if (impar) { printf(" "); } printf(" "); imprimirEspacios(matriz[i], matriz); if (i == (filas*columnas) - 1) { printf("|\n\t"); } else { printf("|\n\t|"); } } else { impar = imprimirEspacios(matriz[i], matriz); if (impar) { printf(" "); } printf("%d", matriz[i]); imprimirEspacios(matriz[i], matriz); if (i == (filas*columnas) - 1) { printf("|\n\t"); } else { printf("|\n\t|"); } } } else { if (matriz[i] == 0) { impar = imprimirEspacios(matriz[i], matriz); if (impar) { printf(" "); } printf(" "); imprimirEspacios(matriz[i], matriz); printf("|"); } else { impar = imprimirEspacios(matriz[i], matriz); if (impar) { printf(" "); } printf("%d", matriz[i]); imprimirEspacios(matriz[i], matriz); printf("|"); } } } printf("\n"); } bool imprimirEspacios(int x, int* matriz) { bool impar = false; int cifras = 0; int max = 0; int espacios; cifras = contNum(x); max = maxMatriz(matriz); espacios = contNum(max) - cifras; if (espacios % 2 != 0) { impar = true; } for (int i = 0; i < espacios / 2; i++) { printf(" "); } return impar; } int contNum(int num) { int contador = 0; while (num / 10 > 0) { num = num / 10; contador++; } return contador + 1; } int maxMatriz(int *matriz) { int max = 0; for (int i = 0; i < filas*columnas; i++) { if (max < matriz[i]) { max = matriz[i]; } } return max; } // KERNELS //Kernel que suma los elementos hacia la derecha __global__ void sumarElementosDerecha(int *puntos, int *matriz, int numFilas, int numColumnas, int* matriz_suma) { int columna = threadIdx.x + blockIdx.x * blockDim.x; int fila = threadIdx.y + blockIdx.y * blockDim.y; //Si la fila y la columna estan dentro de los limites if (fila < numFilas && columna < numColumnas) { bool suma = false; bool terminado = false; int numElementos = 1; int posElementoSuma; //Las posiciones se recorren en la misma fila int i = 1; bool primero = true; int pos = fila * numColumnas + columna; int valor = matriz[pos]; do { //Si est en el limite, si su valor es 0, o si el valor de la posicion a estudiar es de 
dintinto valor if (columna == numColumnas - 1 || matriz[pos] == 0 || (matriz[pos + i] != 0 && matriz[pos + i] != matriz[pos]) || pos > numFilas*numColumnas) { terminado = true; } else { //Si son de igual valor if (matriz[pos] == matriz[pos + i]) { //Si es el primer elemento con igual valor if (primero) { posElementoSuma = pos + i; primero = false; } numElementos = numElementos + 1; } //Si es la columna limite if ((pos + 1 + i) % numColumnas == 0) { terminado = true; } i++; } } while (terminado == false); //Si el numero de iguales es par if (numElementos % 2 == 0) { suma = true; } //Si el valor de la celda es distinto de 0, no suma y la posicion en la auxiliar es 0 if (valor != 0 && !suma && matriz_suma[pos] == 0) { matriz_suma[pos] = valor; } //Realiza la suma, la almacena en la matriz_suma y contabiliza los puntos if (suma) { matriz_suma[posElementoSuma] = matriz[posElementoSuma] + matriz[pos];//Se mete el valor a la matriz suma auxiliar puntos[pos] = matriz_suma[posElementoSuma]; } } } //Kernel que suma elementos hacia izquierda __global__ void sumarElementosIzquierda(int *puntos, int *matriz, int numFilas, int numColumnas, int* matriz_suma) { int columna = threadIdx.x + blockIdx.x * blockDim.x; int fila = threadIdx.y + blockIdx.y * blockDim.y; //Si la fila y la columna estan dentro de los limites if (fila < numFilas && columna < numColumnas) { bool suma = false; bool terminado = false; int numElementos = 1; int posElementoSuma; //Las posiciones se recorren en la misma fila int i = 1; bool primero = true; int pos = fila * numColumnas + columna; int valor = matriz[pos]; do { //Si est en el limite, si su valor es 0, o si el valor de la posicion a estudiar es de dintinto valor if (columna == 0 || matriz[pos] == 0 || (matriz[pos - i] != 0 && matriz[pos - i] != matriz[pos])) { terminado = true; } else { //Si son de igual valor if (matriz[pos] == matriz[pos - i]) { //Si es el primer elemento con igual valor if (primero) { posElementoSuma = pos - i; primero = false; } numElementos = numElementos + 1; } //Si es la columna limite if ((pos - i) % numColumnas == 0) { terminado = true; } i++; } } while (terminado == false); //Si el numero de iguales es par if (numElementos % 2 == 0) { suma = true; } //Si el valor de la celda es distinto de 0, no suma y la posicion en la auxiliar es 0 if (valor != 0 && !suma && matriz_suma[pos] == 0) { matriz_suma[pos] = valor; } //Realiza la suma, la almacena en la matriz_suma y contabiliza los puntos if (suma) { matriz_suma[posElementoSuma] = matriz[posElementoSuma] + matriz[pos];//Se mete el valor a la matriz suma auxiliar puntos[pos] = matriz_suma[posElementoSuma]; } } } //Kernel para sumar elementos hacia arriba __global__ void sumarElementosArriba(int *puntos, int *matriz, int numFilas, int numColumnas, int* matriz_suma) { int columna = threadIdx.x + blockIdx.x * blockDim.x; int fila = threadIdx.y + blockIdx.y * blockDim.y; //Si la fila y la columna estan dentro de los limites if (fila < numFilas && columna < numColumnas) { bool suma = false; bool terminado = false; int numElementos = 1; int posElementoSuma; int i = numColumnas; bool primero = true; int pos = fila * numColumnas + columna; int valor = matriz[pos]; do { //Si est en el limite, si su valor es 0, o si el valor de la posicion a estudiar es de dintinto valor if (fila == 0 || matriz[pos] == 0 || (matriz[pos - i] != 0 && matriz[pos - i] != matriz[pos])) { terminado = true; } else { //Si son de igual valor if (matriz[pos] == matriz[pos - i]) { //Si es el primer elemento con igual valor if (primero) { 
posElementoSuma = pos - i; primero = false; } numElementos = numElementos + 1; } //Si es la fila limite if ((pos - i) < numColumnas) { terminado = true; } i = i + numColumnas; } } while (terminado == false); //Si el numero de iguales es par if (numElementos % 2 == 0) { suma = true; } //Si el valor de la celda es distinto de 0, no suma y la posicion en la auxiliar es 0 if (valor != 0 && !suma && matriz_suma[pos] == 0) { matriz_suma[pos] = valor; } //Realiza la suma, la almacena en la matriz_suma y contabiliza los puntos if (suma) { matriz_suma[posElementoSuma] = matriz[posElementoSuma] + matriz[pos];//Se mete el valor a la matriz suma auxiliar puntos[pos] = matriz_suma[posElementoSuma]; } } } //Kernel para sumar los elementos hacia abajo __global__ void sumarElementosAbajo(int *puntos, int *matriz, int numFilas, int numColumnas, int* matriz_suma) { int columna = threadIdx.x + blockIdx.x * blockDim.x; int fila = threadIdx.y + blockIdx.y * blockDim.y; //Si la fila y la columna estan dentro de los limites if (fila < numFilas && columna < numColumnas) { bool suma = false; bool terminado = false; int numElementos = 1; int posElementoSuma; int i = numColumnas; bool primero = true; int pos = fila * numColumnas + columna; int valor = matriz[pos]; do { //Si est en el limite, si su valor es 0, o si el valor de la posicion a estudiar es de dintinto valor if (fila == numFilas - 1 || matriz[pos] == 0 || (matriz[pos + i] != 0 && matriz[pos + i] != matriz[pos])) { terminado = true; } else { //Si son de igual valor if (matriz[pos] == matriz[pos + i]) { //Si es el primer elemento con igual valor if (primero) { posElementoSuma = pos + i; primero = false; } numElementos = numElementos + 1; } //Si es la fila limite if ((pos + i) >= (numColumnas*(numFilas - 1))) { terminado = true; } i = i + numColumnas; } } while (terminado == false); //Si el numero de iguales es par if (numElementos % 2 == 0) { suma = true; } //Si el valor de la celda es distinto de 0, no suma y la posicion en la auxiliar es 0 if (valor != 0 && !suma && matriz_suma[pos] == 0) { matriz_suma[pos] = valor; } //Realiza la suma, la almacena en la matriz_suma y contabiliza los puntos if (suma) { matriz_suma[posElementoSuma] = matriz[posElementoSuma] + matriz[pos];//Se mete el valor a la matriz suma auxiliar puntos[pos] = matriz_suma[posElementoSuma]; } } } //Kernel que mueve elementos hacia la derecha __global__ void moverElementosDerecha(int *matriz, int numFilas, int numColumnas, int tesela, int* matriz_aux) { int columna = threadIdx.x + blockIdx.x * tesela; int fila = threadIdx.y + blockIdx.y * tesela; //Si la fila y la columna estan dentro de los limites if (fila < numFilas && columna < numColumnas) { bool mov = false; bool terminado = false; int numElementos = 0; int i = 1; //Las posiciones que recorren en la misma fila int pos = fila * numColumnas + columna; int valor = matriz[pos]; do { //Si el hilo est en la columna limite if (columna == numColumnas - 1) { terminado = true; } else { //Si el valor del hilo es 0 if (matriz[pos] == 0) { terminado = true; } else { //Si se encuentra un 0 if (matriz[pos + i] == 0) { numElementos = numElementos + 1; mov = true; } //Si la posicion a estudiar es el limite if (columna + i == numColumnas - 1) { terminado = true; } i++; } } } while (terminado == false); //Si el valor de la posicion del hilo es distinto de 0 y no se mueve if (valor != 0 && !mov) { matriz_aux[pos] = valor; } //Mueve el valor a la posicion correspondiente y la almacena en la matriz auxiliar if (mov) { matriz_aux[fila* numColumnas + 
columna + numElementos] = valor; } } } //Kernel que mueve elementos hacia la izquierda __global__ void moverElementosIzquierda(int *matriz, int numFilas, int numColumnas, int tesela, int* matriz_aux) { int columna = threadIdx.x + blockIdx.x * tesela; int fila = threadIdx.y + blockIdx.y * tesela; //Si la fila y la columna estas dentro de los limites if (fila < numFilas && columna < numColumnas) { int pos = fila * numColumnas + columna; bool mov = false; bool terminado = false; int valor = matriz[pos]; int numElementos = 0; int i = 1; //Las posiciones que recorren en la misma fila do { if (pos % numColumnas == 0) {//Si el hilo est en la columna limite terminado = true; } else { //Si el valor del hilo es 0 if (matriz[pos] == 0) { terminado = true; } else { //Si se encuentra un 0 if (matriz[pos - i] == 0) { numElementos = numElementos + 1; } //Si la posicion a estudiar es el limite if ((pos - i) % numColumnas == 0) { if (numElementos > 0) { mov = true; } terminado = true; } } i++; } } while (terminado == false); //Si el valor de la posicion del hilo es distinto de 0 y no se mueve if (valor != 0 && !mov) { matriz_aux[pos] = valor; } //Mueve el valor a la posicion correspondiente y la almacena en la matriz auxiliar if (mov) { matriz_aux[pos - numElementos] = valor; } } } //Kernel que mueve elementos hacia arriba __global__ void moverElementosArriba(int *matriz, int numFilas, int numColumnas, int tesela, int* matriz_aux) { //int pos = blockIdx.x*blockDim.x + threadIdx.x; int columna = threadIdx.x + blockIdx.x * tesela; int fila = threadIdx.y + blockIdx.y * tesela; //Si la fila y la columna estan dentro de los limites if (fila < numFilas && columna < numColumnas) { int pos = fila * numColumnas + columna; bool mov = false; bool terminado = false; int valor = matriz[pos]; int numElementos = 0; int i = numColumnas; //Las posiciones se recorren mediante el numero de columnas do { //Si el hilo est en la fila limite if (pos < numColumnas) { terminado = true; } else { //Si el valor del hilo es 0 if (matriz[pos] == 0) { terminado = true; } else { //Si se encuentra un 0 if (matriz[pos - i] == 0) { numElementos = numElementos + 1; } //Si la posicion a estudiar es el limite if ((pos - i) < numColumnas) { if (numElementos > 0) { mov = true; } terminado = true; } } i = i + numColumnas; } } while (terminado == false); //Si el valor de la posicion del hilo es distinto de 0 y no se mueve if (valor != 0 && !mov) { matriz_aux[pos] = valor; } //Mueve el valor a la posicion correspondiente y la almacena en la matriz auxiliar if (mov) { matriz_aux[pos - (numElementos * numColumnas)] = valor; } } } //Kernel que mueve elemenetos hacia abajo __global__ void moverElementosAbajo(int *matriz, int numFilas, int numColumnas, int tesela, int* matriz_aux) { int columna = threadIdx.x + blockIdx.x * tesela; int fila = threadIdx.y + blockIdx.y * tesela; //Si la fila o la columna esta dentro de los limites if (fila < numFilas && columna < numColumnas) { int pos = fila * numColumnas + columna; int valor = matriz[pos]; bool mov = false; bool terminado = false; int numElementos = 0; int i = numColumnas; //Las posiciones se recorren mediante el numero de columnas do { //Si el hilo est en la fila limite if (pos >= numColumnas * (numFilas - 1)) { terminado = true; } else { //Si el valor del hilo es 0 if (matriz[pos] == 0) { terminado = true; } else { //Si se encuentra un 0 if (matriz[pos + i] == 0) { numElementos = numElementos + 1; } //Si la posicion a estudiar es el limite if ((pos + i) >= numColumnas * (numFilas - 1)) { if 
(numElementos > 0) { mov = true; } terminado = true; } } i = i + numColumnas; } } while (terminado == false); //Si el valor de la posicion del hilo es distinto de 0 y no se mueve if (valor != 0 && !mov) { matriz_aux[pos] = valor; } //Mueve el valor a la posicion correspondiente y la almacena en la matriz auxiliar if (mov) { matriz_aux[pos + (numElementos * numColumnas)] = valor; } } } //Metodo que suma los puntos de cada celda sumada int sumaPuntAux(int tamao, int* punt) { int suma = 0; for (int i = 0; i < tamao; i++) { suma += punt[i]; } return suma; } //Funcion que calcula el minimo comun multiplo de las teselas y el numero de hilos del tablero int mcm(int a, int b) { int mult, mult2, multiplo = 0; int i, j; for (i = a; i > 1; i--) { if (a%i == 0) mult = i; for (j = b; j > 1; j--) { if (b%j == 0) mult2 = j; if (mult == mult2) multiplo = mult; } } if (multiplo == 0) multiplo = a * b; return multiplo; } //Funcion que devuelve el numero de hilos por bloque mas optimo int hilosBloque(int size) { //El numero de hilos en la tesela que seleccionemos ser de una de estas posibilidades int hilosBloque[3] = { 64, 256, 1024 }; int hilos = 64, min, n; //Si el n total de hilos es alguno de estos lo dividimos para conseguir una tesela mas optima if (size == 64 || size == 256 || size == 1024) { hilos = size / 4; } else { min = mcm(hilosBloque[0], size); for (int i = 1; i < 3; i++) { n = mcm(hilosBloque[i], size); if (n < min) { min = n; hilos = hilosBloque[i]; } } } return hilos; } //Funcion que simula el juego void jugar(int *matriz, int* numVidas, int* puntos) { int *dev_matriz; int *dev_puntos; int *dev_matrizAux; int *dev_matrizSuma; char movimiento = ' '; int numRan; bool terminado; int* puntos_aux = generarMatriz(); //Vector que almacena temporalmente los puntos sumados de las celdas int* matriz_aux = generarMatriz(); //Matriz auxiliar para realizar los movimientos int* matriz_suma = generarMatriz(); //Matriz auxiliar que almacena los elementos sumados al mover time_t t; srand((unsigned)time(&t)); while (*numVidas > 0) { terminado = false; imprimirMatriz(matriz, numVidas, puntos); while (!terminado) { if (modo == 'M') { printf("Pulse una flecha...Pulse g para guardar"); bool bucle = true; while (bucle) { movimiento = _getch(); switch (movimiento) { case 72: movimiento = 'W'; //Arriba bucle = false; break; case 80: movimiento = 'S'; //Abajo bucle = false; break; case 75: movimiento = 'A'; //Izquierda bucle = false; break; case 77: movimiento = 'D'; //Derecha bucle = false; break; case 103: //SI PULSAS G GUARDA LA PARTIDA guardarPartida(matriz, numVidas, puntos); break; } } } else { numRan = rand() % 4; switch (numRan) { case 0: movimiento = 'W'; //Arriba break; case 1: movimiento = 'A'; //Izquierda break; case 2: movimiento = 'S'; //Abajo break; case 3: movimiento = 'D'; //Derecha break; } } //Inicializamos a 0 los elementos del vector de puntos for (int i = 0; i < filas*columnas; i++) { puntos_aux[i] = 0; } //Inicializamos a 0 los elementos de la matriz auxiliar for (int i = 0; i < filas*columnas; i++) { matriz_aux[i] = 0; } //Reservamos posicione de memoria y copiamos de host a device hipMalloc((void**)&dev_matriz, filas*columnas * sizeof(int)); hipMemcpy(dev_matriz, matriz, filas*columnas * sizeof(int), hipMemcpyHostToDevice); hipMalloc((void**)&dev_puntos, filas*columnas * sizeof(int)); hipMemcpy(dev_puntos, puntos_aux, filas*columnas * sizeof(int), hipMemcpyHostToDevice); hipMalloc((void**)&dev_matrizAux, filas*columnas * sizeof(int)); hipMemcpy(dev_matrizAux, matriz_aux, filas*columnas * 
sizeof(int), hipMemcpyHostToDevice); hipMalloc((void**)&dev_matrizSuma, filas*columnas * sizeof(int)); hipMemcpy(dev_matrizSuma, matriz_suma, filas*columnas * sizeof(int), hipMemcpyHostToDevice); //Calculamos el numero de hilos por bloque mas optimo int hilospBloque = hilosBloque(filas*columnas); //La tesela es la raiz cuadrada del numero de hilos por bloque int tesela = (int)sqrt(hilospBloque); //Tamao del grid en bloques dim3 dimGrid(columnas + tesela - 1 / tesela, filas + tesela - 1 / tesela); //Tamao de los bloques en hilos dim3 dimBlock(tesela, tesela); //Llamamos a los kernels correspondientes dependiendo del movimiento switch (movimiento) { case 'W': printf("\n\nARRIBA"); sumarElementosArriba << < dimGrid, dimBlock >> > (dev_puntos, dev_matriz, filas, columnas, dev_matrizSuma); moverElementosArriba << < dimGrid, dimBlock >> > (dev_matrizSuma, filas, columnas, tesela, dev_matrizAux); break; case 'A': printf("\n\nIZQUIERDA"); sumarElementosIzquierda << < dimGrid, dimBlock >> > (dev_puntos, dev_matriz, filas, columnas, dev_matrizSuma); moverElementosIzquierda << < dimGrid, dimBlock >> > (dev_matrizSuma, filas, columnas, tesela, dev_matrizAux); break; case 'S': printf("\n\nABAJO"); sumarElementosAbajo << < dimGrid, dimBlock >> > (dev_puntos, dev_matriz, filas, columnas, dev_matrizSuma); moverElementosAbajo << < dimGrid, dimBlock >> > (dev_matrizSuma, filas, columnas, tesela, dev_matrizAux); break; case 'D': printf("\n\nDERECHA"); sumarElementosDerecha << < dimGrid, dimBlock >> > (dev_puntos, dev_matriz, filas, columnas, dev_matrizSuma); moverElementosDerecha << < dimGrid, dimBlock >> > (dev_matrizSuma, filas, columnas, tesela, dev_matrizAux); break; } //Recuperamos los datos del device hipMemcpy(matriz, dev_matriz, filas*columnas * sizeof(int), hipMemcpyDeviceToHost); hipMemcpy(matriz_aux, dev_matrizAux, filas*columnas * sizeof(int), hipMemcpyDeviceToHost); hipMemcpy(puntos_aux, dev_puntos, filas*columnas * sizeof(int), hipMemcpyDeviceToHost); //Se suman los puntos *puntos += sumaPuntAux(filas*columnas, puntos_aux); //Si la puntuacion es mayor que el record if (*puntos > punt_record) { punt_record = *puntos; } //Almacenamos en el tablero los elementos de la matriz auxiliar for (int k = 0; k < filas*columnas; k++) { matriz[k] = matriz_aux[k]; } terminado = rellenarMatriz(matriz); imprimirMatriz(matriz, numVidas, puntos); hipFree(dev_matriz); //Si no se pueden meter tantas semillas que establece el modo seleccionado if (terminado) { *numVidas = *numVidas - 1; printf("\t\tGAME OVER. 
Pulsa ENTER"); getchar(); } } //Liberamos memoria free(matriz); //Si todavia quedan vidas if (*numVidas > 0) { matriz = generarMatriz();//Se genera otro tablero rellenarMatriz(matriz); } system("cls"); } system("cls"); } //Funcion para cargar una partida guardada en el txt guardado void cargarPartida() { leer = fopen("guardado.txt", "r"); //Punteros; int vida; int *numVidas = NULL; numVidas = &vida; int puntos; int *numPuntos = NULL; numPuntos = &puntos; //leer variables del txt fscanf(leer, "%d", &filas); printf("\nFILAS: %d", filas); fscanf(leer, "%d", &columnas); printf("\nCOLUMNAS: %d", columnas); fscanf(leer, "%hhd", &dificultad); printf("\nDIFICULTAD: %c", dificultad); fscanf(leer, "%hhd", &modo); printf("\nMODO: %c", modo); fscanf(leer, "%d", &vida); printf("\nNUMERO DE VIDAS: %d", vida); fscanf(leer, "%d", &puntos); printf("\nPUNTOS: %d", puntos); int* matriz = (int*)malloc(filas*columnas * sizeof(int)); for (int i = 0; i < filas*columnas; i++) { fscanf(leer, "%d", &matriz[i]); } cargarRecord();//Cargamos puntuacion record jugar(matriz, numVidas, numPuntos); } //Metodo para guardar la partida en txt void guardarPartida(int* matriz, int* numVidas, int* puntos) { doc = fopen("guardado.txt", "w"); fprintf(doc, "%i \n", filas); fprintf(doc, "%i \n", columnas); fprintf(doc, "%i \n", dificultad); fprintf(doc, "%i \n", modo); fprintf(doc, "%i \n", *numVidas); fprintf(doc, "%i \n", *puntos); for (int i = 0; i < (filas*columnas); i++) { fprintf(doc, "%i ", matriz[i]); } fclose(doc); printf("\n--GUARDADO--\n"); } //Metodo para cargar record void cargarRecord() { leer = fopen("record.txt", "r"); fscanf(leer, "%d", &punt_record); } //Metodo para guardar record void guardarRecord() { doc = fopen("record.txt", "w"); fprintf(doc, "%i \n", punt_record); fclose(doc); printf("\n--GUARDADO RECORD--\n"); }
f450c2b6aa21051b404c72a08892c6aa2ec3041e.cu
#include "../common/book.h" #include "cuda_runtime.h" #include <stdio.h> #include <stdlib.h> #include <cuda.h> #include "device_launch_parameters.h" #include <conio.h> #include <Windows.h> #include <math.h> #include <time.h> //Autores: Sanz Sacristán, Sergio y Zamorano Ortega, Álvaro FILE *doc; FILE *leer; int columnas; int filas; char dificultad; char modo; int punt_record; char pedirModo(); char pedirDificultad(); int pedirFilasTablero(); int pedirColumnasTablero(); int* generarMatriz(); void imprimirMatriz(int *matriz, int *numVidas, int *puntos); bool imprimirEspacios(int x, int* matriz); int contNum(int num); int maxMatriz(int *matriz); bool rellenarMatriz(int* matriz); int recuento(int* matriz); char comprobarPropiedades(); void jugar(int *matriz, int* numVidas, int* puntos); void cargarPartida(); void guardarPartida(int* matriz, int* numVidas, int* puntos); int sumaPuntAux(int tamaño, int* punt); void guardarRecord(); void cargarRecord(); int hilosBloque(int size); int mcm(int a, int b); int main(void) { int *matriz; char cargarP; //Funciones printf("Quieres cargar una partida anterior o empezar de nuevo ? (s: si | n : no)\n"); fflush(stdin); scanf("%c", &cargarP); while (cargarP != 's' && cargarP != 'n') { printf("Introduce un valor valido para iniciar el juego\n"); scanf("%c", &cargarP); } //Si no cargamos partida if (cargarP == 'n') { int vida = 5; int *numVidas; numVidas = &vida; int numPuntos = 0; int *puntos; puntos = &numPuntos; modo = pedirModo(); //Pedimos modo dificultad = pedirDificultad(); //Pedimos dificultad filas = pedirFilasTablero(); //Pedimos filas columnas = pedirColumnasTablero(); //Pedimos columnas printf("\nLos datos introducidos por el usuario son: %c %c %d %d\n", modo, dificultad, filas, columnas); char error = comprobarPropiedades(); if (error == 'T') { //Si al comprobar propiedades nos da error goto Error; } matriz = generarMatriz(); //Generamos la matriz del tablero rellenarMatriz(matriz); //La rellenamos de semillas cargarRecord(); //Cargamos la puntuacion record getchar(); getchar(); system("cls"); //Procedimiento para jugar al juego jugar(matriz, numVidas, puntos); //Al terminar de jugar, guardamos el record guardarRecord(); printf("\n - - - - - - - - - - - - - - - - - - - - -"); printf("\n - - - - - - JUEGO TERMINADO - - - - - - "); printf("\n - - - - - - - - - - - - - - - - - - - - - "); } else { //cargamos la partida cargarPartida(); guardarRecord(); printf("\n - - - - - - - - - - - - - - - - - - - - -"); printf("\n - - - - - - JUEGO TERMINADO - - - - - - "); printf("\n - - - - - - - - - - - - - - - - - - - - - "); } Error: getchar(); } //Funcion que devuelve un error si las dimensiones de la martiz son demasiado grandes para la grafica char comprobarPropiedades() { cudaDeviceProp prop; char error = 'F'; int count; size_t globalMem; HANDLE_ERROR(cudaGetDeviceCount(&count)); for (int i = 0; i < count; i++) { HANDLE_ERROR(cudaGetDeviceProperties(&prop, i)); globalMem = prop.totalGlobalMem; //Si el tamaño de la matriz supera las limitaciones de capacidad if ((filas*columnas * sizeof(int)) >= globalMem) { printf("La matriz solicitada ocupa %zd y excede la capacidad de memoria global de tu tarjeta gráfica que es %zd \n", filas*columnas * sizeof(int), globalMem); error = 'T'; } } return error; } // GENERAR FUNCIONES BASICAS PARA EL TABLERO //Generar matriz a 0 int *generarMatriz() { int* matriz = (int*)malloc(filas*columnas * sizeof(int)); for (int i = 0; i < filas*columnas; i++) { matriz[i] = 0; } return matriz; } //Rellenar la matriz bool 
rellenarMatriz(int* matriz) { bool terminado = false; int numSemillas; int numAleatorio; int random; time_t t; //Iniciamos el modo aleatorio srand((unsigned)time(&t)); //Si la dificultad es facil if (dificultad == 'F') { numSemillas = 15; //Introducimos 15 semillas if (recuento(matriz) < numSemillas) { terminado = true; } else { int posiblesNum[] = { 2, 4,8 }; //Seleccionamos uno de estos números en cada semilla numAleatorio = 3; while (numSemillas > 0 && !terminado) { random = rand() % (filas*columnas); if (matriz[random] == 0) { matriz[random] = posiblesNum[rand() % numAleatorio]; //Añadimos la nueva semilla en una posicion aleatoria numSemillas = numSemillas - 1; } } //Si hay menos espacios libres que 15 game over if (recuento(matriz) < 15) { terminado = true; } } } //Si la dificultad es dificil else { numSemillas = 8; //Introducimos 8 semillas if (recuento(matriz) < numSemillas) { terminado = true; } else { int posiblesNum[] = { 2, 4 }; //Seleccionamos uno de estos numeros en cada semilla numAleatorio = 2; while (numSemillas > 0 && !terminado) { random = rand() % (filas*columnas); if (matriz[random] == 0) { matriz[random] = posiblesNum[rand() % numAleatorio]; //Añadimos la nueva semilla en una posicion aleatoria numSemillas = numSemillas - 1; } } //Si hay menos espacios libres que 8 game over if (recuento(matriz) < 8) { terminado = true; } } } return terminado; } //Cuenta el nº de 0s en la matriz int recuento(int* matriz) { int recuento = 0; for (int i = 0; i < filas*columnas; i++) { if (matriz[i] == 0) { recuento = recuento + 1; } } return recuento; } //Metodo que solicita al usuario el modo char pedirModo() { char modo = ' '; getchar(); while (modo != 'M' && modo != 'A') { printf("Que modo desea para el juego? Automatico (A), Manual (M)\n"); fflush(stdin); scanf("%c", &modo); if (modo != 'M' && modo != 'A') { printf("Usted ha introducido un modo que no existe: -%c.\n", modo); printf("Por favor, introduzca uno de los siguientes dmodos que se le presentan por pantalla.\n\n"); scanf("%c", &modo); } } return modo; } //Metodo que solicita al usuario la dificultad char pedirDificultad() { char dificultad = ' '; getchar(); while (dificultad != 'F' && dificultad != 'D') { printf("Que dificultad desea para el juego? 
Facil (F), Dificil (D)\n"); fflush(stdin); scanf("%c", &dificultad); if (dificultad != 'F' && dificultad != 'D') { printf("Usted ha introducido una dificutad que no existe: -%c.\n", dificultad); printf("Por favor, introduzca uno de las siguientes dificultades que se le presentan por pantalla.\n\n"); scanf("%c", &dificultad); } } return dificultad; } //Metodo que solicita al usuario el numero de filas del tablero int pedirFilasTablero() { int filas; do { printf("\nIntroduzca las filas que tendra el tablero: "); fflush(stdin); scanf("%d", &filas); if (filas < 1) { printf("Introduzca un numero de filas correcto\n"); } } while (filas < 1); //El numero de filas tiene que ser un numero entero positivo return filas; } //Metodo para solicitar al usuario el numero de columnas del tablero int pedirColumnasTablero() { cudaDeviceProp prop; cudaGetDeviceProperties(&prop, 0); int columnas; do { printf("\nIntroduzca las columnas que tendra el tablero: "); fflush(stdin); scanf("%d", &columnas); if (columnas < 1) { printf("Introduzca un numero de columnas correcto\n"); } } while (columnas < 1); //El numero de filas tiene que ser un numero entero positivo return columnas; } // FUNCIONES DE IMPRESIÓN DEL TABLERO void imprimirMatriz(int *matriz, int *numVidas, int *puntos) { bool impar = false; printf("\nVIDAS: %d\n", *numVidas); printf("\nPUNTOS: %d", *puntos); printf(" RECORD: %d\n", punt_record); printf("\n\t|"); for (int i = 0; i < filas*columnas; i++) { if ((i + 1) % columnas == 0) { if (matriz[i] == 0) { impar = imprimirEspacios(matriz[i], matriz); if (impar) { printf(" "); } printf(" "); imprimirEspacios(matriz[i], matriz); if (i == (filas*columnas) - 1) { printf("|\n\t"); } else { printf("|\n\t|"); } } else { impar = imprimirEspacios(matriz[i], matriz); if (impar) { printf(" "); } printf("%d", matriz[i]); imprimirEspacios(matriz[i], matriz); if (i == (filas*columnas) - 1) { printf("|\n\t"); } else { printf("|\n\t|"); } } } else { if (matriz[i] == 0) { impar = imprimirEspacios(matriz[i], matriz); if (impar) { printf(" "); } printf(" "); imprimirEspacios(matriz[i], matriz); printf("|"); } else { impar = imprimirEspacios(matriz[i], matriz); if (impar) { printf(" "); } printf("%d", matriz[i]); imprimirEspacios(matriz[i], matriz); printf("|"); } } } printf("\n"); } bool imprimirEspacios(int x, int* matriz) { bool impar = false; int cifras = 0; int max = 0; int espacios; cifras = contNum(x); max = maxMatriz(matriz); espacios = contNum(max) - cifras; if (espacios % 2 != 0) { impar = true; } for (int i = 0; i < espacios / 2; i++) { printf(" "); } return impar; } int contNum(int num) { int contador = 0; while (num / 10 > 0) { num = num / 10; contador++; } return contador + 1; } int maxMatriz(int *matriz) { int max = 0; for (int i = 0; i < filas*columnas; i++) { if (max < matriz[i]) { max = matriz[i]; } } return max; } // KERNELS //Kernel que suma los elementos hacia la derecha __global__ void sumarElementosDerecha(int *puntos, int *matriz, int numFilas, int numColumnas, int* matriz_suma) { int columna = threadIdx.x + blockIdx.x * blockDim.x; int fila = threadIdx.y + blockIdx.y * blockDim.y; //Si la fila y la columna estan dentro de los limites if (fila < numFilas && columna < numColumnas) { bool suma = false; bool terminado = false; int numElementos = 1; int posElementoSuma; //Las posiciones se recorren en la misma fila int i = 1; bool primero = true; int pos = fila * numColumnas + columna; int valor = matriz[pos]; do { //Si está en el limite, si su valor es 0, o si el valor de la posicion a estudiar es de 
dintinto valor if (columna == numColumnas - 1 || matriz[pos] == 0 || (matriz[pos + i] != 0 && matriz[pos + i] != matriz[pos]) || pos > numFilas*numColumnas) { terminado = true; } else { //Si son de igual valor if (matriz[pos] == matriz[pos + i]) { //Si es el primer elemento con igual valor if (primero) { posElementoSuma = pos + i; primero = false; } numElementos = numElementos + 1; } //Si es la columna limite if ((pos + 1 + i) % numColumnas == 0) { terminado = true; } i++; } } while (terminado == false); //Si el numero de iguales es par if (numElementos % 2 == 0) { suma = true; } //Si el valor de la celda es distinto de 0, no suma y la posicion en la auxiliar es 0 if (valor != 0 && !suma && matriz_suma[pos] == 0) { matriz_suma[pos] = valor; } //Realiza la suma, la almacena en la matriz_suma y contabiliza los puntos if (suma) { matriz_suma[posElementoSuma] = matriz[posElementoSuma] + matriz[pos];//Se mete el valor a la matriz suma auxiliar puntos[pos] = matriz_suma[posElementoSuma]; } } } //Kernel que suma elementos hacia izquierda __global__ void sumarElementosIzquierda(int *puntos, int *matriz, int numFilas, int numColumnas, int* matriz_suma) { int columna = threadIdx.x + blockIdx.x * blockDim.x; int fila = threadIdx.y + blockIdx.y * blockDim.y; //Si la fila y la columna estan dentro de los limites if (fila < numFilas && columna < numColumnas) { bool suma = false; bool terminado = false; int numElementos = 1; int posElementoSuma; //Las posiciones se recorren en la misma fila int i = 1; bool primero = true; int pos = fila * numColumnas + columna; int valor = matriz[pos]; do { //Si está en el limite, si su valor es 0, o si el valor de la posicion a estudiar es de dintinto valor if (columna == 0 || matriz[pos] == 0 || (matriz[pos - i] != 0 && matriz[pos - i] != matriz[pos])) { terminado = true; } else { //Si son de igual valor if (matriz[pos] == matriz[pos - i]) { //Si es el primer elemento con igual valor if (primero) { posElementoSuma = pos - i; primero = false; } numElementos = numElementos + 1; } //Si es la columna limite if ((pos - i) % numColumnas == 0) { terminado = true; } i++; } } while (terminado == false); //Si el numero de iguales es par if (numElementos % 2 == 0) { suma = true; } //Si el valor de la celda es distinto de 0, no suma y la posicion en la auxiliar es 0 if (valor != 0 && !suma && matriz_suma[pos] == 0) { matriz_suma[pos] = valor; } //Realiza la suma, la almacena en la matriz_suma y contabiliza los puntos if (suma) { matriz_suma[posElementoSuma] = matriz[posElementoSuma] + matriz[pos];//Se mete el valor a la matriz suma auxiliar puntos[pos] = matriz_suma[posElementoSuma]; } } } //Kernel para sumar elementos hacia arriba __global__ void sumarElementosArriba(int *puntos, int *matriz, int numFilas, int numColumnas, int* matriz_suma) { int columna = threadIdx.x + blockIdx.x * blockDim.x; int fila = threadIdx.y + blockIdx.y * blockDim.y; //Si la fila y la columna estan dentro de los limites if (fila < numFilas && columna < numColumnas) { bool suma = false; bool terminado = false; int numElementos = 1; int posElementoSuma; int i = numColumnas; bool primero = true; int pos = fila * numColumnas + columna; int valor = matriz[pos]; do { //Si está en el limite, si su valor es 0, o si el valor de la posicion a estudiar es de dintinto valor if (fila == 0 || matriz[pos] == 0 || (matriz[pos - i] != 0 && matriz[pos - i] != matriz[pos])) { terminado = true; } else { //Si son de igual valor if (matriz[pos] == matriz[pos - i]) { //Si es el primer elemento con igual valor if (primero) { 
posElementoSuma = pos - i; primero = false; } numElementos = numElementos + 1; } //Si es la fila limite if ((pos - i) < numColumnas) { terminado = true; } i = i + numColumnas; } } while (terminado == false); //Si el numero de iguales es par if (numElementos % 2 == 0) { suma = true; } //Si el valor de la celda es distinto de 0, no suma y la posicion en la auxiliar es 0 if (valor != 0 && !suma && matriz_suma[pos] == 0) { matriz_suma[pos] = valor; } //Realiza la suma, la almacena en la matriz_suma y contabiliza los puntos if (suma) { matriz_suma[posElementoSuma] = matriz[posElementoSuma] + matriz[pos];//Se mete el valor a la matriz suma auxiliar puntos[pos] = matriz_suma[posElementoSuma]; } } } //Kernel para sumar los elementos hacia abajo __global__ void sumarElementosAbajo(int *puntos, int *matriz, int numFilas, int numColumnas, int* matriz_suma) { int columna = threadIdx.x + blockIdx.x * blockDim.x; int fila = threadIdx.y + blockIdx.y * blockDim.y; //Si la fila y la columna estan dentro de los limites if (fila < numFilas && columna < numColumnas) { bool suma = false; bool terminado = false; int numElementos = 1; int posElementoSuma; int i = numColumnas; bool primero = true; int pos = fila * numColumnas + columna; int valor = matriz[pos]; do { //Si está en el limite, si su valor es 0, o si el valor de la posicion a estudiar es de dintinto valor if (fila == numFilas - 1 || matriz[pos] == 0 || (matriz[pos + i] != 0 && matriz[pos + i] != matriz[pos])) { terminado = true; } else { //Si son de igual valor if (matriz[pos] == matriz[pos + i]) { //Si es el primer elemento con igual valor if (primero) { posElementoSuma = pos + i; primero = false; } numElementos = numElementos + 1; } //Si es la fila limite if ((pos + i) >= (numColumnas*(numFilas - 1))) { terminado = true; } i = i + numColumnas; } } while (terminado == false); //Si el numero de iguales es par if (numElementos % 2 == 0) { suma = true; } //Si el valor de la celda es distinto de 0, no suma y la posicion en la auxiliar es 0 if (valor != 0 && !suma && matriz_suma[pos] == 0) { matriz_suma[pos] = valor; } //Realiza la suma, la almacena en la matriz_suma y contabiliza los puntos if (suma) { matriz_suma[posElementoSuma] = matriz[posElementoSuma] + matriz[pos];//Se mete el valor a la matriz suma auxiliar puntos[pos] = matriz_suma[posElementoSuma]; } } } //Kernel que mueve elementos hacia la derecha __global__ void moverElementosDerecha(int *matriz, int numFilas, int numColumnas, int tesela, int* matriz_aux) { int columna = threadIdx.x + blockIdx.x * tesela; int fila = threadIdx.y + blockIdx.y * tesela; //Si la fila y la columna estan dentro de los limites if (fila < numFilas && columna < numColumnas) { bool mov = false; bool terminado = false; int numElementos = 0; int i = 1; //Las posiciones que recorren en la misma fila int pos = fila * numColumnas + columna; int valor = matriz[pos]; do { //Si el hilo está en la columna limite if (columna == numColumnas - 1) { terminado = true; } else { //Si el valor del hilo es 0 if (matriz[pos] == 0) { terminado = true; } else { //Si se encuentra un 0 if (matriz[pos + i] == 0) { numElementos = numElementos + 1; mov = true; } //Si la posicion a estudiar es el limite if (columna + i == numColumnas - 1) { terminado = true; } i++; } } } while (terminado == false); //Si el valor de la posicion del hilo es distinto de 0 y no se mueve if (valor != 0 && !mov) { matriz_aux[pos] = valor; } //Mueve el valor a la posicion correspondiente y la almacena en la matriz auxiliar if (mov) { matriz_aux[fila* numColumnas + 
columna + numElementos] = valor; } } } //Kernel que mueve elementos hacia la izquierda __global__ void moverElementosIzquierda(int *matriz, int numFilas, int numColumnas, int tesela, int* matriz_aux) { int columna = threadIdx.x + blockIdx.x * tesela; int fila = threadIdx.y + blockIdx.y * tesela; //Si la fila y la columna estas dentro de los limites if (fila < numFilas && columna < numColumnas) { int pos = fila * numColumnas + columna; bool mov = false; bool terminado = false; int valor = matriz[pos]; int numElementos = 0; int i = 1; //Las posiciones que recorren en la misma fila do { if (pos % numColumnas == 0) {//Si el hilo está en la columna limite terminado = true; } else { //Si el valor del hilo es 0 if (matriz[pos] == 0) { terminado = true; } else { //Si se encuentra un 0 if (matriz[pos - i] == 0) { numElementos = numElementos + 1; } //Si la posicion a estudiar es el limite if ((pos - i) % numColumnas == 0) { if (numElementos > 0) { mov = true; } terminado = true; } } i++; } } while (terminado == false); //Si el valor de la posicion del hilo es distinto de 0 y no se mueve if (valor != 0 && !mov) { matriz_aux[pos] = valor; } //Mueve el valor a la posicion correspondiente y la almacena en la matriz auxiliar if (mov) { matriz_aux[pos - numElementos] = valor; } } } //Kernel que mueve elementos hacia arriba __global__ void moverElementosArriba(int *matriz, int numFilas, int numColumnas, int tesela, int* matriz_aux) { //int pos = blockIdx.x*blockDim.x + threadIdx.x; int columna = threadIdx.x + blockIdx.x * tesela; int fila = threadIdx.y + blockIdx.y * tesela; //Si la fila y la columna estan dentro de los limites if (fila < numFilas && columna < numColumnas) { int pos = fila * numColumnas + columna; bool mov = false; bool terminado = false; int valor = matriz[pos]; int numElementos = 0; int i = numColumnas; //Las posiciones se recorren mediante el numero de columnas do { //Si el hilo está en la fila limite if (pos < numColumnas) { terminado = true; } else { //Si el valor del hilo es 0 if (matriz[pos] == 0) { terminado = true; } else { //Si se encuentra un 0 if (matriz[pos - i] == 0) { numElementos = numElementos + 1; } //Si la posicion a estudiar es el limite if ((pos - i) < numColumnas) { if (numElementos > 0) { mov = true; } terminado = true; } } i = i + numColumnas; } } while (terminado == false); //Si el valor de la posicion del hilo es distinto de 0 y no se mueve if (valor != 0 && !mov) { matriz_aux[pos] = valor; } //Mueve el valor a la posicion correspondiente y la almacena en la matriz auxiliar if (mov) { matriz_aux[pos - (numElementos * numColumnas)] = valor; } } } //Kernel que mueve elemenetos hacia abajo __global__ void moverElementosAbajo(int *matriz, int numFilas, int numColumnas, int tesela, int* matriz_aux) { int columna = threadIdx.x + blockIdx.x * tesela; int fila = threadIdx.y + blockIdx.y * tesela; //Si la fila o la columna esta dentro de los limites if (fila < numFilas && columna < numColumnas) { int pos = fila * numColumnas + columna; int valor = matriz[pos]; bool mov = false; bool terminado = false; int numElementos = 0; int i = numColumnas; //Las posiciones se recorren mediante el numero de columnas do { //Si el hilo está en la fila limite if (pos >= numColumnas * (numFilas - 1)) { terminado = true; } else { //Si el valor del hilo es 0 if (matriz[pos] == 0) { terminado = true; } else { //Si se encuentra un 0 if (matriz[pos + i] == 0) { numElementos = numElementos + 1; } //Si la posicion a estudiar es el limite if ((pos + i) >= numColumnas * (numFilas - 1)) { if 
(numElementos > 0) { mov = true; } terminado = true; } } i = i + numColumnas; } } while (terminado == false); //Si el valor de la posicion del hilo es distinto de 0 y no se mueve if (valor != 0 && !mov) { matriz_aux[pos] = valor; } //Mueve el valor a la posicion correspondiente y la almacena en la matriz auxiliar if (mov) { matriz_aux[pos + (numElementos * numColumnas)] = valor; } } } //Metodo que suma los puntos de cada celda sumada int sumaPuntAux(int tamaño, int* punt) { int suma = 0; for (int i = 0; i < tamaño; i++) { suma += punt[i]; } return suma; } //Funcion que calcula el minimo comun multiplo de las teselas y el numero de hilos del tablero int mcm(int a, int b) { int mult, mult2, multiplo = 0; int i, j; for (i = a; i > 1; i--) { if (a%i == 0) mult = i; for (j = b; j > 1; j--) { if (b%j == 0) mult2 = j; if (mult == mult2) multiplo = mult; } } if (multiplo == 0) multiplo = a * b; return multiplo; } //Funcion que devuelve el numero de hilos por bloque mas optimo int hilosBloque(int size) { //El numero de hilos en la tesela que seleccionemos será de una de estas posibilidades int hilosBloque[3] = { 64, 256, 1024 }; int hilos = 64, min, n; //Si el nº total de hilos es alguno de estos lo dividimos para conseguir una tesela mas optima if (size == 64 || size == 256 || size == 1024) { hilos = size / 4; } else { min = mcm(hilosBloque[0], size); for (int i = 1; i < 3; i++) { n = mcm(hilosBloque[i], size); if (n < min) { min = n; hilos = hilosBloque[i]; } } } return hilos; } //Funcion que simula el juego void jugar(int *matriz, int* numVidas, int* puntos) { int *dev_matriz; int *dev_puntos; int *dev_matrizAux; int *dev_matrizSuma; char movimiento = ' '; int numRan; bool terminado; int* puntos_aux = generarMatriz(); //Vector que almacena temporalmente los puntos sumados de las celdas int* matriz_aux = generarMatriz(); //Matriz auxiliar para realizar los movimientos int* matriz_suma = generarMatriz(); //Matriz auxiliar que almacena los elementos sumados al mover time_t t; srand((unsigned)time(&t)); while (*numVidas > 0) { terminado = false; imprimirMatriz(matriz, numVidas, puntos); while (!terminado) { if (modo == 'M') { printf("Pulse una flecha...Pulse g para guardar"); bool bucle = true; while (bucle) { movimiento = _getch(); switch (movimiento) { case 72: movimiento = 'W'; //Arriba bucle = false; break; case 80: movimiento = 'S'; //Abajo bucle = false; break; case 75: movimiento = 'A'; //Izquierda bucle = false; break; case 77: movimiento = 'D'; //Derecha bucle = false; break; case 103: //SI PULSAS G GUARDA LA PARTIDA guardarPartida(matriz, numVidas, puntos); break; } } } else { numRan = rand() % 4; switch (numRan) { case 0: movimiento = 'W'; //Arriba break; case 1: movimiento = 'A'; //Izquierda break; case 2: movimiento = 'S'; //Abajo break; case 3: movimiento = 'D'; //Derecha break; } } //Inicializamos a 0 los elementos del vector de puntos for (int i = 0; i < filas*columnas; i++) { puntos_aux[i] = 0; } //Inicializamos a 0 los elementos de la matriz auxiliar for (int i = 0; i < filas*columnas; i++) { matriz_aux[i] = 0; } //Reservamos posicione de memoria y copiamos de host a device cudaMalloc((void**)&dev_matriz, filas*columnas * sizeof(int)); cudaMemcpy(dev_matriz, matriz, filas*columnas * sizeof(int), cudaMemcpyHostToDevice); cudaMalloc((void**)&dev_puntos, filas*columnas * sizeof(int)); cudaMemcpy(dev_puntos, puntos_aux, filas*columnas * sizeof(int), cudaMemcpyHostToDevice); cudaMalloc((void**)&dev_matrizAux, filas*columnas * sizeof(int)); cudaMemcpy(dev_matrizAux, matriz_aux, 
filas*columnas * sizeof(int), cudaMemcpyHostToDevice);
			cudaMalloc((void**)&dev_matrizSuma, filas*columnas * sizeof(int));
			cudaMemcpy(dev_matrizSuma, matriz_suma, filas*columnas * sizeof(int), cudaMemcpyHostToDevice);
			//Compute the most suitable number of threads per block
			int hilospBloque = hilosBloque(filas*columnas);
			//The tile width is the square root of the number of threads per block
			int tesela = (int)sqrt(hilospBloque);
			//Grid size in blocks (ceiling division so every cell of the board is covered)
			dim3 dimGrid((columnas + tesela - 1) / tesela, (filas + tesela - 1) / tesela);
			//Block size in threads
			dim3 dimBlock(tesela, tesela);
			//Launch the kernels that correspond to the chosen move
			switch (movimiento) {
			case 'W':
				printf("\n\nARRIBA");
				sumarElementosArriba <<< dimGrid, dimBlock >>> (dev_puntos, dev_matriz, filas, columnas, dev_matrizSuma);
				moverElementosArriba <<< dimGrid, dimBlock >>> (dev_matrizSuma, filas, columnas, tesela, dev_matrizAux);
				break;
			case 'A':
				printf("\n\nIZQUIERDA");
				sumarElementosIzquierda <<< dimGrid, dimBlock >>> (dev_puntos, dev_matriz, filas, columnas, dev_matrizSuma);
				moverElementosIzquierda <<< dimGrid, dimBlock >>> (dev_matrizSuma, filas, columnas, tesela, dev_matrizAux);
				break;
			case 'S':
				printf("\n\nABAJO");
				sumarElementosAbajo <<< dimGrid, dimBlock >>> (dev_puntos, dev_matriz, filas, columnas, dev_matrizSuma);
				moverElementosAbajo <<< dimGrid, dimBlock >>> (dev_matrizSuma, filas, columnas, tesela, dev_matrizAux);
				break;
			case 'D':
				printf("\n\nDERECHA");
				sumarElementosDerecha <<< dimGrid, dimBlock >>> (dev_puntos, dev_matriz, filas, columnas, dev_matrizSuma);
				moverElementosDerecha <<< dimGrid, dimBlock >>> (dev_matrizSuma, filas, columnas, tesela, dev_matrizAux);
				break;
			}
			//Copy the results back from the device
			cudaMemcpy(matriz, dev_matriz, filas*columnas * sizeof(int), cudaMemcpyDeviceToHost);
			cudaMemcpy(matriz_aux, dev_matrizAux, filas*columnas * sizeof(int), cudaMemcpyDeviceToHost);
			cudaMemcpy(puntos_aux, dev_puntos, filas*columnas * sizeof(int), cudaMemcpyDeviceToHost);
			//Add up the points scored by the merged cells
			*puntos += sumaPuntAux(filas*columnas, puntos_aux);
			//If the score beats the record, update the record
			if (*puntos > punt_record) { punt_record = *puntos; }
			//Store the elements of the auxiliary matrix back into the board
			for (int k = 0; k < filas*columnas; k++) { matriz[k] = matriz_aux[k]; }
			terminado = rellenarMatriz(matriz);
			imprimirMatriz(matriz, numVidas, puntos);
			//Release every device buffer allocated for this move
			cudaFree(dev_matriz);
			cudaFree(dev_puntos);
			cudaFree(dev_matrizAux);
			cudaFree(dev_matrizSuma);
			//If the selected mode cannot place as many seeds as it requires, a life is lost
			if (terminado) { *numVidas = *numVidas - 1; printf("\t\tGAME OVER. 
Pulsa ENTER"); getchar(); } } //Liberamos memoria free(matriz); //Si todavia quedan vidas if (*numVidas > 0) { matriz = generarMatriz();//Se genera otro tablero rellenarMatriz(matriz); } system("cls"); } system("cls"); } //Funcion para cargar una partida guardada en el txt guardado void cargarPartida() { leer = fopen("guardado.txt", "r"); //Punteros; int vida; int *numVidas = NULL; numVidas = &vida; int puntos; int *numPuntos = NULL; numPuntos = &puntos; //leer variables del txt fscanf(leer, "%d", &filas); printf("\nFILAS: %d", filas); fscanf(leer, "%d", &columnas); printf("\nCOLUMNAS: %d", columnas); fscanf(leer, "%hhd", &dificultad); printf("\nDIFICULTAD: %c", dificultad); fscanf(leer, "%hhd", &modo); printf("\nMODO: %c", modo); fscanf(leer, "%d", &vida); printf("\nNUMERO DE VIDAS: %d", vida); fscanf(leer, "%d", &puntos); printf("\nPUNTOS: %d", puntos); int* matriz = (int*)malloc(filas*columnas * sizeof(int)); for (int i = 0; i < filas*columnas; i++) { fscanf(leer, "%d", &matriz[i]); } cargarRecord();//Cargamos puntuacion record jugar(matriz, numVidas, numPuntos); } //Metodo para guardar la partida en txt void guardarPartida(int* matriz, int* numVidas, int* puntos) { doc = fopen("guardado.txt", "w"); fprintf(doc, "%i \n", filas); fprintf(doc, "%i \n", columnas); fprintf(doc, "%i \n", dificultad); fprintf(doc, "%i \n", modo); fprintf(doc, "%i \n", *numVidas); fprintf(doc, "%i \n", *puntos); for (int i = 0; i < (filas*columnas); i++) { fprintf(doc, "%i ", matriz[i]); } fclose(doc); printf("\n--GUARDADO--\n"); } //Metodo para cargar record void cargarRecord() { leer = fopen("record.txt", "r"); fscanf(leer, "%d", &punt_record); } //Metodo para guardar record void guardarRecord() { doc = fopen("record.txt", "w"); fprintf(doc, "%i \n", punt_record); fclose(doc); printf("\n--GUARDADO RECORD--\n"); }
91c4666fb74aa4ab96657b936d97544e3f685031.hip
// !!! This is a file automatically generated by hipify!!!
/**************************************************************************
*
* set up GPU for processing
*
**************************************************************************/

#include <hip/hip_runtime.h>
#include <stdio.h>
#include "gpu_main.h"
#include <hip/hip_runtime.h>
#include <math.h>

#define gScalar 0.2

texture<float, 2> texRed;
texture<float, 2> texGreen;
texture<float, 2> texBlue;

/******************************************************************************/
GPU_Palette initGPUPalette(unsigned int imageWidth, unsigned int imageHeight)
{
  GPU_Palette X;

  X.gThreads.x = 32; // 32 x 32 = 1024 threads per block
  X.gThreads.y = 32;
  X.gThreads.z = 1;
  X.gBlocks.x = ceil(imageWidth / 32); // however many blocks needed for image
  X.gBlocks.y = ceil(imageHeight / 32); // integer division: assumes dimensions are multiples of 32
  X.gBlocks.z = 1;

  X.palette_width = imageWidth; // save this info
  X.palette_height = imageHeight;
  X.num_pixels = imageWidth * imageHeight;

  // allocate memory on GPU corresponding to pixel colors:
  hipError_t err;
  err = hipMalloc((void**) &X.red, X.num_pixels * sizeof(float));
  if(err != hipSuccess){
    printf("cuda error allocating red = %s\n", hipGetErrorString(err));
    exit(EXIT_FAILURE);
  }
  err = hipMalloc((void**) &X.green, X.num_pixels * sizeof(float)); // g
  if(err != hipSuccess){
    printf("cuda error allocating green = %s\n", hipGetErrorString(err));
    exit(EXIT_FAILURE);
  }
  err = hipMalloc((void**) &X.blue, X.num_pixels * sizeof(float)); // b
  if(err != hipSuccess){
    printf("cuda error allocating blue = %s\n", hipGetErrorString(err));
    exit(EXIT_FAILURE);
  }

  // bind the channel buffers as 2D textures: width, height, pitch in bytes
  hipChannelFormatDesc desc = hipCreateChannelDesc<float>();
  hipBindTexture2D(NULL, texRed, X.red, desc, X.palette_width, X.palette_height,
                   sizeof(float) * X.palette_width);
  hipBindTexture2D(NULL, texGreen, X.green, desc, X.palette_width, X.palette_height,
                   sizeof(float) * X.palette_width);
  hipBindTexture2D(NULL, texBlue, X.blue, desc, X.palette_width, X.palette_height,
                   sizeof(float) * X.palette_width);

  return X;
}

/******************************************************************************/
void freeGPUPalette(GPU_Palette* P)
{
  hipFree(P->red);
  hipFree(P->green);
  hipFree(P->blue);
}

/******************************************************************************/
int updatePalette(GPU_Palette* P, int xIdx, int yIdx, float z)
{
  hipLaunchKernelGGL((updateReds),   dim3(P->gBlocks), dim3(P->gThreads), 0, 0, P->red,   xIdx, yIdx, z);
  hipLaunchKernelGGL((updateGreens), dim3(P->gBlocks), dim3(P->gThreads), 0, 0, P->green, xIdx, yIdx, z);
  hipLaunchKernelGGL((updateBlues),  dim3(P->gBlocks), dim3(P->gThreads), 0, 0, P->blue,  xIdx, yIdx, z);
  return 0;
}

/******************************************************************************/
__global__ void updateReds(float* red, int xIdx, int yIdx, float z){
  int x = threadIdx.x + (blockIdx.x * blockDim.x);
  int y = threadIdx.y + (blockIdx.y * blockDim.y);
  int vecIdx = x + (y * blockDim.x * gridDim.x);

  if( (powf((x+5 - xIdx), 2) + powf((y+5 - yIdx), 2)) < powf(round(z*0.65), 2)) {
    red[vecIdx] = 1;
  } else {
    red[vecIdx] *= 0.99;
  }
}

/******************************************************************************/
__global__ void updateGreens(float* green, int xIdx, int yIdx, float z){
  int x = threadIdx.x + (blockIdx.x * blockDim.x);
  int y = threadIdx.y + (blockIdx.y * blockDim.y);
  int vecIdx = x + (y * blockDim.x * gridDim.x);

  if ((powf((x+5 - xIdx), 2) + powf((y+5 -yIdx), 2)) < powf(round(z*0.65), 2)) {
    green[vecIdx] = 1.0;
  } else{
    green[vecIdx] *= .90;
  }
}

/******************************************************************************/
__global__ void updateBlues(float* blue, int xIdx, int yIdx, float z){
  int x = threadIdx.x + (blockIdx.x * blockDim.x);
  int y = threadIdx.y + (blockIdx.y * blockDim.y);
  int vecIdx = x + (y * blockDim.x * gridDim.x);

  if((powf((x + 5 - xIdx), 2) + powf((y + 5 -yIdx), 2)) < powf(round(z * 0.65), 2)) {
    blue[vecIdx] = 1.0;
  } else{
    blue[vecIdx] *= .90;
  }
}

/******************************************************************************/
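initGPUPalette above allocates the three channel buffers but never initializes them, so the decay branches of the update kernels (e.g. red[vecIdx] *= 0.99) first read undefined data. A minimal zeroing sketch follows; clearPalette is a hypothetical name, and the assumption is that a caller invokes it once, right after initGPUPalette returns.

#include <hip/hip_runtime.h>
#include <stdio.h>
#include "gpu_main.h"

// Hypothetical helper, not part of the original file: zero the three channel
// buffers so the first decay updates operate on defined values.
static void clearPalette(GPU_Palette* P)
{
  if (hipMemset(P->red,   0, P->num_pixels * sizeof(float)) != hipSuccess ||
      hipMemset(P->green, 0, P->num_pixels * sizeof(float)) != hipSuccess ||
      hipMemset(P->blue,  0, P->num_pixels * sizeof(float)) != hipSuccess) {
    printf("hipMemset failed while clearing the palette\n");
  }
}
// A caller could invoke clearPalette(&X) right after initGPUPalette returns.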
91c4666fb74aa4ab96657b936d97544e3f685031.cu
/**************************************************************************
*
* set up GPU for processing
*
**************************************************************************/

#include <cuda.h>
#include <stdio.h>
#include "gpu_main.h"
#include <cuda_runtime.h>
#include <math.h>

#define gScalar 0.2

texture<float, 2> texRed;
texture<float, 2> texGreen;
texture<float, 2> texBlue;

/******************************************************************************/
GPU_Palette initGPUPalette(unsigned int imageWidth, unsigned int imageHeight)
{
  GPU_Palette X;

  X.gThreads.x = 32; // 32 x 32 = 1024 threads per block
  X.gThreads.y = 32;
  X.gThreads.z = 1;
  X.gBlocks.x = ceil(imageWidth / 32); // however many blocks needed for image
  X.gBlocks.y = ceil(imageHeight / 32); // integer division: assumes dimensions are multiples of 32
  X.gBlocks.z = 1;

  X.palette_width = imageWidth; // save this info
  X.palette_height = imageHeight;
  X.num_pixels = imageWidth * imageHeight;

  // allocate memory on GPU corresponding to pixel colors:
  cudaError_t err;
  err = cudaMalloc((void**) &X.red, X.num_pixels * sizeof(float));
  if(err != cudaSuccess){
    printf("cuda error allocating red = %s\n", cudaGetErrorString(err));
    exit(EXIT_FAILURE);
  }
  err = cudaMalloc((void**) &X.green, X.num_pixels * sizeof(float)); // g
  if(err != cudaSuccess){
    printf("cuda error allocating green = %s\n", cudaGetErrorString(err));
    exit(EXIT_FAILURE);
  }
  err = cudaMalloc((void**) &X.blue, X.num_pixels * sizeof(float)); // b
  if(err != cudaSuccess){
    printf("cuda error allocating blue = %s\n", cudaGetErrorString(err));
    exit(EXIT_FAILURE);
  }

  // bind the channel buffers as 2D textures: width, height, pitch in bytes
  cudaChannelFormatDesc desc = cudaCreateChannelDesc<float>();
  cudaBindTexture2D(NULL, texRed, X.red, desc, X.palette_width, X.palette_height,
                    sizeof(float) * X.palette_width);
  cudaBindTexture2D(NULL, texGreen, X.green, desc, X.palette_width, X.palette_height,
                    sizeof(float) * X.palette_width);
  cudaBindTexture2D(NULL, texBlue, X.blue, desc, X.palette_width, X.palette_height,
                    sizeof(float) * X.palette_width);

  return X;
}

/******************************************************************************/
void freeGPUPalette(GPU_Palette* P)
{
  cudaFree(P->red);
  cudaFree(P->green);
  cudaFree(P->blue);
}

/******************************************************************************/
int updatePalette(GPU_Palette* P, int xIdx, int yIdx, float z)
{
  updateReds   <<< P->gBlocks, P->gThreads >>> (P->red,   xIdx, yIdx, z);
  updateGreens <<< P->gBlocks, P->gThreads >>> (P->green, xIdx, yIdx, z);
  updateBlues  <<< P->gBlocks, P->gThreads >>> (P->blue,  xIdx, yIdx, z);
  return 0;
}

/******************************************************************************/
__global__ void updateReds(float* red, int xIdx, int yIdx, float z){
  int x = threadIdx.x + (blockIdx.x * blockDim.x);
  int y = threadIdx.y + (blockIdx.y * blockDim.y);
  int vecIdx = x + (y * blockDim.x * gridDim.x);

  if( (powf((x+5 - xIdx), 2) + powf((y+5 - yIdx), 2)) < powf(round(z*0.65), 2)) {
    red[vecIdx] = 1;
  } else {
    red[vecIdx] *= 0.99;
  }
}

/******************************************************************************/
__global__ void updateGreens(float* green, int xIdx, int yIdx, float z){
  int x = threadIdx.x + (blockIdx.x * blockDim.x);
  int y = threadIdx.y + (blockIdx.y * blockDim.y);
  int vecIdx = x + (y * blockDim.x * gridDim.x);

  if ((powf((x+5 - xIdx), 2) + powf((y+5 -yIdx), 2)) < powf(round(z*0.65), 2)) {
    green[vecIdx] = 1.0;
  } else{
    green[vecIdx] *= .90;
  }
}

/******************************************************************************/
__global__ void updateBlues(float* blue, int xIdx, int yIdx, float z){
  int x = threadIdx.x + (blockIdx.x * blockDim.x);
  int y = threadIdx.y + (blockIdx.y * blockDim.y);
  int vecIdx = x + (y * blockDim.x * gridDim.x);

  if((powf((x + 5 - xIdx), 2) + powf((y + 5 -yIdx), 2)) < powf(round(z * 0.65), 2)) {
    blue[vecIdx] = 1.0;
  } else{
    blue[vecIdx] *= .90;
  }
}

/******************************************************************************/
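A hedged usage sketch for the palette API above, assuming gpu_main.h declares GPU_Palette, initGPUPalette, updatePalette and freeGPUPalette as used in this file; the trajectory of the point and the meaning of z are invented for illustration only.

#include "gpu_main.h"

int main()
{
  GPU_Palette pal = initGPUPalette(1024, 1024);   // 1024 x 1024 pixel palette
  for (int frame = 0; frame < 100; ++frame) {
    int x = 512 + frame;   // hypothetical trajectory of the drawn point
    int y = 512;
    float z = 20.0f;       // hypothetical size parameter for the drawn disc
    updatePalette(&pal, x, y, z);
  }
  freeGPUPalette(&pal);
  return 0;
}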
67a3f3deb936993d4a67a3c151cbb1cb586d3018.hip
// !!! This is a file automatically generated by hipify!!! #include <stdlib.h> #include <stdio.h> #include <string.h> #include <math.h> #include <cutil_inline.h> #include <base.h> #include <image.h> #include <bayer.h> #include "base.h" #include "image.h" #include "cuda_utils.h" #include "ahd_kernels.cu" #include "util.h" void run_cuda_ahd(img *image, pixel*result); #define MBs (1024 * 1024) #define MIN_AVAIL 50 * MBs void cuda_ahd(img *image) { uint height = image->height; uint width = image->width; size_t mem_needed; size_t avail = 0, total = 0; int n = 0; size_t res_size = width * height * RGB * sizeof(pixel); pixel *result = mallocz<pixel>(res_size); call(hipMemGetInfo(&avail,&total)); //show_mem_info(); do { n++; size_t buf_size = (width+2*P) * (height+2*P)/n * sizeof(char4); size_t cie_buf_size = width * height/n * sizeof(float4); size_t homo_buf_size = width * height/n * sizeof(uchar); mem_needed = cie_buf_size * 2 + homo_buf_size * 2 + buf_size * 2; Info("Image size: %fMB Homo bufsize : %fMB CIE bufsize %fMB Mem needed: %fMB", MB(buf_size), MB(homo_buf_size), MB(cie_buf_size), MB(mem_needed)); } while (mem_needed > avail - MIN_AVAIL); int h = (height/n/2) * 2; // round to lowest multiple of 2 img *cropped_img = new_image(h,width); int offset = 0, i; for (i = 0; i < n-1; i++) { Info("Iteration %i, offset %d h: %d n: %d",i,offset,h,n); cropped_img->buffer = image->buffer + offset; run_cuda_ahd(cropped_img, result + offset); //memcpy(result + offset, res, cropped_buf_size); //free(res); offset += h * width * RGB; } /* Final tile may be slightly different size due to rounding */ cropped_img->height = height - i*h; Info("Final tile height %d",cropped_img->height); cropped_img->buffer = image->buffer + offset; run_cuda_ahd(cropped_img, result + offset); //memcpy(result + offset, res, cropped_img->height * width * RGB *sizeof(pixel)); //free(res); free(image->buffer); image->buffer = result; } void run_cuda_ahd(img *image, pixel *result) { #ifdef __CUDAEMU__ Info("Performing CUDA AHD interpolation (Emulation mode)"); #else Info("Performing CUDA AHD interpolation"); #endif uint height = image->height; uint width = image->width; #ifndef __CUDAEMU__ if (width % 32 > 0) { FatalError("Width must be a multiple of 32"); } #endif Info("Width: %d Height %d\n",width,height); size_t buf_size = width * height * RGB * sizeof(pixel); uint pheight = height + P*2; uint pwidth = width + P*2; size_t pbuf_size = pwidth * pheight * sizeof(pixel); pixel *pimage = pad_image(image->buffer,width,height,P); if (settings->save_temps) { save_grayscale(pimage,pwidth,pheight,"img/padded_image.ppm"); } /* Make channels */ hipChannelFormatDesc pixel_channel = hipCreateChannelDesc<pixel>(); hipChannelFormatDesc pixel4_channel = hipCreateChannelDesc<pixel4>(); hipChannelFormatDesc float4_channel = hipCreateChannelDesc<float4>(); hipChannelFormatDesc float_channel = hipCreateChannelDesc<float>(); /* Setup source image array on device */ hipArray *d_src_image = NULL; call(hipMallocArray(&d_src_image, &pixel_channel, pwidth, pheight)); call(hipMemcpyToArray(d_src_image, 0, 0, pimage, pbuf_size, hipMemcpyHostToDevice)); /* Setup source image texture */ call(hipBindTextureToArray(src, d_src_image)); setupTexture(src); pixel *d_horz_tmpres = NULL; pixel *d_vert_tmpres = NULL; if (settings->save_temps) { /* these are just for debugging */ /* they neccessary for the algorithm */ d_horz_tmpres = (pixel*)devMalloc(buf_size); d_vert_tmpres = (pixel*)devMalloc(buf_size); } size_t dest_pbuf_size = pwidth * pheight * sizeof(pixel4); 
pixel4 *d_horz_g = (pixel4*)devMalloc(dest_pbuf_size); pixel4 *d_vert_g = (pixel4*)devMalloc(dest_pbuf_size); // size_t dest_pbuf_size = pwidth * pheight * sizeof(pixel) * RGB; // pixel *d_horz_g = (pixel*)devMalloc(dest_pbuf_size); // pixel *d_vert_g = (pixel*)devMalloc(dest_pbuf_size); dim3 thread_block(32, 8); dim3 pblock_grid((pwidth + thread_block.x - 1) / thread_block.x, (pheight + thread_block.y - 1) / thread_block.y); dim3 block_grid((width + thread_block.x - 1) / thread_block.x, (height + thread_block.y - 1) / thread_block.y); /*DebugI(pwidth); DebugI(pheight);*/ Info("Interpolating GREEN"); /* Interpolate horz and vert green */ RUN_KERNEL(ahd_kernel_interp_g, pblock_grid, thread_block, d_horz_g, d_vert_g, pwidth, pheight); devFreeArray(d_src_image); if (settings->save_temps) { write_d4_to_file(d_horz_g,pwidth,pheight,"img/interp_g_horz.ppm"); write_d4_to_file(d_vert_g,pwidth,pheight,"img/interp_g_vert.ppm"); } assert(pwidth %32 == 0); assert(pwidth*sizeof(pixel4) %32 ==0); /* Interpolate horz r/b */ Info("Interpolating Horizontal RED and BLUE"); size_t cie_bufsize = width * height * sizeof(float4); float4 *d_horz_result = (float4*)devMalloc(cie_bufsize); // size_t cie_bufsize = width * height * sizeof(float) * LAB; // float *d_horz_result = (float*)devMalloc(cie_bufsize); size_t offset = 1; call(hipBindTexture2D(&offset, src_g, d_horz_g, pixel4_channel, pwidth, pheight, pwidth*sizeof(pixel4))); //src_g, d_horz_g, pixel_channel, pwidth*RGB, pheight, pwidth*sizeof(pixel)*RGB)); assert(offset == 0); // this should always be zero, but check the CUDA manual wasn't lying setupTexture(src_g); RUN_KERNEL(ahd_kernel_interp_rb ,block_grid, thread_block, d_horz_result, d_horz_tmpres, pwidth, pheight); devFree(d_horz_g); float4 *d_vert_result = (float4*)devMalloc(cie_bufsize); //float *d_vert_result = (float*)devMalloc(cie_bufsize); /* Interpolate vert r/b */ call(hipBindTexture2D(&offset, src_g, d_vert_g, pixel4_channel, pwidth, pheight, pwidth*sizeof(pixel4))); /*src_g, d_vert_g, pixel_channel, pwidth*RGB, pheight, pwidth*sizeof(pixel)*RGB));*/ assert(offset == 0); setupTexture(src_g); RUN_KERNEL(ahd_kernel_interp_rb ,block_grid, thread_block, d_vert_result, d_vert_tmpres, pwidth, pheight); devFree(d_vert_g); if (settings->save_temps && d_horz_tmpres != NULL && d_vert_tmpres != NULL) { write_d_to_file(d_horz_tmpres,width,height,"img/interpolation_horz.ppm"); write_d_to_file(d_vert_tmpres,width,height,"img/interpolation_vert.ppm"); } call(hipBindTexture2D(NULL, horz_tex, d_horz_result, float4_channel, width, height, width*sizeof(float4))); call(hipBindTexture2D(NULL, vert_tex, d_vert_result, float4_channel, width, height, width*sizeof(float4))); // call(hipBindTexture2D(NULL, horz_tex, d_horz_result, float_channel, // width*RGB, height, width*sizeof(float)*RGB)); // call(hipBindTexture2D(NULL, vert_tex, d_vert_result, float_channel, // width*RGB, height, width*sizeof(float)*RGB)); setupTexture(horz_tex); setupTexture(vert_tex); size_t homo_bufsize = height * width * sizeof(uchar); uchar *d_homo_horz = (uchar *)devMalloc(homo_bufsize); uchar *d_homo_vert = (uchar *)devMalloc(homo_bufsize); RUN_KERNEL(ahd_kernel_build_homo_map, block_grid, thread_block, d_homo_horz, d_homo_vert, width, height/*, settings->ball_distance*/); if (settings->save_temps) { int scale = ball_area(settings->ball_distance); save_d_map_uchar(d_homo_horz,width,height,scale,"img/homo_map_horz.ppm"); save_d_map_uchar(d_homo_vert,width,height,scale,"img/homo_map_vert.ppm"); } call(hipBindTexture2D(NULL, homo_h_tex, 
d_homo_horz, pixel_channel, width, height, width*sizeof(uchar))); call(hipBindTexture2D(NULL, homo_v_tex, d_homo_vert, pixel_channel, width, height, width*sizeof(uchar))); setupTexture(homo_h_tex); setupTexture(homo_v_tex); float *d_direction_tmpres = NULL; if (settings->save_temps){ d_direction_tmpres = (float*)devMalloc(width * height * sizeof(float)); } pixel *d_result = (pixel*)devMalloc(buf_size); RUN_KERNEL(ahd_kernel_choose_direction, block_grid, thread_block, d_result,d_direction_tmpres,width,height); if (settings->save_temps) { save_d_map(d_direction_tmpres,width,height,1,"img/direction.ppm"); write_d_to_file(d_result,width,height,"img/pre_noise.ppm"); } devFree(d_horz_g); devFree(d_vert_g); devFree(d_horz_result); devFree(d_vert_result); pixel *d_temp = (pixel*)devMalloc(buf_size); for (uint i = 0; i < settings->median_filter_iterations; i++) { Info("Removing artefacts"); call(hipBindTexture2D(NULL, src, d_result, pixel_channel, width*RGB, height, width*RGB*sizeof(pixel))); RUN_KERNEL(ahd_kernel_remove_artefacts,block_grid,thread_block,d_temp, width, height); pixel *swap = d_result; d_result = d_temp; d_temp = swap; } // if (settings->median_filter_iterations %2) { // free(d_temp); // } else { // free(image->buffer); // } /* Copy result from device */ //pixel *result = (pixel *)memcpy_d_to_h(d_temp,buf_size); call(hipMemcpy(result, d_temp, buf_size, hipMemcpyDeviceToHost)); devFree(d_result); devFree(d_direction_tmpres); devFree(d_temp); devFreeArray(d_src_image); free(pimage); // hipDeviceReset(); // free(image->buffer); // image->buffer = result; // return result; }
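cuda_ahd above picks the number of horizontal tiles by growing n until the per-tile buffers fit into free device memory. A standalone sketch of that pattern follows; chooseTileCount, per_pixel_bytes and reserve_bytes are hypothetical names, and the size formula is a simplification of the buffer arithmetic in the real function.

#include <hip/hip_runtime.h>
#include <stdio.h>

// Sketch: split the image into n horizontal tiles so that the working buffers
// for one tile fit into free device memory, keeping a safety reserve.
static int chooseTileCount(size_t width, size_t height, size_t per_pixel_bytes,
                           size_t reserve_bytes)
{
  size_t avail = 0, total = 0;
  if (hipMemGetInfo(&avail, &total) != hipSuccess) return 1;

  int n = 0;
  size_t needed;
  do {
    n++;
    needed = (width * height / n) * per_pixel_bytes;  // simplified per-tile footprint
  } while (needed + reserve_bytes > avail && n < 1024);

  printf("free %zu MB -> splitting into %d tiles\n", avail / (1024 * 1024), n);
  return n;
}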
67a3f3deb936993d4a67a3c151cbb1cb586d3018.cu
#include <stdlib.h> #include <stdio.h> #include <string.h> #include <math.h> #include <cutil_inline.h> #include <base.h> #include <image.h> #include <bayer.h> #include "base.h" #include "image.h" #include "cuda_utils.h" #include "ahd_kernels.cu" #include "util.h" void run_cuda_ahd(img *image, pixel*result); #define MBs (1024 * 1024) #define MIN_AVAIL 50 * MBs void cuda_ahd(img *image) { uint height = image->height; uint width = image->width; size_t mem_needed; size_t avail = 0, total = 0; int n = 0; size_t res_size = width * height * RGB * sizeof(pixel); pixel *result = mallocz<pixel>(res_size); call(cudaMemGetInfo(&avail,&total)); //show_mem_info(); do { n++; size_t buf_size = (width+2*P) * (height+2*P)/n * sizeof(char4); size_t cie_buf_size = width * height/n * sizeof(float4); size_t homo_buf_size = width * height/n * sizeof(uchar); mem_needed = cie_buf_size * 2 + homo_buf_size * 2 + buf_size * 2; Info("Image size: %fMB Homo bufsize : %fMB CIE bufsize %fMB Mem needed: %fMB", MB(buf_size), MB(homo_buf_size), MB(cie_buf_size), MB(mem_needed)); } while (mem_needed > avail - MIN_AVAIL); int h = (height/n/2) * 2; // round to lowest multiple of 2 img *cropped_img = new_image(h,width); int offset = 0, i; for (i = 0; i < n-1; i++) { Info("Iteration %i, offset %d h: %d n: %d",i,offset,h,n); cropped_img->buffer = image->buffer + offset; run_cuda_ahd(cropped_img, result + offset); //memcpy(result + offset, res, cropped_buf_size); //free(res); offset += h * width * RGB; } /* Final tile may be slightly different size due to rounding */ cropped_img->height = height - i*h; Info("Final tile height %d",cropped_img->height); cropped_img->buffer = image->buffer + offset; run_cuda_ahd(cropped_img, result + offset); //memcpy(result + offset, res, cropped_img->height * width * RGB *sizeof(pixel)); //free(res); free(image->buffer); image->buffer = result; } void run_cuda_ahd(img *image, pixel *result) { #ifdef __CUDAEMU__ Info("Performing CUDA AHD interpolation (Emulation mode)"); #else Info("Performing CUDA AHD interpolation"); #endif uint height = image->height; uint width = image->width; #ifndef __CUDAEMU__ if (width % 32 > 0) { FatalError("Width must be a multiple of 32"); } #endif Info("Width: %d Height %d\n",width,height); size_t buf_size = width * height * RGB * sizeof(pixel); uint pheight = height + P*2; uint pwidth = width + P*2; size_t pbuf_size = pwidth * pheight * sizeof(pixel); pixel *pimage = pad_image(image->buffer,width,height,P); if (settings->save_temps) { save_grayscale(pimage,pwidth,pheight,"img/padded_image.ppm"); } /* Make channels */ cudaChannelFormatDesc pixel_channel = cudaCreateChannelDesc<pixel>(); cudaChannelFormatDesc pixel4_channel = cudaCreateChannelDesc<pixel4>(); cudaChannelFormatDesc float4_channel = cudaCreateChannelDesc<float4>(); cudaChannelFormatDesc float_channel = cudaCreateChannelDesc<float>(); /* Setup source image array on device */ cudaArray *d_src_image = NULL; call(cudaMallocArray(&d_src_image, &pixel_channel, pwidth, pheight)); call(cudaMemcpyToArray(d_src_image, 0, 0, pimage, pbuf_size, cudaMemcpyHostToDevice)); /* Setup source image texture */ call(cudaBindTextureToArray(src, d_src_image)); setupTexture(src); pixel *d_horz_tmpres = NULL; pixel *d_vert_tmpres = NULL; if (settings->save_temps) { /* these are just for debugging */ /* they neccessary for the algorithm */ d_horz_tmpres = (pixel*)devMalloc(buf_size); d_vert_tmpres = (pixel*)devMalloc(buf_size); } size_t dest_pbuf_size = pwidth * pheight * sizeof(pixel4); pixel4 *d_horz_g = 
(pixel4*)devMalloc(dest_pbuf_size); pixel4 *d_vert_g = (pixel4*)devMalloc(dest_pbuf_size); // size_t dest_pbuf_size = pwidth * pheight * sizeof(pixel) * RGB; // pixel *d_horz_g = (pixel*)devMalloc(dest_pbuf_size); // pixel *d_vert_g = (pixel*)devMalloc(dest_pbuf_size); dim3 thread_block(32, 8); dim3 pblock_grid((pwidth + thread_block.x - 1) / thread_block.x, (pheight + thread_block.y - 1) / thread_block.y); dim3 block_grid((width + thread_block.x - 1) / thread_block.x, (height + thread_block.y - 1) / thread_block.y); /*DebugI(pwidth); DebugI(pheight);*/ Info("Interpolating GREEN"); /* Interpolate horz and vert green */ RUN_KERNEL(ahd_kernel_interp_g, pblock_grid, thread_block, d_horz_g, d_vert_g, pwidth, pheight); devFreeArray(d_src_image); if (settings->save_temps) { write_d4_to_file(d_horz_g,pwidth,pheight,"img/interp_g_horz.ppm"); write_d4_to_file(d_vert_g,pwidth,pheight,"img/interp_g_vert.ppm"); } assert(pwidth %32 == 0); assert(pwidth*sizeof(pixel4) %32 ==0); /* Interpolate horz r/b */ Info("Interpolating Horizontal RED and BLUE"); size_t cie_bufsize = width * height * sizeof(float4); float4 *d_horz_result = (float4*)devMalloc(cie_bufsize); // size_t cie_bufsize = width * height * sizeof(float) * LAB; // float *d_horz_result = (float*)devMalloc(cie_bufsize); size_t offset = 1; call(cudaBindTexture2D(&offset, src_g, d_horz_g, pixel4_channel, pwidth, pheight, pwidth*sizeof(pixel4))); //src_g, d_horz_g, pixel_channel, pwidth*RGB, pheight, pwidth*sizeof(pixel)*RGB)); assert(offset == 0); // this should always be zero, but check the CUDA manual wasn't lying setupTexture(src_g); RUN_KERNEL(ahd_kernel_interp_rb ,block_grid, thread_block, d_horz_result, d_horz_tmpres, pwidth, pheight); devFree(d_horz_g); float4 *d_vert_result = (float4*)devMalloc(cie_bufsize); //float *d_vert_result = (float*)devMalloc(cie_bufsize); /* Interpolate vert r/b */ call(cudaBindTexture2D(&offset, src_g, d_vert_g, pixel4_channel, pwidth, pheight, pwidth*sizeof(pixel4))); /*src_g, d_vert_g, pixel_channel, pwidth*RGB, pheight, pwidth*sizeof(pixel)*RGB));*/ assert(offset == 0); setupTexture(src_g); RUN_KERNEL(ahd_kernel_interp_rb ,block_grid, thread_block, d_vert_result, d_vert_tmpres, pwidth, pheight); devFree(d_vert_g); if (settings->save_temps && d_horz_tmpres != NULL && d_vert_tmpres != NULL) { write_d_to_file(d_horz_tmpres,width,height,"img/interpolation_horz.ppm"); write_d_to_file(d_vert_tmpres,width,height,"img/interpolation_vert.ppm"); } call(cudaBindTexture2D(NULL, horz_tex, d_horz_result, float4_channel, width, height, width*sizeof(float4))); call(cudaBindTexture2D(NULL, vert_tex, d_vert_result, float4_channel, width, height, width*sizeof(float4))); // call(cudaBindTexture2D(NULL, horz_tex, d_horz_result, float_channel, // width*RGB, height, width*sizeof(float)*RGB)); // call(cudaBindTexture2D(NULL, vert_tex, d_vert_result, float_channel, // width*RGB, height, width*sizeof(float)*RGB)); setupTexture(horz_tex); setupTexture(vert_tex); size_t homo_bufsize = height * width * sizeof(uchar); uchar *d_homo_horz = (uchar *)devMalloc(homo_bufsize); uchar *d_homo_vert = (uchar *)devMalloc(homo_bufsize); RUN_KERNEL(ahd_kernel_build_homo_map, block_grid, thread_block, d_homo_horz, d_homo_vert, width, height/*, settings->ball_distance*/); if (settings->save_temps) { int scale = ball_area(settings->ball_distance); save_d_map_uchar(d_homo_horz,width,height,scale,"img/homo_map_horz.ppm"); save_d_map_uchar(d_homo_vert,width,height,scale,"img/homo_map_vert.ppm"); } call(cudaBindTexture2D(NULL, homo_h_tex, d_homo_horz, 
pixel_channel, width, height, width*sizeof(uchar))); call(cudaBindTexture2D(NULL, homo_v_tex, d_homo_vert, pixel_channel, width, height, width*sizeof(uchar))); setupTexture(homo_h_tex); setupTexture(homo_v_tex); float *d_direction_tmpres = NULL; if (settings->save_temps){ d_direction_tmpres = (float*)devMalloc(width * height * sizeof(float)); } pixel *d_result = (pixel*)devMalloc(buf_size); RUN_KERNEL(ahd_kernel_choose_direction, block_grid, thread_block, d_result,d_direction_tmpres,width,height); if (settings->save_temps) { save_d_map(d_direction_tmpres,width,height,1,"img/direction.ppm"); write_d_to_file(d_result,width,height,"img/pre_noise.ppm"); } devFree(d_horz_g); devFree(d_vert_g); devFree(d_horz_result); devFree(d_vert_result); pixel *d_temp = (pixel*)devMalloc(buf_size); for (uint i = 0; i < settings->median_filter_iterations; i++) { Info("Removing artefacts"); call(cudaBindTexture2D(NULL, src, d_result, pixel_channel, width*RGB, height, width*RGB*sizeof(pixel))); RUN_KERNEL(ahd_kernel_remove_artefacts,block_grid,thread_block,d_temp, width, height); pixel *swap = d_result; d_result = d_temp; d_temp = swap; } // if (settings->median_filter_iterations %2) { // free(d_temp); // } else { // free(image->buffer); // } /* Copy result from device */ //pixel *result = (pixel *)memcpy_d_to_h(d_temp,buf_size); call(cudaMemcpy(result, d_temp, buf_size, cudaMemcpyDeviceToHost)); devFree(d_result); devFree(d_direction_tmpres); devFree(d_temp); devFreeArray(d_src_image); free(pimage); // cudaThreadExit(); // free(image->buffer); // image->buffer = result; // return result; }
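run_cuda_ahd asserts that pwidth * sizeof(pixel4) is a multiple of 32 before calling cudaBindTexture2D. A hedged alternative sketch using cudaMallocPitch, which returns a pitch that already satisfies the alignment needed for 2D texture binding, is shown below; allocPitched and demo_tex are illustration-only names, and the sketch uses the same legacy texture-reference API as the file.

#include <cuda_runtime.h>
#include <cstdio>

texture<float4, 2> demo_tex;   // hypothetical texture reference for this sketch only

// Allocate a pitched 2D buffer and bind it; width/height are in elements,
// the returned pitch is in bytes and is already suitably aligned.
static float4* allocPitched(size_t width, size_t height, size_t* pitch)
{
  float4* ptr = NULL;
  cudaError_t err = cudaMallocPitch((void**)&ptr, pitch, width * sizeof(float4), height);
  if (err != cudaSuccess) {
    printf("cudaMallocPitch: %s\n", cudaGetErrorString(err));
    return NULL;
  }
  cudaChannelFormatDesc desc = cudaCreateChannelDesc<float4>();
  cudaBindTexture2D(NULL, demo_tex, ptr, desc, width, height, *pitch);
  return ptr;
}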
8c329c1784d0d193c1a52c6f55062f362e094181.hip
// !!! This is a file automatically generated by hipify!!! // Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. #include <hip/hip_runtime.h> #include <paddle/fluid/platform/device_context.h> #include <algorithm> #include <hipcub/hipcub.hpp> // NOLINT #include "paddle/fluid/framework/op_registry.h" #include "paddle/fluid/memory/malloc.h" #include "paddle/fluid/operators/detail/safe_ref.h" #include "paddle/fluid/operators/math/blas.h" namespace paddle { namespace operators { template <typename T> using kvp = hipcub::KeyValuePair<T, T>; template <typename T> using cv2 = cub::CubVector<T, 2>; template <typename T, int TPB> __device__ inline void LayerNorm(const cv2<T> &thread_data, const int ld, const int offset, const float *bias, const float *scale, T *output, float eps) { using BlockReduce = hipcub::BlockReduce<cv2<T>, TPB>; __shared__ typename BlockReduce::TempStorage temp_storage; __shared__ T mu; // mean __shared__ T rsigma; // 1 / std.dev. const auto sum_kv = BlockReduce(temp_storage).Reduce(thread_data, hipcub::Sum()); if (threadIdx.x == 0) { mu = sum_kv.x; rsigma = rsqrt(sum_kv.y - mu * mu + eps); } __syncthreads(); for (int i = threadIdx.x; i < ld; i += TPB) { const int idx = offset + i; const T val = output[idx]; const T g(scale[i]); const T b(bias[i]); output[idx] = g * (val - mu) * rsigma + b; } } template <typename T, unsigned TPB> __global__ void EmbEltwiseLayernormKernel( int hidden, const int64_t *word_id_d, const int64_t *pos_id_d, const int64_t *sent_id_d, const T *scale, const T *bias, const T *word_emb, const T *pos_emb, const T *sent_emb, T *output, float eps) { hipcub::Sum pair_sum; // blockIdx.x: position in the sequence // blockIdx.y: batch // gridDim.x: Seq // gridDim.y: Batch __shared__ int64_t word_id; __shared__ int64_t pos_id; __shared__ int64_t sent_id; const T rhidden = T(1.f) / T(hidden); const int64_t seq_pos = blockIdx.y + blockIdx.x * gridDim.y; if (threadIdx.x == 0) { word_id = word_id_d[seq_pos]; pos_id = pos_id_d[seq_pos]; sent_id = sent_id_d[seq_pos]; } __syncthreads(); // load word, pos, sentence embeddings and add them toghether const int64_t woffset = word_id * hidden; const int64_t poffset = pos_id * hidden; const int64_t soffset = sent_id * hidden; const int64_t out_offset = seq_pos * hidden; cv2<T> thread_data; thread_data.x = 0; thread_data.y = 0; #pragma unroll for (int it = threadIdx.x; it < hidden; it += TPB) { const T w(word_emb[woffset + it]); const T p(pos_emb[poffset + it]); const T s(sent_emb[soffset + it]); const T val = w + s + p; output[out_offset + it] = val; const T rhiddenval = rhidden * val; cv2<T> temp_data; temp_data.x = rhiddenval; temp_data.y = rhiddenval * val; thread_data = pair_sum(thread_data, temp_data); } LayerNorm<T, TPB>(thread_data, hidden, out_offset, bias, scale, output, eps); } template <typename DeviceContext, typename T> class EmbeddingEltWiseLayerNormKernel : public framework::OpKernel<T> { public: void Compute(const framework::ExecutionContext &context) 
const override { using Tensor = framework::Tensor; auto *word_id = context.Input<framework::Tensor>("WordId"); auto *pos_id = context.Input<framework::Tensor>("PosId"); auto *sent_id = context.Input<framework::Tensor>("SentId"); auto *word_emb = context.Input<framework::Tensor>("WordEmb"); auto *pos_emb = context.Input<framework::Tensor>("PosEmb"); auto *sent_emb = context.Input<framework::Tensor>("SentEmb"); auto *bias = context.Input<framework::Tensor>("Bias"); auto *scale = context.Input<framework::Tensor>("Scale"); auto *out = context.Output<framework::Tensor>("Out"); auto *word_id_d = word_id->data<int64_t>(); auto *pos_id_d = pos_id->data<int64_t>(); auto *sent_id_d = sent_id->data<int64_t>(); auto *word_emb_d = word_emb->data<T>(); auto *pos_emb_d = pos_emb->data<T>(); auto *sent_emb_d = sent_emb->data<T>(); auto *bias_d = bias->data<T>(); auto *scale_d = scale->data<T>(); auto *output_d = out->mutable_data<T>(context.GetPlace()); // compute q*k with eltadd auto &device_ctx = context.template device_context<DeviceContext>(); float eps = context.Attr<float>("epsilon"); // should be (B * S * hidden) auto word_id_dims = word_id->dims(); auto word_emb_dims = word_emb->dims(); int batch = word_id_dims[0]; int seq_len = word_id_dims[1]; int hidden = word_emb_dims[1]; const unsigned tpb = 256; const dim3 grid(seq_len, batch, 1); const dim3 block(tpb, 1, 1); hipLaunchKernelGGL(( EmbEltwiseLayernormKernel<T, tpb>), dim3(grid), dim3(block), 0, device_ctx.stream(), hidden, word_id_d, pos_id_d, sent_id_d, scale_d, bias_d, word_emb_d, pos_emb_d, sent_emb_d, output_d, eps); } }; } // namespace operators } // namespace paddle namespace ops = paddle::operators; REGISTER_OP_CUDA_KERNEL(fused_embedding_eltwise_layernorm, ops::EmbeddingEltWiseLayerNormKernel< paddle::platform::CUDADeviceContext, float>);
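A small sketch of the launch geometry used by EmbEltwiseLayernormKernel above: one block per token, gridDim.x over sequence positions and gridDim.y over the batch, with the same seq_pos flattening. The kernel body is a placeholder, not the real fused op, and tokenKernelSketch is an invented name.

#include <hip/hip_runtime.h>
#include <stdint.h>

// Placeholder kernel: each block owns one token's hidden vector.
__global__ void tokenKernelSketch(int hidden, float* out)
{
  const int64_t seq_pos = blockIdx.y + blockIdx.x * gridDim.y;  // same flattening as the file
  for (int i = threadIdx.x; i < hidden; i += blockDim.x) {
    out[seq_pos * hidden + i] = 0.0f;  // placeholder work
  }
}

// Host-side launch sketch for batch B and sequence length S:
//   dim3 grid(S, B, 1);
//   dim3 block(256, 1, 1);
//   hipLaunchKernelGGL(tokenKernelSketch, grid, block, 0, stream, hidden, out);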
8c329c1784d0d193c1a52c6f55062f362e094181.cu
// Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#include <cuda_runtime.h>
#include <paddle/fluid/platform/device_context.h>
#include <algorithm>
#include <cub/cub.cuh>  // NOLINT
#include "paddle/fluid/framework/op_registry.h"
#include "paddle/fluid/memory/malloc.h"
#include "paddle/fluid/operators/detail/safe_ref.h"
#include "paddle/fluid/operators/math/blas.h"

namespace paddle {
namespace operators {

template <typename T>
using kvp = cub::KeyValuePair<T, T>;

template <typename T>
using cv2 = cub::CubVector<T, 2>;

template <typename T, int TPB>
__device__ inline void LayerNorm(const cv2<T> &thread_data, const int ld,
                                 const int offset, const float *bias,
                                 const float *scale, T *output, float eps) {
  using BlockReduce = cub::BlockReduce<cv2<T>, TPB>;
  __shared__ typename BlockReduce::TempStorage temp_storage;
  __shared__ T mu;      // mean
  __shared__ T rsigma;  // 1 / std.dev.

  const auto sum_kv = BlockReduce(temp_storage).Reduce(thread_data, cub::Sum());

  if (threadIdx.x == 0) {
    mu = sum_kv.x;
    rsigma = rsqrt(sum_kv.y - mu * mu + eps);
  }
  __syncthreads();

  for (int i = threadIdx.x; i < ld; i += TPB) {
    const int idx = offset + i;
    const T val = output[idx];
    const T g(scale[i]);
    const T b(bias[i]);
    output[idx] = g * (val - mu) * rsigma + b;
  }
}

template <typename T, unsigned TPB>
__global__ void EmbEltwiseLayernormKernel(
    int hidden, const int64_t *word_id_d, const int64_t *pos_id_d,
    const int64_t *sent_id_d, const T *scale, const T *bias, const T *word_emb,
    const T *pos_emb, const T *sent_emb, T *output, float eps) {
  cub::Sum pair_sum;
  // blockIdx.x: position in the sequence
  // blockIdx.y: batch
  // gridDim.x: Seq
  // gridDim.y: Batch
  __shared__ int64_t word_id;
  __shared__ int64_t pos_id;
  __shared__ int64_t sent_id;

  const T rhidden = T(1.f) / T(hidden);
  const int64_t seq_pos = blockIdx.y + blockIdx.x * gridDim.y;
  if (threadIdx.x == 0) {
    word_id = word_id_d[seq_pos];
    pos_id = pos_id_d[seq_pos];
    sent_id = sent_id_d[seq_pos];
  }
  __syncthreads();

  // load word, pos, sentence embeddings and add them together
  const int64_t woffset = word_id * hidden;
  const int64_t poffset = pos_id * hidden;
  const int64_t soffset = sent_id * hidden;
  const int64_t out_offset = seq_pos * hidden;

  cv2<T> thread_data;
  thread_data.x = 0;
  thread_data.y = 0;

#pragma unroll
  for (int it = threadIdx.x; it < hidden; it += TPB) {
    const T w(word_emb[woffset + it]);
    const T p(pos_emb[poffset + it]);
    const T s(sent_emb[soffset + it]);
    const T val = w + s + p;
    output[out_offset + it] = val;
    const T rhiddenval = rhidden * val;
    cv2<T> temp_data;
    temp_data.x = rhiddenval;
    temp_data.y = rhiddenval * val;
    thread_data = pair_sum(thread_data, temp_data);
  }
  LayerNorm<T, TPB>(thread_data, hidden, out_offset, bias, scale, output, eps);
}

template <typename DeviceContext, typename T>
class EmbeddingEltWiseLayerNormKernel : public framework::OpKernel<T> {
 public:
  void Compute(const framework::ExecutionContext &context) const override {
    using Tensor = framework::Tensor;
    auto *word_id = context.Input<framework::Tensor>("WordId");
    auto *pos_id = context.Input<framework::Tensor>("PosId");
    auto *sent_id = context.Input<framework::Tensor>("SentId");

    auto *word_emb = context.Input<framework::Tensor>("WordEmb");
    auto *pos_emb = context.Input<framework::Tensor>("PosEmb");
    auto *sent_emb = context.Input<framework::Tensor>("SentEmb");

    auto *bias = context.Input<framework::Tensor>("Bias");
    auto *scale = context.Input<framework::Tensor>("Scale");
    auto *out = context.Output<framework::Tensor>("Out");

    auto *word_id_d = word_id->data<int64_t>();
    auto *pos_id_d = pos_id->data<int64_t>();
    auto *sent_id_d = sent_id->data<int64_t>();

    auto *word_emb_d = word_emb->data<T>();
    auto *pos_emb_d = pos_emb->data<T>();
    auto *sent_emb_d = sent_emb->data<T>();

    auto *bias_d = bias->data<T>();
    auto *scale_d = scale->data<T>();
    auto *output_d = out->mutable_data<T>(context.GetPlace());

    // fused embedding lookup + eltwise add + layer norm
    auto &device_ctx = context.template device_context<DeviceContext>();
    float eps = context.Attr<float>("epsilon");

    // should be (B * S * hidden)
    auto word_id_dims = word_id->dims();
    auto word_emb_dims = word_emb->dims();

    int batch = word_id_dims[0];
    int seq_len = word_id_dims[1];
    int hidden = word_emb_dims[1];

    const unsigned tpb = 256;
    const dim3 grid(seq_len, batch, 1);
    const dim3 block(tpb, 1, 1);
    EmbEltwiseLayernormKernel<T, tpb><<<grid, block, 0, device_ctx.stream()>>>(
        hidden, word_id_d, pos_id_d, sent_id_d, scale_d, bias_d, word_emb_d,
        pos_emb_d, sent_emb_d, output_d, eps);
  }
};

}  // namespace operators
}  // namespace paddle

namespace ops = paddle::operators;
REGISTER_OP_CUDA_KERNEL(
    fused_embedding_eltwise_layernorm,
    ops::EmbeddingEltWiseLayerNormKernel<paddle::platform::CUDADeviceContext,
                                         float>);
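For reference, the per-token math the fused kernel above computes can be written out on the host. This is an illustrative sketch only: the function name and the flat row-major layout of the embedding tables are assumptions, not part of the Paddle operator. Each CUDA block handles one token; the two running sums in `thread_data` correspond to `mu` and `sq` below.

// Host-side reference for what EmbEltwiseLayernormKernel computes per token:
//   out = (sum_of_three_embedding_rows - mean) / sqrt(var + eps) * scale + bias
#include <cmath>
#include <cstdint>

void EmbEltwiseLayerNormRef(const int64_t* word_id, const int64_t* pos_id,
                            const int64_t* sent_id, const float* word_emb,
                            const float* pos_emb, const float* sent_emb,
                            const float* scale, const float* bias, float* out,
                            int64_t num_tokens, int hidden, float eps) {
  for (int64_t t = 0; t < num_tokens; ++t) {
    float* row = out + t * hidden;
    // 1) sum the word, position, and sentence embedding rows
    for (int i = 0; i < hidden; ++i)
      row[i] = word_emb[word_id[t] * hidden + i] +
               pos_emb[pos_id[t] * hidden + i] +
               sent_emb[sent_id[t] * hidden + i];
    // 2) mean and E[x^2] over the hidden dimension
    float mu = 0.f, sq = 0.f;
    for (int i = 0; i < hidden; ++i) { mu += row[i]; sq += row[i] * row[i]; }
    mu /= hidden;
    sq /= hidden;
    const float rsigma = 1.f / std::sqrt(sq - mu * mu + eps);
    // 3) normalize, then apply per-channel scale (gamma) and bias (beta)
    for (int i = 0; i < hidden; ++i)
      row[i] = scale[i] * (row[i] - mu) * rsigma + bias[i];
  }
}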
f37d4298d62b2bcf410200b7f63e0f543d2b7501.hip
// !!! This is a file automatically generated by hipify!!! /* ----------------------------------------------------------------- * Programmer(s): Slaven Peles, Cody J. Balos @ LLNL * ----------------------------------------------------------------- * SUNDIALS Copyright Start * Copyright (c) 2002-2020, Lawrence Livermore National Security * and Southern Methodist University. * All rights reserved. * * See the top-level LICENSE and NOTICE files for details. * * SPDX-License-Identifier: BSD-3-Clause * SUNDIALS Copyright End * ----------------------------------------------------------------- * This is the implementation file for a RAJA+CUDA implementation * of the NVECTOR package. * -----------------------------------------------------------------*/ #include <stdio.h> #include <stdlib.h> #include <nvector/raja/Vector.hpp> #include <RAJA/RAJA.hpp> #define ZERO RCONST(0.0) #define HALF RCONST(0.5) #define ONE RCONST(1.0) #define ONEPT5 RCONST(1.5) // RAJA defines #define CUDA_BLOCK_SIZE 256 #define RAJA_NODE_TYPE RAJA::cuda_exec< CUDA_BLOCK_SIZE > #define RAJA_REDUCE_TYPE RAJA::cuda_reduce #define RAJA_LAMBDA [=] __device__ extern "C" { using namespace sunrajavec; // Type defines typedef sunrajavec::Vector<realtype, sunindextype> vector_type; // Static constants static constexpr sunindextype zeroIdx = 0; /* ---------------------------------------------------------------- * Returns vector type ID. Used to identify vector implementation * from abstract N_Vector interface. */ N_Vector_ID N_VGetVectorID_Raja(N_Vector v) { return SUNDIALS_NVEC_RAJA; } N_Vector N_VNewEmpty_Raja() { N_Vector v; /* Create an empty vector object */ v = NULL; v = N_VNewEmpty(); if (v == NULL) return(NULL); /* Attach operations */ /* constructors, destructors, and utility operations */ v->ops->nvgetvectorid = N_VGetVectorID_Raja; v->ops->nvclone = N_VClone_Raja; v->ops->nvcloneempty = N_VCloneEmpty_Raja; v->ops->nvdestroy = N_VDestroy_Raja; v->ops->nvspace = N_VSpace_Raja; v->ops->nvgetlength = N_VGetLength_Raja; /* standard vector operations */ v->ops->nvlinearsum = N_VLinearSum_Raja; v->ops->nvconst = N_VConst_Raja; v->ops->nvprod = N_VProd_Raja; v->ops->nvdiv = N_VDiv_Raja; v->ops->nvscale = N_VScale_Raja; v->ops->nvabs = N_VAbs_Raja; v->ops->nvinv = N_VInv_Raja; v->ops->nvaddconst = N_VAddConst_Raja; v->ops->nvdotprod = N_VDotProd_Raja; v->ops->nvmaxnorm = N_VMaxNorm_Raja; v->ops->nvmin = N_VMin_Raja; v->ops->nvl1norm = N_VL1Norm_Raja; v->ops->nvinvtest = N_VInvTest_Raja; v->ops->nvconstrmask = N_VConstrMask_Raja; v->ops->nvminquotient = N_VMinQuotient_Raja; v->ops->nvwrmsnormmask = N_VWrmsNormMask_Raja; v->ops->nvwrmsnorm = N_VWrmsNorm_Raja; v->ops->nvwl2norm = N_VWL2Norm_Raja; v->ops->nvcompare = N_VCompare_Raja; /* fused and vector array operations are disabled (NULL) by default */ /* local reduction operations */ v->ops->nvwsqrsumlocal = N_VWSqrSumLocal_Raja; v->ops->nvwsqrsummasklocal = N_VWSqrSumMaskLocal_Raja; v->ops->nvdotprodlocal = N_VDotProd_Raja; v->ops->nvmaxnormlocal = N_VMaxNorm_Raja; v->ops->nvminlocal = N_VMin_Raja; v->ops->nvl1normlocal = N_VL1Norm_Raja; v->ops->nvinvtestlocal = N_VInvTest_Raja; v->ops->nvconstrmasklocal = N_VConstrMask_Raja; v->ops->nvminquotientlocal = N_VMinQuotient_Raja; return(v); } N_Vector N_VNew_Raja(sunindextype length) { N_Vector v; v = NULL; v = N_VNewEmpty_Raja(); if (v == NULL) return(NULL); v->content = new vector_type(length); return(v); } N_Vector N_VMake_Raja(N_VectorContent_Raja c) { N_Vector v; vector_type* x = static_cast<vector_type*>(c); sunindextype length = 
x->size(); v = NULL; v = N_VNewEmpty_Raja(); if (v == NULL) return(NULL); v->content = c; return(v); } /* ----------------------------------------------------------------- * Function to return the global length of the vector. */ sunindextype N_VGetLength_Raja(N_Vector v) { vector_type* xd = static_cast<vector_type*>(v->content); return xd->size(); } /* ---------------------------------------------------------------------------- * Return pointer to the raw host data */ realtype *N_VGetHostArrayPointer_Raja(N_Vector x) { vector_type* xv = static_cast<vector_type*>(x->content); return (xv->host()); } /* ---------------------------------------------------------------------------- * Return pointer to the raw device data */ realtype *N_VGetDeviceArrayPointer_Raja(N_Vector x) { vector_type* xv = static_cast<vector_type*>(x->content); return (xv->device()); } /* ---------------------------------------------------------------------------- * Copy vector data to the device */ void N_VCopyToDevice_Raja(N_Vector x) { vector_type* xv = static_cast<vector_type*>(x->content); xv->copyToDev(); } /* ---------------------------------------------------------------------------- * Copy vector data from the device to the host */ void N_VCopyFromDevice_Raja(N_Vector x) { vector_type* xv = static_cast<vector_type*>(x->content); xv->copyFromDev(); } /* ---------------------------------------------------------------------------- * Function to print the a serial vector to stdout */ void N_VPrint_Raja(N_Vector X) { N_VPrintFile_Raja(X, stdout); } /* ---------------------------------------------------------------------------- * Function to print the a serial vector to outfile */ void N_VPrintFile_Raja(N_Vector X, FILE *outfile) { const realtype *xd = N_VGetDeviceArrayPointer_Raja(X); const sunindextype N = N_VGetLength_Raja(X); sunindextype i; for (i = 0; i < N; ++i) { #if defined(SUNDIALS_EXTENDED_PRECISION) fprintf(outfile, "%35.32Lg\n", xd[i]); #elif defined(SUNDIALS_DOUBLE_PRECISION) fprintf(outfile, "%19.16g\n", xd[i]); #else fprintf(outfile, "%11.8g\n", xd[i]); #endif } fprintf(outfile, "\n"); return; } /* * ----------------------------------------------------------------- * implementation of vector operations * ----------------------------------------------------------------- */ N_Vector N_VCloneEmpty_Raja(N_Vector w) { N_Vector v; if (w == NULL) return(NULL); /* Create vector */ v = NULL; v = N_VNewEmpty(); if (v == NULL) return(NULL); /* Attach operations */ if (N_VCopyOps(w, v)) { N_VDestroy(v); return(NULL); } return(v); } N_Vector N_VClone_Raja(N_Vector w) { N_Vector v; v = NULL; v = N_VCloneEmpty_Raja(w); if (v == NULL) return(NULL); vector_type* wdat = static_cast<vector_type*>(w->content); vector_type* vdat = new vector_type(*wdat); v->content = vdat; return(v); } void N_VDestroy_Raja(N_Vector v) { if (v == NULL) return; vector_type* x = static_cast<vector_type*>(v->content); if (x != NULL) { delete x; v->content = NULL; } /* free ops and vector */ if (v->ops != NULL) { free(v->ops); v->ops = NULL; } free(v); v = NULL; return; } void N_VSpace_Raja(N_Vector X, sunindextype *lrw, sunindextype *liw) { *lrw = N_VGetLength_Raja(X); *liw = 2; } void N_VConst_Raja(realtype c, N_Vector Z) { const sunindextype N = N_VGetLength_Raja(Z); realtype *zdata = N_VGetDeviceArrayPointer_Raja(Z); RAJA::forall< RAJA_NODE_TYPE >(RAJA::RangeSegment(zeroIdx, N), RAJA_LAMBDA(sunindextype i) { zdata[i] = c; }); } void N_VLinearSum_Raja(realtype a, N_Vector X, realtype b, N_Vector Y, N_Vector Z) { const realtype *xdata = 
N_VGetDeviceArrayPointer_Raja(X); const realtype *ydata = N_VGetDeviceArrayPointer_Raja(Y); const sunindextype N = N_VGetLength_Raja(X); realtype *zdata = N_VGetDeviceArrayPointer_Raja(Z); RAJA::forall< RAJA_NODE_TYPE >(RAJA::RangeSegment(zeroIdx, N), RAJA_LAMBDA(sunindextype i) { zdata[i] = a*xdata[i] + b*ydata[i]; } ); } void N_VProd_Raja(N_Vector X, N_Vector Y, N_Vector Z) { const realtype *xdata = N_VGetDeviceArrayPointer_Raja(X); const realtype *ydata = N_VGetDeviceArrayPointer_Raja(Y); const sunindextype N = N_VGetLength_Raja(X); realtype *zdata = N_VGetDeviceArrayPointer_Raja(Z); RAJA::forall< RAJA_NODE_TYPE >(RAJA::RangeSegment(zeroIdx, N), RAJA_LAMBDA(sunindextype i) { zdata[i] = xdata[i] * ydata[i]; } ); } void N_VDiv_Raja(N_Vector X, N_Vector Y, N_Vector Z) { const realtype *xdata = N_VGetDeviceArrayPointer_Raja(X); const realtype *ydata = N_VGetDeviceArrayPointer_Raja(Y); const sunindextype N = N_VGetLength_Raja(X); realtype *zdata = N_VGetDeviceArrayPointer_Raja(Z); RAJA::forall< RAJA_NODE_TYPE >(RAJA::RangeSegment(zeroIdx, N), RAJA_LAMBDA(sunindextype i) { zdata[i] = xdata[i] / ydata[i]; } ); } void N_VScale_Raja(realtype c, N_Vector X, N_Vector Z) { const realtype *xdata = N_VGetDeviceArrayPointer_Raja(X); const sunindextype N = N_VGetLength_Raja(X); realtype *zdata = N_VGetDeviceArrayPointer_Raja(Z); RAJA::forall<RAJA_NODE_TYPE >(RAJA::RangeSegment(zeroIdx, N), RAJA_LAMBDA(sunindextype i) { zdata[i] = c * xdata[i]; } ); } void N_VAbs_Raja(N_Vector X, N_Vector Z) { const realtype *xdata = N_VGetDeviceArrayPointer_Raja(X); const sunindextype N = N_VGetLength_Raja(X); realtype *zdata = N_VGetDeviceArrayPointer_Raja(Z); RAJA::forall<RAJA_NODE_TYPE >(RAJA::RangeSegment(zeroIdx, N), RAJA_LAMBDA(sunindextype i) { zdata[i] = abs(xdata[i]); } ); } void N_VInv_Raja(N_Vector X, N_Vector Z) { const realtype *xdata = N_VGetDeviceArrayPointer_Raja(X); const sunindextype N = N_VGetLength_Raja(X); realtype *zdata = N_VGetDeviceArrayPointer_Raja(Z); RAJA::forall<RAJA_NODE_TYPE >(RAJA::RangeSegment(zeroIdx, N), RAJA_LAMBDA(sunindextype i) { zdata[i] = ONE / xdata[i]; } ); } void N_VAddConst_Raja(N_Vector X, realtype b, N_Vector Z) { const realtype *xdata = N_VGetDeviceArrayPointer_Raja(X); const sunindextype N = N_VGetLength_Raja(X); realtype *zdata = N_VGetDeviceArrayPointer_Raja(Z); RAJA::forall< RAJA_NODE_TYPE >(RAJA::RangeSegment(zeroIdx, N), RAJA_LAMBDA(sunindextype i) { zdata[i] = xdata[i] + b; } ); } realtype N_VDotProd_Raja(N_Vector X, N_Vector Y) { const realtype *xdata = N_VGetDeviceArrayPointer_Raja(X); const realtype *ydata = N_VGetDeviceArrayPointer_Raja(Y); const sunindextype N = N_VGetLength_Raja(X); RAJA::ReduceSum< RAJA_REDUCE_TYPE, realtype> gpu_result(0.0); RAJA::forall< RAJA_NODE_TYPE >(RAJA::RangeSegment(zeroIdx, N), RAJA_LAMBDA(sunindextype i) { gpu_result += xdata[i] * ydata[i] ; } ); return(static_cast<realtype>(gpu_result)); } realtype N_VMaxNorm_Raja(N_Vector X) { const realtype *xdata = N_VGetDeviceArrayPointer_Raja(X); const sunindextype N = N_VGetLength_Raja(X); RAJA::ReduceMax< RAJA_REDUCE_TYPE, realtype> gpu_result(0.0); RAJA::forall< RAJA_NODE_TYPE >(RAJA::RangeSegment(zeroIdx, N), RAJA_LAMBDA(sunindextype i) { gpu_result.max(abs(xdata[i])); } ); return(static_cast<realtype>(gpu_result)); } realtype N_VWSqrSumLocal_Raja(N_Vector X, N_Vector W) { const realtype *xdata = N_VGetDeviceArrayPointer_Raja(X); const realtype *wdata = N_VGetDeviceArrayPointer_Raja(W); const sunindextype N = N_VGetLength_Raja(X); RAJA::ReduceSum< RAJA_REDUCE_TYPE, realtype> 
gpu_result(0.0); RAJA::forall< RAJA_NODE_TYPE >(RAJA::RangeSegment(zeroIdx, N), RAJA_LAMBDA(sunindextype i) { gpu_result += (xdata[i] * wdata[i] * xdata[i] * wdata[i]); } ); return(static_cast<realtype>(gpu_result)); } realtype N_VWrmsNorm_Raja(N_Vector X, N_Vector W) { const realtype sum = N_VWSqrSumLocal_Raja(X, W); const sunindextype N = N_VGetLength_Raja(X); return std::sqrt(sum/N); } realtype N_VWSqrSumMaskLocal_Raja(N_Vector X, N_Vector W, N_Vector ID) { const realtype *xdata = N_VGetDeviceArrayPointer_Raja(X); const realtype *wdata = N_VGetDeviceArrayPointer_Raja(W); const realtype *iddata = N_VGetDeviceArrayPointer_Raja(ID); const sunindextype N = N_VGetLength_Raja(X); RAJA::ReduceSum< RAJA_REDUCE_TYPE, realtype> gpu_result(0.0); RAJA::forall< RAJA_NODE_TYPE >(RAJA::RangeSegment(zeroIdx, N), RAJA_LAMBDA(sunindextype i) { if (iddata[i] > ZERO) gpu_result += (xdata[i] * wdata[i] * xdata[i] * wdata[i]); } ); return(static_cast<realtype>(gpu_result)); } realtype N_VWrmsNormMask_Raja(N_Vector X, N_Vector W, N_Vector ID) { const realtype sum = N_VWSqrSumMaskLocal_Raja(X, W, ID); const sunindextype N = N_VGetLength_Raja(X); return std::sqrt(sum/N); } realtype N_VMin_Raja(N_Vector X) { const realtype *xdata = N_VGetDeviceArrayPointer_Raja(X); const sunindextype N = N_VGetLength_Raja(X); RAJA::ReduceMin< RAJA_REDUCE_TYPE, realtype> gpu_result(std::numeric_limits<realtype>::max()); RAJA::forall< RAJA_NODE_TYPE >(RAJA::RangeSegment(zeroIdx, N), RAJA_LAMBDA(sunindextype i) { gpu_result.min(xdata[i]); } ); return(static_cast<realtype>(gpu_result)); } realtype N_VWL2Norm_Raja(N_Vector X, N_Vector W) { return std::sqrt(N_VWSqrSumLocal_Raja(X, W)); } realtype N_VL1Norm_Raja(N_Vector X) { const realtype *xdata = N_VGetDeviceArrayPointer_Raja(X); const sunindextype N = N_VGetLength_Raja(X); RAJA::ReduceSum< RAJA_REDUCE_TYPE, realtype> gpu_result(0.0); RAJA::forall< RAJA_NODE_TYPE >(RAJA::RangeSegment(zeroIdx, N), RAJA_LAMBDA(sunindextype i) { gpu_result += (abs(xdata[i])); } ); return(static_cast<realtype>(gpu_result)); } void N_VCompare_Raja(realtype c, N_Vector X, N_Vector Z) { const realtype *xdata = N_VGetDeviceArrayPointer_Raja(X); const sunindextype N = N_VGetLength_Raja(X); realtype *zdata = N_VGetDeviceArrayPointer_Raja(Z); RAJA::forall< RAJA_NODE_TYPE >(RAJA::RangeSegment(zeroIdx, N), RAJA_LAMBDA(sunindextype i) { zdata[i] = abs(xdata[i]) >= c ? ONE : ZERO; } ); } booleantype N_VInvTest_Raja(N_Vector x, N_Vector z) { const realtype *xdata = N_VGetDeviceArrayPointer_Raja(x); const sunindextype N = N_VGetLength_Raja(x); realtype *zdata = N_VGetDeviceArrayPointer_Raja(z); RAJA::ReduceSum< RAJA_REDUCE_TYPE, realtype> gpu_result(ZERO); RAJA::forall< RAJA_NODE_TYPE >(RAJA::RangeSegment(zeroIdx, N), RAJA_LAMBDA(sunindextype i) { if (xdata[i] == ZERO) { gpu_result += ONE; } else { zdata[i] = ONE/xdata[i]; } } ); realtype minimum = static_cast<realtype>(gpu_result); return (minimum < HALF); } booleantype N_VConstrMask_Raja(N_Vector c, N_Vector x, N_Vector m) { const realtype *cdata = N_VGetDeviceArrayPointer_Raja(c); const realtype *xdata = N_VGetDeviceArrayPointer_Raja(x); const sunindextype N = N_VGetLength_Raja(x); realtype *mdata = N_VGetDeviceArrayPointer_Raja(m); RAJA::ReduceSum< RAJA_REDUCE_TYPE, realtype> gpu_result(ZERO); RAJA::forall< RAJA_NODE_TYPE >(RAJA::RangeSegment(zeroIdx, N), RAJA_LAMBDA(sunindextype i) { bool test = (abs(cdata[i]) > ONEPT5 && cdata[i]*xdata[i] <= ZERO) || (abs(cdata[i]) > HALF && cdata[i]*xdata[i] < ZERO); mdata[i] = test ? 
ONE : ZERO; gpu_result += mdata[i]; } ); realtype sum = static_cast<realtype>(gpu_result); return(sum < HALF); } realtype N_VMinQuotient_Raja(N_Vector num, N_Vector denom) { const realtype *ndata = N_VGetDeviceArrayPointer_Raja(num); const realtype *ddata = N_VGetDeviceArrayPointer_Raja(denom); const sunindextype N = N_VGetLength_Raja(num); RAJA::ReduceMin< RAJA_REDUCE_TYPE, realtype> gpu_result(std::numeric_limits<realtype>::max()); RAJA::forall< RAJA_NODE_TYPE >(RAJA::RangeSegment(zeroIdx, N), RAJA_LAMBDA(sunindextype i) { if (ddata[i] != ZERO) gpu_result.min(ndata[i]/ddata[i]); } ); return(static_cast<realtype>(gpu_result)); } /* * ----------------------------------------------------------------------------- * fused vector operations * ----------------------------------------------------------------------------- */ int N_VLinearCombination_Raja(int nvec, realtype* c, N_Vector* X, N_Vector z) { hipError_t err; sunindextype N = N_VGetLength_Raja(z); realtype* d_zd = N_VGetDeviceArrayPointer_Raja(z); // Copy c array to device realtype* d_c; err = hipMalloc((void**) &d_c, nvec*sizeof(realtype)); if (err != hipSuccess) return hipGetLastError(); err = hipMemcpy(d_c, c, nvec*sizeof(realtype), hipMemcpyHostToDevice); if (err != hipSuccess) return hipGetLastError(); // Create array of device pointers on host realtype** h_Xd = new realtype*[nvec]; for (int j=0; j<nvec; j++) h_Xd[j] = N_VGetDeviceArrayPointer_Raja(X[j]); // Copy array of device pointers to device from host realtype** d_Xd; err = hipMalloc((void**) &d_Xd, nvec*sizeof(realtype*)); if (err != hipSuccess) return hipGetLastError(); err = hipMemcpy(d_Xd, h_Xd, nvec*sizeof(realtype*), hipMemcpyHostToDevice); if (err != hipSuccess) return hipGetLastError(); RAJA::forall< RAJA_NODE_TYPE >(RAJA::RangeSegment(zeroIdx, N), RAJA_LAMBDA(sunindextype i) { d_zd[i] = d_c[0] * d_Xd[0][i]; for (int j=1; j<nvec; j++) d_zd[i] += d_c[j] * d_Xd[j][i]; } ); // Free host array delete[] h_Xd; // Free device arrays err = hipFree(d_c); if (err != hipSuccess) return hipGetLastError(); err = hipFree(d_Xd); if (err != hipSuccess) return hipGetLastError(); return(0); } int N_VScaleAddMulti_Raja(int nvec, realtype* c, N_Vector x, N_Vector* Y, N_Vector* Z) { hipError_t err; sunindextype N = N_VGetLength_Raja(x); realtype* d_xd = N_VGetDeviceArrayPointer_Raja(x); // Copy c array to device realtype* d_c; err = hipMalloc((void**) &d_c, nvec*sizeof(realtype)); if (err != hipSuccess) return hipGetLastError(); err = hipMemcpy(d_c, c, nvec*sizeof(realtype), hipMemcpyHostToDevice); if (err != hipSuccess) return hipGetLastError(); // Create array of device pointers on host realtype** h_Yd = new realtype*[nvec]; for (int j=0; j<nvec; j++) h_Yd[j] = N_VGetDeviceArrayPointer_Raja(Y[j]); realtype** h_Zd = new realtype*[nvec]; for (int j=0; j<nvec; j++) h_Zd[j] = N_VGetDeviceArrayPointer_Raja(Z[j]); // Copy array of device pointers to device from host realtype** d_Yd; err = hipMalloc((void**) &d_Yd, nvec*sizeof(realtype*)); if (err != hipSuccess) return hipGetLastError(); err = hipMemcpy(d_Yd, h_Yd, nvec*sizeof(realtype*), hipMemcpyHostToDevice); if (err != hipSuccess) return hipGetLastError(); realtype** d_Zd; err = hipMalloc((void**) &d_Zd, nvec*sizeof(realtype*)); if (err != hipSuccess) return hipGetLastError(); err = hipMemcpy(d_Zd, h_Zd, nvec*sizeof(realtype*), hipMemcpyHostToDevice); if (err != hipSuccess) return hipGetLastError(); RAJA::forall< RAJA_NODE_TYPE >(RAJA::RangeSegment(zeroIdx, N), RAJA_LAMBDA(sunindextype i) { for (int j=0; j<nvec; j++) d_Zd[j][i] = d_c[j] * 
d_xd[i] + d_Yd[j][i]; } ); // Free host array delete[] h_Yd; delete[] h_Zd; // Free device arrays err = hipFree(d_c); if (err != hipSuccess) return hipGetLastError(); err = hipFree(d_Yd); if (err != hipSuccess) return hipGetLastError(); err = hipFree(d_Zd); if (err != hipSuccess) return hipGetLastError(); return(0); } /* * ----------------------------------------------------------------------------- * vector array operations * ----------------------------------------------------------------------------- */ int N_VLinearSumVectorArray_Raja(int nvec, realtype a, N_Vector* X, realtype b, N_Vector* Y, N_Vector* Z) { hipError_t err; sunindextype N = N_VGetLength_Raja(Z[0]); // Create array of device pointers on host realtype** h_Xd = new realtype*[nvec]; for (int j=0; j<nvec; j++) h_Xd[j] = N_VGetDeviceArrayPointer_Raja(X[j]); realtype** h_Yd = new realtype*[nvec]; for (int j=0; j<nvec; j++) h_Yd[j] = N_VGetDeviceArrayPointer_Raja(Y[j]); realtype** h_Zd = new realtype*[nvec]; for (int j=0; j<nvec; j++) h_Zd[j] = N_VGetDeviceArrayPointer_Raja(Z[j]); // Copy array of device pointers to device from host realtype** d_Xd; err = hipMalloc((void**) &d_Xd, nvec*sizeof(realtype*)); if (err != hipSuccess) return hipGetLastError(); err = hipMemcpy(d_Xd, h_Xd, nvec*sizeof(realtype*), hipMemcpyHostToDevice); if (err != hipSuccess) return hipGetLastError(); realtype** d_Yd; err = hipMalloc((void**) &d_Yd, nvec*sizeof(realtype*)); if (err != hipSuccess) return hipGetLastError(); err = hipMemcpy(d_Yd, h_Yd, nvec*sizeof(realtype*), hipMemcpyHostToDevice); if (err != hipSuccess) return hipGetLastError(); realtype** d_Zd; err = hipMalloc((void**) &d_Zd, nvec*sizeof(realtype*)); if (err != hipSuccess) return hipGetLastError(); err = hipMemcpy(d_Zd, h_Zd, nvec*sizeof(realtype*), hipMemcpyHostToDevice); if (err != hipSuccess) return hipGetLastError(); RAJA::forall< RAJA_NODE_TYPE >(RAJA::RangeSegment(zeroIdx, N), RAJA_LAMBDA(sunindextype i) { for (int j=0; j<nvec; j++) d_Zd[j][i] = a * d_Xd[j][i] + b * d_Yd[j][i]; } ); // Free host array delete[] h_Xd; delete[] h_Yd; delete[] h_Zd; // Free device arrays err = hipFree(d_Xd); if (err != hipSuccess) return hipGetLastError(); err = hipFree(d_Yd); if (err != hipSuccess) return hipGetLastError(); err = hipFree(d_Zd); if (err != hipSuccess) return hipGetLastError(); return(0); } int N_VScaleVectorArray_Raja(int nvec, realtype* c, N_Vector* X, N_Vector* Z) { hipError_t err; sunindextype N = N_VGetLength_Raja(Z[0]); // Copy c array to device realtype* d_c; err = hipMalloc((void**) &d_c, nvec*sizeof(realtype)); if (err != hipSuccess) return hipGetLastError(); err = hipMemcpy(d_c, c, nvec*sizeof(realtype), hipMemcpyHostToDevice); if (err != hipSuccess) return hipGetLastError(); // Create array of device pointers on host realtype** h_Xd = new realtype*[nvec]; for (int j=0; j<nvec; j++) h_Xd[j] = N_VGetDeviceArrayPointer_Raja(X[j]); realtype** h_Zd = new realtype*[nvec]; for (int j=0; j<nvec; j++) h_Zd[j] = N_VGetDeviceArrayPointer_Raja(Z[j]); // Copy array of device pointers to device from host realtype** d_Xd; err = hipMalloc((void**) &d_Xd, nvec*sizeof(realtype*)); if (err != hipSuccess) return hipGetLastError(); err = hipMemcpy(d_Xd, h_Xd, nvec*sizeof(realtype*), hipMemcpyHostToDevice); if (err != hipSuccess) return hipGetLastError(); realtype** d_Zd; err = hipMalloc((void**) &d_Zd, nvec*sizeof(realtype*)); if (err != hipSuccess) return hipGetLastError(); err = hipMemcpy(d_Zd, h_Zd, nvec*sizeof(realtype*), hipMemcpyHostToDevice); if (err != hipSuccess) return 
hipGetLastError(); RAJA::forall< RAJA_NODE_TYPE >(RAJA::RangeSegment(zeroIdx, N), RAJA_LAMBDA(sunindextype i) { for (int j=0; j<nvec; j++) d_Zd[j][i] = d_c[j] * d_Xd[j][i]; } ); // Free host array delete[] h_Xd; delete[] h_Zd; // Free device arrays err = hipFree(d_Xd); if (err != hipSuccess) return hipGetLastError(); err = hipFree(d_Zd); if (err != hipSuccess) return hipGetLastError(); return(0); } int N_VConstVectorArray_Raja(int nvec, realtype c, N_Vector* Z) { hipError_t err; sunindextype N = N_VGetLength_Raja(Z[0]); // Create array of device pointers on host realtype** h_Zd = new realtype*[nvec]; for (int j=0; j<nvec; j++) h_Zd[j] = N_VGetDeviceArrayPointer_Raja(Z[j]); // Copy array of device pointers to device from host realtype** d_Zd; err = hipMalloc((void**) &d_Zd, nvec*sizeof(realtype*)); if (err != hipSuccess) return hipGetLastError(); err = hipMemcpy(d_Zd, h_Zd, nvec*sizeof(realtype*), hipMemcpyHostToDevice); if (err != hipSuccess) return hipGetLastError(); RAJA::forall< RAJA_NODE_TYPE >(RAJA::RangeSegment(zeroIdx, N), RAJA_LAMBDA(sunindextype i) { for (int j=0; j<nvec; j++) d_Zd[j][i] = c; } ); // Free host array delete[] h_Zd; // Free device arrays err = hipFree(d_Zd); if (err != hipSuccess) return hipGetLastError(); return(0); } int N_VScaleAddMultiVectorArray_Raja(int nvec, int nsum, realtype* c, N_Vector* X, N_Vector** Y, N_Vector** Z) { hipError_t err; sunindextype N = N_VGetLength_Raja(X[0]); // Copy c array to device realtype* d_c; err = hipMalloc((void**) &d_c, nsum*sizeof(realtype)); if (err != hipSuccess) return hipGetLastError(); err = hipMemcpy(d_c, c, nsum*sizeof(realtype), hipMemcpyHostToDevice); if (err != hipSuccess) return hipGetLastError(); // Create array of device pointers on host realtype** h_Xd = new realtype*[nvec]; for (int j=0; j<nvec; j++) h_Xd[j] = N_VGetDeviceArrayPointer_Raja(X[j]); realtype** h_Yd = new realtype*[nsum*nvec]; for (int j=0; j<nvec; j++) for (int k=0; k<nsum; k++) h_Yd[j*nsum+k] = N_VGetDeviceArrayPointer_Raja(Y[k][j]); realtype** h_Zd = new realtype*[nsum*nvec]; for (int j=0; j<nvec; j++) for (int k=0; k<nsum; k++) h_Zd[j*nsum+k] = N_VGetDeviceArrayPointer_Raja(Z[k][j]); // Copy array of device pointers to device from host realtype** d_Xd; err = hipMalloc((void**) &d_Xd, nvec*sizeof(realtype*)); if (err != hipSuccess) return hipGetLastError(); err = hipMemcpy(d_Xd, h_Xd, nvec*sizeof(realtype*), hipMemcpyHostToDevice); if (err != hipSuccess) return hipGetLastError(); realtype** d_Yd; err = hipMalloc((void**) &d_Yd, nsum*nvec*sizeof(realtype*)); if (err != hipSuccess) return hipGetLastError(); err = hipMemcpy(d_Yd, h_Yd, nsum*nvec*sizeof(realtype*), hipMemcpyHostToDevice); if (err != hipSuccess) return hipGetLastError(); realtype** d_Zd; err = hipMalloc((void**) &d_Zd, nsum*nvec*sizeof(realtype*)); if (err != hipSuccess) return hipGetLastError(); err = hipMemcpy(d_Zd, h_Zd, nsum*nvec*sizeof(realtype*), hipMemcpyHostToDevice); if (err != hipSuccess) return hipGetLastError(); RAJA::forall< RAJA_NODE_TYPE >(RAJA::RangeSegment(zeroIdx, N), RAJA_LAMBDA(sunindextype i) { for (int j=0; j<nvec; j++) for (int k=0; k<nsum; k++) d_Zd[j*nsum+k][i] = d_c[k] * d_Xd[j][i] + d_Yd[j*nsum+k][i]; } ); // Free host array delete[] h_Xd; delete[] h_Yd; delete[] h_Zd; // Free device arrays err = hipFree(d_Xd); if (err != hipSuccess) return hipGetLastError(); err = hipFree(d_Yd); if (err != hipSuccess) return hipGetLastError(); err = hipFree(d_Zd); if (err != hipSuccess) return hipGetLastError(); return(0); } int N_VLinearCombinationVectorArray_Raja(int nvec, 
int nsum, realtype* c, N_Vector** X, N_Vector* Z) { hipError_t err; sunindextype N = N_VGetLength_Raja(Z[0]); // Copy c array to device realtype* d_c; err = hipMalloc((void**) &d_c, nsum*sizeof(realtype)); if (err != hipSuccess) return hipGetLastError(); err = hipMemcpy(d_c, c, nsum*sizeof(realtype), hipMemcpyHostToDevice); if (err != hipSuccess) return hipGetLastError(); // Create array of device pointers on host realtype** h_Xd = new realtype*[nsum*nvec]; for (int j=0; j<nvec; j++) for (int k=0; k<nsum; k++) h_Xd[j*nsum+k] = N_VGetDeviceArrayPointer_Raja(X[k][j]); realtype** h_Zd = new realtype*[nvec]; for (int j=0; j<nvec; j++) h_Zd[j] = N_VGetDeviceArrayPointer_Raja(Z[j]); // Copy array of device pointers to device from host realtype** d_Xd; err = hipMalloc((void**) &d_Xd, nsum*nvec*sizeof(realtype*)); if (err != hipSuccess) return hipGetLastError(); err = hipMemcpy(d_Xd, h_Xd, nsum*nvec*sizeof(realtype*), hipMemcpyHostToDevice); if (err != hipSuccess) return hipGetLastError(); realtype** d_Zd; err = hipMalloc((void**) &d_Zd, nvec*sizeof(realtype*)); if (err != hipSuccess) return hipGetLastError(); err = hipMemcpy(d_Zd, h_Zd, nvec*sizeof(realtype*), hipMemcpyHostToDevice); if (err != hipSuccess) return hipGetLastError(); RAJA::forall< RAJA_NODE_TYPE >(RAJA::RangeSegment(zeroIdx, N), RAJA_LAMBDA(sunindextype i) { for (int j=0; j<nvec; j++) { d_Zd[j][i] = d_c[0] * d_Xd[j*nsum][i]; for (int k=1; k<nsum; k++) { d_Zd[j][i] += d_c[k] * d_Xd[j*nsum+k][i]; } } } ); // Free host array delete[] h_Xd; delete[] h_Zd; // Free device arrays err = hipFree(d_Xd); if (err != hipSuccess) return hipGetLastError(); err = hipFree(d_Zd); if (err != hipSuccess) return hipGetLastError(); return(0); } /* * ----------------------------------------------------------------- * Enable / Disable fused and vector array operations * ----------------------------------------------------------------- */ int N_VEnableFusedOps_Raja(N_Vector v, booleantype tf) { /* check that vector is non-NULL */ if (v == NULL) return(-1); /* check that ops structure is non-NULL */ if (v->ops == NULL) return(-1); if (tf) { /* enable all fused vector operations */ v->ops->nvlinearcombination = N_VLinearCombination_Raja; v->ops->nvscaleaddmulti = N_VScaleAddMulti_Raja; v->ops->nvdotprodmulti = NULL; /* enable all vector array operations */ v->ops->nvlinearsumvectorarray = N_VLinearSumVectorArray_Raja; v->ops->nvscalevectorarray = N_VScaleVectorArray_Raja; v->ops->nvconstvectorarray = N_VConstVectorArray_Raja; v->ops->nvwrmsnormvectorarray = NULL; v->ops->nvwrmsnormmaskvectorarray = NULL; v->ops->nvscaleaddmultivectorarray = N_VScaleAddMultiVectorArray_Raja; v->ops->nvlinearcombinationvectorarray = N_VLinearCombinationVectorArray_Raja; } else { /* disable all fused vector operations */ v->ops->nvlinearcombination = NULL; v->ops->nvscaleaddmulti = NULL; v->ops->nvdotprodmulti = NULL; /* disable all vector array operations */ v->ops->nvlinearsumvectorarray = NULL; v->ops->nvscalevectorarray = NULL; v->ops->nvconstvectorarray = NULL; v->ops->nvwrmsnormvectorarray = NULL; v->ops->nvwrmsnormmaskvectorarray = NULL; v->ops->nvscaleaddmultivectorarray = NULL; v->ops->nvlinearcombinationvectorarray = NULL; } /* return success */ return(0); } int N_VEnableLinearCombination_Raja(N_Vector v, booleantype tf) { /* check that vector is non-NULL */ if (v == NULL) return(-1); /* check that ops structure is non-NULL */ if (v->ops == NULL) return(-1); /* enable/disable operation */ if (tf) v->ops->nvlinearcombination = N_VLinearCombination_Raja; else 
v->ops->nvlinearcombination = NULL; /* return success */ return(0); } int N_VEnableScaleAddMulti_Raja(N_Vector v, booleantype tf) { /* check that vector is non-NULL */ if (v == NULL) return(-1); /* check that ops structure is non-NULL */ if (v->ops == NULL) return(-1); /* enable/disable operation */ if (tf) v->ops->nvscaleaddmulti = N_VScaleAddMulti_Raja; else v->ops->nvscaleaddmulti = NULL; /* return success */ return(0); } int N_VEnableLinearSumVectorArray_Raja(N_Vector v, booleantype tf) { /* check that vector is non-NULL */ if (v == NULL) return(-1); /* check that ops structure is non-NULL */ if (v->ops == NULL) return(-1); /* enable/disable operation */ if (tf) v->ops->nvlinearsumvectorarray = N_VLinearSumVectorArray_Raja; else v->ops->nvlinearsumvectorarray = NULL; /* return success */ return(0); } int N_VEnableScaleVectorArray_Raja(N_Vector v, booleantype tf) { /* check that vector is non-NULL */ if (v == NULL) return(-1); /* check that ops structure is non-NULL */ if (v->ops == NULL) return(-1); /* enable/disable operation */ if (tf) v->ops->nvscalevectorarray = N_VScaleVectorArray_Raja; else v->ops->nvscalevectorarray = NULL; /* return success */ return(0); } int N_VEnableConstVectorArray_Raja(N_Vector v, booleantype tf) { /* check that vector is non-NULL */ if (v == NULL) return(-1); /* check that ops structure is non-NULL */ if (v->ops == NULL) return(-1); /* enable/disable operation */ if (tf) v->ops->nvconstvectorarray = N_VConstVectorArray_Raja; else v->ops->nvconstvectorarray = NULL; /* return success */ return(0); } int N_VEnableScaleAddMultiVectorArray_Raja(N_Vector v, booleantype tf) { /* check that vector is non-NULL */ if (v == NULL) return(-1); /* check that ops structure is non-NULL */ if (v->ops == NULL) return(-1); /* enable/disable operation */ if (tf) v->ops->nvscaleaddmultivectorarray = N_VScaleAddMultiVectorArray_Raja; else v->ops->nvscaleaddmultivectorarray = NULL; /* return success */ return(0); } int N_VEnableLinearCombinationVectorArray_Raja(N_Vector v, booleantype tf) { /* check that vector is non-NULL */ if (v == NULL) return(-1); /* check that ops structure is non-NULL */ if (v->ops == NULL) return(-1); /* enable/disable operation */ if (tf) v->ops->nvlinearcombinationvectorarray = N_VLinearCombinationVectorArray_Raja; else v->ops->nvlinearcombinationvectorarray = NULL; /* return success */ return(0); } } // extern "C"
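The operations above implement the standard SUNDIALS N_Vector interface, so a vector created with N_VNew_Raja can be used directly with the per-vector kernels defined in this file. A minimal usage sketch follows; the public header name <nvector/nvector_raja.h> is assumed (it may differ by SUNDIALS version) and error checking is omitted.

#include <cstdio>
#include <nvector/nvector_raja.h>  // assumed public header for this module

int main() {
  const sunindextype N = 1000;

  N_Vector x = N_VNew_Raja(N);  // creates the vector and its device storage
  N_Vector y = N_VNew_Raja(N);

  N_VConst_Raja(2.0, x);                 // x[i] = 2 on the device
  N_VConst_Raja(3.0, y);                 // y[i] = 3 on the device
  N_VLinearSum_Raja(1.0, x, 1.0, y, y);  // y = x + y = 5, in place

  realtype dot = N_VDotProd_Raja(x, y);  // device-side RAJA reduction
  printf("dot = %g (expected %g)\n", (double)dot, 10.0 * (double)N);

  N_VDestroy_Raja(x);
  N_VDestroy_Raja(y);
  return 0;
}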
f37d4298d62b2bcf410200b7f63e0f543d2b7501.cu
/* ----------------------------------------------------------------- * Programmer(s): Slaven Peles, Cody J. Balos @ LLNL * ----------------------------------------------------------------- * SUNDIALS Copyright Start * Copyright (c) 2002-2020, Lawrence Livermore National Security * and Southern Methodist University. * All rights reserved. * * See the top-level LICENSE and NOTICE files for details. * * SPDX-License-Identifier: BSD-3-Clause * SUNDIALS Copyright End * ----------------------------------------------------------------- * This is the implementation file for a RAJA+CUDA implementation * of the NVECTOR package. * -----------------------------------------------------------------*/ #include <stdio.h> #include <stdlib.h> #include <nvector/raja/Vector.hpp> #include <RAJA/RAJA.hpp> #define ZERO RCONST(0.0) #define HALF RCONST(0.5) #define ONE RCONST(1.0) #define ONEPT5 RCONST(1.5) // RAJA defines #define CUDA_BLOCK_SIZE 256 #define RAJA_NODE_TYPE RAJA::cuda_exec< CUDA_BLOCK_SIZE > #define RAJA_REDUCE_TYPE RAJA::cuda_reduce #define RAJA_LAMBDA [=] __device__ extern "C" { using namespace sunrajavec; // Type defines typedef sunrajavec::Vector<realtype, sunindextype> vector_type; // Static constants static constexpr sunindextype zeroIdx = 0; /* ---------------------------------------------------------------- * Returns vector type ID. Used to identify vector implementation * from abstract N_Vector interface. */ N_Vector_ID N_VGetVectorID_Raja(N_Vector v) { return SUNDIALS_NVEC_RAJA; } N_Vector N_VNewEmpty_Raja() { N_Vector v; /* Create an empty vector object */ v = NULL; v = N_VNewEmpty(); if (v == NULL) return(NULL); /* Attach operations */ /* constructors, destructors, and utility operations */ v->ops->nvgetvectorid = N_VGetVectorID_Raja; v->ops->nvclone = N_VClone_Raja; v->ops->nvcloneempty = N_VCloneEmpty_Raja; v->ops->nvdestroy = N_VDestroy_Raja; v->ops->nvspace = N_VSpace_Raja; v->ops->nvgetlength = N_VGetLength_Raja; /* standard vector operations */ v->ops->nvlinearsum = N_VLinearSum_Raja; v->ops->nvconst = N_VConst_Raja; v->ops->nvprod = N_VProd_Raja; v->ops->nvdiv = N_VDiv_Raja; v->ops->nvscale = N_VScale_Raja; v->ops->nvabs = N_VAbs_Raja; v->ops->nvinv = N_VInv_Raja; v->ops->nvaddconst = N_VAddConst_Raja; v->ops->nvdotprod = N_VDotProd_Raja; v->ops->nvmaxnorm = N_VMaxNorm_Raja; v->ops->nvmin = N_VMin_Raja; v->ops->nvl1norm = N_VL1Norm_Raja; v->ops->nvinvtest = N_VInvTest_Raja; v->ops->nvconstrmask = N_VConstrMask_Raja; v->ops->nvminquotient = N_VMinQuotient_Raja; v->ops->nvwrmsnormmask = N_VWrmsNormMask_Raja; v->ops->nvwrmsnorm = N_VWrmsNorm_Raja; v->ops->nvwl2norm = N_VWL2Norm_Raja; v->ops->nvcompare = N_VCompare_Raja; /* fused and vector array operations are disabled (NULL) by default */ /* local reduction operations */ v->ops->nvwsqrsumlocal = N_VWSqrSumLocal_Raja; v->ops->nvwsqrsummasklocal = N_VWSqrSumMaskLocal_Raja; v->ops->nvdotprodlocal = N_VDotProd_Raja; v->ops->nvmaxnormlocal = N_VMaxNorm_Raja; v->ops->nvminlocal = N_VMin_Raja; v->ops->nvl1normlocal = N_VL1Norm_Raja; v->ops->nvinvtestlocal = N_VInvTest_Raja; v->ops->nvconstrmasklocal = N_VConstrMask_Raja; v->ops->nvminquotientlocal = N_VMinQuotient_Raja; return(v); } N_Vector N_VNew_Raja(sunindextype length) { N_Vector v; v = NULL; v = N_VNewEmpty_Raja(); if (v == NULL) return(NULL); v->content = new vector_type(length); return(v); } N_Vector N_VMake_Raja(N_VectorContent_Raja c) { N_Vector v; vector_type* x = static_cast<vector_type*>(c); sunindextype length = x->size(); v = NULL; v = N_VNewEmpty_Raja(); if (v == NULL) 
return(NULL); v->content = c; return(v); } /* ----------------------------------------------------------------- * Function to return the global length of the vector. */ sunindextype N_VGetLength_Raja(N_Vector v) { vector_type* xd = static_cast<vector_type*>(v->content); return xd->size(); } /* ---------------------------------------------------------------------------- * Return pointer to the raw host data */ realtype *N_VGetHostArrayPointer_Raja(N_Vector x) { vector_type* xv = static_cast<vector_type*>(x->content); return (xv->host()); } /* ---------------------------------------------------------------------------- * Return pointer to the raw device data */ realtype *N_VGetDeviceArrayPointer_Raja(N_Vector x) { vector_type* xv = static_cast<vector_type*>(x->content); return (xv->device()); } /* ---------------------------------------------------------------------------- * Copy vector data to the device */ void N_VCopyToDevice_Raja(N_Vector x) { vector_type* xv = static_cast<vector_type*>(x->content); xv->copyToDev(); } /* ---------------------------------------------------------------------------- * Copy vector data from the device to the host */ void N_VCopyFromDevice_Raja(N_Vector x) { vector_type* xv = static_cast<vector_type*>(x->content); xv->copyFromDev(); } /* ---------------------------------------------------------------------------- * Function to print the a serial vector to stdout */ void N_VPrint_Raja(N_Vector X) { N_VPrintFile_Raja(X, stdout); } /* ---------------------------------------------------------------------------- * Function to print the a serial vector to outfile */ void N_VPrintFile_Raja(N_Vector X, FILE *outfile) { const realtype *xd = N_VGetDeviceArrayPointer_Raja(X); const sunindextype N = N_VGetLength_Raja(X); sunindextype i; for (i = 0; i < N; ++i) { #if defined(SUNDIALS_EXTENDED_PRECISION) fprintf(outfile, "%35.32Lg\n", xd[i]); #elif defined(SUNDIALS_DOUBLE_PRECISION) fprintf(outfile, "%19.16g\n", xd[i]); #else fprintf(outfile, "%11.8g\n", xd[i]); #endif } fprintf(outfile, "\n"); return; } /* * ----------------------------------------------------------------- * implementation of vector operations * ----------------------------------------------------------------- */ N_Vector N_VCloneEmpty_Raja(N_Vector w) { N_Vector v; if (w == NULL) return(NULL); /* Create vector */ v = NULL; v = N_VNewEmpty(); if (v == NULL) return(NULL); /* Attach operations */ if (N_VCopyOps(w, v)) { N_VDestroy(v); return(NULL); } return(v); } N_Vector N_VClone_Raja(N_Vector w) { N_Vector v; v = NULL; v = N_VCloneEmpty_Raja(w); if (v == NULL) return(NULL); vector_type* wdat = static_cast<vector_type*>(w->content); vector_type* vdat = new vector_type(*wdat); v->content = vdat; return(v); } void N_VDestroy_Raja(N_Vector v) { if (v == NULL) return; vector_type* x = static_cast<vector_type*>(v->content); if (x != NULL) { delete x; v->content = NULL; } /* free ops and vector */ if (v->ops != NULL) { free(v->ops); v->ops = NULL; } free(v); v = NULL; return; } void N_VSpace_Raja(N_Vector X, sunindextype *lrw, sunindextype *liw) { *lrw = N_VGetLength_Raja(X); *liw = 2; } void N_VConst_Raja(realtype c, N_Vector Z) { const sunindextype N = N_VGetLength_Raja(Z); realtype *zdata = N_VGetDeviceArrayPointer_Raja(Z); RAJA::forall< RAJA_NODE_TYPE >(RAJA::RangeSegment(zeroIdx, N), RAJA_LAMBDA(sunindextype i) { zdata[i] = c; }); } void N_VLinearSum_Raja(realtype a, N_Vector X, realtype b, N_Vector Y, N_Vector Z) { const realtype *xdata = N_VGetDeviceArrayPointer_Raja(X); const realtype *ydata = 
N_VGetDeviceArrayPointer_Raja(Y); const sunindextype N = N_VGetLength_Raja(X); realtype *zdata = N_VGetDeviceArrayPointer_Raja(Z); RAJA::forall< RAJA_NODE_TYPE >(RAJA::RangeSegment(zeroIdx, N), RAJA_LAMBDA(sunindextype i) { zdata[i] = a*xdata[i] + b*ydata[i]; } ); } void N_VProd_Raja(N_Vector X, N_Vector Y, N_Vector Z) { const realtype *xdata = N_VGetDeviceArrayPointer_Raja(X); const realtype *ydata = N_VGetDeviceArrayPointer_Raja(Y); const sunindextype N = N_VGetLength_Raja(X); realtype *zdata = N_VGetDeviceArrayPointer_Raja(Z); RAJA::forall< RAJA_NODE_TYPE >(RAJA::RangeSegment(zeroIdx, N), RAJA_LAMBDA(sunindextype i) { zdata[i] = xdata[i] * ydata[i]; } ); } void N_VDiv_Raja(N_Vector X, N_Vector Y, N_Vector Z) { const realtype *xdata = N_VGetDeviceArrayPointer_Raja(X); const realtype *ydata = N_VGetDeviceArrayPointer_Raja(Y); const sunindextype N = N_VGetLength_Raja(X); realtype *zdata = N_VGetDeviceArrayPointer_Raja(Z); RAJA::forall< RAJA_NODE_TYPE >(RAJA::RangeSegment(zeroIdx, N), RAJA_LAMBDA(sunindextype i) { zdata[i] = xdata[i] / ydata[i]; } ); } void N_VScale_Raja(realtype c, N_Vector X, N_Vector Z) { const realtype *xdata = N_VGetDeviceArrayPointer_Raja(X); const sunindextype N = N_VGetLength_Raja(X); realtype *zdata = N_VGetDeviceArrayPointer_Raja(Z); RAJA::forall<RAJA_NODE_TYPE >(RAJA::RangeSegment(zeroIdx, N), RAJA_LAMBDA(sunindextype i) { zdata[i] = c * xdata[i]; } ); } void N_VAbs_Raja(N_Vector X, N_Vector Z) { const realtype *xdata = N_VGetDeviceArrayPointer_Raja(X); const sunindextype N = N_VGetLength_Raja(X); realtype *zdata = N_VGetDeviceArrayPointer_Raja(Z); RAJA::forall<RAJA_NODE_TYPE >(RAJA::RangeSegment(zeroIdx, N), RAJA_LAMBDA(sunindextype i) { zdata[i] = abs(xdata[i]); } ); } void N_VInv_Raja(N_Vector X, N_Vector Z) { const realtype *xdata = N_VGetDeviceArrayPointer_Raja(X); const sunindextype N = N_VGetLength_Raja(X); realtype *zdata = N_VGetDeviceArrayPointer_Raja(Z); RAJA::forall<RAJA_NODE_TYPE >(RAJA::RangeSegment(zeroIdx, N), RAJA_LAMBDA(sunindextype i) { zdata[i] = ONE / xdata[i]; } ); } void N_VAddConst_Raja(N_Vector X, realtype b, N_Vector Z) { const realtype *xdata = N_VGetDeviceArrayPointer_Raja(X); const sunindextype N = N_VGetLength_Raja(X); realtype *zdata = N_VGetDeviceArrayPointer_Raja(Z); RAJA::forall< RAJA_NODE_TYPE >(RAJA::RangeSegment(zeroIdx, N), RAJA_LAMBDA(sunindextype i) { zdata[i] = xdata[i] + b; } ); } realtype N_VDotProd_Raja(N_Vector X, N_Vector Y) { const realtype *xdata = N_VGetDeviceArrayPointer_Raja(X); const realtype *ydata = N_VGetDeviceArrayPointer_Raja(Y); const sunindextype N = N_VGetLength_Raja(X); RAJA::ReduceSum< RAJA_REDUCE_TYPE, realtype> gpu_result(0.0); RAJA::forall< RAJA_NODE_TYPE >(RAJA::RangeSegment(zeroIdx, N), RAJA_LAMBDA(sunindextype i) { gpu_result += xdata[i] * ydata[i] ; } ); return(static_cast<realtype>(gpu_result)); } realtype N_VMaxNorm_Raja(N_Vector X) { const realtype *xdata = N_VGetDeviceArrayPointer_Raja(X); const sunindextype N = N_VGetLength_Raja(X); RAJA::ReduceMax< RAJA_REDUCE_TYPE, realtype> gpu_result(0.0); RAJA::forall< RAJA_NODE_TYPE >(RAJA::RangeSegment(zeroIdx, N), RAJA_LAMBDA(sunindextype i) { gpu_result.max(abs(xdata[i])); } ); return(static_cast<realtype>(gpu_result)); } realtype N_VWSqrSumLocal_Raja(N_Vector X, N_Vector W) { const realtype *xdata = N_VGetDeviceArrayPointer_Raja(X); const realtype *wdata = N_VGetDeviceArrayPointer_Raja(W); const sunindextype N = N_VGetLength_Raja(X); RAJA::ReduceSum< RAJA_REDUCE_TYPE, realtype> gpu_result(0.0); RAJA::forall< RAJA_NODE_TYPE 
>(RAJA::RangeSegment(zeroIdx, N), RAJA_LAMBDA(sunindextype i) { gpu_result += (xdata[i] * wdata[i] * xdata[i] * wdata[i]); } ); return(static_cast<realtype>(gpu_result)); } realtype N_VWrmsNorm_Raja(N_Vector X, N_Vector W) { const realtype sum = N_VWSqrSumLocal_Raja(X, W); const sunindextype N = N_VGetLength_Raja(X); return std::sqrt(sum/N); } realtype N_VWSqrSumMaskLocal_Raja(N_Vector X, N_Vector W, N_Vector ID) { const realtype *xdata = N_VGetDeviceArrayPointer_Raja(X); const realtype *wdata = N_VGetDeviceArrayPointer_Raja(W); const realtype *iddata = N_VGetDeviceArrayPointer_Raja(ID); const sunindextype N = N_VGetLength_Raja(X); RAJA::ReduceSum< RAJA_REDUCE_TYPE, realtype> gpu_result(0.0); RAJA::forall< RAJA_NODE_TYPE >(RAJA::RangeSegment(zeroIdx, N), RAJA_LAMBDA(sunindextype i) { if (iddata[i] > ZERO) gpu_result += (xdata[i] * wdata[i] * xdata[i] * wdata[i]); } ); return(static_cast<realtype>(gpu_result)); } realtype N_VWrmsNormMask_Raja(N_Vector X, N_Vector W, N_Vector ID) { const realtype sum = N_VWSqrSumMaskLocal_Raja(X, W, ID); const sunindextype N = N_VGetLength_Raja(X); return std::sqrt(sum/N); } realtype N_VMin_Raja(N_Vector X) { const realtype *xdata = N_VGetDeviceArrayPointer_Raja(X); const sunindextype N = N_VGetLength_Raja(X); RAJA::ReduceMin< RAJA_REDUCE_TYPE, realtype> gpu_result(std::numeric_limits<realtype>::max()); RAJA::forall< RAJA_NODE_TYPE >(RAJA::RangeSegment(zeroIdx, N), RAJA_LAMBDA(sunindextype i) { gpu_result.min(xdata[i]); } ); return(static_cast<realtype>(gpu_result)); } realtype N_VWL2Norm_Raja(N_Vector X, N_Vector W) { return std::sqrt(N_VWSqrSumLocal_Raja(X, W)); } realtype N_VL1Norm_Raja(N_Vector X) { const realtype *xdata = N_VGetDeviceArrayPointer_Raja(X); const sunindextype N = N_VGetLength_Raja(X); RAJA::ReduceSum< RAJA_REDUCE_TYPE, realtype> gpu_result(0.0); RAJA::forall< RAJA_NODE_TYPE >(RAJA::RangeSegment(zeroIdx, N), RAJA_LAMBDA(sunindextype i) { gpu_result += (abs(xdata[i])); } ); return(static_cast<realtype>(gpu_result)); } void N_VCompare_Raja(realtype c, N_Vector X, N_Vector Z) { const realtype *xdata = N_VGetDeviceArrayPointer_Raja(X); const sunindextype N = N_VGetLength_Raja(X); realtype *zdata = N_VGetDeviceArrayPointer_Raja(Z); RAJA::forall< RAJA_NODE_TYPE >(RAJA::RangeSegment(zeroIdx, N), RAJA_LAMBDA(sunindextype i) { zdata[i] = abs(xdata[i]) >= c ? ONE : ZERO; } ); } booleantype N_VInvTest_Raja(N_Vector x, N_Vector z) { const realtype *xdata = N_VGetDeviceArrayPointer_Raja(x); const sunindextype N = N_VGetLength_Raja(x); realtype *zdata = N_VGetDeviceArrayPointer_Raja(z); RAJA::ReduceSum< RAJA_REDUCE_TYPE, realtype> gpu_result(ZERO); RAJA::forall< RAJA_NODE_TYPE >(RAJA::RangeSegment(zeroIdx, N), RAJA_LAMBDA(sunindextype i) { if (xdata[i] == ZERO) { gpu_result += ONE; } else { zdata[i] = ONE/xdata[i]; } } ); realtype minimum = static_cast<realtype>(gpu_result); return (minimum < HALF); } booleantype N_VConstrMask_Raja(N_Vector c, N_Vector x, N_Vector m) { const realtype *cdata = N_VGetDeviceArrayPointer_Raja(c); const realtype *xdata = N_VGetDeviceArrayPointer_Raja(x); const sunindextype N = N_VGetLength_Raja(x); realtype *mdata = N_VGetDeviceArrayPointer_Raja(m); RAJA::ReduceSum< RAJA_REDUCE_TYPE, realtype> gpu_result(ZERO); RAJA::forall< RAJA_NODE_TYPE >(RAJA::RangeSegment(zeroIdx, N), RAJA_LAMBDA(sunindextype i) { bool test = (abs(cdata[i]) > ONEPT5 && cdata[i]*xdata[i] <= ZERO) || (abs(cdata[i]) > HALF && cdata[i]*xdata[i] < ZERO); mdata[i] = test ? 
ONE : ZERO; gpu_result += mdata[i]; } ); realtype sum = static_cast<realtype>(gpu_result); return(sum < HALF); } realtype N_VMinQuotient_Raja(N_Vector num, N_Vector denom) { const realtype *ndata = N_VGetDeviceArrayPointer_Raja(num); const realtype *ddata = N_VGetDeviceArrayPointer_Raja(denom); const sunindextype N = N_VGetLength_Raja(num); RAJA::ReduceMin< RAJA_REDUCE_TYPE, realtype> gpu_result(std::numeric_limits<realtype>::max()); RAJA::forall< RAJA_NODE_TYPE >(RAJA::RangeSegment(zeroIdx, N), RAJA_LAMBDA(sunindextype i) { if (ddata[i] != ZERO) gpu_result.min(ndata[i]/ddata[i]); } ); return(static_cast<realtype>(gpu_result)); } /* * ----------------------------------------------------------------------------- * fused vector operations * ----------------------------------------------------------------------------- */ int N_VLinearCombination_Raja(int nvec, realtype* c, N_Vector* X, N_Vector z) { cudaError_t err; sunindextype N = N_VGetLength_Raja(z); realtype* d_zd = N_VGetDeviceArrayPointer_Raja(z); // Copy c array to device realtype* d_c; err = cudaMalloc((void**) &d_c, nvec*sizeof(realtype)); if (err != cudaSuccess) return cudaGetLastError(); err = cudaMemcpy(d_c, c, nvec*sizeof(realtype), cudaMemcpyHostToDevice); if (err != cudaSuccess) return cudaGetLastError(); // Create array of device pointers on host realtype** h_Xd = new realtype*[nvec]; for (int j=0; j<nvec; j++) h_Xd[j] = N_VGetDeviceArrayPointer_Raja(X[j]); // Copy array of device pointers to device from host realtype** d_Xd; err = cudaMalloc((void**) &d_Xd, nvec*sizeof(realtype*)); if (err != cudaSuccess) return cudaGetLastError(); err = cudaMemcpy(d_Xd, h_Xd, nvec*sizeof(realtype*), cudaMemcpyHostToDevice); if (err != cudaSuccess) return cudaGetLastError(); RAJA::forall< RAJA_NODE_TYPE >(RAJA::RangeSegment(zeroIdx, N), RAJA_LAMBDA(sunindextype i) { d_zd[i] = d_c[0] * d_Xd[0][i]; for (int j=1; j<nvec; j++) d_zd[i] += d_c[j] * d_Xd[j][i]; } ); // Free host array delete[] h_Xd; // Free device arrays err = cudaFree(d_c); if (err != cudaSuccess) return cudaGetLastError(); err = cudaFree(d_Xd); if (err != cudaSuccess) return cudaGetLastError(); return(0); } int N_VScaleAddMulti_Raja(int nvec, realtype* c, N_Vector x, N_Vector* Y, N_Vector* Z) { cudaError_t err; sunindextype N = N_VGetLength_Raja(x); realtype* d_xd = N_VGetDeviceArrayPointer_Raja(x); // Copy c array to device realtype* d_c; err = cudaMalloc((void**) &d_c, nvec*sizeof(realtype)); if (err != cudaSuccess) return cudaGetLastError(); err = cudaMemcpy(d_c, c, nvec*sizeof(realtype), cudaMemcpyHostToDevice); if (err != cudaSuccess) return cudaGetLastError(); // Create array of device pointers on host realtype** h_Yd = new realtype*[nvec]; for (int j=0; j<nvec; j++) h_Yd[j] = N_VGetDeviceArrayPointer_Raja(Y[j]); realtype** h_Zd = new realtype*[nvec]; for (int j=0; j<nvec; j++) h_Zd[j] = N_VGetDeviceArrayPointer_Raja(Z[j]); // Copy array of device pointers to device from host realtype** d_Yd; err = cudaMalloc((void**) &d_Yd, nvec*sizeof(realtype*)); if (err != cudaSuccess) return cudaGetLastError(); err = cudaMemcpy(d_Yd, h_Yd, nvec*sizeof(realtype*), cudaMemcpyHostToDevice); if (err != cudaSuccess) return cudaGetLastError(); realtype** d_Zd; err = cudaMalloc((void**) &d_Zd, nvec*sizeof(realtype*)); if (err != cudaSuccess) return cudaGetLastError(); err = cudaMemcpy(d_Zd, h_Zd, nvec*sizeof(realtype*), cudaMemcpyHostToDevice); if (err != cudaSuccess) return cudaGetLastError(); RAJA::forall< RAJA_NODE_TYPE >(RAJA::RangeSegment(zeroIdx, N), RAJA_LAMBDA(sunindextype i) { for 
(int j=0; j<nvec; j++) d_Zd[j][i] = d_c[j] * d_xd[i] + d_Yd[j][i]; } ); // Free host array delete[] h_Yd; delete[] h_Zd; // Free device arrays err = cudaFree(d_c); if (err != cudaSuccess) return cudaGetLastError(); err = cudaFree(d_Yd); if (err != cudaSuccess) return cudaGetLastError(); err = cudaFree(d_Zd); if (err != cudaSuccess) return cudaGetLastError(); return(0); } /* * ----------------------------------------------------------------------------- * vector array operations * ----------------------------------------------------------------------------- */ int N_VLinearSumVectorArray_Raja(int nvec, realtype a, N_Vector* X, realtype b, N_Vector* Y, N_Vector* Z) { cudaError_t err; sunindextype N = N_VGetLength_Raja(Z[0]); // Create array of device pointers on host realtype** h_Xd = new realtype*[nvec]; for (int j=0; j<nvec; j++) h_Xd[j] = N_VGetDeviceArrayPointer_Raja(X[j]); realtype** h_Yd = new realtype*[nvec]; for (int j=0; j<nvec; j++) h_Yd[j] = N_VGetDeviceArrayPointer_Raja(Y[j]); realtype** h_Zd = new realtype*[nvec]; for (int j=0; j<nvec; j++) h_Zd[j] = N_VGetDeviceArrayPointer_Raja(Z[j]); // Copy array of device pointers to device from host realtype** d_Xd; err = cudaMalloc((void**) &d_Xd, nvec*sizeof(realtype*)); if (err != cudaSuccess) return cudaGetLastError(); err = cudaMemcpy(d_Xd, h_Xd, nvec*sizeof(realtype*), cudaMemcpyHostToDevice); if (err != cudaSuccess) return cudaGetLastError(); realtype** d_Yd; err = cudaMalloc((void**) &d_Yd, nvec*sizeof(realtype*)); if (err != cudaSuccess) return cudaGetLastError(); err = cudaMemcpy(d_Yd, h_Yd, nvec*sizeof(realtype*), cudaMemcpyHostToDevice); if (err != cudaSuccess) return cudaGetLastError(); realtype** d_Zd; err = cudaMalloc((void**) &d_Zd, nvec*sizeof(realtype*)); if (err != cudaSuccess) return cudaGetLastError(); err = cudaMemcpy(d_Zd, h_Zd, nvec*sizeof(realtype*), cudaMemcpyHostToDevice); if (err != cudaSuccess) return cudaGetLastError(); RAJA::forall< RAJA_NODE_TYPE >(RAJA::RangeSegment(zeroIdx, N), RAJA_LAMBDA(sunindextype i) { for (int j=0; j<nvec; j++) d_Zd[j][i] = a * d_Xd[j][i] + b * d_Yd[j][i]; } ); // Free host array delete[] h_Xd; delete[] h_Yd; delete[] h_Zd; // Free device arrays err = cudaFree(d_Xd); if (err != cudaSuccess) return cudaGetLastError(); err = cudaFree(d_Yd); if (err != cudaSuccess) return cudaGetLastError(); err = cudaFree(d_Zd); if (err != cudaSuccess) return cudaGetLastError(); return(0); } int N_VScaleVectorArray_Raja(int nvec, realtype* c, N_Vector* X, N_Vector* Z) { cudaError_t err; sunindextype N = N_VGetLength_Raja(Z[0]); // Copy c array to device realtype* d_c; err = cudaMalloc((void**) &d_c, nvec*sizeof(realtype)); if (err != cudaSuccess) return cudaGetLastError(); err = cudaMemcpy(d_c, c, nvec*sizeof(realtype), cudaMemcpyHostToDevice); if (err != cudaSuccess) return cudaGetLastError(); // Create array of device pointers on host realtype** h_Xd = new realtype*[nvec]; for (int j=0; j<nvec; j++) h_Xd[j] = N_VGetDeviceArrayPointer_Raja(X[j]); realtype** h_Zd = new realtype*[nvec]; for (int j=0; j<nvec; j++) h_Zd[j] = N_VGetDeviceArrayPointer_Raja(Z[j]); // Copy array of device pointers to device from host realtype** d_Xd; err = cudaMalloc((void**) &d_Xd, nvec*sizeof(realtype*)); if (err != cudaSuccess) return cudaGetLastError(); err = cudaMemcpy(d_Xd, h_Xd, nvec*sizeof(realtype*), cudaMemcpyHostToDevice); if (err != cudaSuccess) return cudaGetLastError(); realtype** d_Zd; err = cudaMalloc((void**) &d_Zd, nvec*sizeof(realtype*)); if (err != cudaSuccess) return cudaGetLastError(); err = 
cudaMemcpy(d_Zd, h_Zd, nvec*sizeof(realtype*), cudaMemcpyHostToDevice); if (err != cudaSuccess) return cudaGetLastError(); RAJA::forall< RAJA_NODE_TYPE >(RAJA::RangeSegment(zeroIdx, N), RAJA_LAMBDA(sunindextype i) { for (int j=0; j<nvec; j++) d_Zd[j][i] = d_c[j] * d_Xd[j][i]; } ); // Free host array delete[] h_Xd; delete[] h_Zd; // Free device arrays err = cudaFree(d_Xd); if (err != cudaSuccess) return cudaGetLastError(); err = cudaFree(d_Zd); if (err != cudaSuccess) return cudaGetLastError(); return(0); } int N_VConstVectorArray_Raja(int nvec, realtype c, N_Vector* Z) { cudaError_t err; sunindextype N = N_VGetLength_Raja(Z[0]); // Create array of device pointers on host realtype** h_Zd = new realtype*[nvec]; for (int j=0; j<nvec; j++) h_Zd[j] = N_VGetDeviceArrayPointer_Raja(Z[j]); // Copy array of device pointers to device from host realtype** d_Zd; err = cudaMalloc((void**) &d_Zd, nvec*sizeof(realtype*)); if (err != cudaSuccess) return cudaGetLastError(); err = cudaMemcpy(d_Zd, h_Zd, nvec*sizeof(realtype*), cudaMemcpyHostToDevice); if (err != cudaSuccess) return cudaGetLastError(); RAJA::forall< RAJA_NODE_TYPE >(RAJA::RangeSegment(zeroIdx, N), RAJA_LAMBDA(sunindextype i) { for (int j=0; j<nvec; j++) d_Zd[j][i] = c; } ); // Free host array delete[] h_Zd; // Free device arrays err = cudaFree(d_Zd); if (err != cudaSuccess) return cudaGetLastError(); return(0); } int N_VScaleAddMultiVectorArray_Raja(int nvec, int nsum, realtype* c, N_Vector* X, N_Vector** Y, N_Vector** Z) { cudaError_t err; sunindextype N = N_VGetLength_Raja(X[0]); // Copy c array to device realtype* d_c; err = cudaMalloc((void**) &d_c, nsum*sizeof(realtype)); if (err != cudaSuccess) return cudaGetLastError(); err = cudaMemcpy(d_c, c, nsum*sizeof(realtype), cudaMemcpyHostToDevice); if (err != cudaSuccess) return cudaGetLastError(); // Create array of device pointers on host realtype** h_Xd = new realtype*[nvec]; for (int j=0; j<nvec; j++) h_Xd[j] = N_VGetDeviceArrayPointer_Raja(X[j]); realtype** h_Yd = new realtype*[nsum*nvec]; for (int j=0; j<nvec; j++) for (int k=0; k<nsum; k++) h_Yd[j*nsum+k] = N_VGetDeviceArrayPointer_Raja(Y[k][j]); realtype** h_Zd = new realtype*[nsum*nvec]; for (int j=0; j<nvec; j++) for (int k=0; k<nsum; k++) h_Zd[j*nsum+k] = N_VGetDeviceArrayPointer_Raja(Z[k][j]); // Copy array of device pointers to device from host realtype** d_Xd; err = cudaMalloc((void**) &d_Xd, nvec*sizeof(realtype*)); if (err != cudaSuccess) return cudaGetLastError(); err = cudaMemcpy(d_Xd, h_Xd, nvec*sizeof(realtype*), cudaMemcpyHostToDevice); if (err != cudaSuccess) return cudaGetLastError(); realtype** d_Yd; err = cudaMalloc((void**) &d_Yd, nsum*nvec*sizeof(realtype*)); if (err != cudaSuccess) return cudaGetLastError(); err = cudaMemcpy(d_Yd, h_Yd, nsum*nvec*sizeof(realtype*), cudaMemcpyHostToDevice); if (err != cudaSuccess) return cudaGetLastError(); realtype** d_Zd; err = cudaMalloc((void**) &d_Zd, nsum*nvec*sizeof(realtype*)); if (err != cudaSuccess) return cudaGetLastError(); err = cudaMemcpy(d_Zd, h_Zd, nsum*nvec*sizeof(realtype*), cudaMemcpyHostToDevice); if (err != cudaSuccess) return cudaGetLastError(); RAJA::forall< RAJA_NODE_TYPE >(RAJA::RangeSegment(zeroIdx, N), RAJA_LAMBDA(sunindextype i) { for (int j=0; j<nvec; j++) for (int k=0; k<nsum; k++) d_Zd[j*nsum+k][i] = d_c[k] * d_Xd[j][i] + d_Yd[j*nsum+k][i]; } ); // Free host array delete[] h_Xd; delete[] h_Yd; delete[] h_Zd; // Free device arrays err = cudaFree(d_Xd); if (err != cudaSuccess) return cudaGetLastError(); err = cudaFree(d_Yd); if (err != cudaSuccess) 
return cudaGetLastError(); err = cudaFree(d_Zd); if (err != cudaSuccess) return cudaGetLastError(); return(0); } int N_VLinearCombinationVectorArray_Raja(int nvec, int nsum, realtype* c, N_Vector** X, N_Vector* Z) { cudaError_t err; sunindextype N = N_VGetLength_Raja(Z[0]); // Copy c array to device realtype* d_c; err = cudaMalloc((void**) &d_c, nsum*sizeof(realtype)); if (err != cudaSuccess) return cudaGetLastError(); err = cudaMemcpy(d_c, c, nsum*sizeof(realtype), cudaMemcpyHostToDevice); if (err != cudaSuccess) return cudaGetLastError(); // Create array of device pointers on host realtype** h_Xd = new realtype*[nsum*nvec]; for (int j=0; j<nvec; j++) for (int k=0; k<nsum; k++) h_Xd[j*nsum+k] = N_VGetDeviceArrayPointer_Raja(X[k][j]); realtype** h_Zd = new realtype*[nvec]; for (int j=0; j<nvec; j++) h_Zd[j] = N_VGetDeviceArrayPointer_Raja(Z[j]); // Copy array of device pointers to device from host realtype** d_Xd; err = cudaMalloc((void**) &d_Xd, nsum*nvec*sizeof(realtype*)); if (err != cudaSuccess) return cudaGetLastError(); err = cudaMemcpy(d_Xd, h_Xd, nsum*nvec*sizeof(realtype*), cudaMemcpyHostToDevice); if (err != cudaSuccess) return cudaGetLastError(); realtype** d_Zd; err = cudaMalloc((void**) &d_Zd, nvec*sizeof(realtype*)); if (err != cudaSuccess) return cudaGetLastError(); err = cudaMemcpy(d_Zd, h_Zd, nvec*sizeof(realtype*), cudaMemcpyHostToDevice); if (err != cudaSuccess) return cudaGetLastError(); RAJA::forall< RAJA_NODE_TYPE >(RAJA::RangeSegment(zeroIdx, N), RAJA_LAMBDA(sunindextype i) { for (int j=0; j<nvec; j++) { d_Zd[j][i] = d_c[0] * d_Xd[j*nsum][i]; for (int k=1; k<nsum; k++) { d_Zd[j][i] += d_c[k] * d_Xd[j*nsum+k][i]; } } } ); // Free host array delete[] h_Xd; delete[] h_Zd; // Free device arrays err = cudaFree(d_Xd); if (err != cudaSuccess) return cudaGetLastError(); err = cudaFree(d_Zd); if (err != cudaSuccess) return cudaGetLastError(); return(0); } /* * ----------------------------------------------------------------- * Enable / Disable fused and vector array operations * ----------------------------------------------------------------- */ int N_VEnableFusedOps_Raja(N_Vector v, booleantype tf) { /* check that vector is non-NULL */ if (v == NULL) return(-1); /* check that ops structure is non-NULL */ if (v->ops == NULL) return(-1); if (tf) { /* enable all fused vector operations */ v->ops->nvlinearcombination = N_VLinearCombination_Raja; v->ops->nvscaleaddmulti = N_VScaleAddMulti_Raja; v->ops->nvdotprodmulti = NULL; /* enable all vector array operations */ v->ops->nvlinearsumvectorarray = N_VLinearSumVectorArray_Raja; v->ops->nvscalevectorarray = N_VScaleVectorArray_Raja; v->ops->nvconstvectorarray = N_VConstVectorArray_Raja; v->ops->nvwrmsnormvectorarray = NULL; v->ops->nvwrmsnormmaskvectorarray = NULL; v->ops->nvscaleaddmultivectorarray = N_VScaleAddMultiVectorArray_Raja; v->ops->nvlinearcombinationvectorarray = N_VLinearCombinationVectorArray_Raja; } else { /* disable all fused vector operations */ v->ops->nvlinearcombination = NULL; v->ops->nvscaleaddmulti = NULL; v->ops->nvdotprodmulti = NULL; /* disable all vector array operations */ v->ops->nvlinearsumvectorarray = NULL; v->ops->nvscalevectorarray = NULL; v->ops->nvconstvectorarray = NULL; v->ops->nvwrmsnormvectorarray = NULL; v->ops->nvwrmsnormmaskvectorarray = NULL; v->ops->nvscaleaddmultivectorarray = NULL; v->ops->nvlinearcombinationvectorarray = NULL; } /* return success */ return(0); } int N_VEnableLinearCombination_Raja(N_Vector v, booleantype tf) { /* check that vector is non-NULL */ if (v == NULL) 
return(-1); /* check that ops structure is non-NULL */ if (v->ops == NULL) return(-1); /* enable/disable operation */ if (tf) v->ops->nvlinearcombination = N_VLinearCombination_Raja; else v->ops->nvlinearcombination = NULL; /* return success */ return(0); } int N_VEnableScaleAddMulti_Raja(N_Vector v, booleantype tf) { /* check that vector is non-NULL */ if (v == NULL) return(-1); /* check that ops structure is non-NULL */ if (v->ops == NULL) return(-1); /* enable/disable operation */ if (tf) v->ops->nvscaleaddmulti = N_VScaleAddMulti_Raja; else v->ops->nvscaleaddmulti = NULL; /* return success */ return(0); } int N_VEnableLinearSumVectorArray_Raja(N_Vector v, booleantype tf) { /* check that vector is non-NULL */ if (v == NULL) return(-1); /* check that ops structure is non-NULL */ if (v->ops == NULL) return(-1); /* enable/disable operation */ if (tf) v->ops->nvlinearsumvectorarray = N_VLinearSumVectorArray_Raja; else v->ops->nvlinearsumvectorarray = NULL; /* return success */ return(0); } int N_VEnableScaleVectorArray_Raja(N_Vector v, booleantype tf) { /* check that vector is non-NULL */ if (v == NULL) return(-1); /* check that ops structure is non-NULL */ if (v->ops == NULL) return(-1); /* enable/disable operation */ if (tf) v->ops->nvscalevectorarray = N_VScaleVectorArray_Raja; else v->ops->nvscalevectorarray = NULL; /* return success */ return(0); } int N_VEnableConstVectorArray_Raja(N_Vector v, booleantype tf) { /* check that vector is non-NULL */ if (v == NULL) return(-1); /* check that ops structure is non-NULL */ if (v->ops == NULL) return(-1); /* enable/disable operation */ if (tf) v->ops->nvconstvectorarray = N_VConstVectorArray_Raja; else v->ops->nvconstvectorarray = NULL; /* return success */ return(0); } int N_VEnableScaleAddMultiVectorArray_Raja(N_Vector v, booleantype tf) { /* check that vector is non-NULL */ if (v == NULL) return(-1); /* check that ops structure is non-NULL */ if (v->ops == NULL) return(-1); /* enable/disable operation */ if (tf) v->ops->nvscaleaddmultivectorarray = N_VScaleAddMultiVectorArray_Raja; else v->ops->nvscaleaddmultivectorarray = NULL; /* return success */ return(0); } int N_VEnableLinearCombinationVectorArray_Raja(N_Vector v, booleantype tf) { /* check that vector is non-NULL */ if (v == NULL) return(-1); /* check that ops structure is non-NULL */ if (v->ops == NULL) return(-1); /* enable/disable operation */ if (tf) v->ops->nvlinearcombinationvectorarray = N_VLinearCombinationVectorArray_Raja; else v->ops->nvlinearcombinationvectorarray = NULL; /* return success */ return(0); } } // extern "C"
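// --- Illustrative sketch (editor's addition, not part of the SUNDIALS sources) ---
// Every fused/vector-array operation above follows the same pattern: build a host
// table of per-vector device data pointers, cudaMemcpy that table to the device,
// and run one element-indexed loop that walks all nvec vectors. Below is the same
// pattern for the "scale" case written as a plain CUDA kernel instead of
// RAJA::forall; the names (scale_vector_array, scale_vector_array_host) are
// hypothetical and realtype is assumed to be double.
#include <cuda_runtime.h>

__global__ void scale_vector_array(int nvec, long N, const double* c,
                                   double** Xd, double** Zd)
{
  long i = blockIdx.x * (long)blockDim.x + threadIdx.x;
  if (i < N) {
    for (int j = 0; j < nvec; j++)
      Zd[j][i] = c[j] * Xd[j][i];             // Z_j[i] = c_j * X_j[i]
  }
}

int scale_vector_array_host(int nvec, long N, const double* h_c,
                            double** h_Xptrs, double** h_Zptrs)
{
  double* d_c; double** d_X; double** d_Z;
  if (cudaMalloc((void**)&d_c, nvec*sizeof(double))  != cudaSuccess) return -1;
  if (cudaMalloc((void**)&d_X, nvec*sizeof(double*)) != cudaSuccess) return -1;
  if (cudaMalloc((void**)&d_Z, nvec*sizeof(double*)) != cudaSuccess) return -1;
  cudaMemcpy(d_c, h_c,     nvec*sizeof(double),  cudaMemcpyHostToDevice);
  cudaMemcpy(d_X, h_Xptrs, nvec*sizeof(double*), cudaMemcpyHostToDevice);
  cudaMemcpy(d_Z, h_Zptrs, nvec*sizeof(double*), cudaMemcpyHostToDevice);
  const int block = 256;
  const int grid  = (int)((N + block - 1) / block);
  scale_vector_array<<<grid, block>>>(nvec, N, d_c, d_X, d_Z);
  cudaError_t err = cudaDeviceSynchronize();
  // The sketch releases the scalar array together with the pointer tables.
  cudaFree(d_c); cudaFree(d_X); cudaFree(d_Z);
  return (err == cudaSuccess) ? 0 : -1;
}
// --- end of sketch ---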
38665021ae7c3bb252fb4def9a2a85887650ae65.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "device_launch_parameters.h" #include "wb.h" #define MASK_WIDTH 5 #define MASK_RADIUS MASK_WIDTH / 2 __global__ void convolution(const float *__restrict__ I, const float *__restrict__ M, float *__restrict__ P, const int channels, const int width, const int height) { const unsigned int i = threadIdx.x + blockDim.x * blockIdx.x; const unsigned int j = threadIdx.y + blockDim.y * blockIdx.y; if (i < width && j < height) { for (unsigned int c = 0; c < channels; c++) { float accumulator = 0; for (int y = -MASK_RADIUS; y <= MASK_RADIUS; y++) { for (int x = -MASK_RADIUS; x <= MASK_RADIUS; x++) { const int imgX = (int) i + x; const int imgY = (int) j + y; if ((imgX >= 0 && imgX < width) && (imgY >= 0 && imgY < height)) { unsigned const int imgXY = (imgX + imgY * width) * channels + c; const float imgPixel = I[imgXY]; const float maskValue = M[(x + MASK_RADIUS) + (y + MASK_RADIUS) * MASK_WIDTH]; accumulator += imgPixel * maskValue; } } } P[(i + j * width) * channels + c] = __saturatef(accumulator); } } } int main(int argc, char *argv[]) { wbArg_t arg; int maskRows; int maskColumns; int imageChannels; int imageWidth; int imageHeight; char *inputImageFile; char *inputMaskFile; wbImage_t inputImage; wbImage_t outputImage; float *hostInputImageData; float *hostOutputImageData; float *hostMaskData; float *deviceInputImageData; float *deviceOutputImageData; float *deviceMaskData; arg = wbArg_read(argc, argv); /* parse the input arguments */ inputImageFile = wbArg_getInputFile(arg, 0); inputMaskFile = wbArg_getInputFile(arg, 1); inputImage = wbImport(inputImageFile); hostMaskData = (float *) wbImport(inputMaskFile, &maskRows, &maskColumns); assert(maskRows == 5); /* mask height is fixed to 5 in this mp */ assert(maskColumns == 5); /* mask width is fixed to 5 in this mp */ imageWidth = wbImage_getWidth(inputImage); imageHeight = wbImage_getHeight(inputImage); imageChannels = wbImage_getChannels(inputImage); outputImage = wbImage_new(imageWidth, imageHeight, imageChannels); hostInputImageData = wbImage_getData(inputImage); hostOutputImageData = wbImage_getData(outputImage); wbTime_start(GPU, "Doing GPU Computation (memory + compute)"); wbTime_start(GPU, "Doing GPU memory allocation"); const size_t imageSize = imageWidth * imageHeight * imageChannels * sizeof(float); const size_t maskSize = maskRows * maskColumns * sizeof(float); hipMalloc((void **) &deviceInputImageData, imageSize); hipMalloc((void **) &deviceOutputImageData, imageSize); hipMalloc((void **) &deviceMaskData, maskSize); wbTime_stop(GPU, "Doing GPU memory allocation"); wbTime_start(Copy, "Copying data to the GPU"); hipMemcpy(deviceInputImageData, hostInputImageData, imageSize, hipMemcpyHostToDevice); hipMemcpy(deviceMaskData, hostMaskData, maskSize, hipMemcpyHostToDevice); wbTime_stop(Copy, "Copying data to the GPU"); const int threads = 32; const dim3 blockSize(threads, threads, 1); const int gridX = (imageWidth + threads - 1) / threads; const int gridY = (imageHeight + threads - 1) / threads; const dim3 gridSize(gridX, gridY, 1); wbTime_start(Compute, "Doing the computation on the GPU"); hipLaunchKernelGGL(( convolution), dim3(gridSize), dim3(blockSize), 0, 0, deviceInputImageData, deviceMaskData, deviceOutputImageData, imageChannels, imageWidth, imageHeight); hipDeviceSynchronize(); wbTime_stop(Compute, "Doing the computation on the GPU"); wbTime_start(Copy, "Copying data from the GPU"); hipMemcpy(hostOutputImageData, deviceOutputImageData, 
imageSize, hipMemcpyDeviceToHost); wbTime_stop(Copy, "Copying data from the GPU"); wbTime_stop(GPU, "Doing GPU Computation (memory + compute)"); wbSolution(arg, outputImage); hipFree(deviceInputImageData); hipFree(deviceMaskData); hipFree(deviceOutputImageData); free(hostMaskData); wbImage_delete(outputImage); wbImage_delete(inputImage); #if LAB_DEBUG system("pause"); #endif return 0; }
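// --- Illustrative sketch (editor's addition, not part of the lab code) ---
// The 5x5 mask above is small, read-only, and read by every thread, which makes it
// a natural candidate for __constant__ memory rather than a global-memory pointer
// argument; c_mask and uploadMask are hypothetical names.
#include <cuda_runtime.h>

#define SKETCH_MASK_WIDTH 5

__constant__ float c_mask[SKETCH_MASK_WIDTH * SKETCH_MASK_WIDTH];

// Copy the host mask into the constant bank once, before launching the kernel;
// the device code would then read c_mask[...] instead of the M[...] argument.
static cudaError_t uploadMask(const float* hostMaskData)
{
    return cudaMemcpyToSymbol(c_mask, hostMaskData,
                              SKETCH_MASK_WIDTH * SKETCH_MASK_WIDTH * sizeof(float));
}
// --- end of sketch ---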
38665021ae7c3bb252fb4def9a2a85887650ae65.cu
#include "cuda_runtime.h" #include "device_launch_parameters.h" #include "wb.h" #define MASK_WIDTH 5 #define MASK_RADIUS MASK_WIDTH / 2 __global__ void convolution(const float *__restrict__ I, const float *__restrict__ M, float *__restrict__ P, const int channels, const int width, const int height) { const unsigned int i = threadIdx.x + blockDim.x * blockIdx.x; const unsigned int j = threadIdx.y + blockDim.y * blockIdx.y; if (i < width && j < height) { for (unsigned int c = 0; c < channels; c++) { float accumulator = 0; for (int y = -MASK_RADIUS; y <= MASK_RADIUS; y++) { for (int x = -MASK_RADIUS; x <= MASK_RADIUS; x++) { const int imgX = (int) i + x; const int imgY = (int) j + y; if ((imgX >= 0 && imgX < width) && (imgY >= 0 && imgY < height)) { unsigned const int imgXY = (imgX + imgY * width) * channels + c; const float imgPixel = I[imgXY]; const float maskValue = M[(x + MASK_RADIUS) + (y + MASK_RADIUS) * MASK_WIDTH]; accumulator += imgPixel * maskValue; } } } P[(i + j * width) * channels + c] = __saturatef(accumulator); } } } int main(int argc, char *argv[]) { wbArg_t arg; int maskRows; int maskColumns; int imageChannels; int imageWidth; int imageHeight; char *inputImageFile; char *inputMaskFile; wbImage_t inputImage; wbImage_t outputImage; float *hostInputImageData; float *hostOutputImageData; float *hostMaskData; float *deviceInputImageData; float *deviceOutputImageData; float *deviceMaskData; arg = wbArg_read(argc, argv); /* parse the input arguments */ inputImageFile = wbArg_getInputFile(arg, 0); inputMaskFile = wbArg_getInputFile(arg, 1); inputImage = wbImport(inputImageFile); hostMaskData = (float *) wbImport(inputMaskFile, &maskRows, &maskColumns); assert(maskRows == 5); /* mask height is fixed to 5 in this mp */ assert(maskColumns == 5); /* mask width is fixed to 5 in this mp */ imageWidth = wbImage_getWidth(inputImage); imageHeight = wbImage_getHeight(inputImage); imageChannels = wbImage_getChannels(inputImage); outputImage = wbImage_new(imageWidth, imageHeight, imageChannels); hostInputImageData = wbImage_getData(inputImage); hostOutputImageData = wbImage_getData(outputImage); wbTime_start(GPU, "Doing GPU Computation (memory + compute)"); wbTime_start(GPU, "Doing GPU memory allocation"); const size_t imageSize = imageWidth * imageHeight * imageChannels * sizeof(float); const size_t maskSize = maskRows * maskColumns * sizeof(float); cudaMalloc((void **) &deviceInputImageData, imageSize); cudaMalloc((void **) &deviceOutputImageData, imageSize); cudaMalloc((void **) &deviceMaskData, maskSize); wbTime_stop(GPU, "Doing GPU memory allocation"); wbTime_start(Copy, "Copying data to the GPU"); cudaMemcpy(deviceInputImageData, hostInputImageData, imageSize, cudaMemcpyHostToDevice); cudaMemcpy(deviceMaskData, hostMaskData, maskSize, cudaMemcpyHostToDevice); wbTime_stop(Copy, "Copying data to the GPU"); const int threads = 32; const dim3 blockSize(threads, threads, 1); const int gridX = (imageWidth + threads - 1) / threads; const int gridY = (imageHeight + threads - 1) / threads; const dim3 gridSize(gridX, gridY, 1); wbTime_start(Compute, "Doing the computation on the GPU"); convolution<<<gridSize, blockSize>>>(deviceInputImageData, deviceMaskData, deviceOutputImageData, imageChannels, imageWidth, imageHeight); cudaDeviceSynchronize(); wbTime_stop(Compute, "Doing the computation on the GPU"); wbTime_start(Copy, "Copying data from the GPU"); cudaMemcpy(hostOutputImageData, deviceOutputImageData, imageSize, cudaMemcpyDeviceToHost); wbTime_stop(Copy, "Copying data from the GPU"); 
wbTime_stop(GPU, "Doing GPU Computation (memory + compute)"); wbSolution(arg, outputImage); cudaFree(deviceInputImageData); cudaFree(deviceMaskData); cudaFree(deviceOutputImageData); free(hostMaskData); wbImage_delete(outputImage); wbImage_delete(inputImage); #if LAB_DEBUG system("pause"); #endif return 0; }
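// --- Illustrative sketch (editor's addition, not part of the lab code) ---
// A host-side reference of the same clamped-window convolution, handy for
// spot-checking the kernel output on a small image; convolve_reference is a
// hypothetical helper and assumes the 5x5 row-major mask used above.
#include <algorithm>
#include <vector>

static std::vector<float> convolve_reference(const std::vector<float>& img,
                                             const std::vector<float>& mask,
                                             int width, int height, int channels)
{
    const int R = 2;  // MASK_RADIUS for MASK_WIDTH == 5
    std::vector<float> out(img.size(), 0.0f);
    for (int j = 0; j < height; ++j)
        for (int i = 0; i < width; ++i)
            for (int c = 0; c < channels; ++c) {
                float acc = 0.0f;
                for (int y = -R; y <= R; ++y)
                    for (int x = -R; x <= R; ++x) {
                        const int ix = i + x, iy = j + y;
                        if (ix >= 0 && ix < width && iy >= 0 && iy < height)
                            acc += img[(ix + iy * width) * channels + c]
                                 * mask[(x + R) + (y + R) * 5];
                    }
                // Mirror __saturatef: clamp the result to [0, 1].
                out[(i + j * width) * channels + c] =
                    std::min(1.0f, std::max(0.0f, acc));
            }
    return out;
}
// --- end of sketch ---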
fc9ec54a85a05148e47f9d94fd22fe6e9f29057d.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <stdio.h> #include <stdlib.h> #include <cstdlib> #include <iostream> #include <fstream> #include <chrono> void gpuMemTransfer(int* A_cpu, int* B_cpu, int* C_cpu, int N, int size, bool memCol); void gpuNoMemTransfer(int* A_cpu, int* B_cpu, int* C_cpu, int N, int size, bool memCol); __global__ void matrixMul(int* A_gpu, int* B_gpu, int* C_gpu, int N) { // Row i of matrix C int row = blockIdx.y * blockDim.y + threadIdx.y; // Column j of matrix C int col = blockIdx.x * blockDim.x + threadIdx.x; int accu = 0; if(row<N && col<N) { for(int k=0; k<N; k++) { accu = accu + A_gpu[row*N+k] * B_gpu[k*N+col]; } C_gpu[row*N+col] = accu; } } __global__ void matrixMulCol(int* A_gpu, int* B_gpu, int* C_gpu, int N) { // Row i of matrix C int row = blockIdx.y * blockDim.y + threadIdx.y; // Column j of matrix C int col = blockIdx.x * blockDim.x + threadIdx.x; int accu = 0; if(row<N && col<N) { for(int k=0; k<N; k++) { accu = accu + A_gpu[k*N+row] * B_gpu[k*N+col]; } C_gpu[row*N+col] = accu; } } void random_ints(int* x, int size) { srand(time(0)); int i; for (i=0;i<size;i++) { x[i]=rand()%10; //std::cout << x[i] << " "; } } void matrixMulCPU(int* A_cpu, int* B_cpu, int* C_cpu, int N) { for(int row=0; row<N; row++) { for(int col=0; col<N; col++){ C_cpu[row*N+col] = 0; for(int elm=0; elm<N; elm++) { C_cpu[row*N+col] = C_cpu[row*N+col] + A_cpu[row*N+elm] * B_cpu[elm*N+col]; } } } } int main(int argc, char* argv[]){ //int N = 3; int N = atoi(argv[1]); bool memCol = false; if (strcmp(argv[4],"MC")==0) { memCol=true; } int NN = N*N; //define A_cpu, B_cpu, C_cpu in the CPU memory int *A_cpu, *B_cpu, *C_cpu; int size = NN * sizeof(int); // Setup input values //std::cout << "A: "; A_cpu = (int*)malloc(size); random_ints(A_cpu, NN); //std::cout << "\nB: "; B_cpu = (int*)malloc(size); random_ints(B_cpu, NN); C_cpu = (int*)malloc(size); if (strcmp(argv[2],"gpu")==0) { if(strcmp(argv[3],"MT")==0) { gpuMemTransfer(A_cpu, B_cpu, C_cpu, N, size, memCol); } else { gpuNoMemTransfer(A_cpu, B_cpu, C_cpu, N, size, memCol); } } else { auto t1 = std::chrono::high_resolution_clock::now(); matrixMulCPU(A_cpu, B_cpu, C_cpu, N); auto t2 = std::chrono::high_resolution_clock::now(); auto duration = std::chrono::duration_cast<std::chrono::microseconds>( t2 - t1 ).count(); std::cout << "N: " << N << "\tCPU time: " << duration << "us" << std::endl; } //std::cout << "\nC: " << C_cpu[0] << " " << C_cpu[1] << " " <<C_cpu[2] << " " << C_cpu[3] << " " << C_cpu[4] <<" " << C_cpu[7] <<" " << C_cpu[8] <<"\n"; free(A_cpu); free(B_cpu); free(C_cpu); return 0; } void gpuMemTransfer(int* A_cpu, int* B_cpu, int* C_cpu, int N, int size, bool memCol) { //define A_gpu, B_gpu, C_gpu in the GPU memory //std::cout << "\nMem Tr\n"; int *A_gpu, *B_gpu, *C_gpu; hipMalloc((void **)&A_gpu, size); hipMalloc((void **)&B_gpu, size); hipMalloc((void **)&C_gpu, size); dim3 dimBlock(16, 16); dim3 dimGrid((N+dimBlock.x-1)/dimBlock.x, (N+dimBlock.y-1)/dimBlock.y); float time = 0; hipEvent_t start, stop; hipEventCreate(&start); hipEventCreate(&stop); if (memCol==true) { //std::cout << "MC\n"; hipEventRecord( start, 0 ); // Copy inputs to device hipMemcpy(A_gpu, A_cpu, size, hipMemcpyHostToDevice); hipMemcpy(B_gpu, B_cpu, size, hipMemcpyHostToDevice); hipLaunchKernelGGL(( matrixMulCol), dim3(dimGrid), dim3(dimBlock), 0, 0, A_gpu,B_gpu,C_gpu,N); //memcopy C_gpu to C_cpu hipMemcpy(C_cpu, C_gpu, size, hipMemcpyDeviceToHost); //stop time hipEventRecord( stop, 0 ); } else { //std::cout 
<< "nmc\n"; hipEventRecord( start, 0 ); // Copy inputs to device hipMemcpy(A_gpu, A_cpu, size, hipMemcpyHostToDevice); hipMemcpy(B_gpu, B_cpu, size, hipMemcpyHostToDevice); hipLaunchKernelGGL(( matrixMul), dim3(dimGrid), dim3(dimBlock), 0, 0, A_gpu,B_gpu,C_gpu,N); //memcopy C_gpu to C_cpu hipMemcpy(C_cpu, C_gpu, size, hipMemcpyDeviceToHost); //stop time hipEventRecord( stop, 0 ); } hipEventSynchronize( stop ); hipEventElapsedTime( &time, start, stop ); hipEventDestroy( start ); hipEventDestroy( stop ); hipFree(A_gpu); hipFree(B_gpu); hipFree(C_gpu); float microsec = (time)*1000; std::cout << "N: " << N << "\tMT\t" << memCol << "\tGPU time: " << microsec << "us" << std::endl; } void gpuNoMemTransfer(int* A_cpu, int* B_cpu, int* C_cpu, int N, int size, bool memCol) { //define A_gpu, B_gpu, C_gpu in the GPU memory //std::cout << "\nNoMem Tr\n"; int *A_gpu, *B_gpu, *C_gpu; hipMalloc((void **)&A_gpu, size); hipMalloc((void **)&B_gpu, size); hipMalloc((void **)&C_gpu, size); // Copy inputs to device hipMemcpy(A_gpu, A_cpu, size, hipMemcpyHostToDevice); hipMemcpy(B_gpu, B_cpu, size, hipMemcpyHostToDevice); dim3 dimBlock(16, 16); dim3 dimGrid((N+dimBlock.x-1)/dimBlock.x, (N+dimBlock.y-1)/dimBlock.y); float time = 0; hipEvent_t start, stop; hipEventCreate(&start); hipEventCreate(&stop); if (memCol==true) { //std::cout << "MC\n"; hipEventRecord( start, 0 ); hipLaunchKernelGGL(( matrixMulCol), dim3(dimGrid), dim3(dimBlock), 0, 0, A_gpu,B_gpu,C_gpu,N); hipEventRecord( stop, 0 ); } else { //std::cout << "nmc\n"; hipEventRecord( start, 0 ); hipLaunchKernelGGL(( matrixMul), dim3(dimGrid), dim3(dimBlock), 0, 0, A_gpu,B_gpu,C_gpu,N); hipEventRecord( stop, 0 ); } hipEventSynchronize( stop ); hipEventElapsedTime( &time, start, stop ); hipEventDestroy( start ); hipEventDestroy( stop ); //memcopy C_gpu to C_cpu hipMemcpy(C_cpu, C_gpu, size, hipMemcpyDeviceToHost); hipFree(A_gpu); hipFree(B_gpu); hipFree(C_gpu); float microsec = (time)*1000; std::cout << "N: " << N << "\tnt\t" << memCol << "\tGPU time: " << microsec << "us" << std::endl; }
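// --- Illustrative sketch (editor's addition, not part of the original file) ---
// Reading of the "MC" kernel above: with row-major storage, A_gpu[k*N+row] is
// element A(k, row), so matrixMulCol effectively computes C = A^T * B while
// matrixMul computes C = A * B. A hypothetical host check for small N:
static void matmul_AT_B_reference(const int* A, const int* B, int* C, int N)
{
    for (int row = 0; row < N; ++row)
        for (int col = 0; col < N; ++col) {
            int acc = 0;
            for (int k = 0; k < N; ++k)
                acc += A[k * N + row] * B[k * N + col];  // A^T(row,k) = A(k,row)
            C[row * N + col] = acc;
        }
}
// --- end of sketch ---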
fc9ec54a85a05148e47f9d94fd22fe6e9f29057d.cu
#include <stdio.h> #include <stdlib.h> #include <cstdlib> #include <iostream> #include <fstream> #include <chrono> void gpuMemTransfer(int* A_cpu, int* B_cpu, int* C_cpu, int N, int size, bool memCol); void gpuNoMemTransfer(int* A_cpu, int* B_cpu, int* C_cpu, int N, int size, bool memCol); __global__ void matrixMul(int* A_gpu, int* B_gpu, int* C_gpu, int N) { // Row i of matrix C int row = blockIdx.y * blockDim.y + threadIdx.y; // Column j of matrix C int col = blockIdx.x * blockDim.x + threadIdx.x; int accu = 0; if(row<N && col<N) { for(int k=0; k<N; k++) { accu = accu + A_gpu[row*N+k] * B_gpu[k*N+col]; } C_gpu[row*N+col] = accu; } } __global__ void matrixMulCol(int* A_gpu, int* B_gpu, int* C_gpu, int N) { // Row i of matrix C int row = blockIdx.y * blockDim.y + threadIdx.y; // Column j of matrix C int col = blockIdx.x * blockDim.x + threadIdx.x; int accu = 0; if(row<N && col<N) { for(int k=0; k<N; k++) { accu = accu + A_gpu[k*N+row] * B_gpu[k*N+col]; } C_gpu[row*N+col] = accu; } } void random_ints(int* x, int size) { srand(time(0)); int i; for (i=0;i<size;i++) { x[i]=rand()%10; //std::cout << x[i] << " "; } } void matrixMulCPU(int* A_cpu, int* B_cpu, int* C_cpu, int N) { for(int row=0; row<N; row++) { for(int col=0; col<N; col++){ C_cpu[row*N+col] = 0; for(int elm=0; elm<N; elm++) { C_cpu[row*N+col] = C_cpu[row*N+col] + A_cpu[row*N+elm] * B_cpu[elm*N+col]; } } } } int main(int argc, char* argv[]){ //int N = 3; int N = atoi(argv[1]); bool memCol = false; if (strcmp(argv[4],"MC")==0) { memCol=true; } int NN = N*N; //define A_cpu, B_cpu, C_cpu in the CPU memory int *A_cpu, *B_cpu, *C_cpu; int size = NN * sizeof(int); // Setup input values //std::cout << "A: "; A_cpu = (int*)malloc(size); random_ints(A_cpu, NN); //std::cout << "\nB: "; B_cpu = (int*)malloc(size); random_ints(B_cpu, NN); C_cpu = (int*)malloc(size); if (strcmp(argv[2],"gpu")==0) { if(strcmp(argv[3],"MT")==0) { gpuMemTransfer(A_cpu, B_cpu, C_cpu, N, size, memCol); } else { gpuNoMemTransfer(A_cpu, B_cpu, C_cpu, N, size, memCol); } } else { auto t1 = std::chrono::high_resolution_clock::now(); matrixMulCPU(A_cpu, B_cpu, C_cpu, N); auto t2 = std::chrono::high_resolution_clock::now(); auto duration = std::chrono::duration_cast<std::chrono::microseconds>( t2 - t1 ).count(); std::cout << "N: " << N << "\tCPU time: " << duration << "us" << std::endl; } //std::cout << "\nC: " << C_cpu[0] << " " << C_cpu[1] << " " <<C_cpu[2] << " " << C_cpu[3] << " " << C_cpu[4] <<" " << C_cpu[7] <<" " << C_cpu[8] <<"\n"; free(A_cpu); free(B_cpu); free(C_cpu); return 0; } void gpuMemTransfer(int* A_cpu, int* B_cpu, int* C_cpu, int N, int size, bool memCol) { //define A_gpu, B_gpu, C_gpu in the GPU memory //std::cout << "\nMem Tr\n"; int *A_gpu, *B_gpu, *C_gpu; cudaMalloc((void **)&A_gpu, size); cudaMalloc((void **)&B_gpu, size); cudaMalloc((void **)&C_gpu, size); dim3 dimBlock(16, 16); dim3 dimGrid((N+dimBlock.x-1)/dimBlock.x, (N+dimBlock.y-1)/dimBlock.y); float time = 0; cudaEvent_t start, stop; cudaEventCreate(&start); cudaEventCreate(&stop); if (memCol==true) { //std::cout << "MC\n"; cudaEventRecord( start, 0 ); // Copy inputs to device cudaMemcpy(A_gpu, A_cpu, size, cudaMemcpyHostToDevice); cudaMemcpy(B_gpu, B_cpu, size, cudaMemcpyHostToDevice); matrixMulCol<<<dimGrid, dimBlock>>>(A_gpu,B_gpu,C_gpu,N); //memcopy C_gpu to C_cpu cudaMemcpy(C_cpu, C_gpu, size, cudaMemcpyDeviceToHost); //stop time cudaEventRecord( stop, 0 ); } else { //std::cout << "nmc\n"; cudaEventRecord( start, 0 ); // Copy inputs to device cudaMemcpy(A_gpu, A_cpu, size, 
cudaMemcpyHostToDevice); cudaMemcpy(B_gpu, B_cpu, size, cudaMemcpyHostToDevice); matrixMul<<<dimGrid, dimBlock>>>(A_gpu,B_gpu,C_gpu,N); //memcopy C_gpu to C_cpu cudaMemcpy(C_cpu, C_gpu, size, cudaMemcpyDeviceToHost); //stop time cudaEventRecord( stop, 0 ); } cudaEventSynchronize( stop ); cudaEventElapsedTime( &time, start, stop ); cudaEventDestroy( start ); cudaEventDestroy( stop ); cudaFree(A_gpu); cudaFree(B_gpu); cudaFree(C_gpu); float microsec = (time)*1000; std::cout << "N: " << N << "\tMT\t" << memCol << "\tGPU time: " << microsec << "us" << std::endl; } void gpuNoMemTransfer(int* A_cpu, int* B_cpu, int* C_cpu, int N, int size, bool memCol) { //define A_gpu, B_gpu, C_gpu in the GPU memory //std::cout << "\nNoMem Tr\n"; int *A_gpu, *B_gpu, *C_gpu; cudaMalloc((void **)&A_gpu, size); cudaMalloc((void **)&B_gpu, size); cudaMalloc((void **)&C_gpu, size); // Copy inputs to device cudaMemcpy(A_gpu, A_cpu, size, cudaMemcpyHostToDevice); cudaMemcpy(B_gpu, B_cpu, size, cudaMemcpyHostToDevice); dim3 dimBlock(16, 16); dim3 dimGrid((N+dimBlock.x-1)/dimBlock.x, (N+dimBlock.y-1)/dimBlock.y); float time = 0; cudaEvent_t start, stop; cudaEventCreate(&start); cudaEventCreate(&stop); if (memCol==true) { //std::cout << "MC\n"; cudaEventRecord( start, 0 ); matrixMulCol<<<dimGrid, dimBlock>>>(A_gpu,B_gpu,C_gpu,N); cudaEventRecord( stop, 0 ); } else { //std::cout << "nmc\n"; cudaEventRecord( start, 0 ); matrixMul<<<dimGrid, dimBlock>>>(A_gpu,B_gpu,C_gpu,N); cudaEventRecord( stop, 0 ); } cudaEventSynchronize( stop ); cudaEventElapsedTime( &time, start, stop ); cudaEventDestroy( start ); cudaEventDestroy( stop ); //memcopy C_gpu to C_cpu cudaMemcpy(C_cpu, C_gpu, size, cudaMemcpyDeviceToHost); cudaFree(A_gpu); cudaFree(B_gpu); cudaFree(C_gpu); float microsec = (time)*1000; std::cout << "N: " << N << "\tnt\t" << memCol << "\tGPU time: " << microsec << "us" << std::endl; }
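// --- Illustrative sketch (editor's addition, not part of the original file) ---
// The cudaEvent timing pattern used in gpuMemTransfer/gpuNoMemTransfer, wrapped in
// a small hypothetical helper; the callable contains whatever work should be timed
// (kernel launches and, optionally, the surrounding memcpys).
#include <cuda_runtime.h>

template <typename F>
static float time_gpu_us(F&& work)
{
    cudaEvent_t start, stop;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);
    cudaEventRecord(start, 0);
    work();                                  // launch kernels / issue copies here
    cudaEventRecord(stop, 0);
    cudaEventSynchronize(stop);              // block until the stop event completes
    float ms = 0.0f;
    cudaEventElapsedTime(&ms, start, stop);  // elapsed time in milliseconds
    cudaEventDestroy(start);
    cudaEventDestroy(stop);
    return ms * 1000.0f;                     // report microseconds, as main() prints
}
// Usage (hypothetical):
// float us = time_gpu_us([&]{ matrixMul<<<dimGrid, dimBlock>>>(A_gpu, B_gpu, C_gpu, N); });
// --- end of sketch ---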
8cc4e5317abfecb6b1a42d7694cfa15b54ad2964.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // Modified by DaveMc to illustrate one child per lane /* GK104-optimized variant of the "Persistent speculative while-while" kernel used in: "Understanding the Efficiency of Ray Traversal on GPUs", Timo Aila and Samuli Laine, Proc. High-Performance Graphics 2009 This variant fetches new work dynamically as soon as the warp occupancy drops below a pre-determined threshold. */ #include "CudaTracerKernels.hpp" //------------------------------------------------------------------------ #define STACK_SIZE 64 // Size of the traversal stack in local memory. #define DYNAMIC_FETCH_THRESHOLD 20 // If fewer than this active, fetch new rays extern "C" __device__ int g_warpCounter; // Work counter for persistent threads. //------------------------------------------------------------------------ extern "C" __global__ void queryConfig(void) { g_config.bvhLayout = BVHLayout_Compact2; g_config.blockWidth = 32; g_config.blockHeight = 4; g_config.usePersistentThreads = 1; } //------------------------------------------------------------------------ TRACE_FUNC { // Traversal stack in CUDA thread-local memory. int traversalStack[STACK_SIZE]; traversalStack[0] = EntrypointSentinel; // Bottom-most entry. // Live state during traversal, stored in registers. float origx, origy, origz; // Ray origin. char* stackPtr; // Current position in traversal stack. int leafAddr; // First postponed leaf, non-negative if none. int leafAddr2; // Second postponed leaf, non-negative if none. int nodeAddr = EntrypointSentinel; // Non-negative: current internal node, negative: second postponed leaf. int hitIndex; // Triangle index of the closest intersection, -1 if none. float hitT; // t-value of the closest intersection. float tmin; int rayidx; float oodx; float oody; float oodz; float dirx; float diry; float dirz; float idirx; float idiry; float idirz; // Initialize persistent threads. __shared__ volatile int nextRayArray[MaxBlockHeight]; // Current ray index in global buffer. // Persistent threads: fetch and process rays in a loop. do { const int tidx = threadIdx.x; volatile int& rayBase = nextRayArray[threadIdx.y]; // Fetch new rays from the global pool using lane 0. const bool terminated = nodeAddr == EntrypointSentinel; const unsigned int maskTerminated = __ballot(terminated); const int numTerminated = __popc(maskTerminated); const int idxTerminated = __popc(maskTerminated & ((1u << tidx) - 1)); if (terminated) { if (idxTerminated == 0) rayBase = atomicAdd(&g_warpCounter, numTerminated); rayidx = rayBase + idxTerminated; if (rayidx >= numRays) break; // Fetch ray. float4 o = FETCH_GLOBAL(rays, rayidx * 2 + 0, float4); float4 d = FETCH_GLOBAL(rays, rayidx * 2 + 1, float4); origx = o.x; origy = o.y; origz = o.z; tmin = o.w; dirx = d.x; diry = d.y; dirz = d.z; hitT = d.w; float ooeps = exp2f(-80.0f); // Avoid div by zero. idirx = 1.0f / (fabsf(d.x) > ooeps ? d.x : copysignf(ooeps, d.x)); idiry = 1.0f / (fabsf(d.y) > ooeps ? d.y : copysignf(ooeps, d.y)); idirz = 1.0f / (fabsf(d.z) > ooeps ? d.z : copysignf(ooeps, d.z)); oodx = origx * idirx; oody = origy * idiry; oodz = origz * idirz; // Setup traversal. stackPtr = (char*)&traversalStack[0]; leafAddr = 0; // No postponed leaf. leafAddr2 = 0; // No postponed leaf. nodeAddr = 0; // Start from the root. hitIndex = -1; // No triangle intersected so far. } // Traversal loop. while (nodeAddr != EntrypointSentinel) { // Traverse internal nodes until all SIMD lanes have found a leaf. 
// while (nodeAddr >= 0 && nodeAddr != EntrypointSentinel) while (unsigned int(nodeAddr) < unsigned int(EntrypointSentinel)) // functionally equivalent, but faster { // Fetch AABBs of the two child nodes. const float4 n0xy = tex1Dfetch(t_nodesA, nodeAddr + 0); // (c0.lo.x, c0.hi.x, c0.lo.y, c0.hi.y) const float4 tmp = tex1Dfetch(t_nodesA, nodeAddr + 1); // (c0.lo.z, c0.hi.z, c1.lo.z, c1.hi.z) float2 n0z = *(float2*)&tmp; int2 cnodes = *(int2*)(&tmp)+1; // child_index0, child_index1 // Intersect the ray against the child nodes. const float c0lox = n0xy.x * idirx - oodx; const float c0hix = n0xy.y * idirx - oodx; const float c0loy = n0xy.z * idiry - oody; const float c0hiy = n0xy.w * idiry - oody; const float c0loz = n0z.x * idirz - oodz; const float c0hiz = n0z.y * idirz - oodz; const float c0min = spanBeginKepler(c0lox, c0hix, c0loy, c0hiy, c0loz, c0hiz, tmin); const float c0max = spanEndKepler(c0lox, c0hix, c0loy, c0hiy, c0loz, c0hiz, hitT); bool traverseChild0 = (c0max >= c0min); // Child was not intersected => pop stack. if (!traverseChild0) { nodeAddr = *(int*)stackPtr; stackPtr -= 4; } // Otherwise => fetch child pointers. else { nodeAddr = cnodes.x; // Push one child. stackPtr += 4; *(int*)stackPtr = cnodes.y; } // First leaf => postpone and continue traversal. if (nodeAddr < 0 && leafAddr >= 0) // Postpone max 1 // if (nodeAddr < 0 && leafAddr2 >= 0) // Postpone max 2 { //leafAddr2= leafAddr; // postpone 2 leafAddr = nodeAddr; nodeAddr = *(int*)stackPtr; stackPtr -= 4; } // All SIMD lanes have found a leaf? => process them. // NOTE: inline PTX implementation of "if(!__any(leafAddr >= 0)) break;". // tried everything with CUDA 4.2 but always got several redundant instructions. unsigned int mask; asm("{\n" " .reg .pred p; \n" "setp.ge.s32 p, %1, 0; \n" "vote.ballot.b32 %0,p; \n" "}" : "=r"(mask) : "r"(leafAddr)); if (!mask) break; //if(!__any(leafAddr >= 0)) // break; } // Process postponed leaf nodes. while (leafAddr < 0) { for (int triAddr = ~leafAddr;; triAddr += 3) { // Tris in TEX (good to fetch as a single batch) const float4 v00 = tex1Dfetch(t_trisA, triAddr + 0); const float4 v11 = tex1Dfetch(t_trisA, triAddr + 1); const float4 v22 = tex1Dfetch(t_trisA, triAddr + 2); // End marker (negative zero) => all triangles processed. if (__float_as_int(v00.x) == 0x80000000) break; float Oz = v00.w - origx * v00.x - origy * v00.y - origz * v00.z; float invDz = 1.0f / (dirx*v00.x + diry * v00.y + dirz * v00.z); float t = Oz * invDz; if (t > tmin && t < hitT) { // Compute and check barycentric u. float Ox = v11.w + origx * v11.x + origy * v11.y + origz * v11.z; float Dx = dirx * v11.x + diry * v11.y + dirz * v11.z; float u = Ox + t * Dx; if (u >= 0.0f) { // Compute and check barycentric v. float Oy = v22.w + origx * v22.x + origy * v22.y + origz * v22.z; float Dy = dirx * v22.x + diry * v22.y + dirz * v22.z; float v = Oy + t * Dy; if (v >= 0.0f && u + v <= 1.0f) { // Record intersection. // Closest intersection not required => terminate. hitT = t; hitIndex = triAddr; if (anyHit) { nodeAddr = EntrypointSentinel; break; } } } } } // triangle // Another leaf was postponed => process it as well. // if(leafAddr2<0) { leafAddr = leafAddr2; leafAddr2=0; } else // postpone2 { leafAddr = nodeAddr; if (nodeAddr < 0) { nodeAddr = *(int*)stackPtr; stackPtr -= 4; } } } // leaf // DYNAMIC FETCH if (__popc(__ballot(true)) < DYNAMIC_FETCH_THRESHOLD) break; } // traversal // Remap intersected triangle index, and store the result. 
if (hitIndex == -1) { STORE_RESULT(rayidx, -1, hitT); } else { STORE_RESULT(rayidx, FETCH_TEXTURE(triIndices, hitIndex, int), hitT); } } while (true); } //------------------------------------------------------------------------
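// --- Illustrative sketch (editor's addition, not part of the original kernel) ---
// spanBeginKepler/spanEndKepler are defined elsewhere (CudaTracerKernels.hpp); the
// sketch below assumes they implement the standard slab test, i.e. the entry t is
// the max of the per-axis minima (and tmin) and the exit t is the min of the
// per-axis maxima (and hitT). intersectChildAABB is a hypothetical name.
__device__ inline bool intersectChildAABB(float lox, float hix, float loy, float hiy,
                                          float loz, float hiz, float tmin, float hitT,
                                          float& cmin, float& cmax)
{
    // Each (lo, hi) pair is already a slab t value: bound * idir - ood.
    cmin = fmaxf(fmaxf(fminf(lox, hix), fminf(loy, hiy)),
                 fmaxf(fminf(loz, hiz), tmin));
    cmax = fminf(fminf(fmaxf(lox, hix), fmaxf(loy, hiy)),
                 fminf(fmaxf(loz, hiz), hitT));
    return cmax >= cmin;                      // same condition as traverseChild0 above
}
// --- end of sketch ---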
8cc4e5317abfecb6b1a42d7694cfa15b54ad2964.cu
// Modified by DaveMc to illustrate one child per lane /* GK104-optimized variant of the "Persistent speculative while-while" kernel used in: "Understanding the Efficiency of Ray Traversal on GPUs", Timo Aila and Samuli Laine, Proc. High-Performance Graphics 2009 This variant fetches new work dynamically as soon as the warp occupancy drops below a pre-determined threshold. */ #include "CudaTracerKernels.hpp" //------------------------------------------------------------------------ #define STACK_SIZE 64 // Size of the traversal stack in local memory. #define DYNAMIC_FETCH_THRESHOLD 20 // If fewer than this active, fetch new rays extern "C" __device__ int g_warpCounter; // Work counter for persistent threads. //------------------------------------------------------------------------ extern "C" __global__ void queryConfig(void) { g_config.bvhLayout = BVHLayout_Compact2; g_config.blockWidth = 32; g_config.blockHeight = 4; g_config.usePersistentThreads = 1; } //------------------------------------------------------------------------ TRACE_FUNC { // Traversal stack in CUDA thread-local memory. int traversalStack[STACK_SIZE]; traversalStack[0] = EntrypointSentinel; // Bottom-most entry. // Live state during traversal, stored in registers. float origx, origy, origz; // Ray origin. char* stackPtr; // Current position in traversal stack. int leafAddr; // First postponed leaf, non-negative if none. int leafAddr2; // Second postponed leaf, non-negative if none. int nodeAddr = EntrypointSentinel; // Non-negative: current internal node, negative: second postponed leaf. int hitIndex; // Triangle index of the closest intersection, -1 if none. float hitT; // t-value of the closest intersection. float tmin; int rayidx; float oodx; float oody; float oodz; float dirx; float diry; float dirz; float idirx; float idiry; float idirz; // Initialize persistent threads. __shared__ volatile int nextRayArray[MaxBlockHeight]; // Current ray index in global buffer. // Persistent threads: fetch and process rays in a loop. do { const int tidx = threadIdx.x; volatile int& rayBase = nextRayArray[threadIdx.y]; // Fetch new rays from the global pool using lane 0. const bool terminated = nodeAddr == EntrypointSentinel; const unsigned int maskTerminated = __ballot(terminated); const int numTerminated = __popc(maskTerminated); const int idxTerminated = __popc(maskTerminated & ((1u << tidx) - 1)); if (terminated) { if (idxTerminated == 0) rayBase = atomicAdd(&g_warpCounter, numTerminated); rayidx = rayBase + idxTerminated; if (rayidx >= numRays) break; // Fetch ray. float4 o = FETCH_GLOBAL(rays, rayidx * 2 + 0, float4); float4 d = FETCH_GLOBAL(rays, rayidx * 2 + 1, float4); origx = o.x; origy = o.y; origz = o.z; tmin = o.w; dirx = d.x; diry = d.y; dirz = d.z; hitT = d.w; float ooeps = exp2f(-80.0f); // Avoid div by zero. idirx = 1.0f / (fabsf(d.x) > ooeps ? d.x : copysignf(ooeps, d.x)); idiry = 1.0f / (fabsf(d.y) > ooeps ? d.y : copysignf(ooeps, d.y)); idirz = 1.0f / (fabsf(d.z) > ooeps ? d.z : copysignf(ooeps, d.z)); oodx = origx * idirx; oody = origy * idiry; oodz = origz * idirz; // Setup traversal. stackPtr = (char*)&traversalStack[0]; leafAddr = 0; // No postponed leaf. leafAddr2 = 0; // No postponed leaf. nodeAddr = 0; // Start from the root. hitIndex = -1; // No triangle intersected so far. } // Traversal loop. while (nodeAddr != EntrypointSentinel) { // Traverse internal nodes until all SIMD lanes have found a leaf. 
// while (nodeAddr >= 0 && nodeAddr != EntrypointSentinel) while (unsigned int(nodeAddr) < unsigned int(EntrypointSentinel)) // functionally equivalent, but faster { // Fetch AABBs of the two child nodes. const float4 n0xy = tex1Dfetch(t_nodesA, nodeAddr + 0); // (c0.lo.x, c0.hi.x, c0.lo.y, c0.hi.y) const float4 tmp = tex1Dfetch(t_nodesA, nodeAddr + 1); // (c0.lo.z, c0.hi.z, c1.lo.z, c1.hi.z) float2 n0z = *(float2*)&tmp; int2 cnodes = *(int2*)(&tmp)+1; // child_index0, child_index1 // Intersect the ray against the child nodes. const float c0lox = n0xy.x * idirx - oodx; const float c0hix = n0xy.y * idirx - oodx; const float c0loy = n0xy.z * idiry - oody; const float c0hiy = n0xy.w * idiry - oody; const float c0loz = n0z.x * idirz - oodz; const float c0hiz = n0z.y * idirz - oodz; const float c0min = spanBeginKepler(c0lox, c0hix, c0loy, c0hiy, c0loz, c0hiz, tmin); const float c0max = spanEndKepler(c0lox, c0hix, c0loy, c0hiy, c0loz, c0hiz, hitT); bool traverseChild0 = (c0max >= c0min); // Child was not intersected => pop stack. if (!traverseChild0) { nodeAddr = *(int*)stackPtr; stackPtr -= 4; } // Otherwise => fetch child pointers. else { nodeAddr = cnodes.x; // Push one child. stackPtr += 4; *(int*)stackPtr = cnodes.y; } // First leaf => postpone and continue traversal. if (nodeAddr < 0 && leafAddr >= 0) // Postpone max 1 // if (nodeAddr < 0 && leafAddr2 >= 0) // Postpone max 2 { //leafAddr2= leafAddr; // postpone 2 leafAddr = nodeAddr; nodeAddr = *(int*)stackPtr; stackPtr -= 4; } // All SIMD lanes have found a leaf? => process them. // NOTE: inline PTX implementation of "if(!__any(leafAddr >= 0)) break;". // tried everything with CUDA 4.2 but always got several redundant instructions. unsigned int mask; asm("{\n" " .reg .pred p; \n" "setp.ge.s32 p, %1, 0; \n" "vote.ballot.b32 %0,p; \n" "}" : "=r"(mask) : "r"(leafAddr)); if (!mask) break; //if(!__any(leafAddr >= 0)) // break; } // Process postponed leaf nodes. while (leafAddr < 0) { for (int triAddr = ~leafAddr;; triAddr += 3) { // Tris in TEX (good to fetch as a single batch) const float4 v00 = tex1Dfetch(t_trisA, triAddr + 0); const float4 v11 = tex1Dfetch(t_trisA, triAddr + 1); const float4 v22 = tex1Dfetch(t_trisA, triAddr + 2); // End marker (negative zero) => all triangles processed. if (__float_as_int(v00.x) == 0x80000000) break; float Oz = v00.w - origx * v00.x - origy * v00.y - origz * v00.z; float invDz = 1.0f / (dirx*v00.x + diry * v00.y + dirz * v00.z); float t = Oz * invDz; if (t > tmin && t < hitT) { // Compute and check barycentric u. float Ox = v11.w + origx * v11.x + origy * v11.y + origz * v11.z; float Dx = dirx * v11.x + diry * v11.y + dirz * v11.z; float u = Ox + t * Dx; if (u >= 0.0f) { // Compute and check barycentric v. float Oy = v22.w + origx * v22.x + origy * v22.y + origz * v22.z; float Dy = dirx * v22.x + diry * v22.y + dirz * v22.z; float v = Oy + t * Dy; if (v >= 0.0f && u + v <= 1.0f) { // Record intersection. // Closest intersection not required => terminate. hitT = t; hitIndex = triAddr; if (anyHit) { nodeAddr = EntrypointSentinel; break; } } } } } // triangle // Another leaf was postponed => process it as well. // if(leafAddr2<0) { leafAddr = leafAddr2; leafAddr2=0; } else // postpone2 { leafAddr = nodeAddr; if (nodeAddr < 0) { nodeAddr = *(int*)stackPtr; stackPtr -= 4; } } } // leaf // DYNAMIC FETCH if (__popc(__ballot(true)) < DYNAMIC_FETCH_THRESHOLD) break; } // traversal // Remap intersected triangle index, and store the result. 
if (hitIndex == -1) { STORE_RESULT(rayidx, -1, hitT); } else { STORE_RESULT(rayidx, FETCH_TEXTURE(triIndices, hitIndex, int), hitT); } } while (true); } //------------------------------------------------------------------------
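// --- Illustrative sketch (editor's addition, not part of the original kernel) ---
// The warp votes above (__ballot, __any, and the inline-PTX vote.ballot) date from
// the CUDA 4.x era; on CUDA 9 and later the equivalent intrinsics take an explicit
// participation mask. warpHasPostponedLeaf is a hypothetical helper.
__device__ inline bool warpHasPostponedLeaf(int leafAddr)
{
    // Full-warp equivalent of the inline PTX "vote.ballot" on (leafAddr >= 0).
    unsigned int mask = __ballot_sync(0xFFFFFFFFu, leafAddr >= 0);
    return mask != 0u;                        // mirrors "if (!mask) break;"
}
// --- end of sketch ---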
f209161d6c277e0522e0b927c52674683594105c.hip
// !!! This is a file automatically generated by hipify!!! #include <hip/hip_runtime.h> #include <hip/hip_runtime.h> #include "common.h" #include "efficient.h" namespace StreamCompaction { namespace Efficient { using StreamCompaction::Common::PerformanceTimer; PerformanceTimer& timer() { static PerformanceTimer timer; return timer; } // Kernel that does a up-sweep __global__ void kernUpSweep(int n, int levelPowerOne, int levelPower, int *odata) { int index = (blockIdx.x * blockDim.x) + threadIdx.x; int divide = index / levelPowerOne; if (index - (divide * levelPowerOne) == 0) { odata[index + levelPowerOne - 1] += odata[index + levelPower - 1]; } } // Kernel that does a down-sweep __global__ void kernDownSweep(int n, int levelPowerPlusOne, int levelPower, int *odata) { int index = (blockIdx.x * blockDim.x) + threadIdx.x; int divide = index / levelPowerPlusOne; if (index - (divide * levelPowerPlusOne) == 0) { int temp = odata[index + levelPower - 1]; odata[index + levelPower - 1] = odata[index + levelPowerPlusOne - 1]; odata[index + levelPowerPlusOne - 1] += temp; } } /** * Performs prefix-sum (aka scan) on idata, storing the result into odata. */ void scan(int n, int *odata, const int *idata) { scan(n, odata, idata, true); } /** * Performs prefix-sum (aka scan) on idata, storing the result into odata. */ void scan(int n, int *odata, const int *idata, const bool time) { // Initialize blockSize and fullBlocksPerGrid int blockSize = 128; dim3 fullBlocksPerGrid((n + blockSize - 1) / blockSize); // Initialize variables and device arrays int totalLevels = ilog2ceil(n); int arraySize = pow(2, totalLevels); // To handle non-power of two lengths int *dev_array; // Allocate device array. hipMalloc((void**) &dev_array, arraySize * sizeof(int)); checkCUDAError("hipMalloc dev_array failed!"); // Copy input data into dev_read hipMemcpy(dev_array, idata, sizeof(int) * n, hipMemcpyHostToDevice); if (time) { timer().startGpuTimer(); } // TODO // Go through the levels for Up Sweep for (unsigned int level = 0; level <= totalLevels; level++) { int levelPowerOne = pow(2, level + 1); int levelPower = pow(2, level); // invoke kernel kernUpSweep << <fullBlocksPerGrid, blockSize >> >(n, levelPowerOne, levelPower, dev_array); } // Copy values to a temporary array int* temp_array = new int[arraySize]; hipMemcpy(temp_array, dev_array, sizeof(int) * arraySize, hipMemcpyDeviceToHost); // Set the last element to zero temp_array[arraySize - 1] = 0; // Copy array back to GPU hipMemcpy(dev_array, temp_array, sizeof(int) * arraySize, hipMemcpyHostToDevice); // Go through the levels for Down Sweep for (int level = totalLevels - 1; level >= 0; level--) { int levelPowerPlusOne = pow(2, level + 1); int levelPower = pow(2, level); // invoke kernel kernDownSweep << <fullBlocksPerGrid, blockSize >> >(n, levelPowerPlusOne, levelPower, dev_array); } // Copy data from GPU to output array hipMemcpy(odata, dev_array, sizeof(int) * n, hipMemcpyDeviceToHost); if (time) { timer().endGpuTimer(); } // Free memory hipFree(dev_array); delete temp_array; } /** * Performs stream compaction on idata, storing the result into odata. * All zeroes are discarded. * * @param n The number of elements in idata. * @param odata The array into which to store elements. * @param idata The array of elements to compact. * @returns The number of elements remaining after compaction. 
*/ int compact(int n, int *odata, const int *idata) { int blockSize = 256; dim3 fullBlocksPerGrid((n + blockSize - 1) / blockSize); // Device arrays int *dev_inData; int *dev_outData; int *dev_bool; int *dev_scan; // Allocate device array. hipMalloc((void**) &dev_inData, n * sizeof(int)); checkCUDAError("hipMalloc dev_inData failed!"); hipMalloc((void**) &dev_outData, n * sizeof(int)); checkCUDAError("hipMalloc dev_outData failed!"); hipMalloc((void**) &dev_bool, n * sizeof(int)); checkCUDAError("hipMalloc dev_bool failed!"); hipMalloc((void**) &dev_scan, n * sizeof(int)); checkCUDAError("hipMalloc dev_scan failed!"); timer().startGpuTimer(); // TODO // Map to booleans hipMemcpy(dev_inData, idata, sizeof(int) * n, hipMemcpyHostToDevice); Common::kernMapToBoolean << < fullBlocksPerGrid, blockSize >> > (n, dev_bool, dev_inData); // Create host arrays that will be passed into scan int *scan_inData = new int[n]; int *scan_outData = new int[n]; hipMemcpy(scan_inData, dev_bool, sizeof(int) * n, hipMemcpyDeviceToHost); bool lastOne = scan_inData[n - 1]; // Remember if last bool is a 1. Will be used later. // Scan scan(n, scan_outData, scan_inData, false); // Use result from scan to find how many elements are compacted int count = scan_outData[n - 1]; if (lastOne) { count++; } // Copy scan result to device hipMemcpy(dev_scan, scan_outData, sizeof(int) * n, hipMemcpyHostToDevice); // Perform scatter Common::kernScatter << < fullBlocksPerGrid, blockSize >> > (n, dev_outData, dev_inData, dev_bool, dev_scan); // Copy result to CPU hipMemcpy(odata, dev_outData, sizeof(int) * n, hipMemcpyDeviceToHost); timer().endGpuTimer(); // Free memory hipFree(dev_inData); hipFree(dev_bool); hipFree(dev_scan); delete scan_inData; delete scan_outData; return count; } } }
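// --- Illustrative sketch (editor's addition, not part of the project) ---
// Host-side emulation of the up-sweep/down-sweep index pattern used by kernUpSweep
// and kernDownSweep, on a power-of-two array. In the classic formulation the
// up-sweep runs while stride < n and the down-sweep from n/2 down to 1.
// blelloch_scan_host is a hypothetical helper.
#include <vector>

static void blelloch_scan_host(std::vector<int>& x)   // x.size() must be a power of two
{
    const int n = (int)x.size();
    // Up-sweep (reduce): partial sums accumulate toward the last element.
    for (int stride = 1; stride < n; stride *= 2)
        for (int k = 0; k + 2 * stride - 1 < n; k += 2 * stride)
            x[k + 2 * stride - 1] += x[k + stride - 1];
    // Clear the root, then down-sweep to turn the sums into an exclusive scan.
    x[n - 1] = 0;
    for (int stride = n / 2; stride >= 1; stride /= 2)
        for (int k = 0; k + 2 * stride - 1 < n; k += 2 * stride) {
            const int t = x[k + stride - 1];
            x[k + stride - 1] = x[k + 2 * stride - 1];
            x[k + 2 * stride - 1] += t;
        }
}
// Example: {1, 2, 3, 4} becomes {0, 1, 3, 6}.
// --- end of sketch ---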
f209161d6c277e0522e0b927c52674683594105c.cu
#include <cuda.h> #include <cuda_runtime.h> #include "common.h" #include "efficient.h" namespace StreamCompaction { namespace Efficient { using StreamCompaction::Common::PerformanceTimer; PerformanceTimer& timer() { static PerformanceTimer timer; return timer; } // Kernel that does a up-sweep __global__ void kernUpSweep(int n, int levelPowerOne, int levelPower, int *odata) { int index = (blockIdx.x * blockDim.x) + threadIdx.x; int divide = index / levelPowerOne; if (index - (divide * levelPowerOne) == 0) { odata[index + levelPowerOne - 1] += odata[index + levelPower - 1]; } } // Kernel that does a down-sweep __global__ void kernDownSweep(int n, int levelPowerPlusOne, int levelPower, int *odata) { int index = (blockIdx.x * blockDim.x) + threadIdx.x; int divide = index / levelPowerPlusOne; if (index - (divide * levelPowerPlusOne) == 0) { int temp = odata[index + levelPower - 1]; odata[index + levelPower - 1] = odata[index + levelPowerPlusOne - 1]; odata[index + levelPowerPlusOne - 1] += temp; } } /** * Performs prefix-sum (aka scan) on idata, storing the result into odata. */ void scan(int n, int *odata, const int *idata) { scan(n, odata, idata, true); } /** * Performs prefix-sum (aka scan) on idata, storing the result into odata. */ void scan(int n, int *odata, const int *idata, const bool time) { // Initialize blockSize and fullBlocksPerGrid int blockSize = 128; dim3 fullBlocksPerGrid((n + blockSize - 1) / blockSize); // Initialize variables and device arrays int totalLevels = ilog2ceil(n); int arraySize = pow(2, totalLevels); // To handle non-power of two lengths int *dev_array; // Allocate device array. cudaMalloc((void**) &dev_array, arraySize * sizeof(int)); checkCUDAError("cudaMalloc dev_array failed!"); // Copy input data into dev_read cudaMemcpy(dev_array, idata, sizeof(int) * n, cudaMemcpyHostToDevice); if (time) { timer().startGpuTimer(); } // TODO // Go through the levels for Up Sweep for (unsigned int level = 0; level <= totalLevels; level++) { int levelPowerOne = pow(2, level + 1); int levelPower = pow(2, level); // invoke kernel kernUpSweep << <fullBlocksPerGrid, blockSize >> >(n, levelPowerOne, levelPower, dev_array); } // Copy values to a temporary array int* temp_array = new int[arraySize]; cudaMemcpy(temp_array, dev_array, sizeof(int) * arraySize, cudaMemcpyDeviceToHost); // Set the last element to zero temp_array[arraySize - 1] = 0; // Copy array back to GPU cudaMemcpy(dev_array, temp_array, sizeof(int) * arraySize, cudaMemcpyHostToDevice); // Go through the levels for Down Sweep for (int level = totalLevels - 1; level >= 0; level--) { int levelPowerPlusOne = pow(2, level + 1); int levelPower = pow(2, level); // invoke kernel kernDownSweep << <fullBlocksPerGrid, blockSize >> >(n, levelPowerPlusOne, levelPower, dev_array); } // Copy data from GPU to output array cudaMemcpy(odata, dev_array, sizeof(int) * n, cudaMemcpyDeviceToHost); if (time) { timer().endGpuTimer(); } // Free memory cudaFree(dev_array); delete temp_array; } /** * Performs stream compaction on idata, storing the result into odata. * All zeroes are discarded. * * @param n The number of elements in idata. * @param odata The array into which to store elements. * @param idata The array of elements to compact. * @returns The number of elements remaining after compaction. */ int compact(int n, int *odata, const int *idata) { int blockSize = 256; dim3 fullBlocksPerGrid((n + blockSize - 1) / blockSize); // Device arrays int *dev_inData; int *dev_outData; int *dev_bool; int *dev_scan; // Allocate device array. 
cudaMalloc((void**) &dev_inData, n * sizeof(int)); checkCUDAError("cudaMalloc dev_inData failed!"); cudaMalloc((void**) &dev_outData, n * sizeof(int)); checkCUDAError("cudaMalloc dev_outData failed!"); cudaMalloc((void**) &dev_bool, n * sizeof(int)); checkCUDAError("cudaMalloc dev_bool failed!"); cudaMalloc((void**) &dev_scan, n * sizeof(int)); checkCUDAError("cudaMalloc dev_scan failed!"); timer().startGpuTimer(); // TODO // Map to booleans cudaMemcpy(dev_inData, idata, sizeof(int) * n, cudaMemcpyHostToDevice); Common::kernMapToBoolean << < fullBlocksPerGrid, blockSize >> > (n, dev_bool, dev_inData); // Create host arrays that will be passed into scan int *scan_inData = new int[n]; int *scan_outData = new int[n]; cudaMemcpy(scan_inData, dev_bool, sizeof(int) * n, cudaMemcpyDeviceToHost); bool lastOne = scan_inData[n - 1]; // Remember if last bool is a 1. Will be used later. // Scan scan(n, scan_outData, scan_inData, false); // Use result from scan to find how many elements are compacted int count = scan_outData[n - 1]; if (lastOne) { count++; } // Copy scan result to device cudaMemcpy(dev_scan, scan_outData, sizeof(int) * n, cudaMemcpyHostToDevice); // Perform scatter Common::kernScatter << < fullBlocksPerGrid, blockSize >> > (n, dev_outData, dev_inData, dev_bool, dev_scan); // Copy result to CPU cudaMemcpy(odata, dev_outData, sizeof(int) * n, cudaMemcpyDeviceToHost); timer().endGpuTimer(); // Free memory cudaFree(dev_inData); cudaFree(dev_bool); cudaFree(dev_scan); delete scan_inData; delete scan_outData; return count; } } }
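// --- Illustrative sketch (editor's addition, not part of the project) ---
// How the compacted length falls out of the exclusive scan of the 0/1 flags,
// mirroring the count/lastOne logic in compact() above; compacted_length is a
// hypothetical helper.
static int compacted_length(int n, const int* flags, const int* exclusive_scan)
{
    if (n <= 0) return 0;
    // exclusive_scan[n-1] counts the kept elements before the last slot;
    // add the last flag so a kept final element is included too.
    return exclusive_scan[n - 1] + flags[n - 1];
}
// --- end of sketch ---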
73d7e64ecf6f32d499c960ade2a6fcc46e970d47.hip
// !!! This is a file automatically generated by hipify!!! /* Copyright 2018 XIAOLIN WANG ([email protected]; [email protected]) Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ #include "LinearLayer.h" #include "WeightFactory.h" #include "Global.h" #include "cublasWrapper.h" namespace cytonLib { Variable* LinearLayer::init(string tag_, Variable* x_, int dimOutput_, bool biased_, WeightFactory* weightFactory_) { this->tag=tag_; this->x=x_; this->base=NULL; WeightFactory* pWF=weightFactory_; if(pWF==NULL) { pWF=&weightFactory; } dimInput=x->c * x->h * x->w; dimOutput=dimOutput_; biased=biased_; y.resize(x->n, dimOutput, 1, 1); y.enlarge=false; pWF->create(w, tag+".w", dimInput, dimOutput); if(biased) { pWF->create(b, tag+".b", dimOutput, 1); } return &y; } Variable* LinearLayer::init(string tag_, LinearLayer* base_, Variable* x_) { this->tag=tag_; this->x=x_; this->base=base_; this->addGrad=base->addGrad; dimInput=x->c * x->h * x->w; assert(dimInput==base->dimInput); dimOutput=base->dimOutput; biased=base->biased; y.resize(x->n, dimOutput, 1, 1); y.enlarge=false; return &y; } void LinearLayer::forward() { if(base!=NULL) { w.set(base->w); if(biased) { b.set(base->b); } base=NULL; } int num=x->n; assert(x->c*x->h*x->w == dimInput); y.resize(x->n, dimOutput, 1, 1); checkError(cublasXgemm(global.cublasHandle, HIPBLAS_OP_T, HIPBLAS_OP_N, dimOutput, num, dimInput, &global.one, w.data, w.ni, x->data, dimInput, &global.zero, y.data, dimOutput)); if(biased) { checkError(cublasXgemm(global.cublasHandle, HIPBLAS_OP_N, HIPBLAS_OP_N, dimOutput, num, 1, &global.one, b.data, b.ni, global.ones(num), 1, &global.one, y.data, dimOutput)); } } void LinearLayer::backward() { int num=x->n; Precision* beta=addGrad?&global.one:&global.zero; checkError(cublasXgemm(global.cublasHandle, HIPBLAS_OP_N, HIPBLAS_OP_N, dimInput, num, dimOutput, &global.one, w.data, w.ni, y.grad.data, dimOutput, beta, x->grad.data, dimInput)); } void LinearLayer::calculateGradient() { int num=x->n; checkError(cublasXgemm(global.cublasHandle, HIPBLAS_OP_N, HIPBLAS_OP_T, dimInput, dimOutput, num, &global.one, x->data, dimInput, y.grad.data, dimOutput, &global.one, w.grad.data, w.grad.ni)); if(biased) { checkError(cublasXgemm(global.cublasHandle, HIPBLAS_OP_N, HIPBLAS_OP_N, dimOutput, 1, num, &global.one, y.grad.data, dimOutput, global.ones(num), num, &global.one, b.grad.data, b.ni)); } } } /* namespace cytonLib */
73d7e64ecf6f32d499c960ade2a6fcc46e970d47.cu
/* Copyright 2018 XIAOLIN WANG ([email protected]; [email protected]) Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ #include "LinearLayer.h" #include "WeightFactory.h" #include "Global.h" #include "cublasWrapper.h" namespace cytonLib { Variable* LinearLayer::init(string tag_, Variable* x_, int dimOutput_, bool biased_, WeightFactory* weightFactory_) { this->tag=tag_; this->x=x_; this->base=NULL; WeightFactory* pWF=weightFactory_; if(pWF==NULL) { pWF=&weightFactory; } dimInput=x->c * x->h * x->w; dimOutput=dimOutput_; biased=biased_; y.resize(x->n, dimOutput, 1, 1); y.enlarge=false; pWF->create(w, tag+".w", dimInput, dimOutput); if(biased) { pWF->create(b, tag+".b", dimOutput, 1); } return &y; } Variable* LinearLayer::init(string tag_, LinearLayer* base_, Variable* x_) { this->tag=tag_; this->x=x_; this->base=base_; this->addGrad=base->addGrad; dimInput=x->c * x->h * x->w; assert(dimInput==base->dimInput); dimOutput=base->dimOutput; biased=base->biased; y.resize(x->n, dimOutput, 1, 1); y.enlarge=false; return &y; } void LinearLayer::forward() { if(base!=NULL) { w.set(base->w); if(biased) { b.set(base->b); } base=NULL; } int num=x->n; assert(x->c*x->h*x->w == dimInput); y.resize(x->n, dimOutput, 1, 1); checkError(cublasXgemm(global.cublasHandle, CUBLAS_OP_T, CUBLAS_OP_N, dimOutput, num, dimInput, &global.one, w.data, w.ni, x->data, dimInput, &global.zero, y.data, dimOutput)); if(biased) { checkError(cublasXgemm(global.cublasHandle, CUBLAS_OP_N, CUBLAS_OP_N, dimOutput, num, 1, &global.one, b.data, b.ni, global.ones(num), 1, &global.one, y.data, dimOutput)); } } void LinearLayer::backward() { int num=x->n; Precision* beta=addGrad?&global.one:&global.zero; checkError(cublasXgemm(global.cublasHandle, CUBLAS_OP_N, CUBLAS_OP_N, dimInput, num, dimOutput, &global.one, w.data, w.ni, y.grad.data, dimOutput, beta, x->grad.data, dimInput)); } void LinearLayer::calculateGradient() { int num=x->n; checkError(cublasXgemm(global.cublasHandle, CUBLAS_OP_N, CUBLAS_OP_T, dimInput, dimOutput, num, &global.one, x->data, dimInput, y.grad.data, dimOutput, &global.one, w.grad.data, w.grad.ni)); if(biased) { checkError(cublasXgemm(global.cublasHandle, CUBLAS_OP_N, CUBLAS_OP_N, dimOutput, 1, num, &global.one, y.grad.data, dimOutput, global.ones(num), num, &global.one, b.grad.data, b.ni)); } } } /* namespace cytonLib */
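// --- Illustrative sketch (editor's addition, not part of cytonLib) ---
// The second GEMM in forward() broadcasts the bias: a (dimOutput x 1) * (1 x num)
// rank-1 product with a vector of ones adds b to every column of the column-major
// (dimOutput x num) output. The same call written with plain cuBLAS and float data;
// add_bias_columns is a hypothetical name.
#include <cublas_v2.h>

void add_bias_columns(cublasHandle_t handle, int dimOutput, int num,
                      const float* d_b,      // device, dimOutput x 1
                      const float* d_ones,   // device, num entries, all 1.0f
                      float* d_y)            // device, dimOutput x num, column-major
{
    const float one = 1.0f;
    // y := 1 * b * ones^T + 1 * y
    cublasSgemm(handle, CUBLAS_OP_N, CUBLAS_OP_N,
                dimOutput, num, 1,
                &one, d_b, dimOutput,
                d_ones, 1,
                &one, d_y, dimOutput);
}
// --- end of sketch ---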
55551c3c96758a4c94f8f963d902b036d413d033.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include "hip/hip_runtime.h"
#include "./common/book.h"
#include <time.h>
#include <stdio.h>
#include <iostream>
#include <stdlib.h>
#include <string.h>   // for strcmp(); missing from the original include list
#include <windows.h>  // for Sleep(); system("CLS")/system("pause") make this Windows-only anyway

#define MUERTA "\x1b[34m"
#define VIVA "\x1b[36m"
#define RESET "\x1b[0m"

hipError_t lanzarKernel(char* matriz, char* matrizResultado, int fila, int columna);
__global__ void movimientoCelularBloque(char* matriz, char* matrizResultado, int fila, int columna);
void imprimirMatriz(char* matriz, int dimension, int columna);
void rellenarMatriz(char* matriz, int dimension);
int contarVivas(char* matriz, int dimension);
int numeroBloques(int dimension, int width);

int main(int arg, char* argv[]) {
    // Check that exactly the allowed number of arguments was given
    if (arg != 4) {
        printf("\nERROR: El numero de argumentos es erroneo (.exe <-a/-m> <fila> <columna>)\n");
    }
    else {
        // Convert the arguments to int
        char* filaPuntero = argv[2];
        int fila = atoi(filaPuntero);
        char* columnaPuntero = argv[3];
        int columna = atoi(columnaPuntero);

        // Query the device properties through hipDeviceProp_t
        hipDeviceProp_t propiedades;
        HANDLE_ERROR(hipGetDeviceProperties(&propiedades, 0));

        // Total size of the board
        int dimension = columna * fila;

        // Host matrices
        char* matriz = NULL;
        char* matrizResultado = NULL;
        matriz = (char*)malloc(sizeof(char) * dimension);
        matrizResultado = (char*)malloc(sizeof(char) * dimension);

        // Manual or automatic mode; automatic by default
        bool manual = false;

        // Check that the row/column counts are valid (logical ||, not the original bitwise |)
        if (columna <= 0 || fila <= 0) {
            printf("\nERROR: La fila/columna tiene que ser un entero positivo.\n");
        }
        // Check that a valid execution-mode flag was given
        // (each strcmp is tested separately; the original masked them together with a bitwise &)
        else if (strcmp("-m", argv[1]) != 0 && strcmp("-a", argv[1]) != 0) {
            printf("\nERROR: Argumentos validos solo -m[manual] o -a[automatico]\n");
        }
        // Everything validated, start the simulation
        else {
            printf("\n[Matriz(%dx%d) Dimension(%d)] [modo: %s] \n", fila, columna, dimension, argv[1]);
            if (strcmp("-m", argv[1]) == 0) {
                manual = true;
            }
            // Fill the board with dead and live cells
            rellenarMatriz(matriz, dimension);
            int generaciones = 1;
            int vivas = 1;
            while (vivas != dimension && vivas != 0) {
                system("CLS");
                if (generaciones == 1) {
                    lanzarKernel(matriz, matrizResultado, fila, columna);
                }
                else {
                    lanzarKernel(matrizResultado, matrizResultado, fila, columna);
                }
                vivas = contarVivas(matrizResultado, dimension);
                printf("\nGeneracion: %d\n", generaciones);
                printf("Celulas vivas: %d\n", vivas);
                imprimirMatriz(matrizResultado, dimension, columna);
                if (manual) {
                    system("pause");
                }
                else {
                    Sleep(1000);
                }
                generaciones++;
            }
        }
        // Free the host arrays
        free(matriz);
        free(matrizResultado);
    }
}

hipError_t lanzarKernel(char* matriz, char* matrizResultado, int fila, int columna) {
    // Device pointers for the matrices passed to the kernel
    char* matriz_d = NULL;
    char* matrizResultado_d = NULL;
    int dimension = fila * columna;

    // Device properties
    hipDeviceProp_t propiedades;
    HANDLE_ERROR(hipGetDeviceProperties(&propiedades, 0));
    hipError_t cudaStatus;

    // Block and grid sizes
    int tileWidthx = fila;
    int tileWidthy = columna;
    int bloquesx = 1;
    int bloquesy = 1;

    // If the board exceeds the per-block thread limit, split it into more than one block
    if (dimension > propiedades.maxThreadsPerBlock) {
        int anchoTesela = sqrt(propiedades.maxThreadsPerBlock);
        bloquesx = numeroBloques(fila, anchoTesela);
        bloquesy = numeroBloques(columna, anchoTesela);
        tileWidthx = anchoTesela;
        tileWidthy = anchoTesela;
    }

    // Grid and block dimensions
    dim3 dimGrid(bloquesx, bloquesy);
    dim3 dimBlock(tileWidthx, tileWidthy);

    // Select the device
    cudaStatus = hipSetDevice(0);
    if (cudaStatus != hipSuccess) { fprintf(stderr, "hipSetDevice fallo: Tienes una GPU instalada?"); goto Error; }

    // Allocate device memory
    cudaStatus = hipMalloc((void**)&matriz_d, dimension * sizeof(char));
    if (cudaStatus != hipSuccess) { fprintf(stderr, "ERROR: hipMalloc matriz_d fallo."); goto Error; }
    cudaStatus = hipMalloc((void**)&matrizResultado_d, dimension * sizeof(char));
    if (cudaStatus != hipSuccess) { fprintf(stderr, "ERROR: hipMalloc matrizResultado_d fallo."); goto Error; }

    // Copy the input matrices to the device
    cudaStatus = hipMemcpy(matriz_d, matriz, dimension * sizeof(char), hipMemcpyHostToDevice);
    if (cudaStatus != hipSuccess) { fprintf(stderr, "ERROR: hipMemcpy matriz a matriz_d fallo."); goto Error; }
    cudaStatus = hipMemcpy(matrizResultado_d, matrizResultado, dimension * sizeof(char), hipMemcpyHostToDevice);
    if (cudaStatus != hipSuccess) { fprintf(stderr, "ERROR: hipMemcpy matrizResultado a matrizResultado_d fallo."); goto Error; }

    // Launch the kernel
    movimientoCelularBloque<<<dimGrid, dimBlock>>>(matriz_d, matrizResultado_d, fila, columna);

    // Check for launch errors
    cudaStatus = hipGetLastError();
    if (cudaStatus != hipSuccess) { fprintf(stderr, "ERROR: lanzamiento de kernel fallo: %s\n", hipGetErrorString(cudaStatus)); goto Error; }

    // Check for errors after the kernel has run
    cudaStatus = hipDeviceSynchronize();
    if (cudaStatus != hipSuccess) { fprintf(stderr, "ERROR: el kernel fallo con codigo %d\n", cudaStatus); goto Error; }

    // Copy the result back to the host matrix
    cudaStatus = hipMemcpy(matrizResultado, matrizResultado_d, dimension * sizeof(char), hipMemcpyDeviceToHost);
    if (cudaStatus != hipSuccess) { fprintf(stderr, "ERROR: hipMemcpy matrizResultado_d a matrizResultado fallo."); goto Error; }

Error:
    hipFree(matriz_d);
    hipFree(matrizResultado_d);
    return cudaStatus;
}

__global__ void movimientoCelularBloque(char* matriz, char* matrizResultado, int fila, int columna) {
    int filaPos = blockIdx.x * blockDim.x + threadIdx.x;
    int columnaPos = blockIdx.y * blockDim.y + threadIdx.y;
    // Guard added: the grid may be padded past the board, so out-of-range threads must not touch memory
    if (filaPos >= fila || columnaPos >= columna) { return; }
    int posicion = filaPos * columna + columnaPos;
    int contador = 0;
    // First row
    if (filaPos == 0) {
        // Top-left corner
        if (columnaPos == 0) {
            if ((matriz[posicion + 1]) == 'X') { contador++; }
            if ((matriz[posicion + columna]) == 'X') { contador++; }
            if ((matriz[posicion + (columna + 1)]) == 'X') { contador++; }
        }
        // Top-right corner
        else if (columnaPos == (columna - 1)) {
            if ((matriz[posicion - 1]) == 'X') { contador++; }
            if ((matriz[posicion + columna]) == 'X') { contador++; }
            if ((matriz[posicion + (columna - 1)]) == 'X') { contador++; }
        }
        // First row, excluding the corners
        else {
            if ((matriz[posicion - 1]) == 'X') { contador++; }
            if ((matriz[posicion + 1]) == 'X') { contador++; }
            if ((matriz[posicion + columna]) == 'X') { contador++; }
            if ((matriz[posicion + (columna - 1)]) == 'X') { contador++; }
            if ((matriz[posicion + (columna + 1)]) == 'X') { contador++; }
        }
    }
    // Last row
    else if (filaPos == (fila - 1)) {
        // Bottom-left corner
        if (columnaPos == 0) {
            if ((matriz[posicion + 1]) == 'X') { contador++; }
            if ((matriz[posicion - columna]) == 'X') { contador++; }
            if ((matriz[posicion - (columna - 1)]) == 'X') { contador++; }
        }
        // Bottom-right corner
        else if (columnaPos == (columna - 1)) {
            if ((matriz[posicion - 1]) == 'X') { contador++; }
            if ((matriz[posicion - columna]) == 'X') { contador++; }
            if ((matriz[posicion - (columna + 1)]) == 'X') { contador++; }
        }
        // Last row, between the corners
        else {
            if ((matriz[posicion - 1]) == 'X') { contador++; }
            if ((matriz[posicion + 1]) == 'X') { contador++; }
            if ((matriz[posicion - columna]) == 'X') { contador++; }
            if ((matriz[posicion - (columna + 1)]) == 'X') { contador++; }
            if ((matriz[posicion - (columna - 1)]) == 'X') { contador++; }
        }
    }
    // First column, between the two left corners
    else if (columnaPos == 0) {
        if ((matriz[posicion + 1]) == 'X') { contador++; }
        if ((matriz[posicion - columna]) == 'X') { contador++; }
        if ((matriz[posicion + columna]) == 'X') { contador++; }
        if ((matriz[posicion + (columna + 1)]) == 'X') { contador++; }
        if ((matriz[posicion - (columna - 1)]) == 'X') { contador++; }
    }
    // Last column
    else if (columnaPos == columna - 1) {
        if ((matriz[posicion - 1]) == 'X') { contador++; }
        if ((matriz[posicion + columna]) == 'X') { contador++; }
        if ((matriz[posicion - columna]) == 'X') { contador++; }
        if ((matriz[posicion - (columna + 1)]) == 'X') { contador++; }
        if ((matriz[posicion + (columna - 1)]) == 'X') { contador++; }
    }
    // Interior positions, away from every edge
    else {
        if ((matriz[posicion + 1]) == 'X') { contador++; }
        if ((matriz[posicion - 1]) == 'X') { contador++; }
        if ((matriz[posicion + columna]) == 'X') { contador++; }
        if ((matriz[posicion - columna]) == 'X') { contador++; }
        if ((matriz[posicion - (columna + 1)]) == 'X') { contador++; }
        if ((matriz[posicion - (columna - 1)]) == 'X') { contador++; }
        if ((matriz[posicion + (columna + 1)]) == 'X') { contador++; }
        if ((matriz[posicion + (columna - 1)]) == 'X') { contador++; }
    }
    // Live cell
    if (matriz[posicion] == 'X') {
        if (contador == 2 || contador == 3) { matrizResultado[posicion] = 'X'; }
        else { matrizResultado[posicion] = 'O'; }
    }
    // Dead cell
    else {
        if (contador == 3) { matrizResultado[posicion] = 'X'; }
        else { matrizResultado[posicion] = 'O'; }
    }
}

void imprimirMatriz(char* matriz, int dimension, int columna) {
    for (int i = 0; i < dimension; i++) {
        if (matriz[i] == 'X') {
            printf(VIVA " X " RESET);
        }
        else {
            printf(MUERTA " O " RESET);
        }
        if ((i + 1) % columna == 0) {
            printf("\n");
        }
    }
}

int contarVivas(char* matriz, int dimension) {
    int contador = 0;
    for (int i = 0; i < dimension; i++) {
        if (matriz[i] == 'X') {
            contador++;
        }
    }
    return contador;
}

void rellenarMatriz(char* matriz, int dimension) {
    srand(time(0));
    for (int i = 0; i < dimension; i++) {
        char* celula = matriz + i;
        int random = rand() % dimension + 1;
        // Seed the board; the live-cell density depends on the board size
        if (dimension <= 40) {
            if (random % 2 == 0) { *celula = 'X'; }
            else { *celula = 'O'; }
        }
        else if (dimension > 40) {
            if (random % 3 == 0 && random % 2 == 0) { *celula = 'X'; }
            else { *celula = 'O'; }
        }
    }
}

int numeroBloques(int dimension, int width) {
    int resultado = 0;
    if (dimension % width == 0) {
        resultado = dimension / width;
    }
    else {
        resultado = (dimension / width) + 1;
    }
    return resultado;
}
55551c3c96758a4c94f8f963d902b036d413d033.cu
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include "cuda.h"
#include "./common/book.h"
#include <time.h>
#include <stdio.h>
#include <iostream>
#include <stdlib.h>
#include <string.h>   // for strcmp(); missing from the original include list
#include <windows.h>  // for Sleep(); system("CLS")/system("pause") make this Windows-only anyway

#define MUERTA "\x1b[34m"
#define VIVA "\x1b[36m"
#define RESET "\x1b[0m"

cudaError_t lanzarKernel(char* matriz, char* matrizResultado, int fila, int columna);
__global__ void movimientoCelularBloque(char* matriz, char* matrizResultado, int fila, int columna);
void imprimirMatriz(char* matriz, int dimension, int columna);
void rellenarMatriz(char* matriz, int dimension);
int contarVivas(char* matriz, int dimension);
int numeroBloques(int dimension, int width);

int main(int arg, char* argv[]) {
    // Check that exactly the allowed number of arguments was given
    if (arg != 4) {
        printf("\nERROR: El numero de argumentos es erroneo (.exe <-a/-m> <fila> <columna>)\n");
    }
    else {
        // Convert the arguments to int
        char* filaPuntero = argv[2];
        int fila = atoi(filaPuntero);
        char* columnaPuntero = argv[3];
        int columna = atoi(columnaPuntero);

        // Query the device properties through cudaDeviceProp
        cudaDeviceProp propiedades;
        HANDLE_ERROR(cudaGetDeviceProperties(&propiedades, 0));

        // Total size of the board
        int dimension = columna * fila;

        // Host matrices
        char* matriz = NULL;
        char* matrizResultado = NULL;
        matriz = (char*)malloc(sizeof(char) * dimension);
        matrizResultado = (char*)malloc(sizeof(char) * dimension);

        // Manual or automatic mode; automatic by default
        bool manual = false;

        // Check that the row/column counts are valid (logical ||, not the original bitwise |)
        if (columna <= 0 || fila <= 0) {
            printf("\nERROR: La fila/columna tiene que ser un entero positivo.\n");
        }
        // Check that a valid execution-mode flag was given
        // (each strcmp is tested separately; the original masked them together with a bitwise &)
        else if (strcmp("-m", argv[1]) != 0 && strcmp("-a", argv[1]) != 0) {
            printf("\nERROR: Argumentos validos solo -m[manual] o -a[automatico]\n");
        }
        // Everything validated, start the simulation
        else {
            printf("\n[Matriz(%dx%d) Dimension(%d)] [modo: %s] \n", fila, columna, dimension, argv[1]);
            if (strcmp("-m", argv[1]) == 0) {
                manual = true;
            }
            // Fill the board with dead and live cells
            rellenarMatriz(matriz, dimension);
            int generaciones = 1;
            int vivas = 1;
            while (vivas != dimension && vivas != 0) {
                system("CLS");
                if (generaciones == 1) {
                    lanzarKernel(matriz, matrizResultado, fila, columna);
                }
                else {
                    lanzarKernel(matrizResultado, matrizResultado, fila, columna);
                }
                vivas = contarVivas(matrizResultado, dimension);
                printf("\nGeneracion: %d\n", generaciones);
                printf("Celulas vivas: %d\n", vivas);
                imprimirMatriz(matrizResultado, dimension, columna);
                if (manual) {
                    system("pause");
                }
                else {
                    Sleep(1000);
                }
                generaciones++;
            }
        }
        // Free the host arrays
        free(matriz);
        free(matrizResultado);
    }
}

cudaError_t lanzarKernel(char* matriz, char* matrizResultado, int fila, int columna) {
    // Device pointers for the matrices passed to the kernel
    char* matriz_d = NULL;
    char* matrizResultado_d = NULL;
    int dimension = fila * columna;

    // Device properties
    cudaDeviceProp propiedades;
    HANDLE_ERROR(cudaGetDeviceProperties(&propiedades, 0));
    cudaError_t cudaStatus;

    // Block and grid sizes
    int tileWidthx = fila;
    int tileWidthy = columna;
    int bloquesx = 1;
    int bloquesy = 1;

    // If the board exceeds the per-block thread limit, split it into more than one block
    if (dimension > propiedades.maxThreadsPerBlock) {
        int anchoTesela = sqrt(propiedades.maxThreadsPerBlock);
        bloquesx = numeroBloques(fila, anchoTesela);
        bloquesy = numeroBloques(columna, anchoTesela);
        tileWidthx = anchoTesela;
        tileWidthy = anchoTesela;
    }

    // Grid and block dimensions
    dim3 dimGrid(bloquesx, bloquesy);
    dim3 dimBlock(tileWidthx, tileWidthy);

    // Select the device
    cudaStatus = cudaSetDevice(0);
    if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaSetDevice fallo: Tienes una GPU instalada?"); goto Error; }

    // Allocate device memory
    cudaStatus = cudaMalloc((void**)&matriz_d, dimension * sizeof(char));
    if (cudaStatus != cudaSuccess) { fprintf(stderr, "ERROR: cudaMalloc matriz_d fallo."); goto Error; }
    cudaStatus = cudaMalloc((void**)&matrizResultado_d, dimension * sizeof(char));
    if (cudaStatus != cudaSuccess) { fprintf(stderr, "ERROR: cudaMalloc matrizResultado_d fallo."); goto Error; }

    // Copy the input matrices to the device
    cudaStatus = cudaMemcpy(matriz_d, matriz, dimension * sizeof(char), cudaMemcpyHostToDevice);
    if (cudaStatus != cudaSuccess) { fprintf(stderr, "ERROR: cudaMemcpy matriz a matriz_d fallo."); goto Error; }
    cudaStatus = cudaMemcpy(matrizResultado_d, matrizResultado, dimension * sizeof(char), cudaMemcpyHostToDevice);
    if (cudaStatus != cudaSuccess) { fprintf(stderr, "ERROR: cudaMemcpy matrizResultado a matrizResultado_d fallo."); goto Error; }

    // Launch the kernel
    movimientoCelularBloque<<<dimGrid, dimBlock>>>(matriz_d, matrizResultado_d, fila, columna);

    // Check for launch errors
    cudaStatus = cudaGetLastError();
    if (cudaStatus != cudaSuccess) { fprintf(stderr, "ERROR: lanzamiento de kernel fallo: %s\n", cudaGetErrorString(cudaStatus)); goto Error; }

    // Check for errors after the kernel has run
    cudaStatus = cudaDeviceSynchronize();
    if (cudaStatus != cudaSuccess) { fprintf(stderr, "ERROR: el kernel fallo con codigo %d\n", cudaStatus); goto Error; }

    // Copy the result back to the host matrix
    cudaStatus = cudaMemcpy(matrizResultado, matrizResultado_d, dimension * sizeof(char), cudaMemcpyDeviceToHost);
    if (cudaStatus != cudaSuccess) { fprintf(stderr, "ERROR: cudaMemcpy matrizResultado_d a matrizResultado fallo."); goto Error; }

Error:
    cudaFree(matriz_d);
    cudaFree(matrizResultado_d);
    return cudaStatus;
}

__global__ void movimientoCelularBloque(char* matriz, char* matrizResultado, int fila, int columna) {
    int filaPos = blockIdx.x * blockDim.x + threadIdx.x;
    int columnaPos = blockIdx.y * blockDim.y + threadIdx.y;
    // Guard added: the grid may be padded past the board, so out-of-range threads must not touch memory
    if (filaPos >= fila || columnaPos >= columna) { return; }
    int posicion = filaPos * columna + columnaPos;
    int contador = 0;
    // First row
    if (filaPos == 0) {
        // Top-left corner
        if (columnaPos == 0) {
            if ((matriz[posicion + 1]) == 'X') { contador++; }
            if ((matriz[posicion + columna]) == 'X') { contador++; }
            if ((matriz[posicion + (columna + 1)]) == 'X') { contador++; }
        }
        // Top-right corner
        else if (columnaPos == (columna - 1)) {
            if ((matriz[posicion - 1]) == 'X') { contador++; }
            if ((matriz[posicion + columna]) == 'X') { contador++; }
            if ((matriz[posicion + (columna - 1)]) == 'X') { contador++; }
        }
        // First row, excluding the corners
        else {
            if ((matriz[posicion - 1]) == 'X') { contador++; }
            if ((matriz[posicion + 1]) == 'X') { contador++; }
            if ((matriz[posicion + columna]) == 'X') { contador++; }
            if ((matriz[posicion + (columna - 1)]) == 'X') { contador++; }
            if ((matriz[posicion + (columna + 1)]) == 'X') { contador++; }
        }
    }
    // Last row
    else if (filaPos == (fila - 1)) {
        // Bottom-left corner
        if (columnaPos == 0) {
            if ((matriz[posicion + 1]) == 'X') { contador++; }
            if ((matriz[posicion - columna]) == 'X') { contador++; }
            if ((matriz[posicion - (columna - 1)]) == 'X') { contador++; }
        }
        // Bottom-right corner
        else if (columnaPos == (columna - 1)) {
            if ((matriz[posicion - 1]) == 'X') { contador++; }
            if ((matriz[posicion - columna]) == 'X') { contador++; }
            if ((matriz[posicion - (columna + 1)]) == 'X') { contador++; }
        }
        // Last row, between the corners
        else {
            if ((matriz[posicion - 1]) == 'X') { contador++; }
            if ((matriz[posicion + 1]) == 'X') { contador++; }
            if ((matriz[posicion - columna]) == 'X') { contador++; }
            if ((matriz[posicion - (columna + 1)]) == 'X') { contador++; }
            if ((matriz[posicion - (columna - 1)]) == 'X') { contador++; }
        }
    }
    // First column, between the two left corners
    else if (columnaPos == 0) {
        if ((matriz[posicion + 1]) == 'X') { contador++; }
        if ((matriz[posicion - columna]) == 'X') { contador++; }
        if ((matriz[posicion + columna]) == 'X') { contador++; }
        if ((matriz[posicion + (columna + 1)]) == 'X') { contador++; }
        if ((matriz[posicion - (columna - 1)]) == 'X') { contador++; }
    }
    // Last column
    else if (columnaPos == columna - 1) {
        if ((matriz[posicion - 1]) == 'X') { contador++; }
        if ((matriz[posicion + columna]) == 'X') { contador++; }
        if ((matriz[posicion - columna]) == 'X') { contador++; }
        if ((matriz[posicion - (columna + 1)]) == 'X') { contador++; }
        if ((matriz[posicion + (columna - 1)]) == 'X') { contador++; }
    }
    // Interior positions, away from every edge
    else {
        if ((matriz[posicion + 1]) == 'X') { contador++; }
        if ((matriz[posicion - 1]) == 'X') { contador++; }
        if ((matriz[posicion + columna]) == 'X') { contador++; }
        if ((matriz[posicion - columna]) == 'X') { contador++; }
        if ((matriz[posicion - (columna + 1)]) == 'X') { contador++; }
        if ((matriz[posicion - (columna - 1)]) == 'X') { contador++; }
        if ((matriz[posicion + (columna + 1)]) == 'X') { contador++; }
        if ((matriz[posicion + (columna - 1)]) == 'X') { contador++; }
    }
    // Live cell
    if (matriz[posicion] == 'X') {
        if (contador == 2 || contador == 3) { matrizResultado[posicion] = 'X'; }
        else { matrizResultado[posicion] = 'O'; }
    }
    // Dead cell
    else {
        if (contador == 3) { matrizResultado[posicion] = 'X'; }
        else { matrizResultado[posicion] = 'O'; }
    }
}

void imprimirMatriz(char* matriz, int dimension, int columna) {
    for (int i = 0; i < dimension; i++) {
        if (matriz[i] == 'X') {
            printf(VIVA " X " RESET);
        }
        else {
            printf(MUERTA " O " RESET);
        }
        if ((i + 1) % columna == 0) {
            printf("\n");
        }
    }
}

int contarVivas(char* matriz, int dimension) {
    int contador = 0;
    for (int i = 0; i < dimension; i++) {
        if (matriz[i] == 'X') {
            contador++;
        }
    }
    return contador;
}

void rellenarMatriz(char* matriz, int dimension) {
    srand(time(0));
    for (int i = 0; i < dimension; i++) {
        char* celula = matriz + i;
        int random = rand() % dimension + 1;
        // Seed the board; the live-cell density depends on the board size
        if (dimension <= 40) {
            if (random % 2 == 0) { *celula = 'X'; }
            else { *celula = 'O'; }
        }
        else if (dimension > 40) {
            if (random % 3 == 0 && random % 2 == 0) { *celula = 'X'; }
            else { *celula = 'O'; }
        }
    }
}

int numeroBloques(int dimension, int width) {
    int resultado = 0;
    if (dimension % width == 0) {
        resultado = dimension / width;
    }
    else {
        resultado = (dimension / width) + 1;
    }
    return resultado;
}
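A minimal, self-contained sketch of the ceil-division launch configuration and bounds guard that a padded grid like the one built in lanzarKernel relies on; the kernel name marcar, the buffer d_tablero and the board sizes below are illustrative stand-ins, not names taken from the pair above.

#include <cstdio>
#include <cuda_runtime.h>

__global__ void marcar(char* tablero, int fila, int columna) {
    int r = blockIdx.x * blockDim.x + threadIdx.x;
    int c = blockIdx.y * blockDim.y + threadIdx.y;
    if (r >= fila || c >= columna) return;   // padded threads must not touch memory
    tablero[r * columna + c] = 'O';
}

int main() {
    const int fila = 37, columna = 53;       // deliberately not multiples of the tile width
    const int tile = 16;
    char* d_tablero = NULL;
    cudaMalloc(&d_tablero, fila * columna);

    dim3 block(tile, tile);
    dim3 grid((fila + tile - 1) / tile, (columna + tile - 1) / tile);  // ceil division
    marcar<<<grid, block>>>(d_tablero, fila, columna);
    cudaError_t err = cudaDeviceSynchronize();
    printf("launch %s over a %ux%u grid of %ux%u blocks\n",
           err == cudaSuccess ? "ok" : cudaGetErrorString(err),
           grid.x, grid.y, block.x, block.y);
    cudaFree(d_tablero);
    return 0;
}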
21d0d58df8eb760d9b7c8fa6d561a61cb8aa1a1b.hip
// !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "_As_mul_Bs_32.cu"
#include <chrono>
#include <iostream>
using namespace std;
using namespace std::chrono;

int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};

int main(int argc, char **argv) {
    hipSetDevice(0);
    char* p;
    int matrix_len = strtol(argv[1], &p, 10);
    for (int matrix_looper = 0; matrix_looper < matrix_len; matrix_looper++) {
        for (int block_looper = 0; block_looper < 20; block_looper++) {
            int XSIZE = matrices_[matrix_looper][0], YSIZE = matrices_[matrix_looper][1];
            int BLOCKX = blocks_[block_looper][0], BLOCKY = blocks_[block_looper][1];
            int mx = 1;
            int ns = 1;
            float *xval = NULL; hipMalloc(&xval, XSIZE*YSIZE);
            int *xrow = NULL; hipMalloc(&xrow, XSIZE*YSIZE);
            int *xcol = NULL; hipMalloc(&xcol, XSIZE*YSIZE);
            float *sval = NULL; hipMalloc(&sval, XSIZE*YSIZE);
            int *srow = NULL; hipMalloc(&srow, XSIZE*YSIZE);
            int *scol = NULL; hipMalloc(&scol, XSIZE*YSIZE);
            float *k = NULL; hipMalloc(&k, XSIZE*YSIZE);
            int iXSIZE = XSIZE;
            int iYSIZE = YSIZE;
            while (iXSIZE % BLOCKX != 0) { iXSIZE++; }
            while (iYSIZE % BLOCKY != 0) { iYSIZE++; }
            dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
            dim3 threadBlock(BLOCKX, BLOCKY);
            hipFree(0);
            hipLaunchKernelGGL((_As_mul_Bs_32), dim3(gridBlock), dim3(threadBlock), 0, 0, mx, ns, xval, xrow, xcol, sval, srow, scol, k);
            hipDeviceSynchronize();
            for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
                hipLaunchKernelGGL((_As_mul_Bs_32), dim3(gridBlock), dim3(threadBlock), 0, 0, mx, ns, xval, xrow, xcol, sval, srow, scol, k);
            }
            auto start = steady_clock::now();
            for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
                hipLaunchKernelGGL((_As_mul_Bs_32), dim3(gridBlock), dim3(threadBlock), 0, 0, mx, ns, xval, xrow, xcol, sval, srow, scol, k);
            }
            auto end = steady_clock::now();
            auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
            cout << '[' << usecs.count() << ',' << '(' << BLOCKX << ',' << BLOCKY << ')' << ',' << '(' << XSIZE << ',' << YSIZE << ')' << ']' << endl;
        }
    }
}
21d0d58df8eb760d9b7c8fa6d561a61cb8aa1a1b.cu
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "_As_mul_Bs_32.cu"
#include <chrono>
#include <iostream>
using namespace std;
using namespace std::chrono;

int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};

int main(int argc, char **argv) {
    cudaSetDevice(0);
    char* p;
    int matrix_len = strtol(argv[1], &p, 10);
    for (int matrix_looper = 0; matrix_looper < matrix_len; matrix_looper++) {
        for (int block_looper = 0; block_looper < 20; block_looper++) {
            int XSIZE = matrices_[matrix_looper][0], YSIZE = matrices_[matrix_looper][1];
            int BLOCKX = blocks_[block_looper][0], BLOCKY = blocks_[block_looper][1];
            int mx = 1;
            int ns = 1;
            float *xval = NULL; cudaMalloc(&xval, XSIZE*YSIZE);
            int *xrow = NULL; cudaMalloc(&xrow, XSIZE*YSIZE);
            int *xcol = NULL; cudaMalloc(&xcol, XSIZE*YSIZE);
            float *sval = NULL; cudaMalloc(&sval, XSIZE*YSIZE);
            int *srow = NULL; cudaMalloc(&srow, XSIZE*YSIZE);
            int *scol = NULL; cudaMalloc(&scol, XSIZE*YSIZE);
            float *k = NULL; cudaMalloc(&k, XSIZE*YSIZE);
            int iXSIZE = XSIZE;
            int iYSIZE = YSIZE;
            while (iXSIZE % BLOCKX != 0) { iXSIZE++; }
            while (iYSIZE % BLOCKY != 0) { iYSIZE++; }
            dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
            dim3 threadBlock(BLOCKX, BLOCKY);
            cudaFree(0);
            _As_mul_Bs_32<<<gridBlock, threadBlock>>>(mx, ns, xval, xrow, xcol, sval, srow, scol, k);
            cudaDeviceSynchronize();
            for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
                _As_mul_Bs_32<<<gridBlock, threadBlock>>>(mx, ns, xval, xrow, xcol, sval, srow, scol, k);
            }
            auto start = steady_clock::now();
            for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
                _As_mul_Bs_32<<<gridBlock, threadBlock>>>(mx, ns, xval, xrow, xcol, sval, srow, scol, k);
            }
            auto end = steady_clock::now();
            auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
            cout << '[' << usecs.count() << ',' << '(' << BLOCKX << ',' << BLOCKY << ')' << ',' << '(' << XSIZE << ',' << YSIZE << ')' << ']' << endl;
        }
    }
}
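A minimal sketch of host-side kernel timing, under the assumption that launches are asynchronous: if the stream is not synchronized before the stop timestamp, a steady_clock measurement around a launch loop (as in the harness above) mostly captures launch overhead rather than GPU work. dummyKernel, the element count and the loop counts are illustrative stand-ins, not the harness's real kernel or arguments.

#include <chrono>
#include <cstdio>
#include <cuda_runtime.h>

__global__ void dummyKernel(float* out, int n) {
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n) out[i] = out[i] * 0.5f + 1.0f;
}

int main() {
    const int n = 1 << 20;
    float* d = NULL;
    cudaMalloc(&d, n * sizeof(float));
    dim3 block(256), grid((n + 255) / 256);

    dummyKernel<<<grid, block>>>(d, n);      // warm-up launch
    cudaDeviceSynchronize();

    auto start = std::chrono::steady_clock::now();
    for (int i = 0; i < 1000; ++i) dummyKernel<<<grid, block>>>(d, n);
    cudaDeviceSynchronize();                 // drain the queue before stopping the clock
    auto end = std::chrono::steady_clock::now();

    float usecs = std::chrono::duration<float, std::micro>(end - start).count();
    printf("1000 launches: %.1f us total, %.3f us per launch\n", usecs, usecs / 1000.0f);
    cudaFree(d);
    return 0;
}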
07c1e99d47d18d8b08ee504fa15e4288ac081e11.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* * yolo_layer.cu * * This code was originally written by wang-xinyu under MIT license. * I took it from: * * https://github.com/wang-xinyu/tensorrtx/tree/master/yolov4 * * and made necessary modifications. * * - JK Jung */ #include "yolo_layer.h" using namespace Yolo; namespace { // Write values into buffer template <typename T> void write(char*& buffer, const T& val) { *reinterpret_cast<T*>(buffer) = val; buffer += sizeof(T); } // Read values from buffer template <typename T> void read(const char*& buffer, T& val) { val = *reinterpret_cast<const T*>(buffer); buffer += sizeof(T); } } // namespace namespace nvinfer1 { YoloLayerPlugin::YoloLayerPlugin(int yolo_width, int yolo_height, int num_anchors, float* anchors, int num_classes, int input_width, int input_height, float scale_x_y, int new_coords) { mYoloWidth = yolo_width; mYoloHeight = yolo_height; mNumAnchors = num_anchors; memcpy(mAnchorsHost, anchors, num_anchors * 2 * sizeof(float)); mNumClasses = num_classes; mInputWidth = input_width; mInputHeight = input_height; mScaleXY = scale_x_y; mNewCoords = new_coords; CHECK(hipMalloc(&mAnchors, MAX_ANCHORS * 2 * sizeof(float))); CHECK(hipMemcpy(mAnchors, mAnchorsHost, mNumAnchors * 2 * sizeof(float), hipMemcpyHostToDevice)); } YoloLayerPlugin::YoloLayerPlugin(const void* data, size_t length) { const char *d = reinterpret_cast<const char *>(data), *a = d; read(d, mThreadCount); read(d, mYoloWidth); read(d, mYoloHeight); read(d, mNumAnchors); memcpy(mAnchorsHost, d, MAX_ANCHORS * 2 * sizeof(float)); d += MAX_ANCHORS * 2 * sizeof(float); read(d, mNumClasses); read(d, mInputWidth); read(d, mInputHeight); read(d, mScaleXY); read(d, mNewCoords); CHECK(hipMalloc(&mAnchors, MAX_ANCHORS * 2 * sizeof(float))); CHECK(hipMemcpy(mAnchors, mAnchorsHost, mNumAnchors * 2 * sizeof(float), hipMemcpyHostToDevice)); assert(d == a + length); } IPluginV2IOExt* YoloLayerPlugin::clone() const NOEXCEPT { YoloLayerPlugin *p = new YoloLayerPlugin(mYoloWidth, mYoloHeight, mNumAnchors, (float*) mAnchorsHost, mNumClasses, mInputWidth, mInputHeight, mScaleXY, mNewCoords); p->setPluginNamespace(mPluginNamespace); return p; } void YoloLayerPlugin::terminate() NOEXCEPT { CHECK(hipFree(mAnchors)); } size_t YoloLayerPlugin::getSerializationSize() const NOEXCEPT { return sizeof(mThreadCount) + \ sizeof(mYoloWidth) + sizeof(mYoloHeight) + \ sizeof(mNumAnchors) + MAX_ANCHORS * 2 * sizeof(float) + \ sizeof(mNumClasses) + \ sizeof(mInputWidth) + sizeof(mInputHeight) + \ sizeof(mScaleXY) + sizeof(mNewCoords); } void YoloLayerPlugin::serialize(void* buffer) const NOEXCEPT { char* d = static_cast<char*>(buffer), *a = d; write(d, mThreadCount); write(d, mYoloWidth); write(d, mYoloHeight); write(d, mNumAnchors); memcpy(d, mAnchorsHost, MAX_ANCHORS * 2 * sizeof(float)); d += MAX_ANCHORS * 2 * sizeof(float); write(d, mNumClasses); write(d, mInputWidth); write(d, mInputHeight); write(d, mScaleXY); write(d, mNewCoords); assert(d == a + getSerializationSize()); } Dims YoloLayerPlugin::getOutputDimensions(int index, const Dims* inputs, int nbInputDims) NOEXCEPT { assert(index == 0); assert(nbInputDims == 1); assert(inputs[0].d[0] == (mNumClasses + 5) * mNumAnchors); assert(inputs[0].d[1] == mYoloHeight); assert(inputs[0].d[2] == mYoloWidth); // output detection results to the channel dimension int totalsize = mYoloWidth * mYoloHeight * mNumAnchors * sizeof(Detection) / sizeof(float); return Dims3(totalsize, 1, 1); } inline __device__ float 
sigmoidGPU(float x) { return 1.0f / (1.0f + __expf(-x)); } inline __device__ float scale_sigmoidGPU(float x, float s) { return s * sigmoidGPU(x) - (s - 1.0f) * 0.5f; } // CalDetection(): This kernel processes 1 yolo layer calculation. It // distributes calculations so that 1 GPU thread would be responsible // for each grid/anchor combination. // NOTE: The output (x, y, w, h) are between 0.0 and 1.0 // (relative to orginal image width and height). __global__ void CalDetection(const float *input, float *output, int batch_size, int yolo_width, int yolo_height, int num_anchors, const float *anchors, int num_classes, int input_w, int input_h, float scale_x_y) { int idx = threadIdx.x + blockDim.x * blockIdx.x; Detection* det = ((Detection*) output) + idx; int total_grids = yolo_width * yolo_height; if (idx >= batch_size * total_grids * num_anchors) return; int info_len = 5 + num_classes; //int batch_idx = idx / (total_grids * num_anchors); int group_idx = idx / total_grids; int anchor_idx = group_idx % num_anchors; const float* cur_input = input + group_idx * (info_len * total_grids) + (idx % total_grids); int class_id; float max_cls_logit = -CUDART_INF_F; // minus infinity for (int i = 5; i < info_len; ++i) { float l = *(cur_input + i * total_grids); if (l > max_cls_logit) { max_cls_logit = l; class_id = i - 5; } } float max_cls_prob = sigmoidGPU(max_cls_logit); float box_prob = sigmoidGPU(*(cur_input + 4 * total_grids)); //if (max_cls_prob < IGNORE_THRESH || box_prob < IGNORE_THRESH) // return; int row = (idx % total_grids) / yolo_width; int col = (idx % total_grids) % yolo_width; det->bbox[0] = (col + scale_sigmoidGPU(*(cur_input + 0 * total_grids), scale_x_y)) / yolo_width; // [0, 1] det->bbox[1] = (row + scale_sigmoidGPU(*(cur_input + 1 * total_grids), scale_x_y)) / yolo_height; // [0, 1] det->bbox[2] = __expf(*(cur_input + 2 * total_grids)) * *(anchors + 2 * anchor_idx + 0) / input_w; // [0, 1] det->bbox[3] = __expf(*(cur_input + 3 * total_grids)) * *(anchors + 2 * anchor_idx + 1) / input_h; // [0, 1] det->bbox[0] -= det->bbox[2] / 2; // shift from center to top-left det->bbox[1] -= det->bbox[3] / 2; det->det_confidence = box_prob; det->class_id = class_id; det->class_confidence = max_cls_prob; } inline __device__ float scale(float x, float s) { return s * x - (s - 1.0f) * 0.5f; } inline __device__ float square(float x) { return x * x; } __global__ void CalDetection_NewCoords(const float *input, float *output, int batch_size, int yolo_width, int yolo_height, int num_anchors, const float *anchors, int num_classes, int input_w, int input_h, float scale_x_y) { int idx = threadIdx.x + blockDim.x * blockIdx.x; Detection* det = ((Detection*) output) + idx; int total_grids = yolo_width * yolo_height; if (idx >= batch_size * total_grids * num_anchors) return; int info_len = 5 + num_classes; //int batch_idx = idx / (total_grids * num_anchors); int group_idx = idx / total_grids; int anchor_idx = group_idx % num_anchors; const float* cur_input = input + group_idx * (info_len * total_grids) + (idx % total_grids); int class_id; float max_cls_prob = -CUDART_INF_F; // minus infinity for (int i = 5; i < info_len; ++i) { float l = *(cur_input + i * total_grids); if (l > max_cls_prob) { max_cls_prob = l; class_id = i - 5; } } float box_prob = *(cur_input + 4 * total_grids); //if (max_cls_prob < IGNORE_THRESH || box_prob < IGNORE_THRESH) // return; int row = (idx % total_grids) / yolo_width; int col = (idx % total_grids) % yolo_width; det->bbox[0] = (col + scale(*(cur_input + 0 * total_grids), scale_x_y)) / 
yolo_width; // [0, 1] det->bbox[1] = (row + scale(*(cur_input + 1 * total_grids), scale_x_y)) / yolo_height; // [0, 1] det->bbox[2] = square(*(cur_input + 2 * total_grids)) * 4 * *(anchors + 2 * anchor_idx + 0) / input_w; // [0, 1] det->bbox[3] = square(*(cur_input + 3 * total_grids)) * 4 * *(anchors + 2 * anchor_idx + 1) / input_h; // [0, 1] det->bbox[0] -= det->bbox[2] / 2; // shift from center to top-left det->bbox[1] -= det->bbox[3] / 2; det->det_confidence = box_prob; det->class_id = class_id; det->class_confidence = max_cls_prob; } void YoloLayerPlugin::forwardGpu(const float* const* inputs, float* output, hipStream_t stream, int batchSize) { int num_elements = batchSize * mNumAnchors * mYoloWidth * mYoloHeight; //CHECK(hipMemset(output, 0, num_elements * sizeof(Detection))); if (mNewCoords) { hipLaunchKernelGGL(( CalDetection_NewCoords), dim3((num_elements + mThreadCount - 1) / mThreadCount), dim3(mThreadCount), 0, stream, inputs[0], output, batchSize, mYoloWidth, mYoloHeight, mNumAnchors, (const float*) mAnchors, mNumClasses, mInputWidth, mInputHeight, mScaleXY); } else { hipLaunchKernelGGL(( CalDetection), dim3((num_elements + mThreadCount - 1) / mThreadCount), dim3(mThreadCount), 0, stream, inputs[0], output, batchSize, mYoloWidth, mYoloHeight, mNumAnchors, (const float*) mAnchors, mNumClasses, mInputWidth, mInputHeight, mScaleXY); } } #if NV_TENSORRT_MAJOR >= 8 int32_t YoloLayerPlugin::enqueue(int32_t batchSize, void const* const* inputs, void* const* outputs, void* workspace, hipStream_t stream) NOEXCEPT #else // NV_TENSORRT_MAJOR < 8 int YoloLayerPlugin::enqueue(int batchSize, const void*const * inputs, void** outputs, void* workspace, hipStream_t stream) #endif // NV_TENSORRT_MAJOR { forwardGpu((const float* const*)inputs, (float*)outputs[0], stream, batchSize); return 0; } YoloPluginCreator::YoloPluginCreator() { mPluginAttributes.clear(); mFC.nbFields = mPluginAttributes.size(); mFC.fields = mPluginAttributes.data(); } const char* YoloPluginCreator::getPluginName() const NOEXCEPT { return "YoloLayer_TRT"; } const char* YoloPluginCreator::getPluginVersion() const NOEXCEPT { return "1"; } const PluginFieldCollection* YoloPluginCreator::getFieldNames() NOEXCEPT { return &mFC; } IPluginV2IOExt* YoloPluginCreator::createPlugin(const char* name, const PluginFieldCollection* fc) NOEXCEPT { assert(!strcmp(name, getPluginName())); const PluginField* fields = fc->fields; int yolo_width, yolo_height, num_anchors = 0; float anchors[MAX_ANCHORS * 2]; int num_classes, input_multiplier, new_coords = 0; float scale_x_y = 1.0; for (int i = 0; i < fc->nbFields; ++i) { const char* attrName = fields[i].name; if (!strcmp(attrName, "yoloWidth")) { assert(fields[i].type == PluginFieldType::kINT32); yolo_width = *(static_cast<const int*>(fields[i].data)); } else if (!strcmp(attrName, "yoloHeight")) { assert(fields[i].type == PluginFieldType::kINT32); yolo_height = *(static_cast<const int*>(fields[i].data)); } else if (!strcmp(attrName, "numAnchors")) { assert(fields[i].type == PluginFieldType::kINT32); num_anchors = *(static_cast<const int*>(fields[i].data)); } else if (!strcmp(attrName, "numClasses")) { assert(fields[i].type == PluginFieldType::kINT32); num_classes = *(static_cast<const int*>(fields[i].data)); } else if (!strcmp(attrName, "inputMultiplier")) { assert(fields[i].type == PluginFieldType::kINT32); input_multiplier = *(static_cast<const int*>(fields[i].data)); } else if (!strcmp(attrName, "anchors")){ assert(num_anchors > 0 && num_anchors <= MAX_ANCHORS); assert(fields[i].type == 
PluginFieldType::kFLOAT32); memcpy(anchors, static_cast<const float*>(fields[i].data), num_anchors * 2 * sizeof(float)); } else if (!strcmp(attrName, "scaleXY")) { assert(fields[i].type == PluginFieldType::kFLOAT32); scale_x_y = *(static_cast<const float*>(fields[i].data)); } else if (!strcmp(attrName, "newCoords")) { assert(fields[i].type == PluginFieldType::kINT32); new_coords = *(static_cast<const int*>(fields[i].data)); } else { std::cerr << "Unknown attribute: " << attrName << std::endl; assert(0); } } assert(yolo_width > 0 && yolo_height > 0); assert(anchors[0] > 0.0f && anchors[1] > 0.0f); assert(num_classes > 0); assert(input_multiplier == 64 || input_multiplier == 32 || \ input_multiplier == 16 || input_multiplier == 8); assert(scale_x_y >= 1.0); YoloLayerPlugin* obj = new YoloLayerPlugin(yolo_width, yolo_height, num_anchors, anchors, num_classes, yolo_width * input_multiplier, yolo_height * input_multiplier, scale_x_y, new_coords); obj->setPluginNamespace(mNamespace.c_str()); return obj; } IPluginV2IOExt* YoloPluginCreator::deserializePlugin(const char* name, const void* serialData, size_t serialLength) NOEXCEPT { YoloLayerPlugin* obj = new YoloLayerPlugin(serialData, serialLength); obj->setPluginNamespace(mNamespace.c_str()); return obj; } PluginFieldCollection YoloPluginCreator::mFC{}; std::vector<PluginField> YoloPluginCreator::mPluginAttributes; } // namespace nvinfer1
07c1e99d47d18d8b08ee504fa15e4288ac081e11.cu
/* * yolo_layer.cu * * This code was originally written by wang-xinyu under MIT license. * I took it from: * * https://github.com/wang-xinyu/tensorrtx/tree/master/yolov4 * * and made necessary modifications. * * - JK Jung */ #include "yolo_layer.h" using namespace Yolo; namespace { // Write values into buffer template <typename T> void write(char*& buffer, const T& val) { *reinterpret_cast<T*>(buffer) = val; buffer += sizeof(T); } // Read values from buffer template <typename T> void read(const char*& buffer, T& val) { val = *reinterpret_cast<const T*>(buffer); buffer += sizeof(T); } } // namespace namespace nvinfer1 { YoloLayerPlugin::YoloLayerPlugin(int yolo_width, int yolo_height, int num_anchors, float* anchors, int num_classes, int input_width, int input_height, float scale_x_y, int new_coords) { mYoloWidth = yolo_width; mYoloHeight = yolo_height; mNumAnchors = num_anchors; memcpy(mAnchorsHost, anchors, num_anchors * 2 * sizeof(float)); mNumClasses = num_classes; mInputWidth = input_width; mInputHeight = input_height; mScaleXY = scale_x_y; mNewCoords = new_coords; CHECK(cudaMalloc(&mAnchors, MAX_ANCHORS * 2 * sizeof(float))); CHECK(cudaMemcpy(mAnchors, mAnchorsHost, mNumAnchors * 2 * sizeof(float), cudaMemcpyHostToDevice)); } YoloLayerPlugin::YoloLayerPlugin(const void* data, size_t length) { const char *d = reinterpret_cast<const char *>(data), *a = d; read(d, mThreadCount); read(d, mYoloWidth); read(d, mYoloHeight); read(d, mNumAnchors); memcpy(mAnchorsHost, d, MAX_ANCHORS * 2 * sizeof(float)); d += MAX_ANCHORS * 2 * sizeof(float); read(d, mNumClasses); read(d, mInputWidth); read(d, mInputHeight); read(d, mScaleXY); read(d, mNewCoords); CHECK(cudaMalloc(&mAnchors, MAX_ANCHORS * 2 * sizeof(float))); CHECK(cudaMemcpy(mAnchors, mAnchorsHost, mNumAnchors * 2 * sizeof(float), cudaMemcpyHostToDevice)); assert(d == a + length); } IPluginV2IOExt* YoloLayerPlugin::clone() const NOEXCEPT { YoloLayerPlugin *p = new YoloLayerPlugin(mYoloWidth, mYoloHeight, mNumAnchors, (float*) mAnchorsHost, mNumClasses, mInputWidth, mInputHeight, mScaleXY, mNewCoords); p->setPluginNamespace(mPluginNamespace); return p; } void YoloLayerPlugin::terminate() NOEXCEPT { CHECK(cudaFree(mAnchors)); } size_t YoloLayerPlugin::getSerializationSize() const NOEXCEPT { return sizeof(mThreadCount) + \ sizeof(mYoloWidth) + sizeof(mYoloHeight) + \ sizeof(mNumAnchors) + MAX_ANCHORS * 2 * sizeof(float) + \ sizeof(mNumClasses) + \ sizeof(mInputWidth) + sizeof(mInputHeight) + \ sizeof(mScaleXY) + sizeof(mNewCoords); } void YoloLayerPlugin::serialize(void* buffer) const NOEXCEPT { char* d = static_cast<char*>(buffer), *a = d; write(d, mThreadCount); write(d, mYoloWidth); write(d, mYoloHeight); write(d, mNumAnchors); memcpy(d, mAnchorsHost, MAX_ANCHORS * 2 * sizeof(float)); d += MAX_ANCHORS * 2 * sizeof(float); write(d, mNumClasses); write(d, mInputWidth); write(d, mInputHeight); write(d, mScaleXY); write(d, mNewCoords); assert(d == a + getSerializationSize()); } Dims YoloLayerPlugin::getOutputDimensions(int index, const Dims* inputs, int nbInputDims) NOEXCEPT { assert(index == 0); assert(nbInputDims == 1); assert(inputs[0].d[0] == (mNumClasses + 5) * mNumAnchors); assert(inputs[0].d[1] == mYoloHeight); assert(inputs[0].d[2] == mYoloWidth); // output detection results to the channel dimension int totalsize = mYoloWidth * mYoloHeight * mNumAnchors * sizeof(Detection) / sizeof(float); return Dims3(totalsize, 1, 1); } inline __device__ float sigmoidGPU(float x) { return 1.0f / (1.0f + __expf(-x)); } inline __device__ float 
scale_sigmoidGPU(float x, float s) { return s * sigmoidGPU(x) - (s - 1.0f) * 0.5f; } // CalDetection(): This kernel processes 1 yolo layer calculation. It // distributes calculations so that 1 GPU thread would be responsible // for each grid/anchor combination. // NOTE: The output (x, y, w, h) are between 0.0 and 1.0 // (relative to orginal image width and height). __global__ void CalDetection(const float *input, float *output, int batch_size, int yolo_width, int yolo_height, int num_anchors, const float *anchors, int num_classes, int input_w, int input_h, float scale_x_y) { int idx = threadIdx.x + blockDim.x * blockIdx.x; Detection* det = ((Detection*) output) + idx; int total_grids = yolo_width * yolo_height; if (idx >= batch_size * total_grids * num_anchors) return; int info_len = 5 + num_classes; //int batch_idx = idx / (total_grids * num_anchors); int group_idx = idx / total_grids; int anchor_idx = group_idx % num_anchors; const float* cur_input = input + group_idx * (info_len * total_grids) + (idx % total_grids); int class_id; float max_cls_logit = -CUDART_INF_F; // minus infinity for (int i = 5; i < info_len; ++i) { float l = *(cur_input + i * total_grids); if (l > max_cls_logit) { max_cls_logit = l; class_id = i - 5; } } float max_cls_prob = sigmoidGPU(max_cls_logit); float box_prob = sigmoidGPU(*(cur_input + 4 * total_grids)); //if (max_cls_prob < IGNORE_THRESH || box_prob < IGNORE_THRESH) // return; int row = (idx % total_grids) / yolo_width; int col = (idx % total_grids) % yolo_width; det->bbox[0] = (col + scale_sigmoidGPU(*(cur_input + 0 * total_grids), scale_x_y)) / yolo_width; // [0, 1] det->bbox[1] = (row + scale_sigmoidGPU(*(cur_input + 1 * total_grids), scale_x_y)) / yolo_height; // [0, 1] det->bbox[2] = __expf(*(cur_input + 2 * total_grids)) * *(anchors + 2 * anchor_idx + 0) / input_w; // [0, 1] det->bbox[3] = __expf(*(cur_input + 3 * total_grids)) * *(anchors + 2 * anchor_idx + 1) / input_h; // [0, 1] det->bbox[0] -= det->bbox[2] / 2; // shift from center to top-left det->bbox[1] -= det->bbox[3] / 2; det->det_confidence = box_prob; det->class_id = class_id; det->class_confidence = max_cls_prob; } inline __device__ float scale(float x, float s) { return s * x - (s - 1.0f) * 0.5f; } inline __device__ float square(float x) { return x * x; } __global__ void CalDetection_NewCoords(const float *input, float *output, int batch_size, int yolo_width, int yolo_height, int num_anchors, const float *anchors, int num_classes, int input_w, int input_h, float scale_x_y) { int idx = threadIdx.x + blockDim.x * blockIdx.x; Detection* det = ((Detection*) output) + idx; int total_grids = yolo_width * yolo_height; if (idx >= batch_size * total_grids * num_anchors) return; int info_len = 5 + num_classes; //int batch_idx = idx / (total_grids * num_anchors); int group_idx = idx / total_grids; int anchor_idx = group_idx % num_anchors; const float* cur_input = input + group_idx * (info_len * total_grids) + (idx % total_grids); int class_id; float max_cls_prob = -CUDART_INF_F; // minus infinity for (int i = 5; i < info_len; ++i) { float l = *(cur_input + i * total_grids); if (l > max_cls_prob) { max_cls_prob = l; class_id = i - 5; } } float box_prob = *(cur_input + 4 * total_grids); //if (max_cls_prob < IGNORE_THRESH || box_prob < IGNORE_THRESH) // return; int row = (idx % total_grids) / yolo_width; int col = (idx % total_grids) % yolo_width; det->bbox[0] = (col + scale(*(cur_input + 0 * total_grids), scale_x_y)) / yolo_width; // [0, 1] det->bbox[1] = (row + scale(*(cur_input + 1 * total_grids), 
scale_x_y)) / yolo_height; // [0, 1] det->bbox[2] = square(*(cur_input + 2 * total_grids)) * 4 * *(anchors + 2 * anchor_idx + 0) / input_w; // [0, 1] det->bbox[3] = square(*(cur_input + 3 * total_grids)) * 4 * *(anchors + 2 * anchor_idx + 1) / input_h; // [0, 1] det->bbox[0] -= det->bbox[2] / 2; // shift from center to top-left det->bbox[1] -= det->bbox[3] / 2; det->det_confidence = box_prob; det->class_id = class_id; det->class_confidence = max_cls_prob; } void YoloLayerPlugin::forwardGpu(const float* const* inputs, float* output, cudaStream_t stream, int batchSize) { int num_elements = batchSize * mNumAnchors * mYoloWidth * mYoloHeight; //CHECK(cudaMemset(output, 0, num_elements * sizeof(Detection))); if (mNewCoords) { CalDetection_NewCoords<<<(num_elements + mThreadCount - 1) / mThreadCount, mThreadCount, 0, stream>>> (inputs[0], output, batchSize, mYoloWidth, mYoloHeight, mNumAnchors, (const float*) mAnchors, mNumClasses, mInputWidth, mInputHeight, mScaleXY); } else { CalDetection<<<(num_elements + mThreadCount - 1) / mThreadCount, mThreadCount, 0, stream>>> (inputs[0], output, batchSize, mYoloWidth, mYoloHeight, mNumAnchors, (const float*) mAnchors, mNumClasses, mInputWidth, mInputHeight, mScaleXY); } } #if NV_TENSORRT_MAJOR >= 8 int32_t YoloLayerPlugin::enqueue(int32_t batchSize, void const* const* inputs, void* const* outputs, void* workspace, cudaStream_t stream) NOEXCEPT #else // NV_TENSORRT_MAJOR < 8 int YoloLayerPlugin::enqueue(int batchSize, const void*const * inputs, void** outputs, void* workspace, cudaStream_t stream) #endif // NV_TENSORRT_MAJOR { forwardGpu((const float* const*)inputs, (float*)outputs[0], stream, batchSize); return 0; } YoloPluginCreator::YoloPluginCreator() { mPluginAttributes.clear(); mFC.nbFields = mPluginAttributes.size(); mFC.fields = mPluginAttributes.data(); } const char* YoloPluginCreator::getPluginName() const NOEXCEPT { return "YoloLayer_TRT"; } const char* YoloPluginCreator::getPluginVersion() const NOEXCEPT { return "1"; } const PluginFieldCollection* YoloPluginCreator::getFieldNames() NOEXCEPT { return &mFC; } IPluginV2IOExt* YoloPluginCreator::createPlugin(const char* name, const PluginFieldCollection* fc) NOEXCEPT { assert(!strcmp(name, getPluginName())); const PluginField* fields = fc->fields; int yolo_width, yolo_height, num_anchors = 0; float anchors[MAX_ANCHORS * 2]; int num_classes, input_multiplier, new_coords = 0; float scale_x_y = 1.0; for (int i = 0; i < fc->nbFields; ++i) { const char* attrName = fields[i].name; if (!strcmp(attrName, "yoloWidth")) { assert(fields[i].type == PluginFieldType::kINT32); yolo_width = *(static_cast<const int*>(fields[i].data)); } else if (!strcmp(attrName, "yoloHeight")) { assert(fields[i].type == PluginFieldType::kINT32); yolo_height = *(static_cast<const int*>(fields[i].data)); } else if (!strcmp(attrName, "numAnchors")) { assert(fields[i].type == PluginFieldType::kINT32); num_anchors = *(static_cast<const int*>(fields[i].data)); } else if (!strcmp(attrName, "numClasses")) { assert(fields[i].type == PluginFieldType::kINT32); num_classes = *(static_cast<const int*>(fields[i].data)); } else if (!strcmp(attrName, "inputMultiplier")) { assert(fields[i].type == PluginFieldType::kINT32); input_multiplier = *(static_cast<const int*>(fields[i].data)); } else if (!strcmp(attrName, "anchors")){ assert(num_anchors > 0 && num_anchors <= MAX_ANCHORS); assert(fields[i].type == PluginFieldType::kFLOAT32); memcpy(anchors, static_cast<const float*>(fields[i].data), num_anchors * 2 * sizeof(float)); } else if 
(!strcmp(attrName, "scaleXY")) { assert(fields[i].type == PluginFieldType::kFLOAT32); scale_x_y = *(static_cast<const float*>(fields[i].data)); } else if (!strcmp(attrName, "newCoords")) { assert(fields[i].type == PluginFieldType::kINT32); new_coords = *(static_cast<const int*>(fields[i].data)); } else { std::cerr << "Unknown attribute: " << attrName << std::endl; assert(0); } } assert(yolo_width > 0 && yolo_height > 0); assert(anchors[0] > 0.0f && anchors[1] > 0.0f); assert(num_classes > 0); assert(input_multiplier == 64 || input_multiplier == 32 || \ input_multiplier == 16 || input_multiplier == 8); assert(scale_x_y >= 1.0); YoloLayerPlugin* obj = new YoloLayerPlugin(yolo_width, yolo_height, num_anchors, anchors, num_classes, yolo_width * input_multiplier, yolo_height * input_multiplier, scale_x_y, new_coords); obj->setPluginNamespace(mNamespace.c_str()); return obj; } IPluginV2IOExt* YoloPluginCreator::deserializePlugin(const char* name, const void* serialData, size_t serialLength) NOEXCEPT { YoloLayerPlugin* obj = new YoloLayerPlugin(serialData, serialLength); obj->setPluginNamespace(mNamespace.c_str()); return obj; } PluginFieldCollection YoloPluginCreator::mFC{}; std::vector<PluginField> YoloPluginCreator::mPluginAttributes; } // namespace nvinfer1
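A minimal sketch of the write<T>/read<T> buffer-cursor pattern used by serialize() and the deserializing constructor in yolo_layer.cu above: fields must be written and read back in the same order, and the cursor must land exactly at the end of the declared serialization size. The buffer layout and field values below are illustrative only, not the plugin's real members.

#include <cassert>
#include <cstdio>

// Same helper shape as in yolo_layer.cu: advance a raw byte cursor by sizeof(T).
template <typename T>
void write(char*& buffer, const T& val) {
    *reinterpret_cast<T*>(buffer) = val;
    buffer += sizeof(T);
}

template <typename T>
void read(const char*& buffer, T& val) {
    val = *reinterpret_cast<const T*>(buffer);
    buffer += sizeof(T);
}

int main() {
    char storage[sizeof(int) + sizeof(float)];

    // Serialize: emit the fields in a fixed order.
    char* w = storage;
    write(w, 19);        // e.g. a grid width
    write(w, 1.05f);     // e.g. a scale_x_y value
    assert(w == storage + sizeof(storage));   // mirrors the d == a + getSerializationSize() check

    // Deserialize in exactly the same order.
    const char* r = storage;
    int width;
    float scale;
    read(r, width);
    read(r, scale);
    printf("width=%d scale=%.2f\n", width, scale);
    return 0;
}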
2e86072eee6ddbfe0bc27aad9e86035c596b2559.hip
// !!! This is a file automatically generated by hipify!!! #include <ATen/ATen.h> #include <hip/hip_runtime.h> #include <hip/hip_runtime.h> #include <vector> #include <math.h> #include </usr/local/cuda-9.0/targets/x86_64-linux/include/hiprand/hiprand_kernel.h> // Kernels *********************** #define gpuErrchk(ans) { gpuAssert((ans), __FILE__, __LINE__); } inline void gpuAssert(hipError_t code, const char *file, int line, bool abort=true) { if (code != hipSuccess) { fprintf(stderr,"GPUassert: %s %s %d\n", hipGetErrorString(code), file, line); if (abort) exit(code); } } __device__ __forceinline__ size_t calcLinInd(int h, int w, int c, int b, size_t HD, size_t WD,size_t CD ){ return w + WD*(h + HD*(c + CD*(b))); } __device__ __forceinline__ void calcIndpInd(int linind, int h, int w, int c, int b, size_t HD, size_t WD,size_t CD,int *out ){ *out = w + WD*(h + HD*(c + CD*(b))); return ; } __device__ __forceinline__ void calcIndForRandom(int linind, int h, int w, int c, int b, size_t HD, size_t WD,size_t CD,int *out ){ *out = w + WD*(h + HD*(c + CD*(b))); return ; } __device__ __forceinline__ size_t calcLinInd4(int idx_a, int idx_b, int idx_c, int idx_d, size_t AD, size_t BD,size_t CD, size_t DD ){ return idx_d + DD*(idx_c + CD*(idx_b + BD*(idx_a))); } /* template <typename scalar_t> __global__ void klconvs_cuda_forward_kernel_new( unsigned int seed, const scalar_t *input, const scalar_t *p_filt, scalar_t *out, const scalar_t *pad, const size_t filt_h, const size_t filt_w, const size_t filt_c, const size_t filt_n, const size_t inp_h, const size_t inp_w, const size_t inp_c, const size_t inp_b ){ const int out_w_sz = blockDim.x, out_h_sz = blockDim.y; const int out_chan_sz = gridDim.x, batchsz = gridDim.y; const int w = threadIdx.x , h = threadIdx.y; const int out_chan_id = blockIdx.x, batch_id= blockIdx.y; const int out_idx = threadIdx.x + blockDim.x*(threadIdx.y + blockDim.y*(blockIdx.x + gridDim.x*(blockIdx.y))); hiprandState_t state; //for( int channel =0 : channel < ) float temp_reg; float this_px_out = 0; bool flag = true; bool isoutboundh = false; bool isoutbound = false; float randnum = 0; int input_idx = 0; int filt_idx = 0; for (int dh=0 ; dh< out_h_sz; dh++){ isoutboundh = dh + h > inp_h; for ( int dw = 0 ; dw< out_w_sz; dw++ ){ isoutbound = isoutboundh | dw + w > inp_w; hiprand_init(seed,out_idx,dw + dh*(out_w_sz),&state); randnum = hiprand_uniform(&state); flag = true; for ( int chan = 0 ; chan < out_chan_sz; chan++){ // find the correct index of filt // get the index val from input // add to final answer; calcLinInd(dh,dw,chan,out_chan_id,filt_h,filt_w,filt_c, &filt_idx);//[out_chan_id][chan][dh][dw] temp_reg = p_filt[filt_idx]; if (temp_reg > randnum && flag) { if (!isoutbound){ calcLinInd(h+dh ,w+dw ,chan,batch_id, inp_h, inp_w, inp_c, &input_idx); this_px_out += input[input_idx]; } flag = false; } else{ randnum = randnum - temp_reg; } } } } out[out_idx] = this_px_out; hiprand_init(seed,0,1,&state); float randnum = hiprand_uniform(&state); hiprand_init(seed+1,0,0,&state); float randnum2 = hiprand_uniform(&state); printf("time : %f , %f --\n",randnum, randnum2); //printf("out_idx %d: left to right %d,%d,%d,%d with dims %d,%d,%d,%d\n", out_idx, threadIdx.x, threadIdx.y, blockIdx.x, blockIdx.y, blockDim.x, blockDim.y, gridDim.x, gridDim.y); ///__shared__ float[32][32] input_tile; // define shared memory } */ template <typename scalar_t> __global__ void klconvs_cuda_forward_kernel( const scalar_t* __restrict__ input, const float* __restrict__ l_filt, scalar_t* __restrict__ out, 
scalar_t* __restrict__ random, const size_t filt_h, const size_t filt_w, const size_t filt_n, const size_t inp_h, const size_t inp_w, const size_t inp_c, const size_t inp_b, const int totalOutPx, const int totalTreads ){ const size_t threadlinidx = blockIdx.x*blockDim.x + threadIdx.x; if (threadlinidx < totalOutPx){ // Calculate Imout Indices int int_temp_reg; const int im_w_idx = threadlinidx % inp_w; int_temp_reg =inp_w; const int im_h_idx = (threadlinidx/int_temp_reg) % inp_h; int_temp_reg *=inp_h; const int im_c_idx = (threadlinidx/(int_temp_reg)) % filt_n; int_temp_reg *=filt_n; const int im_b_idx = (threadlinidx/(int_temp_reg)); size_t size_temp_reg; float randnum; float this_px_out=0; float float_temp; float log_probs=0; size_t input_idx=0; int dh; int dw; int chan; int cur_im_h =0; int cur_im_w =0; int rand_idx = 0; float p_filt; for ( dh= 0 ; dh < filt_h; dh++){ cur_im_h= dh + im_h_idx - ((filt_h)/2); if (cur_im_h< 0){ continue; } if (cur_im_h >= inp_h){ break; } for (dw = 0 ; dw < filt_w; dw++ ){ cur_im_w = dw + im_w_idx - ((filt_w)/2); if (cur_im_w<0){ continue; } if (cur_im_w >= inp_w){ break; } randnum = random[threadlinidx + totalOutPx*(dw + filt_w*( dh ))];// * isoutbound; // GLOBAL MEM ACCESS for ( chan = 0 ; chan < inp_c; chan++){ // find the correct index of filt // get the index val from input // add to final answer; size_temp_reg = calcLinInd( dh, dw, chan,im_c_idx, filt_h, filt_w, inp_c); float_temp = l_filt[size_temp_reg]; p_filt = expf(float_temp); if (randnum <= p_filt){ input_idx = calcLinInd(cur_im_h, cur_im_w, chan, im_b_idx, inp_h, inp_w, inp_c); //inp_indices[dh][dw] = input_idx; ////////// GL MEM ACCESS ////*********** check wether bool*float is float //randnum = 100; log_probs = float_temp; random[threadlinidx + totalOutPx*(dw + filt_w*( dh ))] = __int2float_rn(chan); break; } //flag = (flag && !flag2); //j = j + flag2; randnum = randnum - p_filt; } this_px_out += input[input_idx] -log_probs; } } //for (int j=0 ; j< input_idx; j++){ // this_px_out += input[inp_indices[j]]; //} out[threadlinidx] = this_px_out; //////////////////// GL MEM WRITE } } /* template <typename scalar_t> __global__ void klconvs_cuda_forward_kernel_single_loop( const scalar_t* __restrict__ input, const scalar_t* __restrict__ p_filt, scalar_t* __restrict__ out, const scalar_t* __restrict__ random, const size_t filt_h, const size_t filt_w, const size_t filt_c, const size_t filt_n, const size_t inp_h, const size_t inp_w, const size_t inp_c, const size_t inp_b, const int totalOutPx, const int totalThreads ){ const int threadlinidx = blockIdx.x*blockDim.x + threadIdx.x; if (threadlinidx < totalThreads){ // Calculate Imout Indices int int_temp_reg =1; const int im_w_idx = threadlinidx % inp_w; int_temp_reg = inp_w; const int im_h_idx = (threadlinidx/int_temp_reg) % inp_h; int_temp_reg = int_temp_reg * inp_h; const int im_c_idx = (threadlinidx/(int_temp_reg)) % filt_n; int_temp_reg = int_temp_reg * filt_n; const int im_b_idx = (threadlinidx/(int_temp_reg)) %inp_b; int_temp_reg = int_temp_reg * inp_b; const int dw = (threadlinidx/(int_temp_reg)) %filt_w; int_temp_reg = int_temp_reg * filt_w; const int dh = (threadlinidx/(int_temp_reg)) %filt_h; int out_idx = im_w_idx; int_temp_reg= inp_w; out_idx += im_h_idx * int_temp_reg; int_temp_reg *= inp_h; out_idx += im_c_idx * int_temp_reg; int_temp_reg *= inp_c; out_idx += im_b_idx * int_temp_reg; float randnum ; int rand_idx; float temp_reg=0; float this_px_out=0; // Flags bool flag = true; bool flag2= false; bool isoutboundh = false; bool isoutbound 
= false; int input_idx; int filt_idx; int j = 0; isoutboundh = (dh + im_h_idx) > inp_h; isoutbound = isoutboundh || ((dw + im_w_idx) > inp_w); rand_idx = threadlinidx + totalOutPx*(dw+ filt_w*(dh)); randnum = random[rand_idx];// * isoutbound; // GLOBAL MEM ACCESS flag = true; for ( int chan = 0 ; chan < filt_c; chan++){ // find the correct index of filt // get the index val from input // add to final answer; calcLinInd(dh,dw,chan,im_c_idx,filt_h,filt_w,filt_c, &filt_idx);//[out_chan_id][chan][dh][dw] //temp_reg = p_filt[filt_idx]; ////////////GLOBAL MEM ACESSS flag2 = flag && (temp_reg >= randnum) && (!isoutbound); calcLinInd(im_h_idx + dh, im_w_idx + dw, chan, im_b_idx, inp_h, inp_w, inp_c, &input_idx); this_px_out = this_px_out + (flag2 * input[input_idx]); ////////// GL MEM ACESSS ////*********** check wether bool*float is float if (flag2){ break; } flag = (flag && !flag2); j = j + flag2; randnum = randnum - temp_reg; } //atomicAdd(&(out[out_idx]),this_px_out); // out[out_idx] += this_px_out; //////////////////// GL MEM WRITE } } template <typename scalar_t> __global__ void klconvs_cuda_forward_kernel_zero_loop( const scalar_t* __restrict__ input, const scalar_t* __restrict__ p_filt, scalar_t* out, const scalar_t* __restrict__ random, const size_t filt_h, const size_t filt_w, const size_t filt_c, const size_t filt_n, const size_t inp_h, const size_t inp_w, const size_t inp_c, const size_t inp_b, const long int totalOutPx, const long int totalActiveThreads ){ const long int threadlinidx = blockIdx.x*blockDim.x + threadIdx.x; if (threadlinidx < totalActiveThreads){ // Calculate Imout Indices int int_temp_reg =1; const int im_w_idx = threadlinidx % inp_w; int_temp_reg = inp_w; const int im_h_idx = (threadlinidx/int_temp_reg) % inp_h; int_temp_reg = int_temp_reg * inp_h; const int im_c_idx = (threadlinidx/(int_temp_reg)) % filt_n; int_temp_reg = int_temp_reg * filt_n; const int im_b_idx = (threadlinidx/(int_temp_reg)) % inp_b; int_temp_reg = int_temp_reg * inp_b; const int dw = (threadlinidx/(int_temp_reg)) % filt_w; int_temp_reg = int_temp_reg * filt_w; const int dh = (threadlinidx/(int_temp_reg)) % filt_h; int_temp_reg *= filt_h; int chan = (threadlinidx/(int_temp_reg)); int out_idx = im_w_idx + inp_w*( im_h_idx + inp_h*(im_c_idx + filt_n*(im_b_idx + inp_b*(dw + filt_w*(dh ))))); float randnum=0.5 ; long int rand_idx; float current_cumprob_reg=0; float prev_cumprob_reg=0; // Flags bool flag = true; bool flag2= false; bool isoutboundh = false; bool isoutbound = false; long int input_idx; long int filt_idx; long int prev_filt_idx; int j = 0; isoutboundh = (dh + im_h_idx) >= inp_h; isoutbound = isoutboundh || ((dw + im_w_idx) >= inp_w); rand_idx = threadlinidx % (inp_w*inp_h*filt_n*inp_b*filt_w*filt_h); randnum = random[rand_idx];// * isoutbound; // GLOBAL MEM ACCESS flag = true; // find the correct index of filt // get the index val from input // add to final answer; calcLinInd(dh,dw,chan,im_c_idx,filt_h,filt_w,filt_c, &filt_idx);//[out_chan_id][chan][dh][dw] current_cumprob_reg = p_filt[filt_idx]; ////////////GLOBAL MEM ACESSS if (chan == 0){ prev_cumprob_reg = 0 ; } else{ calcLinInd(dh,dw,(chan-1),im_c_idx,filt_h,filt_w,filt_c, &prev_filt_idx);//[out_chan_id][chan][dh][dw] prev_cumprob_reg = p_filt[prev_filt_idx]; ////////////GLOBAL MEM ACESSS } flag2 = (prev_cumprob_reg < randnum) && (current_cumprob_reg >= randnum) && (!isoutbound); //this_px_out = input[input_idx]; if (flag2){ //out_idx = 0; calcLinInd(im_h_idx + dh, im_w_idx + dw, chan, im_b_idx, inp_h, inp_w, inp_c, &input_idx); 
//out[out_idx] = atomicAdd(&(out[out_idx]),input[input_idx];////////// GL MEM ACESSS ////*********** check wether bool*float is float if (out_idx > filt_h *filt_w *totalOutPx){ printf("Culprit: %d", out_idx); } out[out_idx] += input[input_idx]; //////////////////// GL MEM WRITE } //atomicAdd(&(out[out_idx]),this_px_out); // out[out_idx] += this_px_out; //////////////////// GL MEM WRITE } } /* ---------------------- Print Tests printf("number of times went in %d\n",j); hiprand_init(seed,0,1,&state); float randnum = hiprand_uniform(&state); hiprand_init(seed+1,0,0,&state); float randnum2 = hiprand_uniform(&state); printf("time : %f , %f --\n",randnum, randnum2); printf("out_idx %d: left to right %d,%d,%d,%d with dims %d,%d,%d,%d\n", out_idx, threadIdx.x, threadIdx.y, blockIdx.x, blockIdx.y, blockDim.x, blockDim.y, gridDim.x, gridDim.y); */ //__shared__ float[32][32] input_tile; // define shared memory template <typename scalar_t> __global__ void klconvs_cuda_backward_kernel( const scalar_t* __restrict__ input, //TODO: MAKE sure the dims are dzdin and the threads are compatible const scalar_t* __restrict__ lfilt, const scalar_t* __restrict__ dzdout, const scalar_t* __restrict__ random, float* __restrict__ dzdin, float* __restrict__ dzdl_filt, const size_t filt_h, const size_t filt_w, const size_t filt_n, const size_t inp_h, const size_t inp_w, const size_t inp_c, const size_t inp_b, const int totalThreads ){ const size_t threadlinidx = blockIdx.x*blockDim.x + threadIdx.x; int int_temp_reg; const int im_w_idx = threadlinidx % inp_w; int_temp_reg =inp_w; const int im_h_idx = (threadlinidx/int_temp_reg) % inp_h; int_temp_reg *=inp_h; const int filt_n_idx = (threadlinidx/(int_temp_reg)) % filt_n; int_temp_reg *=filt_n; const int im_b_idx = (threadlinidx/(int_temp_reg)) % inp_b; int_temp_reg *= inp_b; const int filt_w_idx = (threadlinidx/(int_temp_reg)) % filt_w; int_temp_reg *= filt_w; const int filt_h_idx = (threadlinidx/(int_temp_reg)) ; const int inp_w_idx = im_w_idx + filt_w_idx - (filt_w/2) ; const int inp_h_idx = im_h_idx + filt_h_idx - (filt_h/2) ; bool flag = threadlinidx < totalThreads && inp_w_idx >=0 && inp_w_idx <inp_w && inp_h_idx >=0 && inp_h_idx <inp_h; if (flag){ // Calculate Imout Indices float randnum; float this_px_out=0; float float_temp; // Flags // bool flag = true; // bool flag2= false; // bool isoutboundh = false; // bool isoutbound = false; int chan = __float2int_rn(random[threadlinidx]); int input_idx=0; //int filt_idx; //int j = 0; size_t dzdout_idx = calcLinInd4(im_b_idx,filt_n_idx,im_h_idx,im_w_idx ,inp_b, filt_n, inp_h, inp_w); size_t dzdin_idx = calcLinInd4(im_b_idx , chan , inp_h_idx , inp_w_idx , inp_b, inp_c, inp_h, inp_w); // printf("%d \n",chan); //size_t dzdin_idx = im_b_idx + chan + inp_h_idx + inp_w_idx; size_t dzdl_filt_idx = calcLinInd4(filt_n_idx , chan , filt_h_idx , filt_w_idx, filt_n, inp_c, filt_h, filt_w ); float dzdoutthis = dzdout[dzdout_idx]; //dzdin[0] += dzdoutthis; //dzdin[dzdin_idx] += dzdoutthis; atomicAdd(&(dzdin[dzdin_idx]), dzdoutthis); atomicAdd(&(dzdl_filt[dzdl_filt_idx]), dzdoutthis* (input[dzdin_idx] - 1 - lfilt[dzdl_filt_idx])); // p differentiable grad //atomicAdd(&(dzdl_filt[dzdl_filt_idx]), -dzdoutthis); // p NON-differentiable grad //dzdl_filt[dzdl_filt_idx] += dzdoutthis* input[dzdin_idx] ; } } template <typename scalar_t> __global__ void klconv_cuda_backward_kernel( const scalar_t* __restrict__ input, //TODO: MAKE sure the dims are dzdin and the threads are compatible const scalar_t* __restrict__ lfilt, const scalar_t* 
__restrict__ dzdout, const scalar_t* __restrict__ random, float* __restrict__ dzdin, float* __restrict__ dzdl_filt, const size_t filt_h, const size_t filt_w, const size_t filt_n, const size_t inp_h, const size_t inp_w, const size_t inp_c, const size_t inp_b, const int totalThreads ){ const size_t threadlinidx = blockIdx.x*blockDim.x + threadIdx.x; int int_temp_reg; const int im_w_idx = threadlinidx % inp_w; int_temp_reg =inp_w; const int im_h_idx = (threadlinidx/int_temp_reg) % inp_h; int_temp_reg *=inp_h; const int filt_n_idx = (threadlinidx/(int_temp_reg)) % filt_n; int_temp_reg *=filt_n; const int im_b_idx = (threadlinidx/(int_temp_reg)) % inp_b; int_temp_reg *= inp_b; const int filt_w_idx = (threadlinidx/(int_temp_reg)) % filt_w; int_temp_reg *= filt_w; const int filt_h_idx = (threadlinidx/(int_temp_reg)) ; const int inp_w_idx = im_w_idx + filt_w_idx - (filt_w/2) ; const int inp_h_idx = im_h_idx + filt_h_idx - (filt_h/2) ; bool flag = threadlinidx < totalThreads && inp_w_idx >=0 && inp_w_idx <inp_w && inp_h_idx >=0 && inp_h_idx <inp_h; if (flag){ // Calculate Imout Indices float randnum; float this_px_out=0; float float_temp; int chan = __float2int_rn(random[threadlinidx]); int input_idx=0; float dzdoutthis; float cur_lfilt; float cur_pfilt; float cur_in; for (chan =0 ; chan < inp_c ; chan++){ size_t dzdout_idx = calcLinInd4(im_b_idx,filt_n_idx,im_h_idx,im_w_idx ,inp_b, filt_n, inp_h, inp_w); size_t dzdin_idx = calcLinInd4(im_b_idx , chan , inp_h_idx , inp_w_idx , inp_b, inp_c, inp_h, inp_w); size_t dzdl_filt_idx = calcLinInd4(filt_n_idx , chan , filt_h_idx , filt_w_idx, filt_n, inp_c, filt_h, filt_w ); cur_lfilt = lfilt[dzdl_filt_idx]; cur_pfilt = expf(cur_lfilt); cur_in = input[dzdin_idx]; dzdoutthis = dzdout[dzdout_idx]; atomicAdd(&(dzdin[dzdin_idx]), (dzdoutthis*cur_pfilt)); atomicAdd(&(dzdl_filt[dzdl_filt_idx]), dzdoutthis * cur_pfilt* (cur_in - 1 - cur_lfilt)); // p differentiable grad } } } template <typename scalar_t> __global__ void klconvs_cuda_backward_rand_kernel( const scalar_t* __restrict__ input, //TODO: MAKE sure the dims are dzdin and the threads are compatible const scalar_t* __restrict__ lfilt, const scalar_t* __restrict__ dzdout, const scalar_t* __restrict__ random, float* __restrict__ dzdin, float* __restrict__ dzdl_filt, const size_t filt_h, const size_t filt_w, const size_t filt_n, const size_t inp_h, const size_t inp_w, const size_t inp_c, const size_t inp_b, const int totalThreads ){ const size_t threadlinidx = blockIdx.x*blockDim.x + threadIdx.x; int int_temp_reg; const int im_w_idx = threadlinidx % inp_w; int_temp_reg =inp_w; const int im_h_idx = (threadlinidx/int_temp_reg) % inp_h; int_temp_reg *=inp_h; const int filt_n_idx = (threadlinidx/(int_temp_reg)) % filt_n; int_temp_reg *=filt_n; const int im_b_idx = (threadlinidx/(int_temp_reg)) % inp_b; int_temp_reg *= inp_b; const int filt_w_idx = (threadlinidx/(int_temp_reg)) % filt_w; int_temp_reg *= filt_w; const int filt_h_idx = (threadlinidx/(int_temp_reg)) ; const int inp_w_idx = im_w_idx + filt_w_idx - (filt_w/2) ; const int inp_h_idx = im_h_idx + filt_h_idx - (filt_h/2) ; bool flag = threadlinidx < totalThreads && inp_w_idx >=0 && inp_w_idx <inp_w && inp_h_idx >=0 && inp_h_idx <inp_h; if (flag){ // Calculate Imout Indices float randnum; float this_px_out=0; float float_temp; randnum = random[threadlinidx]; int chan_idx=0; int chan=0; float dzdoutthis; float cur_lfilt; float cur_pfilt; float cur_in; size_t dzdout_idx = calcLinInd4(im_b_idx,filt_n_idx,im_h_idx,im_w_idx ,inp_b, filt_n, inp_h, inp_w); size_t 
dzdl_filt_idx; dzdoutthis = dzdout[dzdout_idx]; for (chan =0 ; chan < inp_c ; chan++){ dzdl_filt_idx = calcLinInd4(filt_n_idx , chan , filt_h_idx , filt_w_idx, filt_n, inp_c, filt_h, filt_w ); cur_lfilt = lfilt[dzdl_filt_idx]; cur_pfilt = expf(cur_lfilt); if (cur_pfilt >= randnum){ chan_idx = chan; break; } randnum = randnum - cur_pfilt; } size_t dzdin_idx = calcLinInd4(im_b_idx , chan_idx , inp_h_idx , inp_w_idx , inp_b, inp_c, inp_h, inp_w); cur_in = input[dzdin_idx]; atomicAdd(&(dzdin[dzdin_idx]), (dzdoutthis)); atomicAdd(&(dzdl_filt[dzdl_filt_idx]), dzdoutthis * (cur_in - 1 - cur_lfilt)); // p differentiable grad } } // End Kernels ************************* //Forward wrapper ---------------------- std::vector<at::Tensor> klconvs_cuda_forward( at::Tensor input, at::Tensor log_filt){ //at::Tensor p_filt = (at::exp(log_filt)); // p_filt = p_filt.cumsum(1); const auto batch_sz = input.size(0); const auto im_height = input.size(2); const auto im_width = input.size(3); const auto im_nchans = input.size(1); const auto filt_num = log_filt.size(0); const auto filt_height = log_filt.size(2); const auto filt_width = log_filt.size(3); //printf("(%d,%d,%d,%d)\n", p_filt.size(0),p_filt.size(1),p_filt.size(2),p_filt.size(3)); //printf("filt_num:%d ",filt_num); auto out = at::zeros(input.type(),{batch_sz,filt_num,im_height,im_width}); //TODO: Remove except zero loop auto random = at::rand(input.type(),{filt_height,filt_width,batch_sz,filt_num,im_height,im_width}); const int totalOutPx = im_height*im_width*batch_sz*filt_num; // Single Loop const auto totalThreads = totalOutPx*filt_height*filt_width; const int totalThreads = totalOutPx; int j = 32; const int threadsperblock =j*32; int blockNum = (totalThreads/threadsperblock); if (totalThreads%threadsperblock != 0 ){ blockNum++; } const dim3 blocks(blockNum); //printf("blocks: %d, totaltherads/threadperbloc : %d", blocks,totalThreads/threadsperblock); AT_DISPATCH_FLOATING_TYPES(input.type(), "klconvs_forward_cuda", ([&] { hipLaunchKernelGGL(( klconvs_cuda_forward_kernel<scalar_t>), dim3(blocks), dim3(threadsperblock), 0, 0, input.data<scalar_t>(), log_filt.data<float>(), out.data<scalar_t>(), random.data<scalar_t>(),// rand . 
data please fix filt_width, filt_height, filt_num, im_width, im_height, im_nchans, batch_sz, totalOutPx, totalThreads ); })); //out = out.sum(0); /// ZEro Loop Version \TODO: rremove in case of diff kernel gpuErrchk( hipPeekAtLastError() ); gpuErrchk( hipDeviceSynchronize() ); return {out,random}; } //---------------------------------------------- // Backward wrapper std::vector<at::Tensor> klconvs_cuda_backward(at::Tensor dzdout, at::Tensor input, at::Tensor log_filt, at::Tensor random ){ const auto batch_sz = input.size(0); const auto im_height = input.size(2); const auto im_width = input.size(3); const auto im_nchans = input.size(1); const auto filt_num = log_filt.size(0); const auto filt_height = log_filt.size(2); const auto filt_width = log_filt.size(3); auto dzdinput = at::zeros_like(input); auto dzdlfilt = at::zeros_like(log_filt); // Single Loop const auto totalThreads = totalOutPx*filt_height*filt_width; const int totalThreads = im_height*im_width*batch_sz*filt_num*filt_height*filt_width; int j = 32; const int threadsperblock =j*32; int blockNum = (totalThreads/threadsperblock); if (totalThreads%threadsperblock != 0 ){ blockNum++; } const dim3 blocks(blockNum); //printf("blocks: %d, totaltherads/threadperbloc : %d", blocks,totalThreads/threadsperblock); AT_DISPATCH_FLOATING_TYPES(input.type(), "klconvs_backward_cuda", ([&] { hipLaunchKernelGGL(( klconvs_cuda_backward_kernel<scalar_t>), dim3(blocks), dim3(threadsperblock), 0, 0, // TODO : CHANGE KLCONVS AND KLCONV BACK AND FORTH. NO FORGET.... NEVER FORGET, it is easy not to seee. input.data<scalar_t>(), log_filt.data<scalar_t>(), dzdout.data<scalar_t>(), random.data<scalar_t>(),// rand . data please fix dzdinput.data<float>(), dzdlfilt.data<float>(), filt_width, filt_height, filt_num, im_width, im_height, im_nchans, batch_sz, totalThreads ); })); //dzdlfilt = at::div(dzdlfilt,im_height*im_width); //out = out.sum(0); /// ZEro Loop Version \TODO: rremove in case of diff kernel gpuErrchk( hipPeekAtLastError() ); //gpuErrchk( hipDeviceSynchronize() ); return {dzdinput, dzdlfilt}; } std::vector<at::Tensor> klconvs_cuda_backward_rand(at::Tensor dzdout, at::Tensor input, at::Tensor log_filt ){ const auto batch_sz = input.size(0); const auto im_height = input.size(2); const auto im_width = input.size(3); const auto im_nchans = input.size(1); const auto filt_num = log_filt.size(0); const auto filt_height = log_filt.size(2); const auto filt_width = log_filt.size(3); auto dzdinput = at::zeros_like(input); auto dzdlfilt = at::zeros_like(log_filt); auto random = at::rand(input.type(),{filt_height,filt_width,batch_sz,filt_num,im_height,im_width}); // Single Loop const auto totalThreads = totalOutPx*filt_height*filt_width; const int totalThreads = im_height*im_width*batch_sz*filt_num*filt_height*filt_width; int j = 32; const int threadsperblock =j*32; int blockNum = (totalThreads/threadsperblock); if (totalThreads%threadsperblock != 0 ){ blockNum++; } const dim3 blocks(blockNum); //printf("blocks: %d, totaltherads/threadperbloc : %d", blocks,totalThreads/threadsperblock); AT_DISPATCH_FLOATING_TYPES(input.type(), "klconvs_backward_rand_cuda", ([&] { hipLaunchKernelGGL(( klconvs_cuda_backward_rand_kernel<scalar_t>), dim3(blocks), dim3(threadsperblock), 0, 0, // TODO : CHANGE KLCONVS AND KLCONV BACK AND FORTH. NO FORGET.... NEVER FORGET, it is easy not to seee. input.data<scalar_t>(), log_filt.data<scalar_t>(), dzdout.data<scalar_t>(), random.data<scalar_t>(),// rand . 
data please fix dzdinput.data<float>(), dzdlfilt.data<float>(), filt_width, filt_height, filt_num, im_width, im_height, im_nchans, batch_sz, totalThreads ); })); //dzdlfilt = at::div(dzdlfilt,im_height*im_width); //out = out.sum(0); /// ZEro Loop Version \TODO: rremove in case of diff kernel gpuErrchk( hipPeekAtLastError() ); //gpuErrchk( hipDeviceSynchronize() ); return {dzdinput, dzdlfilt}; }
2e86072eee6ddbfe0bc27aad9e86035c596b2559.cu
#include <ATen/ATen.h> #include <cuda.h> #include <cuda_runtime.h> #include <vector> #include <math.h> #include </usr/local/cuda-9.0/targets/x86_64-linux/include/curand_kernel.h> // Kernels *********************** #define gpuErrchk(ans) { gpuAssert((ans), __FILE__, __LINE__); } inline void gpuAssert(cudaError_t code, const char *file, int line, bool abort=true) { if (code != cudaSuccess) { fprintf(stderr,"GPUassert: %s %s %d\n", cudaGetErrorString(code), file, line); if (abort) exit(code); } } __device__ __forceinline__ size_t calcLinInd(int h, int w, int c, int b, size_t HD, size_t WD,size_t CD ){ return w + WD*(h + HD*(c + CD*(b))); } __device__ __forceinline__ void calcIndpInd(int linind, int h, int w, int c, int b, size_t HD, size_t WD,size_t CD,int *out ){ *out = w + WD*(h + HD*(c + CD*(b))); return ; } __device__ __forceinline__ void calcIndForRandom(int linind, int h, int w, int c, int b, size_t HD, size_t WD,size_t CD,int *out ){ *out = w + WD*(h + HD*(c + CD*(b))); return ; } __device__ __forceinline__ size_t calcLinInd4(int idx_a, int idx_b, int idx_c, int idx_d, size_t AD, size_t BD,size_t CD, size_t DD ){ return idx_d + DD*(idx_c + CD*(idx_b + BD*(idx_a))); } /* template <typename scalar_t> __global__ void klconvs_cuda_forward_kernel_new( unsigned int seed, const scalar_t *input, const scalar_t *p_filt, scalar_t *out, const scalar_t *pad, const size_t filt_h, const size_t filt_w, const size_t filt_c, const size_t filt_n, const size_t inp_h, const size_t inp_w, const size_t inp_c, const size_t inp_b ){ const int out_w_sz = blockDim.x, out_h_sz = blockDim.y; const int out_chan_sz = gridDim.x, batchsz = gridDim.y; const int w = threadIdx.x , h = threadIdx.y; const int out_chan_id = blockIdx.x, batch_id= blockIdx.y; const int out_idx = threadIdx.x + blockDim.x*(threadIdx.y + blockDim.y*(blockIdx.x + gridDim.x*(blockIdx.y))); curandState_t state; //for( int channel =0 : channel < ) float temp_reg; float this_px_out = 0; bool flag = true; bool isoutboundh = false; bool isoutbound = false; float randnum = 0; int input_idx = 0; int filt_idx = 0; for (int dh=0 ; dh< out_h_sz; dh++){ isoutboundh = dh + h > inp_h; for ( int dw = 0 ; dw< out_w_sz; dw++ ){ isoutbound = isoutboundh | dw + w > inp_w; curand_init(seed,out_idx,dw + dh*(out_w_sz),&state); randnum = curand_uniform(&state); flag = true; for ( int chan = 0 ; chan < out_chan_sz; chan++){ // find the correct index of filt // get the index val from input // add to final answer; calcLinInd(dh,dw,chan,out_chan_id,filt_h,filt_w,filt_c, &filt_idx);//[out_chan_id][chan][dh][dw] temp_reg = p_filt[filt_idx]; if (temp_reg > randnum && flag) { if (!isoutbound){ calcLinInd(h+dh ,w+dw ,chan,batch_id, inp_h, inp_w, inp_c, &input_idx); this_px_out += input[input_idx]; } flag = false; } else{ randnum = randnum - temp_reg; } } } } out[out_idx] = this_px_out; curand_init(seed,0,1,&state); float randnum = curand_uniform(&state); curand_init(seed+1,0,0,&state); float randnum2 = curand_uniform(&state); printf("time : %f , %f --\n",randnum, randnum2); //printf("out_idx %d: left to right %d,%d,%d,%d with dims %d,%d,%d,%d\n", out_idx, threadIdx.x, threadIdx.y, blockIdx.x, blockIdx.y, blockDim.x, blockDim.y, gridDim.x, gridDim.y); ///__shared__ float[32][32] input_tile; // define shared memory } */ template <typename scalar_t> __global__ void klconvs_cuda_forward_kernel( const scalar_t* __restrict__ input, const float* __restrict__ l_filt, scalar_t* __restrict__ out, scalar_t* __restrict__ random, const size_t filt_h, const size_t filt_w, const size_t 
filt_n, const size_t inp_h, const size_t inp_w, const size_t inp_c, const size_t inp_b, const int totalOutPx, const int totalTreads ){ const size_t threadlinidx = blockIdx.x*blockDim.x + threadIdx.x; if (threadlinidx < totalOutPx){ // Calculate Imout Indices int int_temp_reg; const int im_w_idx = threadlinidx % inp_w; int_temp_reg =inp_w; const int im_h_idx = (threadlinidx/int_temp_reg) % inp_h; int_temp_reg *=inp_h; const int im_c_idx = (threadlinidx/(int_temp_reg)) % filt_n; int_temp_reg *=filt_n; const int im_b_idx = (threadlinidx/(int_temp_reg)); size_t size_temp_reg; float randnum; float this_px_out=0; float float_temp; float log_probs=0; size_t input_idx=0; int dh; int dw; int chan; int cur_im_h =0; int cur_im_w =0; int rand_idx = 0; float p_filt; for ( dh= 0 ; dh < filt_h; dh++){ cur_im_h= dh + im_h_idx - ((filt_h)/2); if (cur_im_h< 0){ continue; } if (cur_im_h >= inp_h){ break; } for (dw = 0 ; dw < filt_w; dw++ ){ cur_im_w = dw + im_w_idx - ((filt_w)/2); if (cur_im_w<0){ continue; } if (cur_im_w >= inp_w){ break; } randnum = random[threadlinidx + totalOutPx*(dw + filt_w*( dh ))];// * isoutbound; // GLOBAL MEM ACCESS for ( chan = 0 ; chan < inp_c; chan++){ // find the correct index of filt // get the index val from input // add to final answer; size_temp_reg = calcLinInd( dh, dw, chan,im_c_idx, filt_h, filt_w, inp_c); float_temp = l_filt[size_temp_reg]; p_filt = expf(float_temp); if (randnum <= p_filt){ input_idx = calcLinInd(cur_im_h, cur_im_w, chan, im_b_idx, inp_h, inp_w, inp_c); //inp_indices[dh][dw] = input_idx; ////////// GL MEM ACCESS ////*********** check wether bool*float is float //randnum = 100; log_probs = float_temp; random[threadlinidx + totalOutPx*(dw + filt_w*( dh ))] = __int2float_rn(chan); break; } //flag = (flag && !flag2); //j = j + flag2; randnum = randnum - p_filt; } this_px_out += input[input_idx] -log_probs; } } //for (int j=0 ; j< input_idx; j++){ // this_px_out += input[inp_indices[j]]; //} out[threadlinidx] = this_px_out; //////////////////// GL MEM WRITE } } /* template <typename scalar_t> __global__ void klconvs_cuda_forward_kernel_single_loop( const scalar_t* __restrict__ input, const scalar_t* __restrict__ p_filt, scalar_t* __restrict__ out, const scalar_t* __restrict__ random, const size_t filt_h, const size_t filt_w, const size_t filt_c, const size_t filt_n, const size_t inp_h, const size_t inp_w, const size_t inp_c, const size_t inp_b, const int totalOutPx, const int totalThreads ){ const int threadlinidx = blockIdx.x*blockDim.x + threadIdx.x; if (threadlinidx < totalThreads){ // Calculate Imout Indices int int_temp_reg =1; const int im_w_idx = threadlinidx % inp_w; int_temp_reg = inp_w; const int im_h_idx = (threadlinidx/int_temp_reg) % inp_h; int_temp_reg = int_temp_reg * inp_h; const int im_c_idx = (threadlinidx/(int_temp_reg)) % filt_n; int_temp_reg = int_temp_reg * filt_n; const int im_b_idx = (threadlinidx/(int_temp_reg)) %inp_b; int_temp_reg = int_temp_reg * inp_b; const int dw = (threadlinidx/(int_temp_reg)) %filt_w; int_temp_reg = int_temp_reg * filt_w; const int dh = (threadlinidx/(int_temp_reg)) %filt_h; int out_idx = im_w_idx; int_temp_reg= inp_w; out_idx += im_h_idx * int_temp_reg; int_temp_reg *= inp_h; out_idx += im_c_idx * int_temp_reg; int_temp_reg *= inp_c; out_idx += im_b_idx * int_temp_reg; float randnum ; int rand_idx; float temp_reg=0; float this_px_out=0; // Flags bool flag = true; bool flag2= false; bool isoutboundh = false; bool isoutbound = false; int input_idx; int filt_idx; int j = 0; isoutboundh = (dh + im_h_idx) > 
inp_h; isoutbound = isoutboundh || ((dw + im_w_idx) > inp_w); rand_idx = threadlinidx + totalOutPx*(dw+ filt_w*(dh)); randnum = random[rand_idx];// * isoutbound; // GLOBAL MEM ACCESS flag = true; for ( int chan = 0 ; chan < filt_c; chan++){ // find the correct index of filt // get the index val from input // add to final answer; calcLinInd(dh,dw,chan,im_c_idx,filt_h,filt_w,filt_c, &filt_idx);//[out_chan_id][chan][dh][dw] //temp_reg = p_filt[filt_idx]; ////////////GLOBAL MEM ACESSS flag2 = flag && (temp_reg >= randnum) && (!isoutbound); calcLinInd(im_h_idx + dh, im_w_idx + dw, chan, im_b_idx, inp_h, inp_w, inp_c, &input_idx); this_px_out = this_px_out + (flag2 * input[input_idx]); ////////// GL MEM ACESSS ////*********** check wether bool*float is float if (flag2){ break; } flag = (flag && !flag2); j = j + flag2; randnum = randnum - temp_reg; } //atomicAdd(&(out[out_idx]),this_px_out); // out[out_idx] += this_px_out; //////////////////// GL MEM WRITE } } template <typename scalar_t> __global__ void klconvs_cuda_forward_kernel_zero_loop( const scalar_t* __restrict__ input, const scalar_t* __restrict__ p_filt, scalar_t* out, const scalar_t* __restrict__ random, const size_t filt_h, const size_t filt_w, const size_t filt_c, const size_t filt_n, const size_t inp_h, const size_t inp_w, const size_t inp_c, const size_t inp_b, const long int totalOutPx, const long int totalActiveThreads ){ const long int threadlinidx = blockIdx.x*blockDim.x + threadIdx.x; if (threadlinidx < totalActiveThreads){ // Calculate Imout Indices int int_temp_reg =1; const int im_w_idx = threadlinidx % inp_w; int_temp_reg = inp_w; const int im_h_idx = (threadlinidx/int_temp_reg) % inp_h; int_temp_reg = int_temp_reg * inp_h; const int im_c_idx = (threadlinidx/(int_temp_reg)) % filt_n; int_temp_reg = int_temp_reg * filt_n; const int im_b_idx = (threadlinidx/(int_temp_reg)) % inp_b; int_temp_reg = int_temp_reg * inp_b; const int dw = (threadlinidx/(int_temp_reg)) % filt_w; int_temp_reg = int_temp_reg * filt_w; const int dh = (threadlinidx/(int_temp_reg)) % filt_h; int_temp_reg *= filt_h; int chan = (threadlinidx/(int_temp_reg)); int out_idx = im_w_idx + inp_w*( im_h_idx + inp_h*(im_c_idx + filt_n*(im_b_idx + inp_b*(dw + filt_w*(dh ))))); float randnum=0.5 ; long int rand_idx; float current_cumprob_reg=0; float prev_cumprob_reg=0; // Flags bool flag = true; bool flag2= false; bool isoutboundh = false; bool isoutbound = false; long int input_idx; long int filt_idx; long int prev_filt_idx; int j = 0; isoutboundh = (dh + im_h_idx) >= inp_h; isoutbound = isoutboundh || ((dw + im_w_idx) >= inp_w); rand_idx = threadlinidx % (inp_w*inp_h*filt_n*inp_b*filt_w*filt_h); randnum = random[rand_idx];// * isoutbound; // GLOBAL MEM ACCESS flag = true; // find the correct index of filt // get the index val from input // add to final answer; calcLinInd(dh,dw,chan,im_c_idx,filt_h,filt_w,filt_c, &filt_idx);//[out_chan_id][chan][dh][dw] current_cumprob_reg = p_filt[filt_idx]; ////////////GLOBAL MEM ACESSS if (chan == 0){ prev_cumprob_reg = 0 ; } else{ calcLinInd(dh,dw,(chan-1),im_c_idx,filt_h,filt_w,filt_c, &prev_filt_idx);//[out_chan_id][chan][dh][dw] prev_cumprob_reg = p_filt[prev_filt_idx]; ////////////GLOBAL MEM ACESSS } flag2 = (prev_cumprob_reg < randnum) && (current_cumprob_reg >= randnum) && (!isoutbound); //this_px_out = input[input_idx]; if (flag2){ //out_idx = 0; calcLinInd(im_h_idx + dh, im_w_idx + dw, chan, im_b_idx, inp_h, inp_w, inp_c, &input_idx); //out[out_idx] = atomicAdd(&(out[out_idx]),input[input_idx];////////// GL MEM ACESSS 
////*********** check wether bool*float is float if (out_idx > filt_h *filt_w *totalOutPx){ printf("Culprit: %d", out_idx); } out[out_idx] += input[input_idx]; //////////////////// GL MEM WRITE } //atomicAdd(&(out[out_idx]),this_px_out); // out[out_idx] += this_px_out; //////////////////// GL MEM WRITE } } /* ---------------------- Print Tests printf("number of times went in %d\n",j); curand_init(seed,0,1,&state); float randnum = curand_uniform(&state); curand_init(seed+1,0,0,&state); float randnum2 = curand_uniform(&state); printf("time : %f , %f --\n",randnum, randnum2); printf("out_idx %d: left to right %d,%d,%d,%d with dims %d,%d,%d,%d\n", out_idx, threadIdx.x, threadIdx.y, blockIdx.x, blockIdx.y, blockDim.x, blockDim.y, gridDim.x, gridDim.y); */ //__shared__ float[32][32] input_tile; // define shared memory template <typename scalar_t> __global__ void klconvs_cuda_backward_kernel( const scalar_t* __restrict__ input, //TODO: MAKE sure the dims are dzdin and the threads are compatible const scalar_t* __restrict__ lfilt, const scalar_t* __restrict__ dzdout, const scalar_t* __restrict__ random, float* __restrict__ dzdin, float* __restrict__ dzdl_filt, const size_t filt_h, const size_t filt_w, const size_t filt_n, const size_t inp_h, const size_t inp_w, const size_t inp_c, const size_t inp_b, const int totalThreads ){ const size_t threadlinidx = blockIdx.x*blockDim.x + threadIdx.x; int int_temp_reg; const int im_w_idx = threadlinidx % inp_w; int_temp_reg =inp_w; const int im_h_idx = (threadlinidx/int_temp_reg) % inp_h; int_temp_reg *=inp_h; const int filt_n_idx = (threadlinidx/(int_temp_reg)) % filt_n; int_temp_reg *=filt_n; const int im_b_idx = (threadlinidx/(int_temp_reg)) % inp_b; int_temp_reg *= inp_b; const int filt_w_idx = (threadlinidx/(int_temp_reg)) % filt_w; int_temp_reg *= filt_w; const int filt_h_idx = (threadlinidx/(int_temp_reg)) ; const int inp_w_idx = im_w_idx + filt_w_idx - (filt_w/2) ; const int inp_h_idx = im_h_idx + filt_h_idx - (filt_h/2) ; bool flag = threadlinidx < totalThreads && inp_w_idx >=0 && inp_w_idx <inp_w && inp_h_idx >=0 && inp_h_idx <inp_h; if (flag){ // Calculate Imout Indices float randnum; float this_px_out=0; float float_temp; // Flags // bool flag = true; // bool flag2= false; // bool isoutboundh = false; // bool isoutbound = false; int chan = __float2int_rn(random[threadlinidx]); int input_idx=0; //int filt_idx; //int j = 0; size_t dzdout_idx = calcLinInd4(im_b_idx,filt_n_idx,im_h_idx,im_w_idx ,inp_b, filt_n, inp_h, inp_w); size_t dzdin_idx = calcLinInd4(im_b_idx , chan , inp_h_idx , inp_w_idx , inp_b, inp_c, inp_h, inp_w); // printf("%d \n",chan); //size_t dzdin_idx = im_b_idx + chan + inp_h_idx + inp_w_idx; size_t dzdl_filt_idx = calcLinInd4(filt_n_idx , chan , filt_h_idx , filt_w_idx, filt_n, inp_c, filt_h, filt_w ); float dzdoutthis = dzdout[dzdout_idx]; //dzdin[0] += dzdoutthis; //dzdin[dzdin_idx] += dzdoutthis; atomicAdd(&(dzdin[dzdin_idx]), dzdoutthis); atomicAdd(&(dzdl_filt[dzdl_filt_idx]), dzdoutthis* (input[dzdin_idx] - 1 - lfilt[dzdl_filt_idx])); // p differentiable grad //atomicAdd(&(dzdl_filt[dzdl_filt_idx]), -dzdoutthis); // p NON-differentiable grad //dzdl_filt[dzdl_filt_idx] += dzdoutthis* input[dzdin_idx] ; } } template <typename scalar_t> __global__ void klconv_cuda_backward_kernel( const scalar_t* __restrict__ input, //TODO: MAKE sure the dims are dzdin and the threads are compatible const scalar_t* __restrict__ lfilt, const scalar_t* __restrict__ dzdout, const scalar_t* __restrict__ random, float* __restrict__ dzdin, float* 
__restrict__ dzdl_filt, const size_t filt_h, const size_t filt_w, const size_t filt_n, const size_t inp_h, const size_t inp_w, const size_t inp_c, const size_t inp_b, const int totalThreads ){ const size_t threadlinidx = blockIdx.x*blockDim.x + threadIdx.x; int int_temp_reg; const int im_w_idx = threadlinidx % inp_w; int_temp_reg =inp_w; const int im_h_idx = (threadlinidx/int_temp_reg) % inp_h; int_temp_reg *=inp_h; const int filt_n_idx = (threadlinidx/(int_temp_reg)) % filt_n; int_temp_reg *=filt_n; const int im_b_idx = (threadlinidx/(int_temp_reg)) % inp_b; int_temp_reg *= inp_b; const int filt_w_idx = (threadlinidx/(int_temp_reg)) % filt_w; int_temp_reg *= filt_w; const int filt_h_idx = (threadlinidx/(int_temp_reg)) ; const int inp_w_idx = im_w_idx + filt_w_idx - (filt_w/2) ; const int inp_h_idx = im_h_idx + filt_h_idx - (filt_h/2) ; bool flag = threadlinidx < totalThreads && inp_w_idx >=0 && inp_w_idx <inp_w && inp_h_idx >=0 && inp_h_idx <inp_h; if (flag){ // Calculate Imout Indices float randnum; float this_px_out=0; float float_temp; int chan = __float2int_rn(random[threadlinidx]); int input_idx=0; float dzdoutthis; float cur_lfilt; float cur_pfilt; float cur_in; for (chan =0 ; chan < inp_c ; chan++){ size_t dzdout_idx = calcLinInd4(im_b_idx,filt_n_idx,im_h_idx,im_w_idx ,inp_b, filt_n, inp_h, inp_w); size_t dzdin_idx = calcLinInd4(im_b_idx , chan , inp_h_idx , inp_w_idx , inp_b, inp_c, inp_h, inp_w); size_t dzdl_filt_idx = calcLinInd4(filt_n_idx , chan , filt_h_idx , filt_w_idx, filt_n, inp_c, filt_h, filt_w ); cur_lfilt = lfilt[dzdl_filt_idx]; cur_pfilt = expf(cur_lfilt); cur_in = input[dzdin_idx]; dzdoutthis = dzdout[dzdout_idx]; atomicAdd(&(dzdin[dzdin_idx]), (dzdoutthis*cur_pfilt)); atomicAdd(&(dzdl_filt[dzdl_filt_idx]), dzdoutthis * cur_pfilt* (cur_in - 1 - cur_lfilt)); // p differentiable grad } } } template <typename scalar_t> __global__ void klconvs_cuda_backward_rand_kernel( const scalar_t* __restrict__ input, //TODO: MAKE sure the dims are dzdin and the threads are compatible const scalar_t* __restrict__ lfilt, const scalar_t* __restrict__ dzdout, const scalar_t* __restrict__ random, float* __restrict__ dzdin, float* __restrict__ dzdl_filt, const size_t filt_h, const size_t filt_w, const size_t filt_n, const size_t inp_h, const size_t inp_w, const size_t inp_c, const size_t inp_b, const int totalThreads ){ const size_t threadlinidx = blockIdx.x*blockDim.x + threadIdx.x; int int_temp_reg; const int im_w_idx = threadlinidx % inp_w; int_temp_reg =inp_w; const int im_h_idx = (threadlinidx/int_temp_reg) % inp_h; int_temp_reg *=inp_h; const int filt_n_idx = (threadlinidx/(int_temp_reg)) % filt_n; int_temp_reg *=filt_n; const int im_b_idx = (threadlinidx/(int_temp_reg)) % inp_b; int_temp_reg *= inp_b; const int filt_w_idx = (threadlinidx/(int_temp_reg)) % filt_w; int_temp_reg *= filt_w; const int filt_h_idx = (threadlinidx/(int_temp_reg)) ; const int inp_w_idx = im_w_idx + filt_w_idx - (filt_w/2) ; const int inp_h_idx = im_h_idx + filt_h_idx - (filt_h/2) ; bool flag = threadlinidx < totalThreads && inp_w_idx >=0 && inp_w_idx <inp_w && inp_h_idx >=0 && inp_h_idx <inp_h; if (flag){ // Calculate Imout Indices float randnum; float this_px_out=0; float float_temp; randnum = random[threadlinidx]; int chan_idx=0; int chan=0; float dzdoutthis; float cur_lfilt; float cur_pfilt; float cur_in; size_t dzdout_idx = calcLinInd4(im_b_idx,filt_n_idx,im_h_idx,im_w_idx ,inp_b, filt_n, inp_h, inp_w); size_t dzdl_filt_idx; dzdoutthis = dzdout[dzdout_idx]; for (chan =0 ; chan < inp_c ; chan++){ 
dzdl_filt_idx = calcLinInd4(filt_n_idx , chan , filt_h_idx , filt_w_idx, filt_n, inp_c, filt_h, filt_w ); cur_lfilt = lfilt[dzdl_filt_idx]; cur_pfilt = expf(cur_lfilt); if (cur_pfilt >= randnum){ chan_idx = chan; break; } randnum = randnum - cur_pfilt; } size_t dzdin_idx = calcLinInd4(im_b_idx , chan_idx , inp_h_idx , inp_w_idx , inp_b, inp_c, inp_h, inp_w); cur_in = input[dzdin_idx]; atomicAdd(&(dzdin[dzdin_idx]), (dzdoutthis)); atomicAdd(&(dzdl_filt[dzdl_filt_idx]), dzdoutthis * (cur_in - 1 - cur_lfilt)); // p differentiable grad } } // End Kernels ************************* //Forward wrapper ---------------------- std::vector<at::Tensor> klconvs_cuda_forward( at::Tensor input, at::Tensor log_filt){ //at::Tensor p_filt = (at::exp(log_filt)); // p_filt = p_filt.cumsum(1); const auto batch_sz = input.size(0); const auto im_height = input.size(2); const auto im_width = input.size(3); const auto im_nchans = input.size(1); const auto filt_num = log_filt.size(0); const auto filt_height = log_filt.size(2); const auto filt_width = log_filt.size(3); //printf("(%d,%d,%d,%d)\n", p_filt.size(0),p_filt.size(1),p_filt.size(2),p_filt.size(3)); //printf("filt_num:%d ",filt_num); auto out = at::zeros(input.type(),{batch_sz,filt_num,im_height,im_width}); //TODO: Remove except zero loop auto random = at::rand(input.type(),{filt_height,filt_width,batch_sz,filt_num,im_height,im_width}); const int totalOutPx = im_height*im_width*batch_sz*filt_num; // Single Loop const auto totalThreads = totalOutPx*filt_height*filt_width; const int totalThreads = totalOutPx; int j = 32; const int threadsperblock =j*32; int blockNum = (totalThreads/threadsperblock); if (totalThreads%threadsperblock != 0 ){ blockNum++; } const dim3 blocks(blockNum); //printf("blocks: %d, totaltherads/threadperbloc : %d", blocks,totalThreads/threadsperblock); AT_DISPATCH_FLOATING_TYPES(input.type(), "klconvs_forward_cuda", ([&] { klconvs_cuda_forward_kernel<scalar_t><<<blocks, threadsperblock>>>( input.data<scalar_t>(), log_filt.data<float>(), out.data<scalar_t>(), random.data<scalar_t>(),// rand . 
data please fix filt_width, filt_height, filt_num, im_width, im_height, im_nchans, batch_sz, totalOutPx, totalThreads ); })); //out = out.sum(0); /// ZEro Loop Version \TODO: rremove in case of diff kernel gpuErrchk( cudaPeekAtLastError() ); gpuErrchk( cudaDeviceSynchronize() ); return {out,random}; } //---------------------------------------------- // Backward wrapper std::vector<at::Tensor> klconvs_cuda_backward(at::Tensor dzdout, at::Tensor input, at::Tensor log_filt, at::Tensor random ){ const auto batch_sz = input.size(0); const auto im_height = input.size(2); const auto im_width = input.size(3); const auto im_nchans = input.size(1); const auto filt_num = log_filt.size(0); const auto filt_height = log_filt.size(2); const auto filt_width = log_filt.size(3); auto dzdinput = at::zeros_like(input); auto dzdlfilt = at::zeros_like(log_filt); // Single Loop const auto totalThreads = totalOutPx*filt_height*filt_width; const int totalThreads = im_height*im_width*batch_sz*filt_num*filt_height*filt_width; int j = 32; const int threadsperblock =j*32; int blockNum = (totalThreads/threadsperblock); if (totalThreads%threadsperblock != 0 ){ blockNum++; } const dim3 blocks(blockNum); //printf("blocks: %d, totaltherads/threadperbloc : %d", blocks,totalThreads/threadsperblock); AT_DISPATCH_FLOATING_TYPES(input.type(), "klconvs_backward_cuda", ([&] { klconvs_cuda_backward_kernel<scalar_t><<<blocks, threadsperblock>>>( // TODO : CHANGE KLCONVS AND KLCONV BACK AND FORTH. NO FORGET.... NEVER FORGET, it is easy not to seee. input.data<scalar_t>(), log_filt.data<scalar_t>(), dzdout.data<scalar_t>(), random.data<scalar_t>(),// rand . data please fix dzdinput.data<float>(), dzdlfilt.data<float>(), filt_width, filt_height, filt_num, im_width, im_height, im_nchans, batch_sz, totalThreads ); })); //dzdlfilt = at::div(dzdlfilt,im_height*im_width); //out = out.sum(0); /// ZEro Loop Version \TODO: rremove in case of diff kernel gpuErrchk( cudaPeekAtLastError() ); //gpuErrchk( cudaDeviceSynchronize() ); return {dzdinput, dzdlfilt}; } std::vector<at::Tensor> klconvs_cuda_backward_rand(at::Tensor dzdout, at::Tensor input, at::Tensor log_filt ){ const auto batch_sz = input.size(0); const auto im_height = input.size(2); const auto im_width = input.size(3); const auto im_nchans = input.size(1); const auto filt_num = log_filt.size(0); const auto filt_height = log_filt.size(2); const auto filt_width = log_filt.size(3); auto dzdinput = at::zeros_like(input); auto dzdlfilt = at::zeros_like(log_filt); auto random = at::rand(input.type(),{filt_height,filt_width,batch_sz,filt_num,im_height,im_width}); // Single Loop const auto totalThreads = totalOutPx*filt_height*filt_width; const int totalThreads = im_height*im_width*batch_sz*filt_num*filt_height*filt_width; int j = 32; const int threadsperblock =j*32; int blockNum = (totalThreads/threadsperblock); if (totalThreads%threadsperblock != 0 ){ blockNum++; } const dim3 blocks(blockNum); //printf("blocks: %d, totaltherads/threadperbloc : %d", blocks,totalThreads/threadsperblock); AT_DISPATCH_FLOATING_TYPES(input.type(), "klconvs_backward_rand_cuda", ([&] { klconvs_cuda_backward_rand_kernel<scalar_t><<<blocks, threadsperblock>>>( // TODO : CHANGE KLCONVS AND KLCONV BACK AND FORTH. NO FORGET.... NEVER FORGET, it is easy not to seee. input.data<scalar_t>(), log_filt.data<scalar_t>(), dzdout.data<scalar_t>(), random.data<scalar_t>(),// rand . 
data please fix dzdinput.data<float>(), dzdlfilt.data<float>(), filt_width, filt_height, filt_num, im_width, im_height, im_nchans, batch_sz, totalThreads ); })); //dzdlfilt = at::div(dzdlfilt,im_height*im_width); //out = out.sum(0); /// ZEro Loop Version \TODO: rremove in case of diff kernel gpuErrchk( cudaPeekAtLastError() ); //gpuErrchk( cudaDeviceSynchronize() ); return {dzdinput, dzdlfilt}; }
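// The klconvs wrappers above size their 1-D grids by dividing the thread count by the
// block size and adding one extra block when there is a remainder. A hedged, stand-alone
// sketch of the equivalent ceiling division (the helper name is illustrative and not part
// of the file above):

#include <cuda_runtime.h>

static inline dim3 grid_for(int total_threads, int threads_per_block)
{
  // ceil(total_threads / threads_per_block); surplus threads are harmless because the
  // kernels guard with `if (threadlinidx < totalThreads)`.
  const int block_num = (total_threads + threads_per_block - 1) / threads_per_block;
  return dim3(block_num);
}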
cf448e16effa317712c20e84a3528844587f9dcb.hip
// !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <hip/hip_runtime.h>

__global__ void reverse (int *d, const int len)
{
  __shared__ int s[256];
  int t = threadIdx.x;
  s[t] = d[t];
  __syncthreads();
  d[t] = s[len-t-1];
}

int main(int argc, char* argv[])
{
  if (argc != 2) {
    printf("Usage: ./%s <iterations>\n", argv[0]);
    return 1;
  }

  // specify the number of test cases
  const int iteration = atoi(argv[1]);

  // number of elements to reverse
  const int len = 256;
  const int elem_size = len * sizeof(int);

  // device result
  int test[len];

  // expected results after reverse operations even/odd times
  int error = 0;
  int gold_odd[len];
  int gold_even[len];
  for (int i = 0; i < len; i++) {
    gold_odd[i] = len-i-1;
    gold_even[i] = i;
  }

  int *d_test;
  hipMalloc((void**)&d_test, elem_size);

  srand(123);
  for (int i = 0; i < iteration; i++) {
    const int count = rand() % 10000 + 100;  // bound the reverse range
    hipMemcpy(d_test, gold_even, elem_size, hipMemcpyHostToDevice);
    for (int j = 0; j < count; j++)
      hipLaunchKernelGGL(( reverse), dim3(1), dim3(len), 0, 0, d_test, len);
    hipMemcpy(test, d_test, elem_size, hipMemcpyDeviceToHost);
    if (count % 2 == 0)
      error = memcmp(test, gold_even, elem_size);
    else
      error = memcmp(test, gold_odd, elem_size);
    if (error) break;
  }

  printf("%s\n", error ? "FAIL" : "PASS");

  hipFree(d_test);
  return 0;
}
cf448e16effa317712c20e84a3528844587f9dcb.cu
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <cuda.h>

__global__ void reverse (int *d, const int len)
{
  __shared__ int s[256];
  int t = threadIdx.x;
  s[t] = d[t];
  __syncthreads();
  d[t] = s[len-t-1];
}

int main(int argc, char* argv[])
{
  if (argc != 2) {
    printf("Usage: ./%s <iterations>\n", argv[0]);
    return 1;
  }

  // specify the number of test cases
  const int iteration = atoi(argv[1]);

  // number of elements to reverse
  const int len = 256;
  const int elem_size = len * sizeof(int);

  // device result
  int test[len];

  // expected results after reverse operations even/odd times
  int error = 0;
  int gold_odd[len];
  int gold_even[len];
  for (int i = 0; i < len; i++) {
    gold_odd[i] = len-i-1;
    gold_even[i] = i;
  }

  int *d_test;
  cudaMalloc((void**)&d_test, elem_size);

  srand(123);
  for (int i = 0; i < iteration; i++) {
    const int count = rand() % 10000 + 100;  // bound the reverse range
    cudaMemcpy(d_test, gold_even, elem_size, cudaMemcpyHostToDevice);
    for (int j = 0; j < count; j++)
      reverse<<<1, len>>> (d_test, len);
    cudaMemcpy(test, d_test, elem_size, cudaMemcpyDeviceToHost);
    if (count % 2 == 0)
      error = memcmp(test, gold_even, elem_size);
    else
      error = memcmp(test, gold_odd, elem_size);
    if (error) break;
  }

  printf("%s\n", error ? "FAIL" : "PASS");

  cudaFree(d_test);
  return 0;
}
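// The test above depends on reverse being an involution: launching the kernel twice in a
// row restores the original order, so only the two reference arrays gold_even and gold_odd
// are needed. A host-only sketch of the same invariant (illustrative helper, not part of
// the file above):

#include <algorithm>
#include <cassert>
#include <vector>

static void host_reverse_check(int len, int count)
{
  std::vector<int> v(len);
  for (int i = 0; i < len; i++) v[i] = i;                 // same contents as gold_even
  for (int j = 0; j < count; j++) std::reverse(v.begin(), v.end());
  for (int i = 0; i < len; i++)
    assert(v[i] == (count % 2 == 0 ? i : len - i - 1));   // gold_even vs gold_odd
}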
953addfc9be9f7678956a0051f78b036f1f68ec9.hip
// !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "multiplication.hip"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;

int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};

int main(int argc, char **argv) {
  hipSetDevice(0);
  char* p;
  int matrix_len=strtol(argv[1], &p, 10);
  for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
    for(int block_looper=0;block_looper<20;block_looper++){
      int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
      int *A = NULL;
      hipMalloc(&A, XSIZE*YSIZE);
      int *B = NULL;
      hipMalloc(&B, XSIZE*YSIZE);
      int *C = NULL;
      hipMalloc(&C, XSIZE*YSIZE);
      int N = XSIZE*YSIZE;
      int iXSIZE= XSIZE;
      int iYSIZE= YSIZE;
      while(iXSIZE%BLOCKX!=0) { iXSIZE++; }
      while(iYSIZE%BLOCKY!=0) { iYSIZE++; }
      dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
      dim3 threadBlock(BLOCKX, BLOCKY);
      hipFree(0);
      hipLaunchKernelGGL(( multiplication), dim3(gridBlock),dim3(threadBlock), 0, 0, A,B,C,N);
      hipDeviceSynchronize();
      for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
        hipLaunchKernelGGL(( multiplication), dim3(gridBlock),dim3(threadBlock), 0, 0, A,B,C,N);
      }
      auto start = steady_clock::now();
      for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
        hipLaunchKernelGGL(( multiplication), dim3(gridBlock),dim3(threadBlock), 0, 0, A,B,C,N);
      }
      auto end = steady_clock::now();
      auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
      cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
    }
  }
}
953addfc9be9f7678956a0051f78b036f1f68ec9.cu
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "multiplication.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;

int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};

int main(int argc, char **argv) {
  cudaSetDevice(0);
  char* p;
  int matrix_len=strtol(argv[1], &p, 10);
  for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
    for(int block_looper=0;block_looper<20;block_looper++){
      int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
      int *A = NULL;
      cudaMalloc(&A, XSIZE*YSIZE);
      int *B = NULL;
      cudaMalloc(&B, XSIZE*YSIZE);
      int *C = NULL;
      cudaMalloc(&C, XSIZE*YSIZE);
      int N = XSIZE*YSIZE;
      int iXSIZE= XSIZE;
      int iYSIZE= YSIZE;
      while(iXSIZE%BLOCKX!=0) { iXSIZE++; }
      while(iYSIZE%BLOCKY!=0) { iYSIZE++; }
      dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
      dim3 threadBlock(BLOCKX, BLOCKY);
      cudaFree(0);
      multiplication<<<gridBlock,threadBlock>>>(A,B,C,N);
      cudaDeviceSynchronize();
      for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
        multiplication<<<gridBlock,threadBlock>>>(A,B,C,N);
      }
      auto start = steady_clock::now();
      for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
        multiplication<<<gridBlock,threadBlock>>>(A,B,C,N);
      }
      auto end = steady_clock::now();
      auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
      cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
    }
  }
}
15b9f62021aa142d6640cc2c387892a92a3669db.hip
// !!! This is a file automatically generated by hipify!!!
#include "cutil_temp.h"
#include "DebugOutput.h"

float* debug_init_memory(void)
{
  // allocate device memory
  float *DEBUG_OUTPUT_D;
  CUDA_SAFE_CALL(hipMalloc((void**) &DEBUG_OUTPUT_D, \
                           sizeof(float)));
  CUDA_SAFE_CALL(hipMemcpyToSymbol("DEBUG_OUTPUT_C", \
                                   &DEBUG_OUTPUT_D , \
                                   sizeof(float*), 0, \
                                   hipMemcpyHostToDevice));

  // Initialize memory to zero
  debug_reset(DEBUG_OUTPUT_D);

  return DEBUG_OUTPUT_D;
}

float debug_print(float* DEBUG_OUTPUT_D)
{
  float DEBUG_OUTPUT;
  CUDA_SAFE_CALL(hipMemcpy(&DEBUG_OUTPUT, \
                           DEBUG_OUTPUT_D, \
                           sizeof (float), \
                           hipMemcpyDeviceToHost) );
  return DEBUG_OUTPUT;
}

void debug_reset(float* DEBUG_OUTPUT_D)
{
  // Reset memory to zero
  float zero = 0;
  CUDA_SAFE_CALL(hipMemcpy(DEBUG_OUTPUT_D, \
                           &zero, \
                           sizeof (float), \
                           hipMemcpyHostToDevice) );
}
15b9f62021aa142d6640cc2c387892a92a3669db.cu
#include "cutil_temp.h" #include "DebugOutput.h" float* debug_init_memory(void) { // allocate device memory float *DEBUG_OUTPUT_D; CUDA_SAFE_CALL(cudaMalloc((void**) &DEBUG_OUTPUT_D, \ sizeof(float))); CUDA_SAFE_CALL(cudaMemcpyToSymbol("DEBUG_OUTPUT_C", \ &DEBUG_OUTPUT_D , \ sizeof(float*), 0, \ cudaMemcpyHostToDevice)); // Initialize memory to zero debug_reset(DEBUG_OUTPUT_D); return DEBUG_OUTPUT_D; } float debug_print(float* DEBUG_OUTPUT_D) { float DEBUG_OUTPUT; CUDA_SAFE_CALL(cudaMemcpy(&DEBUG_OUTPUT, \ DEBUG_OUTPUT_D, \ sizeof (float), \ cudaMemcpyDeviceToHost) ); return DEBUG_OUTPUT; } void debug_reset(float* DEBUG_OUTPUT_D) { // Reset memory to zero float zero = 0; CUDA_SAFE_CALL(cudaMemcpy(DEBUG_OUTPUT_D, \ &zero, \ sizeof (float), \ cudaMemcpyHostToDevice) ); }
b7a9402b981a13df475fbdf1a35052c21fc6ea97.hip
// !!! This is a file automatically generated by hipify!!!
#include "flo/device/cu_raii.cuh"

#include <iostream>
#include <array>

FLO_DEVICE_NAMESPACE_BEGIN

namespace cu_raii
{
Stream::Stream()
{
  status = hipStreamCreate(&handle);
}

Stream::~Stream()
{
  join();
  status = hipStreamDestroy(handle);
}

Stream::operator hipStream_t() const noexcept
{
  return handle;
}

void Stream::join() noexcept
{
  status = hipStreamSynchronize(handle);
}

namespace solver
{
SolverSp::SolverSp()
{
  status = cusolverSpCreate(&handle);
}

SolverSp::~SolverSp()
{
  cusolverSpDestroy(handle);
}

SolverSp::operator cusolverSpHandle_t() const noexcept
{
  return handle;
}

bool SolverSp::error_check(int line) const noexcept
{
  if (status == CUSOLVER_STATUS_SUCCESS)
    return false;

  static constexpr std::array<const char*, 8> error_string = {
    "CUSOLVER_SUCCESS",
    "CUSOLVER_NOT_INITIALIZED",
    "CUSOLVER_ALLOC_FAILED",
    "CUSOLVER_INVALID_VALUE",
    "CUSOLVER_ARCH_MISMATCH",
    "CUSOLVER_EXECUTION_FAILED",
    "CUSOLVER_INTERNAL_ERROR",
    "CUSOLVER_MATRIX_TYPE_NOT_SUPPORTED"};

  std::cout << error_string[status];
  if (line != -1)
    std::cout << ", on line" << line;
  std::cout << '\n';
  return true;
}

void SolverSp::error_assert(int line) const noexcept
{
  if (error_check(line))
    std::exit(1);
}
}

namespace sparse
{
Handle::Handle()
{
  status = hipsparseCreate(&handle);
}

Handle::~Handle()
{
  hipsparseDestroy(handle);
}

Handle::operator hipsparseHandle_t() const noexcept
{
  return handle;
}

bool Handle::error_check(int line) const noexcept
{
  if (status == HIPSPARSE_STATUS_SUCCESS)
    return false;

  static constexpr std::array<const char*, 9> error_string = {
    "CUSPARSE_SUCCESS",
    "CUSPARSE_NOT_INITIALIZED",
    "CUSPARSE_ALLOC_FAILED",
    "CUSPARSE_INVALID_VALUE",
    "CUSPARSE_ARCH_MISMATCH",
    "CUSPARSE_MAPPING_ERROR",
    "CUSPARSE_EXECUTION_FAILED",
    "CUSPARSE_INTERNAL_ERROR",
    "CUSPARSE_MATRIX_TYPE_NOT_SUPPORTED"};

  std::cout << error_string[status];
  if (line != -1)
    std::cout << ", on line" << line;
  std::cout << '\n';
  return true;
}

void Handle::error_assert(int line) const noexcept
{
  if (error_check(line))
    std::exit(1);
}

MatrixDescription::MatrixDescription()
{
  hipsparseCreateMatDescr(&description);
}

MatrixDescription::MatrixDescription(hipsparseStatus_t* io_status)
{
  *io_status = hipsparseCreateMatDescr(&description);
}

MatrixDescription::~MatrixDescription()
{
  hipsparseDestroyMatDescr(description);
}

MatrixDescription::operator hipsparseMatDescr_t() const noexcept
{
  return description;
}
}  // namespace sparse
}  // namespace cu_raii

FLO_DEVICE_NAMESPACE_END
b7a9402b981a13df475fbdf1a35052c21fc6ea97.cu
#include "flo/device/cu_raii.cuh" #include <iostream> #include <array> FLO_DEVICE_NAMESPACE_BEGIN namespace cu_raii { Stream::Stream() { status = cudaStreamCreate(&handle); } Stream::~Stream() { join(); status = cudaStreamDestroy(handle); } Stream::operator cudaStream_t() const noexcept { return handle; } void Stream::join() noexcept { status = cudaStreamSynchronize(handle); } namespace solver { SolverSp::SolverSp() { status = cusolverSpCreate(&handle); } SolverSp::~SolverSp() { cusolverSpDestroy(handle); } SolverSp::operator cusolverSpHandle_t() const noexcept { return handle; } bool SolverSp::error_check(int line) const noexcept { if (status == CUSOLVER_STATUS_SUCCESS) return false; static constexpr std::array<const char*, 8> error_string = { "CUSOLVER_SUCCESS", "CUSOLVER_NOT_INITIALIZED", "CUSOLVER_ALLOC_FAILED", "CUSOLVER_INVALID_VALUE", "CUSOLVER_ARCH_MISMATCH", "CUSOLVER_EXECUTION_FAILED", "CUSOLVER_INTERNAL_ERROR", "CUSOLVER_MATRIX_TYPE_NOT_SUPPORTED"}; std::cout << error_string[status]; if (line != -1) std::cout << ", on line" << line; std::cout << '\n'; return true; } void SolverSp::error_assert(int line) const noexcept { if (error_check(line)) std::exit(1); } } namespace sparse { Handle::Handle() { status = cusparseCreate(&handle); } Handle::~Handle() { cusparseDestroy(handle); } Handle::operator cusparseHandle_t() const noexcept { return handle; } bool Handle::error_check(int line) const noexcept { if (status == CUSPARSE_STATUS_SUCCESS) return false; static constexpr std::array<const char*, 9> error_string = { "CUSPARSE_SUCCESS", "CUSPARSE_NOT_INITIALIZED", "CUSPARSE_ALLOC_FAILED", "CUSPARSE_INVALID_VALUE", "CUSPARSE_ARCH_MISMATCH", "CUSPARSE_MAPPING_ERROR", "CUSPARSE_EXECUTION_FAILED", "CUSPARSE_INTERNAL_ERROR", "CUSPARSE_MATRIX_TYPE_NOT_SUPPORTED"}; std::cout << error_string[status]; if (line != -1) std::cout << ", on line" << line; std::cout << '\n'; return true; } void Handle::error_assert(int line) const noexcept { if (error_check(line)) std::exit(1); } MatrixDescription::MatrixDescription() { cusparseCreateMatDescr(&description); } MatrixDescription::MatrixDescription(cusparseStatus_t* io_status) { *io_status = cusparseCreateMatDescr(&description); } MatrixDescription::~MatrixDescription() { cusparseDestroyMatDescr(description); } MatrixDescription::operator cusparseMatDescr_t() const noexcept { return description; } } // namespace sparse } // namespace cu_raii FLO_DEVICE_NAMESPACE_END
07668428ac8d59e8546f8ecade34e46831e909ac.hip
// !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "set_with_value_util_kernel.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;

int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};

int main(int argc, char **argv) {
  hipSetDevice(0);
  char* p;
  int matrix_len=strtol(argv[1], &p, 10);
  for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
    for(int block_looper=0;block_looper<20;block_looper++){
      int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
      double2 __restrict *buf = NULL;
      hipMalloc(&buf, XSIZE*YSIZE);
      double v = 1;
      int elem_count = 1;
      int iXSIZE= XSIZE;
      int iYSIZE= YSIZE;
      while(iXSIZE%BLOCKX!=0) { iXSIZE++; }
      while(iYSIZE%BLOCKY!=0) { iYSIZE++; }
      dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
      dim3 threadBlock(BLOCKX, BLOCKY);
      hipFree(0);
      hipLaunchKernelGGL(( set_with_value_util_kernel), dim3(gridBlock),dim3(threadBlock), 0, 0, buf,v,elem_count);
      hipDeviceSynchronize();
      for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
        hipLaunchKernelGGL(( set_with_value_util_kernel), dim3(gridBlock),dim3(threadBlock), 0, 0, buf,v,elem_count);
      }
      auto start = steady_clock::now();
      for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
        hipLaunchKernelGGL(( set_with_value_util_kernel), dim3(gridBlock),dim3(threadBlock), 0, 0, buf,v,elem_count);
      }
      auto end = steady_clock::now();
      auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
      cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
    }
  }
}
07668428ac8d59e8546f8ecade34e46831e909ac.cu
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "set_with_value_util_kernel.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;

int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};

int main(int argc, char **argv) {
  cudaSetDevice(0);
  char* p;
  int matrix_len=strtol(argv[1], &p, 10);
  for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
    for(int block_looper=0;block_looper<20;block_looper++){
      int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
      double2 __restrict *buf = NULL;
      cudaMalloc(&buf, XSIZE*YSIZE);
      double v = 1;
      int elem_count = 1;
      int iXSIZE= XSIZE;
      int iYSIZE= YSIZE;
      while(iXSIZE%BLOCKX!=0) { iXSIZE++; }
      while(iYSIZE%BLOCKY!=0) { iYSIZE++; }
      dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
      dim3 threadBlock(BLOCKX, BLOCKY);
      cudaFree(0);
      set_with_value_util_kernel<<<gridBlock,threadBlock>>>(buf,v,elem_count);
      cudaDeviceSynchronize();
      for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
        set_with_value_util_kernel<<<gridBlock,threadBlock>>>(buf,v,elem_count);
      }
      auto start = steady_clock::now();
      for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
        set_with_value_util_kernel<<<gridBlock,threadBlock>>>(buf,v,elem_count);
      }
      auto end = steady_clock::now();
      auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
      cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
    }
  }
}
4181745b1f7817766144a41bcad502d558916094.hip
// !!! This is a file automatically generated by hipify!!! /** * jrc_cuda_rho.cu * block loading rho calculation. should be much faster * system('nvcc -ptx -m 64 -arch sm_35 jrc_cuda_rho.cu') * i1 is multiple of chunk (16) * J. James Jun, Vidrio Technologies, LLC., 2017 Jun 11 * 7/13/17: fDc_spk option added, which uses spike-specific distance cut-off (dc) */ #include <hip/hip_runtime.h> // #include "rocblas.h" #include <math.h> #define ABS(my_val) ((my_val) < 0) ? (-1*(my_val)) : (my_val) #define MIN(A,B) ((A)<(B)) ? (A) : (B) #define MAX(A,B) ((A)>(B)) ? (A) : (B) #define NTHREADS 128 #define NC 45 //max dimm #define CHUNK 16 #define SINGLE_INF (3.402E+38) // equipvalent to NAN. consider -1 value /** Main entry point. * Works out where the current thread should read/write to global memory * and calls doIterations to do the actual work. * Step through one B at a time */ __global__ void jrc_cuda_rho(float * vrRho1, const float * mrFet12, const int * viiSpk12_ord, const int * vnConst, const float dc2){ //__global__ void jrc_cuda_rho(int *vnRho1, int *vnComp1, float const *mrFet12, int const *viiSpk12_ord, int const *vnC4, float const dc2){ int i1 = (blockIdx.x + blockIdx.y * gridDim.x) * CHUNK; // base index of i1 int tx = threadIdx.x; //nThreadsGPU for i12 index int i1_tx = i1+tx; int n1 = vnConst[0]; int n12 = vnConst[1]; int nC = vnConst[2]; int dn_max = vnConst[3]; int fDc_spk = vnConst[4]; __shared__ int viiSpk1_ord_[CHUNK]; __shared__ float mrFet1_[NC][CHUNK]; __shared__ int mnRho1_[NTHREADS][CHUNK]; // count then divide later __shared__ int mnComp1_[NTHREADS][CHUNK]; // count number of elements compared __shared__ float vrDc1_[CHUNK]; // use if fDc_spk=1 // cache shared memory if (tx < nC){ //use tx as iC for (int i_c = 0; i_c < CHUNK; ++i_c){ int i1_c = i_c + i1; if (i1_c < n1){ mrFet1_[tx][i_c] = mrFet12[tx + i1_c * nC]; }else{ mrFet1_[tx][i_c] = 0.0f; } } } if (tx < CHUNK && i1_tx < n1) viiSpk1_ord_[tx] = viiSpk12_ord[i1_tx]; for (int i_c = 0; i_c < CHUNK; ++i_c){ mnRho1_[tx][i_c] = 0; // initialize rho mnComp1_[tx][i_c] = 0; } // calculate spike-specific distance cut-off vrDc1_ only if fDc_spk==1 if (tx < CHUNK && fDc_spk==1){ vrDc1_[tx] = 0.0f; //init //for (int iC = 0; iC < 1; ++iC){ //center only scale for (int iC = 0; iC < nC; ++iC){ float temp_ = mrFet1_[iC][tx]; vrDc1_[tx] += (temp_ * temp_); } vrDc1_[tx] *= dc2; } __syncthreads(); // Inspect distance relationship between i1 and i12_tx for (int i12_tx = tx; i12_tx < n12; i12_tx += blockDim.x){ //for (int i12_tx = 1; i12_tx < n12; ++i12_tx){ // compute time difference //char vlDist_c[CHUNK]; int iiSpk12_ord_tx = viiSpk12_ord[i12_tx]; /*for (int i_c = 0; i_c < CHUNK; ++i_c){ int di_spk_tx = ABS(viiSpk1_ord_[i_c] - iiSpk12_ord_tx); vlDist_c[i_c] = (di_spk_tx <= dn_max); } */ // compute distance float vrDist_c[CHUNK]; for (int i_c = 0; i_c < CHUNK; ++i_c) vrDist_c[i_c] = 0.0f; for (int iC = 0; iC < nC; ++iC){ float fet12_tx = mrFet12[iC + i12_tx * nC]; for (int i_c = 0; i_c < CHUNK; ++i_c){ float temp = fet12_tx - mrFet1_[iC][i_c]; vrDist_c[i_c] += temp * temp; } } // Compare the index and distance for (int i_c = 0; i_c < CHUNK; ++i_c){ int di_spk_tx = ABS(viiSpk1_ord_[i_c] - iiSpk12_ord_tx); if (di_spk_tx <= dn_max){ //if (vlDist_c[i_c] == 1){ ++mnComp1_[tx][i_c]; if (fDc_spk==0){ if (vrDist_c[i_c] <= dc2) ++mnRho1_[tx][i_c]; }else{ if (vrDist_c[i_c] < vrDc1_[i_c]) ++mnRho1_[tx][i_c]; } } } } // while // final count __syncthreads(); //if (tx < CHUNK && i1_tx < n1){ // use tx as i_c if (tx < CHUNK){ // use tx as i_c int nRho1 = 
0; int nComp1 = 0; for (int tx1=0; tx1<blockDim.x; ++tx1){ nRho1 += mnRho1_[tx1][tx]; nComp1 += mnComp1_[tx1][tx]; } if (i1_tx < n1){ //if (nRho1<1) nRho1 = 1; vrRho1[i1_tx] = (float)(((double)(nRho1)) / ((double)nComp1)); } // vnRho1[i1 + i_c_] = nRho1 - 1; // vnComp1[i1 + i_c_] = nComp1; } //vnRho1[0] = blockDim.x; //debug //vnComp1[0] = blockDim.x; //debug } // func
4181745b1f7817766144a41bcad502d558916094.cu
/**
 * jrc_cuda_rho.cu
 * block loading rho calculation. should be much faster
 * system('nvcc -ptx -m 64 -arch sm_35 jrc_cuda_rho.cu')
 * i1 is multiple of chunk (16)
 * J. James Jun, Vidrio Technologies, LLC., 2017 Jun 11
 * 7/13/17: fDc_spk option added, which uses spike-specific distance cut-off (dc)
 */

#include <cuda_runtime.h>
// #include "cublas_v2.h"
#include <math.h>

#define ABS(my_val) ((my_val) < 0) ? (-1*(my_val)) : (my_val)
#define MIN(A,B) ((A)<(B)) ? (A) : (B)
#define MAX(A,B) ((A)>(B)) ? (A) : (B)
#define NTHREADS 128
#define NC 45 // max dim
#define CHUNK 16
#define SINGLE_INF (3.402E+38) // equivalent to NAN. consider -1 value

/** Main entry point.
 * Works out where the current thread should read/write to global memory
 * and calls doIterations to do the actual work.
 * Step through one B at a time
 */
__global__ void jrc_cuda_rho(float * vrRho1, const float * mrFet12, const int * viiSpk12_ord, const int * vnConst, const float dc2){
//__global__ void jrc_cuda_rho(int *vnRho1, int *vnComp1, float const *mrFet12, int const *viiSpk12_ord, int const *vnC4, float const dc2){
    int i1 = (blockIdx.x + blockIdx.y * gridDim.x) * CHUNK; // base index of i1
    int tx = threadIdx.x; // nThreadsGPU for i12 index
    int i1_tx = i1 + tx;
    int n1 = vnConst[0];
    int n12 = vnConst[1];
    int nC = vnConst[2];
    int dn_max = vnConst[3];
    int fDc_spk = vnConst[4];

    __shared__ int viiSpk1_ord_[CHUNK];
    __shared__ float mrFet1_[NC][CHUNK];
    __shared__ int mnRho1_[NTHREADS][CHUNK];  // count then divide later
    __shared__ int mnComp1_[NTHREADS][CHUNK]; // count number of elements compared
    __shared__ float vrDc1_[CHUNK];           // use if fDc_spk=1

    // cache shared memory
    if (tx < nC){ // use tx as iC
        for (int i_c = 0; i_c < CHUNK; ++i_c){
            int i1_c = i_c + i1;
            if (i1_c < n1){
                mrFet1_[tx][i_c] = mrFet12[tx + i1_c * nC];
            }else{
                mrFet1_[tx][i_c] = 0.0f;
            }
        }
    }
    if (tx < CHUNK && i1_tx < n1) viiSpk1_ord_[tx] = viiSpk12_ord[i1_tx];
    for (int i_c = 0; i_c < CHUNK; ++i_c){
        mnRho1_[tx][i_c] = 0; // initialize rho
        mnComp1_[tx][i_c] = 0;
    }

    // calculate spike-specific distance cut-off vrDc1_ only if fDc_spk==1
    if (tx < CHUNK && fDc_spk==1){
        vrDc1_[tx] = 0.0f; // init
        //for (int iC = 0; iC < 1; ++iC){ // center only scale
        for (int iC = 0; iC < nC; ++iC){
            float temp_ = mrFet1_[iC][tx];
            vrDc1_[tx] += (temp_ * temp_);
        }
        vrDc1_[tx] *= dc2;
    }
    __syncthreads();

    // Inspect distance relationship between i1 and i12_tx
    for (int i12_tx = tx; i12_tx < n12; i12_tx += blockDim.x){
    //for (int i12_tx = 1; i12_tx < n12; ++i12_tx){
        // compute time difference
        //char vlDist_c[CHUNK];
        int iiSpk12_ord_tx = viiSpk12_ord[i12_tx];
        /*for (int i_c = 0; i_c < CHUNK; ++i_c){
            int di_spk_tx = ABS(viiSpk1_ord_[i_c] - iiSpk12_ord_tx);
            vlDist_c[i_c] = (di_spk_tx <= dn_max);
        }
        */

        // compute distance
        float vrDist_c[CHUNK];
        for (int i_c = 0; i_c < CHUNK; ++i_c) vrDist_c[i_c] = 0.0f;
        for (int iC = 0; iC < nC; ++iC){
            float fet12_tx = mrFet12[iC + i12_tx * nC];
            for (int i_c = 0; i_c < CHUNK; ++i_c){
                float temp = fet12_tx - mrFet1_[iC][i_c];
                vrDist_c[i_c] += temp * temp;
            }
        }

        // Compare the index and distance
        for (int i_c = 0; i_c < CHUNK; ++i_c){
            int di_spk_tx = ABS(viiSpk1_ord_[i_c] - iiSpk12_ord_tx);
            if (di_spk_tx <= dn_max){
            //if (vlDist_c[i_c] == 1){
                ++mnComp1_[tx][i_c];
                if (fDc_spk==0){
                    if (vrDist_c[i_c] <= dc2) ++mnRho1_[tx][i_c];
                }else{
                    if (vrDist_c[i_c] < vrDc1_[i_c]) ++mnRho1_[tx][i_c];
                }
            }
        }
    } // while

    // final count
    __syncthreads();
    //if (tx < CHUNK && i1_tx < n1){ // use tx as i_c
    if (tx < CHUNK){ // use tx as i_c
        int nRho1 = 0;
        int nComp1 = 0;
        for (int tx1 = 0; tx1 < blockDim.x; ++tx1){
            nRho1 += mnRho1_[tx1][tx];
            nComp1 += mnComp1_[tx1][tx];
        }
        if (i1_tx < n1){
            //if (nRho1<1) nRho1 = 1;
            vrRho1[i1_tx] = (float)(((double)(nRho1)) / ((double)nComp1));
        }
        // vnRho1[i1 + i_c_] = nRho1 - 1;
        // vnComp1[i1 + i_c_] = nComp1;
    }
    //vnRho1[0] = blockDim.x; //debug
    //vnComp1[0] = blockDim.x; //debug
} // func
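// --- Illustrative host-side driver (not part of jrc_cuda_rho.cu) ------------
// A minimal sketch of how the kernel above could be launched from C++ rather
// than from MATLAB via the PTX module JRCLUST normally uses. It assumes it
// lives in the same translation unit as the kernel, so CHUNK and NTHREADS are
// the macros defined above; the wrapper name and argument layout are
// hypothetical.
#include <cuda_runtime.h>

void launch_jrc_cuda_rho(float* d_vrRho1,            // [n1] output: rho per primary spike
                         const float* d_mrFet12,     // [nC x n12] feature matrix
                         const int* d_viiSpk12_ord,  // [n12] spike time-order ranks
                         int n1, int n12, int nC,
                         int dn_max, int fDc_spk, float dc2) {
    // The kernel unpacks its integer parameters from vnConst[0..4].
    int h_vnConst[5] = {n1, n12, nC, dn_max, fDc_spk};
    int* d_vnConst = nullptr;
    cudaMalloc(&d_vnConst, sizeof(h_vnConst));
    cudaMemcpy(d_vnConst, h_vnConst, sizeof(h_vnConst), cudaMemcpyHostToDevice);

    // Each block covers CHUNK (=16) primary spikes. The kernel flattens a 2D
    // grid via blockIdx.x + blockIdx.y * gridDim.x, so split the block count
    // across y when it would exceed the per-dimension grid limit.
    int nBlocks = (n1 + CHUNK - 1) / CHUNK;
    int gx = nBlocks < 65535 ? nBlocks : 65535;
    int gy = (nBlocks + gx - 1) / gx;
    jrc_cuda_rho<<<dim3(gx, gy), NTHREADS>>>(d_vrRho1, d_mrFet12,
                                             d_viiSpk12_ord, d_vnConst, dc2);
    cudaDeviceSynchronize();
    cudaFree(d_vnConst);
}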
cc5d8ee950461d7a5c9dc40ab5114d7ecf707470.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* * Copyright 1993-2017 NVIDIA Corporation. All rights reserved. * * Please refer to the NVIDIA end user license agreement (EULA) associated * with this source code for terms and conditions that govern your use of * this software. Any use, reproduction, disclosure, or distribution of * this software and related documentation outside the terms of the EULA * is strictly prohibited. * */ // CUDA sample demonstrating a GEMM computation using the Warp Matrix Multiply // and Accumulate API introduced in CUDA 9. // In this program, the compute_gemm kernel computes the result of a matrix // multiplication and addition: D = alpha * A * B + beta * C. The dimensions of // both C and D matrices are M_GLOBAL x N_GLOBAL. The A matrix is M_GLOBAL x // K_GLOBAL (row-major), the B matrix is K_GLOBAL x N_GLOBAL (column-major). In // that kernel, each CTA computes one 128 x 128 tile of the resulting matrix per // iteration. When the tile is computed, the CTA stores it to the global memory // and begins a new iteration, selecting a new 128 x 128 tile to compute. // Each CTA consists of eight warps. For the 128 x 128 tile, each warp computes // eight 16 x 16 subtiles, organized in a 2 x 4 two-dimensional array. Warps // compute the 16 x 16 subtiles using nvcuda::wmma::mma_sync operations by // moving through the K_GLOBAL dimension of the A and B matrices and // accumulating the intermediate result in the local thread state. // There are a number of simple optimizations used in the algorithm: // - The CTA copies the 128 x 128 tile of the C matrix from the global memory to // shared memory. After that is done, each warp loads the C matrix fragments // from shared memory, thus avoiding a random global memory access. // - On each internal iteration, the CTA copies a portion of the A and B // matrices from // global memory to shared memory. After that, all warps in the CTA reuse the // A and B data from shared memory, thus reducing the number of data copies // from global memory. // - The portions of the A and B matrices are stored in shared memory with an // additional // padding (skew) to reduce the number of shared memory access bank conflicts. // (See a detailed explanation near the SKEW_HALF macro definition.) // - When the CTA finishes computing the tiles of the resulting matrix, each // warp stores // its subtiles to shared memory. The CTA then copies the shared memory // contents to global memory, again avoiding redundant random global memory // accesses. // - Note that the CTA tile size is chosen to maximize the GPU register // utilization, // but carefully enough to avoid local memory use. // cuda-9.1_samples/0_Simple/cudaTensorCoreGemm #include <benchmark/benchmark.h> #include "gemm/args.hpp" #include "init/init.hpp" #include "utils/utils.hpp" #include <assert.h> #include <mma.h> #include <stdio.h> #ifndef WARP_SIZE #define WARP_SIZE (32) #endif // WARP_SIZE // MMA matrix tile dimensions. (16, 16, 16), (32, 8, 16), and (8, 32, 16) are // currently supported. static const int M = 16; static const int N = 16; static const int K = 16; // Implementation constants. 
static const int WARPS_PER_BLOCK = 8; static const int THREADS_PER_BLOCK = (WARP_SIZE * WARPS_PER_BLOCK); static const int CHUNK_K = 8; static const int BLOCK_ROW_WARPS = 2; static const int BLOCK_COL_WARPS = 4; static const int WARP_ROW_TILES = 4; static const int WARP_COL_TILES = 2; static const int BLOCK_ROW_TILES = (WARP_ROW_TILES * BLOCK_ROW_WARPS); static const int BLOCK_COL_TILES = (WARP_COL_TILES * BLOCK_COL_WARPS); static const int SHMEM_STRIDE = (N * BLOCK_ROW_TILES); static const int SHMEM_OFFSET = (N * WARP_ROW_TILES); #define C_LAYOUT wmma::mem_row_major // The macro below is used to shift rows of the A matrix and columns of the B // matrix in shared memory to minimize possible bank conflicts. Before // performing the nvcuda::wmma::mma_sync operation, the warp must load the // matrix data using the nvcuda::wmma::load_matrix_sync operation. Although the // memory access pattern is not specified for that function, each lane in the // warp can read one or multiple matrix elements from different matrix rows or // columns. For shared memory, such access can result in bank conflicts if // different rows / columns of the matrix map to the same bank. By shifting each // row and column by a few bytes, we make sure that they map to different banks, // thus reducing the number of possible bank conflicts. The number of 8 two-byte // "half" elements is chosen as the minimum possible shift because we must keep // each row and column 128-bit aligned, as required by // nvcuda::wmma::load_matrix_sync. static const int SKEW_HALF = 8; using namespace nvcuda; __host__ void init_host_matrices(float *a, float *b, float *c, int M_GLOBAL, int N_GLOBAL, int K_GLOBAL) { for (int i = 0; i < M_GLOBAL; i++) { for (int j = 0; j < K_GLOBAL; j++) { a[i * K_GLOBAL + j] = (float) (rand() % 3); } } for (int i = 0; i < N_GLOBAL; i++) { for (int j = 0; j < K_GLOBAL; j++) { b[i * K_GLOBAL + j] = (float) (rand() % 3); } } for (int t = 0; t < M_GLOBAL * N_GLOBAL; t++) { c[t] = (float) (rand() % 3); } } __global__ void init_gemm_device_matrices(const float *A_h, const float *B_h, const float *C_h, half *A, half *B, float *C, float *D, int M_GLOBAL, int N_GLOBAL, int K_GLOBAL) { for (int i = blockDim.x * blockIdx.x + threadIdx.x; i < M_GLOBAL * K_GLOBAL; i += gridDim.x * blockDim.x) A[i] = __float2half(A_h[i]); for (int i = blockDim.x * blockIdx.x + threadIdx.x; i < N_GLOBAL * K_GLOBAL; i += gridDim.x * blockDim.x) B[i] = __float2half(B_h[i]); for (int i = blockDim.x * blockIdx.x + threadIdx.x; i < M_GLOBAL * N_GLOBAL; i += gridDim.x * blockDim.x) C[i] = C_h[i]; for (int i = blockDim.x * blockIdx.x + threadIdx.x; i < M_GLOBAL * N_GLOBAL; i += gridDim.x * blockDim.x) D[i] = 0; } __global__ void init_hgemm_device_matrices(const float *A_h, const float *B_h, const float *C_h, half *A, half *B, half *C, half *D, int M_GLOBAL, int N_GLOBAL, int K_GLOBAL) { for (int i = blockDim.x * blockIdx.x + threadIdx.x; i < M_GLOBAL * K_GLOBAL; i += gridDim.x * blockDim.x) A[i] = __float2half(A_h[i]); for (int i = blockDim.x * blockIdx.x + threadIdx.x; i < N_GLOBAL * K_GLOBAL; i += gridDim.x * blockDim.x) B[i] = __float2half(B_h[i]); for (int i = blockDim.x * blockIdx.x + threadIdx.x; i < M_GLOBAL * N_GLOBAL; i += gridDim.x * blockDim.x) C[i] = __float2half(C_h[i]); for (int i = blockDim.x * blockIdx.x + threadIdx.x; i < M_GLOBAL * N_GLOBAL; i += gridDim.x * blockDim.x) D[i] = zero<half>(); } __global__ void compute_gemm(const half *A, const half *B, const float *C, float *D, float alpha, float beta, int M_GLOBAL, int N_GLOBAL, int 
K_GLOBAL) { extern __shared__ half shmem[][CHUNK_K * K + SKEW_HALF]; const auto M_TILES = M_GLOBAL / M; const auto N_TILES = N_GLOBAL / N; const auto K_TILES = K_GLOBAL / K; // Warp and lane identification. const unsigned int warpId = threadIdx.x / WARP_SIZE; const unsigned int laneId = threadIdx.x % WARP_SIZE; // Offset in shared memory from which the B matrix is stored. const size_t shmem_idx_b_off = BLOCK_COL_TILES * M; // This pointer is used to access the C and D matrix tiles this warp computes. float *shmem_warp_tile_ptr = (float *) &shmem[0][0] + (warpId / 2) * SHMEM_STRIDE * K * 2 + (warpId % 2) * SHMEM_OFFSET; // This pointer is used to stream the C and D matrices block-wide tile to and // from shared memory. float *shmem_warp_stream_ptr = (float *) &shmem[0][0] + warpId * SHMEM_STRIDE * K; // Adjust the beta scaler, as it'll be multiplied by alpha at the end of // each tile computation. Technically this is not generally correct (may // result in a loss of precision). Zero still needs to be specially handled // though. beta /= alpha; // Each CTA slides along the 128 x 128 tiles from the top left corner of the // matrix to the right and down, and selects the next tile to compute. Once // there's no such tile, all warps in this CTA exit. for (unsigned int block_pos = blockIdx.x;; block_pos += gridDim.x) { const unsigned int block_tile_i = ((block_pos * BLOCK_COL_TILES) / N_TILES) * (BLOCK_ROW_WARPS * WARP_ROW_TILES); const unsigned int block_tile_j = (block_pos * BLOCK_COL_TILES) % N_TILES; // Stop when there are no more D matrix tiles to compute in this CTA. if (block_tile_i >= M_TILES) { break; } // This warp's pointer to the C matrix data to copy memory from to shared // memory. const size_t gmem_idx = (block_tile_i + warpId) * M * N_GLOBAL + block_tile_j * N; const float *src_gmem_warp_stream_ptr = &C[gmem_idx]; // Stream multiple C tiles to shared memory. #pragma unroll for (int i = 0; i < K; i++) { typedef int4 copy_t; *((copy_t *) (shmem_warp_stream_ptr + SHMEM_STRIDE * i) + laneId) = *((copy_t *) (src_gmem_warp_stream_ptr + N_GLOBAL * i) + laneId); } __syncthreads(); // These fragments will accumulate the result of A and B matrix fragment // multiplications along the K_GLOBAL dimension. wmma::fragment<wmma::accumulator, M, N, K, float> c[WARP_COL_TILES][WARP_ROW_TILES]; // Load the C matrix tiles into fragments from shared memory. #pragma unroll for (int i = 0; i < WARP_COL_TILES; i++) { #pragma unroll for (int j = 0; j < WARP_ROW_TILES; j++) { const float *tile_ptr = shmem_warp_tile_ptr + i * SHMEM_STRIDE * K + j * N; wmma::load_matrix_sync(c[i][j], tile_ptr, SHMEM_STRIDE, C_LAYOUT); } } __syncthreads(); // Scale the C matrix. #pragma unroll for (int i = 0; i < WARP_COL_TILES; i++) { #pragma unroll for (int j = 0; j < WARP_ROW_TILES; j++) { #pragma unroll for (int t = 0; t < c[i][j].num_elements; t++) { c[i][j].x[t] *= beta; } } } // Select what warp copies what matrix to shared memory. // Warps 0-3 copy the A matrix, warps 4-7 copy the B matrix. const half *warp_ptr = (warpId < 4) ? (&A[block_tile_i * M * K_GLOBAL] + M * K_GLOBAL * (warpId % 4) * 2) : (&B[block_tile_j * N * K_GLOBAL] + N * K_GLOBAL * (warpId % 4) * 2); // Go through the global K dimension by a fixed step at a time. #pragma unroll for (int tile_k = 0; tile_k < K_TILES; tile_k += CHUNK_K) { // Copy slices of the A and B matrices to shared memory. // The first half of the warps in the CTA copy the A matrix, the rest copy // the B matrix. size_t shmem_idx = warpId < (WARPS_PER_BLOCK / 2) ? 
(M * (warpId % (WARPS_PER_BLOCK / 2)) * 2) : (N * (warpId % (WARPS_PER_BLOCK / 2)) * 2 + shmem_idx_b_off); // First half of the warp copies the first row / column of the matrix, // the second half of the warp copies the next. int4 *lane_ptr = (int4 *) (warp_ptr + tile_k * K + (laneId / (WARP_SIZE / 2)) * K_GLOBAL) + (laneId % (WARP_SIZE / 2)); // Shift the second half of the warp to the next row / column in the // shared memory. shmem_idx += laneId / (WARP_SIZE / 2); #pragma unroll for (int i = 0; i < (WARP_SIZE / 2); i++) { // Copy 16 bytes at once in each lane. *((int4 *) &shmem[shmem_idx][0] + (laneId % (WARP_SIZE / 2))) = *lane_ptr; // Advance the global memory pointer and the shared memory index. lane_ptr = (int4 *) ((half *) lane_ptr + K_GLOBAL * 2); shmem_idx += 2; } __syncthreads(); // Compute a grid of C matrix tiles in each warp. #pragma unroll for (int k_step = 0; k_step < CHUNK_K; k_step++) { wmma::fragment<wmma::matrix_a, M, N, K, half, wmma::row_major> a[WARP_COL_TILES]; wmma::fragment<wmma::matrix_b, M, N, K, half, wmma::col_major> b[WARP_ROW_TILES]; #pragma unroll for (int i = 0; i < WARP_COL_TILES; i++) { size_t shmem_idx_a = (warpId / 2) * M * 2 + (i * M); const half *tile_ptr = &shmem[shmem_idx_a][k_step * K]; wmma::load_matrix_sync(a[i], tile_ptr, K * CHUNK_K + SKEW_HALF); #pragma unroll for (int j = 0; j < WARP_ROW_TILES; j++) { if (i == 0) { // Load the B matrix fragment once, because it is going to be // reused against the other A matrix fragments. size_t shmem_idx_b = shmem_idx_b_off + (WARP_ROW_TILES * N) * (warpId % 2) + (j * N); const half *tile_ptr = &shmem[shmem_idx_b][k_step * K]; wmma::load_matrix_sync(b[j], tile_ptr, K * CHUNK_K + SKEW_HALF); } wmma::mma_sync(c[i][j], a[i], b[j], c[i][j]); } } } __syncthreads(); } // Store the D fragments to shared memory. #pragma unroll for (int i = 0; i < WARP_COL_TILES; i++) { #pragma unroll for (int j = 0; j < WARP_ROW_TILES; j++) { #pragma unroll // Uniform, point-wise transformations of ALL fragment elements by ALL // threads in the warp are well-defined even though element indices // within fragment storage are not defined. for (int t = 0; t < c[i][j].num_elements; t++) c[i][j].x[t] *= alpha; float *tile_ptr = shmem_warp_tile_ptr + i * SHMEM_STRIDE * K + j * N; wmma::store_matrix_sync(tile_ptr, c[i][j], SHMEM_STRIDE, C_LAYOUT); } } __syncthreads(); // Now that shared memory contains all the D tiles, stream them to global // memory. float *dst_gmem_warp_stream_ptr = &D[gmem_idx]; #pragma unroll for (int i = 0; i < K; i++) { *((int4 *) (dst_gmem_warp_stream_ptr + N_GLOBAL * i) + laneId) = *((int4 *) (shmem_warp_stream_ptr + SHMEM_STRIDE * i) + laneId); } __syncthreads(); } } __global__ void compute_hgemm(const half *A, const half *B, const half *C, half *D, half alpha, half beta, int M_GLOBAL, int N_GLOBAL, int K_GLOBAL) { extern __shared__ half shmem[][CHUNK_K * K + SKEW_HALF]; const auto M_TILES = M_GLOBAL / M; const auto N_TILES = N_GLOBAL / N; const auto K_TILES = K_GLOBAL / K; // Warp and lane identification. const unsigned int warpId = threadIdx.x / WARP_SIZE; const unsigned int laneId = threadIdx.x % WARP_SIZE; // Offset in shared memory from which the B matrix is stored. const size_t shmem_idx_b_off = BLOCK_COL_TILES * M; // This pointer is used to access the C and D matrix tiles this warp computes. 
half *shmem_warp_tile_ptr = (half *) &shmem[0][0] + (warpId / 2) * SHMEM_STRIDE * K * 2 + (warpId % 2) * SHMEM_OFFSET; // This pointer is used to stream the C and D matrices block-wide tile to and // from shared memory. half *shmem_warp_stream_ptr = (half *) &shmem[0][0] + warpId * SHMEM_STRIDE * K; // Adjust the beta scaler, as it'll be multiplied by alpha at the end of // each tile computation. Technically this is not generally correct (may // result in a loss of precision). Zero still needs to be specially handled // though. beta /= alpha; // Each CTA slides along the 128 x 128 tiles from the top left corner of the // matrix to the right and down, and selects the next tile to compute. Once // there's no such tile, all warps in this CTA exit. for (unsigned int block_pos = blockIdx.x;; block_pos += gridDim.x) { const unsigned int block_tile_i = ((block_pos * BLOCK_COL_TILES) / N_TILES) * (BLOCK_ROW_WARPS * WARP_ROW_TILES); const unsigned int block_tile_j = (block_pos * BLOCK_COL_TILES) % N_TILES; // Stop when there are no more D matrix tiles to compute in this CTA. if (block_tile_i >= M_TILES) { break; } // This warp's pointer to the C matrix data to copy memory from to shared // memory. const size_t gmem_idx = (block_tile_i + warpId) * M * N_GLOBAL + block_tile_j * N; const half *src_gmem_warp_stream_ptr = &C[gmem_idx]; // Stream multiple C tiles to shared memory. #pragma unroll for (int i = 0; i < K; i++) { typedef int2 copy_t; *((copy_t *) (shmem_warp_stream_ptr + SHMEM_STRIDE * i) + laneId) = *((copy_t *) (src_gmem_warp_stream_ptr + N_GLOBAL * i) + laneId); } __syncthreads(); // These fragments will accumulate the result of A and B matrix fragment // multiplications along the K_GLOBAL dimension. wmma::fragment<wmma::accumulator, M, N, K, half> c[WARP_COL_TILES][WARP_ROW_TILES]; // Load the C matrix tiles into fragments from shared memory. #pragma unroll for (int i = 0; i < WARP_COL_TILES; i++) { #pragma unroll for (int j = 0; j < WARP_ROW_TILES; j++) { const half *tile_ptr = shmem_warp_tile_ptr + i * SHMEM_STRIDE * K + j * N; wmma::load_matrix_sync(c[i][j], tile_ptr, SHMEM_STRIDE, C_LAYOUT); } } __syncthreads(); // Scale the C matrix. #pragma unroll for (int i = 0; i < WARP_COL_TILES; i++) { #pragma unroll for (int j = 0; j < WARP_ROW_TILES; j++) { #pragma unroll for (int t = 0; t < c[i][j].num_elements; t++) { c[i][j].x[t] *= beta; } } } // Select what warp copies what matrix to shared memory. // Warps 0-3 copy the A matrix, warps 4-7 copy the B matrix. const half *warp_ptr = (warpId < 4) ? (&A[block_tile_i * M * K_GLOBAL] + M * K_GLOBAL * (warpId % 4) * 2) : (&B[block_tile_j * N * K_GLOBAL] + N * K_GLOBAL * (warpId % 4) * 2); // Go through the global K dimension by a fixed step at a time. #pragma unroll for (int tile_k = 0; tile_k < K_TILES; tile_k += CHUNK_K) { // Copy slices of the A and B matrices to shared memory. // The first half of the warps in the CTA copy the A matrix, the rest copy // the B matrix. size_t shmem_idx = warpId < (WARPS_PER_BLOCK / 2) ? (M * (warpId % (WARPS_PER_BLOCK / 2)) * 2) : (N * (warpId % (WARPS_PER_BLOCK / 2)) * 2 + shmem_idx_b_off); // First half of the warp copies the first row / column of the matrix, // the second half of the warp copies the next. int2 *lane_ptr = (int2 *) (warp_ptr + tile_k * K + (laneId / (WARP_SIZE / 2)) * K_GLOBAL) + (laneId % (WARP_SIZE / 2)); // Shift the second half of the warp to the next row / column in the // shared memory. 
shmem_idx += laneId / (WARP_SIZE / 2); #pragma unroll for (int i = 0; i < (WARP_SIZE / 2); i++) { // Copy 16 bytes at once in each lane. *((int2 *) &shmem[shmem_idx][0] + (laneId % (WARP_SIZE / 2))) = *lane_ptr; // Advance the global memory pointer and the shared memory index. lane_ptr = (int2 *) ((half *) lane_ptr + K_GLOBAL * 2); shmem_idx += 2; } __syncthreads(); // Compute a grid of C matrix tiles in each warp. #pragma unroll for (int k_step = 0; k_step < CHUNK_K; k_step++) { wmma::fragment<wmma::matrix_a, M, N, K, half, wmma::row_major> a[WARP_COL_TILES]; wmma::fragment<wmma::matrix_b, M, N, K, half, wmma::col_major> b[WARP_ROW_TILES]; #pragma unroll for (int i = 0; i < WARP_COL_TILES; i++) { size_t shmem_idx_a = (warpId / 2) * M * 2 + (i * M); const half *tile_ptr = &shmem[shmem_idx_a][k_step * K]; wmma::load_matrix_sync(a[i], tile_ptr, K * CHUNK_K + SKEW_HALF); #pragma unroll for (int j = 0; j < WARP_ROW_TILES; j++) { if (i == 0) { // Load the B matrix fragment once, because it is going to be // reused against the other A matrix fragments. size_t shmem_idx_b = shmem_idx_b_off + (WARP_ROW_TILES * N) * (warpId % 2) + (j * N); const half *tile_ptr = &shmem[shmem_idx_b][k_step * K]; wmma::load_matrix_sync(b[j], tile_ptr, K * CHUNK_K + SKEW_HALF); } wmma::mma_sync(c[i][j], a[i], b[j], c[i][j]); } } } __syncthreads(); } // Store the D fragments to shared memory. #pragma unroll for (int i = 0; i < WARP_COL_TILES; i++) { #pragma unroll for (int j = 0; j < WARP_ROW_TILES; j++) { #pragma unroll // Uniform, point-wise transformations of ALL fragment elements by ALL // threads in the warp are well-defined even though element indices // within fragment storage are not defined. for (int t = 0; t < c[i][j].num_elements; t++) c[i][j].x[t] *= alpha; half *tile_ptr = shmem_warp_tile_ptr + i * SHMEM_STRIDE * K + j * N; wmma::store_matrix_sync(tile_ptr, c[i][j], SHMEM_STRIDE, C_LAYOUT); } } __syncthreads(); // Now that shared memory contains all the D tiles, stream them to global // memory. half *dst_gmem_warp_stream_ptr = &D[gmem_idx]; #pragma unroll for (int i = 0; i < K; i++) { *((int2 *) (dst_gmem_warp_stream_ptr + N_GLOBAL * i) + laneId) = *((int2 *) (shmem_warp_stream_ptr + SHMEM_STRIDE * i) + laneId); } __syncthreads(); } } static void CUDA_WMMA_GEMM(benchmark::State &state) { /* if (!has_cuda) { */ /* state.SkipWithError(fmt::format("CUDA_WMMA_GEMM no CUDA device found")); */ /* return; */ /* } */ // M_GLOBAL, N_GLOBAL, K_GLOBAL must be multiple of M, N and K const auto M_GLOBAL = state.range(0); const auto N_GLOBAL = state.range(1); const auto K_GLOBAL = state.range(2); const float alpha = 1.1f; const float beta = 1.2f; int dev = 0; hipDeviceProp_t deviceProp; PRINT_IF_ERROR(hipGetDeviceProperties(&deviceProp, dev)); // Tensor cores require a GPU of Volta (SM7X) architecture or higher. if (deviceProp.major < 7) { printf("cudaTensorCoreGemm requires requires SM 7.0 or higher to use " "Tensor Cores. 
Exiting...\n"); exit(-1); } float *A_h = NULL; float *B_h = NULL; float *C_h = NULL; PRINT_IF_ERROR(hipMallocManaged((void **) &A_h, sizeof(float) * M_GLOBAL * K_GLOBAL)); PRINT_IF_ERROR(hipMallocManaged((void **) &B_h, sizeof(float) * K_GLOBAL * N_GLOBAL)); PRINT_IF_ERROR(hipMallocManaged((void **) &C_h, sizeof(float) * M_GLOBAL * N_GLOBAL)); half *A = NULL; half *B = NULL; float *C = NULL; float *D = NULL; PRINT_IF_ERROR(hipMalloc((void **) &A, sizeof(half) * M_GLOBAL * K_GLOBAL)); PRINT_IF_ERROR(hipMalloc((void **) &B, sizeof(half) * N_GLOBAL * K_GLOBAL)); PRINT_IF_ERROR(hipMalloc((void **) &C, sizeof(float) * M_GLOBAL * N_GLOBAL)); PRINT_IF_ERROR(hipMalloc((void **) &D, sizeof(float) * M_GLOBAL * N_GLOBAL)); assert(((unsigned long long) A) % 128 == 0); assert(((unsigned long long) B) % 128 == 0); assert(((unsigned long long) C) % 128 == 0); assert(((unsigned long long) D) % 128 == 0); init_host_matrices(A_h, B_h, C_h, M_GLOBAL, N_GLOBAL, K_GLOBAL); PRINT_IF_LAUNCH_ERROR( hipLaunchKernelGGL(( (init_gemm_device_matrices), dim3(deviceProp.multiProcessorCount), dim3(THREADS_PER_BLOCK), 0, 0, A_h, B_h, C_h, A, B, C, D, M_GLOBAL, N_GLOBAL, K_GLOBAL))); PRINT_IF_ERROR(hipDeviceSynchronize()); enum { SHMEM_SZ = sizeof(half) * (BLOCK_COL_TILES * M) * (CHUNK_K * K + SKEW_HALF) * 2 }; // printf("Required shared memory size: %lu Kb\n", SHMEM_SZ / 1024UL); PRINT_IF_ERROR(hipFuncSetAttribute( compute_gemm, hipFuncAttributeMaxDynamicSharedMemorySize, SHMEM_SZ)); hipEvent_t start, stop; PRINT_IF_ERROR(hipEventCreate(&start)); PRINT_IF_ERROR(hipEventCreate(&stop)); for (auto _ : state) { PRINT_IF_ERROR(hipEventRecord(start)); hipLaunchKernelGGL(( (compute_gemm), dim3(deviceProp.multiProcessorCount), dim3(THREADS_PER_BLOCK), SHMEM_SZ, 0, A, B, C, D, alpha, beta, M_GLOBAL, N_GLOBAL, K_GLOBAL)); PRINT_IF_ERROR(hipEventRecord(stop)); PRINT_IF_ERROR(hipEventSynchronize(stop)); state.PauseTiming(); float msecTotal = 0.0f; PRINT_IF_ERROR(hipEventElapsedTime(&msecTotal, start, stop)); state.SetIterationTime(msecTotal / 1000); state.ResumeTiming(); } state.counters.insert({{"M", M_GLOBAL}, {"N", N_GLOBAL}, {"K", K_GLOBAL}, {"num_elements", M_GLOBAL * N_GLOBAL * K_GLOBAL}, {"flops", {state.iterations() * 2.0 * M_GLOBAL * N_GLOBAL * K_GLOBAL, benchmark::Counter::kAvgThreadsRate}}}); PRINT_IF_ERROR(hipEventDestroy(start)); PRINT_IF_ERROR(hipEventDestroy(stop)); PRINT_IF_ERROR(hipFree((void *) A_h)); PRINT_IF_ERROR(hipFree((void *) B_h)); PRINT_IF_ERROR(hipFree((void *) C_h)); PRINT_IF_ERROR(hipFree((void *) A)); PRINT_IF_ERROR(hipFree((void *) B)); PRINT_IF_ERROR(hipFree((void *) C)); PRINT_IF_ERROR(hipFree((void *) D)); hipDeviceReset(); } static void CUDA_WMMA_HGEMM(benchmark::State &state) { // M_GLOBAL, N_GLOBAL, K_GLOBAL must be multiple of M, N and K const auto M_GLOBAL = state.range(0); const auto N_GLOBAL = state.range(1); const auto K_GLOBAL = state.range(2); const __half alpha = approx_float_to_half(1.1f); const __half beta = approx_float_to_half(1.2f); int dev = 0; hipDeviceProp_t deviceProp; PRINT_IF_ERROR(hipGetDeviceProperties(&deviceProp, dev)); // Tensor cores require a GPU of Volta (SM7X) architecture or higher. if (deviceProp.major < 7) { printf("cudaTensorCoreGemm requires requires SM 7.0 or higher to use " "Tensor Cores. 
Exiting...\n"); exit(-1); } float *A_h = NULL; float *B_h = NULL; float *C_h = NULL; PRINT_IF_ERROR(hipMallocManaged((void **) &A_h, sizeof(float) * M_GLOBAL * K_GLOBAL)); PRINT_IF_ERROR(hipMallocManaged((void **) &B_h, sizeof(float) * K_GLOBAL * N_GLOBAL)); PRINT_IF_ERROR(hipMallocManaged((void **) &C_h, sizeof(float) * M_GLOBAL * N_GLOBAL)); half *A = NULL; half *B = NULL; half *C = NULL; half *D = NULL; PRINT_IF_ERROR(hipMalloc((void **) &A, sizeof(half) * M_GLOBAL * K_GLOBAL)); PRINT_IF_ERROR(hipMalloc((void **) &B, sizeof(half) * N_GLOBAL * K_GLOBAL)); PRINT_IF_ERROR(hipMalloc((void **) &C, sizeof(half) * M_GLOBAL * N_GLOBAL)); PRINT_IF_ERROR(hipMalloc((void **) &D, sizeof(half) * M_GLOBAL * N_GLOBAL)); assert(((unsigned long long) A) % 128 == 0); assert(((unsigned long long) B) % 128 == 0); assert(((unsigned long long) C) % 128 == 0); assert(((unsigned long long) D) % 128 == 0); init_host_matrices(A_h, B_h, C_h, M_GLOBAL, N_GLOBAL, K_GLOBAL); PRINT_IF_LAUNCH_ERROR( hipLaunchKernelGGL(( (init_hgemm_device_matrices), dim3(deviceProp.multiProcessorCount), dim3(THREADS_PER_BLOCK), 0, 0, A_h, B_h, C_h, A, B, C, D, M_GLOBAL, N_GLOBAL, K_GLOBAL))); PRINT_IF_ERROR(hipDeviceSynchronize()); enum { SHMEM_SZ = sizeof(half) * (BLOCK_COL_TILES * M) * (CHUNK_K * K + SKEW_HALF) * 2 }; // printf("Required shared memory size: %lu Kb\n", SHMEM_SZ / 1024UL); PRINT_IF_ERROR(hipFuncSetAttribute( compute_hgemm, hipFuncAttributeMaxDynamicSharedMemorySize, SHMEM_SZ)); hipEvent_t start, stop; PRINT_IF_ERROR(hipEventCreate(&start)); PRINT_IF_ERROR(hipEventCreate(&stop)); for (auto _ : state) { PRINT_IF_ERROR(hipEventRecord(start)); hipLaunchKernelGGL(( (compute_hgemm), dim3(deviceProp.multiProcessorCount), dim3(THREADS_PER_BLOCK), SHMEM_SZ, 0, A, B, C, D, alpha, beta, M_GLOBAL, N_GLOBAL, K_GLOBAL)); PRINT_IF_ERROR(hipEventRecord(stop)); PRINT_IF_ERROR(hipEventSynchronize(stop)); state.PauseTiming(); float msecTotal = 0.0f; PRINT_IF_ERROR(hipEventElapsedTime(&msecTotal, start, stop)); state.SetIterationTime(msecTotal / 1000); state.ResumeTiming(); } state.counters.insert({{"M", M_GLOBAL}, {"N", N_GLOBAL}, {"K", K_GLOBAL}, {"num_elements", M_GLOBAL * N_GLOBAL * K_GLOBAL}, {"flops", {state.iterations() * 2.0 * M_GLOBAL * N_GLOBAL * K_GLOBAL, benchmark::Counter::kAvgThreadsRate}}}); PRINT_IF_ERROR(hipEventDestroy(start)); PRINT_IF_ERROR(hipEventDestroy(stop)); PRINT_IF_ERROR(hipFree((void *) A_h)); PRINT_IF_ERROR(hipFree((void *) B_h)); PRINT_IF_ERROR(hipFree((void *) C_h)); PRINT_IF_ERROR(hipFree((void *) A)); PRINT_IF_ERROR(hipFree((void *) B)); PRINT_IF_ERROR(hipFree((void *) C)); PRINT_IF_ERROR(hipFree((void *) D)); hipDeviceReset(); } BENCHMARK(CUDA_WMMA_GEMM)->ARGS()->UseManualTime(); BENCHMARK(CUDA_WMMA_HGEMM)->ARGS()->UseManualTime();
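// --- Minimal WMMA illustration (not part of the benchmark above) ------------
// A stripped-down sketch of the load_matrix_sync / mma_sync / store_matrix_sync
// cycle that compute_gemm builds its tiled version around: one warp produces a
// single 16x16 tile of D = alpha*A*B + beta*C, with no shared-memory staging or
// skew. It assumes the same layouts as above (A row-major, B column-major, C/D
// row-major) and that M, N, K are multiples of 16; the kernel name is hypothetical.
#include <cuda_fp16.h>
#include <mma.h>
using namespace nvcuda;

__global__ void wmma_tile_gemm(const half* A, const half* B, const float* C,
                               float* D, int M, int N, int K,
                               float alpha, float beta) {
  // One warp per 16x16 output tile; blockDim.x must be a multiple of 32.
  int warp_id = (blockIdx.x * blockDim.x + threadIdx.x) / warpSize;
  int tiles_per_row = N / 16;
  int tile_i = warp_id / tiles_per_row;   // tile row in D
  int tile_j = warp_id % tiles_per_row;   // tile column in D
  if (tile_i * 16 >= M) return;

  wmma::fragment<wmma::matrix_a, 16, 16, 16, half, wmma::row_major> a_frag;
  wmma::fragment<wmma::matrix_b, 16, 16, 16, half, wmma::col_major> b_frag;
  wmma::fragment<wmma::accumulator, 16, 16, 16, float> acc, c_frag;
  wmma::fill_fragment(acc, 0.0f);

  // March along K in steps of 16, accumulating into acc.
  for (int k = 0; k < K; k += 16) {
    wmma::load_matrix_sync(a_frag, A + tile_i * 16 * K + k, K);  // A is row-major, ld = K
    wmma::load_matrix_sync(b_frag, B + tile_j * 16 * K + k, K);  // B is col-major, ld = K
    wmma::mma_sync(acc, a_frag, b_frag, acc);
  }

  // D = alpha * acc + beta * C for this tile.
  const float* c_tile = C + tile_i * 16 * N + tile_j * 16;
  wmma::load_matrix_sync(c_frag, c_tile, N, wmma::mem_row_major);
  for (int t = 0; t < acc.num_elements; ++t)
    acc.x[t] = alpha * acc.x[t] + beta * c_frag.x[t];
  wmma::store_matrix_sync(D + tile_i * 16 * N + tile_j * 16, acc, N,
                          wmma::mem_row_major);
}
// Launch with one warp per output tile, e.g. 256 threads (8 warps) per block:
//   wmma_tile_gemm<<<((M/16)*(N/16) + 7) / 8, 256>>>(A, B, C, D, M, N, K, alpha, beta);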
cc5d8ee950461d7a5c9dc40ab5114d7ecf707470.cu
/* * Copyright 1993-2017 NVIDIA Corporation. All rights reserved. * * Please refer to the NVIDIA end user license agreement (EULA) associated * with this source code for terms and conditions that govern your use of * this software. Any use, reproduction, disclosure, or distribution of * this software and related documentation outside the terms of the EULA * is strictly prohibited. * */ // CUDA sample demonstrating a GEMM computation using the Warp Matrix Multiply // and Accumulate API introduced in CUDA 9. // In this program, the compute_gemm kernel computes the result of a matrix // multiplication and addition: D = alpha * A * B + beta * C. The dimensions of // both C and D matrices are M_GLOBAL x N_GLOBAL. The A matrix is M_GLOBAL x // K_GLOBAL (row-major), the B matrix is K_GLOBAL x N_GLOBAL (column-major). In // that kernel, each CTA computes one 128 x 128 tile of the resulting matrix per // iteration. When the tile is computed, the CTA stores it to the global memory // and begins a new iteration, selecting a new 128 x 128 tile to compute. // Each CTA consists of eight warps. For the 128 x 128 tile, each warp computes // eight 16 x 16 subtiles, organized in a 2 x 4 two-dimensional array. Warps // compute the 16 x 16 subtiles using nvcuda::wmma::mma_sync operations by // moving through the K_GLOBAL dimension of the A and B matrices and // accumulating the intermediate result in the local thread state. // There are a number of simple optimizations used in the algorithm: // - The CTA copies the 128 x 128 tile of the C matrix from the global memory to // shared memory. After that is done, each warp loads the C matrix fragments // from shared memory, thus avoiding a random global memory access. // - On each internal iteration, the CTA copies a portion of the A and B // matrices from // global memory to shared memory. After that, all warps in the CTA reuse the // A and B data from shared memory, thus reducing the number of data copies // from global memory. // - The portions of the A and B matrices are stored in shared memory with an // additional // padding (skew) to reduce the number of shared memory access bank conflicts. // (See a detailed explanation near the SKEW_HALF macro definition.) // - When the CTA finishes computing the tiles of the resulting matrix, each // warp stores // its subtiles to shared memory. The CTA then copies the shared memory // contents to global memory, again avoiding redundant random global memory // accesses. // - Note that the CTA tile size is chosen to maximize the GPU register // utilization, // but carefully enough to avoid local memory use. // cuda-9.1_samples/0_Simple/cudaTensorCoreGemm #include <benchmark/benchmark.h> #include "gemm/args.hpp" #include "init/init.hpp" #include "utils/utils.hpp" #include <assert.h> #include <mma.h> #include <stdio.h> #ifndef WARP_SIZE #define WARP_SIZE (32) #endif // WARP_SIZE // MMA matrix tile dimensions. (16, 16, 16), (32, 8, 16), and (8, 32, 16) are // currently supported. static const int M = 16; static const int N = 16; static const int K = 16; // Implementation constants. 
static const int WARPS_PER_BLOCK = 8; static const int THREADS_PER_BLOCK = (WARP_SIZE * WARPS_PER_BLOCK); static const int CHUNK_K = 8; static const int BLOCK_ROW_WARPS = 2; static const int BLOCK_COL_WARPS = 4; static const int WARP_ROW_TILES = 4; static const int WARP_COL_TILES = 2; static const int BLOCK_ROW_TILES = (WARP_ROW_TILES * BLOCK_ROW_WARPS); static const int BLOCK_COL_TILES = (WARP_COL_TILES * BLOCK_COL_WARPS); static const int SHMEM_STRIDE = (N * BLOCK_ROW_TILES); static const int SHMEM_OFFSET = (N * WARP_ROW_TILES); #define C_LAYOUT wmma::mem_row_major // The macro below is used to shift rows of the A matrix and columns of the B // matrix in shared memory to minimize possible bank conflicts. Before // performing the nvcuda::wmma::mma_sync operation, the warp must load the // matrix data using the nvcuda::wmma::load_matrix_sync operation. Although the // memory access pattern is not specified for that function, each lane in the // warp can read one or multiple matrix elements from different matrix rows or // columns. For shared memory, such access can result in bank conflicts if // different rows / columns of the matrix map to the same bank. By shifting each // row and column by a few bytes, we make sure that they map to different banks, // thus reducing the number of possible bank conflicts. The number of 8 two-byte // "half" elements is chosen as the minimum possible shift because we must keep // each row and column 128-bit aligned, as required by // nvcuda::wmma::load_matrix_sync. static const int SKEW_HALF = 8; using namespace nvcuda; __host__ void init_host_matrices(float *a, float *b, float *c, int M_GLOBAL, int N_GLOBAL, int K_GLOBAL) { for (int i = 0; i < M_GLOBAL; i++) { for (int j = 0; j < K_GLOBAL; j++) { a[i * K_GLOBAL + j] = (float) (rand() % 3); } } for (int i = 0; i < N_GLOBAL; i++) { for (int j = 0; j < K_GLOBAL; j++) { b[i * K_GLOBAL + j] = (float) (rand() % 3); } } for (int t = 0; t < M_GLOBAL * N_GLOBAL; t++) { c[t] = (float) (rand() % 3); } } __global__ void init_gemm_device_matrices(const float *A_h, const float *B_h, const float *C_h, half *A, half *B, float *C, float *D, int M_GLOBAL, int N_GLOBAL, int K_GLOBAL) { for (int i = blockDim.x * blockIdx.x + threadIdx.x; i < M_GLOBAL * K_GLOBAL; i += gridDim.x * blockDim.x) A[i] = __float2half(A_h[i]); for (int i = blockDim.x * blockIdx.x + threadIdx.x; i < N_GLOBAL * K_GLOBAL; i += gridDim.x * blockDim.x) B[i] = __float2half(B_h[i]); for (int i = blockDim.x * blockIdx.x + threadIdx.x; i < M_GLOBAL * N_GLOBAL; i += gridDim.x * blockDim.x) C[i] = C_h[i]; for (int i = blockDim.x * blockIdx.x + threadIdx.x; i < M_GLOBAL * N_GLOBAL; i += gridDim.x * blockDim.x) D[i] = 0; } __global__ void init_hgemm_device_matrices(const float *A_h, const float *B_h, const float *C_h, half *A, half *B, half *C, half *D, int M_GLOBAL, int N_GLOBAL, int K_GLOBAL) { for (int i = blockDim.x * blockIdx.x + threadIdx.x; i < M_GLOBAL * K_GLOBAL; i += gridDim.x * blockDim.x) A[i] = __float2half(A_h[i]); for (int i = blockDim.x * blockIdx.x + threadIdx.x; i < N_GLOBAL * K_GLOBAL; i += gridDim.x * blockDim.x) B[i] = __float2half(B_h[i]); for (int i = blockDim.x * blockIdx.x + threadIdx.x; i < M_GLOBAL * N_GLOBAL; i += gridDim.x * blockDim.x) C[i] = __float2half(C_h[i]); for (int i = blockDim.x * blockIdx.x + threadIdx.x; i < M_GLOBAL * N_GLOBAL; i += gridDim.x * blockDim.x) D[i] = zero<half>(); } __global__ void compute_gemm(const half *A, const half *B, const float *C, float *D, float alpha, float beta, int M_GLOBAL, int N_GLOBAL, int 
K_GLOBAL) { extern __shared__ half shmem[][CHUNK_K * K + SKEW_HALF]; const auto M_TILES = M_GLOBAL / M; const auto N_TILES = N_GLOBAL / N; const auto K_TILES = K_GLOBAL / K; // Warp and lane identification. const unsigned int warpId = threadIdx.x / WARP_SIZE; const unsigned int laneId = threadIdx.x % WARP_SIZE; // Offset in shared memory from which the B matrix is stored. const size_t shmem_idx_b_off = BLOCK_COL_TILES * M; // This pointer is used to access the C and D matrix tiles this warp computes. float *shmem_warp_tile_ptr = (float *) &shmem[0][0] + (warpId / 2) * SHMEM_STRIDE * K * 2 + (warpId % 2) * SHMEM_OFFSET; // This pointer is used to stream the C and D matrices block-wide tile to and // from shared memory. float *shmem_warp_stream_ptr = (float *) &shmem[0][0] + warpId * SHMEM_STRIDE * K; // Adjust the beta scaler, as it'll be multiplied by alpha at the end of // each tile computation. Technically this is not generally correct (may // result in a loss of precision). Zero still needs to be specially handled // though. beta /= alpha; // Each CTA slides along the 128 x 128 tiles from the top left corner of the // matrix to the right and down, and selects the next tile to compute. Once // there's no such tile, all warps in this CTA exit. for (unsigned int block_pos = blockIdx.x;; block_pos += gridDim.x) { const unsigned int block_tile_i = ((block_pos * BLOCK_COL_TILES) / N_TILES) * (BLOCK_ROW_WARPS * WARP_ROW_TILES); const unsigned int block_tile_j = (block_pos * BLOCK_COL_TILES) % N_TILES; // Stop when there are no more D matrix tiles to compute in this CTA. if (block_tile_i >= M_TILES) { break; } // This warp's pointer to the C matrix data to copy memory from to shared // memory. const size_t gmem_idx = (block_tile_i + warpId) * M * N_GLOBAL + block_tile_j * N; const float *src_gmem_warp_stream_ptr = &C[gmem_idx]; // Stream multiple C tiles to shared memory. #pragma unroll for (int i = 0; i < K; i++) { typedef int4 copy_t; *((copy_t *) (shmem_warp_stream_ptr + SHMEM_STRIDE * i) + laneId) = *((copy_t *) (src_gmem_warp_stream_ptr + N_GLOBAL * i) + laneId); } __syncthreads(); // These fragments will accumulate the result of A and B matrix fragment // multiplications along the K_GLOBAL dimension. wmma::fragment<wmma::accumulator, M, N, K, float> c[WARP_COL_TILES][WARP_ROW_TILES]; // Load the C matrix tiles into fragments from shared memory. #pragma unroll for (int i = 0; i < WARP_COL_TILES; i++) { #pragma unroll for (int j = 0; j < WARP_ROW_TILES; j++) { const float *tile_ptr = shmem_warp_tile_ptr + i * SHMEM_STRIDE * K + j * N; wmma::load_matrix_sync(c[i][j], tile_ptr, SHMEM_STRIDE, C_LAYOUT); } } __syncthreads(); // Scale the C matrix. #pragma unroll for (int i = 0; i < WARP_COL_TILES; i++) { #pragma unroll for (int j = 0; j < WARP_ROW_TILES; j++) { #pragma unroll for (int t = 0; t < c[i][j].num_elements; t++) { c[i][j].x[t] *= beta; } } } // Select what warp copies what matrix to shared memory. // Warps 0-3 copy the A matrix, warps 4-7 copy the B matrix. const half *warp_ptr = (warpId < 4) ? (&A[block_tile_i * M * K_GLOBAL] + M * K_GLOBAL * (warpId % 4) * 2) : (&B[block_tile_j * N * K_GLOBAL] + N * K_GLOBAL * (warpId % 4) * 2); // Go through the global K dimension by a fixed step at a time. #pragma unroll for (int tile_k = 0; tile_k < K_TILES; tile_k += CHUNK_K) { // Copy slices of the A and B matrices to shared memory. // The first half of the warps in the CTA copy the A matrix, the rest copy // the B matrix. size_t shmem_idx = warpId < (WARPS_PER_BLOCK / 2) ? 
(M * (warpId % (WARPS_PER_BLOCK / 2)) * 2) : (N * (warpId % (WARPS_PER_BLOCK / 2)) * 2 + shmem_idx_b_off); // First half of the warp copies the first row / column of the matrix, // the second half of the warp copies the next. int4 *lane_ptr = (int4 *) (warp_ptr + tile_k * K + (laneId / (WARP_SIZE / 2)) * K_GLOBAL) + (laneId % (WARP_SIZE / 2)); // Shift the second half of the warp to the next row / column in the // shared memory. shmem_idx += laneId / (WARP_SIZE / 2); #pragma unroll for (int i = 0; i < (WARP_SIZE / 2); i++) { // Copy 16 bytes at once in each lane. *((int4 *) &shmem[shmem_idx][0] + (laneId % (WARP_SIZE / 2))) = *lane_ptr; // Advance the global memory pointer and the shared memory index. lane_ptr = (int4 *) ((half *) lane_ptr + K_GLOBAL * 2); shmem_idx += 2; } __syncthreads(); // Compute a grid of C matrix tiles in each warp. #pragma unroll for (int k_step = 0; k_step < CHUNK_K; k_step++) { wmma::fragment<wmma::matrix_a, M, N, K, half, wmma::row_major> a[WARP_COL_TILES]; wmma::fragment<wmma::matrix_b, M, N, K, half, wmma::col_major> b[WARP_ROW_TILES]; #pragma unroll for (int i = 0; i < WARP_COL_TILES; i++) { size_t shmem_idx_a = (warpId / 2) * M * 2 + (i * M); const half *tile_ptr = &shmem[shmem_idx_a][k_step * K]; wmma::load_matrix_sync(a[i], tile_ptr, K * CHUNK_K + SKEW_HALF); #pragma unroll for (int j = 0; j < WARP_ROW_TILES; j++) { if (i == 0) { // Load the B matrix fragment once, because it is going to be // reused against the other A matrix fragments. size_t shmem_idx_b = shmem_idx_b_off + (WARP_ROW_TILES * N) * (warpId % 2) + (j * N); const half *tile_ptr = &shmem[shmem_idx_b][k_step * K]; wmma::load_matrix_sync(b[j], tile_ptr, K * CHUNK_K + SKEW_HALF); } wmma::mma_sync(c[i][j], a[i], b[j], c[i][j]); } } } __syncthreads(); } // Store the D fragments to shared memory. #pragma unroll for (int i = 0; i < WARP_COL_TILES; i++) { #pragma unroll for (int j = 0; j < WARP_ROW_TILES; j++) { #pragma unroll // Uniform, point-wise transformations of ALL fragment elements by ALL // threads in the warp are well-defined even though element indices // within fragment storage are not defined. for (int t = 0; t < c[i][j].num_elements; t++) c[i][j].x[t] *= alpha; float *tile_ptr = shmem_warp_tile_ptr + i * SHMEM_STRIDE * K + j * N; wmma::store_matrix_sync(tile_ptr, c[i][j], SHMEM_STRIDE, C_LAYOUT); } } __syncthreads(); // Now that shared memory contains all the D tiles, stream them to global // memory. float *dst_gmem_warp_stream_ptr = &D[gmem_idx]; #pragma unroll for (int i = 0; i < K; i++) { *((int4 *) (dst_gmem_warp_stream_ptr + N_GLOBAL * i) + laneId) = *((int4 *) (shmem_warp_stream_ptr + SHMEM_STRIDE * i) + laneId); } __syncthreads(); } } __global__ void compute_hgemm(const half *A, const half *B, const half *C, half *D, half alpha, half beta, int M_GLOBAL, int N_GLOBAL, int K_GLOBAL) { extern __shared__ half shmem[][CHUNK_K * K + SKEW_HALF]; const auto M_TILES = M_GLOBAL / M; const auto N_TILES = N_GLOBAL / N; const auto K_TILES = K_GLOBAL / K; // Warp and lane identification. const unsigned int warpId = threadIdx.x / WARP_SIZE; const unsigned int laneId = threadIdx.x % WARP_SIZE; // Offset in shared memory from which the B matrix is stored. const size_t shmem_idx_b_off = BLOCK_COL_TILES * M; // This pointer is used to access the C and D matrix tiles this warp computes. 
half *shmem_warp_tile_ptr = (half *) &shmem[0][0] + (warpId / 2) * SHMEM_STRIDE * K * 2 + (warpId % 2) * SHMEM_OFFSET; // This pointer is used to stream the C and D matrices block-wide tile to and // from shared memory. half *shmem_warp_stream_ptr = (half *) &shmem[0][0] + warpId * SHMEM_STRIDE * K; // Adjust the beta scaler, as it'll be multiplied by alpha at the end of // each tile computation. Technically this is not generally correct (may // result in a loss of precision). Zero still needs to be specially handled // though. beta /= alpha; // Each CTA slides along the 128 x 128 tiles from the top left corner of the // matrix to the right and down, and selects the next tile to compute. Once // there's no such tile, all warps in this CTA exit. for (unsigned int block_pos = blockIdx.x;; block_pos += gridDim.x) { const unsigned int block_tile_i = ((block_pos * BLOCK_COL_TILES) / N_TILES) * (BLOCK_ROW_WARPS * WARP_ROW_TILES); const unsigned int block_tile_j = (block_pos * BLOCK_COL_TILES) % N_TILES; // Stop when there are no more D matrix tiles to compute in this CTA. if (block_tile_i >= M_TILES) { break; } // This warp's pointer to the C matrix data to copy memory from to shared // memory. const size_t gmem_idx = (block_tile_i + warpId) * M * N_GLOBAL + block_tile_j * N; const half *src_gmem_warp_stream_ptr = &C[gmem_idx]; // Stream multiple C tiles to shared memory. #pragma unroll for (int i = 0; i < K; i++) { typedef int2 copy_t; *((copy_t *) (shmem_warp_stream_ptr + SHMEM_STRIDE * i) + laneId) = *((copy_t *) (src_gmem_warp_stream_ptr + N_GLOBAL * i) + laneId); } __syncthreads(); // These fragments will accumulate the result of A and B matrix fragment // multiplications along the K_GLOBAL dimension. wmma::fragment<wmma::accumulator, M, N, K, half> c[WARP_COL_TILES][WARP_ROW_TILES]; // Load the C matrix tiles into fragments from shared memory. #pragma unroll for (int i = 0; i < WARP_COL_TILES; i++) { #pragma unroll for (int j = 0; j < WARP_ROW_TILES; j++) { const half *tile_ptr = shmem_warp_tile_ptr + i * SHMEM_STRIDE * K + j * N; wmma::load_matrix_sync(c[i][j], tile_ptr, SHMEM_STRIDE, C_LAYOUT); } } __syncthreads(); // Scale the C matrix. #pragma unroll for (int i = 0; i < WARP_COL_TILES; i++) { #pragma unroll for (int j = 0; j < WARP_ROW_TILES; j++) { #pragma unroll for (int t = 0; t < c[i][j].num_elements; t++) { c[i][j].x[t] *= beta; } } } // Select what warp copies what matrix to shared memory. // Warps 0-3 copy the A matrix, warps 4-7 copy the B matrix. const half *warp_ptr = (warpId < 4) ? (&A[block_tile_i * M * K_GLOBAL] + M * K_GLOBAL * (warpId % 4) * 2) : (&B[block_tile_j * N * K_GLOBAL] + N * K_GLOBAL * (warpId % 4) * 2); // Go through the global K dimension by a fixed step at a time. #pragma unroll for (int tile_k = 0; tile_k < K_TILES; tile_k += CHUNK_K) { // Copy slices of the A and B matrices to shared memory. // The first half of the warps in the CTA copy the A matrix, the rest copy // the B matrix. size_t shmem_idx = warpId < (WARPS_PER_BLOCK / 2) ? (M * (warpId % (WARPS_PER_BLOCK / 2)) * 2) : (N * (warpId % (WARPS_PER_BLOCK / 2)) * 2 + shmem_idx_b_off); // First half of the warp copies the first row / column of the matrix, // the second half of the warp copies the next. int2 *lane_ptr = (int2 *) (warp_ptr + tile_k * K + (laneId / (WARP_SIZE / 2)) * K_GLOBAL) + (laneId % (WARP_SIZE / 2)); // Shift the second half of the warp to the next row / column in the // shared memory. 
shmem_idx += laneId / (WARP_SIZE / 2); #pragma unroll for (int i = 0; i < (WARP_SIZE / 2); i++) { // Copy 16 bytes at once in each lane. *((int2 *) &shmem[shmem_idx][0] + (laneId % (WARP_SIZE / 2))) = *lane_ptr; // Advance the global memory pointer and the shared memory index. lane_ptr = (int2 *) ((half *) lane_ptr + K_GLOBAL * 2); shmem_idx += 2; } __syncthreads(); // Compute a grid of C matrix tiles in each warp. #pragma unroll for (int k_step = 0; k_step < CHUNK_K; k_step++) { wmma::fragment<wmma::matrix_a, M, N, K, half, wmma::row_major> a[WARP_COL_TILES]; wmma::fragment<wmma::matrix_b, M, N, K, half, wmma::col_major> b[WARP_ROW_TILES]; #pragma unroll for (int i = 0; i < WARP_COL_TILES; i++) { size_t shmem_idx_a = (warpId / 2) * M * 2 + (i * M); const half *tile_ptr = &shmem[shmem_idx_a][k_step * K]; wmma::load_matrix_sync(a[i], tile_ptr, K * CHUNK_K + SKEW_HALF); #pragma unroll for (int j = 0; j < WARP_ROW_TILES; j++) { if (i == 0) { // Load the B matrix fragment once, because it is going to be // reused against the other A matrix fragments. size_t shmem_idx_b = shmem_idx_b_off + (WARP_ROW_TILES * N) * (warpId % 2) + (j * N); const half *tile_ptr = &shmem[shmem_idx_b][k_step * K]; wmma::load_matrix_sync(b[j], tile_ptr, K * CHUNK_K + SKEW_HALF); } wmma::mma_sync(c[i][j], a[i], b[j], c[i][j]); } } } __syncthreads(); } // Store the D fragments to shared memory. #pragma unroll for (int i = 0; i < WARP_COL_TILES; i++) { #pragma unroll for (int j = 0; j < WARP_ROW_TILES; j++) { #pragma unroll // Uniform, point-wise transformations of ALL fragment elements by ALL // threads in the warp are well-defined even though element indices // within fragment storage are not defined. for (int t = 0; t < c[i][j].num_elements; t++) c[i][j].x[t] *= alpha; half *tile_ptr = shmem_warp_tile_ptr + i * SHMEM_STRIDE * K + j * N; wmma::store_matrix_sync(tile_ptr, c[i][j], SHMEM_STRIDE, C_LAYOUT); } } __syncthreads(); // Now that shared memory contains all the D tiles, stream them to global // memory. half *dst_gmem_warp_stream_ptr = &D[gmem_idx]; #pragma unroll for (int i = 0; i < K; i++) { *((int2 *) (dst_gmem_warp_stream_ptr + N_GLOBAL * i) + laneId) = *((int2 *) (shmem_warp_stream_ptr + SHMEM_STRIDE * i) + laneId); } __syncthreads(); } } static void CUDA_WMMA_GEMM(benchmark::State &state) { /* if (!has_cuda) { */ /* state.SkipWithError(fmt::format("CUDA_WMMA_GEMM no CUDA device found")); */ /* return; */ /* } */ // M_GLOBAL, N_GLOBAL, K_GLOBAL must be multiple of M, N and K const auto M_GLOBAL = state.range(0); const auto N_GLOBAL = state.range(1); const auto K_GLOBAL = state.range(2); const float alpha = 1.1f; const float beta = 1.2f; int dev = 0; cudaDeviceProp deviceProp; PRINT_IF_ERROR(cudaGetDeviceProperties(&deviceProp, dev)); // Tensor cores require a GPU of Volta (SM7X) architecture or higher. if (deviceProp.major < 7) { printf("cudaTensorCoreGemm requires requires SM 7.0 or higher to use " "Tensor Cores. 
Exiting...\n"); exit(-1); } float *A_h = NULL; float *B_h = NULL; float *C_h = NULL; PRINT_IF_ERROR(cudaMallocManaged((void **) &A_h, sizeof(float) * M_GLOBAL * K_GLOBAL)); PRINT_IF_ERROR(cudaMallocManaged((void **) &B_h, sizeof(float) * K_GLOBAL * N_GLOBAL)); PRINT_IF_ERROR(cudaMallocManaged((void **) &C_h, sizeof(float) * M_GLOBAL * N_GLOBAL)); half *A = NULL; half *B = NULL; float *C = NULL; float *D = NULL; PRINT_IF_ERROR(cudaMalloc((void **) &A, sizeof(half) * M_GLOBAL * K_GLOBAL)); PRINT_IF_ERROR(cudaMalloc((void **) &B, sizeof(half) * N_GLOBAL * K_GLOBAL)); PRINT_IF_ERROR(cudaMalloc((void **) &C, sizeof(float) * M_GLOBAL * N_GLOBAL)); PRINT_IF_ERROR(cudaMalloc((void **) &D, sizeof(float) * M_GLOBAL * N_GLOBAL)); assert(((unsigned long long) A) % 128 == 0); assert(((unsigned long long) B) % 128 == 0); assert(((unsigned long long) C) % 128 == 0); assert(((unsigned long long) D) % 128 == 0); init_host_matrices(A_h, B_h, C_h, M_GLOBAL, N_GLOBAL, K_GLOBAL); PRINT_IF_LAUNCH_ERROR( (init_gemm_device_matrices<<<deviceProp.multiProcessorCount, THREADS_PER_BLOCK>>>( A_h, B_h, C_h, A, B, C, D, M_GLOBAL, N_GLOBAL, K_GLOBAL))); PRINT_IF_ERROR(cudaDeviceSynchronize()); enum { SHMEM_SZ = sizeof(half) * (BLOCK_COL_TILES * M) * (CHUNK_K * K + SKEW_HALF) * 2 }; // printf("Required shared memory size: %lu Kb\n", SHMEM_SZ / 1024UL); PRINT_IF_ERROR(cudaFuncSetAttribute( compute_gemm, cudaFuncAttributeMaxDynamicSharedMemorySize, SHMEM_SZ)); cudaEvent_t start, stop; PRINT_IF_ERROR(cudaEventCreate(&start)); PRINT_IF_ERROR(cudaEventCreate(&stop)); for (auto _ : state) { PRINT_IF_ERROR(cudaEventRecord(start)); (compute_gemm<<<deviceProp.multiProcessorCount, THREADS_PER_BLOCK, SHMEM_SZ>>>( A, B, C, D, alpha, beta, M_GLOBAL, N_GLOBAL, K_GLOBAL)); PRINT_IF_ERROR(cudaEventRecord(stop)); PRINT_IF_ERROR(cudaEventSynchronize(stop)); state.PauseTiming(); float msecTotal = 0.0f; PRINT_IF_ERROR(cudaEventElapsedTime(&msecTotal, start, stop)); state.SetIterationTime(msecTotal / 1000); state.ResumeTiming(); } state.counters.insert({{"M", M_GLOBAL}, {"N", N_GLOBAL}, {"K", K_GLOBAL}, {"num_elements", M_GLOBAL * N_GLOBAL * K_GLOBAL}, {"flops", {state.iterations() * 2.0 * M_GLOBAL * N_GLOBAL * K_GLOBAL, benchmark::Counter::kAvgThreadsRate}}}); PRINT_IF_ERROR(cudaEventDestroy(start)); PRINT_IF_ERROR(cudaEventDestroy(stop)); PRINT_IF_ERROR(cudaFree((void *) A_h)); PRINT_IF_ERROR(cudaFree((void *) B_h)); PRINT_IF_ERROR(cudaFree((void *) C_h)); PRINT_IF_ERROR(cudaFree((void *) A)); PRINT_IF_ERROR(cudaFree((void *) B)); PRINT_IF_ERROR(cudaFree((void *) C)); PRINT_IF_ERROR(cudaFree((void *) D)); cudaDeviceReset(); } static void CUDA_WMMA_HGEMM(benchmark::State &state) { // M_GLOBAL, N_GLOBAL, K_GLOBAL must be multiple of M, N and K const auto M_GLOBAL = state.range(0); const auto N_GLOBAL = state.range(1); const auto K_GLOBAL = state.range(2); const __half alpha = approx_float_to_half(1.1f); const __half beta = approx_float_to_half(1.2f); int dev = 0; cudaDeviceProp deviceProp; PRINT_IF_ERROR(cudaGetDeviceProperties(&deviceProp, dev)); // Tensor cores require a GPU of Volta (SM7X) architecture or higher. if (deviceProp.major < 7) { printf("cudaTensorCoreGemm requires requires SM 7.0 or higher to use " "Tensor Cores. 
Exiting...\n"); exit(-1); } float *A_h = NULL; float *B_h = NULL; float *C_h = NULL; PRINT_IF_ERROR(cudaMallocManaged((void **) &A_h, sizeof(float) * M_GLOBAL * K_GLOBAL)); PRINT_IF_ERROR(cudaMallocManaged((void **) &B_h, sizeof(float) * K_GLOBAL * N_GLOBAL)); PRINT_IF_ERROR(cudaMallocManaged((void **) &C_h, sizeof(float) * M_GLOBAL * N_GLOBAL)); half *A = NULL; half *B = NULL; half *C = NULL; half *D = NULL; PRINT_IF_ERROR(cudaMalloc((void **) &A, sizeof(half) * M_GLOBAL * K_GLOBAL)); PRINT_IF_ERROR(cudaMalloc((void **) &B, sizeof(half) * N_GLOBAL * K_GLOBAL)); PRINT_IF_ERROR(cudaMalloc((void **) &C, sizeof(half) * M_GLOBAL * N_GLOBAL)); PRINT_IF_ERROR(cudaMalloc((void **) &D, sizeof(half) * M_GLOBAL * N_GLOBAL)); assert(((unsigned long long) A) % 128 == 0); assert(((unsigned long long) B) % 128 == 0); assert(((unsigned long long) C) % 128 == 0); assert(((unsigned long long) D) % 128 == 0); init_host_matrices(A_h, B_h, C_h, M_GLOBAL, N_GLOBAL, K_GLOBAL); PRINT_IF_LAUNCH_ERROR( (init_hgemm_device_matrices<<<deviceProp.multiProcessorCount, THREADS_PER_BLOCK>>>( A_h, B_h, C_h, A, B, C, D, M_GLOBAL, N_GLOBAL, K_GLOBAL))); PRINT_IF_ERROR(cudaDeviceSynchronize()); enum { SHMEM_SZ = sizeof(half) * (BLOCK_COL_TILES * M) * (CHUNK_K * K + SKEW_HALF) * 2 }; // printf("Required shared memory size: %lu Kb\n", SHMEM_SZ / 1024UL); PRINT_IF_ERROR(cudaFuncSetAttribute( compute_hgemm, cudaFuncAttributeMaxDynamicSharedMemorySize, SHMEM_SZ)); cudaEvent_t start, stop; PRINT_IF_ERROR(cudaEventCreate(&start)); PRINT_IF_ERROR(cudaEventCreate(&stop)); for (auto _ : state) { PRINT_IF_ERROR(cudaEventRecord(start)); (compute_hgemm<<<deviceProp.multiProcessorCount, THREADS_PER_BLOCK, SHMEM_SZ>>>( A, B, C, D, alpha, beta, M_GLOBAL, N_GLOBAL, K_GLOBAL)); PRINT_IF_ERROR(cudaEventRecord(stop)); PRINT_IF_ERROR(cudaEventSynchronize(stop)); state.PauseTiming(); float msecTotal = 0.0f; PRINT_IF_ERROR(cudaEventElapsedTime(&msecTotal, start, stop)); state.SetIterationTime(msecTotal / 1000); state.ResumeTiming(); } state.counters.insert({{"M", M_GLOBAL}, {"N", N_GLOBAL}, {"K", K_GLOBAL}, {"num_elements", M_GLOBAL * N_GLOBAL * K_GLOBAL}, {"flops", {state.iterations() * 2.0 * M_GLOBAL * N_GLOBAL * K_GLOBAL, benchmark::Counter::kAvgThreadsRate}}}); PRINT_IF_ERROR(cudaEventDestroy(start)); PRINT_IF_ERROR(cudaEventDestroy(stop)); PRINT_IF_ERROR(cudaFree((void *) A_h)); PRINT_IF_ERROR(cudaFree((void *) B_h)); PRINT_IF_ERROR(cudaFree((void *) C_h)); PRINT_IF_ERROR(cudaFree((void *) A)); PRINT_IF_ERROR(cudaFree((void *) B)); PRINT_IF_ERROR(cudaFree((void *) C)); PRINT_IF_ERROR(cudaFree((void *) D)); cudaDeviceReset(); } BENCHMARK(CUDA_WMMA_GEMM)->ARGS()->UseManualTime(); BENCHMARK(CUDA_WMMA_HGEMM)->ARGS()->UseManualTime();
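// --- Dynamic shared memory opt-in, in isolation (illustrative only) ---------
// compute_gemm asks for SHMEM_SZ = sizeof(half) * (BLOCK_COL_TILES * M) *
// (CHUNK_K * K + SKEW_HALF) * 2 = 2 * 128 * 136 * 2 = 69,632 bytes of dynamic
// shared memory per block, which exceeds the default 48 KB cap, so the
// benchmark must call cudaFuncSetAttribute before launching. The sketch below
// shows just that opt-in pattern with a hypothetical kernel.
#include <cuda_runtime.h>
#include <cstdio>

__global__ void big_smem_kernel(float* out) {
  extern __shared__ float tile[];          // sized by the launch configuration
  tile[threadIdx.x] = (float)threadIdx.x;
  __syncthreads();
  out[blockIdx.x * blockDim.x + threadIdx.x] = tile[threadIdx.x];
}

int main() {
  const size_t shmem_bytes = 2 * (8 * 16) * (8 * 16 + 8) * 2;  // 69,632 bytes, as above

  int dev = 0, max_optin = 0;
  cudaGetDevice(&dev);
  cudaDeviceGetAttribute(&max_optin, cudaDevAttrMaxSharedMemoryPerBlockOptin, dev);
  if ((size_t)max_optin < shmem_bytes) {
    printf("device cannot provide %zu bytes of shared memory per block\n", shmem_bytes);
    return 0;
  }
  // Opt in: without this call, dynamic shared memory is capped at 48 KB per block
  // and the launch below would fail with an invalid-argument error.
  cudaFuncSetAttribute(big_smem_kernel,
                       cudaFuncAttributeMaxDynamicSharedMemorySize,
                       (int)shmem_bytes);

  float* d_out = nullptr;
  cudaMalloc(&d_out, 256 * sizeof(float));
  big_smem_kernel<<<1, 256, shmem_bytes>>>(d_out);
  cudaDeviceSynchronize();
  cudaFree(d_out);
  return 0;
}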
867bae41bb4bac251b313dbd8da43e3be0655526.hip
// !!! This is a file automatically generated by hipify!!! // Array multiplication: C = A * B: // System includes #include <stdio.h> #include <assert.h> #include <malloc.h> #include <math.h> #include <stdlib.h> // CUDA runtime #include <hip/hip_runtime.h> // Helper functions and utilities to work with CUDA #include "helper_functions.h" #include "helper_cuda.h" #ifndef BLOCKSIZE #define BLOCKSIZE 32 // number of threads per block (16, 32, 64) #endif #ifndef SIZE #define SIZE 1*1024*1024 // array size(16K, 32K, 64K, 128K, 256K, and 512K) #endif #ifndef NUMTRIALS #define NUMTRIALS 100 #endif #ifndef TOLERANCE #define TOLERANCE 0.00001f // tolerance to relative error #endif // ranges for the random numbers: const float XCMIN = 0.; const float XCMAX = 2.0; const float YCMIN = 0.0; const float YCMAX = 2.0; const float RMIN = 0.5; const float RMAX = 2.0; __host__ void TimeOfDaySeed() { struct tm y2k = { 0 }; y2k.tm_hour = 0; y2k.tm_min = 0; y2k.tm_sec = 0; y2k.tm_year = 100; y2k.tm_mon = 0; y2k.tm_mday = 1; time_t timer; time(&timer); double seconds = difftime(timer, mktime(&y2k)); unsigned int seed = (unsigned int)(1000.*seconds); // milliseconds srand(seed); } __host__ float Ranf(float low, float high) { float r = (float)rand(); // 0 - RAND_MAX float t = r / (float)RAND_MAX; // 0. - 1. return low + t * (high - low); } // array multiplication (CUDA Kernel) on the device: C = A * B __global__ void Monte_Carlo( float *A, float *B, float *R, float *C ) { __shared__ float prods[BLOCKSIZE]; unsigned int numItems = blockDim.x; unsigned int tnum = threadIdx.x; unsigned int wgNum = blockIdx.x; unsigned int gid = blockIdx.x*blockDim.x + threadIdx.x; /*prods[tnum] = A[gid] * B[gid] * R[gid];*/ // solve for the intersection using the quadratic formula: float a = 2.; float b = -2.*(A[gid] + B[gid]); float c = A[gid] * A[gid] + B[gid] * B[gid] - R[gid] * R[gid]; float d = b*b - 4.*a*c; // If d is less than 0., then the circle was completely missed. (Case A) no hit. if (d < 0) prods[tnum] = 0; else { // hits the circle: // get the first intersection: d = sqrt(d); float t1 = (-b + d) / (2.*a); // time to intersect the circle float t2 = (-b - d) / (2.*a); // time to intersect the circle float tmin = t1 < t2 ? t1 : t2; // only care about the first intersection // If tmin is less than 0., then the circle completely engulfs the laser pointer. (Case B) no hit. if (tmin < 0) prods[tnum] = 0; else { // where does it intersect the circle? float xcir = tmin; float ycir = tmin; // get the unitized normal vector at the point of intersection: float nx = xcir - A[gid]; float ny = ycir - B[gid]; float n = sqrt(nx*nx + ny*ny); nx /= n; // unit vector ny /= n; // unit vector // get the unitized incoming vector: float inx = xcir - 0.; float iny = ycir - 0.; float in = sqrt(inx*inx + iny*iny); inx /= in; // unit vector iny /= in; // unit vector // get the outgoing (bounced) vector: float dot = inx*nx + iny*ny; //float outx = inx - 2.*nx*dot; // angle of reflection = angle of incidence` float outy = iny - 2.*ny*dot; // angle of reflection = angle of incidence` // find out if it hits the infinite plate: float t = (0. - ycir) / outy; // If t is less than 0., then the reflected beam went up instead of down. no hit. if (t < 0) prods[tnum] = 0; // Otherwise, this beam hit the infinite plate. (Case D) Increment the number of hits. 
else prods[tnum] = 1; } } for (int offset = 1; offset < numItems; offset *= 2) { int mask = 2 * offset - 1; __syncthreads(); if ((tnum & mask) == 0) { prods[tnum] += prods[tnum + offset]; } } __syncthreads(); if (tnum == 0) C[wgNum] = prods[0]; } // main program: int main( int argc, char* argv[ ] ) { int dev = findCudaDevice(argc, (const char **)argv); /**************************Setting Up the Memory for the Arrays*************************/ // allocate host memory: float * hA = new float [ SIZE ]; // xcs float * hB = new float [ SIZE ]; // ycs float *hR = new float[SIZE]; // rs float * hC = new float [ SIZE/BLOCKSIZE ]; TimeOfDaySeed(); // seed the random number generator for( int i = 0; i < SIZE; i++ ) { /*hA[i] = hB[i] = (float) sqrt( (float)(i+1) );*/ hA[i] = Ranf(XCMIN, XCMAX); hB[i] = Ranf(YCMIN, YCMAX); hR[i] = Ranf(RMIN, RMAX); } // allocate device memory: float *dA, *dB, *dR, *dC; dim3 dimsA( SIZE, 1, 1 ); dim3 dimsB( SIZE, 1, 1 ); dim3 dimsR(SIZE, 1, 1); dim3 dimsC( SIZE/BLOCKSIZE, 1, 1 ); //__shared__ float prods[SIZE/BLOCKSIZE]; hipError_t status; status = hipMalloc( reinterpret_cast<void **>(&dA), SIZE*sizeof(float) ); checkCudaErrors( status ); status = hipMalloc( reinterpret_cast<void **>(&dB), SIZE*sizeof(float) ); checkCudaErrors( status ); status = hipMalloc(reinterpret_cast<void **>(&dR), SIZE * sizeof(float)); checkCudaErrors(status); status = hipMalloc( reinterpret_cast<void **>(&dC), (SIZE/BLOCKSIZE)*sizeof(float) ); checkCudaErrors( status ); /*************Copying the Arrays from the Host to the Device***********/ // copy host memory to the device: status = hipMemcpy( dA, hA, SIZE*sizeof(float), hipMemcpyHostToDevice ); checkCudaErrors( status ); status = hipMemcpy( dB, hB, SIZE*sizeof(float), hipMemcpyHostToDevice ); checkCudaErrors( status ); status = hipMemcpy(dR, hR, SIZE * sizeof(float), hipMemcpyHostToDevice); checkCudaErrors(status); /**********************Getting Ready to Execute***********************/ // setup the execution parameters: dim3 threads(BLOCKSIZE, 1, 1 ); dim3 grid( SIZE / threads.x, 1, 1 ); // Create and start timer hipDeviceSynchronize( ); // allocate CUDA events that we'll use for timing: hipEvent_t start, stop; status = hipEventCreate( &start ); checkCudaErrors( status ); status = hipEventCreate( &stop ); checkCudaErrors( status ); // record the start event: status = hipEventRecord( start, NULL ); checkCudaErrors( status ); /*****************************Executing the Kernel******************************/ // execute the kernel: for( int t = 0; t < NUMTRIALS; t++) { hipLaunchKernelGGL(( Monte_Carlo) , dim3(grid), dim3(threads) , 0, 0, dA, dB, dR, dC ); } /****************************Getting the Stop Time*******************************/ // record the stop event: status = hipEventRecord( stop, NULL ); checkCudaErrors( status ); // wait for the stop event to complete: status = hipEventSynchronize( stop ); checkCudaErrors( status ); /******************************Printing the Performance*****************************/ float msecTotal = 0.0f; status = hipEventElapsedTime( &msecTotal, start, stop ); checkCudaErrors( status ); // compute and print the performance double secondsTotal = 0.001 * (double)msecTotal; double multsPerSecond = (float)SIZE * (float)NUMTRIALS / secondsTotal; double gigaMultsPerSecond = multsPerSecond / 1000000000.; fprintf( stderr, "\t\t\t\t\tgigaTrialsPerSecond/Second = %10.2lf\t\tArray Size = %10d\tBLOCKSIZE = %d\n", SIZE, BLOCKSIZE, gigaMultsPerSecond ); /***************************Copying the Array from the Device to the 
Host*****************************/ // copy result from the device to the host: status = hipMemcpy( hC, dC, (SIZE/BLOCKSIZE)*sizeof(float), hipMemcpyDeviceToHost ); checkCudaErrors( status ); // check the sum : double sum = 0.; for(int i = 0; i < SIZE/BLOCKSIZE; i++ ) { //fprintf(stderr, "hC[%6d] = %10.2f\n", i, hC[i]); sum += (double)hC[i]; } fprintf( stderr, "sum = %10.2lf, probability = %10.2lf\n", sum, sum / double(SIZE) ); /************************Cleaning Up******************************/ // clean up memory: delete [ ] hA; delete [ ] hB; delete[] hR; delete [ ] hC; status = hipFree( dA ); checkCudaErrors( status ); status = hipFree( dB ); checkCudaErrors( status ); status = hipFree(dR); checkCudaErrors(status); status = hipFree( dC ); checkCudaErrors( status ); return 0; }
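The probability printed above (sum / SIZE) can be cross-checked on the host with the same quadratic-formula geometry and the same XCMIN..RMAX ranges. This is a minimal host-only sketch, not part of the original program; HitsPlate, RanfHost and the sample count N are illustrative names and values chosen here.

#include <math.h>
#include <stdio.h>
#include <stdlib.h>

static float RanfHost(float low, float high)
{
    return low + (float)rand() / (float)RAND_MAX * (high - low);
}

// Same geometry as the Monte_Carlo kernel: returns 1 if the bounced beam
// comes back down onto the infinite plate, 0 otherwise.
static int HitsPlate(float xc, float yc, float r)
{
    float a = 2.f, b = -2.f * (xc + yc), c = xc * xc + yc * yc - r * r;
    float d = b * b - 4.f * a * c;
    if (d < 0.f) return 0;                       // (Case A) missed the circle
    d = sqrtf(d);
    float t1 = (-b + d) / (2.f * a), t2 = (-b - d) / (2.f * a);
    float tmin = t1 < t2 ? t1 : t2;
    if (tmin < 0.f) return 0;                    // (Case B) circle engulfs the pointer
    float xcir = tmin, ycir = tmin;              // point of intersection
    float nx = xcir - xc, ny = ycir - yc;
    float n = sqrtf(nx * nx + ny * ny); nx /= n; ny /= n;
    float inx = xcir, iny = ycir;
    float in = sqrtf(inx * inx + iny * iny); inx /= in; iny /= in;
    float dot = inx * nx + iny * ny;
    float outy = iny - 2.f * ny * dot;           // reflected beam, y component
    float t = (0.f - ycir) / outy;
    return t >= 0.f;                             // (Case D) hit the plate
}

int main(void)
{
    const int N = 1 << 20;                       // illustrative sample count
    int hits = 0;
    for (int i = 0; i < N; i++)
        hits += HitsPlate(RanfHost(0.f, 2.f), RanfHost(0.f, 2.f), RanfHost(0.5f, 2.f));
    printf("host probability = %f\n", (double)hits / (double)N);
    return 0;
}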
867bae41bb4bac251b313dbd8da43e3be0655526.cu
// Array multiplication: C = A * B: // System includes #include <stdio.h> #include <assert.h> #include <malloc.h> #include <math.h> #include <stdlib.h> // CUDA runtime #include <cuda_runtime.h> // Helper functions and utilities to work with CUDA #include "helper_functions.h" #include "helper_cuda.h" #ifndef BLOCKSIZE #define BLOCKSIZE 32 // number of threads per block (16, 32, 64) #endif #ifndef SIZE #define SIZE 1*1024*1024 // array size(16K, 32K, 64K, 128K, 256K, and 512K) #endif #ifndef NUMTRIALS #define NUMTRIALS 100 #endif #ifndef TOLERANCE #define TOLERANCE 0.00001f // tolerance to relative error #endif // ranges for the random numbers: const float XCMIN = 0.; const float XCMAX = 2.0; const float YCMIN = 0.0; const float YCMAX = 2.0; const float RMIN = 0.5; const float RMAX = 2.0; __host__ void TimeOfDaySeed() { struct tm y2k = { 0 }; y2k.tm_hour = 0; y2k.tm_min = 0; y2k.tm_sec = 0; y2k.tm_year = 100; y2k.tm_mon = 0; y2k.tm_mday = 1; time_t timer; time(&timer); double seconds = difftime(timer, mktime(&y2k)); unsigned int seed = (unsigned int)(1000.*seconds); // milliseconds srand(seed); } __host__ float Ranf(float low, float high) { float r = (float)rand(); // 0 - RAND_MAX float t = r / (float)RAND_MAX; // 0. - 1. return low + t * (high - low); } // array multiplication (CUDA Kernel) on the device: C = A * B __global__ void Monte_Carlo( float *A, float *B, float *R, float *C ) { __shared__ float prods[BLOCKSIZE]; unsigned int numItems = blockDim.x; unsigned int tnum = threadIdx.x; unsigned int wgNum = blockIdx.x; unsigned int gid = blockIdx.x*blockDim.x + threadIdx.x; /*prods[tnum] = A[gid] * B[gid] * R[gid];*/ // solve for the intersection using the quadratic formula: float a = 2.; float b = -2.*(A[gid] + B[gid]); float c = A[gid] * A[gid] + B[gid] * B[gid] - R[gid] * R[gid]; float d = b*b - 4.*a*c; // If d is less than 0., then the circle was completely missed. (Case A) no hit. if (d < 0) prods[tnum] = 0; else { // hits the circle: // get the first intersection: d = sqrt(d); float t1 = (-b + d) / (2.*a); // time to intersect the circle float t2 = (-b - d) / (2.*a); // time to intersect the circle float tmin = t1 < t2 ? t1 : t2; // only care about the first intersection // If tmin is less than 0., then the circle completely engulfs the laser pointer. (Case B) no hit. if (tmin < 0) prods[tnum] = 0; else { // where does it intersect the circle? float xcir = tmin; float ycir = tmin; // get the unitized normal vector at the point of intersection: float nx = xcir - A[gid]; float ny = ycir - B[gid]; float n = sqrt(nx*nx + ny*ny); nx /= n; // unit vector ny /= n; // unit vector // get the unitized incoming vector: float inx = xcir - 0.; float iny = ycir - 0.; float in = sqrt(inx*inx + iny*iny); inx /= in; // unit vector iny /= in; // unit vector // get the outgoing (bounced) vector: float dot = inx*nx + iny*ny; //float outx = inx - 2.*nx*dot; // angle of reflection = angle of incidence` float outy = iny - 2.*ny*dot; // angle of reflection = angle of incidence` // find out if it hits the infinite plate: float t = (0. - ycir) / outy; // If t is less than 0., then the reflected beam went up instead of down. no hit. if (t < 0) prods[tnum] = 0; // Otherwise, this beam hit the infinite plate. (Case D) Increment the number of hits. 
else prods[tnum] = 1; } } for (int offset = 1; offset < numItems; offset *= 2) { int mask = 2 * offset - 1; __syncthreads(); if ((tnum & mask) == 0) { prods[tnum] += prods[tnum + offset]; } } __syncthreads(); if (tnum == 0) C[wgNum] = prods[0]; } // main program: int main( int argc, char* argv[ ] ) { int dev = findCudaDevice(argc, (const char **)argv); /**************************Setting Up the Memory for the Arrays*************************/ // allocate host memory: float * hA = new float [ SIZE ]; // xcs float * hB = new float [ SIZE ]; // ycs float *hR = new float[SIZE]; // rs float * hC = new float [ SIZE/BLOCKSIZE ]; TimeOfDaySeed(); // seed the random number generator for( int i = 0; i < SIZE; i++ ) { /*hA[i] = hB[i] = (float) sqrt( (float)(i+1) );*/ hA[i] = Ranf(XCMIN, XCMAX); hB[i] = Ranf(YCMIN, YCMAX); hR[i] = Ranf(RMIN, RMAX); } // allocate device memory: float *dA, *dB, *dR, *dC; dim3 dimsA( SIZE, 1, 1 ); dim3 dimsB( SIZE, 1, 1 ); dim3 dimsR(SIZE, 1, 1); dim3 dimsC( SIZE/BLOCKSIZE, 1, 1 ); //__shared__ float prods[SIZE/BLOCKSIZE]; cudaError_t status; status = cudaMalloc( reinterpret_cast<void **>(&dA), SIZE*sizeof(float) ); checkCudaErrors( status ); status = cudaMalloc( reinterpret_cast<void **>(&dB), SIZE*sizeof(float) ); checkCudaErrors( status ); status = cudaMalloc(reinterpret_cast<void **>(&dR), SIZE * sizeof(float)); checkCudaErrors(status); status = cudaMalloc( reinterpret_cast<void **>(&dC), (SIZE/BLOCKSIZE)*sizeof(float) ); checkCudaErrors( status ); /*************Copying the Arrays from the Host to the Device***********/ // copy host memory to the device: status = cudaMemcpy( dA, hA, SIZE*sizeof(float), cudaMemcpyHostToDevice ); checkCudaErrors( status ); status = cudaMemcpy( dB, hB, SIZE*sizeof(float), cudaMemcpyHostToDevice ); checkCudaErrors( status ); status = cudaMemcpy(dR, hR, SIZE * sizeof(float), cudaMemcpyHostToDevice); checkCudaErrors(status); /**********************Getting Ready to Execute***********************/ // setup the execution parameters: dim3 threads(BLOCKSIZE, 1, 1 ); dim3 grid( SIZE / threads.x, 1, 1 ); // Create and start timer cudaDeviceSynchronize( ); // allocate CUDA events that we'll use for timing: cudaEvent_t start, stop; status = cudaEventCreate( &start ); checkCudaErrors( status ); status = cudaEventCreate( &stop ); checkCudaErrors( status ); // record the start event: status = cudaEventRecord( start, NULL ); checkCudaErrors( status ); /*****************************Executing the Kernel******************************/ // execute the kernel: for( int t = 0; t < NUMTRIALS; t++) { Monte_Carlo <<< grid, threads >>>( dA, dB, dR, dC ); } /****************************Getting the Stop Time*******************************/ // record the stop event: status = cudaEventRecord( stop, NULL ); checkCudaErrors( status ); // wait for the stop event to complete: status = cudaEventSynchronize( stop ); checkCudaErrors( status ); /******************************Printing the Performance*****************************/ float msecTotal = 0.0f; status = cudaEventElapsedTime( &msecTotal, start, stop ); checkCudaErrors( status ); // compute and print the performance double secondsTotal = 0.001 * (double)msecTotal; double multsPerSecond = (float)SIZE * (float)NUMTRIALS / secondsTotal; double gigaMultsPerSecond = multsPerSecond / 1000000000.; fprintf( stderr, "\t\t\t\t\tgigaTrialsPerSecond/Second = %10.2lf\t\tArray Size = %10d\tBLOCKSIZE = %d\n", SIZE, BLOCKSIZE, gigaMultsPerSecond ); /***************************Copying the Array from the Device to the 
Host*****************************/ // copy result from the device to the host: status = cudaMemcpy( hC, dC, (SIZE/BLOCKSIZE)*sizeof(float), cudaMemcpyDeviceToHost ); checkCudaErrors( status ); // check the sum : double sum = 0.; for(int i = 0; i < SIZE/BLOCKSIZE; i++ ) { //fprintf(stderr, "hC[%6d] = %10.2f\n", i, hC[i]); sum += (double)hC[i]; } fprintf( stderr, "sum = %10.2lf, probability = %10.2lf\n", sum, sum / double(SIZE) ); /************************Cleaning Up******************************/ // clean up memory: delete [ ] hA; delete [ ] hB; delete[] hR; delete [ ] hC; status = cudaFree( dA ); checkCudaErrors( status ); status = cudaFree( dB ); checkCudaErrors( status ); status = cudaFree(dR); checkCudaErrors(status); status = cudaFree( dC ); checkCudaErrors( status ); return 0; }
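The Monte_Carlo kernel folds the per-thread hit flags with a stride-doubling shared-memory reduction; that scheme only sums every element when blockDim.x is a power of two, which holds for the allowed BLOCKSIZE values (16, 32, 64). Below is the same pattern isolated into a self-contained sketch; BlockSum and RBLOCK are illustrative names, and the host finishes the job exactly as main() above sums hC[].

#include <stdio.h>
#include <cuda_runtime.h>

#define RBLOCK 32   // stand-in for BLOCKSIZE; must be a power of two

// Each thread deposits one value in shared memory, partial sums are combined
// with the same stride-doubling / bit-mask scheme as Monte_Carlo, and thread 0
// writes the block total. Assumes gridDim.x*blockDim.x covers the input.
__global__ void BlockSum(const float *in, float *blockTotals)
{
    __shared__ float s[RBLOCK];
    unsigned int tnum = threadIdx.x;
    unsigned int gid  = blockIdx.x * blockDim.x + threadIdx.x;
    s[tnum] = in[gid];
    for (int offset = 1; offset < blockDim.x; offset *= 2)
    {
        int mask = 2 * offset - 1;
        __syncthreads();
        if ((tnum & mask) == 0)
            s[tnum] += s[tnum + offset];
    }
    __syncthreads();
    if (tnum == 0)
        blockTotals[blockIdx.x] = s[0];
}

int main(void)
{
    const int n = 4 * RBLOCK;                     // four blocks of toy data
    float h_in[n], h_out[4];
    for (int i = 0; i < n; i++) h_in[i] = 1.0f;   // every "beam" counts as a hit
    float *d_in, *d_out;
    cudaMalloc(&d_in,  n * sizeof(float));
    cudaMalloc(&d_out, 4 * sizeof(float));
    cudaMemcpy(d_in, h_in, n * sizeof(float), cudaMemcpyHostToDevice);
    BlockSum<<<4, RBLOCK>>>(d_in, d_out);
    cudaMemcpy(h_out, d_out, 4 * sizeof(float), cudaMemcpyDeviceToHost);
    float total = 0.f;                            // host-side fold of block totals
    for (int i = 0; i < 4; i++) total += h_out[i];
    printf("total = %f (expected %d)\n", total, n);
    cudaFree(d_in); cudaFree(d_out);
    return 0;
}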
65a485e73c7db5d16eb66b7c9faea0e2700d73a4.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /*! \file CTU_2D_cuda.cu * \brief Definitions of the cuda 2D CTU algorithm functions. */ #ifdef CUDA #include<stdio.h> #include<math.h> #include<cuda.h> #include"global.h" #include"global_cuda.h" #include"hydro_cuda.h" #include"CTU_2D_cuda.h" #include"pcm_cuda.h" #include"plmp_cuda.h" #include"plmc_cuda.h" #include"ppmp_cuda.h" #include"ppmc_cuda.h" #include"exact_cuda.h" #include"roe_cuda.h" #include"hllc_cuda.h" #include"h_correction_2D_cuda.h" #include"cooling_cuda.h" #include"subgrid_routines_2D.h" __global__ void Evolve_Interface_States_2D(Real *dev_Q_Lx, Real *dev_Q_Rx, Real *dev_F1_x, Real *dev_Q_Ly, Real *dev_Q_Ry, Real *dev_F1_y, int nx, int ny, int n_ghost, Real dx, Real dy, Real dt, int n_fields); Real CTU_Algorithm_2D_CUDA(Real *host_conserved0, Real *host_conserved1, int nx, int ny, int x_off, int y_off, int n_ghost, Real dx, Real dy, Real xbound, Real ybound, Real dt, int n_fields) { //Here, *host_conserved contains the entire //set of conserved variables on the grid //concatenated into a 1-d array //host_conserved0 contains the values at time n, //host_conserved1 will contain the values at time n+1 // Initialize dt values Real max_dti = 0; #ifdef COOLING_GPU Real min_dt = 1e10; #endif if ( !block_size ) { // calculate the dimensions for each subgrid block sub_dimensions_2D(nx, ny, n_ghost, &nx_s, &ny_s, &block1_tot, &block2_tot, &remainder1, &remainder2, n_fields); //printf("%d %d %d %d %d %d\n", nx_s, ny_s, block1_tot, block2_tot, remainder1, remainder2); nz_s = 1; block_tot = block1_tot*block2_tot; // number of cells in one subgrid block BLOCK_VOL = nx_s*ny_s*nz_s; // dimensions for the 1D GPU grid ngrid = (BLOCK_VOL + TPB - 1) / (TPB); #ifndef DYNAMIC_GPU_ALLOC block_size = true; #endif } // set values for GPU kernels // number of blocks per 1D grid dim3 dim2dGrid(ngrid, 1, 1); //number of threads per 1D block dim3 dim1dBlock(TPB, 1, 1); // Set up pointers for the location to copy from and to if (block_tot == 1) { tmp1 = host_conserved0; tmp2 = host_conserved1; } if ( !memory_allocated ) { // allocate buffer to copy conserved variable blocks from and to if (block_tot > 1) { if ( NULL == ( buffer = (Real *) malloc(n_fields*BLOCK_VOL*sizeof(Real)) ) ) { printf("Failed to allocate CPU buffer.\n"); } tmp1 = buffer; tmp2 = buffer; } // allocate an array on the CPU to hold max_dti returned from each thread block host_dti_array = (Real *) malloc(ngrid*sizeof(Real)); #ifdef COOLING_GPU host_dt_array = (Real *) malloc(ngrid*sizeof(Real)); #endif // allocate memory on the GPU CudaSafeCall( hipMalloc((void**)&dev_conserved, n_fields*BLOCK_VOL*sizeof(Real)) ); CudaSafeCall( hipMalloc((void**)&Q_Lx, n_fields*BLOCK_VOL*sizeof(Real)) ); CudaSafeCall( hipMalloc((void**)&Q_Rx, n_fields*BLOCK_VOL*sizeof(Real)) ); CudaSafeCall( hipMalloc((void**)&Q_Ly, n_fields*BLOCK_VOL*sizeof(Real)) ); CudaSafeCall( hipMalloc((void**)&Q_Ry, n_fields*BLOCK_VOL*sizeof(Real)) ); CudaSafeCall( hipMalloc((void**)&F_x, n_fields*BLOCK_VOL*sizeof(Real)) ); CudaSafeCall( hipMalloc((void**)&F_y, n_fields*BLOCK_VOL*sizeof(Real)) ); CudaSafeCall( hipMalloc((void**)&dev_dti_array, ngrid*sizeof(Real)) ); #ifdef COOLING_GPU CudaSafeCall( hipMalloc((void**)&dev_dt_array, ngrid*sizeof(Real)) ); #endif #ifndef DYNAMIC_GPU_ALLOC // If memory is single allocated: memory_allocated becomes true and succesive timesteps won't allocate memory. 
// If the memory is not single allocated: memory_allocated remains Null and memory is allocated every timestep. memory_allocated = true; #endif } // counter for which block we're on int block = 0; // START LOOP OVER SUBGRID BLOCKS HERE while (block < block_tot) { // copy the conserved variable block to the buffer host_copy_block_2D(nx, ny, nx_s, ny_s, n_ghost, block, block1_tot, block2_tot, remainder1, remainder2, BLOCK_VOL, host_conserved0, buffer, n_fields); // calculate the global x and y offsets of this subgrid block // (only needed for gravitational potential) get_offsets_2D(nx_s, ny_s, n_ghost, x_off, y_off, block, block1_tot, block2_tot, remainder1, remainder2, &x_off_s, &y_off_s); // copy the conserved variables onto the GPU CudaSafeCall( hipMemcpy(dev_conserved, tmp1, n_fields*BLOCK_VOL*sizeof(Real), hipMemcpyHostToDevice) ); // Step 1: Do the reconstruction #ifdef PCM hipLaunchKernelGGL(( PCM_Reconstruction_2D), dim3(dim2dGrid),dim3(dim1dBlock), 0, 0, dev_conserved, Q_Lx, Q_Rx, Q_Ly, Q_Ry, nx_s, ny_s, n_ghost, gama, n_fields); #endif #ifdef PLMP hipLaunchKernelGGL(( PLMP_cuda), dim3(dim2dGrid),dim3(dim1dBlock), 0, 0, dev_conserved, Q_Lx, Q_Rx, nx_s, ny_s, nz_s, n_ghost, dx, dt, gama, 0, n_fields); hipLaunchKernelGGL(( PLMP_cuda), dim3(dim2dGrid),dim3(dim1dBlock), 0, 0, dev_conserved, Q_Ly, Q_Ry, nx_s, ny_s, nz_s, n_ghost, dy, dt, gama, 1, n_fields); #endif #ifdef PLMC hipLaunchKernelGGL(( PLMC_cuda), dim3(dim2dGrid),dim3(dim1dBlock), 0, 0, dev_conserved, Q_Lx, Q_Rx, nx_s, ny_s, nz_s, n_ghost, dx, dt, gama, 0, n_fields); hipLaunchKernelGGL(( PLMC_cuda), dim3(dim2dGrid),dim3(dim1dBlock), 0, 0, dev_conserved, Q_Ly, Q_Ry, nx_s, ny_s, nz_s, n_ghost, dy, dt, gama, 1, n_fields); #endif #ifdef PPMP hipLaunchKernelGGL(( PPMP_cuda), dim3(dim2dGrid),dim3(dim1dBlock), 0, 0, dev_conserved, Q_Lx, Q_Rx, nx_s, ny_s, nz_s, n_ghost, dx, dt, gama, 0, n_fields); hipLaunchKernelGGL(( PPMP_cuda), dim3(dim2dGrid),dim3(dim1dBlock), 0, 0, dev_conserved, Q_Ly, Q_Ry, nx_s, ny_s, nz_s, n_ghost, dy, dt, gama, 1, n_fields); #endif #ifdef PPMC hipLaunchKernelGGL(( PPMC_cuda), dim3(dim2dGrid),dim3(dim1dBlock), 0, 0, dev_conserved, Q_Lx, Q_Rx, nx_s, ny_s, nz_s, n_ghost, dx, dt, gama, 0, n_fields); hipLaunchKernelGGL(( PPMC_cuda), dim3(dim2dGrid),dim3(dim1dBlock), 0, 0, dev_conserved, Q_Ly, Q_Ry, nx_s, ny_s, nz_s, n_ghost, dy, dt, gama, 1, n_fields); #endif CudaCheckError(); // Step 2: Calculate the fluxes #ifdef EXACT hipLaunchKernelGGL(( Calculate_Exact_Fluxes_CUDA), dim3(dim2dGrid),dim3(dim1dBlock), 0, 0, Q_Lx, Q_Rx, F_x, nx_s, ny_s, nz_s, n_ghost, gama, 0, n_fields); hipLaunchKernelGGL(( Calculate_Exact_Fluxes_CUDA), dim3(dim2dGrid),dim3(dim1dBlock), 0, 0, Q_Ly, Q_Ry, F_y, nx_s, ny_s, nz_s, n_ghost, gama, 1, n_fields); #endif #ifdef ROE hipLaunchKernelGGL(( Calculate_Roe_Fluxes_CUDA), dim3(dim2dGrid),dim3(dim1dBlock), 0, 0, Q_Lx, Q_Rx, F_x, nx_s, ny_s, nz_s, n_ghost, gama, 0, n_fields); hipLaunchKernelGGL(( Calculate_Roe_Fluxes_CUDA), dim3(dim2dGrid),dim3(dim1dBlock), 0, 0, Q_Ly, Q_Ry, F_y, nx_s, ny_s, nz_s, n_ghost, gama, 1, n_fields); #endif #ifdef HLLC hipLaunchKernelGGL(( Calculate_HLLC_Fluxes_CUDA), dim3(dim2dGrid),dim3(dim1dBlock), 0, 0, Q_Lx, Q_Rx, F_x, nx_s, ny_s, nz_s, n_ghost, gama, 0, n_fields); hipLaunchKernelGGL(( Calculate_HLLC_Fluxes_CUDA), dim3(dim2dGrid),dim3(dim1dBlock), 0, 0, Q_Ly, Q_Ry, F_y, nx_s, ny_s, nz_s, n_ghost, gama, 1, n_fields); #endif CudaCheckError(); #ifdef CTU // Step 3: Evolve the interface states hipLaunchKernelGGL(( Evolve_Interface_States_2D), 
dim3(dim2dGrid),dim3(dim1dBlock), 0, 0, Q_Lx, Q_Rx, F_x, Q_Ly, Q_Ry, F_y, nx_s, ny_s, n_ghost, dx, dy, dt, n_fields); CudaCheckError(); // Step 4: Calculate the fluxes again #ifdef EXACT hipLaunchKernelGGL(( Calculate_Exact_Fluxes_CUDA), dim3(dim2dGrid),dim3(dim1dBlock), 0, 0, Q_Lx, Q_Rx, F_x, nx_s, ny_s, nz_s, n_ghost, gama, 0, n_fields); hipLaunchKernelGGL(( Calculate_Exact_Fluxes_CUDA), dim3(dim2dGrid),dim3(dim1dBlock), 0, 0, Q_Ly, Q_Ry, F_y, nx_s, ny_s, nz_s, n_ghost, gama, 1, n_fields); #endif #ifdef ROE hipLaunchKernelGGL(( Calculate_Roe_Fluxes_CUDA), dim3(dim2dGrid),dim3(dim1dBlock), 0, 0, Q_Lx, Q_Rx, F_x, nx_s, ny_s, nz_s, n_ghost, gama, 0, n_fields); hipLaunchKernelGGL(( Calculate_Roe_Fluxes_CUDA), dim3(dim2dGrid),dim3(dim1dBlock), 0, 0, Q_Ly, Q_Ry, F_y, nx_s, ny_s, nz_s, n_ghost, gama, 1, n_fields); #endif #ifdef HLLC hipLaunchKernelGGL(( Calculate_HLLC_Fluxes_CUDA), dim3(dim2dGrid),dim3(dim1dBlock), 0, 0, Q_Lx, Q_Rx, F_x, nx_s, ny_s, nz_s, n_ghost, gama, 0, n_fields); hipLaunchKernelGGL(( Calculate_HLLC_Fluxes_CUDA), dim3(dim2dGrid),dim3(dim1dBlock), 0, 0, Q_Ly, Q_Ry, F_y, nx_s, ny_s, nz_s, n_ghost, gama, 1, n_fields); #endif CudaCheckError(); #endif //CTU // Step 5: Update the conserved variable array hipLaunchKernelGGL(( Update_Conserved_Variables_2D), dim3(dim2dGrid),dim3(dim1dBlock), 0, 0, dev_conserved, F_x, F_y, nx_s, ny_s, x_off_s, y_off_s, n_ghost, dx, dy, xbound, ybound, dt, gama, n_fields); CudaCheckError(); // Synchronize the total and internal energy #ifdef DE hipLaunchKernelGGL(( Sync_Energies_2D), dim3(dim2dGrid),dim3(dim1dBlock), 0, 0, dev_conserved, nx_s, ny_s, n_ghost, gama, n_fields); CudaCheckError(); #endif // Apply cooling #ifdef COOLING_GPU hipLaunchKernelGGL(( cooling_kernel), dim3(dim2dGrid),dim3(dim1dBlock), 0, 0, dev_conserved, nx_s, ny_s, nz_s, n_ghost, n_fields, dt, gama, dev_dt_array); CudaCheckError(); #endif // Step 6: Calculate the next timestep hipLaunchKernelGGL(( Calc_dt_2D), dim3(dim2dGrid),dim3(dim1dBlock), 0, 0, dev_conserved, nx_s, ny_s, n_ghost, dx, dy, dev_dti_array, gama); CudaCheckError(); // copy the conserved variable array back to the CPU CudaSafeCall( hipMemcpy(tmp2, dev_conserved, n_fields*BLOCK_VOL*sizeof(Real), hipMemcpyDeviceToHost) ); // copy the updated conserved variable array back into the host_conserved array on the CPU host_return_block_2D(nx, ny, nx_s, ny_s, n_ghost, block, block1_tot, block2_tot, remainder1, remainder2, BLOCK_VOL, host_conserved1, buffer, n_fields); // copy the dti array onto the CPU CudaSafeCall( hipMemcpy(host_dti_array, dev_dti_array, ngrid*sizeof(Real), hipMemcpyDeviceToHost) ); // iterate through to find the maximum inverse dt for this subgrid block for (int i=0; i<ngrid; i++) { max_dti = fmax(max_dti, host_dti_array[i]); } #ifdef COOLING_GPU // copy the dt array from cooling onto the CPU CudaSafeCall( hipMemcpy(host_dt_array, dev_dt_array, ngrid*sizeof(Real), hipMemcpyDeviceToHost) ); // iterate through to find the minimum dt for this subgrid block for (int i=0; i<ngrid; i++) { min_dt = fmin(min_dt, host_dt_array[i]); } //printf("%f %f\n", min_dt, 0.3/max_dti); if (min_dt < 0.3/max_dti) { //printf("%f %f\n", min_dt, 0.3/max_dti); min_dt = fmax(min_dt, 1.0); max_dti = 0.3/min_dt; } #endif // add one to the counter block++; } #ifdef DYNAMIC_GPU_ALLOC // If memory is not single allocated then free the memory every timestep. 
Free_Memory_CTU_2D(); #endif // return the maximum inverse timestep return max_dti; } void Free_Memory_CTU_2D() { // free the CPU memory if (block_tot > 1) free(buffer); free(host_dti_array); #ifdef COOLING_GPU free(host_dt_array); #endif // free the GPU memory hipFree(dev_conserved); hipFree(Q_Lx); hipFree(Q_Rx); hipFree(Q_Ly); hipFree(Q_Ry); hipFree(F_x); hipFree(F_y); hipFree(dev_dti_array); #ifdef COOLING_GPU hipFree(dev_dt_array); #endif } __global__ void Evolve_Interface_States_2D(Real *dev_Q_Lx, Real *dev_Q_Rx, Real *dev_F_x, Real *dev_Q_Ly, Real *dev_Q_Ry, Real *dev_F_y, int nx, int ny, int n_ghost, Real dx, Real dy, Real dt, int n_fields) { Real dtodx = dt/dx; Real dtody = dt/dy; int n_cells = nx*ny; // get a thread ID int blockId = blockIdx.x + blockIdx.y*gridDim.x; int tid = threadIdx.x + blockId * blockDim.x; int yid = tid / nx; int xid = tid - yid*nx; int id = xid + yid*nx; // set the new x interface states if (xid > n_ghost-2 && xid < nx-n_ghost && yid > n_ghost-2 && yid < ny-n_ghost+1) { // left int ipo = xid+1 + yid*nx; int jmo = xid + (yid-1)*nx; int ipojmo = xid+1 + (yid-1)*nx; dev_Q_Lx[ id] += 0.5*dtody*(dev_F_y[ jmo] - dev_F_y[ id]); dev_Q_Lx[ n_cells + id] += 0.5*dtody*(dev_F_y[ n_cells + jmo] - dev_F_y[ n_cells + id]); dev_Q_Lx[2*n_cells + id] += 0.5*dtody*(dev_F_y[2*n_cells + jmo] - dev_F_y[2*n_cells + id]); dev_Q_Lx[3*n_cells + id] += 0.5*dtody*(dev_F_y[3*n_cells + jmo] - dev_F_y[3*n_cells + id]); dev_Q_Lx[4*n_cells + id] += 0.5*dtody*(dev_F_y[4*n_cells + jmo] - dev_F_y[4*n_cells + id]); #ifdef SCALAR for (int i=0; i<NSCALARS; i++) { dev_Q_Lx[(5+i)*n_cells + id] += 0.5*dtody*(dev_F_y[(5+i)*n_cells + jmo] - dev_F_y[(5+i)*n_cells + id]); } #endif #ifdef DE dev_Q_Lx[(n_fields-1)*n_cells + id] += 0.5*dtody*(dev_F_y[(n_fields-1)*n_cells + jmo] - dev_F_y[(n_fields-1)*n_cells + id]); #endif // right dev_Q_Rx[ id] += 0.5*dtody*(dev_F_y[ ipojmo] - dev_F_y[ ipo]); dev_Q_Rx[ n_cells + id] += 0.5*dtody*(dev_F_y[ n_cells + ipojmo] - dev_F_y[ n_cells + ipo]); dev_Q_Rx[2*n_cells + id] += 0.5*dtody*(dev_F_y[2*n_cells + ipojmo] - dev_F_y[2*n_cells + ipo]); dev_Q_Rx[3*n_cells + id] += 0.5*dtody*(dev_F_y[3*n_cells + ipojmo] - dev_F_y[3*n_cells + ipo]); dev_Q_Rx[4*n_cells + id] += 0.5*dtody*(dev_F_y[4*n_cells + ipojmo] - dev_F_y[4*n_cells + ipo]); #ifdef SCALAR for (int i=0; i<NSCALARS; i++) { dev_Q_Rx[(5+i)*n_cells + id] += 0.5*dtody*(dev_F_y[(5+i)*n_cells + ipojmo] - dev_F_y[(5+i)*n_cells + ipo]); } #endif #ifdef DE dev_Q_Rx[(n_fields-1)*n_cells + id] += 0.5*dtody*(dev_F_y[(n_fields-1)*n_cells + ipojmo] - dev_F_y[(n_fields-1)*n_cells + ipo]); #endif } // set the new y interface states if (yid > n_ghost-2 && yid < ny-n_ghost && xid > n_ghost-2 && xid < nx-n_ghost+1) { // left int jpo = xid + (yid+1)*nx; int imo = xid-1 + yid*nx; int jpoimo = xid-1 + (yid+1)*nx; dev_Q_Ly[ id] += 0.5*dtodx*(dev_F_x[ imo] - dev_F_x[ id]); dev_Q_Ly[ n_cells + id] += 0.5*dtodx*(dev_F_x[ n_cells + imo] - dev_F_x[ n_cells + id]); dev_Q_Ly[2*n_cells + id] += 0.5*dtodx*(dev_F_x[2*n_cells + imo] - dev_F_x[2*n_cells + id]); dev_Q_Ly[3*n_cells + id] += 0.5*dtodx*(dev_F_x[3*n_cells + imo] - dev_F_x[3*n_cells + id]); dev_Q_Ly[4*n_cells + id] += 0.5*dtodx*(dev_F_x[4*n_cells + imo] - dev_F_x[4*n_cells + id]); #ifdef SCALAR for (int i=0; i<NSCALARS; i++) { dev_Q_Ly[(5+i)*n_cells + id] += 0.5*dtodx*(dev_F_x[(5+i)*n_cells + imo] - dev_F_x[(5+i)*n_cells + id]); } #endif #ifdef DE dev_Q_Ly[(n_fields-1)*n_cells + id] += 0.5*dtodx*(dev_F_x[(n_fields-1)*n_cells + imo] - dev_F_x[(n_fields-1)*n_cells + id]); #endif // right 
dev_Q_Ry[ id] += 0.5*dtodx*(dev_F_x[ jpoimo] - dev_F_x[ jpo]); dev_Q_Ry[ n_cells + id] += 0.5*dtodx*(dev_F_x[ n_cells + jpoimo] - dev_F_x[ n_cells + jpo]); dev_Q_Ry[2*n_cells + id] += 0.5*dtodx*(dev_F_x[2*n_cells + jpoimo] - dev_F_x[2*n_cells + jpo]); dev_Q_Ry[3*n_cells + id] += 0.5*dtodx*(dev_F_x[3*n_cells + jpoimo] - dev_F_x[3*n_cells + jpo]); dev_Q_Ry[4*n_cells + id] += 0.5*dtodx*(dev_F_x[4*n_cells + jpoimo] - dev_F_x[4*n_cells + jpo]); #ifdef SCALAR for (int i=0; i<NSCALARS; i++) { dev_Q_Ry[(5+i)*n_cells + id] += 0.5*dtodx*(dev_F_x[(5+i)*n_cells + jpoimo] - dev_F_x[(5+i)*n_cells + jpo]); } #endif #ifdef DE dev_Q_Ry[(n_fields-1)*n_cells + id] += 0.5*dtodx*(dev_F_x[(n_fields-1)*n_cells + jpoimo] - dev_F_x[(n_fields-1)*n_cells + jpo]); #endif } } #endif //CUDA
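Evolve_Interface_States_2D flattens the (nx x ny) subgrid onto a 1-D thread index, rebuilds (xid, yid), and then touches the neighbours ipo (i+1, j), jmo (i, j-1) and ipojmo (i+1, j-1) for the transverse half-step on the x interfaces. This host-only sketch repeats that arithmetic so the interior-cell guard can be inspected; the function name and the n_ghost value of 4 are illustrative, not taken from the original.

#include <stdio.h>

// Mirrors the index arithmetic of the kernel's x-interface branch.
static void ShowInterfaceNeighbors(int tid, int nx, int ny, int n_ghost)
{
    int yid = tid / nx;
    int xid = tid - yid * nx;
    int id  = xid + yid * nx;

    // same interior-cell guard as the kernel's x-interface update
    if (xid > n_ghost - 2 && xid < nx - n_ghost &&
        yid > n_ghost - 2 && yid < ny - n_ghost + 1)
    {
        int ipo    = (xid + 1) +  yid      * nx;   // (i+1, j)
        int jmo    =  xid      + (yid - 1) * nx;   // (i,   j-1)
        int ipojmo = (xid + 1) + (yid - 1) * nx;   // (i+1, j-1)
        // Per field, the kernel then applies:
        //   Q_Lx[id] += 0.5*dt/dy*(F_y[jmo]    - F_y[id]);
        //   Q_Rx[id] += 0.5*dt/dy*(F_y[ipojmo] - F_y[ipo]);
        printf("cell %d = (%d,%d): ipo=%d jmo=%d ipojmo=%d\n",
               id, xid, yid, ipo, jmo, ipojmo);
    }
}

int main(void)
{
    int nx = 16, ny = 16, n_ghost = 4;             // illustrative subgrid size
    for (int tid = 0; tid < nx * ny; tid++)
        ShowInterfaceNeighbors(tid, nx, ny, n_ghost);
    return 0;
}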
65a485e73c7db5d16eb66b7c9faea0e2700d73a4.cu
/*! \file CTU_2D_cuda.cu * \brief Definitions of the cuda 2D CTU algorithm functions. */ #ifdef CUDA #include<stdio.h> #include<math.h> #include<cuda.h> #include"global.h" #include"global_cuda.h" #include"hydro_cuda.h" #include"CTU_2D_cuda.h" #include"pcm_cuda.h" #include"plmp_cuda.h" #include"plmc_cuda.h" #include"ppmp_cuda.h" #include"ppmc_cuda.h" #include"exact_cuda.h" #include"roe_cuda.h" #include"hllc_cuda.h" #include"h_correction_2D_cuda.h" #include"cooling_cuda.h" #include"subgrid_routines_2D.h" __global__ void Evolve_Interface_States_2D(Real *dev_Q_Lx, Real *dev_Q_Rx, Real *dev_F1_x, Real *dev_Q_Ly, Real *dev_Q_Ry, Real *dev_F1_y, int nx, int ny, int n_ghost, Real dx, Real dy, Real dt, int n_fields); Real CTU_Algorithm_2D_CUDA(Real *host_conserved0, Real *host_conserved1, int nx, int ny, int x_off, int y_off, int n_ghost, Real dx, Real dy, Real xbound, Real ybound, Real dt, int n_fields) { //Here, *host_conserved contains the entire //set of conserved variables on the grid //concatenated into a 1-d array //host_conserved0 contains the values at time n, //host_conserved1 will contain the values at time n+1 // Initialize dt values Real max_dti = 0; #ifdef COOLING_GPU Real min_dt = 1e10; #endif if ( !block_size ) { // calculate the dimensions for each subgrid block sub_dimensions_2D(nx, ny, n_ghost, &nx_s, &ny_s, &block1_tot, &block2_tot, &remainder1, &remainder2, n_fields); //printf("%d %d %d %d %d %d\n", nx_s, ny_s, block1_tot, block2_tot, remainder1, remainder2); nz_s = 1; block_tot = block1_tot*block2_tot; // number of cells in one subgrid block BLOCK_VOL = nx_s*ny_s*nz_s; // dimensions for the 1D GPU grid ngrid = (BLOCK_VOL + TPB - 1) / (TPB); #ifndef DYNAMIC_GPU_ALLOC block_size = true; #endif } // set values for GPU kernels // number of blocks per 1D grid dim3 dim2dGrid(ngrid, 1, 1); //number of threads per 1D block dim3 dim1dBlock(TPB, 1, 1); // Set up pointers for the location to copy from and to if (block_tot == 1) { tmp1 = host_conserved0; tmp2 = host_conserved1; } if ( !memory_allocated ) { // allocate buffer to copy conserved variable blocks from and to if (block_tot > 1) { if ( NULL == ( buffer = (Real *) malloc(n_fields*BLOCK_VOL*sizeof(Real)) ) ) { printf("Failed to allocate CPU buffer.\n"); } tmp1 = buffer; tmp2 = buffer; } // allocate an array on the CPU to hold max_dti returned from each thread block host_dti_array = (Real *) malloc(ngrid*sizeof(Real)); #ifdef COOLING_GPU host_dt_array = (Real *) malloc(ngrid*sizeof(Real)); #endif // allocate memory on the GPU CudaSafeCall( cudaMalloc((void**)&dev_conserved, n_fields*BLOCK_VOL*sizeof(Real)) ); CudaSafeCall( cudaMalloc((void**)&Q_Lx, n_fields*BLOCK_VOL*sizeof(Real)) ); CudaSafeCall( cudaMalloc((void**)&Q_Rx, n_fields*BLOCK_VOL*sizeof(Real)) ); CudaSafeCall( cudaMalloc((void**)&Q_Ly, n_fields*BLOCK_VOL*sizeof(Real)) ); CudaSafeCall( cudaMalloc((void**)&Q_Ry, n_fields*BLOCK_VOL*sizeof(Real)) ); CudaSafeCall( cudaMalloc((void**)&F_x, n_fields*BLOCK_VOL*sizeof(Real)) ); CudaSafeCall( cudaMalloc((void**)&F_y, n_fields*BLOCK_VOL*sizeof(Real)) ); CudaSafeCall( cudaMalloc((void**)&dev_dti_array, ngrid*sizeof(Real)) ); #ifdef COOLING_GPU CudaSafeCall( cudaMalloc((void**)&dev_dt_array, ngrid*sizeof(Real)) ); #endif #ifndef DYNAMIC_GPU_ALLOC // If memory is single allocated: memory_allocated becomes true and succesive timesteps won't allocate memory. // If the memory is not single allocated: memory_allocated remains Null and memory is allocated every timestep. 
memory_allocated = true; #endif } // counter for which block we're on int block = 0; // START LOOP OVER SUBGRID BLOCKS HERE while (block < block_tot) { // copy the conserved variable block to the buffer host_copy_block_2D(nx, ny, nx_s, ny_s, n_ghost, block, block1_tot, block2_tot, remainder1, remainder2, BLOCK_VOL, host_conserved0, buffer, n_fields); // calculate the global x and y offsets of this subgrid block // (only needed for gravitational potential) get_offsets_2D(nx_s, ny_s, n_ghost, x_off, y_off, block, block1_tot, block2_tot, remainder1, remainder2, &x_off_s, &y_off_s); // copy the conserved variables onto the GPU CudaSafeCall( cudaMemcpy(dev_conserved, tmp1, n_fields*BLOCK_VOL*sizeof(Real), cudaMemcpyHostToDevice) ); // Step 1: Do the reconstruction #ifdef PCM PCM_Reconstruction_2D<<<dim2dGrid,dim1dBlock>>>(dev_conserved, Q_Lx, Q_Rx, Q_Ly, Q_Ry, nx_s, ny_s, n_ghost, gama, n_fields); #endif #ifdef PLMP PLMP_cuda<<<dim2dGrid,dim1dBlock>>>(dev_conserved, Q_Lx, Q_Rx, nx_s, ny_s, nz_s, n_ghost, dx, dt, gama, 0, n_fields); PLMP_cuda<<<dim2dGrid,dim1dBlock>>>(dev_conserved, Q_Ly, Q_Ry, nx_s, ny_s, nz_s, n_ghost, dy, dt, gama, 1, n_fields); #endif #ifdef PLMC PLMC_cuda<<<dim2dGrid,dim1dBlock>>>(dev_conserved, Q_Lx, Q_Rx, nx_s, ny_s, nz_s, n_ghost, dx, dt, gama, 0, n_fields); PLMC_cuda<<<dim2dGrid,dim1dBlock>>>(dev_conserved, Q_Ly, Q_Ry, nx_s, ny_s, nz_s, n_ghost, dy, dt, gama, 1, n_fields); #endif #ifdef PPMP PPMP_cuda<<<dim2dGrid,dim1dBlock>>>(dev_conserved, Q_Lx, Q_Rx, nx_s, ny_s, nz_s, n_ghost, dx, dt, gama, 0, n_fields); PPMP_cuda<<<dim2dGrid,dim1dBlock>>>(dev_conserved, Q_Ly, Q_Ry, nx_s, ny_s, nz_s, n_ghost, dy, dt, gama, 1, n_fields); #endif #ifdef PPMC PPMC_cuda<<<dim2dGrid,dim1dBlock>>>(dev_conserved, Q_Lx, Q_Rx, nx_s, ny_s, nz_s, n_ghost, dx, dt, gama, 0, n_fields); PPMC_cuda<<<dim2dGrid,dim1dBlock>>>(dev_conserved, Q_Ly, Q_Ry, nx_s, ny_s, nz_s, n_ghost, dy, dt, gama, 1, n_fields); #endif CudaCheckError(); // Step 2: Calculate the fluxes #ifdef EXACT Calculate_Exact_Fluxes_CUDA<<<dim2dGrid,dim1dBlock>>>(Q_Lx, Q_Rx, F_x, nx_s, ny_s, nz_s, n_ghost, gama, 0, n_fields); Calculate_Exact_Fluxes_CUDA<<<dim2dGrid,dim1dBlock>>>(Q_Ly, Q_Ry, F_y, nx_s, ny_s, nz_s, n_ghost, gama, 1, n_fields); #endif #ifdef ROE Calculate_Roe_Fluxes_CUDA<<<dim2dGrid,dim1dBlock>>>(Q_Lx, Q_Rx, F_x, nx_s, ny_s, nz_s, n_ghost, gama, 0, n_fields); Calculate_Roe_Fluxes_CUDA<<<dim2dGrid,dim1dBlock>>>(Q_Ly, Q_Ry, F_y, nx_s, ny_s, nz_s, n_ghost, gama, 1, n_fields); #endif #ifdef HLLC Calculate_HLLC_Fluxes_CUDA<<<dim2dGrid,dim1dBlock>>>(Q_Lx, Q_Rx, F_x, nx_s, ny_s, nz_s, n_ghost, gama, 0, n_fields); Calculate_HLLC_Fluxes_CUDA<<<dim2dGrid,dim1dBlock>>>(Q_Ly, Q_Ry, F_y, nx_s, ny_s, nz_s, n_ghost, gama, 1, n_fields); #endif CudaCheckError(); #ifdef CTU // Step 3: Evolve the interface states Evolve_Interface_States_2D<<<dim2dGrid,dim1dBlock>>>(Q_Lx, Q_Rx, F_x, Q_Ly, Q_Ry, F_y, nx_s, ny_s, n_ghost, dx, dy, dt, n_fields); CudaCheckError(); // Step 4: Calculate the fluxes again #ifdef EXACT Calculate_Exact_Fluxes_CUDA<<<dim2dGrid,dim1dBlock>>>(Q_Lx, Q_Rx, F_x, nx_s, ny_s, nz_s, n_ghost, gama, 0, n_fields); Calculate_Exact_Fluxes_CUDA<<<dim2dGrid,dim1dBlock>>>(Q_Ly, Q_Ry, F_y, nx_s, ny_s, nz_s, n_ghost, gama, 1, n_fields); #endif #ifdef ROE Calculate_Roe_Fluxes_CUDA<<<dim2dGrid,dim1dBlock>>>(Q_Lx, Q_Rx, F_x, nx_s, ny_s, nz_s, n_ghost, gama, 0, n_fields); Calculate_Roe_Fluxes_CUDA<<<dim2dGrid,dim1dBlock>>>(Q_Ly, Q_Ry, F_y, nx_s, ny_s, nz_s, n_ghost, gama, 1, n_fields); #endif #ifdef HLLC 
Calculate_HLLC_Fluxes_CUDA<<<dim2dGrid,dim1dBlock>>>(Q_Lx, Q_Rx, F_x, nx_s, ny_s, nz_s, n_ghost, gama, 0, n_fields); Calculate_HLLC_Fluxes_CUDA<<<dim2dGrid,dim1dBlock>>>(Q_Ly, Q_Ry, F_y, nx_s, ny_s, nz_s, n_ghost, gama, 1, n_fields); #endif CudaCheckError(); #endif //CTU // Step 5: Update the conserved variable array Update_Conserved_Variables_2D<<<dim2dGrid,dim1dBlock>>>(dev_conserved, F_x, F_y, nx_s, ny_s, x_off_s, y_off_s, n_ghost, dx, dy, xbound, ybound, dt, gama, n_fields); CudaCheckError(); // Synchronize the total and internal energy #ifdef DE Sync_Energies_2D<<<dim2dGrid,dim1dBlock>>>(dev_conserved, nx_s, ny_s, n_ghost, gama, n_fields); CudaCheckError(); #endif // Apply cooling #ifdef COOLING_GPU cooling_kernel<<<dim2dGrid,dim1dBlock>>>(dev_conserved, nx_s, ny_s, nz_s, n_ghost, n_fields, dt, gama, dev_dt_array); CudaCheckError(); #endif // Step 6: Calculate the next timestep Calc_dt_2D<<<dim2dGrid,dim1dBlock>>>(dev_conserved, nx_s, ny_s, n_ghost, dx, dy, dev_dti_array, gama); CudaCheckError(); // copy the conserved variable array back to the CPU CudaSafeCall( cudaMemcpy(tmp2, dev_conserved, n_fields*BLOCK_VOL*sizeof(Real), cudaMemcpyDeviceToHost) ); // copy the updated conserved variable array back into the host_conserved array on the CPU host_return_block_2D(nx, ny, nx_s, ny_s, n_ghost, block, block1_tot, block2_tot, remainder1, remainder2, BLOCK_VOL, host_conserved1, buffer, n_fields); // copy the dti array onto the CPU CudaSafeCall( cudaMemcpy(host_dti_array, dev_dti_array, ngrid*sizeof(Real), cudaMemcpyDeviceToHost) ); // iterate through to find the maximum inverse dt for this subgrid block for (int i=0; i<ngrid; i++) { max_dti = fmax(max_dti, host_dti_array[i]); } #ifdef COOLING_GPU // copy the dt array from cooling onto the CPU CudaSafeCall( cudaMemcpy(host_dt_array, dev_dt_array, ngrid*sizeof(Real), cudaMemcpyDeviceToHost) ); // iterate through to find the minimum dt for this subgrid block for (int i=0; i<ngrid; i++) { min_dt = fmin(min_dt, host_dt_array[i]); } //printf("%f %f\n", min_dt, 0.3/max_dti); if (min_dt < 0.3/max_dti) { //printf("%f %f\n", min_dt, 0.3/max_dti); min_dt = fmax(min_dt, 1.0); max_dti = 0.3/min_dt; } #endif // add one to the counter block++; } #ifdef DYNAMIC_GPU_ALLOC // If memory is not single allocated then free the memory every timestep. 
Free_Memory_CTU_2D(); #endif // return the maximum inverse timestep return max_dti; } void Free_Memory_CTU_2D() { // free the CPU memory if (block_tot > 1) free(buffer); free(host_dti_array); #ifdef COOLING_GPU free(host_dt_array); #endif // free the GPU memory cudaFree(dev_conserved); cudaFree(Q_Lx); cudaFree(Q_Rx); cudaFree(Q_Ly); cudaFree(Q_Ry); cudaFree(F_x); cudaFree(F_y); cudaFree(dev_dti_array); #ifdef COOLING_GPU cudaFree(dev_dt_array); #endif } __global__ void Evolve_Interface_States_2D(Real *dev_Q_Lx, Real *dev_Q_Rx, Real *dev_F_x, Real *dev_Q_Ly, Real *dev_Q_Ry, Real *dev_F_y, int nx, int ny, int n_ghost, Real dx, Real dy, Real dt, int n_fields) { Real dtodx = dt/dx; Real dtody = dt/dy; int n_cells = nx*ny; // get a thread ID int blockId = blockIdx.x + blockIdx.y*gridDim.x; int tid = threadIdx.x + blockId * blockDim.x; int yid = tid / nx; int xid = tid - yid*nx; int id = xid + yid*nx; // set the new x interface states if (xid > n_ghost-2 && xid < nx-n_ghost && yid > n_ghost-2 && yid < ny-n_ghost+1) { // left int ipo = xid+1 + yid*nx; int jmo = xid + (yid-1)*nx; int ipojmo = xid+1 + (yid-1)*nx; dev_Q_Lx[ id] += 0.5*dtody*(dev_F_y[ jmo] - dev_F_y[ id]); dev_Q_Lx[ n_cells + id] += 0.5*dtody*(dev_F_y[ n_cells + jmo] - dev_F_y[ n_cells + id]); dev_Q_Lx[2*n_cells + id] += 0.5*dtody*(dev_F_y[2*n_cells + jmo] - dev_F_y[2*n_cells + id]); dev_Q_Lx[3*n_cells + id] += 0.5*dtody*(dev_F_y[3*n_cells + jmo] - dev_F_y[3*n_cells + id]); dev_Q_Lx[4*n_cells + id] += 0.5*dtody*(dev_F_y[4*n_cells + jmo] - dev_F_y[4*n_cells + id]); #ifdef SCALAR for (int i=0; i<NSCALARS; i++) { dev_Q_Lx[(5+i)*n_cells + id] += 0.5*dtody*(dev_F_y[(5+i)*n_cells + jmo] - dev_F_y[(5+i)*n_cells + id]); } #endif #ifdef DE dev_Q_Lx[(n_fields-1)*n_cells + id] += 0.5*dtody*(dev_F_y[(n_fields-1)*n_cells + jmo] - dev_F_y[(n_fields-1)*n_cells + id]); #endif // right dev_Q_Rx[ id] += 0.5*dtody*(dev_F_y[ ipojmo] - dev_F_y[ ipo]); dev_Q_Rx[ n_cells + id] += 0.5*dtody*(dev_F_y[ n_cells + ipojmo] - dev_F_y[ n_cells + ipo]); dev_Q_Rx[2*n_cells + id] += 0.5*dtody*(dev_F_y[2*n_cells + ipojmo] - dev_F_y[2*n_cells + ipo]); dev_Q_Rx[3*n_cells + id] += 0.5*dtody*(dev_F_y[3*n_cells + ipojmo] - dev_F_y[3*n_cells + ipo]); dev_Q_Rx[4*n_cells + id] += 0.5*dtody*(dev_F_y[4*n_cells + ipojmo] - dev_F_y[4*n_cells + ipo]); #ifdef SCALAR for (int i=0; i<NSCALARS; i++) { dev_Q_Rx[(5+i)*n_cells + id] += 0.5*dtody*(dev_F_y[(5+i)*n_cells + ipojmo] - dev_F_y[(5+i)*n_cells + ipo]); } #endif #ifdef DE dev_Q_Rx[(n_fields-1)*n_cells + id] += 0.5*dtody*(dev_F_y[(n_fields-1)*n_cells + ipojmo] - dev_F_y[(n_fields-1)*n_cells + ipo]); #endif } // set the new y interface states if (yid > n_ghost-2 && yid < ny-n_ghost && xid > n_ghost-2 && xid < nx-n_ghost+1) { // left int jpo = xid + (yid+1)*nx; int imo = xid-1 + yid*nx; int jpoimo = xid-1 + (yid+1)*nx; dev_Q_Ly[ id] += 0.5*dtodx*(dev_F_x[ imo] - dev_F_x[ id]); dev_Q_Ly[ n_cells + id] += 0.5*dtodx*(dev_F_x[ n_cells + imo] - dev_F_x[ n_cells + id]); dev_Q_Ly[2*n_cells + id] += 0.5*dtodx*(dev_F_x[2*n_cells + imo] - dev_F_x[2*n_cells + id]); dev_Q_Ly[3*n_cells + id] += 0.5*dtodx*(dev_F_x[3*n_cells + imo] - dev_F_x[3*n_cells + id]); dev_Q_Ly[4*n_cells + id] += 0.5*dtodx*(dev_F_x[4*n_cells + imo] - dev_F_x[4*n_cells + id]); #ifdef SCALAR for (int i=0; i<NSCALARS; i++) { dev_Q_Ly[(5+i)*n_cells + id] += 0.5*dtodx*(dev_F_x[(5+i)*n_cells + imo] - dev_F_x[(5+i)*n_cells + id]); } #endif #ifdef DE dev_Q_Ly[(n_fields-1)*n_cells + id] += 0.5*dtodx*(dev_F_x[(n_fields-1)*n_cells + imo] - dev_F_x[(n_fields-1)*n_cells + id]); #endif // 
right dev_Q_Ry[ id] += 0.5*dtodx*(dev_F_x[ jpoimo] - dev_F_x[ jpo]); dev_Q_Ry[ n_cells + id] += 0.5*dtodx*(dev_F_x[ n_cells + jpoimo] - dev_F_x[ n_cells + jpo]); dev_Q_Ry[2*n_cells + id] += 0.5*dtodx*(dev_F_x[2*n_cells + jpoimo] - dev_F_x[2*n_cells + jpo]); dev_Q_Ry[3*n_cells + id] += 0.5*dtodx*(dev_F_x[3*n_cells + jpoimo] - dev_F_x[3*n_cells + jpo]); dev_Q_Ry[4*n_cells + id] += 0.5*dtodx*(dev_F_x[4*n_cells + jpoimo] - dev_F_x[4*n_cells + jpo]); #ifdef SCALAR for (int i=0; i<NSCALARS; i++) { dev_Q_Ry[(5+i)*n_cells + id] += 0.5*dtodx*(dev_F_x[(5+i)*n_cells + jpoimo] - dev_F_x[(5+i)*n_cells + jpo]); } #endif #ifdef DE dev_Q_Ry[(n_fields-1)*n_cells + id] += 0.5*dtodx*(dev_F_x[(n_fields-1)*n_cells + jpoimo] - dev_F_x[(n_fields-1)*n_cells + jpo]); #endif } } #endif //CUDA
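Calc_dt_2D (defined in hydro_cuda.h, not shown here) leaves one partial maximum per block in dev_dti_array, and the subgrid loop above finishes the reduction on the host with fmax. The kernel below is only a generic sketch of that hand-off under the assumption of a shared-memory max reduction with a power-of-two block size; it is not the actual Calc_dt_2D, and Real/MAXTPB here merely stand in for the project's Real and TPB from global.h.

#include <stdio.h>
#include <math.h>
#include <cuda_runtime.h>

typedef double Real;            // stand-in for the Real typedef in global.h
#define MAXTPB 128              // stand-in for TPB; assumed to be a power of two

// One partial maximum per block, written to dti_block[blockIdx.x].
__global__ void BlockMax(const Real *dti_cell, Real *dti_block, int n)
{
    __shared__ Real smax[MAXTPB];
    int tid = threadIdx.x;
    int gid = blockIdx.x * blockDim.x + threadIdx.x;
    smax[tid] = (gid < n) ? dti_cell[gid] : 0.0;
    __syncthreads();
    for (int s = blockDim.x / 2; s > 0; s >>= 1)
    {
        if (tid < s) smax[tid] = fmax(smax[tid], smax[tid + s]);
        __syncthreads();
    }
    if (tid == 0) dti_block[blockIdx.x] = smax[0];
}

int main(void)
{
    const int n = 1000, nblocks = (n + MAXTPB - 1) / MAXTPB;   // same ceiling as ngrid
    Real h_cell[n], h_block[nblocks], max_dti = 0.0;
    for (int i = 0; i < n; i++) h_cell[i] = (Real)(i % 97) / 97.0;
    Real *d_cell, *d_block;
    cudaMalloc(&d_cell,  n * sizeof(Real));
    cudaMalloc(&d_block, nblocks * sizeof(Real));
    cudaMemcpy(d_cell, h_cell, n * sizeof(Real), cudaMemcpyHostToDevice);
    BlockMax<<<nblocks, MAXTPB>>>(d_cell, d_block);
    cudaMemcpy(h_block, d_block, nblocks * sizeof(Real), cudaMemcpyDeviceToHost);
    // host-side fold, as in the subgrid loop above
    for (int i = 0; i < nblocks; i++) max_dti = fmax(max_dti, h_block[i]);
    printf("max_dti = %f\n", max_dti);
    cudaFree(d_cell); cudaFree(d_block);
    return 0;
}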
3128eea1d90a385715ad2f3ee5cd5790bcb3d3b1.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <stdio.h> #include <stdlib.h> #include <time.h> /* to compile : nvcc -o 2039276_Task3_A 2039276_Task3_A.cu to run: ./2039276_Task3_A Sonam Wangdi Sherpa, UID: 2039276 */ __device__ char* CudaCrypt(char* rawPassword){ char * newPassword = (char *) malloc(sizeof(char) * 11); newPassword[0] = rawPassword[0] + 2; newPassword[1] = rawPassword[0] - 2; newPassword[2] = rawPassword[0] + 1; newPassword[3] = rawPassword[1] + 3; newPassword[4] = rawPassword[1] - 3; newPassword[5] = rawPassword[1] - 1; newPassword[6] = rawPassword[2] + 2; newPassword[7] = rawPassword[2] - 2; newPassword[8] = rawPassword[3] + 4; newPassword[9] = rawPassword[3] - 4; newPassword[10] = '\0'; for(int i =0; i<10; i++){ if(i >= 0 && i < 6){ //checking all lower case letter limits if(newPassword[i] > 122){ newPassword[i] = (newPassword[i] - 122) + 97; }else if(newPassword[i] < 97){ newPassword[i] = (97 - newPassword[i]) + 97; } }else{ //checking number section if(newPassword[i] > 57){ newPassword[i] = (newPassword[i] - 57) + 48; }else if(newPassword[i] < 48){ newPassword[i] = (48 - newPassword[i]) + 48; } } } return newPassword; //Returns encrypted password } __device__ int isMatched(char* passA, char* passB){ while(*passA) { if (*passA != *passB) break; //Changing Pointer location passA++; passB++; } // Returing the 0 if the two strings matches return *(const unsigned char*)passA - *(const unsigned char*)passB; } __global__ void crack(char * alphabet, char * numbers){ char genRawPass[4]; //Adding test passwords to genRawPass genRawPass[0] = alphabet[blockIdx.x]; genRawPass[1] = alphabet[blockIdx.y]; genRawPass[2] = numbers[threadIdx.x]; genRawPass[3] = numbers[threadIdx.y]; char password[] = "sa23"; //Raw Password being encrypted char *encPassword = CudaCrypt(password); //Comparing encrypted genRawPass with encPassword if(isMatched(CudaCrypt(genRawPass),encPassword) == 0){ printf("Your password is cracked : %s = %s\n", genRawPass, password); } } int time_difference(struct timespec *start, struct timespec *finish, long long int *difference) { long long int ds = finish->tv_sec - start->tv_sec; long long int dn = finish->tv_nsec - start->tv_nsec; if(dn < 0 ) { ds--; dn += 1000000000; } *difference = ds * 1000000000 + dn; return !(*difference > 0); } int main(int argc, char ** argv){ char cpuAlphabet[26] = {'a','b','c','d','e','f','g','h','i','j','k','l','m','n','o','p','q','r','s','t','u','v','w','x','y','z'}; char cpuNumbers[10] = {'0','1','2','3','4','5','6','7','8','9'}; char * gpuAlphabet; hipMalloc( (void**) &gpuAlphabet, sizeof(char) * 26); hipMemcpy(gpuAlphabet, cpuAlphabet, sizeof(char) * 26, hipMemcpyHostToDevice); char * gpuNumbers; hipMalloc( (void**) &gpuNumbers, sizeof(char) * 10); hipMemcpy(gpuNumbers, cpuNumbers, sizeof(char) * 10, hipMemcpyHostToDevice); char * password; hipMalloc( (void**) &password, sizeof(char) * 26); hipMemcpy(password, argv[1], sizeof(char) * 26, hipMemcpyHostToDevice); struct timespec start, finish; long long int time_elapsed; //Start monitoring the duration clock_gettime(CLOCK_MONOTONIC, &start); hipLaunchKernelGGL(( crack), dim3(dim3(26,26,1)), dim3(dim3(10,10,1)) , 0, 0, gpuAlphabet, gpuNumbers); hipDeviceSynchronize(); //End the duration of the program clock_gettime(CLOCK_MONOTONIC, &finish); //Calculate the duration time_difference(&start, &finish, &time_elapsed); //Print the duration taken printf("Time elapsed was %lldns or %0.9lfs\n", time_elapsed, (time_elapsed/1.0e9)); return 0; }
3128eea1d90a385715ad2f3ee5cd5790bcb3d3b1.cu
#include <stdio.h> #include <stdlib.h> #include <time.h> /* to compile : nvcc -o 2039276_Task3_A 2039276_Task3_A.cu to run: ./2039276_Task3_A Sonam Wangdi Sherpa, UID: 2039276 */ __device__ char* CudaCrypt(char* rawPassword){ char * newPassword = (char *) malloc(sizeof(char) * 11); newPassword[0] = rawPassword[0] + 2; newPassword[1] = rawPassword[0] - 2; newPassword[2] = rawPassword[0] + 1; newPassword[3] = rawPassword[1] + 3; newPassword[4] = rawPassword[1] - 3; newPassword[5] = rawPassword[1] - 1; newPassword[6] = rawPassword[2] + 2; newPassword[7] = rawPassword[2] - 2; newPassword[8] = rawPassword[3] + 4; newPassword[9] = rawPassword[3] - 4; newPassword[10] = '\0'; for(int i =0; i<10; i++){ if(i >= 0 && i < 6){ //checking all lower case letter limits if(newPassword[i] > 122){ newPassword[i] = (newPassword[i] - 122) + 97; }else if(newPassword[i] < 97){ newPassword[i] = (97 - newPassword[i]) + 97; } }else{ //checking number section if(newPassword[i] > 57){ newPassword[i] = (newPassword[i] - 57) + 48; }else if(newPassword[i] < 48){ newPassword[i] = (48 - newPassword[i]) + 48; } } } return newPassword; //Returns encrypted password } __device__ int isMatched(char* passA, char* passB){ while(*passA) { if (*passA != *passB) break; //Changing Pointer location passA++; passB++; } // Returing the 0 if the two strings matches return *(const unsigned char*)passA - *(const unsigned char*)passB; } __global__ void crack(char * alphabet, char * numbers){ char genRawPass[4]; //Adding test passwords to genRawPass genRawPass[0] = alphabet[blockIdx.x]; genRawPass[1] = alphabet[blockIdx.y]; genRawPass[2] = numbers[threadIdx.x]; genRawPass[3] = numbers[threadIdx.y]; char password[] = "sa23"; //Raw Password being encrypted char *encPassword = CudaCrypt(password); //Comparing encrypted genRawPass with encPassword if(isMatched(CudaCrypt(genRawPass),encPassword) == 0){ printf("Your password is cracked : %s = %s\n", genRawPass, password); } } int time_difference(struct timespec *start, struct timespec *finish, long long int *difference) { long long int ds = finish->tv_sec - start->tv_sec; long long int dn = finish->tv_nsec - start->tv_nsec; if(dn < 0 ) { ds--; dn += 1000000000; } *difference = ds * 1000000000 + dn; return !(*difference > 0); } int main(int argc, char ** argv){ char cpuAlphabet[26] = {'a','b','c','d','e','f','g','h','i','j','k','l','m','n','o','p','q','r','s','t','u','v','w','x','y','z'}; char cpuNumbers[10] = {'0','1','2','3','4','5','6','7','8','9'}; char * gpuAlphabet; cudaMalloc( (void**) &gpuAlphabet, sizeof(char) * 26); cudaMemcpy(gpuAlphabet, cpuAlphabet, sizeof(char) * 26, cudaMemcpyHostToDevice); char * gpuNumbers; cudaMalloc( (void**) &gpuNumbers, sizeof(char) * 10); cudaMemcpy(gpuNumbers, cpuNumbers, sizeof(char) * 10, cudaMemcpyHostToDevice); char * password; cudaMalloc( (void**) &password, sizeof(char) * 26); cudaMemcpy(password, argv[1], sizeof(char) * 26, cudaMemcpyHostToDevice); struct timespec start, finish; long long int time_elapsed; //Start monitoring the duration clock_gettime(CLOCK_MONOTONIC, &start); crack<<< dim3(26,26,1), dim3(10,10,1) >>>( gpuAlphabet, gpuNumbers); cudaDeviceSynchronize(); //End the duration of the program clock_gettime(CLOCK_MONOTONIC, &finish); //Calculate the duration time_difference(&start, &finish, &time_elapsed); //Print the duration taken printf("Time elapsed was %lldns or %0.9lfs\n", time_elapsed, (time_elapsed/1.0e9)); return 0; }
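The crack kernel covers the whole aa00-zz99 space by assigning one candidate to each (blockIdx.x, blockIdx.y, threadIdx.x, threadIdx.y) of the dim3(26,26,1) x dim3(10,10,1) launch, i.e. 26*26*10*10 = 67,600 candidates. The host-only cross-check below repeats the CudaCrypt arithmetic verbatim and brute-forces the same space; HostCrypt is an illustrative name, not part of the original program.

#include <stdio.h>
#include <string.h>

// Host copy of the CudaCrypt transform (same offsets and wrap-around rules).
static void HostCrypt(const char raw[4], char out[11])
{
    out[0] = raw[0] + 2;  out[1] = raw[0] - 2;  out[2] = raw[0] + 1;
    out[3] = raw[1] + 3;  out[4] = raw[1] - 3;  out[5] = raw[1] - 1;
    out[6] = raw[2] + 2;  out[7] = raw[2] - 2;
    out[8] = raw[3] + 4;  out[9] = raw[3] - 4;
    out[10] = '\0';
    for (int i = 0; i < 10; i++) {
        if (i < 6) {                      // letter positions wrap inside a-z
            if (out[i] > 122)      out[i] = (out[i] - 122) + 97;
            else if (out[i] < 97)  out[i] = (97 - out[i]) + 97;
        } else {                          // digit positions wrap inside 0-9
            if (out[i] > 57)       out[i] = (out[i] - 57) + 48;
            else if (out[i] < 48)  out[i] = (48 - out[i]) + 48;
        }
    }
}

int main(void)
{
    char target[11], enc[11], cand[5] = {0};
    HostCrypt("sa23", target);            // same hard-coded password as the kernel
    // Same candidate space the GPU launch enumerates: letter, letter, digit, digit.
    for (cand[0] = 'a'; cand[0] <= 'z'; cand[0]++)
      for (cand[1] = 'a'; cand[1] <= 'z'; cand[1]++)
        for (cand[2] = '0'; cand[2] <= '9'; cand[2]++)
          for (cand[3] = '0'; cand[3] <= '9'; cand[3]++) {
              HostCrypt(cand, enc);
              if (strcmp(enc, target) == 0)
                  printf("host reference cracked: %s\n", cand);
          }
    return 0;
}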
f15b709c50dcf0576ba59dda53b48308594f848d.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <stdio.h> #define BLOCK_SIZE 2 int get_max_size (int a, int d) { int temp = a/d; if (a%d != 0) { temp = temp+1; } return temp; } #define gpuErrchk(ans) { gpuAssert((ans), __FILE__, __LINE__); } inline void gpuAssert(hipError_t code, const char *file, int line, bool abort=true) { if (code != hipSuccess) { fprintf(stderr,"GPUassert: %s %s %d\n", hipGetErrorString(code), file, line); if (abort) exit(code); } } __global__ void fixup(unsigned int *input, unsigned int *aux, int len) { unsigned int t = threadIdx.x, start = 2 * blockIdx.x * BLOCK_SIZE; if (blockIdx.x > 0) { if (start + t < len) input[start + t] += aux[blockIdx.x ]; if (start + BLOCK_SIZE + t < len) input[start + BLOCK_SIZE + t] += aux[blockIdx.x ]; } } __global__ void scanPart1 (unsigned int* input, unsigned int* output, unsigned int* aux, int numElems) { extern __shared__ unsigned int sdata[]; //int myGlobalId = blockDim.x*blockIdx.x + threadIdx.x; int myLocalId = threadIdx.x; int start = 2 * blockIdx.x * BLOCK_SIZE; //Each block reads 2*BLOCK_SIZE so idx*this value is total inputs read int lastReadValue = 0; //Input Read if (start + myLocalId < numElems) { sdata[myLocalId] = input[start + myLocalId]; } else { sdata[myLocalId] = 0; } if (start + BLOCK_SIZE + myLocalId < numElems) { sdata[BLOCK_SIZE + myLocalId] = input[start + BLOCK_SIZE + myLocalId]; } else { sdata[BLOCK_SIZE + myLocalId] = 0; } __syncthreads(); lastReadValue = sdata[2*BLOCK_SIZE-1]; //Reduction int stride; for (stride = 1; stride <= BLOCK_SIZE; stride <<= 1) { int index = (myLocalId + 1) * stride * 2 - 1; if (index < 2 * BLOCK_SIZE) sdata[index] += sdata[index - stride]; __syncthreads(); } if (myLocalId == 0) { sdata[2*BLOCK_SIZE-1] = 0; } __syncthreads(); // Post reduction for (stride = BLOCK_SIZE ; stride; stride >>= 1) { int index = (myLocalId + 1) * stride * 2 - 1; //if (index + stride < 2 * BLOCK_SIZE) if (index < 2 * BLOCK_SIZE) { // unsigned int temp = sdata[index+stride]; // sdata[index + stride] += sdata[index]; // sdata[index] = temp; unsigned int temp = sdata[index]; sdata[index] += sdata[index-stride]; sdata[index-stride] = temp; } __syncthreads(); } if (start + myLocalId < numElems) output[start + myLocalId] = sdata[myLocalId]; if (start + BLOCK_SIZE + myLocalId < numElems) output[start + BLOCK_SIZE + myLocalId] = sdata[BLOCK_SIZE + myLocalId]; if (myLocalId == 0 && aux!=NULL) aux[blockIdx.x] = sdata[2 * BLOCK_SIZE - 1] + lastReadValue; } __global__ void splitInput(int compareAndValue, unsigned int* input, unsigned int* output, int maxElements) { int myGlobalId = blockDim.x*blockIdx.x + threadIdx.x; if (myGlobalId >= maxElements) { return; } if(((input[myGlobalId] & compareAndValue)) > 0) { printf("%d. %d & %d is 0\n",myGlobalId,input[myGlobalId],compareAndValue); output[myGlobalId] = 0; } else { printf("%d. %d & %d is 1\n",myGlobalId,input[myGlobalId],compareAndValue); output[myGlobalId] = 1; } printf("%d. 
%d\n",myGlobalId,input[myGlobalId]); } __global__ void possibleLocations (unsigned int* input, unsigned int* input_scan, unsigned int* output, unsigned int numElems, unsigned int compareAndValue) { int myGlobalId = blockDim.x*blockIdx.x + threadIdx.x; int myLocalId = threadIdx.x; int start = 2 * blockIdx.x * BLOCK_SIZE; //Each block reads 2*BLOCK_SIZE so idx*this value is total inputs read int total = input_scan[numElems-1] + (((input[numElems-1] & compareAndValue) > 0)?0:1); printf("Total %d\n",total); if (myLocalId + start < numElems) { //output[myGlobalId] = myGlobalId - input_scan[myGlobalId] + total; output[start + myLocalId] = start + myLocalId - input_scan[start + myLocalId] + total; printf("%d. %d might go to %d\n",start + myLocalId,input[myLocalId + start], output[start + myLocalId]); } if (myLocalId + start + BLOCK_SIZE < numElems) { output[start + myLocalId + BLOCK_SIZE] = start + myLocalId + BLOCK_SIZE - input_scan[start + myLocalId + BLOCK_SIZE] + total ; printf("%d. %d might go to %d\n",start + myLocalId,input[myLocalId + start+BLOCK_SIZE], output[start + myLocalId + BLOCK_SIZE]); } } __global__ void finalLocations ( unsigned int* input, unsigned int* input_scan, unsigned int* input_vals, unsigned int* d_setOneIfOne, unsigned int* output, unsigned int numElems) { int myGlobalId = blockDim.x*blockIdx.x + threadIdx.x; int myLocalId = threadIdx.x; int start = 2 * blockIdx.x * BLOCK_SIZE; //Each block reads 2*BLOCK_SIZE so idx*this value is total inputs read if (myLocalId + start < numElems) { if (d_setOneIfOne[myLocalId + start] == 0) { output[input[myLocalId + start]] = input_vals[myLocalId + start]; printf("%d. %d goes to %d\n",myGlobalId, input_vals[myLocalId + start], input[myLocalId + start]); } else { output[input_scan[myLocalId + start]] = input_vals[myLocalId + start]; printf("%d. %d goes to %d\n",myGlobalId, input_vals[myLocalId + start], input_scan[myLocalId + start]); } } if (myLocalId + start + BLOCK_SIZE < numElems) { if (d_setOneIfOne[myLocalId + start + BLOCK_SIZE] == 0) { output[input[myLocalId + start + BLOCK_SIZE]] = input_vals[myLocalId + start + BLOCK_SIZE]; printf("%d. %d goes to %d\n",myGlobalId, input_vals[myLocalId + start+ BLOCK_SIZE], input[myLocalId + start +BLOCK_SIZE]); } else { output[input_scan[myLocalId + start + BLOCK_SIZE]] = input_vals[myLocalId + start + BLOCK_SIZE]; printf("%d. 
%d goes to %d\n", myGlobalId, input_vals[myLocalId + start+BLOCK_SIZE] , input_scan[myLocalId + start +BLOCK_SIZE]); } } } int main() { unsigned int h_inputVals[10] = {3, 4, 1, 2, 7, 6, 5, 0, 9, 8}; unsigned int numElems = 10; unsigned int h_bins[2]; int histo_size = sizeof(unsigned int)*2; unsigned int* d_inputVals; gpuErrchk(hipMalloc(&d_inputVals, numElems*sizeof(numElems))); gpuErrchk(hipMemcpy(d_inputVals, h_inputVals, numElems*sizeof(numElems), hipMemcpyHostToDevice)); unsigned int* d_bins; gpuErrchk(hipMalloc(&d_bins, histo_size)); unsigned int* d_setOneIfOne; unsigned int* d_possibleLocations; unsigned int* d_finalLocations; unsigned int* d_scan; unsigned int* h_scan; h_scan = (unsigned int*)malloc(numElems*sizeof(numElems)); gpuErrchk(hipMalloc(&d_setOneIfOne, numElems*sizeof(numElems))); gpuErrchk(hipMalloc(&d_scan, numElems*sizeof(numElems))); gpuErrchk(hipMalloc(&d_possibleLocations, numElems*sizeof(numElems))); for (int i=0;i<10;i++) { printf("%d ", h_inputVals[i]); } printf("\n"); unsigned int* h_setOneIfOne; h_setOneIfOne = (unsigned int*)malloc(numElems*sizeof(numElems)); for (int i=0;i<4;i++) { gpuErrchk(hipMalloc(&d_finalLocations, numElems*sizeof(numElems))); printf("Round %d\n",i); gpuErrchk(hipMemset(d_bins, 0, histo_size)); gpuErrchk(hipMemset(d_setOneIfOne,0, numElems*sizeof(numElems))); gpuErrchk(hipMemset(d_scan,0, numElems*sizeof(numElems))); int compareAndValue = 1 << i; int numberThreadPerBlock = 512; dim3 blockDim_si(numberThreadPerBlock); dim3 gridDim_si(get_max_size(numElems,numberThreadPerBlock)); hipLaunchKernelGGL(( splitInput), dim3(gridDim_si),dim3(blockDim_si), 0, 0, compareAndValue, d_inputVals, d_setOneIfOne, numElems); gpuErrchk(hipMemcpy(h_setOneIfOne, d_setOneIfOne, numElems*sizeof(numElems), hipMemcpyDeviceToHost)); for (int i=0;i<10;i++) { printf("%d ", h_setOneIfOne[i]); h_setOneIfOne[i] = 0; } printf("\n"); dim3 blockDim_sp(BLOCK_SIZE); dim3 gridDim_sp(get_max_size(numElems,2*BLOCK_SIZE)); unsigned int* d_aux; unsigned int* d_aux_scan; unsigned int* h_aux; gpuErrchk(hipMalloc(&d_aux, get_max_size(numElems,2*BLOCK_SIZE)*sizeof(unsigned int))); gpuErrchk(hipMalloc(&d_aux_scan, get_max_size(numElems,2*BLOCK_SIZE)*sizeof(unsigned int))); h_aux = (unsigned int*)malloc(get_max_size(numElems,2*BLOCK_SIZE)*sizeof(unsigned int)); // gpuErrchk(hipMemcpy(d_scan, d_setOneIfOne, numElems*sizeof(numElems), hipMemcpyDeviceToDevice)); printf ("Size of Kernel is Grid - %d, Block - %d\n",gridDim_sp.x,blockDim_sp.x); hipLaunchKernelGGL(( scanPart1), dim3(gridDim_sp),dim3(blockDim_sp),BLOCK_SIZE*2*sizeof(unsigned int), 0, d_setOneIfOne,d_scan,d_aux,numElems); gpuErrchk(hipMemcpy(h_scan, d_scan, numElems*sizeof(numElems), hipMemcpyDeviceToHost)); for (int i=0;i<10;i++) { printf("%d ", h_scan[i]); h_scan[i] = 0; } printf("\n"); dim3 blockDim_sp2(get_max_size(numElems,2*BLOCK_SIZE)); gpuErrchk(hipMemcpy(h_aux, d_aux, blockDim_sp2.x*sizeof(unsigned int), hipMemcpyDeviceToHost)); for (int i=0;i<blockDim_sp2.x;i++) { printf("%d ", h_aux[i]); h_aux[i] = 0; } printf("\n"); printf ("Size of Kernel is Grid - 1, Block - %d\n",blockDim_sp2.x); hipLaunchKernelGGL(( scanPart1), dim3(1),dim3(blockDim_sp2),BLOCK_SIZE*2*sizeof(unsigned int), 0, d_aux,d_aux_scan,NULL,blockDim_sp2.x); gpuErrchk(hipMemcpy(h_aux, d_aux_scan, blockDim_sp2.x*sizeof(unsigned int), hipMemcpyDeviceToHost)); for (int i=0;i<blockDim_sp2.x;i++) { printf("%d ", h_aux[i]); h_aux[i] = 0; } printf("\n"); printf ("Size of Kernel is Grid - %d, Block - %d\n",gridDim_sp.x,blockDim_sp.x); hipLaunchKernelGGL(( fixup), 
dim3(gridDim_sp),dim3(blockDim_sp), 0, 0, d_scan,d_aux_scan,numElems); gpuErrchk(hipMemcpy(h_scan, d_scan, numElems*sizeof(numElems), hipMemcpyDeviceToHost)); for (int i=0;i<10;i++) { printf("%d ", h_scan[i]); h_scan[i] = 0; } printf("\n"); printf ("Size of Kernel is Grid - %d, Block - %d\n",gridDim_sp.x,blockDim_sp.x); hipLaunchKernelGGL(( possibleLocations), dim3(gridDim_sp),dim3(blockDim_sp), 0, 0, d_inputVals,d_scan, d_possibleLocations, numElems, compareAndValue); gpuErrchk(hipMemcpy(h_setOneIfOne, d_possibleLocations, numElems*sizeof(numElems), hipMemcpyDeviceToHost)); printf ("Possible Locations are \n"); for (int i=0;i<10;i++) { printf("%d ", h_setOneIfOne[i]); h_setOneIfOne[i] = 0; } printf ("\n"); hipLaunchKernelGGL(( finalLocations), dim3(gridDim_sp),dim3(blockDim_sp), 0, 0, d_possibleLocations,d_scan,d_inputVals, d_setOneIfOne, d_finalLocations,numElems); hipDeviceSynchronize(); gpuErrchk(hipFree(d_inputVals)); d_inputVals = d_finalLocations; gpuErrchk(hipMemcpy(h_setOneIfOne, d_finalLocations, numElems*sizeof(numElems), hipMemcpyDeviceToHost)); printf ("\nFinal Positions are \n"); for (int i=0;i<10;i++) { printf("%d ", h_setOneIfOne[i]); h_setOneIfOne[i] = 0; } printf("\n******************************************\n"); //printf("Histogram Values - %d %d %d %d %d \n", h_bins[0], h_bins[1], h_bins[0]+h_bins[1], numElems, compareAndValue); } gpuErrchk(hipFree(d_bins)); gpuErrchk(hipFree(d_setOneIfOne)); free(h_setOneIfOne); gpuErrchk(hipFree(d_possibleLocations)); free(h_scan); return 0; }
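scanPart1 is a work-efficient (Blelloch-style) block-level exclusive scan: an up-sweep reduction, zeroing of the last shared element, a down-sweep, and each block's total pushed into aux so a second scan plus fixup can stitch the blocks together. After that sequence, d_scan should agree with a plain serial exclusive scan of the split flags. A small host reference follows; HostExclusiveScan is an illustrative name, and the flags shown are the round-0 output of splitInput for h_inputVals = {3,4,1,2,7,6,5,0,9,8} with compareAndValue = 1 (even values get a 1).

#include <stdio.h>

// Serial exclusive prefix sum: reference result for d_scan after
// scanPart1 + scanPart1(aux) + fixup.
static void HostExclusiveScan(const unsigned int *in, unsigned int *out, int n)
{
    unsigned int running = 0;
    for (int i = 0; i < n; i++) {
        out[i] = running;          // exclusive: element i is not yet included
        running += in[i];
    }
}

int main(void)
{
    unsigned int flags[10] = {0, 1, 0, 1, 0, 1, 0, 1, 0, 1};   // round-0 split flags
    unsigned int scan[10];
    HostExclusiveScan(flags, scan, 10);
    for (int i = 0; i < 10; i++) printf("%u ", scan[i]);
    printf("\n");                  // expected: 0 0 1 1 2 2 3 3 4 4  (total = 5 evens)
    return 0;
}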
f15b709c50dcf0576ba59dda53b48308594f848d.cu
#include <stdio.h> #define BLOCK_SIZE 2 int get_max_size (int a, int d) { int temp = a/d; if (a%d != 0) { temp = temp+1; } return temp; } #define gpuErrchk(ans) { gpuAssert((ans), __FILE__, __LINE__); } inline void gpuAssert(cudaError_t code, const char *file, int line, bool abort=true) { if (code != cudaSuccess) { fprintf(stderr,"GPUassert: %s %s %d\n", cudaGetErrorString(code), file, line); if (abort) exit(code); } } __global__ void fixup(unsigned int *input, unsigned int *aux, int len) { unsigned int t = threadIdx.x, start = 2 * blockIdx.x * BLOCK_SIZE; if (blockIdx.x > 0) { if (start + t < len) input[start + t] += aux[blockIdx.x ]; if (start + BLOCK_SIZE + t < len) input[start + BLOCK_SIZE + t] += aux[blockIdx.x ]; } } __global__ void scanPart1 (unsigned int* input, unsigned int* output, unsigned int* aux, int numElems) { extern __shared__ unsigned int sdata[]; //int myGlobalId = blockDim.x*blockIdx.x + threadIdx.x; int myLocalId = threadIdx.x; int start = 2 * blockIdx.x * BLOCK_SIZE; //Each block reads 2*BLOCK_SIZE so idx*this value is total inputs read int lastReadValue = 0; //Input Read if (start + myLocalId < numElems) { sdata[myLocalId] = input[start + myLocalId]; } else { sdata[myLocalId] = 0; } if (start + BLOCK_SIZE + myLocalId < numElems) { sdata[BLOCK_SIZE + myLocalId] = input[start + BLOCK_SIZE + myLocalId]; } else { sdata[BLOCK_SIZE + myLocalId] = 0; } __syncthreads(); lastReadValue = sdata[2*BLOCK_SIZE-1]; //Reduction int stride; for (stride = 1; stride <= BLOCK_SIZE; stride <<= 1) { int index = (myLocalId + 1) * stride * 2 - 1; if (index < 2 * BLOCK_SIZE) sdata[index] += sdata[index - stride]; __syncthreads(); } if (myLocalId == 0) { sdata[2*BLOCK_SIZE-1] = 0; } __syncthreads(); // Post reduction for (stride = BLOCK_SIZE ; stride; stride >>= 1) { int index = (myLocalId + 1) * stride * 2 - 1; //if (index + stride < 2 * BLOCK_SIZE) if (index < 2 * BLOCK_SIZE) { // unsigned int temp = sdata[index+stride]; // sdata[index + stride] += sdata[index]; // sdata[index] = temp; unsigned int temp = sdata[index]; sdata[index] += sdata[index-stride]; sdata[index-stride] = temp; } __syncthreads(); } if (start + myLocalId < numElems) output[start + myLocalId] = sdata[myLocalId]; if (start + BLOCK_SIZE + myLocalId < numElems) output[start + BLOCK_SIZE + myLocalId] = sdata[BLOCK_SIZE + myLocalId]; if (myLocalId == 0 && aux!=NULL) aux[blockIdx.x] = sdata[2 * BLOCK_SIZE - 1] + lastReadValue; } __global__ void splitInput(int compareAndValue, unsigned int* input, unsigned int* output, int maxElements) { int myGlobalId = blockDim.x*blockIdx.x + threadIdx.x; if (myGlobalId >= maxElements) { return; } if(((input[myGlobalId] & compareAndValue)) > 0) { printf("%d. %d & %d is 0\n",myGlobalId,input[myGlobalId],compareAndValue); output[myGlobalId] = 0; } else { printf("%d. %d & %d is 1\n",myGlobalId,input[myGlobalId],compareAndValue); output[myGlobalId] = 1; } printf("%d. 
%d\n",myGlobalId,input[myGlobalId]); } __global__ void possibleLocations (unsigned int* input, unsigned int* input_scan, unsigned int* output, unsigned int numElems, unsigned int compareAndValue) { int myGlobalId = blockDim.x*blockIdx.x + threadIdx.x; int myLocalId = threadIdx.x; int start = 2 * blockIdx.x * BLOCK_SIZE; //Each block reads 2*BLOCK_SIZE so idx*this value is total inputs read int total = input_scan[numElems-1] + (((input[numElems-1] & compareAndValue) > 0)?0:1); printf("Total %d\n",total); if (myLocalId + start < numElems) { //output[myGlobalId] = myGlobalId - input_scan[myGlobalId] + total; output[start + myLocalId] = start + myLocalId - input_scan[start + myLocalId] + total; printf("%d. %d might go to %d\n",start + myLocalId,input[myLocalId + start], output[start + myLocalId]); } if (myLocalId + start + BLOCK_SIZE < numElems) { output[start + myLocalId + BLOCK_SIZE] = start + myLocalId + BLOCK_SIZE - input_scan[start + myLocalId + BLOCK_SIZE] + total ; printf("%d. %d might go to %d\n",start + myLocalId,input[myLocalId + start+BLOCK_SIZE], output[start + myLocalId + BLOCK_SIZE]); } } __global__ void finalLocations ( unsigned int* input, unsigned int* input_scan, unsigned int* input_vals, unsigned int* d_setOneIfOne, unsigned int* output, unsigned int numElems) { int myGlobalId = blockDim.x*blockIdx.x + threadIdx.x; int myLocalId = threadIdx.x; int start = 2 * blockIdx.x * BLOCK_SIZE; //Each block reads 2*BLOCK_SIZE so idx*this value is total inputs read if (myLocalId + start < numElems) { if (d_setOneIfOne[myLocalId + start] == 0) { output[input[myLocalId + start]] = input_vals[myLocalId + start]; printf("%d. %d goes to %d\n",myGlobalId, input_vals[myLocalId + start], input[myLocalId + start]); } else { output[input_scan[myLocalId + start]] = input_vals[myLocalId + start]; printf("%d. %d goes to %d\n",myGlobalId, input_vals[myLocalId + start], input_scan[myLocalId + start]); } } if (myLocalId + start + BLOCK_SIZE < numElems) { if (d_setOneIfOne[myLocalId + start + BLOCK_SIZE] == 0) { output[input[myLocalId + start + BLOCK_SIZE]] = input_vals[myLocalId + start + BLOCK_SIZE]; printf("%d. %d goes to %d\n",myGlobalId, input_vals[myLocalId + start+ BLOCK_SIZE], input[myLocalId + start +BLOCK_SIZE]); } else { output[input_scan[myLocalId + start + BLOCK_SIZE]] = input_vals[myLocalId + start + BLOCK_SIZE]; printf("%d. 
%d goes to %d\n", myGlobalId, input_vals[myLocalId + start+BLOCK_SIZE] , input_scan[myLocalId + start +BLOCK_SIZE]); } } } int main() { unsigned int h_inputVals[10] = {3, 4, 1, 2, 7, 6, 5, 0, 9, 8}; unsigned int numElems = 10; unsigned int h_bins[2]; int histo_size = sizeof(unsigned int)*2; unsigned int* d_inputVals; gpuErrchk(cudaMalloc(&d_inputVals, numElems*sizeof(numElems))); gpuErrchk(cudaMemcpy(d_inputVals, h_inputVals, numElems*sizeof(numElems), cudaMemcpyHostToDevice)); unsigned int* d_bins; gpuErrchk(cudaMalloc(&d_bins, histo_size)); unsigned int* d_setOneIfOne; unsigned int* d_possibleLocations; unsigned int* d_finalLocations; unsigned int* d_scan; unsigned int* h_scan; h_scan = (unsigned int*)malloc(numElems*sizeof(numElems)); gpuErrchk(cudaMalloc(&d_setOneIfOne, numElems*sizeof(numElems))); gpuErrchk(cudaMalloc(&d_scan, numElems*sizeof(numElems))); gpuErrchk(cudaMalloc(&d_possibleLocations, numElems*sizeof(numElems))); for (int i=0;i<10;i++) { printf("%d ", h_inputVals[i]); } printf("\n"); unsigned int* h_setOneIfOne; h_setOneIfOne = (unsigned int*)malloc(numElems*sizeof(numElems)); for (int i=0;i<4;i++) { gpuErrchk(cudaMalloc(&d_finalLocations, numElems*sizeof(numElems))); printf("Round %d\n",i); gpuErrchk(cudaMemset(d_bins, 0, histo_size)); gpuErrchk(cudaMemset(d_setOneIfOne,0, numElems*sizeof(numElems))); gpuErrchk(cudaMemset(d_scan,0, numElems*sizeof(numElems))); int compareAndValue = 1 << i; int numberThreadPerBlock = 512; dim3 blockDim_si(numberThreadPerBlock); dim3 gridDim_si(get_max_size(numElems,numberThreadPerBlock)); splitInput<<<gridDim_si,blockDim_si>>>(compareAndValue, d_inputVals, d_setOneIfOne, numElems); gpuErrchk(cudaMemcpy(h_setOneIfOne, d_setOneIfOne, numElems*sizeof(numElems), cudaMemcpyDeviceToHost)); for (int i=0;i<10;i++) { printf("%d ", h_setOneIfOne[i]); h_setOneIfOne[i] = 0; } printf("\n"); dim3 blockDim_sp(BLOCK_SIZE); dim3 gridDim_sp(get_max_size(numElems,2*BLOCK_SIZE)); unsigned int* d_aux; unsigned int* d_aux_scan; unsigned int* h_aux; gpuErrchk(cudaMalloc(&d_aux, get_max_size(numElems,2*BLOCK_SIZE)*sizeof(unsigned int))); gpuErrchk(cudaMalloc(&d_aux_scan, get_max_size(numElems,2*BLOCK_SIZE)*sizeof(unsigned int))); h_aux = (unsigned int*)malloc(get_max_size(numElems,2*BLOCK_SIZE)*sizeof(unsigned int)); // gpuErrchk(cudaMemcpy(d_scan, d_setOneIfOne, numElems*sizeof(numElems), cudaMemcpyDeviceToDevice)); printf ("Size of Kernel is Grid - %d, Block - %d\n",gridDim_sp.x,blockDim_sp.x); scanPart1<<<gridDim_sp,blockDim_sp,BLOCK_SIZE*2*sizeof(unsigned int)>>> (d_setOneIfOne,d_scan,d_aux,numElems); gpuErrchk(cudaMemcpy(h_scan, d_scan, numElems*sizeof(numElems), cudaMemcpyDeviceToHost)); for (int i=0;i<10;i++) { printf("%d ", h_scan[i]); h_scan[i] = 0; } printf("\n"); dim3 blockDim_sp2(get_max_size(numElems,2*BLOCK_SIZE)); gpuErrchk(cudaMemcpy(h_aux, d_aux, blockDim_sp2.x*sizeof(unsigned int), cudaMemcpyDeviceToHost)); for (int i=0;i<blockDim_sp2.x;i++) { printf("%d ", h_aux[i]); h_aux[i] = 0; } printf("\n"); printf ("Size of Kernel is Grid - 1, Block - %d\n",blockDim_sp2.x); scanPart1<<<1,blockDim_sp2,BLOCK_SIZE*2*sizeof(unsigned int)>>>(d_aux,d_aux_scan,NULL,blockDim_sp2.x); gpuErrchk(cudaMemcpy(h_aux, d_aux_scan, blockDim_sp2.x*sizeof(unsigned int), cudaMemcpyDeviceToHost)); for (int i=0;i<blockDim_sp2.x;i++) { printf("%d ", h_aux[i]); h_aux[i] = 0; } printf("\n"); printf ("Size of Kernel is Grid - %d, Block - %d\n",gridDim_sp.x,blockDim_sp.x); fixup<<<gridDim_sp,blockDim_sp>>>(d_scan,d_aux_scan,numElems); gpuErrchk(cudaMemcpy(h_scan, d_scan, 
numElems*sizeof(numElems), cudaMemcpyDeviceToHost)); for (int i=0;i<10;i++) { printf("%d ", h_scan[i]); h_scan[i] = 0; } printf("\n"); printf ("Size of Kernel is Grid - %d, Block - %d\n",gridDim_sp.x,blockDim_sp.x); possibleLocations<<<gridDim_sp,blockDim_sp>>>(d_inputVals,d_scan, d_possibleLocations, numElems, compareAndValue); gpuErrchk(cudaMemcpy(h_setOneIfOne, d_possibleLocations, numElems*sizeof(numElems), cudaMemcpyDeviceToHost)); printf ("Possible Locations are \n"); for (int i=0;i<10;i++) { printf("%d ", h_setOneIfOne[i]); h_setOneIfOne[i] = 0; } printf ("\n"); finalLocations<<<gridDim_sp,blockDim_sp>>>(d_possibleLocations,d_scan,d_inputVals, d_setOneIfOne, d_finalLocations,numElems); cudaDeviceSynchronize(); gpuErrchk(cudaFree(d_inputVals)); d_inputVals = d_finalLocations; gpuErrchk(cudaMemcpy(h_setOneIfOne, d_finalLocations, numElems*sizeof(numElems), cudaMemcpyDeviceToHost)); printf ("\nFinal Positions are \n"); for (int i=0;i<10;i++) { printf("%d ", h_setOneIfOne[i]); h_setOneIfOne[i] = 0; } printf("\n******************************************\n"); //printf("Histogram Values - %d %d %d %d %d \n", h_bins[0], h_bins[1], h_bins[0]+h_bins[1], numElems, compareAndValue); } gpuErrchk(cudaFree(d_bins)); gpuErrchk(cudaFree(d_setOneIfOne)); free(h_setOneIfOne); gpuErrchk(cudaFree(d_possibleLocations)); free(h_scan); return 0; }
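/*
 * The HIP and CUDA copies above implement one bit-plane of a binary (LSD) radix sort:
 * splitInput flags the elements whose current bit is clear, scanPart1/fixup build an
 * exclusive prefix sum of those flags, and possibleLocations/finalLocations scatter
 * every element to its new slot. The standalone CPU reference below reproduces a
 * single pass and is only a sketch for checking the device output on small inputs;
 * radixPassCPU is not part of the original code.
 */
#include <cstdio>
#include <vector>

// One stable pass over the given bit: elements with the bit clear keep their
// relative order at the front, elements with the bit set follow.
static std::vector<unsigned int> radixPassCPU(const std::vector<unsigned int> &in,
                                              unsigned int bit) {
    std::vector<unsigned int> zeros, ones;
    for (unsigned int v : in) {
        if (v & bit) ones.push_back(v);
        else         zeros.push_back(v);
    }
    zeros.insert(zeros.end(), ones.begin(), ones.end());
    return zeros;
}

int main() {
    std::vector<unsigned int> h = {3, 4, 1, 2, 7, 6, 5, 0, 9, 8};
    for (unsigned int round = 0; round < 4; ++round)    // same 4 rounds as the GPU loop
        h = radixPassCPU(h, 1u << round);
    for (unsigned int v : h) printf("%u ", v);           // expected: 0 1 2 3 4 5 6 7 8 9
    printf("\n");
    return 0;
}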
718e5366fad18ff1e8fe4931c74bfa4b16a41c85.hip
// !!! This is a file automatically generated by hipify!!!
#include <iostream>
#include <algorithm>
#include <vector>
#include <math.h>
#include <numeric>

#include <hip/hip_runtime.h>
#include "rocblas.h"
#include "cusparse_v2.h"

#include "cublas_wrapper.h"
#include "misc.h"
#include "preconditioner.h"
#include "solver.h"

/*
 * test.cu
 *
 * This file serves as a demonstration of how to use the given code and runs tests for the different implementations.
 *
 * @author Simon Schoelly
 */

using namespace std;

int main() {
    int device_count;
    hipError_t err = hipGetDeviceCount(&device_count);
    if (err != hipSuccess || device_count <= 0) {
        cout << "no device found!" << endl;
        abort();
    }

    hipblasHandle_t cublas_handle;
    hipsparseHandle_t cusparse_handle;
    hipblasCreate(&cublas_handle);
    hipsparseCreate(&cusparse_handle);

    const int m = 2048;
    const double alpha = 0.01;
    int max_iter = 10000;
    double tolerance = 0.0000001;

    // 2D problem: right-hand side b and solution x hold m*m entries
    double *b, *x;
    hipMalloc((void **) &b, (m*m)*sizeof(double));
    hipMalloc((void **) &x, (m*m)*sizeof(double));
    device_memset<double>(b, 1.0, m*m);

    int num_iter = solve_with_conjugate_gradient<double>(cublas_handle, cusparse_handle, m, alpha, b, x, max_iter, tolerance, NULL);
    cout << num_iter << " iterations" << endl;

    SpikeThomasPreconditioner<double> preconditioner(8);
    num_iter = solve_with_conjugate_gradient<double>(cublas_handle, cusparse_handle, m, alpha, b, x, max_iter, tolerance, &preconditioner);
    cout << num_iter << " iterations" << endl;

    ThomasPreconditioner<double> preconditioner2;
    num_iter = solve_with_conjugate_gradient<double>(cublas_handle, cusparse_handle, m, alpha, b, x, max_iter, tolerance, &preconditioner2);
    cout << num_iter << " iterations" << endl;

    // 3D problem: all buffers hold m_3d^3 entries, so the fill must match
    double *b_3d, *x_3d;
    int m_3d = 128;
    hipMalloc((void **) &b_3d, (m_3d*m_3d*m_3d)*sizeof(double));
    hipMalloc((void **) &x_3d, (m_3d*m_3d*m_3d)*sizeof(double));
    device_memset<double>(b_3d, 1.0, m_3d*m_3d*m_3d);

    ThomasPreconditioner3D<double> preconditioner_3d;
    num_iter = solve_with_conjugate_gradient3D<double>(cublas_handle, cusparse_handle, m_3d, alpha, b_3d, x_3d, max_iter, tolerance, &preconditioner_3d);
    cout << num_iter << " iterations" << endl;

    return 0;
}
718e5366fad18ff1e8fe4931c74bfa4b16a41c85.cu
#include <iostream>
#include <algorithm>
#include <vector>
#include <math.h>
#include <numeric>

#include <cuda_runtime.h>
#include "cublas_v2.h"
#include "cusparse_v2.h"

#include "cublas_wrapper.h"
#include "misc.h"
#include "preconditioner.h"
#include "solver.h"

/*
 * test.cu
 *
 * This file serves as a demonstration of how to use the given code and runs tests for the different implementations.
 *
 * @author Simon Schoelly
 */

using namespace std;

int main() {
    int device_count;
    cudaError_t err = cudaGetDeviceCount(&device_count);
    if (err != cudaSuccess || device_count <= 0) {
        cout << "no device found!" << endl;
        abort();
    }

    cublasHandle_t cublas_handle;
    cusparseHandle_t cusparse_handle;
    cublasCreate(&cublas_handle);
    cusparseCreate(&cusparse_handle);

    const int m = 2048;
    const double alpha = 0.01;
    int max_iter = 10000;
    double tolerance = 0.0000001;

    // 2D problem: right-hand side b and solution x hold m*m entries
    double *b, *x;
    cudaMalloc((void **) &b, (m*m)*sizeof(double));
    cudaMalloc((void **) &x, (m*m)*sizeof(double));
    device_memset<double>(b, 1.0, m*m);

    int num_iter = solve_with_conjugate_gradient<double>(cublas_handle, cusparse_handle, m, alpha, b, x, max_iter, tolerance, NULL);
    cout << num_iter << " iterations" << endl;

    SpikeThomasPreconditioner<double> preconditioner(8);
    num_iter = solve_with_conjugate_gradient<double>(cublas_handle, cusparse_handle, m, alpha, b, x, max_iter, tolerance, &preconditioner);
    cout << num_iter << " iterations" << endl;

    ThomasPreconditioner<double> preconditioner2;
    num_iter = solve_with_conjugate_gradient<double>(cublas_handle, cusparse_handle, m, alpha, b, x, max_iter, tolerance, &preconditioner2);
    cout << num_iter << " iterations" << endl;

    // 3D problem: all buffers hold m_3d^3 entries, so the fill must match
    double *b_3d, *x_3d;
    int m_3d = 128;
    cudaMalloc((void **) &b_3d, (m_3d*m_3d*m_3d)*sizeof(double));
    cudaMalloc((void **) &x_3d, (m_3d*m_3d*m_3d)*sizeof(double));
    device_memset<double>(b_3d, 1.0, m_3d*m_3d*m_3d);

    ThomasPreconditioner3D<double> preconditioner_3d;
    num_iter = solve_with_conjugate_gradient3D<double>(cublas_handle, cusparse_handle, m_3d, alpha, b_3d, x_3d, max_iter, tolerance, &preconditioner_3d);
    cout << num_iter << " iterations" << endl;

    return 0;
}
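/*
 * The test above relies on device_memset<T>(ptr, value, n) from misc.h, which is not
 * shown in this dump. The sketch below is only an assumption about how such a helper
 * could look (a plain fill kernel with the same (pointer, value, count) signature);
 * the real implementation in misc.h may differ.
 */
#include <cuda_runtime.h>

template <class T>
__global__ void fill_kernel(T *ptr, T value, int n) {
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n) ptr[i] = value;          // one element per thread
}

template <class T>
void device_memset_sketch(T *ptr, T value, int n) {
    const int block = 256;
    const int grid = (n + block - 1) / block;
    fill_kernel<<<grid, block>>>(ptr, value, n);
}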
f6e1a571b85dc3268d1b1cdf260dc8b6221bc5f3.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "kernels_hip.cuh"

// One RNG state per thread; every thread uses the same seed but its own subsequence.
__global__ void setup_kernel(hiprandState_t *worker_state)
{
    int index = threadIdx.x + blockDim.x*blockIdx.x;
    hiprand_init(123456789, index, 0, &worker_state[index]);
}

// Each thread draws m points in the unit square and counts those that fall inside
// the quarter circle; the per-block tally is reduced in shared memory (the cache
// assumes blockDim.x == 256) and accumulated into *count atomically.
__global__ void monti_carlo_pi_kernel(hiprandState_t *worker_state, int *count, int m)
{
    unsigned int index = threadIdx.x + blockDim.x*blockIdx.x;
    __shared__ int cache[256];
    cache[threadIdx.x] = 0;
    __syncthreads();

    unsigned int temp = 0;
    while(temp < m){
        float x = hiprand_uniform(&worker_state[index]);
        float y = hiprand_uniform(&worker_state[index]);
        float r = x*x + y*y;
        if(r <= 1){
            cache[threadIdx.x]++;
        }
        temp++;
    }

    // Reduction
    int i = blockDim.x/2;
    while(i != 0){
        if(threadIdx.x < i){
            cache[threadIdx.x] += cache[threadIdx.x + i];
        }
        i /= 2;
        __syncthreads();
    }

    // Update count
    if(threadIdx.x == 0){
        atomicAdd(count, cache[0]);
    }
}
f6e1a571b85dc3268d1b1cdf260dc8b6221bc5f3.cu
#include "kernels.cuh" __global__ void setup_kernel(curandState *worker_state) { int index = threadIdx.x + blockDim.x*blockIdx.x; curand_init(123456789, index, 0, &worker_state[index]); } __global__ void monti_carlo_pi_kernel(curandState *worker_state, int *count, int m) { unsigned int index = threadIdx.x + blockDim.x*blockIdx.x; __shared__ int cache[256]; cache[threadIdx.x] = 0; __syncthreads(); unsigned int temp = 0; while(temp < m){ float x = curand_uniform(&worker_state[index]); float y = curand_uniform(&worker_state[index]); float r = x*x + y*y; if(r <= 1){ cache[threadIdx.x]++; } temp++; } // Reduction int i = blockDim.x/2; while(i != 0){ if(threadIdx.x < i){ cache[threadIdx.x] += cache[threadIdx.x + i]; } i /= 2; __syncthreads(); } // Update count if(threadIdx.x == 0){ atomicAdd(count, cache[0]); } }
6d2a30052b1b446644ac0cf99b6a355e428c3cff.hip
// !!! This is a file automatically generated by hipify!!! #include <stdio.h> #include <iostream> #include <vector> #include <hip/hip_runtime.h> #include "rocblas.h" #include "CudaKernel.h" //jeah i should use some classes and stuff, but for first it was much simpler //for me to just use some global variables texture<float, 3,hipReadModeElementType> CTtex; hipArray *d_volumeArray = 0; float CTmaxValue = 0; texture<float, 3,hipReadModeElementType> MRtex; hipArray *d_volumeArrayMR = 0; float MRmaxValue = 0; float* device_matrix_ptr = 0; int device_matrix_count = 0; float* device_result = NULL; float* device_output = NULL; static float sizefaktor = 2.0f; int zPowerTwo = 0; void initCuda(){ hipFree(0); } void generateCudaTexture(unsigned short* hostdata, int x, int y, int z, bool CT){ //if i want to use the full size... //i have to use a float buffer since CUDA does not support //the linear filter on int/short textures :( std::vector<float> dataasFloat; float max = 0; for(int i = 0; i < x*y*z;++i){ dataasFloat.push_back((float)hostdata[i]); if(max < dataasFloat[i]){ max = dataasFloat[i]; } } std::vector<float> dataasFloatHalf; float valu = 0; float val = 0; for(int pz = 0; pz < z-1; pz+=2){ for(int py = 0; py < y-1; py+=2){ for(int px = 0; px < x-1; px+=2){ //std::cout << px << " "<< py << " "<< pz << std::endl; val = hostdata[px + py*x + pz * x* y]; val += hostdata[px + py*x + (pz+1)* x* y]; val += hostdata[px + (py+1)*x + pz * x* y]; val += hostdata[px + (py+1)*x + (pz+1) * x* y]; val += hostdata[(px+1) + py*x + pz* x* y]; val += hostdata[(px+1) + py*x + (pz+1)* x* y]; val += hostdata[(px+1) + (py+1)*x + pz* x* y]; val += hostdata[(px+1) + (py+1)*x + (pz+1)* x* y]; val /= 8.0f; dataasFloatHalf.push_back(val); valu += val; } } } //creating the texture, only have the size since i resampled the data const hipExtent extend = make_hipExtent(x/2, y/2, z/2); hipChannelFormatDesc channelDesc = hipCreateChannelDesc<float>(); if(CT){ hipMalloc3DArray(&d_volumeArray, &channelDesc, extend); hipMemcpy3DParms copyParams = {0}; copyParams.srcPtr = make_hipPitchedPtr((void*)&(dataasFloatHalf[0]), extend.width*sizeof(float), extend.width, extend.height); copyParams.dstArray = d_volumeArray; copyParams.extent = extend; copyParams.kind = hipMemcpyHostToDevice; hipMemcpy3D(&copyParams); CTtex.normalized = false; CTtex.filterMode = hipFilterModePoint; CTtex.addressMode[0] = hipAddressModeClamp; CTtex.addressMode[1] = hipAddressModeClamp; CTtex.addressMode[2] = hipAddressModeClamp; CTtex.normalized = true; hipBindTextureToArray(CTtex, d_volumeArray, channelDesc); CTmaxValue = max; std::cout << "CT TEXTURE VALUE "<< (int)valu << std::endl; }else{ hipMalloc3DArray(&d_volumeArrayMR, &channelDesc, extend); hipMemcpy3DParms copyParams = {0}; copyParams.srcPtr = make_hipPitchedPtr((void*)&(dataasFloatHalf[0]), extend.width*sizeof(float), extend.width, extend.height); copyParams.dstArray = d_volumeArrayMR; copyParams.extent = extend; copyParams.kind = hipMemcpyHostToDevice; hipMemcpy3D(&copyParams); MRtex.normalized = false; MRtex.filterMode = hipFilterModePoint; MRtex.addressMode[0] = hipAddressModeClamp; MRtex.addressMode[1] = hipAddressModeClamp; MRtex.addressMode[2] = hipAddressModeClamp; MRtex.normalized = true; hipBindTextureToArray(MRtex, d_volumeArrayMR, channelDesc); MRmaxValue = max; std::cout << "MR TEXTURE VALUE "<< (int)valu << std::endl; } } //DEVICE CODE -------------------------------------------------------- __global__ void substractCTandMR(float* result, const float maxX, const float maxY, const float 
maxZ, const float CTMax, const float MRMax, const float m11, const float m12, const float m13, const float m14, const float m21, const float m22, const float m23, const float m24, const float m31, const float m32, const float m33, const float m34){ __shared__ float sdata[512]; unsigned int x = (blockIdx.x * blockDim.x) + threadIdx.x; unsigned int y = (blockIdx.y * blockDim.y) + threadIdx.y; unsigned int z = (blockIdx.z * blockDim.z) + threadIdx.z; float3 ctPosition; ctPosition.x = (float)(x)/maxX; ctPosition.y = (float)(y)/maxY; ctPosition.z = (float)(z)/maxZ; //read the value float valueCT = tex3D(CTtex,ctPosition.x,ctPosition.y,ctPosition.z); float valueMR = 0.0f; valueCT = valueCT/CTMax; if(valueCT > 0.6f )valueCT = 0; ctPosition.x -= 0.5f; ctPosition.y -= 0.5f; ctPosition.z -= 0.5f; unsigned long index = 0; float subresult = 0; float3 mrPosition; mrPosition.x = ctPosition.x*m11+ctPosition.y*m12+ctPosition.z*m13+1*m14+0.5f; mrPosition.y = ctPosition.x*m21+ctPosition.y*m22+ctPosition.z*m23+1*m24+0.5f; mrPosition.z = ctPosition.x*m31+ctPosition.y*m32+ctPosition.z*m33+1*m34+0.5f; if(mrPosition.x >= 0.0f && mrPosition.x <= 1.0f && mrPosition.y >= 0.0f && mrPosition.y <= 1.0f && mrPosition.z >= 0.0f && mrPosition.z <= 1.0f){ //get value and normalize it valueMR = tex3D(MRtex,mrPosition.x,mrPosition.y,mrPosition.z); valueMR = valueMR/MRMax; } index = 0; index += z*(unsigned int)(maxX*maxY); index += y*(unsigned int)(maxX) + x; subresult = (valueMR-valueCT)*(valueMR-valueCT); //test use the thread reduction of the block to reduce the global memory acces /*int tid = threadIdx.z * blockDim.x *blockDim.y + threadIdx.y * blockDim.x + threadIdx.x; // calculate the 1d index sdata[tid] = subresult; __syncthreads(); for (unsigned int s=blockDim.x*blockDim.y*blockDim.z/2; s>0; s>>=1) { if (tid < s) { sdata[tid] += sdata[tid + s]; } __syncthreads(); } if(tid == 0){ index = blockIdx.z * 32 *32 + blockIdx.y * 32+ blockIdx.x; result[index] =sdata[0]; }*/ result[index] = subresult; } __global__ void reduce(float* g_idata, float* g_odata){ __shared__ float sdata[512]; // each thread loads one element from global to shared mem unsigned int tid = threadIdx.x; unsigned int i = blockIdx.x*(blockDim.x*2) + threadIdx.x; sdata[tid] = g_idata[i] + g_idata[i+blockDim.x]; __syncthreads(); // do reduction in shared mem for (unsigned int s=blockDim.x/2; s>32; s>>=1) { if (tid < s) sdata[tid] += sdata[tid + s]; __syncthreads(); } if (tid < 32) { sdata[tid] += sdata[tid + 32];__syncthreads(); sdata[tid] += sdata[tid + 16];__syncthreads(); sdata[tid] += sdata[tid + 8];__syncthreads(); sdata[tid] += sdata[tid + 4];__syncthreads(); sdata[tid] += sdata[tid + 2];__syncthreads(); sdata[tid] += sdata[tid + 1]; } if(tid == 0){ g_odata[blockIdx.x] = sdata[0]; } } // HOST CODE ----------------------------------------------------------------------- static int count = 13; static std::vector<float> result_vector; void initDevice(int x, int y, int z){ zPowerTwo = 1; while(::pow(2,zPowerTwo) < z){ zPowerTwo +=1; } zPowerTwo = ::pow(2,zPowerTwo); if(device_result == NULL) hipMalloc((void**) &device_result, sizeof(float)*x*y*zPowerTwo); if(device_output == NULL) hipMalloc((void**) &device_output, sizeof(float)*x*y*zPowerTwo); result_vector.resize(count); } float* result = NULL; float sumReduce(int x, int y, int z){ int arraySize = x*y*zPowerTwo; float* temp = NULL; const int threads = 512; if(result == NULL) result = new float[threads]; while(arraySize >= threads){ arraySize = arraySize/threads/2; hipLaunchKernelGGL(( reduce), 
dim3(arraySize/2),dim3(threads), 0, 0, device_result,device_output); temp = device_output; device_output = device_result; device_result = temp; } hipMemcpy(&(result[0]), temp, sizeof(float)*arraySize, hipMemcpyDeviceToHost); float resultVal = 0; for(int i = 0; i < arraySize;++i){ resultVal += result[i]; } return resultVal; } const std::vector<float>& step(int x, int y, int z,float* matrix){ dim3 threadsPerBlock(8, 8, 8); dim3 numBlocks( x/threadsPerBlock.x, y/threadsPerBlock.y, z/threadsPerBlock.z); int matInd = 0; for(int i = 0; i < count;++i){ matInd = i*16; hipLaunchKernelGGL(( substractCTandMR), dim3(numBlocks),dim3(threadsPerBlock), 0, 0, device_result, (float)x, (float)y, (float)z, CTmaxValue, MRmaxValue, matrix[matInd],matrix[matInd+4],matrix[matInd+8],matrix[matInd+12], matrix[matInd+1],matrix[matInd+5],matrix[matInd+9],matrix[matInd+13], matrix[matInd+2],matrix[matInd+6],matrix[matInd+10],matrix[matInd+14]); hipDeviceSynchronize(); result_vector[i] = sumReduce(x,y,z); hipDeviceSynchronize(); } return result_vector; }
6d2a30052b1b446644ac0cf99b6a355e428c3cff.cu
#include <stdio.h> #include <iostream> #include <vector> #include <cuda_runtime.h> #include "cublas_v2.h" #include "CudaKernel.h" //jeah i should use some classes and stuff, but for first it was much simpler //for me to just use some global variables texture<float, 3,cudaReadModeElementType> CTtex; cudaArray *d_volumeArray = 0; float CTmaxValue = 0; texture<float, 3,cudaReadModeElementType> MRtex; cudaArray *d_volumeArrayMR = 0; float MRmaxValue = 0; float* device_matrix_ptr = 0; int device_matrix_count = 0; float* device_result = NULL; float* device_output = NULL; static float sizefaktor = 2.0f; int zPowerTwo = 0; void initCuda(){ cudaFree(0); } void generateCudaTexture(unsigned short* hostdata, int x, int y, int z, bool CT){ //if i want to use the full size... //i have to use a float buffer since CUDA does not support //the linear filter on int/short textures :( std::vector<float> dataasFloat; float max = 0; for(int i = 0; i < x*y*z;++i){ dataasFloat.push_back((float)hostdata[i]); if(max < dataasFloat[i]){ max = dataasFloat[i]; } } std::vector<float> dataasFloatHalf; float valu = 0; float val = 0; for(int pz = 0; pz < z-1; pz+=2){ for(int py = 0; py < y-1; py+=2){ for(int px = 0; px < x-1; px+=2){ //std::cout << px << " "<< py << " "<< pz << std::endl; val = hostdata[px + py*x + pz * x* y]; val += hostdata[px + py*x + (pz+1)* x* y]; val += hostdata[px + (py+1)*x + pz * x* y]; val += hostdata[px + (py+1)*x + (pz+1) * x* y]; val += hostdata[(px+1) + py*x + pz* x* y]; val += hostdata[(px+1) + py*x + (pz+1)* x* y]; val += hostdata[(px+1) + (py+1)*x + pz* x* y]; val += hostdata[(px+1) + (py+1)*x + (pz+1)* x* y]; val /= 8.0f; dataasFloatHalf.push_back(val); valu += val; } } } //creating the texture, only have the size since i resampled the data const cudaExtent extend = make_cudaExtent(x/2, y/2, z/2); cudaChannelFormatDesc channelDesc = cudaCreateChannelDesc<float>(); if(CT){ cudaMalloc3DArray(&d_volumeArray, &channelDesc, extend); cudaMemcpy3DParms copyParams = {0}; copyParams.srcPtr = make_cudaPitchedPtr((void*)&(dataasFloatHalf[0]), extend.width*sizeof(float), extend.width, extend.height); copyParams.dstArray = d_volumeArray; copyParams.extent = extend; copyParams.kind = cudaMemcpyHostToDevice; cudaMemcpy3D(&copyParams); CTtex.normalized = false; CTtex.filterMode = cudaFilterModePoint; CTtex.addressMode[0] = cudaAddressModeClamp; CTtex.addressMode[1] = cudaAddressModeClamp; CTtex.addressMode[2] = cudaAddressModeClamp; CTtex.normalized = true; cudaBindTextureToArray(CTtex, d_volumeArray, channelDesc); CTmaxValue = max; std::cout << "CT TEXTURE VALUE "<< (int)valu << std::endl; }else{ cudaMalloc3DArray(&d_volumeArrayMR, &channelDesc, extend); cudaMemcpy3DParms copyParams = {0}; copyParams.srcPtr = make_cudaPitchedPtr((void*)&(dataasFloatHalf[0]), extend.width*sizeof(float), extend.width, extend.height); copyParams.dstArray = d_volumeArrayMR; copyParams.extent = extend; copyParams.kind = cudaMemcpyHostToDevice; cudaMemcpy3D(&copyParams); MRtex.normalized = false; MRtex.filterMode = cudaFilterModePoint; MRtex.addressMode[0] = cudaAddressModeClamp; MRtex.addressMode[1] = cudaAddressModeClamp; MRtex.addressMode[2] = cudaAddressModeClamp; MRtex.normalized = true; cudaBindTextureToArray(MRtex, d_volumeArrayMR, channelDesc); MRmaxValue = max; std::cout << "MR TEXTURE VALUE "<< (int)valu << std::endl; } } //DEVICE CODE -------------------------------------------------------- __global__ void substractCTandMR(float* result, const float maxX, const float maxY, const float maxZ, const float CTMax, const 
float MRMax, const float m11, const float m12, const float m13, const float m14, const float m21, const float m22, const float m23, const float m24, const float m31, const float m32, const float m33, const float m34){ __shared__ float sdata[512]; unsigned int x = (blockIdx.x * blockDim.x) + threadIdx.x; unsigned int y = (blockIdx.y * blockDim.y) + threadIdx.y; unsigned int z = (blockIdx.z * blockDim.z) + threadIdx.z; float3 ctPosition; ctPosition.x = (float)(x)/maxX; ctPosition.y = (float)(y)/maxY; ctPosition.z = (float)(z)/maxZ; //read the value float valueCT = tex3D(CTtex,ctPosition.x,ctPosition.y,ctPosition.z); float valueMR = 0.0f; valueCT = valueCT/CTMax; if(valueCT > 0.6f )valueCT = 0; ctPosition.x -= 0.5f; ctPosition.y -= 0.5f; ctPosition.z -= 0.5f; unsigned long index = 0; float subresult = 0; float3 mrPosition; mrPosition.x = ctPosition.x*m11+ctPosition.y*m12+ctPosition.z*m13+1*m14+0.5f; mrPosition.y = ctPosition.x*m21+ctPosition.y*m22+ctPosition.z*m23+1*m24+0.5f; mrPosition.z = ctPosition.x*m31+ctPosition.y*m32+ctPosition.z*m33+1*m34+0.5f; if(mrPosition.x >= 0.0f && mrPosition.x <= 1.0f && mrPosition.y >= 0.0f && mrPosition.y <= 1.0f && mrPosition.z >= 0.0f && mrPosition.z <= 1.0f){ //get value and normalize it valueMR = tex3D(MRtex,mrPosition.x,mrPosition.y,mrPosition.z); valueMR = valueMR/MRMax; } index = 0; index += z*(unsigned int)(maxX*maxY); index += y*(unsigned int)(maxX) + x; subresult = (valueMR-valueCT)*(valueMR-valueCT); //test use the thread reduction of the block to reduce the global memory acces /*int tid = threadIdx.z * blockDim.x *blockDim.y + threadIdx.y * blockDim.x + threadIdx.x; // calculate the 1d index sdata[tid] = subresult; __syncthreads(); for (unsigned int s=blockDim.x*blockDim.y*blockDim.z/2; s>0; s>>=1) { if (tid < s) { sdata[tid] += sdata[tid + s]; } __syncthreads(); } if(tid == 0){ index = blockIdx.z * 32 *32 + blockIdx.y * 32+ blockIdx.x; result[index] =sdata[0]; }*/ result[index] = subresult; } __global__ void reduce(float* g_idata, float* g_odata){ __shared__ float sdata[512]; // each thread loads one element from global to shared mem unsigned int tid = threadIdx.x; unsigned int i = blockIdx.x*(blockDim.x*2) + threadIdx.x; sdata[tid] = g_idata[i] + g_idata[i+blockDim.x]; __syncthreads(); // do reduction in shared mem for (unsigned int s=blockDim.x/2; s>32; s>>=1) { if (tid < s) sdata[tid] += sdata[tid + s]; __syncthreads(); } if (tid < 32) { sdata[tid] += sdata[tid + 32];__syncthreads(); sdata[tid] += sdata[tid + 16];__syncthreads(); sdata[tid] += sdata[tid + 8];__syncthreads(); sdata[tid] += sdata[tid + 4];__syncthreads(); sdata[tid] += sdata[tid + 2];__syncthreads(); sdata[tid] += sdata[tid + 1]; } if(tid == 0){ g_odata[blockIdx.x] = sdata[0]; } } // HOST CODE ----------------------------------------------------------------------- static int count = 13; static std::vector<float> result_vector; void initDevice(int x, int y, int z){ zPowerTwo = 1; while(std::pow(2,zPowerTwo) < z){ zPowerTwo +=1; } zPowerTwo = std::pow(2,zPowerTwo); if(device_result == NULL) cudaMalloc((void**) &device_result, sizeof(float)*x*y*zPowerTwo); if(device_output == NULL) cudaMalloc((void**) &device_output, sizeof(float)*x*y*zPowerTwo); result_vector.resize(count); } float* result = NULL; float sumReduce(int x, int y, int z){ int arraySize = x*y*zPowerTwo; float* temp = NULL; const int threads = 512; if(result == NULL) result = new float[threads]; while(arraySize >= threads){ arraySize = arraySize/threads/2; reduce<<<arraySize/2,threads>>>(device_result,device_output); 
temp = device_output; device_output = device_result; device_result = temp; } cudaMemcpy(&(result[0]), temp, sizeof(float)*arraySize, cudaMemcpyDeviceToHost); float resultVal = 0; for(int i = 0; i < arraySize;++i){ resultVal += result[i]; } return resultVal; } const std::vector<float>& step(int x, int y, int z,float* matrix){ dim3 threadsPerBlock(8, 8, 8); dim3 numBlocks( x/threadsPerBlock.x, y/threadsPerBlock.y, z/threadsPerBlock.z); int matInd = 0; for(int i = 0; i < count;++i){ matInd = i*16; substractCTandMR<<<numBlocks,threadsPerBlock>>>(device_result, (float)x, (float)y, (float)z, CTmaxValue, MRmaxValue, matrix[matInd],matrix[matInd+4],matrix[matInd+8],matrix[matInd+12], matrix[matInd+1],matrix[matInd+5],matrix[matInd+9],matrix[matInd+13], matrix[matInd+2],matrix[matInd+6],matrix[matInd+10],matrix[matInd+14]); cudaDeviceSynchronize(); result_vector[i] = sumReduce(x,y,z); cudaDeviceSynchronize(); } return result_vector; }
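/*
 * sumReduce above folds the x*y*zPowerTwo buffer with several passes of the reduce
 * kernel, which is easy to get wrong (block counts, the power-of-two padding in
 * zPowerTwo). A quick way to sanity-check that path is to sum the same device buffer
 * with Thrust and compare. This is only a sketch that assumes Thrust is available
 * next to the CUDA toolkit; it is not part of the original registration code.
 */
#include <thrust/reduce.h>
#include <thrust/execution_policy.h>

// Reference sum of n floats living in device memory.
float referenceSum(const float *d_data, int n) {
    return thrust::reduce(thrust::device, d_data, d_data + n, 0.0f);
}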
2a0e657ec8725a7ba1223cf9551804a36c08600a.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /*********************************************************************//** * @file * * @section LICENCE * * Mathematica source file * * Copyright 1986 through 2010 by Wolfram Research Inc. * * @section DESCRIPTION * * * * $Id$ ************************************************************************/ /* * Copyright 1993-2010 NVIDIA Corporation. All rights reserved. * * NVIDIA Corporation and its licensors retain all intellectual property and * proprietary rights in and to this software and related documentation. * Any use, reproduction, disclosure, or distribution of this software * and related documentation without an express license agreement from * NVIDIA Corporation is strictly prohibited. * * Please refer to the applicable NVIDIA end user license agreement (EULA) * associated with this source code for terms and conditions that govern * your use of this NVIDIA software. * */ #include <wgl.h> #include <wgl_cuda_runtime.h> #include <iostream> #include <assert.h> using namespace std; #ifndef __func__ # if defined(__FUNCTION__) # define __func__ __FUNCTION__ # elif defined(__PRETTY_FUNCTION__) # define __func__ __PRETTY_FUNCTION__ # else # define __func__ __FILE__ # endif #else # define __func__ "unknown" #endif #ifdef DEBUG #if PRINT_DEBUG_LINE_NUMBERSQ #define PRINT_DBG_LINENO \ std::cout << "--- On line "<< __LINE__ << \ " in " << __FILE__ << " ---" << std::endl #define PRINT_DBG_END \ std::cout << std::endl << "----" << std::endl #else #define PRINT_DBG_LINENO #define PRINT_DBG_END \ std::cout << std::endl #endif /* PRINT_DEBUG_LINE_NUMBERSQ */ #define DEBUG_MSG(...) \ PRINT_DBG_LINENO; \ std::cout << "=== " << __VA_ARGS__; \ std::cout << " ==="; \ PRINT_DBG_END #else #define DEBUG_MSG(...) 
#endif #ifndef New #define New(to, type, n) to = (type *) wglData->alloc(n); assert(to != NULL) #endif /* New */ #ifndef Free #define Free(ptr) wglData->free(ptr); ptr = NULL #endif /* New */ #ifdef CONFIG_USE_DOUBLE_PRECISION #define Real_t double #define WGL_Real_t WGL_Type_Double #define CUDA_Runtime_getDeviceMemoryAsReal CUDA_Runtime_getDeviceMemoryAsDouble #else #define Real_t float #define WGL_Real_t WGL_Type_Float #define CUDA_Runtime_getDeviceMemoryAsReal CUDA_Runtime_getDeviceMemoryAsFloat #endif #define wglState (wglData->state) #define wglErr (wglData->getError(wglData)) #define WGL_SuccessQ (wglErr->code == WGL_Success) #define WGL_FailQ (!WGL_SuccessQ) #define WGL_Type_RealQ(mem) ((mem)->type == WGL_Real_t) #define WGL_SAFE_CALL(stmt, jmp) stmt; if (WGL_FailQ) { goto jmp; } #if CONFIG_USE_DOUBLE_PRECISION #define _exp(x) exp(static_cast<Real_t>(x)) #define _abs(x) abs(static_cast<Real_t>(x)) #define _log(x) log(static_cast<Real_t>(x)) #define _log10(x) log10(static_cast<Real_t>(x)) #define _sqrt(x) sqrt(static_cast<Real_t>(x)) #define _fmin(x) fmin(static_cast<Real_t>(x), static_cast<Real_t>(x)) #else /* CONFIG_USE_DOUBLE_PRECISION */ #define _exp(x) expf(static_cast<Real_t>(x)) #define _abs(x) fabs(static_cast<Real_t>(x)) #define _log(x) logf(static_cast<Real_t>(x)) #define _log10(x) log10f(static_cast<Real_t>(x)) #define _sqrt(x) sqrtf(static_cast<Real_t>(x)) #define _fmin(x) fminf(static_cast<Real_t>(x), static_cast<Real_t>(x)) #endif /* CONFIG_USE_DOUBLE_PRECISION */ /****************************************************/ /* Black Scholes / Analytic Options Pricing */ /****************************************************/ /** * Original code is under * ${basedir}/ExtraComponents/CUDA_SDK/3.0/Linux-x86-64/C/src/BlackScholes **/ // toCalculate Defines: #define VALUE 0 //* #define DELTA 1 //* #define VEGA 2 //* #define THETA 3 //* #define RHO 4 //* #define GAMMA 5 //* These are the values calculated by FinancialDerivative, so highest priority. #define VANNA 6 // #define CHARM 7 // #define VOMMA 8 // #define DVEGADTIME 9 // #define SPEED 10 // #define ZOMMA 11 // Everything with a comment after is supported thus far #define COLOR 12 // // OptionType defines #define EUROPEAN 100 #define AMERICAN 101 #define ASIAN 102 #define BARRIERUPIN 103 #define BARRIERDOWNIN 104 #define BARRIERUPOUT 105 #define BARRIERDOWNOUT 106 #define LOOKBACKFIXED 107 #define LOOKBACKFLOATING 108 #define ASIANGEOMETRIC 109 WolframGPULibraryData wglData = NULL; // Approximate cumulative normal distribution function with a polynomial __device__ inline Real_t cndGPU(Real_t d) { const Real_t A1 = static_cast<Real_t>(0.31938153); const Real_t A2 = static_cast<Real_t>(-0.356563782); const Real_t A3 = static_cast<Real_t>(1.781477937); const Real_t A4 = static_cast<Real_t>(-1.821255978); const Real_t A5 = static_cast<Real_t>(1.330274429); const Real_t RSQRT2PI = static_cast<Real_t>(0.39894228040143267793994605993438); Real_t K = static_cast<Real_t>(1.0 / (1.0 + 0.2316419 * _abs(d))); Real_t cnd = RSQRT2PI * _exp(-static_cast<Real_t>(0.5) * d * d) * (K * (A1 + K * (A2 + K * (A3 + K * (A4 + K * A5))))); if(d > 0) cnd = 1 - cnd; return cnd; } __device__ inline Real_t pndGPU(Real_t d) { // Do something like above eventually? 
const Real_t RSQRT2PI = static_cast<Real_t>(0.39894228040143267793994605993438); const Real_t dsqby2 = d*d*static_cast<Real_t>(0.5); return _exp(-dsqby2)*RSQRT2PI; } //Computes CallResult and PutResult // S: spot price, X: strike Price, T: Expiration, R: Risk free interest rate, D: Dividend, V: Volatility __device__ inline void BlackScholesValueGPU(Real_t& CallResult, Real_t& PutResult, Real_t S, Real_t X, Real_t T, Real_t R, Real_t D, Real_t V) { Real_t sqrtT, expRT, expDT; Real_t d1, d2, CNDD1, CNDD2; sqrtT = _sqrt(T); d1 = (_log(S / X) + (R - D + static_cast<Real_t>(0.5) * V * V) * T) / (V * sqrtT); d2 = d1 - V * sqrtT; CNDD1 = cndGPU(d1); CNDD2 = cndGPU(d2); expRT = _exp(-R * T); expDT = _exp(-D * T); CallResult = S * expDT * CNDD1 - X * expRT * CNDD2; PutResult = X * expRT * (static_cast<Real_t>(1.0) - CNDD2) - S * expDT * (static_cast<Real_t>(1.0) - CNDD1); } // S: spot price, X: strike Price, T: Expiration, R: Risk free interest rate, D: Dividend, V: Volatility __device__ inline void BlackScholesDeltaGPU(Real_t& CallResult, Real_t& PutResult, Real_t S, Real_t X, Real_t T, Real_t R, Real_t D, Real_t V) { Real_t expDT; Real_t d1, CNDD1; d1 = (_log(S / X) + (R - D + static_cast<Real_t>(0.5) * V * V) * T) / (V * _sqrt(T)); CNDD1 = cndGPU(d1); expDT = _exp(-D*T); CallResult = expDT * CNDD1; PutResult = expDT * (CNDD1 - static_cast<Real_t>(1.0)); } // S: spot price, X: strike Price, T: Expiration, R: Risk free interest rate, D: Dividend, V: Volatility __device__ inline void BlackScholesVegaGPU(Real_t& CallResult, Real_t& PutResult, Real_t S, Real_t X, Real_t T, Real_t R, Real_t D, Real_t V) { Real_t expDT; Real_t d1, PNDD1; d1 = (_log(S / X) + (R - D + static_cast<Real_t>(0.5) * V * V) * T) / (V * _sqrt(T)); PNDD1 = pndGPU(d1); expDT = _exp(-D*T); CallResult = S * _sqrt(T) * expDT * PNDD1; PutResult = CallResult; } // S: spot price, X: strike Price, T: Expiration, R: Risk free interest rate, D: Dividend, V: Volatility __device__ inline void BlackScholesThetaGPU(Real_t& CallResult, Real_t& PutResult, Real_t S, Real_t X, Real_t T, Real_t R, Real_t D, Real_t V) { Real_t invSqrtT, sqrtT, expDT, expRT; Real_t d1, d2, CNDD1, CNDD2, CNDnD1, CNDnD2, PNDD1; invSqrtT = rsqrtf(T); sqrtT = static_cast<Real_t>(1.0)/invSqrtT; d1 = (_log(S / X) + (R - D + static_cast<Real_t>(0.5) * V * V) * T) / (V * sqrtT); d2 = d1 - V * sqrtT; PNDD1 = pndGPU(d1); CNDD1 = cndGPU(d1); CNDD2 = cndGPU(d2); CNDnD1 = cndGPU(-d1); CNDnD2 = cndGPU(-d2); expDT = _exp(-D*T); expRT = _exp(-R*T); CallResult = (-V * S * expDT * PNDD1 * invSqrtT * static_cast<Real_t>(0.5)) + D * S * CNDD1 * expDT - R * X * CNDD2 * expRT; PutResult = (-V * S * expDT * PNDD1 * invSqrtT * static_cast<Real_t>(0.5)) - D * S * CNDnD1 * expDT + R * X * CNDnD2 * expRT; } // S: spot price, X: strike Price, T: Expiration, R: Risk free interest rate, D: Dividend, V: Volatility __device__ inline void BlackScholesRhoGPU(Real_t& CallResult, Real_t& PutResult, Real_t S, Real_t X, Real_t T, Real_t R, Real_t D, Real_t V) { Real_t expRT, sqrtT; Real_t d2, CNDD2, CNDnD2; sqrtT = _sqrt(T); d2 = (_log(S / X) + (R - D - static_cast<Real_t>(0.5) * V * V) * T) / (V * sqrtT); CNDD2 = cndGPU(d2); CNDnD2 = cndGPU(-d2); expRT = _exp(-R*T); CallResult = X * T * expRT * CNDD2; PutResult = -X * T * expRT * CNDnD2; } // S: spot price, X: strike Price, T: Expiration, R: Risk free interest rate, D: Dividend, V: Volatility __device__ inline void BlackScholesGammaGPU(Real_t& CallResult, Real_t& PutResult, Real_t S, Real_t X, Real_t T, Real_t R, Real_t D, Real_t V) { Real_t expDT, 
invVolSqrtT; Real_t d1, PNDD1; invVolSqrtT = rsqrtf(T) / V; d1 = (_log(S / X) + (R - D + static_cast<Real_t>(0.5) * V * V) * T) * invVolSqrtT; PNDD1 = pndGPU(d1); expDT = _exp(-D*T); CallResult = expDT * PNDD1 * invVolSqrtT / S; PutResult = CallResult; } // S: spot price, X: strike Price, T: Expiration, R: Risk free interest rate, D: Dividend, V: Volatility __device__ inline void BlackScholesVannaGPU(Real_t& CallResult, Real_t& PutResult, Real_t S, Real_t X, Real_t T, Real_t R, Real_t D, Real_t V) { Real_t expDT, sqrtT; Real_t d1, d2, PNDD1; sqrtT = _sqrt(T); d1 = (_log(S / X) + (R - D + static_cast<Real_t>(0.5) * V * V) * T) / (V * sqrtT); d2 = d1 - V * sqrtT; PNDD1 = pndGPU(d1); expDT = _exp(-D*T); CallResult = -expDT * PNDD1 * d2 / V; PutResult = CallResult; } // S: spot price, X: strike Price, T: Expiration, R: Risk free interest rate, D: Dividend, V: Volatility __device__ inline void BlackScholesCharmGPU(Real_t& CallResult, Real_t& PutResult, Real_t S, Real_t X, Real_t T, Real_t R, Real_t D, Real_t V) { Real_t expDT, sqrtT; Real_t d1, d2, PNDD1, CNDD1, CNDnD1; sqrtT = _sqrt(T); d1 = (_log(S / X) + (R - D + static_cast<Real_t>(0.5) * V * V) * T) / (V * sqrtT); d2 = d1 - V * sqrtT; PNDD1 = pndGPU(d1); CNDD1 = cndGPU(d1); CNDnD1 = cndGPU(-d1); expDT = _exp(-D*T); CallResult = -D*expDT*CNDD1 + expDT*PNDD1*(static_cast<Real_t>(2.0)*(R-D)*T - d2*V*sqrtT) / (static_cast<Real_t>(2.0)*V*T*sqrtT); PutResult = CallResult + D*expDT*CNDD1 + D*expDT*CNDnD1; } // S: spot price, X: strike Price, T: Expiration, R: Risk free interest rate, D: Dividend, V: Volatility __device__ inline void BlackScholesSpeedGPU(Real_t& CallResult, Real_t& PutResult, Real_t S, Real_t X, Real_t T, Real_t R, Real_t D, Real_t V) { Real_t invVolSqrtT, callGamma, putGamma; Real_t d1; invVolSqrtT = static_cast<Real_t>(1.0)/V * rsqrtf(T); d1 = (_log(S / X) + (R - D + static_cast<Real_t>(0.5) * V * V) * T) * invVolSqrtT; BlackScholesGammaGPU(callGamma, putGamma, S, X, T, R, D, V); CallResult = (-callGamma / S) * (d1 * invVolSqrtT + static_cast<Real_t>(1.0)); PutResult = CallResult; } // S: spot price, X: strike Price, T: Expiration, R: Risk free interest rate, D: Dividend, V: Volatility __device__ inline void BlackScholesZommaGPU(Real_t& CallResult, Real_t& PutResult, Real_t S, Real_t X, Real_t T, Real_t R, Real_t D, Real_t V) { // Not terribly efficient. 
Real_t sqrtT, gammaCall, gammaPut; Real_t d1, d2; sqrtT = _sqrt(T); d1 = (_log(S / X) + (R - D + static_cast<Real_t>(0.5) * V * V) * T) / (V * sqrtT); d2 = d1 - V * sqrtT; BlackScholesGammaGPU(gammaCall, gammaPut, S, X, T, R, D, V); CallResult = gammaCall * ((d1*d2 - static_cast<Real_t>(1.0))/V); PutResult = CallResult; } // S: spot price, X: strike Price, T: Expiration, R: Risk free interest rate, D: Dividend, V: Volatility __device__ inline void BlackScholesColorGPU(Real_t& CallResult, Real_t& PutResult, Real_t S, Real_t X, Real_t T, Real_t R, Real_t D, Real_t V) { Real_t sqrtT, expDT; Real_t d1, d2, PNDD1; const Real_t one = 1; const Real_t two = 2; sqrtT = _sqrt(T); d1 = (_log(S / X) + (R - D + static_cast<Real_t>(0.5) * V * V) * T) / (V * sqrtT); d2 = d1 - V * sqrtT; PNDD1 = pndGPU(d1); expDT = _exp(-D*T); CallResult = expDT * PNDD1 / (two * S * T * V * sqrtT) * (two * D * T + one + (two*(R-D)*T - d2*V*sqrtT) * d1 / (V * sqrtT)); PutResult = CallResult; } // S: spot price, X: strike Price, T: Expiration, R: Risk free interest rate, D: Dividend, V: Volatility __device__ inline void BlackScholesDvegaDtimeGPU(Real_t& CallResult, Real_t& PutResult, Real_t S, Real_t X, Real_t T, Real_t R, Real_t D, Real_t V) { Real_t sqrtT, expDT; Real_t d1, d2, PNDD1; const Real_t one = 1; const Real_t two = 2; sqrtT = _sqrt(T); d1 = (_log(S / X) + (R - D + static_cast<Real_t>(0.5) * V * V) * T) / (V * sqrtT); d2 = d1 - V * sqrtT; PNDD1 = pndGPU(d1); expDT = _exp(-D*T); CallResult = S*expDT*PNDD1*sqrtT* (D + ((R-D)*d1)/(V*sqrtT) - (one + d1*d2)/(two*T)); PutResult = CallResult; } // S: spot price, X: strike Price, T: Expiration, R: Risk free interest rate, D: Dividend, V: Volatility __device__ inline void BlackScholesVommaGPU(Real_t& CallResult, Real_t& PutResult, Real_t S, Real_t X, Real_t T, Real_t R, Real_t D, Real_t V) { Real_t sqrtT, callVega, putVega; Real_t d1, d2; sqrtT = _sqrt(T); d1 = (_log(S / X) + (R - D + static_cast<Real_t>(0.5) * V * V) * T) / (V * sqrtT); d2 = d1 - V * sqrtT; BlackScholesVegaGPU(callVega, putVega, S, X, T, R, D, V); CallResult = callVega * (d1 * d2) / V; PutResult = CallResult; } // S: spot price, X: strike Price, T: Expiration, R: Risk free interest rate, D: Dividend, V: Volatility __device__ void AsianGeometricCalculate(Real_t& CallResult, Real_t& PutResult, Real_t S, Real_t X, Real_t T, Real_t R, Real_t D, Real_t V) { Real_t adjVol, adjDiv; Real_t d1, d2, CNDD1, CNDD2, CNDnD1, CNDnD2; Real_t sqrtT, expRT, expBRT; sqrtT = _sqrt(T); adjVol = V * static_cast<Real_t>(0.577350269); // V / sqrt(3) adjDiv = static_cast<Real_t>(0.5) * (R - D - V*V*static_cast<Real_t>(0.1666666666)); // (0.5 * (R - D - V*V/6)) d1 = (_log(S / X) + (adjDiv + static_cast<Real_t>(0.5)*adjVol*adjVol) * T) / (adjVol * sqrtT); d2 = d1 - adjVol*sqrtT; CNDD1 = cndGPU(d1); CNDD2 = cndGPU(d2); CNDnD1 = cndGPU(-d1); CNDnD2 = cndGPU(-d2); expRT = _exp(-R*T); expBRT = _exp((adjDiv - R) * T); CallResult = S * expBRT * CNDD1 - X * expRT * CNDD2; PutResult = X * expRT * CNDnD2 - S * expBRT * CNDnD1; } // S: spot price, X: strike Price, T: Expiration, R: Risk free interest rate, D: Dividend, V: Volatility, H: barrier, eta and phi are +/- 1 based on the type of barrier. 
__device__ void BarrierCalculate(Real_t& CallResult, Real_t& PutResult, Real_t S, Real_t X, Real_t T, Real_t R, Real_t D, Real_t V, Real_t H, Real_t rebate, Real_t eta, Real_t phi, unsigned int bType) { Real_t sqrtT, invVolSqrtT, mu, lambda, z, x1, x2, y1, y2, expDT, expRT; Real_t AA, BB, CC, DD, EE, FF; const Real_t one = static_cast<Real_t>(1.0); const Real_t two = static_cast<Real_t>(2.0); sqrtT = _sqrt(T); invVolSqrtT = static_cast<Real_t>(1.0)/(V * sqrtT); mu = (R - D - static_cast<Real_t>(0.5)*V*V)/(V * V); lambda = (mu * mu + (static_cast<Real_t>(2.0) * R)/(V * V)); z = _log(H / S) * invVolSqrtT + lambda * V * sqrtT; x1 = _log(S / X) * invVolSqrtT + (one + mu) * V * sqrtT; x2 = _log(S / H) * invVolSqrtT + (one + mu) * V * sqrtT; y1 = _log((H*H)/(S*X)) * invVolSqrtT + (one + mu) * V * sqrtT; y2 = _log(H / S) * invVolSqrtT + (one + mu) * V * sqrtT; expDT = _exp(-D*T); expRT = _exp(-R*T); AA = phi * S * expDT * cndGPU(phi*x1) - phi * X * expRT * cndGPU(phi * x1 - phi * V * sqrtT); BB = phi * S * expDT * cndGPU(phi*x2) - phi * X * expRT * cndGPU(phi * x2 - phi * V * sqrtT); CC = phi * S * expDT * pow(H/S, two*mu + two) * cndGPU(eta*y1) - phi * X * expRT * pow(H/S, two*mu) * cndGPU(eta * y1 - eta * V * sqrtT); DD = phi * S * expDT * pow(H/S, two*mu + two) * cndGPU(eta*y2) - phi * X * expRT * pow(H/S, two*mu) * cndGPU(eta * y2 - eta * V * sqrtT); EE = rebate * expRT * (cndGPU(eta * x2 - eta * V * sqrtT) - pow(H/S, two*mu) * cndGPU(eta * y2 - eta * V * sqrtT)); FF = rebate * (pow(H/S, mu + lambda) * cndGPU(eta * z) + pow(H/S, mu - lambda) * cndGPU(eta * z - two * eta * lambda * V * sqrtT)); switch(bType) { case BARRIERDOWNIN: if(X > H) { CallResult = CC + EE; PutResult = BB - CC + DD + EE; } else { CallResult = AA - BB + DD + EE; PutResult = AA + EE; } break; case BARRIERDOWNOUT: if(X > H) { CallResult = AA - CC + FF; PutResult = AA - BB + CC - DD + FF; } else { CallResult = BB - DD + FF; PutResult = FF; } break; case BARRIERUPIN: if(X > H) { CallResult = AA + EE; PutResult = AA - BB + DD + EE; } else { CallResult = BB - CC + DD + EE; PutResult = CC + EE; } break; case BARRIERUPOUT: if(X > H) { CallResult = FF; PutResult = BB - DD + FF; } else { CallResult = AA - BB + CC - DD + FF; PutResult = AA - CC + FF; } break; } } // S: spot price, X: strike Price, T: Expiration, R: Risk free interest rate, D: Dividend, V: Volatility, lbType: Specifies floating or fixed lookback __device__ void LookbackCalculate(Real_t& CallResult, Real_t& PutResult, Real_t S, Real_t X, Real_t T, Real_t R, Real_t D, Real_t V, unsigned int lbType) { Real_t sqrtT, expRT, expDT, expRDT; Real_t d1, d2, CNDD1, CNDD2, CNDnD1, CNDnD2, K; K = max(S, X); sqrtT = _sqrt(T); d1 = (_log(S/X) + (R + D - static_cast<Real_t>(0.5) * V * V) * T) / (V * sqrtT); d2 = d1 - V * sqrtT; CNDD1 = cndGPU(d1); CNDD2 = cndGPU(d2); CNDnD1 = cndGPU(-d1); CNDnD2 = cndGPU(-d2); expRT = _exp(-R*T); expDT = _exp(-D*T); expRDT = _exp((R-D)*T); if(lbType == LOOKBACKFIXED) { CallResult = expRT * max(S - X, static_cast<Real_t>(0.0)) + S * expDT * CNDD1 - K * expRT * CNDD2 + S * expRT * V * V / (static_cast<Real_t>(2.0) * (R - D)) * (-pow(S/K, static_cast<Real_t>(-2.0) * (R - D) / (V * V))) * cndGPU(d1 - static_cast<Real_t>(2.0) * (R - D) * sqrtT / V) + expRDT * CNDD1; PutResult = expRT * max(X - S, static_cast<Real_t>(0.0)) - S * expDT * CNDnD1 + K * expRT * CNDnD2 + S * expRT * V * V / (static_cast<Real_t>(2.0) * (R - D)) * (pow(S/K, static_cast<Real_t>(-2.0) * (R - D) / (V * V))) * cndGPU(-d1 + static_cast<Real_t>(2.0) * (R - D) * sqrtT / V) - expRDT * 
CNDnD1; } else { CallResult = S * expDT * CNDD1 - S * expRT * CNDD2 + S * expRT * V * V / (static_cast<Real_t>(2.0) * (R - D)) * cndGPU(-d1 + static_cast<Real_t>(2.0) * (R - D) * sqrtT / V) - expRDT * CNDnD1; PutResult = -S * expDT * CNDnD1 + S * expRT * CNDnD2 + S * expRT * V * V / (static_cast<Real_t>(2.0) * (R - D)) * cndGPU(d1 - static_cast<Real_t>(2.0) * (R - D) * sqrtT / V) + expRDT * CNDD1; } } __global__ void BlackScholesGPU(Real_t *d_CallResult, Real_t *d_PutResult, Real_t *d_CurrentPrice, Real_t *d_OptionStrike, Real_t *d_Expiration, Real_t *d_InterestRate, Real_t *d_Dividends, Real_t *d_Volatility, int optN, int toCalculate) { const int tid = blockDim.x * blockIdx.x + threadIdx.x; const int THREAD_N = blockDim.x * gridDim.x; /*for(int opt = tid; opt < optN; opt += THREAD_N) { BlackScholesValueGPU(d_CallResult[opt], d_PutResult[opt], d_CurrentPrice[opt], d_OptionStrike[opt], d_Expiration[opt], d_InterestRate[opt], d_Dividends[opt], d_Volatility[opt]); }*/ switch(toCalculate) { case VALUE: for(int opt = tid; opt < optN; opt += THREAD_N) { BlackScholesValueGPU(d_CallResult[opt], d_PutResult[opt], d_CurrentPrice[opt], d_OptionStrike[opt], d_Expiration[opt], d_InterestRate[opt], d_Dividends[opt], d_Volatility[opt]); } break; case DELTA: for(int opt = tid; opt < optN; opt += THREAD_N) { BlackScholesDeltaGPU(d_CallResult[opt], d_PutResult[opt], d_CurrentPrice[opt], d_OptionStrike[opt], d_Expiration[opt], d_InterestRate[opt], d_Dividends[opt], d_Volatility[opt]); } break; case VEGA: for(int opt = tid; opt < optN; opt += THREAD_N) { BlackScholesVegaGPU(d_CallResult[opt], d_PutResult[opt], d_CurrentPrice[opt], d_OptionStrike[opt], d_Expiration[opt], d_InterestRate[opt], d_Dividends[opt], d_Volatility[opt]); } break; case THETA: for(int opt = tid; opt < optN; opt += THREAD_N) { BlackScholesThetaGPU(d_CallResult[opt], d_PutResult[opt], d_CurrentPrice[opt], d_OptionStrike[opt], d_Expiration[opt], d_InterestRate[opt], d_Dividends[opt], d_Volatility[opt]); } break; case RHO: for(int opt = tid; opt < optN; opt += THREAD_N) { BlackScholesRhoGPU(d_CallResult[opt], d_PutResult[opt], d_CurrentPrice[opt], d_OptionStrike[opt], d_Expiration[opt], d_InterestRate[opt], d_Dividends[opt], d_Volatility[opt]); } break; case GAMMA: for(int opt = tid; opt < optN; opt += THREAD_N) { BlackScholesGammaGPU(d_CallResult[opt], d_PutResult[opt], d_CurrentPrice[opt], d_OptionStrike[opt], d_Expiration[opt], d_InterestRate[opt], d_Dividends[opt], d_Volatility[opt]); } break; case VANNA: for(int opt = tid; opt < optN; opt += THREAD_N) { BlackScholesVannaGPU(d_CallResult[opt], d_PutResult[opt], d_CurrentPrice[opt], d_OptionStrike[opt], d_Expiration[opt], d_InterestRate[opt], d_Dividends[opt], d_Volatility[opt]); } break; case CHARM: for(int opt = tid; opt < optN; opt += THREAD_N) { BlackScholesCharmGPU(d_CallResult[opt], d_PutResult[opt], d_CurrentPrice[opt], d_OptionStrike[opt], d_Expiration[opt], d_InterestRate[opt], d_Dividends[opt], d_Volatility[opt]); } break; case VOMMA: for(int opt = tid; opt < optN; opt += THREAD_N) { BlackScholesVommaGPU(d_CallResult[opt], d_PutResult[opt], d_CurrentPrice[opt], d_OptionStrike[opt], d_Expiration[opt], d_InterestRate[opt], d_Dividends[opt], d_Volatility[opt]); } break; case DVEGADTIME: for(int opt = tid; opt < optN; opt += THREAD_N) { BlackScholesDvegaDtimeGPU(d_CallResult[opt], d_PutResult[opt], d_CurrentPrice[opt], d_OptionStrike[opt], d_Expiration[opt], d_InterestRate[opt], d_Dividends[opt], d_Volatility[opt]); } break; case SPEED: for(int opt = tid; opt < optN; opt += 
THREAD_N) { BlackScholesSpeedGPU(d_CallResult[opt], d_PutResult[opt], d_CurrentPrice[opt], d_OptionStrike[opt], d_Expiration[opt], d_InterestRate[opt], d_Dividends[opt], d_Volatility[opt]); } break; case ZOMMA: for(int opt = tid; opt < optN; opt += THREAD_N) { BlackScholesZommaGPU(d_CallResult[opt], d_PutResult[opt], d_CurrentPrice[opt], d_OptionStrike[opt], d_Expiration[opt], d_InterestRate[opt], d_Dividends[opt], d_Volatility[opt]); } break; case COLOR: for(int opt = tid; opt < optN; opt += THREAD_N) { BlackScholesColorGPU(d_CallResult[opt], d_PutResult[opt], d_CurrentPrice[opt], d_OptionStrike[opt], d_Expiration[opt], d_InterestRate[opt], d_Dividends[opt], d_Volatility[opt]); } break; default: for(int opt = tid; opt < optN; opt += THREAD_N) { d_CallResult[opt] = static_cast<Real_t>(0.0); d_PutResult[opt] = static_cast<Real_t>(0.0); } break; } } __global__ void AsianGeometricAnalyticGPU(int optN, Real_t *d_CallResult, Real_t *d_PutResult, Real_t *d_CurrentPrice, Real_t *d_OptionStrike, Real_t *d_Expiration, Real_t *d_InterestRate, Real_t *d_Dividends, Real_t *d_Volatility) { const int tid = blockDim.x * blockIdx.x + threadIdx.x; const int THREAD_N = blockDim.x * gridDim.x; for(int opt = tid; opt < optN; opt += THREAD_N) { AsianGeometricCalculate(d_CallResult[opt], d_PutResult[opt], d_CurrentPrice[opt], d_OptionStrike[opt], d_Expiration[opt], d_InterestRate[opt], d_Dividends[opt], d_Volatility[opt]); } } __global__ void BarrierAnalyticGPU(int optN, Real_t *d_CallResult, Real_t *d_PutResult, Real_t *d_CurrentPrice, Real_t *d_OptionStrike, Real_t *d_Expiration, Real_t *d_InterestRate, Real_t *d_Dividends, Real_t *d_Volatility, Real_t *d_Barrier, Real_t *d_Rebate, unsigned int barrierType, int callOrPut) { const int tid = blockDim.x * blockIdx.x + threadIdx.x; const int THREAD_N = blockDim.x * gridDim.x; Real_t eta, phi; switch(barrierType) { case BARRIERDOWNIN: eta = static_cast<Real_t>(1.0); phi = callOrPut == 1 ? static_cast<Real_t>(1.0) : static_cast<Real_t>(-1.0); break; case BARRIERDOWNOUT: eta = static_cast<Real_t>(1.0); phi = callOrPut == 1 ? static_cast<Real_t>(1.0) : static_cast<Real_t>(-1.0); break; case BARRIERUPIN: eta = static_cast<Real_t>(-1.0); phi = callOrPut == 1 ? static_cast<Real_t>(1.0) : static_cast<Real_t>(-1.0); break; case BARRIERUPOUT: eta = static_cast<Real_t>(-1.0); phi = callOrPut == 1 ? 
static_cast<Real_t>(1.0) : static_cast<Real_t>(-1.0); break; } for(int opt = tid; opt < optN; opt += THREAD_N) { BarrierCalculate(d_CallResult[opt], d_PutResult[opt], d_CurrentPrice[opt], d_OptionStrike[opt], d_Expiration[opt], d_InterestRate[opt], d_Dividends[opt], d_Volatility[opt], d_Barrier[opt], d_Rebate[opt], eta, phi, barrierType); } } __global__ void LookbackAnalyticGPU(int optN, Real_t *d_CallResult, Real_t *d_PutResult, Real_t *d_CurrentPrice, Real_t *d_OptionStrike, Real_t *d_Expiration, Real_t *d_InterestRate, Real_t *d_Dividends, Real_t *d_Volatility, unsigned int lookbackType) { const int tid = blockDim.x * blockIdx.x + threadIdx.x; const int THREAD_N = blockDim.x * gridDim.x; for(int opt = tid; opt < optN; opt += THREAD_N) { LookbackCalculate(d_CallResult[opt], d_PutResult[opt], d_CurrentPrice[opt], d_OptionStrike[opt], d_Expiration[opt], d_InterestRate[opt], d_Dividends[opt], d_Volatility[opt], lookbackType); } } static int iBlackScholes(WGL_Memory_t call, WGL_Memory_t put, WGL_Memory_t spot, WGL_Memory_t strikePrice, WGL_Memory_t expiration, WGL_Memory_t interest, WGL_Memory_t volatility, WGL_Memory_t dividend, WGL_Memory_t barrier, WGL_Memory_t rebate, mint numOptions, mint calculationType, mint optionType, int callOrPut) { mbool barrierQ = False; dim3 blockDim(128); dim3 gridDim(512); if (!(WGL_Type_RealQ(call) && WGL_Type_RealQ(put) && WGL_Type_RealQ(spot) && WGL_Type_RealQ(strikePrice) && WGL_Type_RealQ(expiration) && WGL_Type_RealQ(interest) && WGL_Type_RealQ(volatility) && WGL_Type_RealQ(dividend))) { return LIBRARY_TYPE_ERROR; } WGL_SAFE_CALL(CUDA_Runtime_setMemoryAsOutput(wglState, call, wglErr), cleanup); WGL_SAFE_CALL(CUDA_Runtime_setMemoryAsOutput(wglState, put, wglErr), cleanup); WGL_SAFE_CALL(CUDA_Runtime_setMemoryAsInput(wglState, spot, wglErr), cleanup); WGL_SAFE_CALL(CUDA_Runtime_setMemoryAsInput(wglState, strikePrice, wglErr), cleanup); WGL_SAFE_CALL(CUDA_Runtime_setMemoryAsInput(wglState, expiration, wglErr), cleanup); WGL_SAFE_CALL(CUDA_Runtime_setMemoryAsInput(wglState, interest, wglErr), cleanup); WGL_SAFE_CALL(CUDA_Runtime_setMemoryAsInput(wglState, dividend, wglErr), cleanup); WGL_SAFE_CALL(CUDA_Runtime_setMemoryAsInput(wglState, volatility, wglErr), cleanup); if (calculationType >= 0) { hipLaunchKernelGGL(( BlackScholesGPU), dim3(gridDim), dim3(blockDim), 0, 0, CUDA_Runtime_getDeviceMemoryAsReal(call), CUDA_Runtime_getDeviceMemoryAsReal(put), CUDA_Runtime_getDeviceMemoryAsReal(spot), CUDA_Runtime_getDeviceMemoryAsReal(strikePrice), CUDA_Runtime_getDeviceMemoryAsReal(expiration), CUDA_Runtime_getDeviceMemoryAsReal(interest), CUDA_Runtime_getDeviceMemoryAsReal(dividend), CUDA_Runtime_getDeviceMemoryAsReal(volatility), numOptions, calculationType ); } else if (optionType == ASIANGEOMETRIC) { hipLaunchKernelGGL(( AsianGeometricAnalyticGPU), dim3(gridDim), dim3(blockDim), 0, 0, numOptions, CUDA_Runtime_getDeviceMemoryAsReal(call), CUDA_Runtime_getDeviceMemoryAsReal(put), CUDA_Runtime_getDeviceMemoryAsReal(spot), CUDA_Runtime_getDeviceMemoryAsReal(strikePrice), CUDA_Runtime_getDeviceMemoryAsReal(expiration), CUDA_Runtime_getDeviceMemoryAsReal(interest), CUDA_Runtime_getDeviceMemoryAsReal(dividend), CUDA_Runtime_getDeviceMemoryAsReal(volatility) ); } else if (optionType == LOOKBACKFIXED || optionType == LOOKBACKFLOATING) { hipLaunchKernelGGL(( LookbackAnalyticGPU), dim3(gridDim), dim3(blockDim), 0, 0, numOptions, CUDA_Runtime_getDeviceMemoryAsReal(call), CUDA_Runtime_getDeviceMemoryAsReal(put), CUDA_Runtime_getDeviceMemoryAsReal(spot), 
CUDA_Runtime_getDeviceMemoryAsReal(strikePrice), CUDA_Runtime_getDeviceMemoryAsReal(expiration), CUDA_Runtime_getDeviceMemoryAsReal(interest), CUDA_Runtime_getDeviceMemoryAsReal(dividend), CUDA_Runtime_getDeviceMemoryAsReal(volatility), optionType ); } else { barrierQ = True; WGL_SAFE_CALL(CUDA_Runtime_setMemoryAsOutput(wglState, barrier, wglErr), cleanup); WGL_SAFE_CALL(CUDA_Runtime_setMemoryAsOutput(wglState, rebate, wglErr), cleanup); hipLaunchKernelGGL(( BarrierAnalyticGPU), dim3(gridDim), dim3(blockDim), 0, 0, numOptions, CUDA_Runtime_getDeviceMemoryAsReal(call), CUDA_Runtime_getDeviceMemoryAsReal(put), CUDA_Runtime_getDeviceMemoryAsReal(spot), CUDA_Runtime_getDeviceMemoryAsReal(strikePrice), CUDA_Runtime_getDeviceMemoryAsReal(expiration), CUDA_Runtime_getDeviceMemoryAsReal(interest), CUDA_Runtime_getDeviceMemoryAsReal(dividend), CUDA_Runtime_getDeviceMemoryAsReal(volatility), CUDA_Runtime_getDeviceMemoryAsReal(barrier), CUDA_Runtime_getDeviceMemoryAsReal(rebate), optionType, callOrPut ); } CUDA_Runtime_synchronize(wglErr); cleanup: if (WGL_SuccessQ) { CUDA_Runtime_setMemoryAsValidOutput(wglState, call, wglErr); CUDA_Runtime_setMemoryAsValidOutput(wglState, put, wglErr); } else { CUDA_Runtime_setMemoryAsInvalidOutput(wglState, call, wglErr); CUDA_Runtime_setMemoryAsInvalidOutput(wglState, put, wglErr); } CUDA_Runtime_unsetMemoryAsInput(wglState, spot, wglErr); CUDA_Runtime_unsetMemoryAsInput(wglState, strikePrice, wglErr); CUDA_Runtime_unsetMemoryAsInput(wglState, expiration, wglErr); CUDA_Runtime_unsetMemoryAsInput(wglState, interest, wglErr); CUDA_Runtime_unsetMemoryAsInput(wglState, volatility, wglErr); CUDA_Runtime_unsetMemoryAsInput(wglState, dividend, wglErr); if (barrierQ) { CUDA_Runtime_unsetMemoryAsInput(wglState, rebate, wglErr); CUDA_Runtime_unsetMemoryAsInput(wglState, barrier, wglErr); } if (WGL_SuccessQ) { return LIBRARY_NO_ERROR; } else { return LIBRARY_FUNCTION_ERROR; } } EXTERN_C DLLEXPORT int oBlackScholes(WolframLibraryData libData, mint Argc, MArgument * Args, MArgument Res) { WGL_Memory_t callMemory, putMemory, currentPriceMemory, strikePriceMemory, expirationMemory, interestMemory; WGL_Memory_t volatilityMemory, dividendMemory, barrierMemory, rebateMemory; mint callMemoryId, putMemoryId, currentPriceMemoryId, strikePriceMemoryId, expirationMemoryId, interestMemoryId; mint volatilityMemoryId, dividendMemoryId, barrierMemoryId, rebateMemoryId; mint numOptions, calculationType, optionType, callOrPut; int err; assert(Argc == 16); callMemoryId = MArgument_getInteger(Args[0]); putMemoryId = MArgument_getInteger(Args[1]); currentPriceMemoryId = MArgument_getInteger(Args[2]); strikePriceMemoryId = MArgument_getInteger(Args[3]); expirationMemoryId = MArgument_getInteger(Args[4]); interestMemoryId = MArgument_getInteger(Args[5]); volatilityMemoryId = MArgument_getInteger(Args[6]); dividendMemoryId = MArgument_getInteger(Args[7]); barrierMemoryId = MArgument_getInteger(Args[8]); rebateMemoryId = MArgument_getInteger(Args[9]); numOptions = MArgument_getInteger(Args[10]); calculationType = MArgument_getInteger(Args[11]); optionType = MArgument_getInteger(Args[12]); callOrPut = MArgument_getInteger(Args[13]); WGL_SAFE_CALL(wglData->setWolframLibraryData(wglData, libData), cleanup); callMemory = wglData->findMemory(wglData, callMemoryId); putMemory = wglData->findMemory(wglData, putMemoryId); currentPriceMemory = wglData->findMemory(wglData, currentPriceMemoryId); strikePriceMemory = wglData->findMemory(wglData, strikePriceMemoryId); expirationMemory = 
wglData->findMemory(wglData, expirationMemoryId); interestMemory = wglData->findMemory(wglData, interestMemoryId); volatilityMemory = wglData->findMemory(wglData, volatilityMemoryId); dividendMemory = wglData->findMemory(wglData, dividendMemoryId); barrierMemory = wglData->findMemory(wglData, barrierMemoryId); rebateMemory = wglData->findMemory(wglData, rebateMemoryId); err = iBlackScholes(callMemory, putMemory, currentPriceMemory, strikePriceMemory, expirationMemory, interestMemory, volatilityMemory, dividendMemory, barrierMemory, rebateMemory, numOptions, calculationType, optionType, callOrPut); cleanup: if (err == LIBRARY_NO_ERROR && WGL_SuccessQ) { return LIBRARY_NO_ERROR; } else if (err != LIBRARY_NO_ERROR) { return err; } else { return LIBRARY_FUNCTION_ERROR; } } /************************************************/ /* Binomial Method Options Pricing */ /************************************************/ /** * Original code is under * ${basedir}/ExtraComponents/CUDA_SDK/3.0/Linux-x86-64/C/src/binomialOptions **/ #define CACHE_DELTA 32 #define CACHE_SIZE 256 #define CACHE_STEP (CACHE_SIZE - CACHE_DELTA) #define NUM_STEPS 128 __device__ inline Real_t expiryCallValue(Real_t S, Real_t X, Real_t vDt, float callPutFactor, int i) { Real_t d = S * _exp(vDt * (NUM_STEPS - static_cast<Real_t>(2.0) * i)) - X; d *= callPutFactor; return (d > static_cast<Real_t>(0.0)) ? d : static_cast<Real_t>(0.0); } __global__ void binomialOptionsKernel(Real_t* d_CallValue, Real_t* d_CallBuffer, Real_t* d_S, Real_t* d_X, Real_t* d_vDt, Real_t* d_puByDf, Real_t* d_pdByDf, int optType, int call) { __shared__ Real_t callA[CACHE_SIZE+1]; __shared__ Real_t callB[CACHE_SIZE+1]; Real_t *const d_Call = &d_CallBuffer[blockIdx.x * (NUM_STEPS + 16)]; const int tid = threadIdx.x; const Real_t S = d_S[blockIdx.x]; const Real_t X = d_X[blockIdx.x]; const Real_t vDt = d_vDt[blockIdx.x]; const Real_t puByDf = d_puByDf[blockIdx.x]; const Real_t pdByDf = d_pdByDf[blockIdx.x]; const Real_t callPutFactor = call == 1 ? static_cast<Real_t>(1.0) : static_cast<Real_t>(-1.0); Real_t callValue, temp, currentVal; for(int i = tid; i <= NUM_STEPS; i+= CACHE_SIZE) d_Call[i] = expiryCallValue(S, X, vDt, callPutFactor, i); for(int i = NUM_STEPS; i > 0; i -= CACHE_DELTA) { for(int c_base = 0; c_base < i; c_base += CACHE_STEP) { int c_start = min(CACHE_SIZE - 1, i - c_base); int c_end = c_start - CACHE_DELTA; __syncthreads(); if(tid <= c_start) callA[tid] = d_Call[c_base + tid]; currentVal = vDt * static_cast<Real_t>(i - 2*(c_base + tid) - 1); for(int k = c_start - 1; k >= c_end;) { __syncthreads(); callValue = pdByDf * callA[tid+1] + puByDf * callA[tid]; if(optType == AMERICAN) { temp = S * _exp(currentVal) - X; temp *= callPutFactor; callValue = callValue > temp ? callValue : temp; } callB[tid] = callValue; k--; currentVal -= vDt; __syncthreads(); callValue = pdByDf * callB[tid+1] + puByDf * callB[tid]; if(optType == AMERICAN) { temp = S * _exp(currentVal) - X; temp *= callPutFactor; callValue = callValue > temp ? 
callValue : temp; } callA[tid] = callValue; k--; currentVal -= vDt; } __syncthreads(); if(tid <= c_end) d_Call[c_base + tid] = callA[tid]; } } if(threadIdx.x == 0) d_CallValue[blockIdx.x] = static_cast<Real_t>(callA[0]); } static int iBinomialMethod(WGL_Memory_t priceRes, WGL_Memory_t spot, WGL_Memory_t strike, WGL_Memory_t buffer, WGL_Memory_t vDt, WGL_Memory_t puByDf, WGL_Memory_t pdByDf, mint numOptions, mint optionType, mint callOrPut) { int err = LIBRARY_FUNCTION_ERROR; dim3 blockDim(256); dim3 gridDim(numOptions); if (!(WGL_Type_RealQ(priceRes) && WGL_Type_RealQ(spot) && WGL_Type_RealQ(strike) && WGL_Type_RealQ(buffer) && WGL_Type_RealQ(vDt) && WGL_Type_RealQ(puByDf) && WGL_Type_RealQ(pdByDf))) { return LIBRARY_TYPE_ERROR; } WGL_SAFE_CALL(CUDA_Runtime_setMemoryAsOutput(wglState, priceRes, wglErr), cleanup); WGL_SAFE_CALL(CUDA_Runtime_setMemoryAsInput(wglState, spot, wglErr), cleanup); WGL_SAFE_CALL(CUDA_Runtime_setMemoryAsInput(wglState, strike, wglErr), cleanup); WGL_SAFE_CALL(CUDA_Runtime_setMemoryAsInput(wglState, buffer, wglErr), cleanup); WGL_SAFE_CALL(CUDA_Runtime_setMemoryAsInput(wglState, vDt, wglErr), cleanup); WGL_SAFE_CALL(CUDA_Runtime_setMemoryAsInput(wglState, puByDf, wglErr), cleanup); WGL_SAFE_CALL(CUDA_Runtime_setMemoryAsInput(wglState, pdByDf, wglErr), cleanup); hipLaunchKernelGGL(( binomialOptionsKernel), dim3(gridDim), dim3(blockDim), 0, 0, CUDA_Runtime_getDeviceMemoryAsReal(priceRes), CUDA_Runtime_getDeviceMemoryAsReal(buffer), CUDA_Runtime_getDeviceMemoryAsReal(spot), CUDA_Runtime_getDeviceMemoryAsReal(strike), CUDA_Runtime_getDeviceMemoryAsReal(vDt), CUDA_Runtime_getDeviceMemoryAsReal(puByDf), CUDA_Runtime_getDeviceMemoryAsReal(pdByDf), optionType, callOrPut ); CUDA_Runtime_synchronize(wglErr); if (WGL_SuccessQ) { err = LIBRARY_NO_ERROR; } cleanup: if (WGL_SuccessQ) { CUDA_Runtime_setMemoryAsValidOutput(wglState, priceRes, wglErr); } else { CUDA_Runtime_setMemoryAsInvalidOutput(wglState, priceRes, wglErr); } CUDA_Runtime_unsetMemoryAsInput(wglState, spot, wglErr); CUDA_Runtime_unsetMemoryAsInput(wglState, strike, wglErr); CUDA_Runtime_unsetMemoryAsInput(wglState, vDt, wglErr); CUDA_Runtime_unsetMemoryAsInput(wglState, puByDf, wglErr); CUDA_Runtime_unsetMemoryAsInput(wglState, pdByDf, wglErr); return err; } EXTERN_C DLLEXPORT int oBinomialMethod(WolframLibraryData libData, mint Argc, MArgument * Args, MArgument Res) { WGL_Memory_t resMem, currentPriceMem, strikeMem, expirationMem, interestMem, volatilityMem, dividendMem; mint resMemId, currentPriceMemId, strikeMemId, expirationMemId, interestMemId, volatilityMemId, dividendMemId; mint numOptions, optionType, callOrPut; mint numSteps = 128;//treeDepth; Real_t * dt = NULL, * vDt = NULL, * rDt = NULL, * If = NULL, * df = NULL; Real_t * u = NULL, * d = NULL, * pu = NULL, * pd = NULL, * puByDf = NULL, * pdByDf = NULL; Real_t * hCallBuffer = NULL; WGL_Memory_t callBufferMem = NULL, vDtMem = NULL, puByDfMem = NULL, pdByDfMem = NULL; int err = LIBRARY_NO_ERROR; double * hExpiration, * hVolatility, * hInterest, * hDividend; assert(Argc == 12); resMemId = MArgument_getInteger(Args[0]); currentPriceMemId = MArgument_getInteger(Args[1]); strikeMemId = MArgument_getInteger(Args[2]); expirationMemId = MArgument_getInteger(Args[3]); interestMemId = MArgument_getInteger(Args[4]); volatilityMemId = MArgument_getInteger(Args[5]); dividendMemId = MArgument_getInteger(Args[6]); numOptions = MArgument_getInteger(Args[7]); optionType = MArgument_getInteger(Args[8]); callOrPut = MArgument_getInteger(Args[9]); resMem = 
wglData->findMemory(wglData, resMemId); currentPriceMem = wglData->findMemory(wglData, currentPriceMemId); strikeMem = wglData->findMemory(wglData, strikeMemId); expirationMem = wglData->findMemory(wglData, expirationMemId); interestMem = wglData->findMemory(wglData, interestMemId); volatilityMem = wglData->findMemory(wglData, volatilityMemId); dividendMem = wglData->findMemory(wglData, dividendMemId); WGL_SAFE_CALL(wglData->setWolframLibraryData(wglData, libData), cleanup); New(dt, Real_t, numOptions * sizeof(Real_t)); New(vDt, Real_t, numOptions * sizeof(Real_t)); New(rDt, Real_t, numOptions * sizeof(Real_t)); New(If, Real_t, numOptions * sizeof(Real_t)); New(df, Real_t, numOptions * sizeof(Real_t)); New(u, Real_t, numOptions * sizeof(Real_t)); New(d, Real_t, numOptions * sizeof(Real_t)); New(pu, Real_t, numOptions * sizeof(Real_t)); New(pd, Real_t, numOptions * sizeof(Real_t)); New(puByDf, Real_t, numOptions * sizeof(Real_t)); New(pdByDf, Real_t, numOptions * sizeof(Real_t)); hExpiration = wglData->MTensorMemory_getRealData(wglData, expirationMem); assert(hExpiration != NULL); hVolatility = wglData->MTensorMemory_getRealData(wglData, volatilityMem); assert(hVolatility != NULL); hInterest = wglData->MTensorMemory_getRealData(wglData, interestMem); assert(hInterest != NULL); hDividend = wglData->MTensorMemory_getRealData(wglData, dividendMem); assert(hDividend != NULL); New(hCallBuffer, Real_t, numOptions * (numSteps + 16) * sizeof(Real_t)); // We need to calculate pseudoprobabilities that the price of the asset will go up or down, as well as the amount it will go up or down. for (mint ii = 0; ii < numOptions; ii++) { // Width of a time step dt[ii] = static_cast<Real_t>(hExpiration[ii]) / static_cast<Real_t>(numSteps); // Volatility multiplied by square root of the timestep -- comes up in simulating brownian motion vDt[ii] = static_cast<Real_t>(hVolatility[ii]) * sqrt(dt[ii]); // Used to account for the rate of risk free interest and the dividends of the asset rDt[ii] = static_cast<Real_t>(hInterest[ii] - hDividend[ii]) * dt[ii]; // As above [these could probably be combined into one step] If[ii] = exp(rDt[ii]); // Used to account for just risk free interest df[ii] = exp(static_cast<Real_t>(-hInterest[ii] * dt[ii])); // Amount increased (u) or decreased (d) at each time step u[ii] = exp(vDt[ii]); d[ii] = exp(-vDt[ii]); // Pseudoprobability of increase (pu) or decrease (pd) pu[ii] = (If[ii] - d[ii]) / (u[ii] - d[ii]); pd[ii] = 1.0f - pu[ii]; // Multiply by df to adjust for risk free interest rate. 
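// (For reference, the quantities computed in this loop follow the standard Cox-Ross-Rubinstein parameterization:
//  with dt = T/numSteps, u = exp(V*sqrt(dt)), d = 1/u, pu = (exp((R-D)*dt) - d)/(u - d), pd = 1 - pu,
//  and a per-step discount df = exp(-R*dt) folded into the transition weights puByDf and pdByDf.
//  A worked sketch with hypothetical inputs V = 0.2, R = 0.05, D = 0, T = 1, numSteps = 128:
//  dt ~= 0.0078, u ~= 1.0178, d ~= 0.9825, pu ~= 0.507.)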
puByDf[ii] = pu[ii] * df[ii];
        pdByDf[ii] = pd[ii] * df[ii];
    }

    callBufferMem = wglData->newRawMemory(wglData, (void**)&hCallBuffer, WGL_MemoryResidence_DeviceHost, numOptions * (numSteps + 16) * sizeof(Real_t), True);
    callBufferMem->type = WGL_Real_t;
    assert(WGL_SuccessQ);

    vDtMem = wglData->newRawMemory(wglData, (void**)&vDt, WGL_MemoryResidence_DeviceHost, numOptions * sizeof(Real_t), True);
    vDtMem->type = WGL_Real_t;
    assert(WGL_SuccessQ);

    puByDfMem = wglData->newRawMemory(wglData, (void**)&puByDf, WGL_MemoryResidence_DeviceHost, numOptions * sizeof(Real_t), True);
    puByDfMem->type = WGL_Real_t;
    assert(WGL_SuccessQ);

    pdByDfMem = wglData->newRawMemory(wglData, (void**)&pdByDf, WGL_MemoryResidence_DeviceHost, numOptions * sizeof(Real_t), True);
    pdByDfMem->type = WGL_Real_t;
    assert(WGL_SuccessQ);

    err = iBinomialMethod(resMem, currentPriceMem, strikeMem, callBufferMem, vDtMem, puByDfMem, pdByDfMem, numOptions, optionType, callOrPut);

cleanup:
    Free(dt);
    Free(rDt);
    Free(If);
    Free(df);
    Free(u);
    Free(d);
    Free(pu);
    Free(pd);

    wglData->freeMemory(wglData, callBufferMem);
    wglData->freeMemory(wglData, vDtMem);
    wglData->freeMemory(wglData, puByDfMem);
    wglData->freeMemory(wglData, pdByDfMem);

    if (err == LIBRARY_NO_ERROR && WGL_SuccessQ) {
        return LIBRARY_NO_ERROR;
    } else if (err != LIBRARY_NO_ERROR) {
        return err;
    } else {
        return LIBRARY_FUNCTION_ERROR;
    }
}

/************************************************/
/*     Monte Carlo Method Options Pricing       */
/************************************************/
/**
 * Original code is under
 * ${basedir}/ExtraComponents/CUDA_SDK/3.0/Linux-x86-64/C/src/MonteCarlo
 **/

#define THREAD_N 256

// Barrier types (masks, e.g. a down-and-out option has type 3, up-and-in has type 0, etc.)
#define BARRIER_DOWN 1
#define BARRIER_OUT 2

#define LOOKBACK_FIXED 0
#define LOOKBACK_FLOATING 1

template<unsigned int blockSize> __device__ void sumReduceSharedMem(volatile Real_t *sum, int tid) {
    // do reduction in shared mem
    if (blockSize >= 512) { if (tid < 256) { sum[tid] += sum[tid + 256]; } __syncthreads();}
    if (blockSize >= 256) { if (tid < 128) { sum[tid] += sum[tid + 128]; } __syncthreads();}
    if (blockSize >= 128) { if (tid < 64) { sum[tid] += sum[tid + 64]; } __syncthreads();}
    if (tid < 32) {
        if (blockSize >= 64) { sum[tid] += sum[tid + 32]; }
        if (blockSize >= 32) { sum[tid] += sum[tid + 16]; }
        if (blockSize >= 16) { sum[tid] += sum[tid + 8]; }
        if (blockSize >= 8) { sum[tid] += sum[tid + 4]; }
        if (blockSize >= 4) { sum[tid] += sum[tid + 2]; }
        if (blockSize >= 2) { sum[tid] += sum[tid + 1]; }
    }
}

#define UNROLL_REDUCTION
template<int SUM_N, int blockSize> __device__ void sumReduce(Real_t *sum) {
#ifdef UNROLL_REDUCTION
    for(int pos = threadIdx.x; pos < SUM_N; pos += blockSize){
        __syncthreads();
        sumReduceSharedMem<blockSize>(sum, pos);
    }
#else
    for(int stride = SUM_N / 2; stride > 0; stride >>= 1){
        __syncthreads();
        for(int pos = threadIdx.x; pos < stride; pos += blockSize){
            sum[pos] += sum[pos + stride];
        }
    }
#endif
}

// S: spot price, X: strike price
// MuByT and VBySqrtT are the mean and standard deviation of the normal random variables used to simulate brownian motion.
// d_Samples is a pool of normally distributed random samples.
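// For reference, the path recursion shared by the Monte Carlo payoff functions below, in the notation of
// their arguments: with dt = 1/depthN and z_i = d_Samples[index + i*pathN] drawn from Normal(0,1),
//     exponent += MuByT*dt + VBySqrtT*sqrt(dt)*z_i        (exponent starts at 0),
// so after depthN steps exponent is Normal(MuByT, VBySqrtT^2) and the simulated price is S*exp(exponent).
// The barrier payoff tracks crossings in log space: the path touches the barrier H exactly when exponent
// crosses log(H/S), which avoids exponentiating the running value at every step.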
__device__ inline Real_t endCallValueBarrier(Real_t S, Real_t X, Real_t MuByT, Real_t VBySqrtT, unsigned int index, unsigned int pathN, Real_t *d_Samples, Real_t callPutFactor, unsigned int depthN, Real_t barrier, int barrierType) { Real_t value = S; unsigned int i; Real_t r = 0; Real_t sqrtdt = rsqrt((Real_t)depthN); Real_t dt = sqrtdt*sqrtdt; Real_t exponent = 0; Real_t logBarrier = _log(barrier / S); // If exponent crosses this it is equivalent to value crossing barrier. Cuts down on computation. unsigned int crossed = 0; if((exponent < logBarrier) && (barrierType & BARRIER_DOWN)) crossed = 1; if((exponent > logBarrier) && !(barrierType & BARRIER_DOWN)) crossed = 1; for(i = 0; i < depthN; i++) { r = d_Samples[index + i * pathN]; exponent += MuByT * dt + VBySqrtT * sqrtdt * r; if((exponent < logBarrier) && (barrierType & BARRIER_DOWN)) crossed = 1; if((exponent > logBarrier) && !(barrierType & BARRIER_DOWN)) crossed = 1; } value = S * _exp(exponent); Real_t callValue = value - X; callValue *= callPutFactor; if((crossed == 1) && (barrierType & BARRIER_OUT)) return static_cast<Real_t>(0.0); if((crossed == 0) && !(barrierType & BARRIER_OUT)) return static_cast<Real_t>(0.0); return (callValue > static_cast<Real_t>(0.0)) ? callValue : static_cast<Real_t>(0.0); } __device__ inline Real_t endCallValueLookback(Real_t S, Real_t X, Real_t MuByT, Real_t VBySqrtT, unsigned int index, unsigned int pathN, Real_t *d_Samples, Real_t callPutFactor, unsigned int depthN, int lookbackType) { Real_t maxValue = S; Real_t minValue = S; Real_t currentValue = S; unsigned int i; Real_t r = static_cast<Real_t>(0.0); Real_t sqrtdt = rsqrt((Real_t)depthN); Real_t dt = sqrtdt*sqrtdt; Real_t exponent = static_cast<Real_t>(0.0); Real_t callValue, putValue; for(i = 0; i < depthN; i++) { r = d_Samples[index + i * pathN]; exponent += MuByT * dt + VBySqrtT * sqrtdt * r; currentValue = S * _exp(exponent); if(currentValue < minValue) minValue = currentValue; if(currentValue > maxValue) maxValue = currentValue; } if(lookbackType == LOOKBACK_FLOATING) { callValue = currentValue - minValue; putValue = maxValue - currentValue; } else { callValue = maxValue - X; putValue = X - minValue; } if(callPutFactor > static_cast<Real_t>(0.0)) return (callValue > static_cast<Real_t>(0.0)) ? callValue : static_cast<Real_t>(0.0); else return (putValue > static_cast<Real_t>(0.0)) ? putValue : static_cast<Real_t>(0.0); } __device__ inline Real_t endCallValueAsian(Real_t S, Real_t X, Real_t MuByT, Real_t VBySqrtT, unsigned int index, unsigned int pathN, Real_t *d_Samples, Real_t callPutFactor, unsigned int depthN) { Real_t sum = S; unsigned int i; Real_t r = static_cast<Real_t>(0.0); Real_t sqrtdt = rsqrt((Real_t)depthN); Real_t dt = sqrtdt*sqrtdt; Real_t exponent = static_cast<Real_t>(0.0); for(i = 0; i < depthN; i++) { r = d_Samples[index + i * pathN]; exponent += MuByT * dt + VBySqrtT * sqrtdt * r; sum += S * _exp(exponent); } sum /= static_cast<Real_t>(depthN + 1); Real_t callValue = sum - X; callValue *= callPutFactor; return (callValue > static_cast<Real_t>(0.0)) ? callValue : static_cast<Real_t>(0.0); } __device__ inline Real_t endCallValueEuropean(Real_t S, Real_t X, Real_t MuByT, Real_t VBySqrtT, unsigned int index, unsigned int pathN, Real_t *d_Samples, Real_t callPutFactor, unsigned int depthN) { Real_t r = d_Samples[index]; Real_t callValue = S * _exp(MuByT + VBySqrtT * r) - X; callValue *= callPutFactor; return (callValue > static_cast<Real_t>(0.0)) ? 
callValue : static_cast<Real_t>(0.0);
}

__device__ inline Real_t endCallValue(Real_t S, Real_t X, Real_t MuByT, Real_t VBySqrtT, unsigned int index,
                                      unsigned int pathN, Real_t *d_Samples, Real_t callPutFactor,
                                      unsigned int depthN, int optType, Real_t barrier) {
    Real_t res = static_cast<Real_t>(-1.0);
    switch(optType) {
        case EUROPEAN:
            res = endCallValueEuropean(S, X, MuByT, VBySqrtT, index, pathN, d_Samples, callPutFactor, depthN);
            break;
        case ASIAN:
            res = endCallValueAsian(S, X, MuByT, VBySqrtT, index, pathN, d_Samples, callPutFactor, depthN);
            break;
        case BARRIERUPIN:
            res = endCallValueBarrier(S, X, MuByT, VBySqrtT, index, pathN, d_Samples, callPutFactor, depthN, barrier, 0);
            break;
        case BARRIERUPOUT:
            res = endCallValueBarrier(S, X, MuByT, VBySqrtT, index, pathN, d_Samples, callPutFactor, depthN, barrier, BARRIER_OUT);
            break;
        case BARRIERDOWNIN:
            res = endCallValueBarrier(S, X, MuByT, VBySqrtT, index, pathN, d_Samples, callPutFactor, depthN, barrier, BARRIER_DOWN);
            break;
        case BARRIERDOWNOUT:
            res = endCallValueBarrier(S, X, MuByT, VBySqrtT, index, pathN, d_Samples, callPutFactor, depthN, barrier, BARRIER_DOWN | BARRIER_OUT);
            break;
        case LOOKBACKFIXED:
            res = endCallValueLookback(S, X, MuByT, VBySqrtT, index, pathN, d_Samples, callPutFactor, depthN, LOOKBACK_FIXED);
            break;
        case LOOKBACKFLOATING:
            // Floating-strike lookbacks must use the floating payoff, not the fixed one.
            res = endCallValueLookback(S, X, MuByT, VBySqrtT, index, pathN, d_Samples, callPutFactor, depthN, LOOKBACK_FLOATING);
            break;
        default:
            break;
    }
    return res;
}

__global__ void MonteCarloKernel(
        Real_t *d_S, Real_t *d_X, Real_t *d_MuByT, Real_t *d_VBySqrtT, Real_t *d_Barrier,
        Real_t *d_Buffer, Real_t *d_Samples, unsigned int pathN, unsigned int depthN, int optType, int call) {
    const int optionIndex = blockIdx.y;

    const Real_t S = d_S[optionIndex];
    const Real_t X = d_X[optionIndex];
    const Real_t MuByT = d_MuByT[optionIndex];
    const Real_t VBySqrtT = d_VBySqrtT[optionIndex];
    const Real_t Barrier = d_Barrier[optionIndex];
    const Real_t callPutFactor = call ? static_cast<Real_t>(1.0) : static_cast<Real_t>(-1.0);

    //One thread per partial integral
    const unsigned int iSum = blockIdx.x * blockDim.x + threadIdx.x;
    const unsigned int accumN = blockDim.x * gridDim.x;

    //Cycle through the entire samples array:
    //derive end stock price for each path
    //accumulate into intermediate global memory array
    Real_t sumCall = static_cast<Real_t>(0.0);
    for(unsigned int i = iSum; i < pathN; i += accumN){
        Real_t callValue = endCallValue(S, X, MuByT, VBySqrtT, i, pathN, d_Samples, callPutFactor, depthN, optType, Barrier);
        sumCall += callValue;
    }
    d_Buffer[optionIndex * accumN + iSum] = sumCall;
}

__global__ void MonteCarloOneBlockPerOption(
        Real_t *d_CallValue, Real_t *d_S, Real_t *d_X, Real_t *d_MuByT, Real_t *d_VBySqrtT, Real_t *d_Barrier,
        Real_t *d_Samples, unsigned int pathN, unsigned int depthN, int optType, int call) {
    const int SUM_N = THREAD_N;
    __shared__ Real_t s_SumCall[SUM_N];

    const int optionIndex = blockIdx.x;
    const Real_t S = d_S[optionIndex];
    const Real_t X = d_X[optionIndex];
    const Real_t MuByT = d_MuByT[optionIndex];
    const Real_t VBySqrtT = d_VBySqrtT[optionIndex];
    const Real_t Barrier = d_Barrier[optionIndex];
    const Real_t callPutFactor = call ?
1 : -1; Real_t sumCall; Real_t callValue = 0; //Cycle through the entire samples array: //derive end stock price for each path //accumulate partial integrals into intermediate shared memory buffer for(unsigned int iSum = threadIdx.x; iSum < SUM_N; iSum += blockDim.x){ sumCall = static_cast<Real_t>(0.0); for(unsigned int i = iSum; i < pathN; i += SUM_N){ callValue = endCallValue(S, X, MuByT, VBySqrtT, i, pathN, d_Samples, callPutFactor, depthN, optType, Barrier); sumCall += callValue; } s_SumCall[iSum] = sumCall; } sumReduce<SUM_N, THREAD_N>(s_SumCall); if(threadIdx.x == 0){ d_CallValue[optionIndex] = s_SumCall[0]; } } __global__ void MonteCarloReduce( Real_t *d_CallValue, Real_t *d_Buffer, int accumN) { const int SUM_N = THREAD_N; __shared__ Real_t s_SumCall[SUM_N]; Real_t *d_SumBase = &d_Buffer[blockIdx.x * accumN]; for(int iSum = threadIdx.x; iSum < SUM_N; iSum += blockDim.x){ Real_t sumCall = 0; for(int pos = iSum; pos < accumN; pos += SUM_N){ sumCall += d_SumBase[pos]; } s_SumCall[iSum] = sumCall; } if(threadIdx.x == 0){ for(int i=1; i<SUM_N; i++) s_SumCall[0] += s_SumCall[i]; d_CallValue[blockIdx.x] = s_SumCall[0]; } } __device__ inline Real_t MoroInvCNDgpu(Real_t P){ const Real_t a1 = static_cast<Real_t>(2.50662823884); const Real_t a2 = static_cast<Real_t>(-18.61500062529); const Real_t a3 = static_cast<Real_t>(41.39119773534); const Real_t a4 = static_cast<Real_t>(-25.44106049637); const Real_t b1 = static_cast<Real_t>(-8.4735109309); const Real_t b2 = static_cast<Real_t>(23.08336743743); const Real_t b3 = static_cast<Real_t>(-21.06224101826); const Real_t b4 = static_cast<Real_t>(3.13082909833); const Real_t c1 = static_cast<Real_t>(0.337475482272615); const Real_t c2 = static_cast<Real_t>(0.976169019091719); const Real_t c3 = static_cast<Real_t>(0.160797971491821); const Real_t c4 = static_cast<Real_t>(2.76438810333863E-02); const Real_t c5 = static_cast<Real_t>(3.8405729373609E-03); const Real_t c6 = static_cast<Real_t>(3.951896511919E-04); const Real_t c7 = static_cast<Real_t>(3.21767881768E-05); const Real_t c8 = static_cast<Real_t>(2.888167364E-07); const Real_t c9 = static_cast<Real_t>(3.960315187E-07); Real_t y, z; y = P - static_cast<Real_t>(0.5); if(_abs(y) < static_cast<Real_t>(0.42)){ z = y * y; z = y * (((a4 * z + a3) * z + a2) * z + a1) / ((((b4 * z + b3) * z + b2) * z + b1) * z + 1); }else{ if(y > 0) z = _log(-_log(1 - P)); else z = _log(-_log(P)); z = c1 + z * (c2 + z * (c3 + z * (c4 + z * (c5 + z * (c6 + z * (c7 + z * (c8 + z * c9))))))); if(y < 0) z = -z; } return z; } // d_Samples should be filled with Uniform pseudo-random or quasi-random samples in [0,1] // Moro Inversion is used to convert Uniform to Normal[0,1] __global__ void inverseCNDKernel( Real_t *d_Samples, unsigned int pathN) { unsigned int tid = blockDim.x * blockIdx.x + threadIdx.x; unsigned int threadN = blockDim.x * gridDim.x; for(unsigned int pos = tid; pos < pathN; pos += threadN) { Real_t d = d_Samples[pos]; d_Samples[pos] = MoroInvCNDgpu(d); } } static int iMonteCarloMethod(WGL_Memory_t priceRes, WGL_Memory_t spot, WGL_Memory_t strike, WGL_Memory_t muByT, WGL_Memory_t vBySqrtT, WGL_Memory_t barrier, WGL_Memory_t buffer, WGL_Memory_t samples, mint pathN, mint depthN, mint numOptions, mint optType, mint callOrPut, mint blocksPerOption) { int err = LIBRARY_FUNCTION_ERROR; dim3 blockDim(256); dim3 gridDim1(blocksPerOption, numOptions); dim3 gridDim2(numOptions); dim3 moroBlockDim(128); dim3 moroGridDim(1); if (!(WGL_Type_RealQ(priceRes) && WGL_Type_RealQ(spot) && WGL_Type_RealQ(strike) && 
WGL_Type_RealQ(muByT) && WGL_Type_RealQ(vBySqrtT) && WGL_Type_RealQ(barrier) && WGL_Type_RealQ(samples))) { return LIBRARY_TYPE_ERROR; } else if (callOrPut != 1.0 && callOrPut != -1.0) { return LIBRARY_FUNCTION_ERROR; } WGL_SAFE_CALL(CUDA_Runtime_setMemoryAsOutput(wglState, priceRes, wglErr), cleanup); WGL_SAFE_CALL(CUDA_Runtime_setMemoryAsInput(wglState, spot, wglErr), cleanup); WGL_SAFE_CALL(CUDA_Runtime_setMemoryAsInput(wglState, strike, wglErr), cleanup); WGL_SAFE_CALL(CUDA_Runtime_setMemoryAsInput(wglState, muByT, wglErr), cleanup); WGL_SAFE_CALL(CUDA_Runtime_setMemoryAsInput(wglState, vBySqrtT, wglErr), cleanup); WGL_SAFE_CALL(CUDA_Runtime_setMemoryAsInput(wglState, barrier, wglErr), cleanup); WGL_SAFE_CALL(CUDA_Runtime_setMemoryAsInput(wglState, samples, wglErr), cleanup); hipLaunchKernelGGL(( inverseCNDKernel), dim3(moroGridDim), dim3(moroBlockDim), 0, 0, CUDA_Runtime_getDeviceMemoryAsReal(samples), pathN*depthN ); CUDA_Runtime_synchronize(wglErr); if (blocksPerOption != 1) { if (!WGL_Type_RealQ(buffer)) { return LIBRARY_TYPE_ERROR; } WGL_SAFE_CALL(CUDA_Runtime_setMemoryAsInput(wglState, buffer, wglErr), cleanup); hipLaunchKernelGGL(( MonteCarloKernel), dim3(gridDim1), dim3(blockDim), 0, 0, CUDA_Runtime_getDeviceMemoryAsReal(spot), CUDA_Runtime_getDeviceMemoryAsReal(strike), CUDA_Runtime_getDeviceMemoryAsReal(muByT), CUDA_Runtime_getDeviceMemoryAsReal(vBySqrtT), CUDA_Runtime_getDeviceMemoryAsReal(barrier), CUDA_Runtime_getDeviceMemoryAsReal(buffer), CUDA_Runtime_getDeviceMemoryAsReal(samples), pathN, depthN, optType, callOrPut ); hipLaunchKernelGGL(( MonteCarloReduce), dim3(gridDim2), dim3(blockDim), 0, 0, CUDA_Runtime_getDeviceMemoryAsReal(priceRes), CUDA_Runtime_getDeviceMemoryAsReal(buffer), 256 * blocksPerOption ); } else { hipLaunchKernelGGL(( MonteCarloOneBlockPerOption), dim3(gridDim2), dim3(blockDim), 0, 0, CUDA_Runtime_getDeviceMemoryAsReal(priceRes), CUDA_Runtime_getDeviceMemoryAsReal(spot), CUDA_Runtime_getDeviceMemoryAsReal(strike), CUDA_Runtime_getDeviceMemoryAsReal(muByT), CUDA_Runtime_getDeviceMemoryAsReal(vBySqrtT), CUDA_Runtime_getDeviceMemoryAsReal(barrier), CUDA_Runtime_getDeviceMemoryAsReal(samples), pathN, depthN, optType, callOrPut ); } CUDA_Runtime_synchronize(wglErr); if (WGL_SuccessQ) { err = LIBRARY_NO_ERROR; } cleanup: if (WGL_SuccessQ) { CUDA_Runtime_setMemoryAsValidOutput(wglState, priceRes, wglErr); } else { CUDA_Runtime_setMemoryAsInvalidOutput(wglState, priceRes, wglErr); } CUDA_Runtime_unsetMemoryAsInput(wglState, spot, wglErr); CUDA_Runtime_unsetMemoryAsInput(wglState, strike, wglErr); CUDA_Runtime_unsetMemoryAsInput(wglState, muByT, wglErr); CUDA_Runtime_unsetMemoryAsInput(wglState, vBySqrtT, wglErr); CUDA_Runtime_unsetMemoryAsInput(wglState, barrier, wglErr); CUDA_Runtime_unsetMemoryAsInput(wglState, samples, wglErr); if (blocksPerOption != 1) { CUDA_Runtime_unsetMemoryAsInput(wglState, buffer, wglErr); } return err; } EXTERN_C DLLEXPORT int oMonteCarloMethod(WolframLibraryData libData, mint Argc, MArgument * Args, MArgument Res) { WGL_Memory_t resMem, currentPriceMem, strikeMem, expirationMem, interestMem, volatilityMem, dividendMem, barrierMem; mint resMemId, currentPriceMemId, strikeMemId, expirationMemId, interestMemId, volatilityMemId, dividendMemId, barrierMemId; mint numOptions, optType, callOrPut; mint pathN = 10000, depthN = 500, threadN = 256, blocksPerOption = 1; Real_t * muByT = NULL, * vBySqrtT = NULL, * hBuffer = NULL, * hSamples = NULL; WGL_Memory_t callBufferMem = NULL, muByTMem = NULL, vBySqrtTMem = NULL, samplesMem = NULL; 
double * hExpiration = NULL, * hVolatility = NULL, * hInterest = NULL, * hDividend = NULL, * hRes = NULL, * RT = NULL; int err = LIBRARY_NO_ERROR; assert(Argc == 13); resMemId = MArgument_getInteger(Args[0]); currentPriceMemId = MArgument_getInteger(Args[1]); strikeMemId = MArgument_getInteger(Args[2]); expirationMemId = MArgument_getInteger(Args[3]); interestMemId = MArgument_getInteger(Args[4]); volatilityMemId = MArgument_getInteger(Args[5]); dividendMemId = MArgument_getInteger(Args[6]); barrierMemId = MArgument_getInteger(Args[7]); numOptions = MArgument_getInteger(Args[8]); optType = MArgument_getInteger(Args[9]); callOrPut = MArgument_getInteger(Args[10]); resMem = wglData->findMemory(wglData, resMemId); currentPriceMem = wglData->findMemory(wglData, currentPriceMemId); strikeMem = wglData->findMemory(wglData, strikeMemId); expirationMem = wglData->findMemory(wglData, expirationMemId); interestMem = wglData->findMemory(wglData, interestMemId); volatilityMem = wglData->findMemory(wglData, volatilityMemId); dividendMem = wglData->findMemory(wglData, dividendMemId); barrierMem = wglData->findMemory(wglData, barrierMemId); WGL_SAFE_CALL(wglData->setWolframLibraryData(wglData, libData), cleanup); New(muByT, Real_t, numOptions * sizeof(Real_t)); New(vBySqrtT, Real_t, numOptions * sizeof(Real_t)); New(hSamples, Real_t, pathN * depthN * sizeof(Real_t)); hExpiration = wglData->MTensorMemory_getRealData(wglData, expirationMem); assert(hExpiration != NULL); hVolatility = wglData->MTensorMemory_getRealData(wglData, volatilityMem); assert(hVolatility != NULL); hInterest = wglData->MTensorMemory_getRealData(wglData, interestMem); assert(hInterest != NULL); hDividend = wglData->MTensorMemory_getRealData(wglData, dividendMem); assert(hDividend != NULL); // The only inputs we really need for simulation are the mean (muByT) and standard deviation (vBySqrtT) for the normal random variables used to simulate brownian motion. for (mint ii = 0; ii < numOptions; ii++) { muByT[ii] = static_cast<Real_t>((hInterest[ii] - hDividend[ii] - 0.5f * hVolatility[ii]*hVolatility[ii]) * hExpiration[ii]); vBySqrtT[ii] = static_cast<Real_t>(hVolatility[ii] * sqrt(hExpiration[ii])); } // Create uniform random variables host-side, use a kernel to convert them to Normal(0,1). for (mint ii = 0; ii < pathN * depthN; ii++) { hSamples[ii] = static_cast<Real_t>(rand()) / static_cast<Real_t>(RAND_MAX); } // This determines how many blocks per option to use; it could probably be updated since I pulled it from the nvidia SDK // which did not do "real" monte carlo method (paths), so depthN was not a factor. if (pathN / numOptions >= 8192) { blocksPerOption = numOptions < 16 ? 
64 : 16;

        New(hBuffer, Real_t, blocksPerOption * threadN * numOptions * sizeof(Real_t));
        callBufferMem = wglData->newRawMemory(wglData, (void**)&hBuffer, WGL_MemoryResidence_DeviceHost, blocksPerOption * threadN * numOptions * sizeof(Real_t), True);
        callBufferMem->type = WGL_Real_t;
        assert(WGL_SuccessQ);
    }

    muByTMem = wglData->newRawMemory(wglData, (void**)&muByT, WGL_MemoryResidence_DeviceHost, numOptions * sizeof(Real_t), True);
    muByTMem->type = WGL_Real_t;
    assert(WGL_SuccessQ);

    vBySqrtTMem = wglData->newRawMemory(wglData, (void**)&vBySqrtT, WGL_MemoryResidence_DeviceHost, numOptions * sizeof(Real_t), True);
    vBySqrtTMem->type = WGL_Real_t;
    assert(WGL_SuccessQ);

    samplesMem = wglData->newRawMemory(wglData, (void**)&hSamples, WGL_MemoryResidence_DeviceHost, pathN * depthN * sizeof(Real_t), True);
    samplesMem->type = WGL_Real_t;
    assert(WGL_SuccessQ);

    err = iMonteCarloMethod(resMem, currentPriceMem, strikeMem, muByTMem, vBySqrtTMem, barrierMem, callBufferMem, samplesMem, pathN, depthN, numOptions, optType, callOrPut, blocksPerOption);

    hRes = wglData->MTensorMemory_getRealData(wglData, resMem);
    assert(hRes != NULL);

    New(RT, double, numOptions * sizeof(double));

    // The kernels return raw sums of the path payoffs: they neither average over the number of paths nor
    // discount back to present value. Do both here: divide by pathN and multiply by exp(-interest * expiration).
    for (int ii = 0; ii < numOptions; ii++) {
        RT[ii] = exp(-hInterest[ii]*hExpiration[ii]);
        hRes[ii] *= RT[ii] / static_cast<Real_t>(pathN);
    }

cleanup:
    Free(RT);

    wglData->freeMemory(wglData, callBufferMem);
    wglData->freeMemory(wglData, muByTMem);
    wglData->freeMemory(wglData, vBySqrtTMem);
    wglData->freeMemory(wglData, samplesMem);

    if (err == LIBRARY_NO_ERROR && WGL_SuccessQ) {
        return LIBRARY_NO_ERROR;
    } else if (err != LIBRARY_NO_ERROR) {
        return err;
    } else {
        return LIBRARY_FUNCTION_ERROR;
    }
}

WGLEXPORT int WolframGPULibrary_initialize(WolframGPULibraryData wglData0) {
    wglData = wglData0;
    return LIBRARY_NO_ERROR;
}

WGLEXPORT int WolframLibrary_initialize(WolframLibraryData libData) {
    return LIBRARY_NO_ERROR;
}

WGLEXPORT void WolframLibrary_uninitialize() {
    return;
}
2a0e657ec8725a7ba1223cf9551804a36c08600a.cu
/*********************************************************************//** * @file * * @section LICENCE * * Mathematica source file * * Copyright 1986 through 2010 by Wolfram Research Inc. * * @section DESCRIPTION * * * * $Id$ ************************************************************************/ /* * Copyright 1993-2010 NVIDIA Corporation. All rights reserved. * * NVIDIA Corporation and its licensors retain all intellectual property and * proprietary rights in and to this software and related documentation. * Any use, reproduction, disclosure, or distribution of this software * and related documentation without an express license agreement from * NVIDIA Corporation is strictly prohibited. * * Please refer to the applicable NVIDIA end user license agreement (EULA) * associated with this source code for terms and conditions that govern * your use of this NVIDIA software. * */ #include <wgl.h> #include <wgl_cuda_runtime.h> #include <iostream> #include <assert.h> using namespace std; #ifndef __func__ # if defined(__FUNCTION__) # define __func__ __FUNCTION__ # elif defined(__PRETTY_FUNCTION__) # define __func__ __PRETTY_FUNCTION__ # else # define __func__ __FILE__ # endif #else # define __func__ "unknown" #endif #ifdef DEBUG #if PRINT_DEBUG_LINE_NUMBERSQ #define PRINT_DBG_LINENO \ std::cout << "--- On line "<< __LINE__ << \ " in " << __FILE__ << " ---" << std::endl #define PRINT_DBG_END \ std::cout << std::endl << "----" << std::endl #else #define PRINT_DBG_LINENO #define PRINT_DBG_END \ std::cout << std::endl #endif /* PRINT_DEBUG_LINE_NUMBERSQ */ #define DEBUG_MSG(...) \ PRINT_DBG_LINENO; \ std::cout << "=== " << __VA_ARGS__; \ std::cout << " ==="; \ PRINT_DBG_END #else #define DEBUG_MSG(...) #endif #ifndef New #define New(to, type, n) to = (type *) wglData->alloc(n); assert(to != NULL) #endif /* New */ #ifndef Free #define Free(ptr) wglData->free(ptr); ptr = NULL #endif /* New */ #ifdef CONFIG_USE_DOUBLE_PRECISION #define Real_t double #define WGL_Real_t WGL_Type_Double #define CUDA_Runtime_getDeviceMemoryAsReal CUDA_Runtime_getDeviceMemoryAsDouble #else #define Real_t float #define WGL_Real_t WGL_Type_Float #define CUDA_Runtime_getDeviceMemoryAsReal CUDA_Runtime_getDeviceMemoryAsFloat #endif #define wglState (wglData->state) #define wglErr (wglData->getError(wglData)) #define WGL_SuccessQ (wglErr->code == WGL_Success) #define WGL_FailQ (!WGL_SuccessQ) #define WGL_Type_RealQ(mem) ((mem)->type == WGL_Real_t) #define WGL_SAFE_CALL(stmt, jmp) stmt; if (WGL_FailQ) { goto jmp; } #if CONFIG_USE_DOUBLE_PRECISION #define _exp(x) exp(static_cast<Real_t>(x)) #define _abs(x) abs(static_cast<Real_t>(x)) #define _log(x) log(static_cast<Real_t>(x)) #define _log10(x) log10(static_cast<Real_t>(x)) #define _sqrt(x) sqrt(static_cast<Real_t>(x)) #define _fmin(x) fmin(static_cast<Real_t>(x), static_cast<Real_t>(x)) #else /* CONFIG_USE_DOUBLE_PRECISION */ #define _exp(x) expf(static_cast<Real_t>(x)) #define _abs(x) fabs(static_cast<Real_t>(x)) #define _log(x) logf(static_cast<Real_t>(x)) #define _log10(x) log10f(static_cast<Real_t>(x)) #define _sqrt(x) sqrtf(static_cast<Real_t>(x)) #define _fmin(x) fminf(static_cast<Real_t>(x), static_cast<Real_t>(x)) #endif /* CONFIG_USE_DOUBLE_PRECISION */ /****************************************************/ /* Black Scholes / Analytic Options Pricing */ /****************************************************/ /** * Original code is under * ${basedir}/ExtraComponents/CUDA_SDK/3.0/Linux-x86-64/C/src/BlackScholes **/ // toCalculate Defines: #define VALUE 0 //* #define DELTA 
1 //* #define VEGA 2 //* #define THETA 3 //* #define RHO 4 //* #define GAMMA 5 //* These are the values calculated by FinancialDerivative, so highest priority. #define VANNA 6 // #define CHARM 7 // #define VOMMA 8 // #define DVEGADTIME 9 // #define SPEED 10 // #define ZOMMA 11 // Everything with a comment after is supported thus far #define COLOR 12 // // OptionType defines #define EUROPEAN 100 #define AMERICAN 101 #define ASIAN 102 #define BARRIERUPIN 103 #define BARRIERDOWNIN 104 #define BARRIERUPOUT 105 #define BARRIERDOWNOUT 106 #define LOOKBACKFIXED 107 #define LOOKBACKFLOATING 108 #define ASIANGEOMETRIC 109 WolframGPULibraryData wglData = NULL; // Approximate cumulative normal distribution function with a polynomial __device__ inline Real_t cndGPU(Real_t d) { const Real_t A1 = static_cast<Real_t>(0.31938153); const Real_t A2 = static_cast<Real_t>(-0.356563782); const Real_t A3 = static_cast<Real_t>(1.781477937); const Real_t A4 = static_cast<Real_t>(-1.821255978); const Real_t A5 = static_cast<Real_t>(1.330274429); const Real_t RSQRT2PI = static_cast<Real_t>(0.39894228040143267793994605993438); Real_t K = static_cast<Real_t>(1.0 / (1.0 + 0.2316419 * _abs(d))); Real_t cnd = RSQRT2PI * _exp(-static_cast<Real_t>(0.5) * d * d) * (K * (A1 + K * (A2 + K * (A3 + K * (A4 + K * A5))))); if(d > 0) cnd = 1 - cnd; return cnd; } __device__ inline Real_t pndGPU(Real_t d) { // Do something like above eventually? const Real_t RSQRT2PI = static_cast<Real_t>(0.39894228040143267793994605993438); const Real_t dsqby2 = d*d*static_cast<Real_t>(0.5); return _exp(-dsqby2)*RSQRT2PI; } //Computes CallResult and PutResult // S: spot price, X: strike Price, T: Expiration, R: Risk free interest rate, D: Dividend, V: Volatility __device__ inline void BlackScholesValueGPU(Real_t& CallResult, Real_t& PutResult, Real_t S, Real_t X, Real_t T, Real_t R, Real_t D, Real_t V) { Real_t sqrtT, expRT, expDT; Real_t d1, d2, CNDD1, CNDD2; sqrtT = _sqrt(T); d1 = (_log(S / X) + (R - D + static_cast<Real_t>(0.5) * V * V) * T) / (V * sqrtT); d2 = d1 - V * sqrtT; CNDD1 = cndGPU(d1); CNDD2 = cndGPU(d2); expRT = _exp(-R * T); expDT = _exp(-D * T); CallResult = S * expDT * CNDD1 - X * expRT * CNDD2; PutResult = X * expRT * (static_cast<Real_t>(1.0) - CNDD2) - S * expDT * (static_cast<Real_t>(1.0) - CNDD1); } // S: spot price, X: strike Price, T: Expiration, R: Risk free interest rate, D: Dividend, V: Volatility __device__ inline void BlackScholesDeltaGPU(Real_t& CallResult, Real_t& PutResult, Real_t S, Real_t X, Real_t T, Real_t R, Real_t D, Real_t V) { Real_t expDT; Real_t d1, CNDD1; d1 = (_log(S / X) + (R - D + static_cast<Real_t>(0.5) * V * V) * T) / (V * _sqrt(T)); CNDD1 = cndGPU(d1); expDT = _exp(-D*T); CallResult = expDT * CNDD1; PutResult = expDT * (CNDD1 - static_cast<Real_t>(1.0)); } // S: spot price, X: strike Price, T: Expiration, R: Risk free interest rate, D: Dividend, V: Volatility __device__ inline void BlackScholesVegaGPU(Real_t& CallResult, Real_t& PutResult, Real_t S, Real_t X, Real_t T, Real_t R, Real_t D, Real_t V) { Real_t expDT; Real_t d1, PNDD1; d1 = (_log(S / X) + (R - D + static_cast<Real_t>(0.5) * V * V) * T) / (V * _sqrt(T)); PNDD1 = pndGPU(d1); expDT = _exp(-D*T); CallResult = S * _sqrt(T) * expDT * PNDD1; PutResult = CallResult; } // S: spot price, X: strike Price, T: Expiration, R: Risk free interest rate, D: Dividend, V: Volatility __device__ inline void BlackScholesThetaGPU(Real_t& CallResult, Real_t& PutResult, Real_t S, Real_t X, Real_t T, Real_t R, Real_t D, Real_t V) { Real_t invSqrtT, sqrtT, 
expDT, expRT; Real_t d1, d2, CNDD1, CNDD2, CNDnD1, CNDnD2, PNDD1; invSqrtT = rsqrtf(T); sqrtT = static_cast<Real_t>(1.0)/invSqrtT; d1 = (_log(S / X) + (R - D + static_cast<Real_t>(0.5) * V * V) * T) / (V * sqrtT); d2 = d1 - V * sqrtT; PNDD1 = pndGPU(d1); CNDD1 = cndGPU(d1); CNDD2 = cndGPU(d2); CNDnD1 = cndGPU(-d1); CNDnD2 = cndGPU(-d2); expDT = _exp(-D*T); expRT = _exp(-R*T); CallResult = (-V * S * expDT * PNDD1 * invSqrtT * static_cast<Real_t>(0.5)) + D * S * CNDD1 * expDT - R * X * CNDD2 * expRT; PutResult = (-V * S * expDT * PNDD1 * invSqrtT * static_cast<Real_t>(0.5)) - D * S * CNDnD1 * expDT + R * X * CNDnD2 * expRT; } // S: spot price, X: strike Price, T: Expiration, R: Risk free interest rate, D: Dividend, V: Volatility __device__ inline void BlackScholesRhoGPU(Real_t& CallResult, Real_t& PutResult, Real_t S, Real_t X, Real_t T, Real_t R, Real_t D, Real_t V) { Real_t expRT, sqrtT; Real_t d2, CNDD2, CNDnD2; sqrtT = _sqrt(T); d2 = (_log(S / X) + (R - D - static_cast<Real_t>(0.5) * V * V) * T) / (V * sqrtT); CNDD2 = cndGPU(d2); CNDnD2 = cndGPU(-d2); expRT = _exp(-R*T); CallResult = X * T * expRT * CNDD2; PutResult = -X * T * expRT * CNDnD2; } // S: spot price, X: strike Price, T: Expiration, R: Risk free interest rate, D: Dividend, V: Volatility __device__ inline void BlackScholesGammaGPU(Real_t& CallResult, Real_t& PutResult, Real_t S, Real_t X, Real_t T, Real_t R, Real_t D, Real_t V) { Real_t expDT, invVolSqrtT; Real_t d1, PNDD1; invVolSqrtT = rsqrtf(T) / V; d1 = (_log(S / X) + (R - D + static_cast<Real_t>(0.5) * V * V) * T) * invVolSqrtT; PNDD1 = pndGPU(d1); expDT = _exp(-D*T); CallResult = expDT * PNDD1 * invVolSqrtT / S; PutResult = CallResult; } // S: spot price, X: strike Price, T: Expiration, R: Risk free interest rate, D: Dividend, V: Volatility __device__ inline void BlackScholesVannaGPU(Real_t& CallResult, Real_t& PutResult, Real_t S, Real_t X, Real_t T, Real_t R, Real_t D, Real_t V) { Real_t expDT, sqrtT; Real_t d1, d2, PNDD1; sqrtT = _sqrt(T); d1 = (_log(S / X) + (R - D + static_cast<Real_t>(0.5) * V * V) * T) / (V * sqrtT); d2 = d1 - V * sqrtT; PNDD1 = pndGPU(d1); expDT = _exp(-D*T); CallResult = -expDT * PNDD1 * d2 / V; PutResult = CallResult; } // S: spot price, X: strike Price, T: Expiration, R: Risk free interest rate, D: Dividend, V: Volatility __device__ inline void BlackScholesCharmGPU(Real_t& CallResult, Real_t& PutResult, Real_t S, Real_t X, Real_t T, Real_t R, Real_t D, Real_t V) { Real_t expDT, sqrtT; Real_t d1, d2, PNDD1, CNDD1, CNDnD1; sqrtT = _sqrt(T); d1 = (_log(S / X) + (R - D + static_cast<Real_t>(0.5) * V * V) * T) / (V * sqrtT); d2 = d1 - V * sqrtT; PNDD1 = pndGPU(d1); CNDD1 = cndGPU(d1); CNDnD1 = cndGPU(-d1); expDT = _exp(-D*T); CallResult = -D*expDT*CNDD1 + expDT*PNDD1*(static_cast<Real_t>(2.0)*(R-D)*T - d2*V*sqrtT) / (static_cast<Real_t>(2.0)*V*T*sqrtT); PutResult = CallResult + D*expDT*CNDD1 + D*expDT*CNDnD1; } // S: spot price, X: strike Price, T: Expiration, R: Risk free interest rate, D: Dividend, V: Volatility __device__ inline void BlackScholesSpeedGPU(Real_t& CallResult, Real_t& PutResult, Real_t S, Real_t X, Real_t T, Real_t R, Real_t D, Real_t V) { Real_t invVolSqrtT, callGamma, putGamma; Real_t d1; invVolSqrtT = static_cast<Real_t>(1.0)/V * rsqrtf(T); d1 = (_log(S / X) + (R - D + static_cast<Real_t>(0.5) * V * V) * T) * invVolSqrtT; BlackScholesGammaGPU(callGamma, putGamma, S, X, T, R, D, V); CallResult = (-callGamma / S) * (d1 * invVolSqrtT + static_cast<Real_t>(1.0)); PutResult = CallResult; } // S: spot price, X: strike Price, T: 
Expiration, R: Risk free interest rate, D: Dividend, V: Volatility __device__ inline void BlackScholesZommaGPU(Real_t& CallResult, Real_t& PutResult, Real_t S, Real_t X, Real_t T, Real_t R, Real_t D, Real_t V) { // Not terribly efficient. Real_t sqrtT, gammaCall, gammaPut; Real_t d1, d2; sqrtT = _sqrt(T); d1 = (_log(S / X) + (R - D + static_cast<Real_t>(0.5) * V * V) * T) / (V * sqrtT); d2 = d1 - V * sqrtT; BlackScholesGammaGPU(gammaCall, gammaPut, S, X, T, R, D, V); CallResult = gammaCall * ((d1*d2 - static_cast<Real_t>(1.0))/V); PutResult = CallResult; } // S: spot price, X: strike Price, T: Expiration, R: Risk free interest rate, D: Dividend, V: Volatility __device__ inline void BlackScholesColorGPU(Real_t& CallResult, Real_t& PutResult, Real_t S, Real_t X, Real_t T, Real_t R, Real_t D, Real_t V) { Real_t sqrtT, expDT; Real_t d1, d2, PNDD1; const Real_t one = 1; const Real_t two = 2; sqrtT = _sqrt(T); d1 = (_log(S / X) + (R - D + static_cast<Real_t>(0.5) * V * V) * T) / (V * sqrtT); d2 = d1 - V * sqrtT; PNDD1 = pndGPU(d1); expDT = _exp(-D*T); CallResult = expDT * PNDD1 / (two * S * T * V * sqrtT) * (two * D * T + one + (two*(R-D)*T - d2*V*sqrtT) * d1 / (V * sqrtT)); PutResult = CallResult; } // S: spot price, X: strike Price, T: Expiration, R: Risk free interest rate, D: Dividend, V: Volatility __device__ inline void BlackScholesDvegaDtimeGPU(Real_t& CallResult, Real_t& PutResult, Real_t S, Real_t X, Real_t T, Real_t R, Real_t D, Real_t V) { Real_t sqrtT, expDT; Real_t d1, d2, PNDD1; const Real_t one = 1; const Real_t two = 2; sqrtT = _sqrt(T); d1 = (_log(S / X) + (R - D + static_cast<Real_t>(0.5) * V * V) * T) / (V * sqrtT); d2 = d1 - V * sqrtT; PNDD1 = pndGPU(d1); expDT = _exp(-D*T); CallResult = S*expDT*PNDD1*sqrtT* (D + ((R-D)*d1)/(V*sqrtT) - (one + d1*d2)/(two*T)); PutResult = CallResult; } // S: spot price, X: strike Price, T: Expiration, R: Risk free interest rate, D: Dividend, V: Volatility __device__ inline void BlackScholesVommaGPU(Real_t& CallResult, Real_t& PutResult, Real_t S, Real_t X, Real_t T, Real_t R, Real_t D, Real_t V) { Real_t sqrtT, callVega, putVega; Real_t d1, d2; sqrtT = _sqrt(T); d1 = (_log(S / X) + (R - D + static_cast<Real_t>(0.5) * V * V) * T) / (V * sqrtT); d2 = d1 - V * sqrtT; BlackScholesVegaGPU(callVega, putVega, S, X, T, R, D, V); CallResult = callVega * (d1 * d2) / V; PutResult = CallResult; } // S: spot price, X: strike Price, T: Expiration, R: Risk free interest rate, D: Dividend, V: Volatility __device__ void AsianGeometricCalculate(Real_t& CallResult, Real_t& PutResult, Real_t S, Real_t X, Real_t T, Real_t R, Real_t D, Real_t V) { Real_t adjVol, adjDiv; Real_t d1, d2, CNDD1, CNDD2, CNDnD1, CNDnD2; Real_t sqrtT, expRT, expBRT; sqrtT = _sqrt(T); adjVol = V * static_cast<Real_t>(0.577350269); // V / sqrt(3) adjDiv = static_cast<Real_t>(0.5) * (R - D - V*V*static_cast<Real_t>(0.1666666666)); // (0.5 * (R - D - V*V/6)) d1 = (_log(S / X) + (adjDiv + static_cast<Real_t>(0.5)*adjVol*adjVol) * T) / (adjVol * sqrtT); d2 = d1 - adjVol*sqrtT; CNDD1 = cndGPU(d1); CNDD2 = cndGPU(d2); CNDnD1 = cndGPU(-d1); CNDnD2 = cndGPU(-d2); expRT = _exp(-R*T); expBRT = _exp((adjDiv - R) * T); CallResult = S * expBRT * CNDD1 - X * expRT * CNDD2; PutResult = X * expRT * CNDnD2 - S * expBRT * CNDnD1; } // S: spot price, X: strike Price, T: Expiration, R: Risk free interest rate, D: Dividend, V: Volatility, H: barrier, eta and phi are +/- 1 based on the type of barrier. 
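// Sign conventions for eta and phi (as set by the BarrierAnalyticGPU kernel further below): eta = +1 for
// down barriers (BARRIERDOWNIN, BARRIERDOWNOUT) and -1 for up barriers; phi = +1 for a call (callOrPut == 1)
// and -1 for a put. For example, a down-and-out call uses eta = +1, phi = +1. The AA..FF terms, combined
// per barrier type and the X-versus-H ordering, follow the usual analytic single-barrier decomposition
// (in the style of Reiner-Rubinstein).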
__device__ void BarrierCalculate(Real_t& CallResult, Real_t& PutResult, Real_t S, Real_t X, Real_t T, Real_t R, Real_t D, Real_t V, Real_t H, Real_t rebate, Real_t eta, Real_t phi, unsigned int bType) { Real_t sqrtT, invVolSqrtT, mu, lambda, z, x1, x2, y1, y2, expDT, expRT; Real_t AA, BB, CC, DD, EE, FF; const Real_t one = static_cast<Real_t>(1.0); const Real_t two = static_cast<Real_t>(2.0); sqrtT = _sqrt(T); invVolSqrtT = static_cast<Real_t>(1.0)/(V * sqrtT); mu = (R - D - static_cast<Real_t>(0.5)*V*V)/(V * V); lambda = (mu * mu + (static_cast<Real_t>(2.0) * R)/(V * V)); z = _log(H / S) * invVolSqrtT + lambda * V * sqrtT; x1 = _log(S / X) * invVolSqrtT + (one + mu) * V * sqrtT; x2 = _log(S / H) * invVolSqrtT + (one + mu) * V * sqrtT; y1 = _log((H*H)/(S*X)) * invVolSqrtT + (one + mu) * V * sqrtT; y2 = _log(H / S) * invVolSqrtT + (one + mu) * V * sqrtT; expDT = _exp(-D*T); expRT = _exp(-R*T); AA = phi * S * expDT * cndGPU(phi*x1) - phi * X * expRT * cndGPU(phi * x1 - phi * V * sqrtT); BB = phi * S * expDT * cndGPU(phi*x2) - phi * X * expRT * cndGPU(phi * x2 - phi * V * sqrtT); CC = phi * S * expDT * pow(H/S, two*mu + two) * cndGPU(eta*y1) - phi * X * expRT * pow(H/S, two*mu) * cndGPU(eta * y1 - eta * V * sqrtT); DD = phi * S * expDT * pow(H/S, two*mu + two) * cndGPU(eta*y2) - phi * X * expRT * pow(H/S, two*mu) * cndGPU(eta * y2 - eta * V * sqrtT); EE = rebate * expRT * (cndGPU(eta * x2 - eta * V * sqrtT) - pow(H/S, two*mu) * cndGPU(eta * y2 - eta * V * sqrtT)); FF = rebate * (pow(H/S, mu + lambda) * cndGPU(eta * z) + pow(H/S, mu - lambda) * cndGPU(eta * z - two * eta * lambda * V * sqrtT)); switch(bType) { case BARRIERDOWNIN: if(X > H) { CallResult = CC + EE; PutResult = BB - CC + DD + EE; } else { CallResult = AA - BB + DD + EE; PutResult = AA + EE; } break; case BARRIERDOWNOUT: if(X > H) { CallResult = AA - CC + FF; PutResult = AA - BB + CC - DD + FF; } else { CallResult = BB - DD + FF; PutResult = FF; } break; case BARRIERUPIN: if(X > H) { CallResult = AA + EE; PutResult = AA - BB + DD + EE; } else { CallResult = BB - CC + DD + EE; PutResult = CC + EE; } break; case BARRIERUPOUT: if(X > H) { CallResult = FF; PutResult = BB - DD + FF; } else { CallResult = AA - BB + CC - DD + FF; PutResult = AA - CC + FF; } break; } } // S: spot price, X: strike Price, T: Expiration, R: Risk free interest rate, D: Dividend, V: Volatility, lbType: Specifies floating or fixed lookback __device__ void LookbackCalculate(Real_t& CallResult, Real_t& PutResult, Real_t S, Real_t X, Real_t T, Real_t R, Real_t D, Real_t V, unsigned int lbType) { Real_t sqrtT, expRT, expDT, expRDT; Real_t d1, d2, CNDD1, CNDD2, CNDnD1, CNDnD2, K; K = max(S, X); sqrtT = _sqrt(T); d1 = (_log(S/X) + (R + D - static_cast<Real_t>(0.5) * V * V) * T) / (V * sqrtT); d2 = d1 - V * sqrtT; CNDD1 = cndGPU(d1); CNDD2 = cndGPU(d2); CNDnD1 = cndGPU(-d1); CNDnD2 = cndGPU(-d2); expRT = _exp(-R*T); expDT = _exp(-D*T); expRDT = _exp((R-D)*T); if(lbType == LOOKBACKFIXED) { CallResult = expRT * max(S - X, static_cast<Real_t>(0.0)) + S * expDT * CNDD1 - K * expRT * CNDD2 + S * expRT * V * V / (static_cast<Real_t>(2.0) * (R - D)) * (-pow(S/K, static_cast<Real_t>(-2.0) * (R - D) / (V * V))) * cndGPU(d1 - static_cast<Real_t>(2.0) * (R - D) * sqrtT / V) + expRDT * CNDD1; PutResult = expRT * max(X - S, static_cast<Real_t>(0.0)) - S * expDT * CNDnD1 + K * expRT * CNDnD2 + S * expRT * V * V / (static_cast<Real_t>(2.0) * (R - D)) * (pow(S/K, static_cast<Real_t>(-2.0) * (R - D) / (V * V))) * cndGPU(-d1 + static_cast<Real_t>(2.0) * (R - D) * sqrtT / V) - expRDT * 
CNDnD1; } else { CallResult = S * expDT * CNDD1 - S * expRT * CNDD2 + S * expRT * V * V / (static_cast<Real_t>(2.0) * (R - D)) * cndGPU(-d1 + static_cast<Real_t>(2.0) * (R - D) * sqrtT / V) - expRDT * CNDnD1; PutResult = -S * expDT * CNDnD1 + S * expRT * CNDnD2 + S * expRT * V * V / (static_cast<Real_t>(2.0) * (R - D)) * cndGPU(d1 - static_cast<Real_t>(2.0) * (R - D) * sqrtT / V) + expRDT * CNDD1; } } __global__ void BlackScholesGPU(Real_t *d_CallResult, Real_t *d_PutResult, Real_t *d_CurrentPrice, Real_t *d_OptionStrike, Real_t *d_Expiration, Real_t *d_InterestRate, Real_t *d_Dividends, Real_t *d_Volatility, int optN, int toCalculate) { const int tid = blockDim.x * blockIdx.x + threadIdx.x; const int THREAD_N = blockDim.x * gridDim.x; /*for(int opt = tid; opt < optN; opt += THREAD_N) { BlackScholesValueGPU(d_CallResult[opt], d_PutResult[opt], d_CurrentPrice[opt], d_OptionStrike[opt], d_Expiration[opt], d_InterestRate[opt], d_Dividends[opt], d_Volatility[opt]); }*/ switch(toCalculate) { case VALUE: for(int opt = tid; opt < optN; opt += THREAD_N) { BlackScholesValueGPU(d_CallResult[opt], d_PutResult[opt], d_CurrentPrice[opt], d_OptionStrike[opt], d_Expiration[opt], d_InterestRate[opt], d_Dividends[opt], d_Volatility[opt]); } break; case DELTA: for(int opt = tid; opt < optN; opt += THREAD_N) { BlackScholesDeltaGPU(d_CallResult[opt], d_PutResult[opt], d_CurrentPrice[opt], d_OptionStrike[opt], d_Expiration[opt], d_InterestRate[opt], d_Dividends[opt], d_Volatility[opt]); } break; case VEGA: for(int opt = tid; opt < optN; opt += THREAD_N) { BlackScholesVegaGPU(d_CallResult[opt], d_PutResult[opt], d_CurrentPrice[opt], d_OptionStrike[opt], d_Expiration[opt], d_InterestRate[opt], d_Dividends[opt], d_Volatility[opt]); } break; case THETA: for(int opt = tid; opt < optN; opt += THREAD_N) { BlackScholesThetaGPU(d_CallResult[opt], d_PutResult[opt], d_CurrentPrice[opt], d_OptionStrike[opt], d_Expiration[opt], d_InterestRate[opt], d_Dividends[opt], d_Volatility[opt]); } break; case RHO: for(int opt = tid; opt < optN; opt += THREAD_N) { BlackScholesRhoGPU(d_CallResult[opt], d_PutResult[opt], d_CurrentPrice[opt], d_OptionStrike[opt], d_Expiration[opt], d_InterestRate[opt], d_Dividends[opt], d_Volatility[opt]); } break; case GAMMA: for(int opt = tid; opt < optN; opt += THREAD_N) { BlackScholesGammaGPU(d_CallResult[opt], d_PutResult[opt], d_CurrentPrice[opt], d_OptionStrike[opt], d_Expiration[opt], d_InterestRate[opt], d_Dividends[opt], d_Volatility[opt]); } break; case VANNA: for(int opt = tid; opt < optN; opt += THREAD_N) { BlackScholesVannaGPU(d_CallResult[opt], d_PutResult[opt], d_CurrentPrice[opt], d_OptionStrike[opt], d_Expiration[opt], d_InterestRate[opt], d_Dividends[opt], d_Volatility[opt]); } break; case CHARM: for(int opt = tid; opt < optN; opt += THREAD_N) { BlackScholesCharmGPU(d_CallResult[opt], d_PutResult[opt], d_CurrentPrice[opt], d_OptionStrike[opt], d_Expiration[opt], d_InterestRate[opt], d_Dividends[opt], d_Volatility[opt]); } break; case VOMMA: for(int opt = tid; opt < optN; opt += THREAD_N) { BlackScholesVommaGPU(d_CallResult[opt], d_PutResult[opt], d_CurrentPrice[opt], d_OptionStrike[opt], d_Expiration[opt], d_InterestRate[opt], d_Dividends[opt], d_Volatility[opt]); } break; case DVEGADTIME: for(int opt = tid; opt < optN; opt += THREAD_N) { BlackScholesDvegaDtimeGPU(d_CallResult[opt], d_PutResult[opt], d_CurrentPrice[opt], d_OptionStrike[opt], d_Expiration[opt], d_InterestRate[opt], d_Dividends[opt], d_Volatility[opt]); } break; case SPEED: for(int opt = tid; opt < optN; opt += 
THREAD_N) { BlackScholesSpeedGPU(d_CallResult[opt], d_PutResult[opt], d_CurrentPrice[opt], d_OptionStrike[opt], d_Expiration[opt], d_InterestRate[opt], d_Dividends[opt], d_Volatility[opt]); } break; case ZOMMA: for(int opt = tid; opt < optN; opt += THREAD_N) { BlackScholesZommaGPU(d_CallResult[opt], d_PutResult[opt], d_CurrentPrice[opt], d_OptionStrike[opt], d_Expiration[opt], d_InterestRate[opt], d_Dividends[opt], d_Volatility[opt]); } break; case COLOR: for(int opt = tid; opt < optN; opt += THREAD_N) { BlackScholesColorGPU(d_CallResult[opt], d_PutResult[opt], d_CurrentPrice[opt], d_OptionStrike[opt], d_Expiration[opt], d_InterestRate[opt], d_Dividends[opt], d_Volatility[opt]); } break; default: for(int opt = tid; opt < optN; opt += THREAD_N) { d_CallResult[opt] = static_cast<Real_t>(0.0); d_PutResult[opt] = static_cast<Real_t>(0.0); } break; } } __global__ void AsianGeometricAnalyticGPU(int optN, Real_t *d_CallResult, Real_t *d_PutResult, Real_t *d_CurrentPrice, Real_t *d_OptionStrike, Real_t *d_Expiration, Real_t *d_InterestRate, Real_t *d_Dividends, Real_t *d_Volatility) { const int tid = blockDim.x * blockIdx.x + threadIdx.x; const int THREAD_N = blockDim.x * gridDim.x; for(int opt = tid; opt < optN; opt += THREAD_N) { AsianGeometricCalculate(d_CallResult[opt], d_PutResult[opt], d_CurrentPrice[opt], d_OptionStrike[opt], d_Expiration[opt], d_InterestRate[opt], d_Dividends[opt], d_Volatility[opt]); } } __global__ void BarrierAnalyticGPU(int optN, Real_t *d_CallResult, Real_t *d_PutResult, Real_t *d_CurrentPrice, Real_t *d_OptionStrike, Real_t *d_Expiration, Real_t *d_InterestRate, Real_t *d_Dividends, Real_t *d_Volatility, Real_t *d_Barrier, Real_t *d_Rebate, unsigned int barrierType, int callOrPut) { const int tid = blockDim.x * blockIdx.x + threadIdx.x; const int THREAD_N = blockDim.x * gridDim.x; Real_t eta, phi; switch(barrierType) { case BARRIERDOWNIN: eta = static_cast<Real_t>(1.0); phi = callOrPut == 1 ? static_cast<Real_t>(1.0) : static_cast<Real_t>(-1.0); break; case BARRIERDOWNOUT: eta = static_cast<Real_t>(1.0); phi = callOrPut == 1 ? static_cast<Real_t>(1.0) : static_cast<Real_t>(-1.0); break; case BARRIERUPIN: eta = static_cast<Real_t>(-1.0); phi = callOrPut == 1 ? static_cast<Real_t>(1.0) : static_cast<Real_t>(-1.0); break; case BARRIERUPOUT: eta = static_cast<Real_t>(-1.0); phi = callOrPut == 1 ? 
static_cast<Real_t>(1.0) : static_cast<Real_t>(-1.0); break; } for(int opt = tid; opt < optN; opt += THREAD_N) { BarrierCalculate(d_CallResult[opt], d_PutResult[opt], d_CurrentPrice[opt], d_OptionStrike[opt], d_Expiration[opt], d_InterestRate[opt], d_Dividends[opt], d_Volatility[opt], d_Barrier[opt], d_Rebate[opt], eta, phi, barrierType); } } __global__ void LookbackAnalyticGPU(int optN, Real_t *d_CallResult, Real_t *d_PutResult, Real_t *d_CurrentPrice, Real_t *d_OptionStrike, Real_t *d_Expiration, Real_t *d_InterestRate, Real_t *d_Dividends, Real_t *d_Volatility, unsigned int lookbackType) { const int tid = blockDim.x * blockIdx.x + threadIdx.x; const int THREAD_N = blockDim.x * gridDim.x; for(int opt = tid; opt < optN; opt += THREAD_N) { LookbackCalculate(d_CallResult[opt], d_PutResult[opt], d_CurrentPrice[opt], d_OptionStrike[opt], d_Expiration[opt], d_InterestRate[opt], d_Dividends[opt], d_Volatility[opt], lookbackType); } } static int iBlackScholes(WGL_Memory_t call, WGL_Memory_t put, WGL_Memory_t spot, WGL_Memory_t strikePrice, WGL_Memory_t expiration, WGL_Memory_t interest, WGL_Memory_t volatility, WGL_Memory_t dividend, WGL_Memory_t barrier, WGL_Memory_t rebate, mint numOptions, mint calculationType, mint optionType, int callOrPut) { mbool barrierQ = False; dim3 blockDim(128); dim3 gridDim(512); if (!(WGL_Type_RealQ(call) && WGL_Type_RealQ(put) && WGL_Type_RealQ(spot) && WGL_Type_RealQ(strikePrice) && WGL_Type_RealQ(expiration) && WGL_Type_RealQ(interest) && WGL_Type_RealQ(volatility) && WGL_Type_RealQ(dividend))) { return LIBRARY_TYPE_ERROR; } WGL_SAFE_CALL(CUDA_Runtime_setMemoryAsOutput(wglState, call, wglErr), cleanup); WGL_SAFE_CALL(CUDA_Runtime_setMemoryAsOutput(wglState, put, wglErr), cleanup); WGL_SAFE_CALL(CUDA_Runtime_setMemoryAsInput(wglState, spot, wglErr), cleanup); WGL_SAFE_CALL(CUDA_Runtime_setMemoryAsInput(wglState, strikePrice, wglErr), cleanup); WGL_SAFE_CALL(CUDA_Runtime_setMemoryAsInput(wglState, expiration, wglErr), cleanup); WGL_SAFE_CALL(CUDA_Runtime_setMemoryAsInput(wglState, interest, wglErr), cleanup); WGL_SAFE_CALL(CUDA_Runtime_setMemoryAsInput(wglState, dividend, wglErr), cleanup); WGL_SAFE_CALL(CUDA_Runtime_setMemoryAsInput(wglState, volatility, wglErr), cleanup); if (calculationType >= 0) { BlackScholesGPU<<<gridDim, blockDim>>>( CUDA_Runtime_getDeviceMemoryAsReal(call), CUDA_Runtime_getDeviceMemoryAsReal(put), CUDA_Runtime_getDeviceMemoryAsReal(spot), CUDA_Runtime_getDeviceMemoryAsReal(strikePrice), CUDA_Runtime_getDeviceMemoryAsReal(expiration), CUDA_Runtime_getDeviceMemoryAsReal(interest), CUDA_Runtime_getDeviceMemoryAsReal(dividend), CUDA_Runtime_getDeviceMemoryAsReal(volatility), numOptions, calculationType ); } else if (optionType == ASIANGEOMETRIC) { AsianGeometricAnalyticGPU<<<gridDim, blockDim>>>( numOptions, CUDA_Runtime_getDeviceMemoryAsReal(call), CUDA_Runtime_getDeviceMemoryAsReal(put), CUDA_Runtime_getDeviceMemoryAsReal(spot), CUDA_Runtime_getDeviceMemoryAsReal(strikePrice), CUDA_Runtime_getDeviceMemoryAsReal(expiration), CUDA_Runtime_getDeviceMemoryAsReal(interest), CUDA_Runtime_getDeviceMemoryAsReal(dividend), CUDA_Runtime_getDeviceMemoryAsReal(volatility) ); } else if (optionType == LOOKBACKFIXED || optionType == LOOKBACKFLOATING) { LookbackAnalyticGPU<<<gridDim, blockDim>>>( numOptions, CUDA_Runtime_getDeviceMemoryAsReal(call), CUDA_Runtime_getDeviceMemoryAsReal(put), CUDA_Runtime_getDeviceMemoryAsReal(spot), CUDA_Runtime_getDeviceMemoryAsReal(strikePrice), CUDA_Runtime_getDeviceMemoryAsReal(expiration), 
CUDA_Runtime_getDeviceMemoryAsReal(interest), CUDA_Runtime_getDeviceMemoryAsReal(dividend), CUDA_Runtime_getDeviceMemoryAsReal(volatility), optionType ); } else { barrierQ = True; WGL_SAFE_CALL(CUDA_Runtime_setMemoryAsOutput(wglState, barrier, wglErr), cleanup); WGL_SAFE_CALL(CUDA_Runtime_setMemoryAsOutput(wglState, rebate, wglErr), cleanup); BarrierAnalyticGPU<<<gridDim, blockDim>>>( numOptions, CUDA_Runtime_getDeviceMemoryAsReal(call), CUDA_Runtime_getDeviceMemoryAsReal(put), CUDA_Runtime_getDeviceMemoryAsReal(spot), CUDA_Runtime_getDeviceMemoryAsReal(strikePrice), CUDA_Runtime_getDeviceMemoryAsReal(expiration), CUDA_Runtime_getDeviceMemoryAsReal(interest), CUDA_Runtime_getDeviceMemoryAsReal(dividend), CUDA_Runtime_getDeviceMemoryAsReal(volatility), CUDA_Runtime_getDeviceMemoryAsReal(barrier), CUDA_Runtime_getDeviceMemoryAsReal(rebate), optionType, callOrPut ); } CUDA_Runtime_synchronize(wglErr); cleanup: if (WGL_SuccessQ) { CUDA_Runtime_setMemoryAsValidOutput(wglState, call, wglErr); CUDA_Runtime_setMemoryAsValidOutput(wglState, put, wglErr); } else { CUDA_Runtime_setMemoryAsInvalidOutput(wglState, call, wglErr); CUDA_Runtime_setMemoryAsInvalidOutput(wglState, put, wglErr); } CUDA_Runtime_unsetMemoryAsInput(wglState, spot, wglErr); CUDA_Runtime_unsetMemoryAsInput(wglState, strikePrice, wglErr); CUDA_Runtime_unsetMemoryAsInput(wglState, expiration, wglErr); CUDA_Runtime_unsetMemoryAsInput(wglState, interest, wglErr); CUDA_Runtime_unsetMemoryAsInput(wglState, volatility, wglErr); CUDA_Runtime_unsetMemoryAsInput(wglState, dividend, wglErr); if (barrierQ) { CUDA_Runtime_unsetMemoryAsInput(wglState, rebate, wglErr); CUDA_Runtime_unsetMemoryAsInput(wglState, barrier, wglErr); } if (WGL_SuccessQ) { return LIBRARY_NO_ERROR; } else { return LIBRARY_FUNCTION_ERROR; } } EXTERN_C DLLEXPORT int oBlackScholes(WolframLibraryData libData, mint Argc, MArgument * Args, MArgument Res) { WGL_Memory_t callMemory, putMemory, currentPriceMemory, strikePriceMemory, expirationMemory, interestMemory; WGL_Memory_t volatilityMemory, dividendMemory, barrierMemory, rebateMemory; mint callMemoryId, putMemoryId, currentPriceMemoryId, strikePriceMemoryId, expirationMemoryId, interestMemoryId; mint volatilityMemoryId, dividendMemoryId, barrierMemoryId, rebateMemoryId; mint numOptions, calculationType, optionType, callOrPut; int err; assert(Argc == 16); callMemoryId = MArgument_getInteger(Args[0]); putMemoryId = MArgument_getInteger(Args[1]); currentPriceMemoryId = MArgument_getInteger(Args[2]); strikePriceMemoryId = MArgument_getInteger(Args[3]); expirationMemoryId = MArgument_getInteger(Args[4]); interestMemoryId = MArgument_getInteger(Args[5]); volatilityMemoryId = MArgument_getInteger(Args[6]); dividendMemoryId = MArgument_getInteger(Args[7]); barrierMemoryId = MArgument_getInteger(Args[8]); rebateMemoryId = MArgument_getInteger(Args[9]); numOptions = MArgument_getInteger(Args[10]); calculationType = MArgument_getInteger(Args[11]); optionType = MArgument_getInteger(Args[12]); callOrPut = MArgument_getInteger(Args[13]); WGL_SAFE_CALL(wglData->setWolframLibraryData(wglData, libData), cleanup); callMemory = wglData->findMemory(wglData, callMemoryId); putMemory = wglData->findMemory(wglData, putMemoryId); currentPriceMemory = wglData->findMemory(wglData, currentPriceMemoryId); strikePriceMemory = wglData->findMemory(wglData, strikePriceMemoryId); expirationMemory = wglData->findMemory(wglData, expirationMemoryId); interestMemory = wglData->findMemory(wglData, interestMemoryId); volatilityMemory = 
wglData->findMemory(wglData, volatilityMemoryId); dividendMemory = wglData->findMemory(wglData, dividendMemoryId); barrierMemory = wglData->findMemory(wglData, barrierMemoryId); rebateMemory = wglData->findMemory(wglData, rebateMemoryId); err = iBlackScholes(callMemory, putMemory, currentPriceMemory, strikePriceMemory, expirationMemory, interestMemory, volatilityMemory, dividendMemory, barrierMemory, rebateMemory, numOptions, calculationType, optionType, callOrPut); cleanup: if (err == LIBRARY_NO_ERROR && WGL_SuccessQ) { return LIBRARY_NO_ERROR; } else if (err != LIBRARY_NO_ERROR) { return err; } else { return LIBRARY_FUNCTION_ERROR; } } /************************************************/ /* Binomial Method Options Pricing */ /************************************************/ /** * Original code is under * ${basedir}/ExtraComponents/CUDA_SDK/3.0/Linux-x86-64/C/src/binomialOptions **/ #define CACHE_DELTA 32 #define CACHE_SIZE 256 #define CACHE_STEP (CACHE_SIZE - CACHE_DELTA) #define NUM_STEPS 128 __device__ inline Real_t expiryCallValue(Real_t S, Real_t X, Real_t vDt, float callPutFactor, int i) { Real_t d = S * _exp(vDt * (NUM_STEPS - static_cast<Real_t>(2.0) * i)) - X; d *= callPutFactor; return (d > static_cast<Real_t>(0.0)) ? d : static_cast<Real_t>(0.0); } __global__ void binomialOptionsKernel(Real_t* d_CallValue, Real_t* d_CallBuffer, Real_t* d_S, Real_t* d_X, Real_t* d_vDt, Real_t* d_puByDf, Real_t* d_pdByDf, int optType, int call) { __shared__ Real_t callA[CACHE_SIZE+1]; __shared__ Real_t callB[CACHE_SIZE+1]; Real_t *const d_Call = &d_CallBuffer[blockIdx.x * (NUM_STEPS + 16)]; const int tid = threadIdx.x; const Real_t S = d_S[blockIdx.x]; const Real_t X = d_X[blockIdx.x]; const Real_t vDt = d_vDt[blockIdx.x]; const Real_t puByDf = d_puByDf[blockIdx.x]; const Real_t pdByDf = d_pdByDf[blockIdx.x]; const Real_t callPutFactor = call == 1 ? static_cast<Real_t>(1.0) : static_cast<Real_t>(-1.0); Real_t callValue, temp, currentVal; for(int i = tid; i <= NUM_STEPS; i+= CACHE_SIZE) d_Call[i] = expiryCallValue(S, X, vDt, callPutFactor, i); for(int i = NUM_STEPS; i > 0; i -= CACHE_DELTA) { for(int c_base = 0; c_base < i; c_base += CACHE_STEP) { int c_start = min(CACHE_SIZE - 1, i - c_base); int c_end = c_start - CACHE_DELTA; __syncthreads(); if(tid <= c_start) callA[tid] = d_Call[c_base + tid]; currentVal = vDt * static_cast<Real_t>(i - 2*(c_base + tid) - 1); for(int k = c_start - 1; k >= c_end;) { __syncthreads(); callValue = pdByDf * callA[tid+1] + puByDf * callA[tid]; if(optType == AMERICAN) { temp = S * _exp(currentVal) - X; temp *= callPutFactor; callValue = callValue > temp ? callValue : temp; } callB[tid] = callValue; k--; currentVal -= vDt; __syncthreads(); callValue = pdByDf * callB[tid+1] + puByDf * callB[tid]; if(optType == AMERICAN) { temp = S * _exp(currentVal) - X; temp *= callPutFactor; callValue = callValue > temp ? 
callValue : temp; } callA[tid] = callValue; k--; currentVal -= vDt; } __syncthreads(); if(tid <= c_end) d_Call[c_base + tid] = callA[tid]; } } if(threadIdx.x == 0) d_CallValue[blockIdx.x] = static_cast<Real_t>(callA[0]); } static int iBinomialMethod(WGL_Memory_t priceRes, WGL_Memory_t spot, WGL_Memory_t strike, WGL_Memory_t buffer, WGL_Memory_t vDt, WGL_Memory_t puByDf, WGL_Memory_t pdByDf, mint numOptions, mint optionType, mint callOrPut) { int err = LIBRARY_FUNCTION_ERROR; dim3 blockDim(256); dim3 gridDim(numOptions); if (!(WGL_Type_RealQ(priceRes) && WGL_Type_RealQ(spot) && WGL_Type_RealQ(strike) && WGL_Type_RealQ(buffer) && WGL_Type_RealQ(vDt) && WGL_Type_RealQ(puByDf) && WGL_Type_RealQ(pdByDf))) { return LIBRARY_TYPE_ERROR; } WGL_SAFE_CALL(CUDA_Runtime_setMemoryAsOutput(wglState, priceRes, wglErr), cleanup); WGL_SAFE_CALL(CUDA_Runtime_setMemoryAsInput(wglState, spot, wglErr), cleanup); WGL_SAFE_CALL(CUDA_Runtime_setMemoryAsInput(wglState, strike, wglErr), cleanup); WGL_SAFE_CALL(CUDA_Runtime_setMemoryAsInput(wglState, buffer, wglErr), cleanup); WGL_SAFE_CALL(CUDA_Runtime_setMemoryAsInput(wglState, vDt, wglErr), cleanup); WGL_SAFE_CALL(CUDA_Runtime_setMemoryAsInput(wglState, puByDf, wglErr), cleanup); WGL_SAFE_CALL(CUDA_Runtime_setMemoryAsInput(wglState, pdByDf, wglErr), cleanup); binomialOptionsKernel<<<gridDim, blockDim>>>( CUDA_Runtime_getDeviceMemoryAsReal(priceRes), CUDA_Runtime_getDeviceMemoryAsReal(buffer), CUDA_Runtime_getDeviceMemoryAsReal(spot), CUDA_Runtime_getDeviceMemoryAsReal(strike), CUDA_Runtime_getDeviceMemoryAsReal(vDt), CUDA_Runtime_getDeviceMemoryAsReal(puByDf), CUDA_Runtime_getDeviceMemoryAsReal(pdByDf), optionType, callOrPut ); CUDA_Runtime_synchronize(wglErr); if (WGL_SuccessQ) { err = LIBRARY_NO_ERROR; } cleanup: if (WGL_SuccessQ) { CUDA_Runtime_setMemoryAsValidOutput(wglState, priceRes, wglErr); } else { CUDA_Runtime_setMemoryAsInvalidOutput(wglState, priceRes, wglErr); } CUDA_Runtime_unsetMemoryAsInput(wglState, spot, wglErr); CUDA_Runtime_unsetMemoryAsInput(wglState, strike, wglErr); CUDA_Runtime_unsetMemoryAsInput(wglState, vDt, wglErr); CUDA_Runtime_unsetMemoryAsInput(wglState, puByDf, wglErr); CUDA_Runtime_unsetMemoryAsInput(wglState, pdByDf, wglErr); return err; } EXTERN_C DLLEXPORT int oBinomialMethod(WolframLibraryData libData, mint Argc, MArgument * Args, MArgument Res) { WGL_Memory_t resMem, currentPriceMem, strikeMem, expirationMem, interestMem, volatilityMem, dividendMem; mint resMemId, currentPriceMemId, strikeMemId, expirationMemId, interestMemId, volatilityMemId, dividendMemId; mint numOptions, optionType, callOrPut; mint numSteps = 128;//treeDepth; Real_t * dt = NULL, * vDt = NULL, * rDt = NULL, * If = NULL, * df = NULL; Real_t * u = NULL, * d = NULL, * pu = NULL, * pd = NULL, * puByDf = NULL, * pdByDf = NULL; Real_t * hCallBuffer = NULL; WGL_Memory_t callBufferMem = NULL, vDtMem = NULL, puByDfMem = NULL, pdByDfMem = NULL; int err = LIBRARY_NO_ERROR; double * hExpiration, * hVolatility, * hInterest, * hDividend; assert(Argc == 12); resMemId = MArgument_getInteger(Args[0]); currentPriceMemId = MArgument_getInteger(Args[1]); strikeMemId = MArgument_getInteger(Args[2]); expirationMemId = MArgument_getInteger(Args[3]); interestMemId = MArgument_getInteger(Args[4]); volatilityMemId = MArgument_getInteger(Args[5]); dividendMemId = MArgument_getInteger(Args[6]); numOptions = MArgument_getInteger(Args[7]); optionType = MArgument_getInteger(Args[8]); callOrPut = MArgument_getInteger(Args[9]); resMem = wglData->findMemory(wglData, resMemId); 
currentPriceMem = wglData->findMemory(wglData, currentPriceMemId); strikeMem = wglData->findMemory(wglData, strikeMemId); expirationMem = wglData->findMemory(wglData, expirationMemId); interestMem = wglData->findMemory(wglData, interestMemId); volatilityMem = wglData->findMemory(wglData, volatilityMemId); dividendMem = wglData->findMemory(wglData, dividendMemId); WGL_SAFE_CALL(wglData->setWolframLibraryData(wglData, libData), cleanup); New(dt, Real_t, numOptions * sizeof(Real_t)); New(vDt, Real_t, numOptions * sizeof(Real_t)); New(rDt, Real_t, numOptions * sizeof(Real_t)); New(If, Real_t, numOptions * sizeof(Real_t)); New(df, Real_t, numOptions * sizeof(Real_t)); New(u, Real_t, numOptions * sizeof(Real_t)); New(d, Real_t, numOptions * sizeof(Real_t)); New(pu, Real_t, numOptions * sizeof(Real_t)); New(pd, Real_t, numOptions * sizeof(Real_t)); New(puByDf, Real_t, numOptions * sizeof(Real_t)); New(pdByDf, Real_t, numOptions * sizeof(Real_t)); hExpiration = wglData->MTensorMemory_getRealData(wglData, expirationMem); assert(hExpiration != NULL); hVolatility = wglData->MTensorMemory_getRealData(wglData, volatilityMem); assert(hVolatility != NULL); hInterest = wglData->MTensorMemory_getRealData(wglData, interestMem); assert(hInterest != NULL); hDividend = wglData->MTensorMemory_getRealData(wglData, dividendMem); assert(hDividend != NULL); New(hCallBuffer, Real_t, numOptions * (numSteps + 16) * sizeof(Real_t)); // We need to calculate pseudoprobabilities that the price of the asset will go up or down, as well as the amount it will go up or down. for (mint ii = 0; ii < numOptions; ii++) { // Width of a time step dt[ii] = static_cast<Real_t>(hExpiration[ii]) / static_cast<Real_t>(numSteps); // Volatility multiplied by square root of the timestep -- comes up in simulating brownian motion vDt[ii] = static_cast<Real_t>(hVolatility[ii]) * sqrt(dt[ii]); // Used to account for the rate of risk free interest and the dividends of the asset rDt[ii] = static_cast<Real_t>(hInterest[ii] - hDividend[ii]) * dt[ii]; // As above [these could probably be combined into one step] If[ii] = exp(rDt[ii]); // Used to account for just risk free interest df[ii] = exp(static_cast<Real_t>(-hInterest[ii] * dt[ii])); // Amount increased (u) or decreased (d) at each time step u[ii] = exp(vDt[ii]); d[ii] = exp(-vDt[ii]); // Pseudoprobability of increase (pu) or decrease (pd) pu[ii] = (If[ii] - d[ii]) / (u[ii] - d[ii]); pd[ii] = 1.0f - pu[ii]; // Multiply by df to adjust for risk free interest rate. 
puByDf[ii] = pu[ii] * df[ii]; pdByDf[ii] = pd[ii] * df[ii]; } callBufferMem = wglData->newRawMemory(wglData, (void**)&hCallBuffer, WGL_MemoryResidence_DeviceHost, numOptions * (numSteps + 16) * sizeof(Real_t), True); callBufferMem->type = WGL_Real_t; assert(WGL_SuccessQ); vDtMem = wglData->newRawMemory(wglData, (void**)&vDt, WGL_MemoryResidence_DeviceHost, numOptions * sizeof(Real_t), True); vDtMem->type = WGL_Real_t; assert(WGL_SuccessQ); puByDfMem = wglData->newRawMemory(wglData, (void**)&puByDf, WGL_MemoryResidence_DeviceHost, numOptions * sizeof(Real_t), True); puByDfMem->type = WGL_Real_t; assert(WGL_SuccessQ); pdByDfMem = wglData->newRawMemory(wglData, (void**)&pdByDf, WGL_MemoryResidence_DeviceHost, numOptions * sizeof(Real_t), True); pdByDfMem->type = WGL_Real_t; assert(WGL_SuccessQ); err = iBinomialMethod(resMem, currentPriceMem, strikeMem, callBufferMem, vDtMem, puByDfMem, pdByDfMem, numOptions, optionType, callOrPut); cleanup: Free(dt); Free(rDt); Free(If); Free(df); Free(u); Free(d); Free(pu); Free(pd); wglData->freeMemory(wglData, callBufferMem); wglData->freeMemory(wglData, vDtMem); wglData->freeMemory(wglData, puByDfMem); wglData->freeMemory(wglData, pdByDfMem); if (err == LIBRARY_NO_ERROR && WGL_SuccessQ) { return LIBRARY_NO_ERROR; } else if (err != LIBRARY_NO_ERROR) { return err; } else { return LIBRARY_FUNCTION_ERROR; } } /************************************************/ /* Binomial Method Options Pricing */ /************************************************/ /** * Original code is under * ${basedir}/ExtraComponents/CUDA_SDK/3.0/Linux-x86-64/C/src/binomialOptions **/ #define THREAD_N 256 // Barrier types (masks, eg a down and out option has type 3, up and in has type 0, etc.) #define BARRIER_DOWN 1 #define BARRIER_OUT 2 #define LOOKBACK_FIXED 0 #define LOOKBACK_FLOATING 1 template<unsigned int blockSize> __device__ void sumReduceSharedMem(volatile Real_t *sum, int tid) { // do reduction in shared mem if (blockSize >= 512) { if (tid < 256) { sum[tid] += sum[tid + 256]; } __syncthreads();} if (blockSize >= 256) { if (tid < 128) { sum[tid] += sum[tid + 128]; } __syncthreads();} if (blockSize >= 128) { if (tid < 64) { sum[tid] += sum[tid + 64]; } __syncthreads();} if (tid < 32) { if (blockSize >= 64) { sum[tid] += sum[tid + 32]; } if (blockSize >= 32) { sum[tid] += sum[tid + 16]; } if (blockSize >= 16) { sum[tid] += sum[tid + 8]; } if (blockSize >= 8) { sum[tid] += sum[tid + 4]; } if (blockSize >= 4) { sum[tid] += sum[tid + 2]; } if (blockSize >= 2) { sum[tid] += sum[tid + 1]; } } } #define UNROLL_REDUCTION template<int SUM_N, int blockSize> __device__ void sumReduce(Real_t *sum) { #ifdef UNROLL_REDUCTION for(int pos = threadIdx.x; pos < SUM_N; pos += blockSize){ __syncthreads(); sumReduceSharedMem<blockSize>(sum, pos); } #else for(int stride = SUM_N / 2; stride > 0; stride >>= 1){ __syncthreads(); for(int pos = threadIdx.x; pos < stride; pos += blockSize){ sum[pos] += sum[pos + stride]; } } #endif } // S: spot price, X: strike price // MuByT and VBySqrtT are the mean and variance of the normal random variables used to simulate brownian motion. // d_Samples is a pool of normally distributed random samples. 
__device__ inline Real_t endCallValueBarrier(Real_t S, Real_t X, Real_t MuByT, Real_t VBySqrtT, unsigned int index, unsigned int pathN, Real_t *d_Samples, Real_t callPutFactor, unsigned int depthN, Real_t barrier, int barrierType) { Real_t value = S; unsigned int i; Real_t r = 0; Real_t sqrtdt = rsqrt((Real_t)depthN); Real_t dt = sqrtdt*sqrtdt; Real_t exponent = 0; Real_t logBarrier = _log(barrier / S); // If exponent crosses this it is equivalent to value crossing barrier. Cuts down on computation. unsigned int crossed = 0; if((exponent < logBarrier) && (barrierType & BARRIER_DOWN)) crossed = 1; if((exponent > logBarrier) && !(barrierType & BARRIER_DOWN)) crossed = 1; for(i = 0; i < depthN; i++) { r = d_Samples[index + i * pathN]; exponent += MuByT * dt + VBySqrtT * sqrtdt * r; if((exponent < logBarrier) && (barrierType & BARRIER_DOWN)) crossed = 1; if((exponent > logBarrier) && !(barrierType & BARRIER_DOWN)) crossed = 1; } value = S * _exp(exponent); Real_t callValue = value - X; callValue *= callPutFactor; if((crossed == 1) && (barrierType & BARRIER_OUT)) return static_cast<Real_t>(0.0); if((crossed == 0) && !(barrierType & BARRIER_OUT)) return static_cast<Real_t>(0.0); return (callValue > static_cast<Real_t>(0.0)) ? callValue : static_cast<Real_t>(0.0); } __device__ inline Real_t endCallValueLookback(Real_t S, Real_t X, Real_t MuByT, Real_t VBySqrtT, unsigned int index, unsigned int pathN, Real_t *d_Samples, Real_t callPutFactor, unsigned int depthN, int lookbackType) { Real_t maxValue = S; Real_t minValue = S; Real_t currentValue = S; unsigned int i; Real_t r = static_cast<Real_t>(0.0); Real_t sqrtdt = rsqrt((Real_t)depthN); Real_t dt = sqrtdt*sqrtdt; Real_t exponent = static_cast<Real_t>(0.0); Real_t callValue, putValue; for(i = 0; i < depthN; i++) { r = d_Samples[index + i * pathN]; exponent += MuByT * dt + VBySqrtT * sqrtdt * r; currentValue = S * _exp(exponent); if(currentValue < minValue) minValue = currentValue; if(currentValue > maxValue) maxValue = currentValue; } if(lookbackType == LOOKBACK_FLOATING) { callValue = currentValue - minValue; putValue = maxValue - currentValue; } else { callValue = maxValue - X; putValue = X - minValue; } if(callPutFactor > static_cast<Real_t>(0.0)) return (callValue > static_cast<Real_t>(0.0)) ? callValue : static_cast<Real_t>(0.0); else return (putValue > static_cast<Real_t>(0.0)) ? putValue : static_cast<Real_t>(0.0); } __device__ inline Real_t endCallValueAsian(Real_t S, Real_t X, Real_t MuByT, Real_t VBySqrtT, unsigned int index, unsigned int pathN, Real_t *d_Samples, Real_t callPutFactor, unsigned int depthN) { Real_t sum = S; unsigned int i; Real_t r = static_cast<Real_t>(0.0); Real_t sqrtdt = rsqrt((Real_t)depthN); Real_t dt = sqrtdt*sqrtdt; Real_t exponent = static_cast<Real_t>(0.0); for(i = 0; i < depthN; i++) { r = d_Samples[index + i * pathN]; exponent += MuByT * dt + VBySqrtT * sqrtdt * r; sum += S * _exp(exponent); } sum /= static_cast<Real_t>(depthN + 1); Real_t callValue = sum - X; callValue *= callPutFactor; return (callValue > static_cast<Real_t>(0.0)) ? callValue : static_cast<Real_t>(0.0); } __device__ inline Real_t endCallValueEuropean(Real_t S, Real_t X, Real_t MuByT, Real_t VBySqrtT, unsigned int index, unsigned int pathN, Real_t *d_Samples, Real_t callPutFactor, unsigned int depthN) { Real_t r = d_Samples[index]; Real_t callValue = S * _exp(MuByT + VBySqrtT * r) - X; callValue *= callPutFactor; return (callValue > static_cast<Real_t>(0.0)) ? 
callValue : static_cast<Real_t>(0.0); } __device__ inline Real_t endCallValue(Real_t S, Real_t X, Real_t MuByT, Real_t VBySqrtT, unsigned int index, unsigned int pathN, Real_t *d_Samples, Real_t callPutFactor, unsigned int depthN, int optType, Real_t barrier) { Real_t res = static_cast<Real_t>(-1.0); switch(optType) { case EUROPEAN: res = endCallValueEuropean(S, X, MuByT, VBySqrtT, index, pathN, d_Samples, callPutFactor, depthN); break; case ASIAN: res = endCallValueAsian(S, X, MuByT, VBySqrtT, index, pathN, d_Samples, callPutFactor, depthN); break; case BARRIERUPIN: res = endCallValueBarrier(S, X, MuByT, VBySqrtT, index, pathN, d_Samples, callPutFactor, depthN, barrier, 0); break; case BARRIERUPOUT: res = endCallValueBarrier(S, X, MuByT, VBySqrtT, index, pathN, d_Samples, callPutFactor, depthN, barrier, BARRIER_OUT); break; case BARRIERDOWNIN: res = endCallValueBarrier(S, X, MuByT, VBySqrtT, index, pathN, d_Samples, callPutFactor, depthN, barrier, BARRIER_DOWN); break; case BARRIERDOWNOUT: res = endCallValueBarrier(S, X, MuByT, VBySqrtT, index, pathN, d_Samples, callPutFactor, depthN, barrier, BARRIER_DOWN | BARRIER_OUT); break; case LOOKBACKFIXED: res = endCallValueLookback(S, X, MuByT, VBySqrtT, index, pathN, d_Samples, callPutFactor, depthN, LOOKBACK_FIXED); break; case LOOKBACKFLOATING: res = endCallValueLookback(S, X, MuByT, VBySqrtT, index, pathN, d_Samples, callPutFactor, depthN, LOOKBACK_FIXED); break; default: break; } return res; } __global__ void MonteCarloKernel( Real_t *d_S, Real_t *d_X, Real_t *d_MuByT, Real_t *d_VBySqrtT, Real_t *d_Barrier, Real_t *d_Buffer, Real_t *d_Samples, unsigned int pathN, unsigned int depthN, int optType, int call) { const int optionIndex = blockIdx.y; const Real_t S = d_S[optionIndex]; const Real_t X = d_X[optionIndex]; const Real_t MuByT = d_MuByT[optionIndex]; const Real_t VBySqrtT = d_VBySqrtT[optionIndex]; const Real_t Barrier = d_Barrier[optionIndex]; const Real_t callPutFactor = call ? static_cast<Real_t>(1.0) : static_cast<Real_t>(-1.0); //One thread per partial integral const unsigned int iSum = blockIdx.x * blockDim.x + threadIdx.x; const unsigned int accumN = blockDim.x * gridDim.x; //Cycle through the entire samples array: //derive end stock price for each path //accumulate into intermediate global memory array Real_t sumCall = static_cast<Real_t>(0.0); for(unsigned int i = iSum; i < pathN; i += accumN){ Real_t callValue = endCallValue(S, X, MuByT, VBySqrtT, i, pathN, d_Samples, callPutFactor, depthN, optType, Barrier); sumCall += callValue; } d_Buffer[optionIndex * accumN + iSum] = sumCall; } __global__ void MonteCarloOneBlockPerOption( Real_t *d_CallValue, Real_t *d_S, Real_t *d_X, Real_t *d_MuByT, Real_t *d_VBySqrtT, Real_t *d_Barrier, Real_t *d_Samples, unsigned int pathN, unsigned int depthN, int optType, int call) { const int SUM_N = THREAD_N; __shared__ Real_t s_SumCall[SUM_N]; const int optionIndex = blockIdx.x; const Real_t S = d_S[optionIndex]; const Real_t X = d_X[optionIndex]; const Real_t MuByT = d_MuByT[optionIndex]; const Real_t VBySqrtT = d_VBySqrtT[optionIndex]; const Real_t Barrier = d_Barrier[optionIndex]; const Real_t callPutFactor = call ? 
1 : -1; Real_t sumCall; Real_t callValue = 0; //Cycle through the entire samples array: //derive end stock price for each path //accumulate partial integrals into intermediate shared memory buffer for(unsigned int iSum = threadIdx.x; iSum < SUM_N; iSum += blockDim.x){ sumCall = static_cast<Real_t>(0.0); for(unsigned int i = iSum; i < pathN; i += SUM_N){ callValue = endCallValue(S, X, MuByT, VBySqrtT, i, pathN, d_Samples, callPutFactor, depthN, optType, Barrier); sumCall += callValue; } s_SumCall[iSum] = sumCall; } sumReduce<SUM_N, THREAD_N>(s_SumCall); if(threadIdx.x == 0){ d_CallValue[optionIndex] = s_SumCall[0]; } } __global__ void MonteCarloReduce( Real_t *d_CallValue, Real_t *d_Buffer, int accumN) { const int SUM_N = THREAD_N; __shared__ Real_t s_SumCall[SUM_N]; Real_t *d_SumBase = &d_Buffer[blockIdx.x * accumN]; for(int iSum = threadIdx.x; iSum < SUM_N; iSum += blockDim.x){ Real_t sumCall = 0; for(int pos = iSum; pos < accumN; pos += SUM_N){ sumCall += d_SumBase[pos]; } s_SumCall[iSum] = sumCall; } if(threadIdx.x == 0){ for(int i=1; i<SUM_N; i++) s_SumCall[0] += s_SumCall[i]; d_CallValue[blockIdx.x] = s_SumCall[0]; } } __device__ inline Real_t MoroInvCNDgpu(Real_t P){ const Real_t a1 = static_cast<Real_t>(2.50662823884); const Real_t a2 = static_cast<Real_t>(-18.61500062529); const Real_t a3 = static_cast<Real_t>(41.39119773534); const Real_t a4 = static_cast<Real_t>(-25.44106049637); const Real_t b1 = static_cast<Real_t>(-8.4735109309); const Real_t b2 = static_cast<Real_t>(23.08336743743); const Real_t b3 = static_cast<Real_t>(-21.06224101826); const Real_t b4 = static_cast<Real_t>(3.13082909833); const Real_t c1 = static_cast<Real_t>(0.337475482272615); const Real_t c2 = static_cast<Real_t>(0.976169019091719); const Real_t c3 = static_cast<Real_t>(0.160797971491821); const Real_t c4 = static_cast<Real_t>(2.76438810333863E-02); const Real_t c5 = static_cast<Real_t>(3.8405729373609E-03); const Real_t c6 = static_cast<Real_t>(3.951896511919E-04); const Real_t c7 = static_cast<Real_t>(3.21767881768E-05); const Real_t c8 = static_cast<Real_t>(2.888167364E-07); const Real_t c9 = static_cast<Real_t>(3.960315187E-07); Real_t y, z; y = P - static_cast<Real_t>(0.5); if(_abs(y) < static_cast<Real_t>(0.42)){ z = y * y; z = y * (((a4 * z + a3) * z + a2) * z + a1) / ((((b4 * z + b3) * z + b2) * z + b1) * z + 1); }else{ if(y > 0) z = _log(-_log(1 - P)); else z = _log(-_log(P)); z = c1 + z * (c2 + z * (c3 + z * (c4 + z * (c5 + z * (c6 + z * (c7 + z * (c8 + z * c9))))))); if(y < 0) z = -z; } return z; } // d_Samples should be filled with Uniform pseudo-random or quasi-random samples in [0,1] // Moro Inversion is used to convert Uniform to Normal[0,1] __global__ void inverseCNDKernel( Real_t *d_Samples, unsigned int pathN) { unsigned int tid = blockDim.x * blockIdx.x + threadIdx.x; unsigned int threadN = blockDim.x * gridDim.x; for(unsigned int pos = tid; pos < pathN; pos += threadN) { Real_t d = d_Samples[pos]; d_Samples[pos] = MoroInvCNDgpu(d); } } static int iMonteCarloMethod(WGL_Memory_t priceRes, WGL_Memory_t spot, WGL_Memory_t strike, WGL_Memory_t muByT, WGL_Memory_t vBySqrtT, WGL_Memory_t barrier, WGL_Memory_t buffer, WGL_Memory_t samples, mint pathN, mint depthN, mint numOptions, mint optType, mint callOrPut, mint blocksPerOption) { int err = LIBRARY_FUNCTION_ERROR; dim3 blockDim(256); dim3 gridDim1(blocksPerOption, numOptions); dim3 gridDim2(numOptions); dim3 moroBlockDim(128); dim3 moroGridDim(1); if (!(WGL_Type_RealQ(priceRes) && WGL_Type_RealQ(spot) && WGL_Type_RealQ(strike) && 
WGL_Type_RealQ(muByT) && WGL_Type_RealQ(vBySqrtT) && WGL_Type_RealQ(barrier) && WGL_Type_RealQ(samples))) { return LIBRARY_TYPE_ERROR; } else if (callOrPut != 1.0 && callOrPut != -1.0) { return LIBRARY_FUNCTION_ERROR; } WGL_SAFE_CALL(CUDA_Runtime_setMemoryAsOutput(wglState, priceRes, wglErr), cleanup); WGL_SAFE_CALL(CUDA_Runtime_setMemoryAsInput(wglState, spot, wglErr), cleanup); WGL_SAFE_CALL(CUDA_Runtime_setMemoryAsInput(wglState, strike, wglErr), cleanup); WGL_SAFE_CALL(CUDA_Runtime_setMemoryAsInput(wglState, muByT, wglErr), cleanup); WGL_SAFE_CALL(CUDA_Runtime_setMemoryAsInput(wglState, vBySqrtT, wglErr), cleanup); WGL_SAFE_CALL(CUDA_Runtime_setMemoryAsInput(wglState, barrier, wglErr), cleanup); WGL_SAFE_CALL(CUDA_Runtime_setMemoryAsInput(wglState, samples, wglErr), cleanup); inverseCNDKernel<<<moroGridDim, moroBlockDim>>>( CUDA_Runtime_getDeviceMemoryAsReal(samples), pathN*depthN ); CUDA_Runtime_synchronize(wglErr); if (blocksPerOption != 1) { if (!WGL_Type_RealQ(buffer)) { return LIBRARY_TYPE_ERROR; } WGL_SAFE_CALL(CUDA_Runtime_setMemoryAsInput(wglState, buffer, wglErr), cleanup); MonteCarloKernel<<<gridDim1, blockDim>>>( CUDA_Runtime_getDeviceMemoryAsReal(spot), CUDA_Runtime_getDeviceMemoryAsReal(strike), CUDA_Runtime_getDeviceMemoryAsReal(muByT), CUDA_Runtime_getDeviceMemoryAsReal(vBySqrtT), CUDA_Runtime_getDeviceMemoryAsReal(barrier), CUDA_Runtime_getDeviceMemoryAsReal(buffer), CUDA_Runtime_getDeviceMemoryAsReal(samples), pathN, depthN, optType, callOrPut ); MonteCarloReduce<<<gridDim2, blockDim>>>( CUDA_Runtime_getDeviceMemoryAsReal(priceRes), CUDA_Runtime_getDeviceMemoryAsReal(buffer), 256 * blocksPerOption ); } else { MonteCarloOneBlockPerOption<<<gridDim2, blockDim>>>( CUDA_Runtime_getDeviceMemoryAsReal(priceRes), CUDA_Runtime_getDeviceMemoryAsReal(spot), CUDA_Runtime_getDeviceMemoryAsReal(strike), CUDA_Runtime_getDeviceMemoryAsReal(muByT), CUDA_Runtime_getDeviceMemoryAsReal(vBySqrtT), CUDA_Runtime_getDeviceMemoryAsReal(barrier), CUDA_Runtime_getDeviceMemoryAsReal(samples), pathN, depthN, optType, callOrPut ); } CUDA_Runtime_synchronize(wglErr); if (WGL_SuccessQ) { err = LIBRARY_NO_ERROR; } cleanup: if (WGL_SuccessQ) { CUDA_Runtime_setMemoryAsValidOutput(wglState, priceRes, wglErr); } else { CUDA_Runtime_setMemoryAsInvalidOutput(wglState, priceRes, wglErr); } CUDA_Runtime_unsetMemoryAsInput(wglState, spot, wglErr); CUDA_Runtime_unsetMemoryAsInput(wglState, strike, wglErr); CUDA_Runtime_unsetMemoryAsInput(wglState, muByT, wglErr); CUDA_Runtime_unsetMemoryAsInput(wglState, vBySqrtT, wglErr); CUDA_Runtime_unsetMemoryAsInput(wglState, barrier, wglErr); CUDA_Runtime_unsetMemoryAsInput(wglState, samples, wglErr); if (blocksPerOption != 1) { CUDA_Runtime_unsetMemoryAsInput(wglState, buffer, wglErr); } return err; } EXTERN_C DLLEXPORT int oMonteCarloMethod(WolframLibraryData libData, mint Argc, MArgument * Args, MArgument Res) { WGL_Memory_t resMem, currentPriceMem, strikeMem, expirationMem, interestMem, volatilityMem, dividendMem, barrierMem; mint resMemId, currentPriceMemId, strikeMemId, expirationMemId, interestMemId, volatilityMemId, dividendMemId, barrierMemId; mint numOptions, optType, callOrPut; mint pathN = 10000, depthN = 500, threadN = 256, blocksPerOption = 1; Real_t * muByT = NULL, * vBySqrtT = NULL, * hBuffer = NULL, * hSamples = NULL; WGL_Memory_t callBufferMem = NULL, muByTMem = NULL, vBySqrtTMem = NULL, samplesMem = NULL; double * hExpiration = NULL, * hVolatility = NULL, * hInterest = NULL, * hDividend = NULL, * hRes = NULL, * RT = NULL; int err = LIBRARY_NO_ERROR; 
assert(Argc == 13); resMemId = MArgument_getInteger(Args[0]); currentPriceMemId = MArgument_getInteger(Args[1]); strikeMemId = MArgument_getInteger(Args[2]); expirationMemId = MArgument_getInteger(Args[3]); interestMemId = MArgument_getInteger(Args[4]); volatilityMemId = MArgument_getInteger(Args[5]); dividendMemId = MArgument_getInteger(Args[6]); barrierMemId = MArgument_getInteger(Args[7]); numOptions = MArgument_getInteger(Args[8]); optType = MArgument_getInteger(Args[9]); callOrPut = MArgument_getInteger(Args[10]); resMem = wglData->findMemory(wglData, resMemId); currentPriceMem = wglData->findMemory(wglData, currentPriceMemId); strikeMem = wglData->findMemory(wglData, strikeMemId); expirationMem = wglData->findMemory(wglData, expirationMemId); interestMem = wglData->findMemory(wglData, interestMemId); volatilityMem = wglData->findMemory(wglData, volatilityMemId); dividendMem = wglData->findMemory(wglData, dividendMemId); barrierMem = wglData->findMemory(wglData, barrierMemId); WGL_SAFE_CALL(wglData->setWolframLibraryData(wglData, libData), cleanup); New(muByT, Real_t, numOptions * sizeof(Real_t)); New(vBySqrtT, Real_t, numOptions * sizeof(Real_t)); New(hSamples, Real_t, pathN * depthN * sizeof(Real_t)); hExpiration = wglData->MTensorMemory_getRealData(wglData, expirationMem); assert(hExpiration != NULL); hVolatility = wglData->MTensorMemory_getRealData(wglData, volatilityMem); assert(hVolatility != NULL); hInterest = wglData->MTensorMemory_getRealData(wglData, interestMem); assert(hInterest != NULL); hDividend = wglData->MTensorMemory_getRealData(wglData, dividendMem); assert(hDividend != NULL); // The only inputs we really need for simulation are the mean (muByT) and standard deviation (vBySqrtT) for the normal random variables used to simulate brownian motion. for (mint ii = 0; ii < numOptions; ii++) { muByT[ii] = static_cast<Real_t>((hInterest[ii] - hDividend[ii] - 0.5f * hVolatility[ii]*hVolatility[ii]) * hExpiration[ii]); vBySqrtT[ii] = static_cast<Real_t>(hVolatility[ii] * sqrt(hExpiration[ii])); } // Create uniform random variables host-side, use a kernel to convert them to Normal(0,1). for (mint ii = 0; ii < pathN * depthN; ii++) { hSamples[ii] = static_cast<Real_t>(rand()) / static_cast<Real_t>(RAND_MAX); } // This determines how many blocks per option to use; it could probably be updated since I pulled it from the nvidia SDK // which did not do "real" monte carlo method (paths), so depthN was not a factor. if (pathN / numOptions >= 8192) { blocksPerOption = numOptions < 16 ? 
64 : 16; New(hBuffer, Real_t, blocksPerOption * threadN * numOptions * sizeof(Real_t)); callBufferMem = wglData->newRawMemory(wglData, (void**)&hBuffer, WGL_MemoryResidence_DeviceHost, blocksPerOption * threadN * numOptions * sizeof(Real_t), True); callBufferMem->type = WGL_Real_t; assert(WGL_SuccessQ); } muByTMem = wglData->newRawMemory(wglData, (void**)&muByT, WGL_MemoryResidence_DeviceHost, numOptions * sizeof(Real_t), True); muByTMem->type = WGL_Real_t; assert(WGL_SuccessQ); vBySqrtTMem = wglData->newRawMemory(wglData, (void**)&vBySqrtT, WGL_MemoryResidence_DeviceHost, numOptions * sizeof(Real_t), True); vBySqrtTMem->type = WGL_Real_t; assert(WGL_SuccessQ); samplesMem = wglData->newRawMemory(wglData, (void**)&hSamples, WGL_MemoryResidence_DeviceHost, pathN * depthN * sizeof(Real_t), True); samplesMem->type = WGL_Real_t; assert(WGL_SuccessQ); err = iMonteCarloMethod(resMem, currentPriceMem, strikeMem, muByTMem, vBySqrtTMem, barrierMem, callBufferMem, samplesMem, pathN, depthN, numOptions, optType, callOrPut, blocksPerOption); hRes = wglData->MTensorMemory_getRealData(wglData, resMem); assert(hRes != NULL); New(RT, double, numOptions * sizeof(double)); // The output of the kernel does not average of the number of paths or adjust for inflation; we do that now. for (int ii = 0; ii < numOptions; ii++) { RT[ii] = exp(-hInterest[ii]*hExpiration[ii]); hRes[ii] *= RT[ii] / static_cast<Real_t>(pathN); } cleanup: Free(RT); wglData->freeMemory(wglData, callBufferMem); wglData->freeMemory(wglData, muByTMem); wglData->freeMemory(wglData, vBySqrtTMem); wglData->freeMemory(wglData, samplesMem); if (err == LIBRARY_NO_ERROR && WGL_SuccessQ) { return LIBRARY_NO_ERROR; } else if (err != LIBRARY_NO_ERROR) { return err; } else { return LIBRARY_FUNCTION_ERROR; } } WGLEXPORT int WolframGPULibrary_initialize(WolframGPULibraryData wglData0) { wglData = wglData0; return LIBRARY_NO_ERROR; } WGLEXPORT int WolframLibrary_initialize(WolframLibraryData libData) { return LIBRARY_NO_ERROR; } WGLEXPORT void WolframLibrary_uninitialize( ) { return; }
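The Monte Carlo route in the file above reduces to averaging discounted terminal payoffs: the host precomputes muByT = (r - q - 0.5*v^2)*T and vBySqrtT = v*sqrt(T), the kernels sum max(payoff, 0) over pathN draws, and oMonteCarloMethod finally scales the result by exp(-r*T)/pathN. A minimal CPU sketch of that estimator for the European-call case is shown below (illustrative only; mcEuropeanCallCPU is not part of the library, and it draws normals via Box-Muller rather than the Moro inversion used on the GPU):

#include <cmath>
#include <cstdlib>

// CPU sketch of the estimator the European path of the Monte Carlo kernels implements:
// average max(S*exp((r-q-0.5*v*v)*T + v*sqrt(T)*Z) - X, 0) over pathN standard-normal
// draws Z, then discount by exp(-r*T).
double mcEuropeanCallCPU(double S, double X, double T, double r, double q,
                         double v, int pathN)
{
    const double pi       = 3.14159265358979323846;
    const double muByT    = (r - q - 0.5 * v * v) * T;   // same precomputation as oMonteCarloMethod
    const double vBySqrtT = v * std::sqrt(T);
    double sum = 0.0;
    for (int i = 0; i < pathN; ++i) {
        // Box-Muller transform of two uniforms (the library converts uniforms with Moro inversion instead)
        double u1 = (std::rand() + 1.0) / (RAND_MAX + 2.0);
        double u2 = (std::rand() + 1.0) / (RAND_MAX + 2.0);
        double z  = std::sqrt(-2.0 * std::log(u1)) * std::cos(2.0 * pi * u2);
        double payoff = S * std::exp(muByT + vBySqrtT * z) - X;
        sum += payoff > 0.0 ? payoff : 0.0;
    }
    return std::exp(-r * T) * sum / pathN;                // matches hRes[ii] *= exp(-r*T)/pathN
}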
1f8ad6a2d89dbb7616b96230eb2df544959055db.hip
// !!! This is a file automatically generated by hipify!!! /** * fdtd2d.cu: This file is part of the PolyBench/GPU 1.0 test suite. * * * Contact: Scott Grauer-Gray <[email protected]> * Louis-Noel Pouchet <[email protected]> * Web address: http://www.cse.ohio-state.edu/~pouchet/software/polybench/GPU */ #include <stdio.h> #include <stdlib.h> #include <math.h> #include <assert.h> #include <unistd.h> #include <sys/time.h> #include <hip/hip_runtime.h> #include <hip/hip_runtime.h> #include "polybenchUtilFuncts.h" //define the error threshold for the results "not matching" #define PERCENT_DIFF_ERROR_THRESHOLD 10.05 #define GPU_DEVICE 0 /* Problem size */ #define tmax 1 #define NX 4096 #define NY 4096 /* Thread block dimensions */ #define DIM_THREAD_BLOCK_X 1024 #define DIM_THREAD_BLOCK_Y 1 /* Can switch DATA_TYPE between float and double */ typedef float DATA_TYPE; void init_arrays(DATA_TYPE* _fict_, DATA_TYPE* ex, DATA_TYPE* ey, DATA_TYPE* hz) { int i, j; for (i = 0; i < tmax; i++) { _fict_[i] = (DATA_TYPE) i; } for (i = 0; i < NX; i++) { for (j = 0; j < NY; j++) { ex[i*NY + j] = ((DATA_TYPE) i*(j+1) + 1) / NX; ey[i*NY + j] = ((DATA_TYPE) (i-1)*(j+2) + 2) / NX; hz[i*NY + j] = ((DATA_TYPE) (i-9)*(j+4) + 3) / NX; } } } void runFdtd(DATA_TYPE* _fict_, DATA_TYPE* ex, DATA_TYPE* ey, DATA_TYPE* hz) { int t, i, j; for (t=0; t < tmax; t++) { for (j=0; j < NY; j++) { ey[0*NY + j] = _fict_[t]; } for (i = 1; i < NX; i++) { for (j = 0; j < NY; j++) { ey[i*NY + j] = ey[i*NY + j] - 0.5*(hz[i*NY + j] - hz[(i-1)*NY + j]); } } for (i = 0; i < NX; i++) { for (j = 1; j < NY; j++) { ex[i*(NY+1) + j] = ex[i*(NY+1) + j] - 0.5*(hz[i*NY + j] - hz[i*NY + (j-1)]); } } for (i = 0; i < NX; i++) { for (j = 0; j < NY; j++) { hz[i*NY + j] = hz[i*NY + j] - 0.7*(ex[i*(NY+1) + (j+1)] - ex[i*(NY+1) + j] + ey[(i+1)*NY + j] - ey[i*NY + j]); } } } } void compareResults(DATA_TYPE* hz1, DATA_TYPE* hz2) { int i, j, fail; fail = 0; for (i=0; i < NX; i++) { for (j=0; j < NY; j++) { if (percentDiff(hz1[i*NY + j], hz2[i*NY + j]) > PERCENT_DIFF_ERROR_THRESHOLD) { fail++; } } } // Print results printf("Non-Matching CPU-GPU Outputs Beyond Error Threshold of %4.2f Percent: %d\n", PERCENT_DIFF_ERROR_THRESHOLD, fail); } void GPU_argv_init() { hipDeviceProp_t deviceProp; hipGetDeviceProperties(&deviceProp, GPU_DEVICE); printf("setting device %d with name %s\n",GPU_DEVICE,deviceProp.name); hipSetDevice( GPU_DEVICE ); } __global__ void fdtd_step1_kernel(DATA_TYPE* _fict_, DATA_TYPE *ex, DATA_TYPE *ey, DATA_TYPE *hz, int t) { int j = blockIdx.x * blockDim.x + threadIdx.x; int i = blockIdx.y * blockDim.y + threadIdx.y; if ((i < NX) && (j < NY)) { if (i == 0) { ey[i * NY + j] = _fict_[t]; } else { ey[i * NY + j] = ey[i * NY + j] - 0.5f*(hz[i * NY + j] - hz[(i-1) * NY + j]); } } } __global__ void fdtd_step2_kernel(DATA_TYPE *ex, DATA_TYPE *ey, DATA_TYPE *hz, int t) { int j = blockIdx.x * blockDim.x + threadIdx.x; int i = blockIdx.y * blockDim.y + threadIdx.y; if ((i < NX) && (j < NY) && (j > 0)) { ex[i * (NY+1) + j] = ex[i * (NY+1) + j] - 0.5f*(hz[i * NY + j] - hz[i * NY + (j-1)]); } } __global__ void fdtd_step3_kernel(DATA_TYPE *ex, DATA_TYPE *ey, DATA_TYPE *hz, int t) { int j = blockIdx.x * blockDim.x + threadIdx.x; int i = blockIdx.y * blockDim.y + threadIdx.y; if ((i < NX) && (j < NY)) { hz[i * NY + j] = hz[i * NY + j] - 0.7f*(ex[i * (NY+1) + (j+1)] - ex[i * (NY+1) + j] + ey[(i + 1) * NY + j] - ey[i * NY + j]); } } void fdtdCuda(DATA_TYPE* _fict_, DATA_TYPE* ex, DATA_TYPE* ey, DATA_TYPE* hz, DATA_TYPE* hz_outputFromGpu) { DATA_TYPE *_fict_gpu; 
DATA_TYPE *ex_gpu; DATA_TYPE *ey_gpu; DATA_TYPE *hz_gpu; hipMalloc((void **)&_fict_gpu, sizeof(DATA_TYPE) * tmax); hipMalloc((void **)&ex_gpu, sizeof(DATA_TYPE) * NX * (NY + 1)); hipMalloc((void **)&ey_gpu, sizeof(DATA_TYPE) * (NX + 1) * NY); hipMalloc((void **)&hz_gpu, sizeof(DATA_TYPE) * NX * NY); hipEvent_t start,stop; float elapsedTimeInMs = 0.0f; hipEventCreate(&start); hipEventCreate(&stop); hipEventRecord(start); hipMemcpy(_fict_gpu, _fict_, sizeof(DATA_TYPE) * tmax, hipMemcpyHostToDevice); hipMemcpy(ex_gpu, ex, sizeof(DATA_TYPE) * NX * (NY + 1), hipMemcpyHostToDevice); hipMemcpy(ey_gpu, ey, sizeof(DATA_TYPE) * (NX + 1) * NY, hipMemcpyHostToDevice); hipMemcpy(hz_gpu, hz, sizeof(DATA_TYPE) * NX * NY, hipMemcpyHostToDevice); dim3 block(DIM_THREAD_BLOCK_X, DIM_THREAD_BLOCK_Y); dim3 grid( (size_t)ceil(((float)NY) / ((float)block.x)), (size_t)ceil(((float)NX) / ((float)block.y))); for(int t = 0; t< tmax; t++) { hipLaunchKernelGGL(( fdtd_step1_kernel), dim3(grid),dim3(block), 0, 0, _fict_gpu, ex_gpu, ey_gpu, hz_gpu, t); hipDeviceSynchronize(); hipLaunchKernelGGL(( fdtd_step2_kernel), dim3(grid),dim3(block), 0, 0, ex_gpu, ey_gpu, hz_gpu, t); hipDeviceSynchronize(); hipLaunchKernelGGL(( fdtd_step3_kernel), dim3(grid),dim3(block), 0, 0, ex_gpu, ey_gpu, hz_gpu, t); hipDeviceSynchronize(); } hipMemcpy(hz_outputFromGpu, hz_gpu, sizeof(DATA_TYPE) * NX * NY, hipMemcpyDeviceToHost); hipDeviceSynchronize(); hipEventRecord(stop); hipEventSynchronize(stop); hipEventElapsedTime(&elapsedTimeInMs, start, stop); fprintf(stdout,"GPU RunTime= %.1f Ms \n", elapsedTimeInMs); hipFree(_fict_gpu); hipFree(ex_gpu); hipFree(ey_gpu); hipFree(hz_gpu); } int main() { double t_start, t_end; DATA_TYPE* _fict_; DATA_TYPE* ex; DATA_TYPE* ey; DATA_TYPE* hz; DATA_TYPE* hz_outputFromGpu; /* _fict_ = (DATA_TYPE*)malloc(tmax*sizeof(DATA_TYPE)); ex = (DATA_TYPE*)malloc(NX*(NY+1)*sizeof(DATA_TYPE)); ey = (DATA_TYPE*)malloc((NX+1)*NY*sizeof(DATA_TYPE)); hz = (DATA_TYPE*)malloc(NX*NY*sizeof(DATA_TYPE)); hz_outputFromGpu = (DATA_TYPE*)malloc(NX*NY*sizeof(DATA_TYPE)); */ hipHostMalloc((void **)&_fict_, sizeof(DATA_TYPE) * tmax, hipHostMallocPortable); hipHostMalloc((void **)&ex, sizeof(DATA_TYPE)*NX*(NY+1), hipHostMallocPortable); hipHostMalloc((void **)&ey, sizeof(DATA_TYPE)*NX*(NY+1), hipHostMallocPortable); hipHostMalloc((void **)&hz, sizeof(DATA_TYPE)*NX*NY, hipHostMallocPortable); hipHostMalloc((void **)&hz_outputFromGpu, sizeof(DATA_TYPE)*NX*NY, hipHostMallocPortable); init_arrays(_fict_, ex, ey, hz); GPU_argv_init(); fdtdCuda(_fict_, ex, ey, hz, hz_outputFromGpu); t_start = rtclock(); runFdtd(_fict_, ex, ey, hz); t_end = rtclock(); fprintf(stdout, "CPU Runtime: %0.6lfs\n", t_end - t_start); compareResults(hz, hz_outputFromGpu); hipFree(_fict_); hipFree(ex); hipFree(ey); hipFree(hz); hipFree(hz_outputFromGpu); return 0; }
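The launch geometry in fdtdCuda above maps threadIdx.x/blockIdx.x to columns (j over NY) and threadIdx.y/blockIdx.y to rows (i over NX). A small stand-alone sketch of that grid computation follows (the helper name makeFdtdGrid is illustrative, not part of the benchmark):

#include <hip/hip_runtime.h>
#include <cassert>

// Compute a grid that covers an NX x NY domain with the given block shape,
// mirroring the dim3 grid(...) expression in fdtdCuda.
dim3 makeFdtdGrid(int nx, int ny, dim3 block)
{
    dim3 grid((ny + block.x - 1) / block.x,   // ceil(NY / block.x) blocks along columns
              (nx + block.y - 1) / block.y);  // ceil(NX / block.y) blocks along rows
    assert((long long)grid.x * block.x >= ny);
    assert((long long)grid.y * block.y >= nx);
    return grid;
}
// With NX = NY = 4096 and block = (1024, 1) this yields a 4 x 4096 grid, i.e. one
// thread per (i, j) cell; the kernels' (i < NX) && (j < NY) guards handle any
// overhang when the domain is not a multiple of the block shape.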
1f8ad6a2d89dbb7616b96230eb2df544959055db.cu
/** * fdtd2d.cu: This file is part of the PolyBench/GPU 1.0 test suite. * * * Contact: Scott Grauer-Gray <[email protected]> * Louis-Noel Pouchet <[email protected]> * Web address: http://www.cse.ohio-state.edu/~pouchet/software/polybench/GPU */ #include <stdio.h> #include <stdlib.h> #include <math.h> #include <assert.h> #include <unistd.h> #include <sys/time.h> #include <cuda.h> #include <cuda_runtime.h> #include "polybenchUtilFuncts.h" //define the error threshold for the results "not matching" #define PERCENT_DIFF_ERROR_THRESHOLD 10.05 #define GPU_DEVICE 0 /* Problem size */ #define tmax 1 #define NX 4096 #define NY 4096 /* Thread block dimensions */ #define DIM_THREAD_BLOCK_X 1024 #define DIM_THREAD_BLOCK_Y 1 /* Can switch DATA_TYPE between float and double */ typedef float DATA_TYPE; void init_arrays(DATA_TYPE* _fict_, DATA_TYPE* ex, DATA_TYPE* ey, DATA_TYPE* hz) { int i, j; for (i = 0; i < tmax; i++) { _fict_[i] = (DATA_TYPE) i; } for (i = 0; i < NX; i++) { for (j = 0; j < NY; j++) { ex[i*NY + j] = ((DATA_TYPE) i*(j+1) + 1) / NX; ey[i*NY + j] = ((DATA_TYPE) (i-1)*(j+2) + 2) / NX; hz[i*NY + j] = ((DATA_TYPE) (i-9)*(j+4) + 3) / NX; } } } void runFdtd(DATA_TYPE* _fict_, DATA_TYPE* ex, DATA_TYPE* ey, DATA_TYPE* hz) { int t, i, j; for (t=0; t < tmax; t++) { for (j=0; j < NY; j++) { ey[0*NY + j] = _fict_[t]; } for (i = 1; i < NX; i++) { for (j = 0; j < NY; j++) { ey[i*NY + j] = ey[i*NY + j] - 0.5*(hz[i*NY + j] - hz[(i-1)*NY + j]); } } for (i = 0; i < NX; i++) { for (j = 1; j < NY; j++) { ex[i*(NY+1) + j] = ex[i*(NY+1) + j] - 0.5*(hz[i*NY + j] - hz[i*NY + (j-1)]); } } for (i = 0; i < NX; i++) { for (j = 0; j < NY; j++) { hz[i*NY + j] = hz[i*NY + j] - 0.7*(ex[i*(NY+1) + (j+1)] - ex[i*(NY+1) + j] + ey[(i+1)*NY + j] - ey[i*NY + j]); } } } } void compareResults(DATA_TYPE* hz1, DATA_TYPE* hz2) { int i, j, fail; fail = 0; for (i=0; i < NX; i++) { for (j=0; j < NY; j++) { if (percentDiff(hz1[i*NY + j], hz2[i*NY + j]) > PERCENT_DIFF_ERROR_THRESHOLD) { fail++; } } } // Print results printf("Non-Matching CPU-GPU Outputs Beyond Error Threshold of %4.2f Percent: %d\n", PERCENT_DIFF_ERROR_THRESHOLD, fail); } void GPU_argv_init() { cudaDeviceProp deviceProp; cudaGetDeviceProperties(&deviceProp, GPU_DEVICE); printf("setting device %d with name %s\n",GPU_DEVICE,deviceProp.name); cudaSetDevice( GPU_DEVICE ); } __global__ void fdtd_step1_kernel(DATA_TYPE* _fict_, DATA_TYPE *ex, DATA_TYPE *ey, DATA_TYPE *hz, int t) { int j = blockIdx.x * blockDim.x + threadIdx.x; int i = blockIdx.y * blockDim.y + threadIdx.y; if ((i < NX) && (j < NY)) { if (i == 0) { ey[i * NY + j] = _fict_[t]; } else { ey[i * NY + j] = ey[i * NY + j] - 0.5f*(hz[i * NY + j] - hz[(i-1) * NY + j]); } } } __global__ void fdtd_step2_kernel(DATA_TYPE *ex, DATA_TYPE *ey, DATA_TYPE *hz, int t) { int j = blockIdx.x * blockDim.x + threadIdx.x; int i = blockIdx.y * blockDim.y + threadIdx.y; if ((i < NX) && (j < NY) && (j > 0)) { ex[i * (NY+1) + j] = ex[i * (NY+1) + j] - 0.5f*(hz[i * NY + j] - hz[i * NY + (j-1)]); } } __global__ void fdtd_step3_kernel(DATA_TYPE *ex, DATA_TYPE *ey, DATA_TYPE *hz, int t) { int j = blockIdx.x * blockDim.x + threadIdx.x; int i = blockIdx.y * blockDim.y + threadIdx.y; if ((i < NX) && (j < NY)) { hz[i * NY + j] = hz[i * NY + j] - 0.7f*(ex[i * (NY+1) + (j+1)] - ex[i * (NY+1) + j] + ey[(i + 1) * NY + j] - ey[i * NY + j]); } } void fdtdCuda(DATA_TYPE* _fict_, DATA_TYPE* ex, DATA_TYPE* ey, DATA_TYPE* hz, DATA_TYPE* hz_outputFromGpu) { DATA_TYPE *_fict_gpu; DATA_TYPE *ex_gpu; DATA_TYPE *ey_gpu; DATA_TYPE *hz_gpu; cudaMalloc((void 
**)&_fict_gpu, sizeof(DATA_TYPE) * tmax); cudaMalloc((void **)&ex_gpu, sizeof(DATA_TYPE) * NX * (NY + 1)); cudaMalloc((void **)&ey_gpu, sizeof(DATA_TYPE) * (NX + 1) * NY); cudaMalloc((void **)&hz_gpu, sizeof(DATA_TYPE) * NX * NY); cudaEvent_t start,stop; float elapsedTimeInMs = 0.0f; cudaEventCreate(&start); cudaEventCreate(&stop); cudaEventRecord(start); cudaMemcpy(_fict_gpu, _fict_, sizeof(DATA_TYPE) * tmax, cudaMemcpyHostToDevice); cudaMemcpy(ex_gpu, ex, sizeof(DATA_TYPE) * NX * (NY + 1), cudaMemcpyHostToDevice); cudaMemcpy(ey_gpu, ey, sizeof(DATA_TYPE) * (NX + 1) * NY, cudaMemcpyHostToDevice); cudaMemcpy(hz_gpu, hz, sizeof(DATA_TYPE) * NX * NY, cudaMemcpyHostToDevice); dim3 block(DIM_THREAD_BLOCK_X, DIM_THREAD_BLOCK_Y); dim3 grid( (size_t)ceil(((float)NY) / ((float)block.x)), (size_t)ceil(((float)NX) / ((float)block.y))); for(int t = 0; t< tmax; t++) { fdtd_step1_kernel<<<grid,block>>>(_fict_gpu, ex_gpu, ey_gpu, hz_gpu, t); cudaThreadSynchronize(); fdtd_step2_kernel<<<grid,block>>>(ex_gpu, ey_gpu, hz_gpu, t); cudaThreadSynchronize(); fdtd_step3_kernel<<<grid,block>>>(ex_gpu, ey_gpu, hz_gpu, t); cudaThreadSynchronize(); } cudaMemcpy(hz_outputFromGpu, hz_gpu, sizeof(DATA_TYPE) * NX * NY, cudaMemcpyDeviceToHost); cudaDeviceSynchronize(); cudaEventRecord(stop); cudaEventSynchronize(stop); cudaEventElapsedTime(&elapsedTimeInMs, start, stop); fprintf(stdout,"GPU RunTime= %.1f Ms \n", elapsedTimeInMs); cudaFree(_fict_gpu); cudaFree(ex_gpu); cudaFree(ey_gpu); cudaFree(hz_gpu); } int main() { double t_start, t_end; DATA_TYPE* _fict_; DATA_TYPE* ex; DATA_TYPE* ey; DATA_TYPE* hz; DATA_TYPE* hz_outputFromGpu; /* _fict_ = (DATA_TYPE*)malloc(tmax*sizeof(DATA_TYPE)); ex = (DATA_TYPE*)malloc(NX*(NY+1)*sizeof(DATA_TYPE)); ey = (DATA_TYPE*)malloc((NX+1)*NY*sizeof(DATA_TYPE)); hz = (DATA_TYPE*)malloc(NX*NY*sizeof(DATA_TYPE)); hz_outputFromGpu = (DATA_TYPE*)malloc(NX*NY*sizeof(DATA_TYPE)); */ cudaHostAlloc((void **)&_fict_, sizeof(DATA_TYPE) * tmax, cudaHostAllocPortable); cudaHostAlloc((void **)&ex, sizeof(DATA_TYPE)*NX*(NY+1), cudaHostAllocPortable); cudaHostAlloc((void **)&ey, sizeof(DATA_TYPE)*NX*(NY+1), cudaHostAllocPortable); cudaHostAlloc((void **)&hz, sizeof(DATA_TYPE)*NX*NY, cudaHostAllocPortable); cudaHostAlloc((void **)&hz_outputFromGpu, sizeof(DATA_TYPE)*NX*NY, cudaHostAllocPortable); init_arrays(_fict_, ex, ey, hz); GPU_argv_init(); fdtdCuda(_fict_, ex, ey, hz, hz_outputFromGpu); t_start = rtclock(); runFdtd(_fict_, ex, ey, hz); t_end = rtclock(); fprintf(stdout, "CPU Runtime: %0.6lfs\n", t_end - t_start); compareResults(hz, hz_outputFromGpu); cudaFree(_fict_); cudaFree(ex); cudaFree(ey); cudaFree(hz); cudaFree(hz_outputFromGpu); return 0; }
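The .hip and .cu versions of fdtd2d above differ only in API spelling: hipify rewrites the triple-chevron launches as hipLaunchKernelGGL, maps the deprecated cudaThreadSynchronize to hipDeviceSynchronize, and renames cudaMalloc/cudaMemcpy/cudaEvent*/cudaHostAlloc to their hip counterparts. A minimal, self-contained sketch (hypothetical kernel, not part of the benchmark) showing the two spellings of the same launch:

#ifdef __HIPCC__
#include <hip/hip_runtime.h>
#else
#include <cuda_runtime.h>
#endif

__global__ void scale_kernel(float *x, float a, int n)
{
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n) x[i] *= a;
}

void launchScale(float *d_x, float a, int n)
{
    dim3 block(256);
    dim3 grid((n + block.x - 1) / block.x);
#ifdef __HIPCC__
    // hipify-style spelling: shared-memory size (0) and stream (0) are explicit arguments
    hipLaunchKernelGGL(scale_kernel, grid, block, 0, 0, d_x, a, n);
    hipDeviceSynchronize();
#else
    // CUDA triple-chevron spelling used in the .cu version
    scale_kernel<<<grid, block>>>(d_x, a, n);
    cudaDeviceSynchronize();
#endif
}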
3aee43626abd3c00b58eafa2a1634745ee742071.hip
// !!! This is a file automatically generated by hipify!!!
/* ------------------------------------------------------------------------ */
/* Copyright 2018, IBM Corp.                                                 */
/*                                                                           */
/* Licensed under the Apache License, Version 2.0 (the "License");           */
/* you may not use this file except in compliance with the License.          */
/* You may obtain a copy of the License at                                   */
/*                                                                           */
/*    http://www.apache.org/licenses/LICENSE-2.0                             */
/*                                                                           */
/* Unless required by applicable law or agreed to in writing, software       */
/* distributed under the License is distributed on an "AS IS" BASIS,         */
/* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.  */
/* See the License for the specific language governing permissions and       */
/* limitations under the License.                                            */
/* ------------------------------------------------------------------------ */

#ifndef SD_TEMPLATE_FILE
#define SD_TEMPLATE_FILE "core/gpu/template_copy.cu"

#include "solid.h"
#include "solid/base/generic/dtype_assign.h"
#include "solid/base/gpu/dtype_gpu.h"
#include "solid/core/gpu/apply_elemwise2.h"
#include "solid/core/gpu/apply_elemwise2b.h"
#include "solid/base/generic/generate_all_types2.h"

#else

/* Create the cuda kernels - copy regular size */
SOLID_KERNELS_ELEMWISE2_TYPES(SDXTYPE, SDXTYPE2, 2, UNROLL, copy_regular, \
                              SOLID_ASSIGN(SDXTYPE, SDXTYPE2, _ptr1, _ptr2))

/* Create the cuda kernels - copy with irregular size */
SOLID_KERNELS_ELEMWISE2B_TYPES(SDXTYPE, SDXTYPE2, 2, UNROLL, copy, \
                               SOLID_ASSIGN(SDXTYPE, SDXTYPE2, _ptr1, _ptr2))

/* -------------------------------------------------------------------- */
SOLID_API int SOLID_FUNCTION2(copy_regular)(int ndims, const size_t *size,
                                            const ptrdiff_t *strides1, void *ptr1,
                                            const ptrdiff_t *strides2, void *ptr2,
                                            hipStream_t stream)
/* -------------------------------------------------------------------- */
{
   int result;

   /* Set up and launch the appropriate kernel */
   SOLID_LAUNCH_ELEMWISE2_TYPES(SDXTYPE, SDXTYPE2, 2, UNROLL, copy_regular, 0, stream, result);

   return result;
}

/* -------------------------------------------------------------------- */
SOLID_API int SOLID_FUNCTION2(copy)(int ndims1, const size_t *size1,
                                    const ptrdiff_t *strides1, void *ptr1,
                                    int ndims2, const size_t *size2,
                                    const ptrdiff_t *strides2, void *ptr2,
                                    hipStream_t stream)
/* -------------------------------------------------------------------- */
{
   int regular = 1, i;
   int result;

   /* Check whether the tensor sizes match */
   if (ndims1 == ndims2)
   {
      if (size1 != size2)
      {
         for (i = 0; i < ndims1; i++)
         {
            if (size1[i] != size2[i])
            {
               regular = 0;
               break;
            }
         }
      }
   }
   else if ((ndims1 != 0) && (ndims2 != 0))
   {
      regular = 0;
   }

   /* Call regular or full copy */
   if (regular)
   {
      result = SOLID_FUNCTION2(copy_regular)(ndims1, size1, strides1, ptr1, strides2, ptr2, stream);
   }
   else
   {
      SOLID_LAUNCH_ELEMWISE2B_TYPES(SDXTYPE, SDXTYPE2, 2, UNROLL, copy, 0, stream, result);
   }

   return result;
}

#endif
3aee43626abd3c00b58eafa2a1634745ee742071.cu
/* ------------------------------------------------------------------------ */
/* Copyright 2018, IBM Corp.                                                 */
/*                                                                           */
/* Licensed under the Apache License, Version 2.0 (the "License");           */
/* you may not use this file except in compliance with the License.          */
/* You may obtain a copy of the License at                                   */
/*                                                                           */
/*    http://www.apache.org/licenses/LICENSE-2.0                             */
/*                                                                           */
/* Unless required by applicable law or agreed to in writing, software       */
/* distributed under the License is distributed on an "AS IS" BASIS,         */
/* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.  */
/* See the License for the specific language governing permissions and       */
/* limitations under the License.                                            */
/* ------------------------------------------------------------------------ */

#ifndef SD_TEMPLATE_FILE
#define SD_TEMPLATE_FILE "core/gpu/template_copy.cu"

#include "solid.h"
#include "solid/base/generic/dtype_assign.h"
#include "solid/base/gpu/dtype_gpu.h"
#include "solid/core/gpu/apply_elemwise2.h"
#include "solid/core/gpu/apply_elemwise2b.h"
#include "solid/base/generic/generate_all_types2.h"

#else

/* Create the cuda kernels - copy regular size */
SOLID_KERNELS_ELEMWISE2_TYPES(SDXTYPE, SDXTYPE2, 2, UNROLL, copy_regular, \
                              SOLID_ASSIGN(SDXTYPE, SDXTYPE2, _ptr1, _ptr2))

/* Create the cuda kernels - copy with irregular size */
SOLID_KERNELS_ELEMWISE2B_TYPES(SDXTYPE, SDXTYPE2, 2, UNROLL, copy, \
                               SOLID_ASSIGN(SDXTYPE, SDXTYPE2, _ptr1, _ptr2))

/* -------------------------------------------------------------------- */
SOLID_API int SOLID_FUNCTION2(copy_regular)(int ndims, const size_t *size,
                                            const ptrdiff_t *strides1, void *ptr1,
                                            const ptrdiff_t *strides2, void *ptr2,
                                            cudaStream_t stream)
/* -------------------------------------------------------------------- */
{
   int result;

   /* Set up and launch the appropriate kernel */
   SOLID_LAUNCH_ELEMWISE2_TYPES(SDXTYPE, SDXTYPE2, 2, UNROLL, copy_regular, 0, stream, result);

   return result;
}

/* -------------------------------------------------------------------- */
SOLID_API int SOLID_FUNCTION2(copy)(int ndims1, const size_t *size1,
                                    const ptrdiff_t *strides1, void *ptr1,
                                    int ndims2, const size_t *size2,
                                    const ptrdiff_t *strides2, void *ptr2,
                                    cudaStream_t stream)
/* -------------------------------------------------------------------- */
{
   int regular = 1, i;
   int result;

   /* Check whether the tensor sizes match */
   if (ndims1 == ndims2)
   {
      if (size1 != size2)
      {
         for (i = 0; i < ndims1; i++)
         {
            if (size1[i] != size2[i])
            {
               regular = 0;
               break;
            }
         }
      }
   }
   else if ((ndims1 != 0) && (ndims2 != 0))
   {
      regular = 0;
   }

   /* Call regular or full copy */
   if (regular)
   {
      result = SOLID_FUNCTION2(copy_regular)(ndims1, size1, strides1, ptr1, strides2, ptr2, stream);
   }
   else
   {
      SOLID_LAUNCH_ELEMWISE2B_TYPES(SDXTYPE, SDXTYPE2, 2, UNROLL, copy, 0, stream, result);
   }

   return result;
}

#endif
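Both template_copy variants above rely on the same trick: on the first pass the file defines SD_TEMPLATE_FILE and includes generate_all_types2.h, which re-includes this very file once per (SDXTYPE, SDXTYPE2) pair, so the #else branch stamps out one kernel/function set per type combination. A minimal stand-alone sketch of that self-inclusion pattern (the file name, macros, and kernel here are illustrative, not the solid library's):

/* template_fill.cu -- hypothetical single-type version of the pattern */
#ifndef SD_TEMPLATE_FILE
#define SD_TEMPLATE_FILE "template_fill.cu"   /* the name of this very file */

#include <cstddef>

#define SD_CONCAT_(a, b) a##b
#define SD_CONCAT(a, b)  SD_CONCAT_(a, b)

/* Inline "type table": one re-inclusion per element type.
   (The solid library keeps this list in generate_all_types2.h.) */
#define SDXTYPE float
#include SD_TEMPLATE_FILE
#undef SDXTYPE

#define SDXTYPE double
#include SD_TEMPLATE_FILE
#undef SDXTYPE

#else /* compiled once per SDXTYPE defined above */

__global__ void SD_CONCAT(fill_kernel_, SDXTYPE)(SDXTYPE *ptr, SDXTYPE value, size_t n)
{
    size_t i = blockIdx.x * (size_t)blockDim.x + threadIdx.x;
    if (i < n) ptr[i] = value;   /* instantiates fill_kernel_float and fill_kernel_double */
}

#endif

Compiling this one file therefore yields a kernel per element type without C++ templates, which is why the functions in template_copy are wrapped in the SOLID_FUNCTION2 and SOLID_KERNELS_* name-mangling macros.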
67d2dff6bfccbdf293c48c390e5bc0a82bd84f94.hip
// !!! This is a file automatically generated by hipify!!! /**** * Copyright (c) 2011-2018, NVIDIA CORPORATION. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * Neither the name of NVIDIA CORPORATION nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ****/ #include "mpi.h" #include "mp.h" #include "hip/hip_runtime.h" #include "hip/hip_runtime.h" #include <string.h> #include <stdio.h> #include "assert.h" #include <limits.h> #include <sys/types.h> #include <unistd.h> #include "prof.h" #include "hip/hip_runtime_api.h" #define CUDA_CHECK(stmt) \ do { \ hipError_t result = (stmt); \ if (hipSuccess != result) { \ fprintf(stderr, "[%s:%d] cuda failed with %s \n", \ __FILE__, __LINE__,hipGetErrorString(result));\ exit(-1); \ } \ assert(hipSuccess == result); \ } while (0) #define MP_CHECK(stmt) \ do { \ int result = (stmt); \ if (0 != result) { \ fprintf(stderr, "[%s:%d] mp call failed \n", \ __FILE__, __LINE__); \ exit(-1); \ } \ assert(0 == result); \ } while (0) int enable_debug_prints = 0; #define mp_dbg_msg(FMT, ARGS...) 
do \ { \ if (enable_debug_prints) { \ fprintf(stderr, "[%d] [%d] MP DBG %s() " FMT, getpid(), my_rank, __FUNCTION__ , ## ARGS); \ fflush(stderr); \ } \ } while(0) #define MAX_SIZE 128*1024 //64*1024 #define ITER_COUNT_SMALL 200 #define ITER_COUNT_LARGE 50 struct prof prof_normal; struct prof prof_async; int prof_start = 0; int prof_idx = 0; int comm_size, my_rank, peer; int steps_per_batch = 16, batches_inflight = 4; int enable_async = 1; __device__ int counter; __device__ int clockrate; __global__ void dummy_kernel(double time) { long long int start, stop; double usec; start = clock64(); do { stop = clock64(); usec = ((double)(stop-start)*1000)/((double)clockrate); counter = usec; } while(usec < time); } /*application and pack buffers*/ void *buf = NULL, *sbuf_d = NULL, *rbuf_d = NULL; int req_max_inflight = 0, rreq_max_inflight = 0, prepost_depth = 0; hipStream_t stream; size_t buf_size; /*mp specific objects*/ mp_request_t *sreq = NULL; mp_request_t *rreq = NULL; mp_reg_t sreg, rreg; double time_start, time_stop; int sr_exchange (MPI_Comm comm, int size, int iter_count, int validate, double kernel_time, int use_async, struct prof *prof) { int i, j; double latency; double time_start, time_stop; int req_idx = 0, rreq_idx = 0, complete_req_idx = 0, complete_rreq_idx = 0; int req_inflight = 0, rreq_inflight = 0; if (validate) { mp_dbg_msg("initializing the buffer \n"); CUDA_CHECK(hipMemset(sbuf_d, (size + 1)%CHAR_MAX, buf_size)); CUDA_CHECK(hipMemset(rbuf_d, 0, buf_size)); CUDA_CHECK(hipDeviceSynchronize()); } time_start = MPI_Wtime(); for (j=0; j<prepost_depth; j++) { mp_dbg_msg("[%d] posted recv request: %d \n", my_rank, rreq_idx); MP_CHECK(mp_irecv ((void *)((uintptr_t)rbuf_d + size*j), size, peer, &rreg, &rreq[rreq_idx])); rreq_idx = (rreq_idx + 1)%rreq_max_inflight; rreq_inflight++; } prof_idx = 0; for (j = 0; j < iter_count; j++) { mp_dbg_msg("[%d] iteration :%d \n", my_rank, j); if (!my_rank && prof) PROF(prof, prof_idx++); if (kernel_time > 0) { hipLaunchKernelGGL(( dummy_kernel) , dim3(1), dim3(1), 0, stream, kernel_time); if (!use_async) { CUDA_CHECK(hipStreamSynchronize(stream)); } } if (!my_rank && prof) PROF(prof, prof_idx++); req_idx = j%req_max_inflight; mp_dbg_msg("[%d] posted send request: %d \n", my_rank, req_idx); if (!use_async) { MP_CHECK(mp_isend ((void *)((uintptr_t)sbuf_d + size*j), size, peer, &sreg, &sreq[req_idx])); } else { MP_CHECK(mp_isend_on_stream ((void *)((uintptr_t)sbuf_d + size*j), size, peer, &sreg, &sreq[req_idx], stream)); } if (!my_rank && prof) PROF(prof, prof_idx++); req_idx = j%rreq_max_inflight; if (!use_async) { MP_CHECK(mp_wait(&rreq[req_idx])); } else { MP_CHECK(mp_wait_on_stream(&rreq[req_idx], stream)); } req_inflight++; mp_dbg_msg("[%d] requests inflight: %d \n", my_rank, req_inflight); if (!my_rank && prof) PROF(prof, prof_idx++); if ((j + prepost_depth) < iter_count) { mp_dbg_msg("[%d] posted recv request: %d \n", my_rank, rreq_idx); int buf_idx = (j + prepost_depth); MP_CHECK(mp_irecv ((void *)((uintptr_t)rbuf_d + size*buf_idx), size, peer, &rreg, &rreq[rreq_idx])); rreq_idx = (rreq_idx + 1)%rreq_max_inflight; rreq_inflight++; } if (!my_rank && prof) PROF(prof, prof_idx++); /*synchronize on oldest batch*/ if (req_inflight == req_max_inflight) { if (use_async) { for (i=0; i<steps_per_batch; i++) { mp_dbg_msg("[%d] waiting on recv request: %d \n", my_rank, complete_rreq_idx); MP_CHECK(mp_wait(&rreq[complete_rreq_idx])); mp_dbg_msg("[%d] completed recv request: %d \n", my_rank, complete_rreq_idx); complete_rreq_idx = (complete_rreq_idx + 
1)%rreq_max_inflight; rreq_inflight--; } } mp_dbg_msg("[%d] after waiting on recv, rreq_inflight: %d \n", my_rank, rreq_inflight); for (i=0; i<steps_per_batch; i++) { mp_dbg_msg("[%d] waiting on send request: %d \n", my_rank, complete_req_idx); MP_CHECK(mp_wait(&sreq[complete_req_idx])); mp_dbg_msg("[%d] completed send request: %d \n", my_rank, complete_req_idx); complete_req_idx = (complete_req_idx + 1)%req_max_inflight; req_inflight--; } mp_dbg_msg("[%d] after waiting on send, req_inflight: %d \n", my_rank, req_inflight); } if (j == (iter_count - 1)) { /*ideally, there should be validation here*/ if (use_async) { while (rreq_inflight > 0) { mp_wait(&rreq[complete_rreq_idx]); mp_dbg_msg("[%d] completed recv request: %d \n", my_rank, complete_rreq_idx); complete_rreq_idx = (complete_rreq_idx + 1)%rreq_max_inflight; rreq_inflight--; } } while (req_inflight > 0) { mp_wait(&sreq[complete_req_idx]); mp_dbg_msg("[%d] completed send request: %d \n", my_rank, complete_req_idx); complete_req_idx = (complete_req_idx + 1)%req_max_inflight; req_inflight--; } } if (!my_rank && prof) { PROF(prof, prof_idx++); prof_update(prof); prof_idx = 0; } } if (validate) { CUDA_CHECK(hipMemcpy((void *)((uintptr_t)buf), (void *)((uintptr_t)rbuf_d), buf_size, hipMemcpyDefault)); //CUDA_CHECK(hipDeviceSynchronize()); char *value = (char *)((uintptr_t)buf); for (i=0; i<buf_size; i++) { if (value[i] != (size + 1)%CHAR_MAX) { mp_dbg_msg("[%d] validation check failed index: %d expected: %d actual: %d \n", my_rank, i, (size + 1)%CHAR_MAX, value[i]); exit(-1); } } } MPI_Barrier(comm); time_stop = MPI_Wtime(); latency = (((time_stop - time_start)*1e6)/(iter_count*2)); CUDA_CHECK(hipDeviceSynchronize()); return latency; } int main (int c, char *v[]) { int iter_count, size, dev_count, local_rank, dev_id = 0; int kernel_time = 20; int comm_comp_ratio = 0; int validate = 0; char *value = getenv("ENABLE_VALIDATION"); if (value != NULL) { validate = atoi(value); } value = getenv("ENABLE_DEBUG_MSG"); if (value != NULL) { enable_debug_prints = atoi(value); } value = getenv("KRENEL_TIME"); if (value != NULL) { kernel_time = atoi(value); } value = getenv("COMM_COMP_RATIO"); if (value != NULL) { comm_comp_ratio = atoi(value); } size = 0; value = getenv("SIZE"); if (value != NULL) { size = atoi(value); } MPI_Init(&c, &v); MPI_Comm_size(MPI_COMM_WORLD, &comm_size); MPI_Comm_rank(MPI_COMM_WORLD, &my_rank); if (comm_size != 2) { fprintf(stderr, "this test requires exactly two processes \n"); exit(-1); } CUDA_CHECK(hipGetDeviceCount(&dev_count)); if (dev_count <= 0) { fprintf(stderr, "no CUDA devices found \n"); exit(-1); } if (getenv("MV2_COMM_WORLD_LOCAL_RANK") != NULL) { local_rank = atoi(getenv("MV2_COMM_WORLD_LOCAL_RANK")); } dev_id = local_rank%dev_count; fprintf(stdout, "[%d] local_rank: %d dev_count: %d using GPU device: %d \n", my_rank, local_rank, dev_count, dev_id); CUDA_CHECK(hipSetDevice(dev_id)); CUDA_CHECK(hipFree(0)); hipDeviceProp_t prop; CUDA_CHECK(hipGetDeviceProperties(&prop, dev_id)); CUDA_CHECK(hipMemcpyToSymbol(clockrate, (void *)&prop.clockRate, sizeof(int), 0, hipMemcpyHostToDevice)); peer = !my_rank; MP_CHECK(mp_init (MPI_COMM_WORLD, &peer, 1, MP_INIT_DEFAULT, dev_id)); iter_count = ITER_COUNT_SMALL; if (!my_rank) { fprintf(stdout, "steps_per_batch: %d batches_inflight: %d \n", steps_per_batch, batches_inflight); } prepost_depth = (steps_per_batch < iter_count) ? 
steps_per_batch : iter_count; req_max_inflight = steps_per_batch*batches_inflight; rreq_max_inflight = (steps_per_batch*batches_inflight + prepost_depth); /*allocating requests*/ sreq = (mp_request_t *) malloc(req_max_inflight*sizeof(mp_request_t)); rreq = (mp_request_t *) malloc(rreq_max_inflight*sizeof(mp_request_t)); CUDA_CHECK(hipStreamCreateWithFlags(&stream, hipStreamNonBlocking)); if (!my_rank) fprintf(stdout, "%10s %10s %10s %10s %10s %10s \n", "Size", "KernelTime", "No-asyncl", "No-async+Kernel", "Async", "Async+Kernel"); for (size=1; size<=MAX_SIZE; size*=2) { double latency; char *tags = "kernel|send|recv|prepost|wait|"; if (size > 1024) { iter_count = ITER_COUNT_LARGE; } buf_size = size*iter_count; buf = malloc (buf_size); memset(buf, 0, buf_size); CUDA_CHECK(hipMalloc((void **)&sbuf_d, buf_size)); CUDA_CHECK(hipMemset(sbuf_d, 0, buf_size)); CUDA_CHECK(hipMalloc((void **)&rbuf_d, buf_size)); CUDA_CHECK(hipMemset(rbuf_d, 0, buf_size)); MP_CHECK(mp_register(sbuf_d, buf_size, &sreg)); MP_CHECK(mp_register(rbuf_d, buf_size, &rreg)); if (!my_rank) { if (prof_init(&prof_normal, 1000, 1000, "1us", 100, 1, tags)) { fprintf(stderr, "error in prof_init init.\n"); exit(-1); } if (prof_init(&prof_async, 1000, 1000, "1us", 100, 1, tags)) { fprintf(stderr, "error in prof_init init.\n"); exit(-1); } prof_start = 1; } if (!my_rank) fprintf(stdout, "%8d", size); /*warmup*/ latency = sr_exchange(MPI_COMM_WORLD, size, iter_count, validate, 0/*kernel_time*/, 0/*use_async*/, NULL/*prof*/); latency = sr_exchange(MPI_COMM_WORLD, size, iter_count, validate, 0/*kernel_time*/, 1/*use_async*/, NULL/*prof*/); /*Normal*/ latency = sr_exchange(MPI_COMM_WORLD, size, iter_count, validate, 0/*kernel_time*/, 0/*use_async*/, NULL/*prof*/); kernel_time = (comm_comp_ratio > 0) ? comm_comp_ratio*latency : kernel_time; if (!my_rank) fprintf(stdout, "\t %d", kernel_time); if (!my_rank) fprintf(stdout, "\t %8.2lf", latency); hipProfilerStart(); /*Normal + Kernel*/ latency = sr_exchange(MPI_COMM_WORLD, size, iter_count, validate, kernel_time, 0/*use_async*/, &prof_normal/*prof*/); if (!my_rank) fprintf(stdout, "\t %8.2lf", latency); hipProfilerStop(); /*Async*/ latency = sr_exchange(MPI_COMM_WORLD, size, iter_count, validate, 0/*kernel_time*/, 1/*use_async*/, NULL/*prof*/); if (!my_rank) fprintf(stdout, "\t %8.2lf", latency); hipProfilerStart(); /*Async + Kernel*/ latency = sr_exchange(MPI_COMM_WORLD, size, iter_count, validate, kernel_time, 1/*use_async*/, &prof_async/*prof*/); if (!my_rank) fprintf(stdout, "\t %8.2lf \n", latency); hipProfilerStop(); if (!my_rank && validate) fprintf(stdout, "SendRecv test passed validation with message size: %d \n", size); if (!my_rank) { prof_dump(&prof_normal); prof_dump(&prof_async); } mp_deregister(&sreg); mp_deregister(&rreg); CUDA_CHECK(hipFree(sbuf_d)); CUDA_CHECK(hipFree(rbuf_d)); free(buf); } CUDA_CHECK(hipStreamDestroy(stream)); free(sreq); free(rreq); mp_finalize (); MPI_Barrier(MPI_COMM_WORLD); MPI_Finalize(); return 0; }
67d2dff6bfccbdf293c48c390e5bc0a82bd84f94.cu
/**** * Copyright (c) 2011-2018, NVIDIA CORPORATION. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * Neither the name of NVIDIA CORPORATION nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ****/ #include "mpi.h" #include "mp.h" #include "cuda.h" #include "cuda_runtime.h" #include <string.h> #include <stdio.h> #include "assert.h" #include <limits.h> #include <sys/types.h> #include <unistd.h> #include "prof.h" #include "cuda_profiler_api.h" #define CUDA_CHECK(stmt) \ do { \ cudaError_t result = (stmt); \ if (cudaSuccess != result) { \ fprintf(stderr, "[%s:%d] cuda failed with %s \n", \ __FILE__, __LINE__,cudaGetErrorString(result));\ exit(-1); \ } \ assert(cudaSuccess == result); \ } while (0) #define MP_CHECK(stmt) \ do { \ int result = (stmt); \ if (0 != result) { \ fprintf(stderr, "[%s:%d] mp call failed \n", \ __FILE__, __LINE__); \ exit(-1); \ } \ assert(0 == result); \ } while (0) int enable_debug_prints = 0; #define mp_dbg_msg(FMT, ARGS...) 
do \ { \ if (enable_debug_prints) { \ fprintf(stderr, "[%d] [%d] MP DBG %s() " FMT, getpid(), my_rank, __FUNCTION__ , ## ARGS); \ fflush(stderr); \ } \ } while(0) #define MAX_SIZE 128*1024 //64*1024 #define ITER_COUNT_SMALL 200 #define ITER_COUNT_LARGE 50 struct prof prof_normal; struct prof prof_async; int prof_start = 0; int prof_idx = 0; int comm_size, my_rank, peer; int steps_per_batch = 16, batches_inflight = 4; int enable_async = 1; __device__ int counter; __device__ int clockrate; __global__ void dummy_kernel(double time) { long long int start, stop; double usec; start = clock64(); do { stop = clock64(); usec = ((double)(stop-start)*1000)/((double)clockrate); counter = usec; } while(usec < time); } /*application and pack buffers*/ void *buf = NULL, *sbuf_d = NULL, *rbuf_d = NULL; int req_max_inflight = 0, rreq_max_inflight = 0, prepost_depth = 0; cudaStream_t stream; size_t buf_size; /*mp specific objects*/ mp_request_t *sreq = NULL; mp_request_t *rreq = NULL; mp_reg_t sreg, rreg; double time_start, time_stop; int sr_exchange (MPI_Comm comm, int size, int iter_count, int validate, double kernel_time, int use_async, struct prof *prof) { int i, j; double latency; double time_start, time_stop; int req_idx = 0, rreq_idx = 0, complete_req_idx = 0, complete_rreq_idx = 0; int req_inflight = 0, rreq_inflight = 0; if (validate) { mp_dbg_msg("initializing the buffer \n"); CUDA_CHECK(cudaMemset(sbuf_d, (size + 1)%CHAR_MAX, buf_size)); CUDA_CHECK(cudaMemset(rbuf_d, 0, buf_size)); CUDA_CHECK(cudaDeviceSynchronize()); } time_start = MPI_Wtime(); for (j=0; j<prepost_depth; j++) { mp_dbg_msg("[%d] posted recv request: %d \n", my_rank, rreq_idx); MP_CHECK(mp_irecv ((void *)((uintptr_t)rbuf_d + size*j), size, peer, &rreg, &rreq[rreq_idx])); rreq_idx = (rreq_idx + 1)%rreq_max_inflight; rreq_inflight++; } prof_idx = 0; for (j = 0; j < iter_count; j++) { mp_dbg_msg("[%d] iteration :%d \n", my_rank, j); if (!my_rank && prof) PROF(prof, prof_idx++); if (kernel_time > 0) { dummy_kernel <<<1, 1, 0, stream>>> (kernel_time); if (!use_async) { CUDA_CHECK(cudaStreamSynchronize(stream)); } } if (!my_rank && prof) PROF(prof, prof_idx++); req_idx = j%req_max_inflight; mp_dbg_msg("[%d] posted send request: %d \n", my_rank, req_idx); if (!use_async) { MP_CHECK(mp_isend ((void *)((uintptr_t)sbuf_d + size*j), size, peer, &sreg, &sreq[req_idx])); } else { MP_CHECK(mp_isend_on_stream ((void *)((uintptr_t)sbuf_d + size*j), size, peer, &sreg, &sreq[req_idx], stream)); } if (!my_rank && prof) PROF(prof, prof_idx++); req_idx = j%rreq_max_inflight; if (!use_async) { MP_CHECK(mp_wait(&rreq[req_idx])); } else { MP_CHECK(mp_wait_on_stream(&rreq[req_idx], stream)); } req_inflight++; mp_dbg_msg("[%d] requests inflight: %d \n", my_rank, req_inflight); if (!my_rank && prof) PROF(prof, prof_idx++); if ((j + prepost_depth) < iter_count) { mp_dbg_msg("[%d] posted recv request: %d \n", my_rank, rreq_idx); int buf_idx = (j + prepost_depth); MP_CHECK(mp_irecv ((void *)((uintptr_t)rbuf_d + size*buf_idx), size, peer, &rreg, &rreq[rreq_idx])); rreq_idx = (rreq_idx + 1)%rreq_max_inflight; rreq_inflight++; } if (!my_rank && prof) PROF(prof, prof_idx++); /*synchronize on oldest batch*/ if (req_inflight == req_max_inflight) { if (use_async) { for (i=0; i<steps_per_batch; i++) { mp_dbg_msg("[%d] waiting on recv request: %d \n", my_rank, complete_rreq_idx); MP_CHECK(mp_wait(&rreq[complete_rreq_idx])); mp_dbg_msg("[%d] completed recv request: %d \n", my_rank, complete_rreq_idx); complete_rreq_idx = (complete_rreq_idx + 1)%rreq_max_inflight; 
rreq_inflight--; } } mp_dbg_msg("[%d] after waiting on recv, rreq_inflight: %d \n", my_rank, rreq_inflight); for (i=0; i<steps_per_batch; i++) { mp_dbg_msg("[%d] waiting on send request: %d \n", my_rank, complete_req_idx); MP_CHECK(mp_wait(&sreq[complete_req_idx])); mp_dbg_msg("[%d] completed send request: %d \n", my_rank, complete_req_idx); complete_req_idx = (complete_req_idx + 1)%req_max_inflight; req_inflight--; } mp_dbg_msg("[%d] after waiting on send, req_inflight: %d \n", my_rank, req_inflight); } if (j == (iter_count - 1)) { /*ideally, there should be validation here*/ if (use_async) { while (rreq_inflight > 0) { mp_wait(&rreq[complete_rreq_idx]); mp_dbg_msg("[%d] completed recv request: %d \n", my_rank, complete_rreq_idx); complete_rreq_idx = (complete_rreq_idx + 1)%rreq_max_inflight; rreq_inflight--; } } while (req_inflight > 0) { mp_wait(&sreq[complete_req_idx]); mp_dbg_msg("[%d] completed send request: %d \n", my_rank, complete_req_idx); complete_req_idx = (complete_req_idx + 1)%req_max_inflight; req_inflight--; } } if (!my_rank && prof) { PROF(prof, prof_idx++); prof_update(prof); prof_idx = 0; } } if (validate) { CUDA_CHECK(cudaMemcpy((void *)((uintptr_t)buf), (void *)((uintptr_t)rbuf_d), buf_size, cudaMemcpyDefault)); //CUDA_CHECK(cudaDeviceSynchronize()); char *value = (char *)((uintptr_t)buf); for (i=0; i<buf_size; i++) { if (value[i] != (size + 1)%CHAR_MAX) { mp_dbg_msg("[%d] validation check failed index: %d expected: %d actual: %d \n", my_rank, i, (size + 1)%CHAR_MAX, value[i]); exit(-1); } } } MPI_Barrier(comm); time_stop = MPI_Wtime(); latency = (((time_stop - time_start)*1e6)/(iter_count*2)); CUDA_CHECK(cudaDeviceSynchronize()); return latency; } int main (int c, char *v[]) { int iter_count, size, dev_count, local_rank, dev_id = 0; int kernel_time = 20; int comm_comp_ratio = 0; int validate = 0; char *value = getenv("ENABLE_VALIDATION"); if (value != NULL) { validate = atoi(value); } value = getenv("ENABLE_DEBUG_MSG"); if (value != NULL) { enable_debug_prints = atoi(value); } value = getenv("KRENEL_TIME"); if (value != NULL) { kernel_time = atoi(value); } value = getenv("COMM_COMP_RATIO"); if (value != NULL) { comm_comp_ratio = atoi(value); } size = 0; value = getenv("SIZE"); if (value != NULL) { size = atoi(value); } MPI_Init(&c, &v); MPI_Comm_size(MPI_COMM_WORLD, &comm_size); MPI_Comm_rank(MPI_COMM_WORLD, &my_rank); if (comm_size != 2) { fprintf(stderr, "this test requires exactly two processes \n"); exit(-1); } CUDA_CHECK(cudaGetDeviceCount(&dev_count)); if (dev_count <= 0) { fprintf(stderr, "no CUDA devices found \n"); exit(-1); } if (getenv("MV2_COMM_WORLD_LOCAL_RANK") != NULL) { local_rank = atoi(getenv("MV2_COMM_WORLD_LOCAL_RANK")); } dev_id = local_rank%dev_count; fprintf(stdout, "[%d] local_rank: %d dev_count: %d using GPU device: %d \n", my_rank, local_rank, dev_count, dev_id); CUDA_CHECK(cudaSetDevice(dev_id)); CUDA_CHECK(cudaFree(0)); cudaDeviceProp prop; CUDA_CHECK(cudaGetDeviceProperties(&prop, dev_id)); CUDA_CHECK(cudaMemcpyToSymbol(clockrate, (void *)&prop.clockRate, sizeof(int), 0, cudaMemcpyHostToDevice)); peer = !my_rank; MP_CHECK(mp_init (MPI_COMM_WORLD, &peer, 1, MP_INIT_DEFAULT, dev_id)); iter_count = ITER_COUNT_SMALL; if (!my_rank) { fprintf(stdout, "steps_per_batch: %d batches_inflight: %d \n", steps_per_batch, batches_inflight); } prepost_depth = (steps_per_batch < iter_count) ? 
steps_per_batch : iter_count; req_max_inflight = steps_per_batch*batches_inflight; rreq_max_inflight = (steps_per_batch*batches_inflight + prepost_depth); /*allocating requests*/ sreq = (mp_request_t *) malloc(req_max_inflight*sizeof(mp_request_t)); rreq = (mp_request_t *) malloc(rreq_max_inflight*sizeof(mp_request_t)); CUDA_CHECK(cudaStreamCreateWithFlags(&stream, cudaStreamNonBlocking)); if (!my_rank) fprintf(stdout, "%10s %10s %10s %10s %10s %10s \n", "Size", "KernelTime", "No-asyncl", "No-async+Kernel", "Async", "Async+Kernel"); for (size=1; size<=MAX_SIZE; size*=2) { double latency; char *tags = "kernel|send|recv|prepost|wait|"; if (size > 1024) { iter_count = ITER_COUNT_LARGE; } buf_size = size*iter_count; buf = malloc (buf_size); memset(buf, 0, buf_size); CUDA_CHECK(cudaMalloc((void **)&sbuf_d, buf_size)); CUDA_CHECK(cudaMemset(sbuf_d, 0, buf_size)); CUDA_CHECK(cudaMalloc((void **)&rbuf_d, buf_size)); CUDA_CHECK(cudaMemset(rbuf_d, 0, buf_size)); MP_CHECK(mp_register(sbuf_d, buf_size, &sreg)); MP_CHECK(mp_register(rbuf_d, buf_size, &rreg)); if (!my_rank) { if (prof_init(&prof_normal, 1000, 1000, "1us", 100, 1, tags)) { fprintf(stderr, "error in prof_init init.\n"); exit(-1); } if (prof_init(&prof_async, 1000, 1000, "1us", 100, 1, tags)) { fprintf(stderr, "error in prof_init init.\n"); exit(-1); } prof_start = 1; } if (!my_rank) fprintf(stdout, "%8d", size); /*warmup*/ latency = sr_exchange(MPI_COMM_WORLD, size, iter_count, validate, 0/*kernel_time*/, 0/*use_async*/, NULL/*prof*/); latency = sr_exchange(MPI_COMM_WORLD, size, iter_count, validate, 0/*kernel_time*/, 1/*use_async*/, NULL/*prof*/); /*Normal*/ latency = sr_exchange(MPI_COMM_WORLD, size, iter_count, validate, 0/*kernel_time*/, 0/*use_async*/, NULL/*prof*/); kernel_time = (comm_comp_ratio > 0) ? comm_comp_ratio*latency : kernel_time; if (!my_rank) fprintf(stdout, "\t %d", kernel_time); if (!my_rank) fprintf(stdout, "\t %8.2lf", latency); cudaProfilerStart(); /*Normal + Kernel*/ latency = sr_exchange(MPI_COMM_WORLD, size, iter_count, validate, kernel_time, 0/*use_async*/, &prof_normal/*prof*/); if (!my_rank) fprintf(stdout, "\t %8.2lf", latency); cudaProfilerStop(); /*Async*/ latency = sr_exchange(MPI_COMM_WORLD, size, iter_count, validate, 0/*kernel_time*/, 1/*use_async*/, NULL/*prof*/); if (!my_rank) fprintf(stdout, "\t %8.2lf", latency); cudaProfilerStart(); /*Async + Kernel*/ latency = sr_exchange(MPI_COMM_WORLD, size, iter_count, validate, kernel_time, 1/*use_async*/, &prof_async/*prof*/); if (!my_rank) fprintf(stdout, "\t %8.2lf \n", latency); cudaProfilerStop(); if (!my_rank && validate) fprintf(stdout, "SendRecv test passed validation with message size: %d \n", size); if (!my_rank) { prof_dump(&prof_normal); prof_dump(&prof_async); } mp_deregister(&sreg); mp_deregister(&rreg); CUDA_CHECK(cudaFree(sbuf_d)); CUDA_CHECK(cudaFree(rbuf_d)); free(buf); } CUDA_CHECK(cudaStreamDestroy(stream)); free(sreq); free(rreq); mp_finalize (); MPI_Barrier(MPI_COMM_WORLD); MPI_Finalize(); return 0; }
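The heart of sr_exchange() above is the bounded window of in-flight requests: send and receive slots are addressed modulo the window size, and once steps_per_batch*batches_inflight requests are outstanding the oldest batch is drained before new work is posted. The host-only sketch below isolates that bookkeeping; wait_request() is a placeholder for mp_wait(&sreq[idx]) and is not part of the real libmp API.

#include <cstdio>

static const int steps_per_batch  = 16;
static const int batches_inflight = 4;
static const int req_max_inflight = steps_per_batch * batches_inflight;

/* placeholder for mp_wait(&sreq[idx]); does nothing here */
static void wait_request(int idx) { (void)idx; }

int main()
{
    const int iter_count = 200;
    int post_idx = 0, complete_idx = 0, inflight = 0;

    for (int j = 0; j < iter_count; j++) {
        post_idx = j % req_max_inflight;     /* slot is reused once its batch completed */
        inflight++;

        if (inflight == req_max_inflight) {  /* window full: drain the oldest batch */
            for (int i = 0; i < steps_per_batch; i++) {
                wait_request(complete_idx);
                complete_idx = (complete_idx + 1) % req_max_inflight;
                inflight--;
            }
        }
    }

    while (inflight > 0) {                   /* final drain, as at j == iter_count - 1 */
        wait_request(complete_idx);
        complete_idx = (complete_idx + 1) % req_max_inflight;
        inflight--;
    }

    printf("last posted slot %d, completed through slot %d\n", post_idx, complete_idx);
    return 0;
}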
a17f9dfbd2604ed0ff96cb9b2c7411d563c14c5c.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"

extern "C" {

__global__ void vectorAdd(const float *a, const float *b, float *c, int num)
{
    int i = blockDim.x * blockIdx.x + threadIdx.x;
    if (i < num) {
        c[i] = a[i] + b[i];
    }
}

__global__ void initImage(unsigned char *data, int cols, int rows)
{
    int x = threadIdx.x + blockIdx.x * blockDim.x;
    int y = threadIdx.y + blockIdx.y * blockDim.y;
    if (x >= cols || y >= rows) return;

    int offset = 3 * (x + y * cols);
    data[offset] = 255 * ((float)x / cols);
    data[offset + 1] = 255 * ((float)y / rows);
    data[offset + 2] = 128;
}

}
a17f9dfbd2604ed0ff96cb9b2c7411d563c14c5c.cu
extern "C" { __global__ void vectorAdd(const float *a, const float *b, float *c, int num) { int i = blockDim.x * blockIdx.x + threadIdx.x; if (i < num) { c[i] = a[i] + b[i]; } } __global__ void initImage(unsigned char *data, int cols, int rows) { int x = threadIdx.x + blockIdx.x * blockDim.x; int y = threadIdx.y + blockIdx.y * blockDim.y; if (x >= cols || y >= rows) return; int offset = 3 * (x + y * cols); data[offset] = 255 * ((float)x / cols); data[offset + 1] = 255 * ((float)y / rows); data[offset + 2] = 128; } }
9f3be504d7551c783b9626bd3160228df407a4ed.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "head.h" #define tpb 256 extern double *d_t; extern double *d_it; extern double *d_V; extern double *d_dV2; extern double *d_Vnew; extern double *d_m; extern double *d_h; extern double *d_jj; extern double *d_d; extern double *d_f; extern double *d_X; extern double *d_cai; extern double *d_m0; extern double *d_h0; extern double *d_jj0; extern double *d_d0; extern double *d_f0; extern double *d_X0; extern double *d_dVdt; extern double *dcai; __global__ void boundary(double *d_V){ int k = blockDim.x * blockIdx.x + threadIdx.x; if(k<nx){ d_V[(k+1)*(nx+2)] = d_V[(k+1)*(nx+2)+1]; d_V[(k+1)*(nx+2)+(nx+1)] = d_V[(k+1)*(nx+2)+nx]; d_V[k+1] = d_V[k+1+(nx+2)]; d_V[(ny+1)*(nx+2)+k+1] = d_V[ny*(nx+2)+k+1]; } } void bc(){ int bpg; //tpb = 256; bpg = (nx+tpb-1)/tpb; hipLaunchKernelGGL(( boundary), dim3(bpg), dim3(tpb), 0, 0, d_V); //hipDeviceSynchronize(); } __global__ void comp_dV2(double *d_V ,double *d_dV2){ int k = threadIdx.x + blockIdx.x * blockDim.x; if(k<nx*ny){ int i = (int)(k/nx); int id = k+(nx+2)+1+(2*i); d_dV2[k] = D*((d_V[id+1] + d_V[id-1] - 2*d_V[id]) / (dx*dx) + (d_V[id+(nx+2)] + d_V[id-(nx+2)] - 2*d_V[id])/(dy*dy)); } } void dV2(){ int bpg; //tpb = 256; bpg = (nx*ny+tpb-1)/tpb; hipLaunchKernelGGL(( comp_dV2), dim3(bpg), dim3(tpb), 0, 0, d_V, d_dV2); //hipDeviceSynchronize(); } __device__ void comp_it(double *d_V, double *d_m, double *d_h, double *d_jj, double *d_d, double *d_f, double *d_cai, double *dcai, double *d_X, double *d_it, double *d_m0, double *d_h0, double *d_jj0, double *d_d0, double *d_f0, double *d_X0, int I, int i, int k, double *d_t) { //int id = k+nx+2+1+2*j; d_it[k] = 0.0; //comp_ina double gna = 23; double ena = ((R*temp) / frdy)*log(nao / nai); double am = 0.32*(d_V[k+nx+2+1+2*i] + 47.13) / (1 - exp(-0.1*(d_V[k+nx+2+1+2*i] + 47.13))); double bm = 0.08*exp(-d_V[k+nx+2+1+2*i] / 11); double ah, bh, aj ,bj; if (d_V[k+nx+2+1+2*i] < -40.0) { ah = 0.135*exp((80 + d_V[k+nx+2+1+2*i]) / -6.8); bh = 3.56*exp(0.079*d_V[k+nx+2+1+2*i]) + 310000 * exp(0.35*d_V[k+nx+2+1+2*i]); aj = (-127140 * exp(0.2444*d_V[k+nx+2+1+2*i]) - 0.00003474*exp(-0.04391*d_V[k+nx+2+1+2*i]))* ((d_V[k+nx+2+1+2*i] + 37.78)/(1 + exp(0.311*(d_V[k+nx+2+1+2*i] + 79.23)))); bj = (0.1212*exp(-0.01052*d_V[k+nx+2+1+2*i])) / (1 + exp(-0.1378*(d_V[k+nx+2+1+2*i] + 40.14))); } else { ah = 0; bh = 1 / (0.13*(1 + exp((d_V[k+nx+2+1+2*i] + 10.66) / -11.1))); aj = 0; bj = (0.3*exp(-0.0000002535*d_V[k+nx+2+1+2*i])) / (1 + exp(-0.1*(d_V[k+nx+2+1+2*i] + 32))); } double mtau = 1 / (am + bm); double htau = 1 / (ah + bh); double jtau = 1 / (aj + bj); double mss = am*mtau; double hss = ah*htau; double jss = aj*jtau; d_m0[k] = mss - (mss - d_m[k])*exp(-d_t[k] / mtau); d_h0[k] = hss - (hss - d_h[k])*exp(-d_t[k] / htau); d_jj0[k] = jss - (jss - d_jj[k])*exp(-d_t[k] / jtau); d_it[k] += gna*d_m0[k] * d_m0[k] * d_m0[k] * d_h0[k] * d_jj0[k] * (d_V[k+nx+2+1+2*i] - ena); //comp_ical __shared__ double esi[tpb]; __shared__ double isi[tpb]; esi[I] = 7.7 - 13.0287*log(d_cai[k]); double ad = 50 * 0.095*exp(-0.01*(d_V[k+nx+2+1+2*i] - 5)) / (1 + exp(-0.072*(d_V[k+nx+2+1+2*i] - 5))); double bd = 50 * 0.07*exp(-0.017*(d_V[k+nx+2+1+2*i] + 44)) / (1 + exp(0.05*(d_V[k+nx+2+1+2*i] + 44))); double af = 50 * 0.012*exp(-0.008*(d_V[k+nx+2+1+2*i] + 28)) / (1 + exp(0.15*(d_V[k+nx+2+1+2*i] + 28))); double bf = 50 * 0.0065*exp(-0.02*(d_V[k+nx+2+1+2*i] + 30)) / (1 + exp(-0.2*(d_V[k+nx+2+1+2*i] + 30))); double taud = 1 / (ad + bd); double tauf = 1 / (af + bf); double 
dss = ad*taud; double fss = af*tauf; d_d0[k] = dss - (dss - d_d[k])*exp(-d_t[k] / taud); d_f0[k] = fss - (fss - d_f[k])*exp(-d_t[k] / tauf); isi[I] = 0.09*d_d0[k] * d_f0[k] * (d_V[k+nx+2+1+2*i] - esi[I]); dcai[k] = -0.0001*isi[I] + 0.07*(0.0001 - d_cai[k]); //d_cai[k] = d_cai[k] + dcai*dt; d_it[k] = d_it[k] + isi[I]; //comp_ik double gk = 0.282*sqrt(ko / 5.4); double ek = ((R*temp) / frdy)*log(ko / ki); //double prnak = 0.01833; //ek = ((R*temp) / frdy)*log((ko + prnak*nao) / (ki + prnak*nai)); double ax = 50 * 0.0005*exp(0.083*(d_V[k+nx+2+1+2*i] + 50)) / (1 + exp(0.057*(d_V[k+nx+2+1+2*i] + 50))); double bx = 50 * 0.0013*exp(-0.06*(d_V[k+nx+2+1+2*i] + 20)) / (1 + exp(-0.04*(d_V[k+nx+2+1+2*i] + 20))); double taux = 1 / (ax + bx); double xss = ax*taux; d_X0[k] = xss - (xss - d_X[k])*exp(-d_t[k] / taux); double Xi; if (d_V[k+nx+2+1+2*i] > -100) { Xi = 2.837*(exp(0.04*(d_V[k+nx+2+1+2*i] + 77)) - 1)/ ((d_V[k+nx+2+1+2*i] + 77)*exp(0.04*(d_V[k+nx+2+1+2*i] + 35))); } else { Xi = 1; } d_it[k] += gk*d_X0[k] * Xi*(d_V[k+nx+2+1+2*i] - ek); //comp_ik1 double gk1 = 0.6047*(sqrt(ko / 5.4)); double ek1 = ((R*temp) / frdy)*log(ko / ki); double ak1 = 1.02 / (1 + exp(0.2385*(d_V[k+nx+2+1+2*i] - ek1 - 59.215))); double bk1 = (0.49124*exp(0.08032*(d_V[k+nx+2+1+2*i] - ek1 + 5.476))+ exp(0.06175*(d_V[k+nx+2+1+2*i] - ek1 - 594.31))) /(1 + exp(-0.5143*(d_V[k+nx+2+1+2*i] - ek1 + 4.753))); double K1ss = ak1 / (ak1 + bk1); d_it[k] += gk1*K1ss*(d_V[k+nx+2+1+2*i] - ek1); //comp_ikp double gkp = 0.0183; double ekp = ((R*temp) / frdy)*log(ko / ki); double kp = 1 / (1 + exp((7.488 - d_V[k+nx+2+1+2*i]) / 5.98)); d_it[k] += gkp*kp*(d_V[k+nx+2+1+2*i] - ekp); //comp_ib d_it[k] += 0.03921*(d_V[k+nx+2+1+2*i] + 59.87); } __global__ void comp_dVdt(double *d_V, double *d_m, double *d_h, double *d_jj, double *d_d, double *d_f, double *d_cai, double *dcai, double *d_X, double *d_it, double *d_m0, double *d_h0, double *d_jj0, double *d_d0, double *d_f0, double *d_X0, double *d_dVdt, double *d_t){ int k = threadIdx.x + blockIdx.x * blockDim.x; int I = threadIdx.x; if(k<nx*ny){ int i = (int)(k/nx); d_t[k] = dt_max; comp_it(d_V, d_m, d_h, d_jj, d_d, d_f, d_cai, dcai, d_X, d_it, d_m0, d_h0, d_jj0, d_d0, d_f0, d_X0, I, i, k, d_t); d_dVdt[k] = -d_it[k]; } } void dVdt(){ int bpg; bpg = (nx*ny+tpb-1)/tpb; hipLaunchKernelGGL(( comp_dVdt), dim3(bpg), dim3(tpb), 0, 0, d_V, d_m, d_h, d_jj, d_d, d_f, d_cai, dcai, d_X, d_it, d_m0, d_h0, d_jj0, d_d0, d_f0, d_X0, d_dVdt, d_t); } __global__ void plane_waves(double *d_dVdt){ int k = threadIdx.x + blockIdx.x * blockDim.x; if(k<ny*5){ int i, j, id; i = (int)(k/5); j = k-i*5; id = i*nx+j; d_dVdt[id] = d_dVdt[id] + (-st); } } void stimu(){ int bpg; //int tpb; //tpb = 256; bpg = (ny*5+tpb-1)/tpb; hipLaunchKernelGGL(( plane_waves), dim3(bpg), dim3(tpb), 0, 0, d_dVdt); //hipDeviceSynchronize(); } __device__ void gate(double *d_m, double *d_h, double *d_jj, double *d_d, double *d_f, double *d_X, double *d_m0, double *d_h0, double *d_jj0, double *d_d0, double *d_f0, double *d_X0, int k){ d_m[k] = d_m0[k]; d_h[k] = d_h0[k]; d_jj[k] = d_jj0[k]; d_d[k] = d_d0[k]; d_f[k] = d_f0[k]; d_X[k] = d_X0[k]; } __global__ void comp_ODE_stim(double *d_V, double *d_m, double *d_h, double *d_jj, double *d_d, double *d_f, double *d_cai, double *dcai, double *d_X, double *d_it, double *d_m0, double *d_h0, double *d_jj0, double *d_d0, double *d_f0, double *d_X0, double *d_dVdt, double *d_t){ int k = threadIdx.x + blockIdx.x * blockDim.x; int I = threadIdx.x; if(k<nx*ny){ int i = (int)(k/nx); int j = k - i*nx; int id = i*nx+j; int 
k1, k0, ttt; int vid = (i+1)*(nx+2)+j+1; if(d_dVdt[id]>0){ k0 = 5; }else{ k0 = 1; } k1 = k0 + (int)(fabs(d_dVdt[id]) + 0.5); if (k1 >(int)(dt_max / dt_min)){ k1 = (int)(dt_max / dt_min); } d_t[id] = dt_max / k1; for (ttt = 0; ttt < k1; ttt++){ //from t to t+dt_max, t=t+dt comp_it(d_V, d_m, d_h, d_jj, d_d, d_f, d_cai, dcai, d_X, d_it, d_m0, d_h0, d_jj0, d_d0, d_f0, d_X0, I, i, id, d_t); gate(d_m, d_h, d_jj, d_d, d_f, d_X, d_m0, d_h0, d_jj0, d_d0, d_f0, d_X0, id); d_cai[id] = d_cai[id] + dcai[id]*d_t[id];//renew Cai if(i>0 && i<5){ d_dVdt[id] = -d_it[id] + (-st); }else{ d_dVdt[id] = -d_it[id]; } d_V[vid] = d_V[vid] + d_t[id]*d_dVdt[id]; } } } __global__ void comp_ODE(double *d_V, double *d_m, double *d_h, double *d_jj, double *d_d, double *d_f, double *d_cai, double *dcai, double *d_X, double *d_it, double *d_m0, double *d_h0, double *d_jj0, double *d_d0, double *d_f0, double *d_X0, double *d_dVdt, double *d_t, int num){ int k = threadIdx.x + blockIdx.x * blockDim.x; int I = threadIdx.x; if(k<nx*ny){ int i = (int)(k/nx); int j = k - i*nx; int id = i*nx+j; int k1, k0, ttt; int vid = (i+1)*(nx+2)+j+1; if(d_dVdt[id]>0){ k0 = 5; }else{ k0 = 1; } k1 = k0 + (int)(fabs(d_dVdt[id])+0.5); if (k1 >(int)(dt_max / dt_min)){ k1 = (int)(dt_max / dt_min); } d_t[id] = dt_max / k1; for (ttt = 0; ttt < k1; ttt++){ //from t to t+dt_max, t=t+dt comp_it(d_V, d_m, d_h, d_jj, d_d, d_f, d_cai, dcai, d_X, d_it, d_m0, d_h0, d_jj0, d_d0, d_f0, d_X0, I, i, id, d_t); gate(d_m, d_h, d_jj, d_d, d_f, d_X, d_m0, d_h0, d_jj0, d_d0, d_f0, d_X0, id); d_cai[id] = d_cai[id] + dcai[id]*d_t[id];//renew Cai d_dVdt[id] = -d_it[id]; d_V[vid] = d_V[vid] + d_t[id]*d_dVdt[id]; } } } void ODE_stim(){ int bpg; bpg = (nx*ny+tpb-1)/tpb; hipLaunchKernelGGL(( comp_ODE_stim), dim3(bpg), dim3(tpb), 0, 0, d_V, d_m, d_h, d_jj, d_d, d_f, d_cai, dcai, d_X, d_it, d_m0, d_h0, d_jj0, d_d0, d_f0, d_X0, d_dVdt, d_t); //bpg = ((nx-5)*ny+tpb-1)/tpb; //comp_ODE<<<bpg, tpb>>>(d_V, d_m, d_h, d_jj, d_d, d_f, d_cai, dcai, d_X, d_it, d_m0, d_h0, d_jj0, d_d0, d_f0, d_X0, d_dVdt, d_t, 5); } void ODE(){ int bpg; bpg = (nx*ny+tpb-1)/tpb; hipLaunchKernelGGL(( comp_ODE), dim3(bpg), dim3(tpb), 0, 0, d_V, d_m, d_h, d_jj, d_d, d_f, d_cai, dcai, d_X, d_it, d_m0, d_h0, d_jj0, d_d0, d_f0, d_X0, d_dVdt, d_t, 0); } __global__ void Euler(double *d_V, double *d_dV2, double *d_Vnew){ int k = threadIdx.x + blockIdx.x * blockDim.x; if(k<nx*ny){ int i = (int)(k/nx); d_Vnew[k] = d_V[k+nx+2+1+2*i] + dt_max/2 *d_dV2[k]; d_V[k+nx+2+1+2*i] = d_Vnew[k]; } } void Forward_Euler(){ int bpg; //int tpb; //tpb = 256; bpg = (nx*ny+tpb-1)/tpb; hipLaunchKernelGGL(( Euler), dim3(bpg), dim3(tpb), 0, 0, d_V, d_dV2, d_Vnew); //hipDeviceSynchronize(); }
9f3be504d7551c783b9626bd3160228df407a4ed.cu
#include "head.h" #define tpb 256 extern double *d_t; extern double *d_it; extern double *d_V; extern double *d_dV2; extern double *d_Vnew; extern double *d_m; extern double *d_h; extern double *d_jj; extern double *d_d; extern double *d_f; extern double *d_X; extern double *d_cai; extern double *d_m0; extern double *d_h0; extern double *d_jj0; extern double *d_d0; extern double *d_f0; extern double *d_X0; extern double *d_dVdt; extern double *dcai; __global__ void boundary(double *d_V){ int k = blockDim.x * blockIdx.x + threadIdx.x; if(k<nx){ d_V[(k+1)*(nx+2)] = d_V[(k+1)*(nx+2)+1]; d_V[(k+1)*(nx+2)+(nx+1)] = d_V[(k+1)*(nx+2)+nx]; d_V[k+1] = d_V[k+1+(nx+2)]; d_V[(ny+1)*(nx+2)+k+1] = d_V[ny*(nx+2)+k+1]; } } void bc(){ int bpg; //tpb = 256; bpg = (nx+tpb-1)/tpb; boundary<<<bpg, tpb>>>(d_V); //cudaDeviceSynchronize(); } __global__ void comp_dV2(double *d_V ,double *d_dV2){ int k = threadIdx.x + blockIdx.x * blockDim.x; if(k<nx*ny){ int i = (int)(k/nx); int id = k+(nx+2)+1+(2*i); d_dV2[k] = D*((d_V[id+1] + d_V[id-1] - 2*d_V[id]) / (dx*dx) + (d_V[id+(nx+2)] + d_V[id-(nx+2)] - 2*d_V[id])/(dy*dy)); } } void dV2(){ int bpg; //tpb = 256; bpg = (nx*ny+tpb-1)/tpb; comp_dV2<<<bpg, tpb>>>(d_V, d_dV2); //cudaDeviceSynchronize(); } __device__ void comp_it(double *d_V, double *d_m, double *d_h, double *d_jj, double *d_d, double *d_f, double *d_cai, double *dcai, double *d_X, double *d_it, double *d_m0, double *d_h0, double *d_jj0, double *d_d0, double *d_f0, double *d_X0, int I, int i, int k, double *d_t) { //int id = k+nx+2+1+2*j; d_it[k] = 0.0; //comp_ina double gna = 23; double ena = ((R*temp) / frdy)*log(nao / nai); double am = 0.32*(d_V[k+nx+2+1+2*i] + 47.13) / (1 - exp(-0.1*(d_V[k+nx+2+1+2*i] + 47.13))); double bm = 0.08*exp(-d_V[k+nx+2+1+2*i] / 11); double ah, bh, aj ,bj; if (d_V[k+nx+2+1+2*i] < -40.0) { ah = 0.135*exp((80 + d_V[k+nx+2+1+2*i]) / -6.8); bh = 3.56*exp(0.079*d_V[k+nx+2+1+2*i]) + 310000 * exp(0.35*d_V[k+nx+2+1+2*i]); aj = (-127140 * exp(0.2444*d_V[k+nx+2+1+2*i]) - 0.00003474*exp(-0.04391*d_V[k+nx+2+1+2*i]))* ((d_V[k+nx+2+1+2*i] + 37.78)/(1 + exp(0.311*(d_V[k+nx+2+1+2*i] + 79.23)))); bj = (0.1212*exp(-0.01052*d_V[k+nx+2+1+2*i])) / (1 + exp(-0.1378*(d_V[k+nx+2+1+2*i] + 40.14))); } else { ah = 0; bh = 1 / (0.13*(1 + exp((d_V[k+nx+2+1+2*i] + 10.66) / -11.1))); aj = 0; bj = (0.3*exp(-0.0000002535*d_V[k+nx+2+1+2*i])) / (1 + exp(-0.1*(d_V[k+nx+2+1+2*i] + 32))); } double mtau = 1 / (am + bm); double htau = 1 / (ah + bh); double jtau = 1 / (aj + bj); double mss = am*mtau; double hss = ah*htau; double jss = aj*jtau; d_m0[k] = mss - (mss - d_m[k])*exp(-d_t[k] / mtau); d_h0[k] = hss - (hss - d_h[k])*exp(-d_t[k] / htau); d_jj0[k] = jss - (jss - d_jj[k])*exp(-d_t[k] / jtau); d_it[k] += gna*d_m0[k] * d_m0[k] * d_m0[k] * d_h0[k] * d_jj0[k] * (d_V[k+nx+2+1+2*i] - ena); //comp_ical __shared__ double esi[tpb]; __shared__ double isi[tpb]; esi[I] = 7.7 - 13.0287*log(d_cai[k]); double ad = 50 * 0.095*exp(-0.01*(d_V[k+nx+2+1+2*i] - 5)) / (1 + exp(-0.072*(d_V[k+nx+2+1+2*i] - 5))); double bd = 50 * 0.07*exp(-0.017*(d_V[k+nx+2+1+2*i] + 44)) / (1 + exp(0.05*(d_V[k+nx+2+1+2*i] + 44))); double af = 50 * 0.012*exp(-0.008*(d_V[k+nx+2+1+2*i] + 28)) / (1 + exp(0.15*(d_V[k+nx+2+1+2*i] + 28))); double bf = 50 * 0.0065*exp(-0.02*(d_V[k+nx+2+1+2*i] + 30)) / (1 + exp(-0.2*(d_V[k+nx+2+1+2*i] + 30))); double taud = 1 / (ad + bd); double tauf = 1 / (af + bf); double dss = ad*taud; double fss = af*tauf; d_d0[k] = dss - (dss - d_d[k])*exp(-d_t[k] / taud); d_f0[k] = fss - (fss - d_f[k])*exp(-d_t[k] / tauf); isi[I] = 
0.09*d_d0[k] * d_f0[k] * (d_V[k+nx+2+1+2*i] - esi[I]); dcai[k] = -0.0001*isi[I] + 0.07*(0.0001 - d_cai[k]); //d_cai[k] = d_cai[k] + dcai*dt; d_it[k] = d_it[k] + isi[I]; //comp_ik double gk = 0.282*sqrt(ko / 5.4); double ek = ((R*temp) / frdy)*log(ko / ki); //double prnak = 0.01833; //ek = ((R*temp) / frdy)*log((ko + prnak*nao) / (ki + prnak*nai)); double ax = 50 * 0.0005*exp(0.083*(d_V[k+nx+2+1+2*i] + 50)) / (1 + exp(0.057*(d_V[k+nx+2+1+2*i] + 50))); double bx = 50 * 0.0013*exp(-0.06*(d_V[k+nx+2+1+2*i] + 20)) / (1 + exp(-0.04*(d_V[k+nx+2+1+2*i] + 20))); double taux = 1 / (ax + bx); double xss = ax*taux; d_X0[k] = xss - (xss - d_X[k])*exp(-d_t[k] / taux); double Xi; if (d_V[k+nx+2+1+2*i] > -100) { Xi = 2.837*(exp(0.04*(d_V[k+nx+2+1+2*i] + 77)) - 1)/ ((d_V[k+nx+2+1+2*i] + 77)*exp(0.04*(d_V[k+nx+2+1+2*i] + 35))); } else { Xi = 1; } d_it[k] += gk*d_X0[k] * Xi*(d_V[k+nx+2+1+2*i] - ek); //comp_ik1 double gk1 = 0.6047*(sqrt(ko / 5.4)); double ek1 = ((R*temp) / frdy)*log(ko / ki); double ak1 = 1.02 / (1 + exp(0.2385*(d_V[k+nx+2+1+2*i] - ek1 - 59.215))); double bk1 = (0.49124*exp(0.08032*(d_V[k+nx+2+1+2*i] - ek1 + 5.476))+ exp(0.06175*(d_V[k+nx+2+1+2*i] - ek1 - 594.31))) /(1 + exp(-0.5143*(d_V[k+nx+2+1+2*i] - ek1 + 4.753))); double K1ss = ak1 / (ak1 + bk1); d_it[k] += gk1*K1ss*(d_V[k+nx+2+1+2*i] - ek1); //comp_ikp double gkp = 0.0183; double ekp = ((R*temp) / frdy)*log(ko / ki); double kp = 1 / (1 + exp((7.488 - d_V[k+nx+2+1+2*i]) / 5.98)); d_it[k] += gkp*kp*(d_V[k+nx+2+1+2*i] - ekp); //comp_ib d_it[k] += 0.03921*(d_V[k+nx+2+1+2*i] + 59.87); } __global__ void comp_dVdt(double *d_V, double *d_m, double *d_h, double *d_jj, double *d_d, double *d_f, double *d_cai, double *dcai, double *d_X, double *d_it, double *d_m0, double *d_h0, double *d_jj0, double *d_d0, double *d_f0, double *d_X0, double *d_dVdt, double *d_t){ int k = threadIdx.x + blockIdx.x * blockDim.x; int I = threadIdx.x; if(k<nx*ny){ int i = (int)(k/nx); d_t[k] = dt_max; comp_it(d_V, d_m, d_h, d_jj, d_d, d_f, d_cai, dcai, d_X, d_it, d_m0, d_h0, d_jj0, d_d0, d_f0, d_X0, I, i, k, d_t); d_dVdt[k] = -d_it[k]; } } void dVdt(){ int bpg; bpg = (nx*ny+tpb-1)/tpb; comp_dVdt<<<bpg, tpb>>>(d_V, d_m, d_h, d_jj, d_d, d_f, d_cai, dcai, d_X, d_it, d_m0, d_h0, d_jj0, d_d0, d_f0, d_X0, d_dVdt, d_t); } __global__ void plane_waves(double *d_dVdt){ int k = threadIdx.x + blockIdx.x * blockDim.x; if(k<ny*5){ int i, j, id; i = (int)(k/5); j = k-i*5; id = i*nx+j; d_dVdt[id] = d_dVdt[id] + (-st); } } void stimu(){ int bpg; //int tpb; //tpb = 256; bpg = (ny*5+tpb-1)/tpb; plane_waves<<<bpg, tpb>>>(d_dVdt); //cudaDeviceSynchronize(); } __device__ void gate(double *d_m, double *d_h, double *d_jj, double *d_d, double *d_f, double *d_X, double *d_m0, double *d_h0, double *d_jj0, double *d_d0, double *d_f0, double *d_X0, int k){ d_m[k] = d_m0[k]; d_h[k] = d_h0[k]; d_jj[k] = d_jj0[k]; d_d[k] = d_d0[k]; d_f[k] = d_f0[k]; d_X[k] = d_X0[k]; } __global__ void comp_ODE_stim(double *d_V, double *d_m, double *d_h, double *d_jj, double *d_d, double *d_f, double *d_cai, double *dcai, double *d_X, double *d_it, double *d_m0, double *d_h0, double *d_jj0, double *d_d0, double *d_f0, double *d_X0, double *d_dVdt, double *d_t){ int k = threadIdx.x + blockIdx.x * blockDim.x; int I = threadIdx.x; if(k<nx*ny){ int i = (int)(k/nx); int j = k - i*nx; int id = i*nx+j; int k1, k0, ttt; int vid = (i+1)*(nx+2)+j+1; if(d_dVdt[id]>0){ k0 = 5; }else{ k0 = 1; } k1 = k0 + (int)(fabs(d_dVdt[id]) + 0.5); if (k1 >(int)(dt_max / dt_min)){ k1 = (int)(dt_max / dt_min); } d_t[id] = dt_max / k1; for (ttt = 
0; ttt < k1; ttt++){ //from t to t+dt_max, t=t+dt comp_it(d_V, d_m, d_h, d_jj, d_d, d_f, d_cai, dcai, d_X, d_it, d_m0, d_h0, d_jj0, d_d0, d_f0, d_X0, I, i, id, d_t); gate(d_m, d_h, d_jj, d_d, d_f, d_X, d_m0, d_h0, d_jj0, d_d0, d_f0, d_X0, id); d_cai[id] = d_cai[id] + dcai[id]*d_t[id];//renew Cai if(i>0 && i<5){ d_dVdt[id] = -d_it[id] + (-st); }else{ d_dVdt[id] = -d_it[id]; } d_V[vid] = d_V[vid] + d_t[id]*d_dVdt[id]; } } } __global__ void comp_ODE(double *d_V, double *d_m, double *d_h, double *d_jj, double *d_d, double *d_f, double *d_cai, double *dcai, double *d_X, double *d_it, double *d_m0, double *d_h0, double *d_jj0, double *d_d0, double *d_f0, double *d_X0, double *d_dVdt, double *d_t, int num){ int k = threadIdx.x + blockIdx.x * blockDim.x; int I = threadIdx.x; if(k<nx*ny){ int i = (int)(k/nx); int j = k - i*nx; int id = i*nx+j; int k1, k0, ttt; int vid = (i+1)*(nx+2)+j+1; if(d_dVdt[id]>0){ k0 = 5; }else{ k0 = 1; } k1 = k0 + (int)(fabs(d_dVdt[id])+0.5); if (k1 >(int)(dt_max / dt_min)){ k1 = (int)(dt_max / dt_min); } d_t[id] = dt_max / k1; for (ttt = 0; ttt < k1; ttt++){ //from t to t+dt_max, t=t+dt comp_it(d_V, d_m, d_h, d_jj, d_d, d_f, d_cai, dcai, d_X, d_it, d_m0, d_h0, d_jj0, d_d0, d_f0, d_X0, I, i, id, d_t); gate(d_m, d_h, d_jj, d_d, d_f, d_X, d_m0, d_h0, d_jj0, d_d0, d_f0, d_X0, id); d_cai[id] = d_cai[id] + dcai[id]*d_t[id];//renew Cai d_dVdt[id] = -d_it[id]; d_V[vid] = d_V[vid] + d_t[id]*d_dVdt[id]; } } } void ODE_stim(){ int bpg; bpg = (nx*ny+tpb-1)/tpb; comp_ODE_stim<<<bpg, tpb>>>(d_V, d_m, d_h, d_jj, d_d, d_f, d_cai, dcai, d_X, d_it, d_m0, d_h0, d_jj0, d_d0, d_f0, d_X0, d_dVdt, d_t); //bpg = ((nx-5)*ny+tpb-1)/tpb; //comp_ODE<<<bpg, tpb>>>(d_V, d_m, d_h, d_jj, d_d, d_f, d_cai, dcai, d_X, d_it, d_m0, d_h0, d_jj0, d_d0, d_f0, d_X0, d_dVdt, d_t, 5); } void ODE(){ int bpg; bpg = (nx*ny+tpb-1)/tpb; comp_ODE<<<bpg, tpb>>>(d_V, d_m, d_h, d_jj, d_d, d_f, d_cai, dcai, d_X, d_it, d_m0, d_h0, d_jj0, d_d0, d_f0, d_X0, d_dVdt, d_t, 0); } __global__ void Euler(double *d_V, double *d_dV2, double *d_Vnew){ int k = threadIdx.x + blockIdx.x * blockDim.x; if(k<nx*ny){ int i = (int)(k/nx); d_Vnew[k] = d_V[k+nx+2+1+2*i] + dt_max/2 *d_dV2[k]; d_V[k+nx+2+1+2*i] = d_Vnew[k]; } } void Forward_Euler(){ int bpg; //int tpb; //tpb = 256; bpg = (nx*ny+tpb-1)/tpb; Euler<<<bpg, tpb>>>(d_V, d_dV2, d_Vnew); //cudaDeviceSynchronize(); }
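The diffusion kernel comp_dV2 above flattens the interior (i, j) coordinates into k and jumps into the (nx+2) x (ny+2) halo-padded voltage array with the single expression id = k + (nx+2) + 1 + 2*i. The kernel below is an algebraically equivalent but more explicit spelling of the same 5-point stencil, kept only as a reading aid; it assumes the nx, ny, dx, dy and D constants from head.h, exactly as the original kernels do.

#include "head.h"   /* provides nx, ny, dx, dy, D, as in the original file */

__global__ void comp_dV2_explicit(const double *V, double *dV2)
{
    int k = threadIdx.x + blockIdx.x * blockDim.x;
    if (k >= nx * ny) return;

    int i  = k / nx;                        /* row in the interior grid              */
    int j  = k - i * nx;                    /* column in the interior grid           */
    int id = (i + 1) * (nx + 2) + (j + 1);  /* same cell in the padded array:        */
                                            /* (i+1)*(nx+2)+(j+1) == k+(nx+2)+1+2*i  */

    dV2[k] = D * ((V[id + 1] + V[id - 1] - 2.0 * V[id]) / (dx * dx) +
                  (V[id + (nx + 2)] + V[id - (nx + 2)] - 2.0 * V[id]) / (dy * dy));
}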
c66fe2fe215069bbbd10508f8f86331749552f21.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <stdlib.h> #include <iostream> #include <algorithm> #include "time.h" using namespace std; int factorial(int n) { return (n == 0 || n == 1) ? 1 : factorial(n-1) * n; } __global__ void calculate_edges(int *perms, int *children, int *stops, int *edges, int NUMNODES, int NUMPERMS) { /* Since the permutation array is a flattened 2D array which was NUMNODES wide and NUMPERMS long, then we must start at the begining of every row which would be offset by NUMNODES. */ int element = (blockIdx.x * blockDim.x + threadIdx.x) * NUMNODES; int total = NUMNODES * NUMPERMS; // make sure we do not exceed the size of the permutation array int edge_counter = 0; // keep track of where in the edge array we are putting the next edge label int last_index = 0; // keep track of the last index from the stop array. int edge_start = (blockIdx.x * blockDim.x + threadIdx.x) * (NUMNODES-1); // calculate where in the edge array we should begin placing labels if(element < total) { // Only go thorugh each NUMNODE group of labels for(int i = element; i < element + NUMNODES; i++) { // check for sentinel value of -1 if(stops[i % NUMNODES] != -1) { // If this is our first time we start at 0, otherwise we continue from the the last index for(int j = (last_index == 0) ? 0 : last_index+1; j<=stops[i % NUMNODES]; j++) // place the absolute difference of each end point into the edge array edges[edge_start + edge_counter++] = abs(perms[i] - perms[children[j] + element]); last_index = stops[i % NUMNODES]; } } } } __global__ void check_gracefulness(int *edges, int *graceful_labels, int NUMNODES, int NUMPERMS) { /* Go through edge array and check for any duplicates. If there are duplicates found, exit the loop and mark this label as being nongraceful , which is designated by a -1 in the label array. If no duplicates are found, the labeling is graceful and the index of the permutation is stored. 
*/ int element = (blockIdx.x * blockDim.x + threadIdx.x) * (NUMNODES-1); int total = NUMNODES * NUMPERMS; bool graceful = true; if(element < total) { for(int i = element; i < element + NUMNODES-1; i++) { int current = edges[i]; for(int j = i + 1; j < element + NUMNODES-1; j++) { if(current == edges[j]) { graceful = false; break; } } if(!graceful) break; } if(graceful) graceful_labels[element / (NUMNODES-1)] = element/(NUMNODES-1)*NUMNODES; if(!graceful) graceful_labels[element / (NUMNODES-1)] = -1; } } void execute_gpu(int perms[], int children[], int stops[], int graceful_labels[], int edges[], int NUMNODES, int NUMPERMS) { int *d_perms, *d_children, *d_graceful_labels, *d_stops, *d_edges; // define sizes for convenience const size_t perm_size = NUMNODES*NUMPERMS*sizeof(int); const size_t edge_size = (NUMNODES-1)*NUMPERMS*sizeof(int); const size_t child_size = (NUMNODES-1)*sizeof(int); const size_t stop_size = NUMNODES*sizeof(int); const size_t label_size = NUMPERMS*sizeof(int); // 768 cores available on my home computer // 1024 cores available on starship int numCores = (NUMNODES * NUMPERMS)/ 1024 + 1; int numThreads = 1024; // Allocate memory on GPU hipMalloc(&d_perms, perm_size); hipMalloc(&d_edges, edge_size); hipMalloc(&d_children, child_size); hipMalloc(&d_stops, stop_size); hipMalloc(&d_graceful_labels, label_size); // Copy over necessary arrays to GPU hipMemcpy(d_perms, perms, perm_size, hipMemcpyHostToDevice); hipMemcpy(d_children, children, child_size, hipMemcpyHostToDevice); hipMemcpy(d_stops, stops, stop_size, hipMemcpyHostToDevice); // Calculate edge labelings for each permutation hipLaunchKernelGGL(( calculate_edges), dim3(numCores), dim3(numThreads), 0, 0, d_perms, d_children, d_stops, d_edges, NUMNODES, NUMPERMS); // Don't need these for the next step, so just free the memory up. hipFree(&d_perms); hipFree(&d_stops); hipFree(&d_children); // For debugging purposes only // hipMemcpy(edges, d_edges, edge_size, hipMemcpyDeviceToHost); // Now check the gracefulness of the given edge labelings. 
hipLaunchKernelGGL(( check_gracefulness), dim3(numCores), dim3(numThreads), 0, 0, d_edges, d_graceful_labels, NUMNODES, NUMPERMS); // Copy back the evaluated labelings hipMemcpy(graceful_labels, d_graceful_labels, label_size, hipMemcpyDeviceToHost); // Free up the rest of the memory hipFree(&d_graceful_labels); hipFree(&d_edges); } int main() { //const int NUMNODES = 3; //const int NUMPERMS = factorial(NUMNODES); //int stops [] = {1, -1, -1}; // const int NUMNODES = 9; // const int NUMPERMS = 100500; // int stops [] = {2,5,7,-1,-1,-1,-1,-1,-1}; //const int NUMNODES = 8; //const int NUMPERMS = 10000*NUMNODES; //int stops [] = {1,3,5,-1,-1,-1,6,-1}; //const int NUMNODES = 11; //const int NUMPERMS = 1000*NUMNODES; //int stops [] = {2,-1,5,-1, -1,6,-1,7,9,-1,-1}; const int NUMNODES = 12; const int NUMPERMS = 1000*NUMNODES; int stops [] = {1, 4, 6, -1, -1, -1, 9, -1, -1, 10, -1}; int found = 0; bool has_next = false; bool has_started = false; int children[NUMNODES-1], labels[NUMNODES]; float iter = 0; // generate both children and label array for(int i = 0; i < NUMNODES; i++) { labels[i] = i; if(i < NUMNODES - 1) children[i] = i+1; } do{ int edges[NUMPERMS*(NUMNODES-1)], perms[NUMPERMS*NUMNODES], graceful_labels[NUMPERMS]; // create all permutations of given nodes for(int i = 0; i < NUMPERMS; i++) { for(int j = 0; j < NUMNODES; j++) { perms[i*NUMNODES+j] = labels[j]; //edges[i*NUMNODES+j] = 0; } graceful_labels[i] = -1; has_next = next_permutation(labels, labels+NUMNODES); if(!has_next) break; } if(!has_started) { has_started = true; init(NUMNODES); } execute_gpu(perms, children, stops, graceful_labels, edges, NUMNODES, NUMPERMS); for(int i = 0; i < NUMPERMS; i++) { if(graceful_labels[i] != -1) { for(int j = 0; j < NUMNODES; j++) cout << perms[graceful_labels[i] + j] << " "; cout << endl; found=1; break; } } iter++; }while(has_next && found != 1); finish(NUMNODES); cout << "Found " << found << " graceful labelings." << endl; cout << "Took " << iter << " iterations" << endl; return 0; }
c66fe2fe215069bbbd10508f8f86331749552f21.cu
#include <stdlib.h> #include <iostream> #include <algorithm> #include "time.h" using namespace std; int factorial(int n) { return (n == 0 || n == 1) ? 1 : factorial(n-1) * n; } __global__ void calculate_edges(int *perms, int *children, int *stops, int *edges, int NUMNODES, int NUMPERMS) { /* Since the permutation array is a flattened 2D array which was NUMNODES wide and NUMPERMS long, then we must start at the begining of every row which would be offset by NUMNODES. */ int element = (blockIdx.x * blockDim.x + threadIdx.x) * NUMNODES; int total = NUMNODES * NUMPERMS; // make sure we do not exceed the size of the permutation array int edge_counter = 0; // keep track of where in the edge array we are putting the next edge label int last_index = 0; // keep track of the last index from the stop array. int edge_start = (blockIdx.x * blockDim.x + threadIdx.x) * (NUMNODES-1); // calculate where in the edge array we should begin placing labels if(element < total) { // Only go thorugh each NUMNODE group of labels for(int i = element; i < element + NUMNODES; i++) { // check for sentinel value of -1 if(stops[i % NUMNODES] != -1) { // If this is our first time we start at 0, otherwise we continue from the the last index for(int j = (last_index == 0) ? 0 : last_index+1; j<=stops[i % NUMNODES]; j++) // place the absolute difference of each end point into the edge array edges[edge_start + edge_counter++] = abs(perms[i] - perms[children[j] + element]); last_index = stops[i % NUMNODES]; } } } } __global__ void check_gracefulness(int *edges, int *graceful_labels, int NUMNODES, int NUMPERMS) { /* Go through edge array and check for any duplicates. If there are duplicates found, exit the loop and mark this label as being nongraceful , which is designated by a -1 in the label array. If no duplicates are found, the labeling is graceful and the index of the permutation is stored. 
*/ int element = (blockIdx.x * blockDim.x + threadIdx.x) * (NUMNODES-1); int total = NUMNODES * NUMPERMS; bool graceful = true; if(element < total) { for(int i = element; i < element + NUMNODES-1; i++) { int current = edges[i]; for(int j = i + 1; j < element + NUMNODES-1; j++) { if(current == edges[j]) { graceful = false; break; } } if(!graceful) break; } if(graceful) graceful_labels[element / (NUMNODES-1)] = element/(NUMNODES-1)*NUMNODES; if(!graceful) graceful_labels[element / (NUMNODES-1)] = -1; } } void execute_gpu(int perms[], int children[], int stops[], int graceful_labels[], int edges[], int NUMNODES, int NUMPERMS) { int *d_perms, *d_children, *d_graceful_labels, *d_stops, *d_edges; // define sizes for convenience const size_t perm_size = NUMNODES*NUMPERMS*sizeof(int); const size_t edge_size = (NUMNODES-1)*NUMPERMS*sizeof(int); const size_t child_size = (NUMNODES-1)*sizeof(int); const size_t stop_size = NUMNODES*sizeof(int); const size_t label_size = NUMPERMS*sizeof(int); // 768 cores available on my home computer // 1024 cores available on starship int numCores = (NUMNODES * NUMPERMS)/ 1024 + 1; int numThreads = 1024; // Allocate memory on GPU cudaMalloc(&d_perms, perm_size); cudaMalloc(&d_edges, edge_size); cudaMalloc(&d_children, child_size); cudaMalloc(&d_stops, stop_size); cudaMalloc(&d_graceful_labels, label_size); // Copy over necessary arrays to GPU cudaMemcpy(d_perms, perms, perm_size, cudaMemcpyHostToDevice); cudaMemcpy(d_children, children, child_size, cudaMemcpyHostToDevice); cudaMemcpy(d_stops, stops, stop_size, cudaMemcpyHostToDevice); // Calculate edge labelings for each permutation calculate_edges<<<numCores, numThreads>>>(d_perms, d_children, d_stops, d_edges, NUMNODES, NUMPERMS); // Don't need these for the next step, so just free the memory up. cudaFree(&d_perms); cudaFree(&d_stops); cudaFree(&d_children); // For debugging purposes only // cudaMemcpy(edges, d_edges, edge_size, cudaMemcpyDeviceToHost); // Now check the gracefulness of the given edge labelings. 
check_gracefulness<<<numCores, numThreads>>>(d_edges, d_graceful_labels, NUMNODES, NUMPERMS); // Copy back the evaluated labelings cudaMemcpy(graceful_labels, d_graceful_labels, label_size, cudaMemcpyDeviceToHost); // Free up the rest of the memory cudaFree(&d_graceful_labels); cudaFree(&d_edges); } int main() { //const int NUMNODES = 3; //const int NUMPERMS = factorial(NUMNODES); //int stops [] = {1, -1, -1}; // const int NUMNODES = 9; // const int NUMPERMS = 100500; // int stops [] = {2,5,7,-1,-1,-1,-1,-1,-1}; //const int NUMNODES = 8; //const int NUMPERMS = 10000*NUMNODES; //int stops [] = {1,3,5,-1,-1,-1,6,-1}; //const int NUMNODES = 11; //const int NUMPERMS = 1000*NUMNODES; //int stops [] = {2,-1,5,-1, -1,6,-1,7,9,-1,-1}; const int NUMNODES = 12; const int NUMPERMS = 1000*NUMNODES; int stops [] = {1, 4, 6, -1, -1, -1, 9, -1, -1, 10, -1}; int found = 0; bool has_next = false; bool has_started = false; int children[NUMNODES-1], labels[NUMNODES]; float iter = 0; // generate both children and label array for(int i = 0; i < NUMNODES; i++) { labels[i] = i; if(i < NUMNODES - 1) children[i] = i+1; } do{ int edges[NUMPERMS*(NUMNODES-1)], perms[NUMPERMS*NUMNODES], graceful_labels[NUMPERMS]; // create all permutations of given nodes for(int i = 0; i < NUMPERMS; i++) { for(int j = 0; j < NUMNODES; j++) { perms[i*NUMNODES+j] = labels[j]; //edges[i*NUMNODES+j] = 0; } graceful_labels[i] = -1; has_next = next_permutation(labels, labels+NUMNODES); if(!has_next) break; } if(!has_started) { has_started = true; init(NUMNODES); } execute_gpu(perms, children, stops, graceful_labels, edges, NUMNODES, NUMPERMS); for(int i = 0; i < NUMPERMS; i++) { if(graceful_labels[i] != -1) { for(int j = 0; j < NUMNODES; j++) cout << perms[graceful_labels[i] + j] << " "; cout << endl; found=1; break; } } iter++; }while(has_next && found != 1); finish(NUMNODES); cout << "Found " << found << " graceful labelings." << endl; cout << "Took " << iter << " iterations" << endl; return 0; }
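Two small remarks on execute_gpu()/main() above. First, cudaFree (and hipFree in the twin file) expects the device pointer value, not its address, so the cudaFree(&d_perms)-style calls return an error and those buffers are only reclaimed at process exit. Second, the duplicate test in check_gracefulness is easy to validate on the host; the stand-alone sketch below does so for the simple path-graph case (children[i] = i+1 with no branching), whereas the kernel handles general trees through the children/stops encoding. The example labeling is chosen by hand and is illustrative only.

#include <cstdio>
#include <cstdlib>

int main()
{
    const int NUMNODES = 5;
    int labels[NUMNODES] = {0, 4, 1, 3, 2};   /* one candidate vertex labeling */
    int edges[NUMNODES - 1];

    /* path graph: one edge between consecutive vertices */
    for (int i = 0; i < NUMNODES - 1; i++)
        edges[i] = abs(labels[i] - labels[i + 1]);

    /* graceful <=> all edge labels are distinct (the same all-pairs test
       the check_gracefulness kernel runs per permutation) */
    bool graceful = true;
    for (int i = 0; i < NUMNODES - 1 && graceful; i++)
        for (int j = i + 1; j < NUMNODES - 1; j++)
            if (edges[i] == edges[j]) { graceful = false; break; }

    printf("labeling is %sgraceful\n", graceful ? "" : "not ");
    return 0;
}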
55b68f6d07177447bb8425857951181488c3a864.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #ifndef _IMAGEPROCESSING_KERNEL #define _IMAGEPROCESSING_KERNEL //#include <helper_math.h> //#include <helper_functions.h> #include <cstdio> #include "../include/timer.h" ///**************** CUDA useful functiions *****************/// /// Useful to read Error from CUDA Calls #define CUDA_CALL(x) {if((x) != hipSuccess){ \ printf("CUDA error at %s:%d\n",__FILE__,__LINE__); \ printf(" %s\n", hipGetErrorString(hipGetLastError())); \ exit(EXIT_FAILURE);}} __global__ void invert(unsigned char *d_inred, unsigned char *d_ingreen, unsigned char *d_inblue, unsigned char *d_outred, unsigned char *d_outgreen, unsigned char *d_outblue, int imageSize) { // Global thread index int threadID = threadIdx.x + blockIdx.x * blockDim.x; if(threadID < imageSize) { d_outred[threadID] = (unsigned char) 255 - d_inred[threadID]; d_outgreen[threadID] = (unsigned char) 255 - d_ingreen[threadID]; d_outblue[threadID] = (unsigned char) 255 - d_inblue[threadID]; } } __global__ void grayscale(unsigned char* d_inred, unsigned char* d_ingreen, unsigned char* d_inblue, unsigned char* d_outgs, int imageSize) { // Global thread index int threadID = threadIdx.x + blockIdx.x * blockDim.x; if(threadID < imageSize) { d_outgs[threadID] = 0.21*d_inred[threadID] + 0.72*d_ingreen[threadID] + 0.07*d_inblue[threadID]; } } extern "C" void executeKernelInvert( unsigned char* h_outred, unsigned char* h_outgreen, unsigned char* h_outblue, unsigned char* d_inred, unsigned char* d_ingreen, unsigned char* d_inblue, unsigned char* d_outred, unsigned char* d_outgreen, unsigned char* d_outblue, int imageSize, size_t sizePixelsArray) { /// We're working with 1D size for blocks and grids /// Get the maximun block size from our device /*hipDeviceProp_t prop; hipGetDeviceProperties(&prop, 0);*/ //cudaDevProp prop; //int threadsPerBlock = prop.maxThreadsPerBlock; int threadsPerBlock = 128; printf("MaxThreadsPerBlock: %d \n", threadsPerBlock); int gridSize = (imageSize + threadsPerBlock-1)/threadsPerBlock; printf("CUDA kernel launch with %d blocks of %d threads\n", gridSize, threadsPerBlock); /*printf("Device in\n"); for(int i = 0; i < imageSize ; i++){ printf("%d %d %d\n", (int)d_inred[i], (int)d_ingreen[i], (int)d_inblue[i] ); } printf("Device out\n"); for(int i = 0; i < imageSize ; i++){ printf("%d %d %d\n", (int)d_outred[i], (int)d_outgreen[i], (int)d_outblue[i] ); }*/ hipLaunchKernelGGL(( invert), dim3(gridSize), dim3(threadsPerBlock), 0, 0, d_inred, d_ingreen, d_inblue, d_outred, d_outgreen, d_outblue, imageSize); CUDA_CALL(hipMemcpy(h_outred, d_outred, sizePixelsArray,hipMemcpyDeviceToHost)); CUDA_CALL(hipMemcpy(h_outgreen, d_outgreen, sizePixelsArray,hipMemcpyDeviceToHost)); CUDA_CALL(hipMemcpy(h_outblue, d_outblue, sizePixelsArray,hipMemcpyDeviceToHost)); /*printf("\nAfter\n"); for(int i = 0; i < imageSize ; i++){ printf("%d %d %d\n", h_outred[i], h_outgreen[i], h_outblue[i] ); }*/ } extern "C" void executeKernelGrayScale( unsigned char* h_outgs, unsigned char* d_inred, unsigned char* d_ingreen, unsigned char* d_inblue, unsigned char* d_outgs, int imageSize, size_t sizePixelsArray){ int threadsPerBlock = 128; printf("MaxThreadsPerBlock: %d \n", threadsPerBlock); int gridSize = (imageSize + threadsPerBlock-1)/threadsPerBlock; printf("CUDA kernel launch with %d blocks of %d threads\n", gridSize, threadsPerBlock); hipLaunchKernelGGL(( grayscale), dim3(gridSize), dim3(threadsPerBlock), 0, 0, d_inred, d_ingreen, d_inblue, d_outgs, imageSize); 
CUDA_CALL(hipMemcpy(h_outgs, d_outgs, sizePixelsArray,hipMemcpyDeviceToHost)); } /*extern "C" void executeKernelBinary( unsigned char* h_outgs, unsigned char* d_inred, unsigned char* d_ingreen, unsigned char* d_inblue, unsigned char* d_outgs, int imageSize, size_t sizePixelsArray){ int threadsPerBlock = 128; printf("MaxThreadsPerBlock: %d \n", threadsPerBlock); int gridSize = (imageSize + threadsPerBlock-1)/threadsPerBlock; printf("CUDA kernel launch with %d blocks of %d threads\n", gridSize, threadsPerBlock); hipLaunchKernelGGL(( grayscale), dim3(gridSize), dim3(threadsPerBlock), 0, 0, d_inred, d_ingreen, d_inblue, d_outgs, imageSize); binary<<<gridSize, threadsPerBlock>>>(d_outgs, d_outbinary, imageSize); }*/ #endif
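For readers comparing the hipified file above with its CUDA original below, here is a minimal sketch of the two launch spellings side by side. launchInvertCUDA is a hypothetical wrapper that appears in neither file and assumes it sits in the same translation unit as the invert kernel; the HIP form quoted in the comment is the one hipify emits above.

// Hypothetical wrapper, not part of either file; assumes the invert kernel above is in scope.
static void launchInvertCUDA(unsigned char* d_inred, unsigned char* d_ingreen,
                             unsigned char* d_inblue, unsigned char* d_outred,
                             unsigned char* d_outgreen, unsigned char* d_outblue,
                             int imageSize, int threadsPerBlock) {
  int gridSize = (imageSize + threadsPerBlock - 1) / threadsPerBlock;
  // CUDA triple-chevron spelling, as in the .cu file below:
  invert<<<gridSize, threadsPerBlock>>>(d_inred, d_ingreen, d_inblue,
                                        d_outred, d_outgreen, d_outblue, imageSize);
  // hipify rewrites the same launch as (see the .hip file above):
  //   hipLaunchKernelGGL(( invert), dim3(gridSize), dim3(threadsPerBlock), 0, 0,
  //                      d_inred, d_ingreen, d_inblue,
  //                      d_outred, d_outgreen, d_outblue, imageSize);
  // The two extra 0 arguments are the dynamic shared-memory size and the stream.
}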
55b68f6d07177447bb8425857951181488c3a864.cu
#ifndef _IMAGEPROCESSING_KERNEL #define _IMAGEPROCESSING_KERNEL //#include <helper_math.h> //#include <helper_functions.h> #include <cstdio> #include "../include/timer.h" ///**************** CUDA useful functions *****************/// /// Useful to read Error from CUDA Calls #define CUDA_CALL(x) {if((x) != cudaSuccess){ \ printf("CUDA error at %s:%d\n",__FILE__,__LINE__); \ printf(" %s\n", cudaGetErrorString(cudaGetLastError())); \ exit(EXIT_FAILURE);}} __global__ void invert(unsigned char *d_inred, unsigned char *d_ingreen, unsigned char *d_inblue, unsigned char *d_outred, unsigned char *d_outgreen, unsigned char *d_outblue, int imageSize) { // Global thread index int threadID = threadIdx.x + blockIdx.x * blockDim.x; if(threadID < imageSize) { d_outred[threadID] = (unsigned char) 255 - d_inred[threadID]; d_outgreen[threadID] = (unsigned char) 255 - d_ingreen[threadID]; d_outblue[threadID] = (unsigned char) 255 - d_inblue[threadID]; } } __global__ void grayscale(unsigned char* d_inred, unsigned char* d_ingreen, unsigned char* d_inblue, unsigned char* d_outgs, int imageSize) { // Global thread index int threadID = threadIdx.x + blockIdx.x * blockDim.x; if(threadID < imageSize) { d_outgs[threadID] = 0.21*d_inred[threadID] + 0.72*d_ingreen[threadID] + 0.07*d_inblue[threadID]; } } extern "C" void executeKernelInvert( unsigned char* h_outred, unsigned char* h_outgreen, unsigned char* h_outblue, unsigned char* d_inred, unsigned char* d_ingreen, unsigned char* d_inblue, unsigned char* d_outred, unsigned char* d_outgreen, unsigned char* d_outblue, int imageSize, size_t sizePixelsArray) { /// We're working with 1D size for blocks and grids /// Get the maximum block size from our device /*cudaDeviceProp prop; cudaGetDeviceProperties(&prop, 0);*/ //cudaDevProp prop; //int threadsPerBlock = prop.maxThreadsPerBlock; int threadsPerBlock = 128; printf("MaxThreadsPerBlock: %d \n", threadsPerBlock); int gridSize = (imageSize + threadsPerBlock-1)/threadsPerBlock; printf("CUDA kernel launch with %d blocks of %d threads\n", gridSize, threadsPerBlock); /*printf("Device in\n"); for(int i = 0; i < imageSize ; i++){ printf("%d %d %d\n", (int)d_inred[i], (int)d_ingreen[i], (int)d_inblue[i] ); } printf("Device out\n"); for(int i = 0; i < imageSize ; i++){ printf("%d %d %d\n", (int)d_outred[i], (int)d_outgreen[i], (int)d_outblue[i] ); }*/ invert<<<gridSize, threadsPerBlock>>>(d_inred, d_ingreen, d_inblue, d_outred, d_outgreen, d_outblue, imageSize); CUDA_CALL(cudaMemcpy(h_outred, d_outred, sizePixelsArray,cudaMemcpyDeviceToHost)); CUDA_CALL(cudaMemcpy(h_outgreen, d_outgreen, sizePixelsArray,cudaMemcpyDeviceToHost)); CUDA_CALL(cudaMemcpy(h_outblue, d_outblue, sizePixelsArray,cudaMemcpyDeviceToHost)); /*printf("\nAfter\n"); for(int i = 0; i < imageSize ; i++){ printf("%d %d %d\n", h_outred[i], h_outgreen[i], h_outblue[i] ); }*/ } extern "C" void executeKernelGrayScale( unsigned char* h_outgs, unsigned char* d_inred, unsigned char* d_ingreen, unsigned char* d_inblue, unsigned char* d_outgs, int imageSize, size_t sizePixelsArray){ int threadsPerBlock = 128; printf("MaxThreadsPerBlock: %d \n", threadsPerBlock); int gridSize = (imageSize + threadsPerBlock-1)/threadsPerBlock; printf("CUDA kernel launch with %d blocks of %d threads\n", gridSize, threadsPerBlock); grayscale<<<gridSize, threadsPerBlock>>>(d_inred, d_ingreen, d_inblue, d_outgs, imageSize); CUDA_CALL(cudaMemcpy(h_outgs, d_outgs, sizePixelsArray,cudaMemcpyDeviceToHost)); } /*extern "C" void executeKernelBinary( unsigned char* h_outgs, unsigned char* d_inred, 
unsigned char* d_ingreen, unsigned char* d_inblue, unsigned char* d_outgs, int imageSize, size_t sizePixelsArray){ int threadsPerBlock = 128; printf("MaxThreadsPerBlock: %d \n", threadsPerBlock); int gridSize = (imageSize + threadsPerBlock-1)/threadsPerBlock; printf("CUDA kernel launch with %d blocks of %d threads\n", gridSize, threadsPerBlock); grayscale<<<gridSize, threadsPerBlock>>>(d_inred, d_ingreen, d_inblue, d_outgs, imageSize); binary<<<gridSize, threadsPerBlock>>>(d_outgs, d_outbinary, imageSize); }*/ #endif
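The commented-out lines in executeKernelInvert hint at querying the device for its thread limit rather than hard-coding threadsPerBlock = 128. Below is a minimal sketch of that query, assuming device 0; pickThreadsPerBlock and gridSizeFor are illustrative helper names that do not appear in the original file, and 128 is kept only as a fallback.

#include <cuda_runtime.h>

// Query the device limit instead of hard-coding threadsPerBlock = 128.
static int pickThreadsPerBlock(int device) {
  cudaDeviceProp prop;
  if (cudaGetDeviceProperties(&prop, device) != cudaSuccess) {
    return 128;  // fall back to the value hard-coded above
  }
  return prop.maxThreadsPerBlock;  // typically 1024 on current GPUs
}

// Grid-size computation matching the launches above (ceiling division).
static int gridSizeFor(int imageSize, int threadsPerBlock) {
  return (imageSize + threadsPerBlock - 1) / threadsPerBlock;
}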
ccc83353298787c25cbcd4735d826366bbc78886.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <cfloat> #include <vector> #include "caffe/layers/eltwise_layer.hpp" #include "caffe/util/math_functions.hpp" namespace caffe { template <typename Dtype> __global__ void MaxForward(const int nthreads, const Dtype* bottom_data_a, const Dtype* bottom_data_b, const int blob_idx, Dtype* top_data, int* mask) { CUDA_KERNEL_LOOP(index, nthreads) { Dtype maxval = -FLT_MAX; int maxidx = -1; if (bottom_data_a[index] > bottom_data_b[index]) { // only update for very first bottom_data blob (blob_idx == 0) if (blob_idx == 0) { maxval = bottom_data_a[index]; top_data[index] = maxval; maxidx = blob_idx; mask[index] = maxidx; } } else { maxval = bottom_data_b[index]; top_data[index] = maxval; maxidx = blob_idx + 1; mask[index] = maxidx; } } } template <typename Dtype> void EltwiseLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) { int* mask = NULL; const int count = top[0]->count(); Dtype* top_data = top[0]->mutable_gpu_data(); switch (op_) { case EltwiseParameter_EltwiseOp_PROD: caffe_gpu_mul(count, bottom[0]->gpu_data(), bottom[1]->gpu_data(), top_data); for (int i = 2; i < bottom.size(); ++i) { caffe_gpu_mul(count, top_data, bottom[i]->gpu_data(), top_data); } break; case EltwiseParameter_EltwiseOp_SUM: caffe_gpu_set(count, Dtype(0.), top_data); // TODO(shelhamer) does cuBLAS optimize to sum for coeff = 1? for (int i = 0; i < bottom.size(); ++i) { caffe_gpu_axpy(count, coeffs_[i], bottom[i]->gpu_data(), top_data); } break; case EltwiseParameter_EltwiseOp_MAX: mask = max_idx_.mutable_gpu_data(); // NOLINT_NEXT_LINE(whitespace/operators) hipLaunchKernelGGL(( MaxForward<Dtype>) , dim3(CAFFE_GET_BLOCKS(count)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, count, bottom[0]->gpu_data(), bottom[1]->gpu_data(), 0, top_data, mask); for (int i = 2; i < bottom.size(); ++i) { // NOLINT_NEXT_LINE(whitespace/operators) hipLaunchKernelGGL(( MaxForward<Dtype>), dim3(CAFFE_GET_BLOCKS(count)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, count, top_data, bottom[i]->gpu_data(), i-1, top_data, mask); } break; default: LOG(FATAL) << "Unknown elementwise operation."; } } template <typename Dtype> __global__ void MaxBackward(const int nthreads, const Dtype* top_diff, const int blob_idx, const int* mask, Dtype* bottom_diff) { CUDA_KERNEL_LOOP(index, nthreads) { Dtype gradient = 0; if (mask[index] == blob_idx) { gradient += top_diff[index]; } bottom_diff[index] = gradient; } } template <typename Dtype> void EltwiseLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top, const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) { const int* mask = NULL; const int count = top[0]->count(); const Dtype* top_data = top[0]->gpu_data(); const Dtype* top_diff = top[0]->gpu_diff(); for (int i = 0; i < bottom.size(); ++i) { if (propagate_down[i]) { const Dtype* bottom_data = bottom[i]->gpu_data(); Dtype* bottom_diff = bottom[i]->mutable_gpu_diff(); switch (op_) { case EltwiseParameter_EltwiseOp_PROD: if (stable_prod_grad_) { bool initialized = false; for (int j = 0; j < bottom.size(); ++j) { if (i == j) { continue; } if (!initialized) { caffe_copy(count, bottom[j]->gpu_data(), bottom_diff); initialized = true; } else { caffe_gpu_mul(count, bottom[j]->gpu_data(), bottom_diff, bottom_diff); } } } else { caffe_gpu_div(count, top_data, bottom_data, bottom_diff); } caffe_gpu_mul(count, bottom_diff, top_diff, bottom_diff); break; case EltwiseParameter_EltwiseOp_SUM: if (coeffs_[i] == Dtype(1.)) { int 
is_bottom_shared = bottom[i]->is_shared(); caffe_gpu_axpby(count, Dtype(1.), top_diff, Dtype(is_bottom_shared), bottom_diff); // caffe_copy(count, top_diff, bottom_diff); } else { caffe_gpu_scale(count, coeffs_[i], top_diff, bottom_diff); } break; case EltwiseParameter_EltwiseOp_MAX: mask = max_idx_.gpu_data(); // NOLINT_NEXT_LINE(whitespace/operators) hipLaunchKernelGGL(( MaxBackward<Dtype>), dim3(CAFFE_GET_BLOCKS(count)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, count, top_diff, i, mask, bottom_diff); break; default: LOG(FATAL) << "Unknown elementwise operation."; } } } INSTANTIATE_LAYER_GPU_FUNCS(EltwiseLayer); } // namespace caffe
ccc83353298787c25cbcd4735d826366bbc78886.cu
#include <cfloat> #include <vector> #include "caffe/layers/eltwise_layer.hpp" #include "caffe/util/math_functions.hpp" namespace caffe { template <typename Dtype> __global__ void MaxForward(const int nthreads, const Dtype* bottom_data_a, const Dtype* bottom_data_b, const int blob_idx, Dtype* top_data, int* mask) { CUDA_KERNEL_LOOP(index, nthreads) { Dtype maxval = -FLT_MAX; int maxidx = -1; if (bottom_data_a[index] > bottom_data_b[index]) { // only update for very first bottom_data blob (blob_idx == 0) if (blob_idx == 0) { maxval = bottom_data_a[index]; top_data[index] = maxval; maxidx = blob_idx; mask[index] = maxidx; } } else { maxval = bottom_data_b[index]; top_data[index] = maxval; maxidx = blob_idx + 1; mask[index] = maxidx; } } } template <typename Dtype> void EltwiseLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) { int* mask = NULL; const int count = top[0]->count(); Dtype* top_data = top[0]->mutable_gpu_data(); switch (op_) { case EltwiseParameter_EltwiseOp_PROD: caffe_gpu_mul(count, bottom[0]->gpu_data(), bottom[1]->gpu_data(), top_data); for (int i = 2; i < bottom.size(); ++i) { caffe_gpu_mul(count, top_data, bottom[i]->gpu_data(), top_data); } break; case EltwiseParameter_EltwiseOp_SUM: caffe_gpu_set(count, Dtype(0.), top_data); // TODO(shelhamer) does cuBLAS optimize to sum for coeff = 1? for (int i = 0; i < bottom.size(); ++i) { caffe_gpu_axpy(count, coeffs_[i], bottom[i]->gpu_data(), top_data); } break; case EltwiseParameter_EltwiseOp_MAX: mask = max_idx_.mutable_gpu_data(); // NOLINT_NEXT_LINE(whitespace/operators) MaxForward<Dtype> <<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>( count, bottom[0]->gpu_data(), bottom[1]->gpu_data(), 0, top_data, mask); for (int i = 2; i < bottom.size(); ++i) { // NOLINT_NEXT_LINE(whitespace/operators) MaxForward<Dtype><<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>( count, top_data, bottom[i]->gpu_data(), i-1, top_data, mask); } break; default: LOG(FATAL) << "Unknown elementwise operation."; } } template <typename Dtype> __global__ void MaxBackward(const int nthreads, const Dtype* top_diff, const int blob_idx, const int* mask, Dtype* bottom_diff) { CUDA_KERNEL_LOOP(index, nthreads) { Dtype gradient = 0; if (mask[index] == blob_idx) { gradient += top_diff[index]; } bottom_diff[index] = gradient; } } template <typename Dtype> void EltwiseLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top, const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) { const int* mask = NULL; const int count = top[0]->count(); const Dtype* top_data = top[0]->gpu_data(); const Dtype* top_diff = top[0]->gpu_diff(); for (int i = 0; i < bottom.size(); ++i) { if (propagate_down[i]) { const Dtype* bottom_data = bottom[i]->gpu_data(); Dtype* bottom_diff = bottom[i]->mutable_gpu_diff(); switch (op_) { case EltwiseParameter_EltwiseOp_PROD: if (stable_prod_grad_) { bool initialized = false; for (int j = 0; j < bottom.size(); ++j) { if (i == j) { continue; } if (!initialized) { caffe_copy(count, bottom[j]->gpu_data(), bottom_diff); initialized = true; } else { caffe_gpu_mul(count, bottom[j]->gpu_data(), bottom_diff, bottom_diff); } } } else { caffe_gpu_div(count, top_data, bottom_data, bottom_diff); } caffe_gpu_mul(count, bottom_diff, top_diff, bottom_diff); break; case EltwiseParameter_EltwiseOp_SUM: if (coeffs_[i] == Dtype(1.)) { int is_bottom_shared = bottom[i]->is_shared(); caffe_gpu_axpby(count, Dtype(1.), top_diff, Dtype(is_bottom_shared), bottom_diff); // caffe_copy(count, top_diff, 
bottom_diff); } else { caffe_gpu_scale(count, coeffs_[i], top_diff, bottom_diff); } break; case EltwiseParameter_EltwiseOp_MAX: mask = max_idx_.gpu_data(); MaxBackward<Dtype> // NOLINT_NEXT_LINE(whitespace/operators) <<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>( count, top_diff, i, mask, bottom_diff); break; default: LOG(FATAL) << "Unknown elementwise operation."; } } } } INSTANTIATE_LAYER_GPU_FUNCS(EltwiseLayer); } // namespace caffe
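Both eltwise variants rely on Caffe's CUDA_KERNEL_LOOP and CAFFE_GET_BLOCKS helpers. As a self-contained sketch of the grid-stride pattern those macros wrap, here is the first pairwise MAX pass written out by hand; the kernel and launcher names are invented for this illustration, the thread count of 512 is an assumption, and the authoritative macro definitions live in Caffe's headers.

#include <cuda_runtime.h>

// Grid-stride loop: each thread strides over the whole range, so any grid size covers n.
__global__ void max_forward_sketch(int n, const float* a, const float* b,
                                   float* top, int* mask) {
  for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < n;
       i += blockDim.x * gridDim.x) {
    // Same selection rule as the first MaxForward call above (blob_idx == 0):
    // keep the larger of the two inputs and record which one produced it.
    if (a[i] > b[i]) { top[i] = a[i]; mask[i] = 0; }
    else             { top[i] = b[i]; mask[i] = 1; }
  }
}

// Host-side launch mirroring CAFFE_GET_BLOCKS(count); 512 threads per block is an assumption.
static void launch_max_forward_sketch(int count, const float* a, const float* b,
                                      float* top, int* mask) {
  const int threads = 512;
  const int blocks = (count + threads - 1) / threads;
  max_forward_sketch<<<blocks, threads>>>(count, a, b, top, mask);
}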
18bc14601499d62926912086aaabeacc71530f00.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #define TORCH_ASSERT_NO_OPERATORS #include <ATen/OpMathType.h> #include <ATen/native/hip/GridSampler.h> #include <ATen/native/GridSamplerUtils.h> #include <ATen/native/hip/GridSampler.cuh> #include <ATen/native/hip/UpSample.cuh> #include <ATen/hip/HIPContext.h> #include <ATen/hip/detail/TensorInfo.cuh> #include <ATen/hip/detail/IndexUtils.cuh> #include <ATen/hip/detail/KernelUtils.h> #include <ATen/core/TensorBase.h> #include <ATen/Dispatch.h> #include <c10/macros/Macros.h> #include <cmath> namespace at::native { using namespace at::cuda::detail; using at::native::detail::GridSamplerInterpolation; using at::native::detail::GridSamplerPadding; namespace { template <typename scalar_t, typename index_t> C10_LAUNCH_BOUNDS_1(256) __global__ void grid_sampler_2d_kernel( const index_t nthreads, TensorInfo<scalar_t, index_t> input, TensorInfo<scalar_t, index_t> grid, TensorInfo<scalar_t, index_t> output, const GridSamplerInterpolation interpolation_mode, const GridSamplerPadding padding_mode, bool align_corners) { using opmath_t = at::opmath_type<scalar_t>; index_t C = input.sizes[1]; index_t inp_H = input.sizes[2]; index_t inp_W = input.sizes[3]; index_t out_H = grid.sizes[1]; index_t out_W = grid.sizes[2]; index_t inp_sN = input.strides[0]; index_t inp_sC = input.strides[1]; index_t inp_sH = input.strides[2]; index_t inp_sW = input.strides[3]; index_t grid_sN = grid.strides[0]; index_t grid_sH = grid.strides[1]; index_t grid_sW = grid.strides[2]; index_t grid_sCoor = grid.strides[3]; index_t out_sN = output.strides[0]; index_t out_sC = output.strides[1]; index_t out_sH = output.strides[2]; index_t out_sW = output.strides[3]; CUDA_KERNEL_LOOP_TYPE(index, nthreads, index_t) { const index_t w = index % out_W; const index_t h = (index / out_W) % out_H; const index_t n = index / (out_H * out_W); const index_t grid_offset = n * grid_sN + h * grid_sH + w * grid_sW; // get the corresponding input x, y co-ordinates from grid scalar_t x = grid.data[grid_offset]; scalar_t y = grid.data[grid_offset + grid_sCoor]; opmath_t ix = grid_sampler_compute_source_index(x, inp_W, padding_mode, align_corners); opmath_t iy = grid_sampler_compute_source_index(y, inp_H, padding_mode, align_corners); if (interpolation_mode == GridSamplerInterpolation::Bilinear) { // get NE, NW, SE, SW pixel values from (x, y) index_t ix_nw = static_cast<index_t>(::floor(ix)); index_t iy_nw = static_cast<index_t>(::floor(iy)); index_t ix_ne = ix_nw + 1; index_t iy_ne = iy_nw; index_t ix_sw = ix_nw; index_t iy_sw = iy_nw + 1; index_t ix_se = ix_nw + 1; index_t iy_se = iy_nw + 1; // get surfaces to each neighbor: opmath_t nw = (ix_se - ix) * (iy_se - iy); opmath_t ne = (ix - ix_sw) * (iy_sw - iy); opmath_t sw = (ix_ne - ix) * (iy - iy_ne); opmath_t se = (ix - ix_nw) * (iy - iy_nw); // calculate bilinear weighted pixel value and set output pixel auto inp_ptr_NC = input.data + n * inp_sN; auto out_ptr_NCHW = output.data + n * out_sN + h * out_sH + w * out_sW; for (index_t c = 0; c < C; ++c, inp_ptr_NC += inp_sC, out_ptr_NCHW += out_sC) { opmath_t out_acc = 0; if (within_bounds_2d(iy_nw, ix_nw, inp_H, inp_W)) { out_acc += inp_ptr_NC[iy_nw * inp_sH + ix_nw * inp_sW] * nw; } if (within_bounds_2d(iy_ne, ix_ne, inp_H, inp_W)) { out_acc += inp_ptr_NC[iy_ne * inp_sH + ix_ne * inp_sW] * ne; } if (within_bounds_2d(iy_sw, ix_sw, inp_H, inp_W)) { out_acc += inp_ptr_NC[iy_sw * inp_sH + ix_sw * inp_sW] * sw; } if (within_bounds_2d(iy_se, ix_se, inp_H, inp_W)) { out_acc 
+= inp_ptr_NC[iy_se * inp_sH + ix_se * inp_sW] * se; } *out_ptr_NCHW = out_acc; } } else if (interpolation_mode == GridSamplerInterpolation::Nearest) { index_t ix_nearest = static_cast<index_t>(std::nearbyint(ix)); index_t iy_nearest = static_cast<index_t>(std::nearbyint(iy)); // assign nearest neighor pixel value to output pixel auto inp_ptr_NC = input.data + n * inp_sN; auto out_ptr_NCHW = output.data + n * out_sN + h * out_sH + w * out_sW; for (index_t c = 0; c < C; ++c, inp_ptr_NC += inp_sC, out_ptr_NCHW += out_sC) { if (within_bounds_2d(iy_nearest, ix_nearest, inp_H, inp_W)) { *out_ptr_NCHW = inp_ptr_NC[iy_nearest * inp_sH + ix_nearest * inp_sW]; } else { *out_ptr_NCHW = static_cast<scalar_t>(0); } } } else if (interpolation_mode == GridSamplerInterpolation::Bicubic) { ix = grid_sampler_unnormalize(x, inp_W, align_corners); iy = grid_sampler_unnormalize(y, inp_H, align_corners); opmath_t ix_nw = ::floor(ix); opmath_t iy_nw = ::floor(iy); const opmath_t tx = ix - ix_nw; const opmath_t ty = iy - iy_nw; auto inp_ptr_NC = input.data + n * inp_sN; auto out_ptr_NCHW = output.data + n * out_sN + h * out_sH + w * out_sW; for (index_t c = 0; c < C; ++c, inp_ptr_NC += inp_sC, out_ptr_NCHW += out_sC) { opmath_t coefficients[4]; #pragma unroll 4 for (index_t i = 0; i < 4; ++i) { coefficients[i] = cubic_interp1d( get_value_bounded<scalar_t>(inp_ptr_NC, ix_nw - 1, iy_nw - 1 + i, inp_W, inp_H, inp_sW, inp_sH, padding_mode, align_corners), get_value_bounded<scalar_t>(inp_ptr_NC, ix_nw + 0, iy_nw - 1 + i, inp_W, inp_H, inp_sW, inp_sH, padding_mode, align_corners), get_value_bounded<scalar_t>(inp_ptr_NC, ix_nw + 1, iy_nw - 1 + i, inp_W, inp_H, inp_sW, inp_sH, padding_mode, align_corners), get_value_bounded<scalar_t>(inp_ptr_NC, ix_nw + 2, iy_nw - 1 + i, inp_W, inp_H, inp_sW, inp_sH, padding_mode, align_corners), tx); } *out_ptr_NCHW = cubic_interp1d( coefficients[0], coefficients[1], coefficients[2], coefficients[3], ty); } } } } template <typename scalar_t, typename index_t> C10_LAUNCH_BOUNDS_1(512) __global__ void grid_sampler_3d_kernel( const index_t nthreads, TensorInfo<scalar_t, index_t> input, TensorInfo<scalar_t, index_t> grid, TensorInfo<scalar_t, index_t> output, const GridSamplerInterpolation interpolation_mode, const GridSamplerPadding padding_mode, bool align_corners) { using opmath_t = at::opmath_type<scalar_t>; index_t C = input.sizes[1]; index_t inp_D = input.sizes[2]; index_t inp_H = input.sizes[3]; index_t inp_W = input.sizes[4]; index_t out_D = grid.sizes[1]; index_t out_H = grid.sizes[2]; index_t out_W = grid.sizes[3]; index_t inp_sN = input.strides[0]; index_t inp_sC = input.strides[1]; index_t inp_sD = input.strides[2]; index_t inp_sH = input.strides[3]; index_t inp_sW = input.strides[4]; index_t grid_sN = grid.strides[0]; index_t grid_sD = grid.strides[1]; index_t grid_sH = grid.strides[2]; index_t grid_sW = grid.strides[3]; index_t grid_sCoor = grid.strides[4]; index_t out_sN = output.strides[0]; index_t out_sC = output.strides[1]; index_t out_sD = output.strides[2]; index_t out_sH = output.strides[3]; index_t out_sW = output.strides[4]; CUDA_KERNEL_LOOP_TYPE(index, nthreads, index_t) { const index_t w = index % out_W; const index_t h = (index / out_W) % out_H; const index_t d = (index / (out_H * out_W)) % out_D; const index_t n = index / (out_D * out_H * out_W); const index_t grid_offset = n * grid_sN + d * grid_sD + h * grid_sH + w * grid_sW; // get the corresponding input x, y, z co-ordinates from grid scalar_t x = grid.data[grid_offset]; scalar_t y = grid.data[grid_offset + 
grid_sCoor]; scalar_t z = grid.data[grid_offset + 2 * grid_sCoor]; opmath_t ix = grid_sampler_compute_source_index(x, inp_W, padding_mode, align_corners); opmath_t iy = grid_sampler_compute_source_index(y, inp_H, padding_mode, align_corners); opmath_t iz = grid_sampler_compute_source_index(z, inp_D, padding_mode, align_corners); if (interpolation_mode == GridSamplerInterpolation::Bilinear) { // get corner pixel values from (x, y, z) // for 4d, we used north-east-south-west // for 5d, we add top-bottom index_t ix_tnw = static_cast<index_t>(::floor(ix)); index_t iy_tnw = static_cast<index_t>(::floor(iy)); index_t iz_tnw = static_cast<index_t>(::floor(iz)); index_t ix_tne = ix_tnw + 1; index_t iy_tne = iy_tnw; index_t iz_tne = iz_tnw; index_t ix_tsw = ix_tnw; index_t iy_tsw = iy_tnw + 1; index_t iz_tsw = iz_tnw; index_t ix_tse = ix_tnw + 1; index_t iy_tse = iy_tnw + 1; index_t iz_tse = iz_tnw; index_t ix_bnw = ix_tnw; index_t iy_bnw = iy_tnw; index_t iz_bnw = iz_tnw + 1; index_t ix_bne = ix_tnw + 1; index_t iy_bne = iy_tnw; index_t iz_bne = iz_tnw + 1; index_t ix_bsw = ix_tnw; index_t iy_bsw = iy_tnw + 1; index_t iz_bsw = iz_tnw + 1; index_t ix_bse = ix_tnw + 1; index_t iy_bse = iy_tnw + 1; index_t iz_bse = iz_tnw + 1; // get surfaces to each neighbor: opmath_t tnw = (ix_bse - ix) * (iy_bse - iy) * (iz_bse - iz); opmath_t tne = (ix - ix_bsw) * (iy_bsw - iy) * (iz_bsw - iz); opmath_t tsw = (ix_bne - ix) * (iy - iy_bne) * (iz_bne - iz); opmath_t tse = (ix - ix_bnw) * (iy - iy_bnw) * (iz_bnw - iz); opmath_t bnw = (ix_tse - ix) * (iy_tse - iy) * (iz - iz_tse); opmath_t bne = (ix - ix_tsw) * (iy_tsw - iy) * (iz - iz_tsw); opmath_t bsw = (ix_tne - ix) * (iy - iy_tne) * (iz - iz_tne); opmath_t bse = (ix - ix_tnw) * (iy - iy_tnw) * (iz - iz_tnw); auto inp_ptr_NC = input.data + n * inp_sN; auto out_ptr_NCDHW = output.data + n * out_sN + d * out_sD + h * out_sH + w * out_sW; for (index_t c = 0; c < C; ++c, inp_ptr_NC += inp_sC, out_ptr_NCDHW += out_sC) { // (c, iz_tnw, iy_tnw, ix_tnw) * tnw + (c, iz_tne, iy_tne, ix_tne) * tne // + (c, iz_tsw, iy_tsw, ix_tsw) * tsw + (c, iz_tse, iy_tse, ix_tse) * tse // + (c, iz_bnw, iy_bnw, ix_bnw) * bnw + (c, iz_bne, iy_bne, ix_bne) * bne // + (c, iz_bsw, iy_bsw, ix_bsw) * bsw + (c, iz_bse, iy_bse, ix_bse) * bse opmath_t out_acc = 0; if (within_bounds_3d(iz_tnw, iy_tnw, ix_tnw, inp_D, inp_H, inp_W)) { out_acc += inp_ptr_NC[iz_tnw * inp_sD + iy_tnw * inp_sH + ix_tnw * inp_sW] * tnw; } if (within_bounds_3d(iz_tne, iy_tne, ix_tne, inp_D, inp_H, inp_W)) { out_acc += inp_ptr_NC[iz_tne * inp_sD + iy_tne * inp_sH + ix_tne * inp_sW] * tne; } if (within_bounds_3d(iz_tsw, iy_tsw, ix_tsw, inp_D, inp_H, inp_W)) { out_acc += inp_ptr_NC[iz_tsw * inp_sD + iy_tsw * inp_sH + ix_tsw * inp_sW] * tsw; } if (within_bounds_3d(iz_tse, iy_tse, ix_tse, inp_D, inp_H, inp_W)) { out_acc += inp_ptr_NC[iz_tse * inp_sD + iy_tse * inp_sH + ix_tse * inp_sW] * tse; } if (within_bounds_3d(iz_bnw, iy_bnw, ix_bnw, inp_D, inp_H, inp_W)) { out_acc += inp_ptr_NC[iz_bnw * inp_sD + iy_bnw * inp_sH + ix_bnw * inp_sW] * bnw; } if (within_bounds_3d(iz_bne, iy_bne, ix_bne, inp_D, inp_H, inp_W)) { out_acc += inp_ptr_NC[iz_bne * inp_sD + iy_bne * inp_sH + ix_bne * inp_sW] * bne; } if (within_bounds_3d(iz_bsw, iy_bsw, ix_bsw, inp_D, inp_H, inp_W)) { out_acc += inp_ptr_NC[iz_bsw * inp_sD + iy_bsw * inp_sH + ix_bsw * inp_sW] * bsw; } if (within_bounds_3d(iz_bse, iy_bse, ix_bse, inp_D, inp_H, inp_W)) { out_acc += inp_ptr_NC[iz_bse * inp_sD + iy_bse * inp_sH + ix_bse * inp_sW] * bse; } *out_ptr_NCDHW = out_acc; } } else 
if (interpolation_mode == GridSamplerInterpolation::Nearest) { index_t ix_nearest = static_cast<index_t>(std::round(ix)); index_t iy_nearest = static_cast<index_t>(std::round(iy)); index_t iz_nearest = static_cast<index_t>(std::round(iz)); // assign nearest neighor pixel value to output pixel auto inp_ptr_NC = input.data + n * inp_sN; auto out_ptr_NCDHW = output.data + n * out_sN + d * out_sD + h * out_sH + w * out_sW; for (index_t c = 0; c < C; ++c, inp_ptr_NC += inp_sC, out_ptr_NCDHW += out_sC) { if (within_bounds_3d(iz_nearest, iy_nearest, ix_nearest, inp_D, inp_H, inp_W)) { *out_ptr_NCDHW = inp_ptr_NC[iz_nearest * inp_sD + iy_nearest * inp_sH + ix_nearest * inp_sW]; } else { *out_ptr_NCDHW = static_cast<scalar_t>(0); } } } } } // Note [Passing pointer and offset to fastAtomicAdd] // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ // For its internal bounds checking, fastAtomicAdd needs to know where the destination address // lies relative to the entire tensor, so we pass the base grad_input.data and full offset information, // including batch * channel offset (NC_offset). template <typename scalar_t, typename index_t> C10_LAUNCH_BOUNDS_1(256) __global__ void grid_sampler_2d_backward_kernel( const index_t nthreads, TensorInfo<scalar_t, index_t> grad_output, TensorInfo<scalar_t, index_t> input, TensorInfo<scalar_t, index_t> grid, TensorInfo<scalar_t, index_t> grad_input, // initialized to zeros (or unused if input_requires_grad is false) TensorInfo<scalar_t, index_t> grad_grid, // initialized to empty const GridSamplerInterpolation interpolation_mode, const GridSamplerPadding padding_mode, bool align_corners, const index_t grad_input_memory_span, const bool input_requires_grad) { index_t C = input.sizes[1]; index_t inp_H = input.sizes[2]; index_t inp_W = input.sizes[3]; index_t out_H = grid.sizes[1]; index_t out_W = grid.sizes[2]; index_t inp_sN = input.strides[0]; index_t inp_sC = input.strides[1]; index_t inp_sH = input.strides[2]; index_t inp_sW = input.strides[3]; index_t grid_sN = grid.strides[0]; index_t grid_sH = grid.strides[1]; index_t grid_sW = grid.strides[2]; index_t grid_sCoor = grid.strides[3]; index_t gOut_sN = grad_output.strides[0]; index_t gOut_sC = grad_output.strides[1]; index_t gOut_sH = grad_output.strides[2]; index_t gOut_sW = grad_output.strides[3]; // gInp_* (and NC_offset below) are not really needed if input_requires_grad is false. 
index_t gInp_sN; index_t gInp_sC; index_t gInp_sH; index_t gInp_sW; if (input_requires_grad) { gInp_sN = grad_input.strides[0]; gInp_sC = grad_input.strides[1]; gInp_sH = grad_input.strides[2]; gInp_sW = grad_input.strides[3]; } index_t gGrid_sW = grad_grid.strides[2]; CUDA_KERNEL_LOOP_TYPE(index, nthreads, index_t) { const index_t w = index % out_W; const index_t h = (index / out_W) % out_H; const index_t n = index / (out_H * out_W); const auto grid_offset = n * grid_sN + h * grid_sH + w * grid_sW; // get the corresponding input x, y co-ordinates from grid scalar_t x = grid.data[grid_offset]; scalar_t y = grid.data[grid_offset + grid_sCoor]; // multipliers for gradients on ix and iy scalar_t gix_mult, giy_mult; scalar_t ix = grid_sampler_compute_source_index_set_grad(x, inp_W, padding_mode, align_corners, &gix_mult); scalar_t iy = grid_sampler_compute_source_index_set_grad(y, inp_H, padding_mode, align_corners, &giy_mult); if (interpolation_mode == GridSamplerInterpolation::Bilinear) { // get NE, NW, SE, SW pixel values from (x, y) index_t ix_nw = static_cast<index_t>(::floor(ix)); index_t iy_nw = static_cast<index_t>(::floor(iy)); index_t ix_ne = ix_nw + 1; index_t iy_ne = iy_nw; index_t ix_sw = ix_nw; index_t iy_sw = iy_nw + 1; index_t ix_se = ix_nw + 1; index_t iy_se = iy_nw + 1; // get surfaces to each neighbor: scalar_t nw = (ix_se - ix) * (iy_se - iy); scalar_t ne = (ix - ix_sw) * (iy_sw - iy); scalar_t sw = (ix_ne - ix) * (iy - iy_ne); scalar_t se = (ix - ix_nw) * (iy - iy_nw); scalar_t gix = static_cast<scalar_t>(0), giy = static_cast<scalar_t>(0); scalar_t *gOut_ptr_NCHW = grad_output.data + n * gOut_sN + h * gOut_sH + w * gOut_sW; index_t NC_offset = n * gInp_sN; scalar_t *inp_ptr_NC = input.data + n * inp_sN; for (index_t c = 0; c < C; ++c, inp_ptr_NC += inp_sC, NC_offset += gInp_sC, gOut_ptr_NCHW += gOut_sC) { scalar_t gOut = *gOut_ptr_NCHW; if (input_requires_grad) { // calculate and set grad_input. See Note [Passing pointer and offset to fastAtomicAdd]. safe_add_2d(grad_input.data, iy_nw, ix_nw, gInp_sH, gInp_sW, inp_H, inp_W, nw * gOut, NC_offset, grad_input_memory_span); safe_add_2d(grad_input.data, iy_ne, ix_ne, gInp_sH, gInp_sW, inp_H, inp_W, ne * gOut, NC_offset, grad_input_memory_span); safe_add_2d(grad_input.data, iy_sw, ix_sw, gInp_sH, gInp_sW, inp_H, inp_W, sw * gOut, NC_offset, grad_input_memory_span); safe_add_2d(grad_input.data, iy_se, ix_se, gInp_sH, gInp_sW, inp_H, inp_W, se * gOut, NC_offset, grad_input_memory_span); } // calculate grad_grid if (within_bounds_2d(iy_nw, ix_nw, inp_H, inp_W)) { scalar_t nw_val = inp_ptr_NC[iy_nw * inp_sH + ix_nw * inp_sW]; gix -= nw_val * (iy_se - iy) * gOut; giy -= nw_val * (ix_se - ix) * gOut; } if (within_bounds_2d(iy_ne, ix_ne, inp_H, inp_W)) { scalar_t ne_val = inp_ptr_NC[iy_ne * inp_sH + ix_ne * inp_sW]; gix += ne_val * (iy_sw - iy) * gOut; giy -= ne_val * (ix - ix_sw) * gOut; } if (within_bounds_2d(iy_sw, ix_sw, inp_H, inp_W)) { scalar_t sw_val = inp_ptr_NC[iy_sw * inp_sH + ix_sw * inp_sW]; gix -= sw_val * (iy - iy_ne) * gOut; giy += sw_val * (ix_ne - ix) * gOut; } if (within_bounds_2d(iy_se, ix_se, inp_H, inp_W)) { scalar_t se_val = inp_ptr_NC[iy_se * inp_sH + ix_se * inp_sW]; gix += se_val * (iy - iy_nw) * gOut; giy += se_val * (ix - ix_nw) * gOut; } } // assuming grad_grid is contiguous // thus we can // 1. use index with gGrid_sW to directly compute gGrid_ptr_NHW // 2. 
directly assign to gGrid_ptr_NHW[0], gGrid_ptr_NHW[1] scalar_t *gGrid_ptr_NHW = grad_grid.data + index * gGrid_sW; gGrid_ptr_NHW[0] = gix_mult * gix; gGrid_ptr_NHW[1] = giy_mult * giy; } else if (interpolation_mode == GridSamplerInterpolation::Nearest) { if (input_requires_grad) { index_t ix_nearest = static_cast<index_t>(std::round(ix)); index_t iy_nearest = static_cast<index_t>(std::round(iy)); // assign nearest neighor pixel value to output pixel scalar_t *gOut_ptr_NCHW = grad_output.data + n * gOut_sN + h * gOut_sH + w * gOut_sW; index_t NC_offset = n * gInp_sN; for (index_t c = 0; c < C; ++c, NC_offset += gInp_sC, gOut_ptr_NCHW += gOut_sC) { // calculate and set grad_input. See Note [Passing pointer and offset to fastAtomicAdd]. safe_add_2d(grad_input.data, iy_nearest, ix_nearest, gInp_sH, gInp_sW, inp_H, inp_W, *gOut_ptr_NCHW, NC_offset, grad_input_memory_span); } } // assuming grad_grid is contiguous // thus we can // 1. use index with gGrid_sW to directly compute gGrid_ptr_NHW // 2. directly assign to gGrid_ptr_NHW[0], gGrid_ptr_NHW[1] scalar_t *gGrid_ptr_NHW = grad_grid.data + index * gGrid_sW; gGrid_ptr_NHW[0] = static_cast<scalar_t>(0); gGrid_ptr_NHW[1] = static_cast<scalar_t>(0); } else if (interpolation_mode == GridSamplerInterpolation::Bicubic) { ix = grid_sampler_unnormalize_set_grad(x, inp_W, align_corners, &gix_mult); iy = grid_sampler_unnormalize_set_grad(y, inp_H, align_corners, &giy_mult); scalar_t ix_nw = ::floor(ix); scalar_t iy_nw = ::floor(iy); const scalar_t tx = ix - ix_nw; const scalar_t ty = iy - iy_nw; scalar_t x_coeffs[4]; scalar_t y_coeffs[4]; scalar_t x_coeffs_grad[4]; scalar_t y_coeffs_grad[4]; get_cubic_upsampling_coefficients<scalar_t>(x_coeffs, tx); get_cubic_upsampling_coefficients<scalar_t>(y_coeffs, ty); get_cubic_coefficients_grad<scalar_t>(x_coeffs_grad, tx); get_cubic_coefficients_grad<scalar_t>(y_coeffs_grad, ty); scalar_t gix = static_cast<scalar_t>(0); scalar_t giy = static_cast<scalar_t>(0); scalar_t *gOut_ptr_NCHW = grad_output.data + n * gOut_sN + h * gOut_sH + w * gOut_sW; index_t NC_offset = n * gInp_sN; scalar_t *inp_ptr_NC = input.data + n * inp_sN; for (index_t c = 0; c < C; ++c, gOut_ptr_NCHW += gOut_sC, NC_offset += gInp_sC, inp_ptr_NC+= inp_sC) { scalar_t gOut = *gOut_ptr_NCHW; #pragma unroll 4 for (index_t i = 0; i < 4; ++i) { #pragma unroll 4 for (index_t j = 0; j < 4; ++j) { if (input_requires_grad) { // set input gradient. See Note [Passing pointer and offset to fastAtomicAdd]. 
add_value_bounded<scalar_t>(grad_input.data, ix_nw - 1 + i, iy_nw - 1 + j, inp_W, inp_H, gInp_sW, gInp_sH, gOut * x_coeffs[i] * y_coeffs[j], padding_mode, align_corners, NC_offset, grad_input_memory_span); } // set grid gradient scalar_t val = get_value_bounded<scalar_t>(inp_ptr_NC, ix_nw - 1 + i, iy_nw - 1 + j, inp_W, inp_H, inp_sW, inp_sH, padding_mode, align_corners); gix -= val * x_coeffs_grad[i] * y_coeffs[j] * gOut; giy -= val * y_coeffs_grad[j] * x_coeffs[i] * gOut; } } } scalar_t *gGrid_ptr_NHW = grad_grid.data + index * gGrid_sW; gGrid_ptr_NHW[0] = gix_mult * gix; gGrid_ptr_NHW[1] = giy_mult * giy; } } } template <typename scalar_t, typename index_t> C10_LAUNCH_BOUNDS_1(256) __global__ void grid_sampler_3d_backward_kernel( const index_t nthreads, TensorInfo<scalar_t, index_t> grad_output, TensorInfo<scalar_t, index_t> input, TensorInfo<scalar_t, index_t> grid, TensorInfo<scalar_t, index_t> grad_input, // initialized to zeros (or unused if input_requires_grad is false) TensorInfo<scalar_t, index_t> grad_grid, // initialized to empty const GridSamplerInterpolation interpolation_mode, const GridSamplerPadding padding_mode, bool align_corners, const index_t grad_input_memory_span, const bool input_requires_grad) { index_t C = input.sizes[1]; index_t inp_D = input.sizes[2]; index_t inp_H = input.sizes[3]; index_t inp_W = input.sizes[4]; index_t out_D = grid.sizes[1]; index_t out_H = grid.sizes[2]; index_t out_W = grid.sizes[3]; index_t inp_sN = input.strides[0]; index_t inp_sC = input.strides[1]; index_t inp_sD = input.strides[2]; index_t inp_sH = input.strides[3]; index_t inp_sW = input.strides[4]; index_t grid_sN = grid.strides[0]; index_t grid_sD = grid.strides[1]; index_t grid_sH = grid.strides[2]; index_t grid_sW = grid.strides[3]; index_t grid_sCoor = grid.strides[4]; index_t gOut_sN = grad_output.strides[0]; index_t gOut_sC = grad_output.strides[1]; index_t gOut_sD = grad_output.strides[2]; index_t gOut_sH = grad_output.strides[3]; index_t gOut_sW = grad_output.strides[4]; // gInp_* (and NC_offset below) are not really needed if input_requires_grad is false. 
int64_t gInp_sN = 0; int64_t gInp_sC = 0; int64_t gInp_sD = 0; int64_t gInp_sH = 0; int64_t gInp_sW = 0; if (input_requires_grad) { gInp_sN = grad_input.strides[0]; gInp_sC = grad_input.strides[1]; gInp_sD = grad_input.strides[2]; gInp_sH = grad_input.strides[3]; gInp_sW = grad_input.strides[4]; } index_t gGrid_sW = grad_grid.strides[3]; CUDA_KERNEL_LOOP_TYPE(index, nthreads, index_t) { const index_t w = index % out_W; const index_t h = (index / out_W) % out_H; const index_t d = (index / (out_H * out_W)) % out_D; const index_t n = index / (out_D * out_H * out_W); const auto grid_offset = n * grid_sN + d * grid_sD + h * grid_sH + w * grid_sW; // get the corresponding input x, y, z co-ordinates from grid scalar_t ix = grid.data[grid_offset]; scalar_t iy = grid.data[grid_offset + grid_sCoor]; scalar_t iz = grid.data[grid_offset + 2 * grid_sCoor]; // multipliers for gradients on ix, iy, and iz scalar_t gix_mult, giy_mult, giz_mult; ix = grid_sampler_compute_source_index_set_grad(ix, inp_W, padding_mode, align_corners, &gix_mult); iy = grid_sampler_compute_source_index_set_grad(iy, inp_H, padding_mode, align_corners, &giy_mult); iz = grid_sampler_compute_source_index_set_grad(iz, inp_D, padding_mode, align_corners, &giz_mult); if (interpolation_mode == GridSamplerInterpolation::Bilinear) { // get corner pixel values from (x, y, z) // for 4d, we used north-east-south-west // for 5d, we add top-bottom index_t ix_tnw = static_cast<index_t>(::floor(ix)); index_t iy_tnw = static_cast<index_t>(::floor(iy)); index_t iz_tnw = static_cast<index_t>(::floor(iz)); index_t ix_tne = ix_tnw + 1; index_t iy_tne = iy_tnw; index_t iz_tne = iz_tnw; index_t ix_tsw = ix_tnw; index_t iy_tsw = iy_tnw + 1; index_t iz_tsw = iz_tnw; index_t ix_tse = ix_tnw + 1; index_t iy_tse = iy_tnw + 1; index_t iz_tse = iz_tnw; index_t ix_bnw = ix_tnw; index_t iy_bnw = iy_tnw; index_t iz_bnw = iz_tnw + 1; index_t ix_bne = ix_tnw + 1; index_t iy_bne = iy_tnw; index_t iz_bne = iz_tnw + 1; index_t ix_bsw = ix_tnw; index_t iy_bsw = iy_tnw + 1; index_t iz_bsw = iz_tnw + 1; index_t ix_bse = ix_tnw + 1; index_t iy_bse = iy_tnw + 1; index_t iz_bse = iz_tnw + 1; // get surfaces to each neighbor: scalar_t tnw = (ix_bse - ix) * (iy_bse - iy) * (iz_bse - iz); scalar_t tne = (ix - ix_bsw) * (iy_bsw - iy) * (iz_bsw - iz); scalar_t tsw = (ix_bne - ix) * (iy - iy_bne) * (iz_bne - iz); scalar_t tse = (ix - ix_bnw) * (iy - iy_bnw) * (iz_bnw - iz); scalar_t bnw = (ix_tse - ix) * (iy_tse - iy) * (iz - iz_tse); scalar_t bne = (ix - ix_tsw) * (iy_tsw - iy) * (iz - iz_tsw); scalar_t bsw = (ix_tne - ix) * (iy - iy_tne) * (iz - iz_tne); scalar_t bse = (ix - ix_tnw) * (iy - iy_tnw) * (iz - iz_tnw); scalar_t gix = static_cast<scalar_t>(0), giy = static_cast<scalar_t>(0), giz = static_cast<scalar_t>(0); scalar_t *gOut_ptr_NCDHW = grad_output.data + n * gOut_sN + d * gOut_sD + h * gOut_sH + w * gOut_sW; index_t NC_offset; if (input_requires_grad) { NC_offset = n * gInp_sN; } scalar_t *inp_ptr_NC = input.data + n * inp_sN; // calculate bilinear weighted pixel value and set output pixel for (index_t c = 0; c < C; ++c, gOut_ptr_NCDHW += gOut_sC, NC_offset += gInp_sC, inp_ptr_NC += inp_sC) { scalar_t gOut = *gOut_ptr_NCDHW; // calculate and set grad_input. See Note [Passing pointer and offset to fastAtomicAdd]. 
if (input_requires_grad) { safe_add_3d(grad_input.data, iz_tnw, iy_tnw, ix_tnw, gInp_sD, gInp_sH, gInp_sW, inp_D, inp_H, inp_W, tnw * gOut, NC_offset, grad_input_memory_span); safe_add_3d(grad_input.data, iz_tne, iy_tne, ix_tne, gInp_sD, gInp_sH, gInp_sW, inp_D, inp_H, inp_W, tne * gOut, NC_offset, grad_input_memory_span); safe_add_3d(grad_input.data, iz_tsw, iy_tsw, ix_tsw, gInp_sD, gInp_sH, gInp_sW, inp_D, inp_H, inp_W, tsw * gOut, NC_offset, grad_input_memory_span); safe_add_3d(grad_input.data, iz_tse, iy_tse, ix_tse, gInp_sD, gInp_sH, gInp_sW, inp_D, inp_H, inp_W, tse * gOut, NC_offset, grad_input_memory_span); safe_add_3d(grad_input.data, iz_bnw, iy_bnw, ix_bnw, gInp_sD, gInp_sH, gInp_sW, inp_D, inp_H, inp_W, bnw * gOut, NC_offset, grad_input_memory_span); safe_add_3d(grad_input.data, iz_bne, iy_bne, ix_bne, gInp_sD, gInp_sH, gInp_sW, inp_D, inp_H, inp_W, bne * gOut, NC_offset, grad_input_memory_span); safe_add_3d(grad_input.data, iz_bsw, iy_bsw, ix_bsw, gInp_sD, gInp_sH, gInp_sW, inp_D, inp_H, inp_W, bsw * gOut, NC_offset, grad_input_memory_span); safe_add_3d(grad_input.data, iz_bse, iy_bse, ix_bse, gInp_sD, gInp_sH, gInp_sW, inp_D, inp_H, inp_W, bse * gOut, NC_offset, grad_input_memory_span); } // calculate grad_grid if (within_bounds_3d(iz_tnw, iy_tnw, ix_tnw, inp_D, inp_H, inp_W)) { scalar_t tnw_val = inp_ptr_NC[iz_tnw * inp_sD + iy_tnw * inp_sH + ix_tnw * inp_sW]; gix -= tnw_val * (iy_bse - iy) * (iz_bse - iz) * gOut; giy -= tnw_val * (ix_bse - ix) * (iz_bse - iz) * gOut; giz -= tnw_val * (ix_bse - ix) * (iy_bse - iy) * gOut; } if (within_bounds_3d(iz_tne, iy_tne, ix_tne, inp_D, inp_H, inp_W)) { scalar_t tne_val = inp_ptr_NC[iz_tne * inp_sD + iy_tne * inp_sH + ix_tne * inp_sW]; gix += tne_val * (iy_bsw - iy) * (iz_bsw - iz) * gOut; giy -= tne_val * (ix - ix_bsw) * (iz_bsw - iz) * gOut; giz -= tne_val * (ix - ix_bsw) * (iy_bsw - iy) * gOut; } if (within_bounds_3d(iz_tsw, iy_tsw, ix_tsw, inp_D, inp_H, inp_W)) { scalar_t tsw_val = inp_ptr_NC[iz_tsw * inp_sD + iy_tsw * inp_sH + ix_tsw * inp_sW]; gix -= tsw_val * (iy - iy_bne) * (iz_bne - iz) * gOut; giy += tsw_val * (ix_bne - ix) * (iz_bne - iz) * gOut; giz -= tsw_val * (ix_bne - ix) * (iy - iy_bne) * gOut; } if (within_bounds_3d(iz_tse, iy_tse, ix_tse, inp_D, inp_H, inp_W)) { scalar_t tse_val = inp_ptr_NC[iz_tse * inp_sD + iy_tse * inp_sH + ix_tse * inp_sW]; gix += tse_val * (iy - iy_bnw) * (iz_bnw - iz) * gOut; giy += tse_val * (ix - ix_bnw) * (iz_bnw - iz) * gOut; giz -= tse_val * (ix - ix_bnw) * (iy - iy_bnw) * gOut; } if (within_bounds_3d(iz_bnw, iy_bnw, ix_bnw, inp_D, inp_H, inp_W)) { scalar_t bnw_val = inp_ptr_NC[iz_bnw * inp_sD + iy_bnw * inp_sH + ix_bnw * inp_sW]; gix -= bnw_val * (iy_tse - iy) * (iz - iz_tse) * gOut; giy -= bnw_val * (ix_tse - ix) * (iz - iz_tse) * gOut; giz += bnw_val * (ix_tse - ix) * (iy_tse - iy) * gOut; } if (within_bounds_3d(iz_bne, iy_bne, ix_bne, inp_D, inp_H, inp_W)) { scalar_t bne_val = inp_ptr_NC[iz_bne * inp_sD + iy_bne * inp_sH + ix_bne * inp_sW]; gix += bne_val * (iy_tsw - iy) * (iz - iz_tsw) * gOut; giy -= bne_val * (ix - ix_tsw) * (iz - iz_tsw) * gOut; giz += bne_val * (ix - ix_tsw) * (iy_tsw - iy) * gOut; } if (within_bounds_3d(iz_bsw, iy_bsw, ix_bsw, inp_D, inp_H, inp_W)) { scalar_t bsw_val = inp_ptr_NC[iz_bsw * inp_sD + iy_bsw * inp_sH + ix_bsw * inp_sW]; gix -= bsw_val * (iy - iy_tne) * (iz - iz_tne) * gOut; giy += bsw_val * (ix_tne - ix) * (iz - iz_tne) * gOut; giz += bsw_val * (ix_tne - ix) * (iy - iy_tne) * gOut; } if (within_bounds_3d(iz_bse, iy_bse, ix_bse, inp_D, inp_H, inp_W)) { 
scalar_t bse_val = inp_ptr_NC[iz_bse * inp_sD + iy_bse * inp_sH + ix_bse * inp_sW]; gix += bse_val * (iy - iy_tnw) * (iz - iz_tnw) * gOut; giy += bse_val * (ix - ix_tnw) * (iz - iz_tnw) * gOut; giz += bse_val * (ix - ix_tnw) * (iy - iy_tnw) * gOut; } } // assuming grad_grid is contiguous // thus we can // 1. use index with gGrid_sW to directly compute gGrid_ptr_NDHW // 2. directly assign to gGrid_ptr_NDHW[0], gGrid_ptr_NDHW[1], gGrid_ptr_NDHW[2] scalar_t *gGrid_ptr_NDHW = grad_grid.data + index * gGrid_sW; gGrid_ptr_NDHW[0] = gix_mult * gix; gGrid_ptr_NDHW[1] = giy_mult * giy; gGrid_ptr_NDHW[2] = giz_mult * giz; } else if (interpolation_mode == GridSamplerInterpolation::Nearest) { if (input_requires_grad) { auto ix_nearest = static_cast<index_t>(std::round(ix)); auto iy_nearest = static_cast<index_t>(std::round(iy)); auto iz_nearest = static_cast<index_t>(std::round(iz)); // assign nearest neighor pixel value to output pixel scalar_t *gOut_ptr_NCDHW = grad_output.data + n * gOut_sN + d * gOut_sD + h * gOut_sH + w * gOut_sW; index_t NC_offset = n * gInp_sN; for (index_t c = 0; c < C; ++c, gOut_ptr_NCDHW += gOut_sC, NC_offset += gInp_sC) { // calculate and set grad_input. See Note [Passing pointer and offset to fastAtomicAdd]. safe_add_3d(grad_input.data, iz_nearest, iy_nearest, ix_nearest, gInp_sD, gInp_sH, gInp_sW, inp_D, inp_H, inp_W, *gOut_ptr_NCDHW, NC_offset, grad_input_memory_span); } } // assuming grad_grid is contiguous // thus we can // 1. use index with gGrid_sW to directly compute gGrid_ptr_NDHW // 2. directly assign to gGrid_ptr_NDHW[0], gGrid_ptr_NDHW[1], gGrid_ptr_NDHW[2] scalar_t *gGrid_ptr_NDHW = grad_grid.data + index * gGrid_sW; gGrid_ptr_NDHW[0] = static_cast<scalar_t>(0); gGrid_ptr_NDHW[1] = static_cast<scalar_t>(0); gGrid_ptr_NDHW[2] = static_cast<scalar_t>(0); } } } } // namespace void launch_grid_sampler_2d_forward_kernel( const TensorBase &output, const TensorBase &input, const TensorBase &grid, int64_t interpolation_mode, int64_t padding_mode, bool align_corners) { // See NOTE [ grid_sampler Native Functions ]. // Add checks here in case this is called instead of grid_sampler. 
check_grid_sampler_common(input, grid); check_grid_sampler_2d(input, grid); auto N = input.size(0); auto H = grid.size(1); auto W = grid.size(2); int64_t count = N * H * W; if (count > 0) { AT_DISPATCH_FLOATING_TYPES_AND_HALF(input.scalar_type(), "grid_sampler_2d_cuda", [&] { if (canUse32BitIndexMath(input) && canUse32BitIndexMath(grid) && canUse32BitIndexMath(output)) { hipLaunchKernelGGL(( grid_sampler_2d_kernel<scalar_t>) , dim3(GET_BLOCKS(count, 256)), dim3(256), 0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(), static_cast<int>(count), getTensorInfo<scalar_t, int>(input), getTensorInfo<scalar_t, int>(grid), getTensorInfo<scalar_t, int>(output), static_cast<GridSamplerInterpolation>(interpolation_mode), static_cast<GridSamplerPadding>(padding_mode), align_corners); C10_HIP_KERNEL_LAUNCH_CHECK(); } else { hipLaunchKernelGGL(( grid_sampler_2d_kernel<scalar_t>) , dim3(GET_BLOCKS(count, 256)), dim3(256), 0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(), count, getTensorInfo<scalar_t, int64_t>(input), getTensorInfo<scalar_t, int64_t>(grid), getTensorInfo<scalar_t, int64_t>(output), static_cast<GridSamplerInterpolation>(interpolation_mode), static_cast<GridSamplerPadding>(padding_mode), align_corners); C10_HIP_KERNEL_LAUNCH_CHECK(); } }); } } void launch_grid_sampler_3d_forward_kernel( const TensorBase &output, const TensorBase &input, const TensorBase &grid, int64_t interpolation_mode, int64_t padding_mode, bool align_corners) { // See NOTE [ grid_sampler Native Functions ]. // Add checks here in case this is called instead of grid_sampler. check_grid_sampler_common(input, grid); check_grid_sampler_3d(input, grid, interpolation_mode); auto N = input.size(0); auto D = grid.size(1); auto H = grid.size(2); auto W = grid.size(3); int64_t count = N * D * H * W; if (count > 0) { AT_DISPATCH_FLOATING_TYPES_AND_HALF(input.scalar_type(), "grid_sampler_3d_cuda", [&] { if (canUse32BitIndexMath(input) && canUse32BitIndexMath(grid) && canUse32BitIndexMath(output)) { hipLaunchKernelGGL(( grid_sampler_3d_kernel<scalar_t>) , dim3(GET_BLOCKS(count, 512)), dim3(512), 0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(), static_cast<int>(count), getTensorInfo<scalar_t, int>(input), getTensorInfo<scalar_t, int>(grid), getTensorInfo<scalar_t, int>(output), static_cast<GridSamplerInterpolation>(interpolation_mode), static_cast<GridSamplerPadding>(padding_mode), align_corners); C10_HIP_KERNEL_LAUNCH_CHECK(); } else { hipLaunchKernelGGL(( grid_sampler_3d_kernel<scalar_t>) , dim3(GET_BLOCKS(count, 512)), dim3(512), 0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(), count, getTensorInfo<scalar_t, int64_t>(input), getTensorInfo<scalar_t, int64_t>(grid), getTensorInfo<scalar_t, int64_t>(output), static_cast<GridSamplerInterpolation>(interpolation_mode), static_cast<GridSamplerPadding>(padding_mode), align_corners); C10_HIP_KERNEL_LAUNCH_CHECK(); } }); } } void launch_grid_sampler_2d_backward_kernel( const TensorBase &grad_input, const TensorBase &grad_grid, const TensorBase &grad_output, const TensorBase &input, const TensorBase &grid, int64_t interpolation_mode, int64_t padding_mode, bool align_corners, std::array<bool,2> output_mask) { // See NOTE [ grid_sampler Native Functions ]. // Add checks here in case this is called instead of grid_sampler. 
check_grid_sampler_common(input, grid); check_grid_sampler_2d(input, grid); // See Note [Writing Nondeterministic Operations] // Nondeterministic because of atomicAdd usage globalContext().alertNotDeterministic("grid_sampler_2d_backward_cuda"); auto N = input.size(0); auto H = grid.size(1); auto W = grid.size(2); // If `input` gradient is not required, we skip computing it -- not needing to create // the tensor to hold the gradient can markedly increase performance. (`grid` gradient // is always computed.) auto input_requires_grad = output_mask[0]; int64_t count = N * H * W; if (count > 0) { AT_DISPATCH_FLOATING_TYPES_AND_HALF(input.scalar_type(), "grid_sampler_2d_backward_cuda", [&] { if (canUse32BitIndexMath(input) && canUse32BitIndexMath(grid) && canUse32BitIndexMath(grad_output)) { hipLaunchKernelGGL(( grid_sampler_2d_backward_kernel<scalar_t>) , dim3(GET_BLOCKS(count, 256)), dim3(256), 0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(), static_cast<int>(count), getTensorInfo<scalar_t, int>(grad_output), getTensorInfo<scalar_t, int>(input), getTensorInfo<scalar_t, int>(grid), input_requires_grad ? getTensorInfo<scalar_t, int>(grad_input) : TensorInfo<scalar_t, int>(), getTensorInfo<scalar_t, int>(grad_grid), static_cast<GridSamplerInterpolation>(interpolation_mode), static_cast<GridSamplerPadding>(padding_mode), align_corners, /*grad_input_memory_span =*/input_requires_grad ? static_cast<int>(grad_input.numel()) : 0, input_requires_grad); C10_HIP_KERNEL_LAUNCH_CHECK(); } else { hipLaunchKernelGGL(( grid_sampler_2d_backward_kernel<scalar_t>) , dim3(GET_BLOCKS(count, 256)), dim3(256), 0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(), count, getTensorInfo<scalar_t, int64_t>(grad_output), getTensorInfo<scalar_t, int64_t>(input), getTensorInfo<scalar_t, int64_t>(grid), input_requires_grad ? getTensorInfo<scalar_t, int64_t>(grad_input) : TensorInfo<scalar_t, int64_t>(), getTensorInfo<scalar_t, int64_t>(grad_grid), static_cast<GridSamplerInterpolation>(interpolation_mode), static_cast<GridSamplerPadding>(padding_mode), align_corners, /*grad_input_memory_span =*/input_requires_grad ? grad_input.numel() : 0, input_requires_grad); C10_HIP_KERNEL_LAUNCH_CHECK(); } }); } } void launch_grid_sampler_3d_backward_kernel( const TensorBase &grad_input, const TensorBase &grad_grid, const TensorBase& grad_output, const TensorBase& input, const TensorBase& grid, int64_t interpolation_mode, int64_t padding_mode, bool align_corners, std::array<bool,2> output_mask) { // See NOTE [ grid_sampler Native Functions ]. // Add checks here in case this is called instead of grid_sampler. 
check_grid_sampler_common(input, grid); check_grid_sampler_3d(input, grid, interpolation_mode); // See Note [Writing Nondeterministic Operations] // Nondeterministic because of atomicAdd usage globalContext().alertNotDeterministic("grid_sampler_3d_backward_cuda"); auto N = input.size(0); auto D = grid.size(1); auto H = grid.size(2); auto W = grid.size(3); int64_t count = N * D * H * W; auto input_requires_grad = output_mask[0]; if (count > 0) { AT_DISPATCH_FLOATING_TYPES_AND_HALF(input.scalar_type(), "grid_sampler_3d_backward_cuda", [&] { if (canUse32BitIndexMath(input) && canUse32BitIndexMath(grid) && canUse32BitIndexMath(grad_output)) { hipLaunchKernelGGL(( grid_sampler_3d_backward_kernel<scalar_t>) , dim3(GET_BLOCKS(count, 256)), dim3(256), 0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(), static_cast<int>(count), getTensorInfo<scalar_t, int>(grad_output), getTensorInfo<scalar_t, int>(input), getTensorInfo<scalar_t, int>(grid), input_requires_grad ? getTensorInfo<scalar_t, int>(grad_input) : TensorInfo<scalar_t, int>(), getTensorInfo<scalar_t, int>(grad_grid), static_cast<GridSamplerInterpolation>(interpolation_mode), static_cast<GridSamplerPadding>(padding_mode), align_corners, /*grad_input_memory_span =*/input_requires_grad ? static_cast<int>(grad_input.numel()) : 0, input_requires_grad); C10_HIP_KERNEL_LAUNCH_CHECK(); } else { hipLaunchKernelGGL(( grid_sampler_3d_backward_kernel<scalar_t>) , dim3(GET_BLOCKS(count, 256)), dim3(256), 0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(), count, getTensorInfo<scalar_t, int64_t>(grad_output), getTensorInfo<scalar_t, int64_t>(input), getTensorInfo<scalar_t, int64_t>(grid), input_requires_grad ? getTensorInfo<scalar_t, int64_t>(grad_input) : TensorInfo<scalar_t, int64_t>(), getTensorInfo<scalar_t, int64_t>(grad_grid), static_cast<GridSamplerInterpolation>(interpolation_mode), static_cast<GridSamplerPadding>(padding_mode), align_corners, /*grad_input_memory_span =*/input_requires_grad ? grad_input.numel() : 0, input_requires_grad); C10_HIP_KERNEL_LAUNCH_CHECK(); } }); } } } // namespace at::native
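The 2D kernels above derive their four bilinear weights from the north-west corner of the sampling cell, after mapping normalized grid coordinates into pixel space. Below is a small host-side sketch of that arithmetic; the struct and function names are illustrative, and the unnormalize formula paraphrases grid_sampler_unnormalize from the included GridSampler header rather than quoting it, so treat its exact form as an assumption.

#include <cmath>

struct BilinearWeights { float nw, ne, sw, se; };

// Map a grid coordinate in [-1, 1] to input pixel space (corner-aligned vs.
// pixel-center-aligned), mirroring what the kernels obtain from the header.
static float unnormalize(float coord, int size, bool align_corners) {
  return align_corners ? ((coord + 1.f) / 2.f) * (size - 1)
                       : ((coord + 1.f) * size - 1.f) / 2.f;
}

// Weights used in the Bilinear branch above: each is the area of the sub-rectangle
// opposite its corner, so the four always sum to 1.
static BilinearWeights bilinear_weights(float ix, float iy) {
  float tx = ix - std::floor(ix);
  float ty = iy - std::floor(iy);
  return { (1.f - tx) * (1.f - ty),   // nw
           tx * (1.f - ty),           // ne
           (1.f - tx) * ty,           // sw
           tx * ty };                 // se
}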
18bc14601499d62926912086aaabeacc71530f00.cu
#define TORCH_ASSERT_NO_OPERATORS #include <ATen/OpMathType.h> #include <ATen/native/cuda/GridSampler.h> #include <ATen/native/GridSamplerUtils.h> #include <ATen/native/cuda/GridSampler.cuh> #include <ATen/native/cuda/UpSample.cuh> #include <ATen/cuda/CUDAContext.h> #include <ATen/cuda/detail/TensorInfo.cuh> #include <ATen/cuda/detail/IndexUtils.cuh> #include <ATen/cuda/detail/KernelUtils.h> #include <ATen/core/TensorBase.h> #include <ATen/Dispatch.h> #include <c10/macros/Macros.h> #include <cmath> namespace at::native { using namespace at::cuda::detail; using at::native::detail::GridSamplerInterpolation; using at::native::detail::GridSamplerPadding; namespace { template <typename scalar_t, typename index_t> C10_LAUNCH_BOUNDS_1(256) __global__ void grid_sampler_2d_kernel( const index_t nthreads, TensorInfo<scalar_t, index_t> input, TensorInfo<scalar_t, index_t> grid, TensorInfo<scalar_t, index_t> output, const GridSamplerInterpolation interpolation_mode, const GridSamplerPadding padding_mode, bool align_corners) { using opmath_t = at::opmath_type<scalar_t>; index_t C = input.sizes[1]; index_t inp_H = input.sizes[2]; index_t inp_W = input.sizes[3]; index_t out_H = grid.sizes[1]; index_t out_W = grid.sizes[2]; index_t inp_sN = input.strides[0]; index_t inp_sC = input.strides[1]; index_t inp_sH = input.strides[2]; index_t inp_sW = input.strides[3]; index_t grid_sN = grid.strides[0]; index_t grid_sH = grid.strides[1]; index_t grid_sW = grid.strides[2]; index_t grid_sCoor = grid.strides[3]; index_t out_sN = output.strides[0]; index_t out_sC = output.strides[1]; index_t out_sH = output.strides[2]; index_t out_sW = output.strides[3]; CUDA_KERNEL_LOOP_TYPE(index, nthreads, index_t) { const index_t w = index % out_W; const index_t h = (index / out_W) % out_H; const index_t n = index / (out_H * out_W); const index_t grid_offset = n * grid_sN + h * grid_sH + w * grid_sW; // get the corresponding input x, y co-ordinates from grid scalar_t x = grid.data[grid_offset]; scalar_t y = grid.data[grid_offset + grid_sCoor]; opmath_t ix = grid_sampler_compute_source_index(x, inp_W, padding_mode, align_corners); opmath_t iy = grid_sampler_compute_source_index(y, inp_H, padding_mode, align_corners); if (interpolation_mode == GridSamplerInterpolation::Bilinear) { // get NE, NW, SE, SW pixel values from (x, y) index_t ix_nw = static_cast<index_t>(::floor(ix)); index_t iy_nw = static_cast<index_t>(::floor(iy)); index_t ix_ne = ix_nw + 1; index_t iy_ne = iy_nw; index_t ix_sw = ix_nw; index_t iy_sw = iy_nw + 1; index_t ix_se = ix_nw + 1; index_t iy_se = iy_nw + 1; // get surfaces to each neighbor: opmath_t nw = (ix_se - ix) * (iy_se - iy); opmath_t ne = (ix - ix_sw) * (iy_sw - iy); opmath_t sw = (ix_ne - ix) * (iy - iy_ne); opmath_t se = (ix - ix_nw) * (iy - iy_nw); // calculate bilinear weighted pixel value and set output pixel auto inp_ptr_NC = input.data + n * inp_sN; auto out_ptr_NCHW = output.data + n * out_sN + h * out_sH + w * out_sW; for (index_t c = 0; c < C; ++c, inp_ptr_NC += inp_sC, out_ptr_NCHW += out_sC) { opmath_t out_acc = 0; if (within_bounds_2d(iy_nw, ix_nw, inp_H, inp_W)) { out_acc += inp_ptr_NC[iy_nw * inp_sH + ix_nw * inp_sW] * nw; } if (within_bounds_2d(iy_ne, ix_ne, inp_H, inp_W)) { out_acc += inp_ptr_NC[iy_ne * inp_sH + ix_ne * inp_sW] * ne; } if (within_bounds_2d(iy_sw, ix_sw, inp_H, inp_W)) { out_acc += inp_ptr_NC[iy_sw * inp_sH + ix_sw * inp_sW] * sw; } if (within_bounds_2d(iy_se, ix_se, inp_H, inp_W)) { out_acc += inp_ptr_NC[iy_se * inp_sH + ix_se * inp_sW] * se; } *out_ptr_NCHW = out_acc; 
} } else if (interpolation_mode == GridSamplerInterpolation::Nearest) { index_t ix_nearest = static_cast<index_t>(std::nearbyint(ix)); index_t iy_nearest = static_cast<index_t>(std::nearbyint(iy)); // assign nearest neighor pixel value to output pixel auto inp_ptr_NC = input.data + n * inp_sN; auto out_ptr_NCHW = output.data + n * out_sN + h * out_sH + w * out_sW; for (index_t c = 0; c < C; ++c, inp_ptr_NC += inp_sC, out_ptr_NCHW += out_sC) { if (within_bounds_2d(iy_nearest, ix_nearest, inp_H, inp_W)) { *out_ptr_NCHW = inp_ptr_NC[iy_nearest * inp_sH + ix_nearest * inp_sW]; } else { *out_ptr_NCHW = static_cast<scalar_t>(0); } } } else if (interpolation_mode == GridSamplerInterpolation::Bicubic) { ix = grid_sampler_unnormalize(x, inp_W, align_corners); iy = grid_sampler_unnormalize(y, inp_H, align_corners); opmath_t ix_nw = std::floor(ix); opmath_t iy_nw = std::floor(iy); const opmath_t tx = ix - ix_nw; const opmath_t ty = iy - iy_nw; auto inp_ptr_NC = input.data + n * inp_sN; auto out_ptr_NCHW = output.data + n * out_sN + h * out_sH + w * out_sW; for (index_t c = 0; c < C; ++c, inp_ptr_NC += inp_sC, out_ptr_NCHW += out_sC) { opmath_t coefficients[4]; #pragma unroll 4 for (index_t i = 0; i < 4; ++i) { coefficients[i] = cubic_interp1d( get_value_bounded<scalar_t>(inp_ptr_NC, ix_nw - 1, iy_nw - 1 + i, inp_W, inp_H, inp_sW, inp_sH, padding_mode, align_corners), get_value_bounded<scalar_t>(inp_ptr_NC, ix_nw + 0, iy_nw - 1 + i, inp_W, inp_H, inp_sW, inp_sH, padding_mode, align_corners), get_value_bounded<scalar_t>(inp_ptr_NC, ix_nw + 1, iy_nw - 1 + i, inp_W, inp_H, inp_sW, inp_sH, padding_mode, align_corners), get_value_bounded<scalar_t>(inp_ptr_NC, ix_nw + 2, iy_nw - 1 + i, inp_W, inp_H, inp_sW, inp_sH, padding_mode, align_corners), tx); } *out_ptr_NCHW = cubic_interp1d( coefficients[0], coefficients[1], coefficients[2], coefficients[3], ty); } } } } template <typename scalar_t, typename index_t> C10_LAUNCH_BOUNDS_1(512) __global__ void grid_sampler_3d_kernel( const index_t nthreads, TensorInfo<scalar_t, index_t> input, TensorInfo<scalar_t, index_t> grid, TensorInfo<scalar_t, index_t> output, const GridSamplerInterpolation interpolation_mode, const GridSamplerPadding padding_mode, bool align_corners) { using opmath_t = at::opmath_type<scalar_t>; index_t C = input.sizes[1]; index_t inp_D = input.sizes[2]; index_t inp_H = input.sizes[3]; index_t inp_W = input.sizes[4]; index_t out_D = grid.sizes[1]; index_t out_H = grid.sizes[2]; index_t out_W = grid.sizes[3]; index_t inp_sN = input.strides[0]; index_t inp_sC = input.strides[1]; index_t inp_sD = input.strides[2]; index_t inp_sH = input.strides[3]; index_t inp_sW = input.strides[4]; index_t grid_sN = grid.strides[0]; index_t grid_sD = grid.strides[1]; index_t grid_sH = grid.strides[2]; index_t grid_sW = grid.strides[3]; index_t grid_sCoor = grid.strides[4]; index_t out_sN = output.strides[0]; index_t out_sC = output.strides[1]; index_t out_sD = output.strides[2]; index_t out_sH = output.strides[3]; index_t out_sW = output.strides[4]; CUDA_KERNEL_LOOP_TYPE(index, nthreads, index_t) { const index_t w = index % out_W; const index_t h = (index / out_W) % out_H; const index_t d = (index / (out_H * out_W)) % out_D; const index_t n = index / (out_D * out_H * out_W); const index_t grid_offset = n * grid_sN + d * grid_sD + h * grid_sH + w * grid_sW; // get the corresponding input x, y, z co-ordinates from grid scalar_t x = grid.data[grid_offset]; scalar_t y = grid.data[grid_offset + grid_sCoor]; scalar_t z = grid.data[grid_offset + 2 * grid_sCoor]; opmath_t 
ix = grid_sampler_compute_source_index(x, inp_W, padding_mode, align_corners); opmath_t iy = grid_sampler_compute_source_index(y, inp_H, padding_mode, align_corners); opmath_t iz = grid_sampler_compute_source_index(z, inp_D, padding_mode, align_corners); if (interpolation_mode == GridSamplerInterpolation::Bilinear) { // get corner pixel values from (x, y, z) // for 4d, we used north-east-south-west // for 5d, we add top-bottom index_t ix_tnw = static_cast<index_t>(::floor(ix)); index_t iy_tnw = static_cast<index_t>(::floor(iy)); index_t iz_tnw = static_cast<index_t>(::floor(iz)); index_t ix_tne = ix_tnw + 1; index_t iy_tne = iy_tnw; index_t iz_tne = iz_tnw; index_t ix_tsw = ix_tnw; index_t iy_tsw = iy_tnw + 1; index_t iz_tsw = iz_tnw; index_t ix_tse = ix_tnw + 1; index_t iy_tse = iy_tnw + 1; index_t iz_tse = iz_tnw; index_t ix_bnw = ix_tnw; index_t iy_bnw = iy_tnw; index_t iz_bnw = iz_tnw + 1; index_t ix_bne = ix_tnw + 1; index_t iy_bne = iy_tnw; index_t iz_bne = iz_tnw + 1; index_t ix_bsw = ix_tnw; index_t iy_bsw = iy_tnw + 1; index_t iz_bsw = iz_tnw + 1; index_t ix_bse = ix_tnw + 1; index_t iy_bse = iy_tnw + 1; index_t iz_bse = iz_tnw + 1; // get surfaces to each neighbor: opmath_t tnw = (ix_bse - ix) * (iy_bse - iy) * (iz_bse - iz); opmath_t tne = (ix - ix_bsw) * (iy_bsw - iy) * (iz_bsw - iz); opmath_t tsw = (ix_bne - ix) * (iy - iy_bne) * (iz_bne - iz); opmath_t tse = (ix - ix_bnw) * (iy - iy_bnw) * (iz_bnw - iz); opmath_t bnw = (ix_tse - ix) * (iy_tse - iy) * (iz - iz_tse); opmath_t bne = (ix - ix_tsw) * (iy_tsw - iy) * (iz - iz_tsw); opmath_t bsw = (ix_tne - ix) * (iy - iy_tne) * (iz - iz_tne); opmath_t bse = (ix - ix_tnw) * (iy - iy_tnw) * (iz - iz_tnw); auto inp_ptr_NC = input.data + n * inp_sN; auto out_ptr_NCDHW = output.data + n * out_sN + d * out_sD + h * out_sH + w * out_sW; for (index_t c = 0; c < C; ++c, inp_ptr_NC += inp_sC, out_ptr_NCDHW += out_sC) { // (c, iz_tnw, iy_tnw, ix_tnw) * tnw + (c, iz_tne, iy_tne, ix_tne) * tne // + (c, iz_tsw, iy_tsw, ix_tsw) * tsw + (c, iz_tse, iy_tse, ix_tse) * tse // + (c, iz_bnw, iy_bnw, ix_bnw) * bnw + (c, iz_bne, iy_bne, ix_bne) * bne // + (c, iz_bsw, iy_bsw, ix_bsw) * bsw + (c, iz_bse, iy_bse, ix_bse) * bse opmath_t out_acc = 0; if (within_bounds_3d(iz_tnw, iy_tnw, ix_tnw, inp_D, inp_H, inp_W)) { out_acc += inp_ptr_NC[iz_tnw * inp_sD + iy_tnw * inp_sH + ix_tnw * inp_sW] * tnw; } if (within_bounds_3d(iz_tne, iy_tne, ix_tne, inp_D, inp_H, inp_W)) { out_acc += inp_ptr_NC[iz_tne * inp_sD + iy_tne * inp_sH + ix_tne * inp_sW] * tne; } if (within_bounds_3d(iz_tsw, iy_tsw, ix_tsw, inp_D, inp_H, inp_W)) { out_acc += inp_ptr_NC[iz_tsw * inp_sD + iy_tsw * inp_sH + ix_tsw * inp_sW] * tsw; } if (within_bounds_3d(iz_tse, iy_tse, ix_tse, inp_D, inp_H, inp_W)) { out_acc += inp_ptr_NC[iz_tse * inp_sD + iy_tse * inp_sH + ix_tse * inp_sW] * tse; } if (within_bounds_3d(iz_bnw, iy_bnw, ix_bnw, inp_D, inp_H, inp_W)) { out_acc += inp_ptr_NC[iz_bnw * inp_sD + iy_bnw * inp_sH + ix_bnw * inp_sW] * bnw; } if (within_bounds_3d(iz_bne, iy_bne, ix_bne, inp_D, inp_H, inp_W)) { out_acc += inp_ptr_NC[iz_bne * inp_sD + iy_bne * inp_sH + ix_bne * inp_sW] * bne; } if (within_bounds_3d(iz_bsw, iy_bsw, ix_bsw, inp_D, inp_H, inp_W)) { out_acc += inp_ptr_NC[iz_bsw * inp_sD + iy_bsw * inp_sH + ix_bsw * inp_sW] * bsw; } if (within_bounds_3d(iz_bse, iy_bse, ix_bse, inp_D, inp_H, inp_W)) { out_acc += inp_ptr_NC[iz_bse * inp_sD + iy_bse * inp_sH + ix_bse * inp_sW] * bse; } *out_ptr_NCDHW = out_acc; } } else if (interpolation_mode == GridSamplerInterpolation::Nearest) { index_t 
ix_nearest = static_cast<index_t>(std::round(ix)); index_t iy_nearest = static_cast<index_t>(std::round(iy)); index_t iz_nearest = static_cast<index_t>(std::round(iz)); // assign nearest neighor pixel value to output pixel auto inp_ptr_NC = input.data + n * inp_sN; auto out_ptr_NCDHW = output.data + n * out_sN + d * out_sD + h * out_sH + w * out_sW; for (index_t c = 0; c < C; ++c, inp_ptr_NC += inp_sC, out_ptr_NCDHW += out_sC) { if (within_bounds_3d(iz_nearest, iy_nearest, ix_nearest, inp_D, inp_H, inp_W)) { *out_ptr_NCDHW = inp_ptr_NC[iz_nearest * inp_sD + iy_nearest * inp_sH + ix_nearest * inp_sW]; } else { *out_ptr_NCDHW = static_cast<scalar_t>(0); } } } } } // Note [Passing pointer and offset to fastAtomicAdd] // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ // For its internal bounds checking, fastAtomicAdd needs to know where the destination address // lies relative to the entire tensor, so we pass the base grad_input.data and full offset information, // including batch * channel offset (NC_offset). template <typename scalar_t, typename index_t> C10_LAUNCH_BOUNDS_1(256) __global__ void grid_sampler_2d_backward_kernel( const index_t nthreads, TensorInfo<scalar_t, index_t> grad_output, TensorInfo<scalar_t, index_t> input, TensorInfo<scalar_t, index_t> grid, TensorInfo<scalar_t, index_t> grad_input, // initialized to zeros (or unused if input_requires_grad is false) TensorInfo<scalar_t, index_t> grad_grid, // initialized to empty const GridSamplerInterpolation interpolation_mode, const GridSamplerPadding padding_mode, bool align_corners, const index_t grad_input_memory_span, const bool input_requires_grad) { index_t C = input.sizes[1]; index_t inp_H = input.sizes[2]; index_t inp_W = input.sizes[3]; index_t out_H = grid.sizes[1]; index_t out_W = grid.sizes[2]; index_t inp_sN = input.strides[0]; index_t inp_sC = input.strides[1]; index_t inp_sH = input.strides[2]; index_t inp_sW = input.strides[3]; index_t grid_sN = grid.strides[0]; index_t grid_sH = grid.strides[1]; index_t grid_sW = grid.strides[2]; index_t grid_sCoor = grid.strides[3]; index_t gOut_sN = grad_output.strides[0]; index_t gOut_sC = grad_output.strides[1]; index_t gOut_sH = grad_output.strides[2]; index_t gOut_sW = grad_output.strides[3]; // gInp_* (and NC_offset below) are not really needed if input_requires_grad is false. 
index_t gInp_sN; index_t gInp_sC; index_t gInp_sH; index_t gInp_sW; if (input_requires_grad) { gInp_sN = grad_input.strides[0]; gInp_sC = grad_input.strides[1]; gInp_sH = grad_input.strides[2]; gInp_sW = grad_input.strides[3]; } index_t gGrid_sW = grad_grid.strides[2]; CUDA_KERNEL_LOOP_TYPE(index, nthreads, index_t) { const index_t w = index % out_W; const index_t h = (index / out_W) % out_H; const index_t n = index / (out_H * out_W); const auto grid_offset = n * grid_sN + h * grid_sH + w * grid_sW; // get the corresponding input x, y co-ordinates from grid scalar_t x = grid.data[grid_offset]; scalar_t y = grid.data[grid_offset + grid_sCoor]; // multipliers for gradients on ix and iy scalar_t gix_mult, giy_mult; scalar_t ix = grid_sampler_compute_source_index_set_grad(x, inp_W, padding_mode, align_corners, &gix_mult); scalar_t iy = grid_sampler_compute_source_index_set_grad(y, inp_H, padding_mode, align_corners, &giy_mult); if (interpolation_mode == GridSamplerInterpolation::Bilinear) { // get NE, NW, SE, SW pixel values from (x, y) index_t ix_nw = static_cast<index_t>(std::floor(ix)); index_t iy_nw = static_cast<index_t>(std::floor(iy)); index_t ix_ne = ix_nw + 1; index_t iy_ne = iy_nw; index_t ix_sw = ix_nw; index_t iy_sw = iy_nw + 1; index_t ix_se = ix_nw + 1; index_t iy_se = iy_nw + 1; // get surfaces to each neighbor: scalar_t nw = (ix_se - ix) * (iy_se - iy); scalar_t ne = (ix - ix_sw) * (iy_sw - iy); scalar_t sw = (ix_ne - ix) * (iy - iy_ne); scalar_t se = (ix - ix_nw) * (iy - iy_nw); scalar_t gix = static_cast<scalar_t>(0), giy = static_cast<scalar_t>(0); scalar_t *gOut_ptr_NCHW = grad_output.data + n * gOut_sN + h * gOut_sH + w * gOut_sW; index_t NC_offset = n * gInp_sN; scalar_t *inp_ptr_NC = input.data + n * inp_sN; for (index_t c = 0; c < C; ++c, inp_ptr_NC += inp_sC, NC_offset += gInp_sC, gOut_ptr_NCHW += gOut_sC) { scalar_t gOut = *gOut_ptr_NCHW; if (input_requires_grad) { // calculate and set grad_input. See Note [Passing pointer and offset to fastAtomicAdd]. safe_add_2d(grad_input.data, iy_nw, ix_nw, gInp_sH, gInp_sW, inp_H, inp_W, nw * gOut, NC_offset, grad_input_memory_span); safe_add_2d(grad_input.data, iy_ne, ix_ne, gInp_sH, gInp_sW, inp_H, inp_W, ne * gOut, NC_offset, grad_input_memory_span); safe_add_2d(grad_input.data, iy_sw, ix_sw, gInp_sH, gInp_sW, inp_H, inp_W, sw * gOut, NC_offset, grad_input_memory_span); safe_add_2d(grad_input.data, iy_se, ix_se, gInp_sH, gInp_sW, inp_H, inp_W, se * gOut, NC_offset, grad_input_memory_span); } // calculate grad_grid if (within_bounds_2d(iy_nw, ix_nw, inp_H, inp_W)) { scalar_t nw_val = inp_ptr_NC[iy_nw * inp_sH + ix_nw * inp_sW]; gix -= nw_val * (iy_se - iy) * gOut; giy -= nw_val * (ix_se - ix) * gOut; } if (within_bounds_2d(iy_ne, ix_ne, inp_H, inp_W)) { scalar_t ne_val = inp_ptr_NC[iy_ne * inp_sH + ix_ne * inp_sW]; gix += ne_val * (iy_sw - iy) * gOut; giy -= ne_val * (ix - ix_sw) * gOut; } if (within_bounds_2d(iy_sw, ix_sw, inp_H, inp_W)) { scalar_t sw_val = inp_ptr_NC[iy_sw * inp_sH + ix_sw * inp_sW]; gix -= sw_val * (iy - iy_ne) * gOut; giy += sw_val * (ix_ne - ix) * gOut; } if (within_bounds_2d(iy_se, ix_se, inp_H, inp_W)) { scalar_t se_val = inp_ptr_NC[iy_se * inp_sH + ix_se * inp_sW]; gix += se_val * (iy - iy_nw) * gOut; giy += se_val * (ix - ix_nw) * gOut; } } // assuming grad_grid is contiguous // thus we can // 1. use index with gGrid_sW to directly compute gGrid_ptr_NHW // 2. 
directly assign to gGrid_ptr_NHW[0], gGrid_ptr_NHW[1] scalar_t *gGrid_ptr_NHW = grad_grid.data + index * gGrid_sW; gGrid_ptr_NHW[0] = gix_mult * gix; gGrid_ptr_NHW[1] = giy_mult * giy; } else if (interpolation_mode == GridSamplerInterpolation::Nearest) { if (input_requires_grad) { index_t ix_nearest = static_cast<index_t>(std::round(ix)); index_t iy_nearest = static_cast<index_t>(std::round(iy)); // assign nearest neighor pixel value to output pixel scalar_t *gOut_ptr_NCHW = grad_output.data + n * gOut_sN + h * gOut_sH + w * gOut_sW; index_t NC_offset = n * gInp_sN; for (index_t c = 0; c < C; ++c, NC_offset += gInp_sC, gOut_ptr_NCHW += gOut_sC) { // calculate and set grad_input. See Note [Passing pointer and offset to fastAtomicAdd]. safe_add_2d(grad_input.data, iy_nearest, ix_nearest, gInp_sH, gInp_sW, inp_H, inp_W, *gOut_ptr_NCHW, NC_offset, grad_input_memory_span); } } // assuming grad_grid is contiguous // thus we can // 1. use index with gGrid_sW to directly compute gGrid_ptr_NHW // 2. directly assign to gGrid_ptr_NHW[0], gGrid_ptr_NHW[1] scalar_t *gGrid_ptr_NHW = grad_grid.data + index * gGrid_sW; gGrid_ptr_NHW[0] = static_cast<scalar_t>(0); gGrid_ptr_NHW[1] = static_cast<scalar_t>(0); } else if (interpolation_mode == GridSamplerInterpolation::Bicubic) { ix = grid_sampler_unnormalize_set_grad(x, inp_W, align_corners, &gix_mult); iy = grid_sampler_unnormalize_set_grad(y, inp_H, align_corners, &giy_mult); scalar_t ix_nw = std::floor(ix); scalar_t iy_nw = std::floor(iy); const scalar_t tx = ix - ix_nw; const scalar_t ty = iy - iy_nw; scalar_t x_coeffs[4]; scalar_t y_coeffs[4]; scalar_t x_coeffs_grad[4]; scalar_t y_coeffs_grad[4]; get_cubic_upsampling_coefficients<scalar_t>(x_coeffs, tx); get_cubic_upsampling_coefficients<scalar_t>(y_coeffs, ty); get_cubic_coefficients_grad<scalar_t>(x_coeffs_grad, tx); get_cubic_coefficients_grad<scalar_t>(y_coeffs_grad, ty); scalar_t gix = static_cast<scalar_t>(0); scalar_t giy = static_cast<scalar_t>(0); scalar_t *gOut_ptr_NCHW = grad_output.data + n * gOut_sN + h * gOut_sH + w * gOut_sW; index_t NC_offset = n * gInp_sN; scalar_t *inp_ptr_NC = input.data + n * inp_sN; for (index_t c = 0; c < C; ++c, gOut_ptr_NCHW += gOut_sC, NC_offset += gInp_sC, inp_ptr_NC+= inp_sC) { scalar_t gOut = *gOut_ptr_NCHW; #pragma unroll 4 for (index_t i = 0; i < 4; ++i) { #pragma unroll 4 for (index_t j = 0; j < 4; ++j) { if (input_requires_grad) { // set input gradient. See Note [Passing pointer and offset to fastAtomicAdd]. 
add_value_bounded<scalar_t>(grad_input.data, ix_nw - 1 + i, iy_nw - 1 + j, inp_W, inp_H, gInp_sW, gInp_sH, gOut * x_coeffs[i] * y_coeffs[j], padding_mode, align_corners, NC_offset, grad_input_memory_span); } // set grid gradient scalar_t val = get_value_bounded<scalar_t>(inp_ptr_NC, ix_nw - 1 + i, iy_nw - 1 + j, inp_W, inp_H, inp_sW, inp_sH, padding_mode, align_corners); gix -= val * x_coeffs_grad[i] * y_coeffs[j] * gOut; giy -= val * y_coeffs_grad[j] * x_coeffs[i] * gOut; } } } scalar_t *gGrid_ptr_NHW = grad_grid.data + index * gGrid_sW; gGrid_ptr_NHW[0] = gix_mult * gix; gGrid_ptr_NHW[1] = giy_mult * giy; } } } template <typename scalar_t, typename index_t> C10_LAUNCH_BOUNDS_1(256) __global__ void grid_sampler_3d_backward_kernel( const index_t nthreads, TensorInfo<scalar_t, index_t> grad_output, TensorInfo<scalar_t, index_t> input, TensorInfo<scalar_t, index_t> grid, TensorInfo<scalar_t, index_t> grad_input, // initialized to zeros (or unused if input_requires_grad is false) TensorInfo<scalar_t, index_t> grad_grid, // initialized to empty const GridSamplerInterpolation interpolation_mode, const GridSamplerPadding padding_mode, bool align_corners, const index_t grad_input_memory_span, const bool input_requires_grad) { index_t C = input.sizes[1]; index_t inp_D = input.sizes[2]; index_t inp_H = input.sizes[3]; index_t inp_W = input.sizes[4]; index_t out_D = grid.sizes[1]; index_t out_H = grid.sizes[2]; index_t out_W = grid.sizes[3]; index_t inp_sN = input.strides[0]; index_t inp_sC = input.strides[1]; index_t inp_sD = input.strides[2]; index_t inp_sH = input.strides[3]; index_t inp_sW = input.strides[4]; index_t grid_sN = grid.strides[0]; index_t grid_sD = grid.strides[1]; index_t grid_sH = grid.strides[2]; index_t grid_sW = grid.strides[3]; index_t grid_sCoor = grid.strides[4]; index_t gOut_sN = grad_output.strides[0]; index_t gOut_sC = grad_output.strides[1]; index_t gOut_sD = grad_output.strides[2]; index_t gOut_sH = grad_output.strides[3]; index_t gOut_sW = grad_output.strides[4]; // gInp_* (and NC_offset below) are not really needed if input_requires_grad is false. 
int64_t gInp_sN = 0; int64_t gInp_sC = 0; int64_t gInp_sD = 0; int64_t gInp_sH = 0; int64_t gInp_sW = 0; if (input_requires_grad) { gInp_sN = grad_input.strides[0]; gInp_sC = grad_input.strides[1]; gInp_sD = grad_input.strides[2]; gInp_sH = grad_input.strides[3]; gInp_sW = grad_input.strides[4]; } index_t gGrid_sW = grad_grid.strides[3]; CUDA_KERNEL_LOOP_TYPE(index, nthreads, index_t) { const index_t w = index % out_W; const index_t h = (index / out_W) % out_H; const index_t d = (index / (out_H * out_W)) % out_D; const index_t n = index / (out_D * out_H * out_W); const auto grid_offset = n * grid_sN + d * grid_sD + h * grid_sH + w * grid_sW; // get the corresponding input x, y, z co-ordinates from grid scalar_t ix = grid.data[grid_offset]; scalar_t iy = grid.data[grid_offset + grid_sCoor]; scalar_t iz = grid.data[grid_offset + 2 * grid_sCoor]; // multipliers for gradients on ix, iy, and iz scalar_t gix_mult, giy_mult, giz_mult; ix = grid_sampler_compute_source_index_set_grad(ix, inp_W, padding_mode, align_corners, &gix_mult); iy = grid_sampler_compute_source_index_set_grad(iy, inp_H, padding_mode, align_corners, &giy_mult); iz = grid_sampler_compute_source_index_set_grad(iz, inp_D, padding_mode, align_corners, &giz_mult); if (interpolation_mode == GridSamplerInterpolation::Bilinear) { // get corner pixel values from (x, y, z) // for 4d, we used north-east-south-west // for 5d, we add top-bottom index_t ix_tnw = static_cast<index_t>(std::floor(ix)); index_t iy_tnw = static_cast<index_t>(std::floor(iy)); index_t iz_tnw = static_cast<index_t>(std::floor(iz)); index_t ix_tne = ix_tnw + 1; index_t iy_tne = iy_tnw; index_t iz_tne = iz_tnw; index_t ix_tsw = ix_tnw; index_t iy_tsw = iy_tnw + 1; index_t iz_tsw = iz_tnw; index_t ix_tse = ix_tnw + 1; index_t iy_tse = iy_tnw + 1; index_t iz_tse = iz_tnw; index_t ix_bnw = ix_tnw; index_t iy_bnw = iy_tnw; index_t iz_bnw = iz_tnw + 1; index_t ix_bne = ix_tnw + 1; index_t iy_bne = iy_tnw; index_t iz_bne = iz_tnw + 1; index_t ix_bsw = ix_tnw; index_t iy_bsw = iy_tnw + 1; index_t iz_bsw = iz_tnw + 1; index_t ix_bse = ix_tnw + 1; index_t iy_bse = iy_tnw + 1; index_t iz_bse = iz_tnw + 1; // get surfaces to each neighbor: scalar_t tnw = (ix_bse - ix) * (iy_bse - iy) * (iz_bse - iz); scalar_t tne = (ix - ix_bsw) * (iy_bsw - iy) * (iz_bsw - iz); scalar_t tsw = (ix_bne - ix) * (iy - iy_bne) * (iz_bne - iz); scalar_t tse = (ix - ix_bnw) * (iy - iy_bnw) * (iz_bnw - iz); scalar_t bnw = (ix_tse - ix) * (iy_tse - iy) * (iz - iz_tse); scalar_t bne = (ix - ix_tsw) * (iy_tsw - iy) * (iz - iz_tsw); scalar_t bsw = (ix_tne - ix) * (iy - iy_tne) * (iz - iz_tne); scalar_t bse = (ix - ix_tnw) * (iy - iy_tnw) * (iz - iz_tnw); scalar_t gix = static_cast<scalar_t>(0), giy = static_cast<scalar_t>(0), giz = static_cast<scalar_t>(0); scalar_t *gOut_ptr_NCDHW = grad_output.data + n * gOut_sN + d * gOut_sD + h * gOut_sH + w * gOut_sW; index_t NC_offset; if (input_requires_grad) { NC_offset = n * gInp_sN; } scalar_t *inp_ptr_NC = input.data + n * inp_sN; // calculate bilinear weighted pixel value and set output pixel for (index_t c = 0; c < C; ++c, gOut_ptr_NCDHW += gOut_sC, NC_offset += gInp_sC, inp_ptr_NC += inp_sC) { scalar_t gOut = *gOut_ptr_NCDHW; // calculate and set grad_input. See Note [Passing pointer and offset to fastAtomicAdd]. 
if (input_requires_grad) { safe_add_3d(grad_input.data, iz_tnw, iy_tnw, ix_tnw, gInp_sD, gInp_sH, gInp_sW, inp_D, inp_H, inp_W, tnw * gOut, NC_offset, grad_input_memory_span); safe_add_3d(grad_input.data, iz_tne, iy_tne, ix_tne, gInp_sD, gInp_sH, gInp_sW, inp_D, inp_H, inp_W, tne * gOut, NC_offset, grad_input_memory_span); safe_add_3d(grad_input.data, iz_tsw, iy_tsw, ix_tsw, gInp_sD, gInp_sH, gInp_sW, inp_D, inp_H, inp_W, tsw * gOut, NC_offset, grad_input_memory_span); safe_add_3d(grad_input.data, iz_tse, iy_tse, ix_tse, gInp_sD, gInp_sH, gInp_sW, inp_D, inp_H, inp_W, tse * gOut, NC_offset, grad_input_memory_span); safe_add_3d(grad_input.data, iz_bnw, iy_bnw, ix_bnw, gInp_sD, gInp_sH, gInp_sW, inp_D, inp_H, inp_W, bnw * gOut, NC_offset, grad_input_memory_span); safe_add_3d(grad_input.data, iz_bne, iy_bne, ix_bne, gInp_sD, gInp_sH, gInp_sW, inp_D, inp_H, inp_W, bne * gOut, NC_offset, grad_input_memory_span); safe_add_3d(grad_input.data, iz_bsw, iy_bsw, ix_bsw, gInp_sD, gInp_sH, gInp_sW, inp_D, inp_H, inp_W, bsw * gOut, NC_offset, grad_input_memory_span); safe_add_3d(grad_input.data, iz_bse, iy_bse, ix_bse, gInp_sD, gInp_sH, gInp_sW, inp_D, inp_H, inp_W, bse * gOut, NC_offset, grad_input_memory_span); } // calculate grad_grid if (within_bounds_3d(iz_tnw, iy_tnw, ix_tnw, inp_D, inp_H, inp_W)) { scalar_t tnw_val = inp_ptr_NC[iz_tnw * inp_sD + iy_tnw * inp_sH + ix_tnw * inp_sW]; gix -= tnw_val * (iy_bse - iy) * (iz_bse - iz) * gOut; giy -= tnw_val * (ix_bse - ix) * (iz_bse - iz) * gOut; giz -= tnw_val * (ix_bse - ix) * (iy_bse - iy) * gOut; } if (within_bounds_3d(iz_tne, iy_tne, ix_tne, inp_D, inp_H, inp_W)) { scalar_t tne_val = inp_ptr_NC[iz_tne * inp_sD + iy_tne * inp_sH + ix_tne * inp_sW]; gix += tne_val * (iy_bsw - iy) * (iz_bsw - iz) * gOut; giy -= tne_val * (ix - ix_bsw) * (iz_bsw - iz) * gOut; giz -= tne_val * (ix - ix_bsw) * (iy_bsw - iy) * gOut; } if (within_bounds_3d(iz_tsw, iy_tsw, ix_tsw, inp_D, inp_H, inp_W)) { scalar_t tsw_val = inp_ptr_NC[iz_tsw * inp_sD + iy_tsw * inp_sH + ix_tsw * inp_sW]; gix -= tsw_val * (iy - iy_bne) * (iz_bne - iz) * gOut; giy += tsw_val * (ix_bne - ix) * (iz_bne - iz) * gOut; giz -= tsw_val * (ix_bne - ix) * (iy - iy_bne) * gOut; } if (within_bounds_3d(iz_tse, iy_tse, ix_tse, inp_D, inp_H, inp_W)) { scalar_t tse_val = inp_ptr_NC[iz_tse * inp_sD + iy_tse * inp_sH + ix_tse * inp_sW]; gix += tse_val * (iy - iy_bnw) * (iz_bnw - iz) * gOut; giy += tse_val * (ix - ix_bnw) * (iz_bnw - iz) * gOut; giz -= tse_val * (ix - ix_bnw) * (iy - iy_bnw) * gOut; } if (within_bounds_3d(iz_bnw, iy_bnw, ix_bnw, inp_D, inp_H, inp_W)) { scalar_t bnw_val = inp_ptr_NC[iz_bnw * inp_sD + iy_bnw * inp_sH + ix_bnw * inp_sW]; gix -= bnw_val * (iy_tse - iy) * (iz - iz_tse) * gOut; giy -= bnw_val * (ix_tse - ix) * (iz - iz_tse) * gOut; giz += bnw_val * (ix_tse - ix) * (iy_tse - iy) * gOut; } if (within_bounds_3d(iz_bne, iy_bne, ix_bne, inp_D, inp_H, inp_W)) { scalar_t bne_val = inp_ptr_NC[iz_bne * inp_sD + iy_bne * inp_sH + ix_bne * inp_sW]; gix += bne_val * (iy_tsw - iy) * (iz - iz_tsw) * gOut; giy -= bne_val * (ix - ix_tsw) * (iz - iz_tsw) * gOut; giz += bne_val * (ix - ix_tsw) * (iy_tsw - iy) * gOut; } if (within_bounds_3d(iz_bsw, iy_bsw, ix_bsw, inp_D, inp_H, inp_W)) { scalar_t bsw_val = inp_ptr_NC[iz_bsw * inp_sD + iy_bsw * inp_sH + ix_bsw * inp_sW]; gix -= bsw_val * (iy - iy_tne) * (iz - iz_tne) * gOut; giy += bsw_val * (ix_tne - ix) * (iz - iz_tne) * gOut; giz += bsw_val * (ix_tne - ix) * (iy - iy_tne) * gOut; } if (within_bounds_3d(iz_bse, iy_bse, ix_bse, inp_D, inp_H, inp_W)) { 
scalar_t bse_val = inp_ptr_NC[iz_bse * inp_sD + iy_bse * inp_sH + ix_bse * inp_sW]; gix += bse_val * (iy - iy_tnw) * (iz - iz_tnw) * gOut; giy += bse_val * (ix - ix_tnw) * (iz - iz_tnw) * gOut; giz += bse_val * (ix - ix_tnw) * (iy - iy_tnw) * gOut; } } // assuming grad_grid is contiguous // thus we can // 1. use index with gGrid_sW to directly compute gGrid_ptr_NDHW // 2. directly assign to gGrid_ptr_NDHW[0], gGrid_ptr_NDHW[1], gGrid_ptr_NDHW[2] scalar_t *gGrid_ptr_NDHW = grad_grid.data + index * gGrid_sW; gGrid_ptr_NDHW[0] = gix_mult * gix; gGrid_ptr_NDHW[1] = giy_mult * giy; gGrid_ptr_NDHW[2] = giz_mult * giz; } else if (interpolation_mode == GridSamplerInterpolation::Nearest) { if (input_requires_grad) { auto ix_nearest = static_cast<index_t>(std::round(ix)); auto iy_nearest = static_cast<index_t>(std::round(iy)); auto iz_nearest = static_cast<index_t>(std::round(iz)); // assign nearest neighor pixel value to output pixel scalar_t *gOut_ptr_NCDHW = grad_output.data + n * gOut_sN + d * gOut_sD + h * gOut_sH + w * gOut_sW; index_t NC_offset = n * gInp_sN; for (index_t c = 0; c < C; ++c, gOut_ptr_NCDHW += gOut_sC, NC_offset += gInp_sC) { // calculate and set grad_input. See Note [Passing pointer and offset to fastAtomicAdd]. safe_add_3d(grad_input.data, iz_nearest, iy_nearest, ix_nearest, gInp_sD, gInp_sH, gInp_sW, inp_D, inp_H, inp_W, *gOut_ptr_NCDHW, NC_offset, grad_input_memory_span); } } // assuming grad_grid is contiguous // thus we can // 1. use index with gGrid_sW to directly compute gGrid_ptr_NDHW // 2. directly assign to gGrid_ptr_NDHW[0], gGrid_ptr_NDHW[1], gGrid_ptr_NDHW[2] scalar_t *gGrid_ptr_NDHW = grad_grid.data + index * gGrid_sW; gGrid_ptr_NDHW[0] = static_cast<scalar_t>(0); gGrid_ptr_NDHW[1] = static_cast<scalar_t>(0); gGrid_ptr_NDHW[2] = static_cast<scalar_t>(0); } } } } // namespace void launch_grid_sampler_2d_forward_kernel( const TensorBase &output, const TensorBase &input, const TensorBase &grid, int64_t interpolation_mode, int64_t padding_mode, bool align_corners) { // See NOTE [ grid_sampler Native Functions ]. // Add checks here in case this is called instead of grid_sampler. check_grid_sampler_common(input, grid); check_grid_sampler_2d(input, grid); auto N = input.size(0); auto H = grid.size(1); auto W = grid.size(2); int64_t count = N * H * W; if (count > 0) { AT_DISPATCH_FLOATING_TYPES_AND_HALF(input.scalar_type(), "grid_sampler_2d_cuda", [&] { if (canUse32BitIndexMath(input) && canUse32BitIndexMath(grid) && canUse32BitIndexMath(output)) { grid_sampler_2d_kernel<scalar_t> <<<GET_BLOCKS(count, 256), 256, 0, at::cuda::getCurrentCUDAStream()>>>( static_cast<int>(count), getTensorInfo<scalar_t, int>(input), getTensorInfo<scalar_t, int>(grid), getTensorInfo<scalar_t, int>(output), static_cast<GridSamplerInterpolation>(interpolation_mode), static_cast<GridSamplerPadding>(padding_mode), align_corners); C10_CUDA_KERNEL_LAUNCH_CHECK(); } else { grid_sampler_2d_kernel<scalar_t> <<<GET_BLOCKS(count, 256), 256, 0, at::cuda::getCurrentCUDAStream()>>>( count, getTensorInfo<scalar_t, int64_t>(input), getTensorInfo<scalar_t, int64_t>(grid), getTensorInfo<scalar_t, int64_t>(output), static_cast<GridSamplerInterpolation>(interpolation_mode), static_cast<GridSamplerPadding>(padding_mode), align_corners); C10_CUDA_KERNEL_LAUNCH_CHECK(); } }); } } void launch_grid_sampler_3d_forward_kernel( const TensorBase &output, const TensorBase &input, const TensorBase &grid, int64_t interpolation_mode, int64_t padding_mode, bool align_corners) { // See NOTE [ grid_sampler Native Functions ]. 
// Add checks here in case this is called instead of grid_sampler. check_grid_sampler_common(input, grid); check_grid_sampler_3d(input, grid, interpolation_mode); auto N = input.size(0); auto D = grid.size(1); auto H = grid.size(2); auto W = grid.size(3); int64_t count = N * D * H * W; if (count > 0) { AT_DISPATCH_FLOATING_TYPES_AND_HALF(input.scalar_type(), "grid_sampler_3d_cuda", [&] { if (canUse32BitIndexMath(input) && canUse32BitIndexMath(grid) && canUse32BitIndexMath(output)) { grid_sampler_3d_kernel<scalar_t> <<<GET_BLOCKS(count, 512), 512, 0, at::cuda::getCurrentCUDAStream()>>>( static_cast<int>(count), getTensorInfo<scalar_t, int>(input), getTensorInfo<scalar_t, int>(grid), getTensorInfo<scalar_t, int>(output), static_cast<GridSamplerInterpolation>(interpolation_mode), static_cast<GridSamplerPadding>(padding_mode), align_corners); C10_CUDA_KERNEL_LAUNCH_CHECK(); } else { grid_sampler_3d_kernel<scalar_t> <<<GET_BLOCKS(count, 512), 512, 0, at::cuda::getCurrentCUDAStream()>>>( count, getTensorInfo<scalar_t, int64_t>(input), getTensorInfo<scalar_t, int64_t>(grid), getTensorInfo<scalar_t, int64_t>(output), static_cast<GridSamplerInterpolation>(interpolation_mode), static_cast<GridSamplerPadding>(padding_mode), align_corners); C10_CUDA_KERNEL_LAUNCH_CHECK(); } }); } } void launch_grid_sampler_2d_backward_kernel( const TensorBase &grad_input, const TensorBase &grad_grid, const TensorBase &grad_output, const TensorBase &input, const TensorBase &grid, int64_t interpolation_mode, int64_t padding_mode, bool align_corners, std::array<bool,2> output_mask) { // See NOTE [ grid_sampler Native Functions ]. // Add checks here in case this is called instead of grid_sampler. check_grid_sampler_common(input, grid); check_grid_sampler_2d(input, grid); // See Note [Writing Nondeterministic Operations] // Nondeterministic because of atomicAdd usage globalContext().alertNotDeterministic("grid_sampler_2d_backward_cuda"); auto N = input.size(0); auto H = grid.size(1); auto W = grid.size(2); // If `input` gradient is not required, we skip computing it -- not needing to create // the tensor to hold the gradient can markedly increase performance. (`grid` gradient // is always computed.) auto input_requires_grad = output_mask[0]; int64_t count = N * H * W; if (count > 0) { AT_DISPATCH_FLOATING_TYPES_AND_HALF(input.scalar_type(), "grid_sampler_2d_backward_cuda", [&] { if (canUse32BitIndexMath(input) && canUse32BitIndexMath(grid) && canUse32BitIndexMath(grad_output)) { grid_sampler_2d_backward_kernel<scalar_t> <<<GET_BLOCKS(count, 256), 256, 0, at::cuda::getCurrentCUDAStream()>>>( static_cast<int>(count), getTensorInfo<scalar_t, int>(grad_output), getTensorInfo<scalar_t, int>(input), getTensorInfo<scalar_t, int>(grid), input_requires_grad ? getTensorInfo<scalar_t, int>(grad_input) : TensorInfo<scalar_t, int>(), getTensorInfo<scalar_t, int>(grad_grid), static_cast<GridSamplerInterpolation>(interpolation_mode), static_cast<GridSamplerPadding>(padding_mode), align_corners, /*grad_input_memory_span =*/input_requires_grad ? static_cast<int>(grad_input.numel()) : 0, input_requires_grad); C10_CUDA_KERNEL_LAUNCH_CHECK(); } else { grid_sampler_2d_backward_kernel<scalar_t> <<<GET_BLOCKS(count, 256), 256, 0, at::cuda::getCurrentCUDAStream()>>>( count, getTensorInfo<scalar_t, int64_t>(grad_output), getTensorInfo<scalar_t, int64_t>(input), getTensorInfo<scalar_t, int64_t>(grid), input_requires_grad ? 
getTensorInfo<scalar_t, int64_t>(grad_input) : TensorInfo<scalar_t, int64_t>(), getTensorInfo<scalar_t, int64_t>(grad_grid), static_cast<GridSamplerInterpolation>(interpolation_mode), static_cast<GridSamplerPadding>(padding_mode), align_corners, /*grad_input_memory_span =*/input_requires_grad ? grad_input.numel() : 0, input_requires_grad); C10_CUDA_KERNEL_LAUNCH_CHECK(); } }); } } void launch_grid_sampler_3d_backward_kernel( const TensorBase &grad_input, const TensorBase &grad_grid, const TensorBase& grad_output, const TensorBase& input, const TensorBase& grid, int64_t interpolation_mode, int64_t padding_mode, bool align_corners, std::array<bool,2> output_mask) { // See NOTE [ grid_sampler Native Functions ]. // Add checks here in case this is called instead of grid_sampler. check_grid_sampler_common(input, grid); check_grid_sampler_3d(input, grid, interpolation_mode); // See Note [Writing Nondeterministic Operations] // Nondeterministic because of atomicAdd usage globalContext().alertNotDeterministic("grid_sampler_3d_backward_cuda"); auto N = input.size(0); auto D = grid.size(1); auto H = grid.size(2); auto W = grid.size(3); int64_t count = N * D * H * W; auto input_requires_grad = output_mask[0]; if (count > 0) { AT_DISPATCH_FLOATING_TYPES_AND_HALF(input.scalar_type(), "grid_sampler_3d_backward_cuda", [&] { if (canUse32BitIndexMath(input) && canUse32BitIndexMath(grid) && canUse32BitIndexMath(grad_output)) { grid_sampler_3d_backward_kernel<scalar_t> <<<GET_BLOCKS(count, 256), 256, 0, at::cuda::getCurrentCUDAStream()>>>( static_cast<int>(count), getTensorInfo<scalar_t, int>(grad_output), getTensorInfo<scalar_t, int>(input), getTensorInfo<scalar_t, int>(grid), input_requires_grad ? getTensorInfo<scalar_t, int>(grad_input) : TensorInfo<scalar_t, int>(), getTensorInfo<scalar_t, int>(grad_grid), static_cast<GridSamplerInterpolation>(interpolation_mode), static_cast<GridSamplerPadding>(padding_mode), align_corners, /*grad_input_memory_span =*/input_requires_grad ? static_cast<int>(grad_input.numel()) : 0, input_requires_grad); C10_CUDA_KERNEL_LAUNCH_CHECK(); } else { grid_sampler_3d_backward_kernel<scalar_t> <<<GET_BLOCKS(count, 256), 256, 0, at::cuda::getCurrentCUDAStream()>>>( count, getTensorInfo<scalar_t, int64_t>(grad_output), getTensorInfo<scalar_t, int64_t>(input), getTensorInfo<scalar_t, int64_t>(grid), input_requires_grad ? getTensorInfo<scalar_t, int64_t>(grad_input) : TensorInfo<scalar_t, int64_t>(), getTensorInfo<scalar_t, int64_t>(grad_grid), static_cast<GridSamplerInterpolation>(interpolation_mode), static_cast<GridSamplerPadding>(padding_mode), align_corners, /*grad_input_memory_span =*/input_requires_grad ? grad_input.numel() : 0, input_requires_grad); C10_CUDA_KERNEL_LAUNCH_CHECK(); } }); } } } // namespace at::native
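// --- Illustrative sketch (added for clarity; not part of the upstream file) ---
// The kernels above map normalized grid coordinates in [-1, 1] to input pixel
// indices via grid_sampler_unnormalize(). The helper below restates that
// mapping for the two align_corners modes as commonly documented; the function
// name unnormalize_coord_example and the use of plain float are assumptions
// made only for this sketch.
static inline float unnormalize_coord_example(float coord, int size, bool align_corners) {
  if (align_corners) {
    // -1 and +1 map to the centers of the first and last pixels.
    return ((coord + 1.f) / 2.f) * (size - 1);
  }
  // -1 and +1 map to the outer edges of the first and last pixels.
  return ((coord + 1.f) * size - 1.f) / 2.f;
}
// Example: size = 4, coord = 1.0 gives 3.0 with align_corners and 3.5 without,
// which is why the two modes sample slightly different source locations.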
49b0967366368a5ef58c69ee1daa209fb9d4e006.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <stdio.h> #include <stdlib.h> #define SRAND_VALUE 1985 texture<int,2> gridTex; __global__ void GOL(int dim, int *newGrid) { int iy = blockDim.y * blockIdx.y + threadIdx.y; int ix = blockDim.x * blockIdx.x + threadIdx.x; int id = iy * dim + ix; int numNeighbors; float iyTex = (iy + 0.5f)/dim; float ixTex = (ix + 0.5f)/dim; float oneTex = 1.0f/dim; if(iy < dim && ix < dim) { //Get the number of neighbors for a given grid point numNeighbors = tex2D(gridTex, iyTex+oneTex, ixTex) //upper/lower + tex2D(gridTex, iyTex-oneTex, ixTex) + tex2D(gridTex, iyTex, ixTex+oneTex) //right/left + tex2D(gridTex, iyTex, ixTex-oneTex) + tex2D(gridTex, iyTex-oneTex, ixTex-oneTex) //diagonals + tex2D(gridTex, iyTex-oneTex, ixTex+oneTex) + tex2D(gridTex, iyTex+oneTex, ixTex-oneTex) + tex2D(gridTex, iyTex+oneTex, ixTex+oneTex); int cell = tex2D(gridTex, iyTex, ixTex); //Here we have explicitly all of the game rules if (cell == 1 && numNeighbors < 2) newGrid[id] = 0; else if (cell == 1 && (numNeighbors == 2 || numNeighbors == 3)) newGrid[id] = 1; else if (cell == 1 && numNeighbors > 3) newGrid[id] = 0; else if (cell == 0 && numNeighbors == 3) newGrid[id] = 1; else newGrid[id] = cell; } } int main(int argc, char* argv[]) { int i,j,iter; int* h_grid; //Grid on host hipArray* d_grid; //Grid on device int* d_newGrid; //Second grid used on device only int dim = 1024; //Linear dimension of our grid - not counting ghost cells int maxIter = 1<<10; //Number of game steps size_t bytes = sizeof(int)*dim*dim; //Allocate host Grid used for initial setup and read back from device h_grid = (int*)malloc(bytes); //Allocate device grids hipMallocArray(&d_grid, &gridTex.channelDesc, dim, dim); hipMalloc(&d_newGrid, bytes); //Assign initial population randomly srand(SRAND_VALUE); for(i = 0; i<dim; i++) { for(j = 0; j<dim; j++) { h_grid[i*dim+j] = rand() % 2; } } //Copy over initial game grid (Dim-1 threads) hipMemcpyToArray (d_grid, 0, 0, h_grid, bytes, hipMemcpyHostToDevice); hipBindTextureToArray(gridTex, d_grid); gridTex.normalized = true; gridTex.addressMode[0] = hipAddressModeWrap; gridTex.addressMode[1] = hipAddressModeWrap; dim3 dimBlock(8,8); int linGrid = (int)ceil(dim/(float)dimBlock.x); dim3 dimGrid(linGrid,linGrid); //Main game loop for (iter = 0; iter<maxIter; iter++) { hipLaunchKernelGGL(( GOL), dim3(dimGrid),dim3(dimBlock), 0, 0, dim, d_newGrid); //Swap our grids and iterate again hipMemcpyToArray (d_grid, 0, 0, d_newGrid, bytes, hipMemcpyDeviceToDevice); }//iter loop //Copy back results and sum hipMemcpy(h_grid, d_newGrid, bytes, hipMemcpyDeviceToHost); //Sum up alive cells and print results int total = 0; for (i = 0; i<dim; i++) { for (j = 0; j<dim; j++) { total += h_grid[i*dim+j]; } } printf("Total Alive: %d\n", total); hipFree(d_grid); hipFree(d_newGrid); free(h_grid); return 0; }
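// --- Optional hardening sketch (an addition for illustration; not part of the
// original sample) --- The game loop above launches GOL repeatedly without
// checking for launch failures. A minimal check that could be dropped in after
// each hipLaunchKernelGGL call is sketched below; the macro name
// HIP_CHECK_LAUNCH is invented here, and the sketch relies on the <stdio.h>
// and <stdlib.h> includes already present in this file.
#define HIP_CHECK_LAUNCH()                                                     \
  do {                                                                         \
    hipError_t launch_err = hipGetLastError();                                 \
    if (launch_err != hipSuccess) {                                            \
      printf("Kernel launch failed: %s\n", hipGetErrorString(launch_err));     \
      exit(1);                                                                 \
    }                                                                          \
  } while (0)
// Usage inside the main game loop:
//   hipLaunchKernelGGL(GOL, dimGrid, dimBlock, 0, 0, dim, d_newGrid);
//   HIP_CHECK_LAUNCH();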
49b0967366368a5ef58c69ee1daa209fb9d4e006.cu
#include <stdio.h> #include <stdlib.h> #define SRAND_VALUE 1985 texture<int,2> gridTex; __global__ void GOL(int dim, int *newGrid) { int iy = blockDim.y * blockIdx.y + threadIdx.y; int ix = blockDim.x * blockIdx.x + threadIdx.x; int id = iy * dim + ix; int numNeighbors; float iyTex = (iy + 0.5f)/dim; float ixTex = (ix + 0.5f)/dim; float oneTex = 1.0f/dim; if(iy < dim && ix < dim) { //Get the number of neighbors for a given grid point numNeighbors = tex2D(gridTex, iyTex+oneTex, ixTex) //upper/lower + tex2D(gridTex, iyTex-oneTex, ixTex) + tex2D(gridTex, iyTex, ixTex+oneTex) //right/left + tex2D(gridTex, iyTex, ixTex-oneTex) + tex2D(gridTex, iyTex-oneTex, ixTex-oneTex) //diagonals + tex2D(gridTex, iyTex-oneTex, ixTex+oneTex) + tex2D(gridTex, iyTex+oneTex, ixTex-oneTex) + tex2D(gridTex, iyTex+oneTex, ixTex+oneTex); int cell = tex2D(gridTex, iyTex, ixTex); //Here we have explicitly all of the game rules if (cell == 1 && numNeighbors < 2) newGrid[id] = 0; else if (cell == 1 && (numNeighbors == 2 || numNeighbors == 3)) newGrid[id] = 1; else if (cell == 1 && numNeighbors > 3) newGrid[id] = 0; else if (cell == 0 && numNeighbors == 3) newGrid[id] = 1; else newGrid[id] = cell; } } int main(int argc, char* argv[]) { int i,j,iter; int* h_grid; //Grid on host cudaArray* d_grid; //Grid on device int* d_newGrid; //Second grid used on device only int dim = 1024; //Linear dimension of our grid - not counting ghost cells int maxIter = 1<<10; //Number of game steps size_t bytes = sizeof(int)*dim*dim; //Allocate host Grid used for initial setup and read back from device h_grid = (int*)malloc(bytes); //Allocate device grids cudaMallocArray(&d_grid, &gridTex.channelDesc, dim, dim); cudaMalloc(&d_newGrid, bytes); //Assign initial population randomly srand(SRAND_VALUE); for(i = 0; i<dim; i++) { for(j = 0; j<dim; j++) { h_grid[i*dim+j] = rand() % 2; } } //Copy over initial game grid (Dim-1 threads) cudaMemcpyToArray (d_grid, 0, 0, h_grid, bytes, cudaMemcpyHostToDevice); cudaBindTextureToArray(gridTex, d_grid); gridTex.normalized = true; gridTex.addressMode[0] = cudaAddressModeWrap; gridTex.addressMode[1] = cudaAddressModeWrap; dim3 dimBlock(8,8); int linGrid = (int)ceil(dim/(float)dimBlock.x); dim3 dimGrid(linGrid,linGrid); //Main game loop for (iter = 0; iter<maxIter; iter++) { GOL<<<dimGrid,dimBlock>>>(dim, d_newGrid); //Swap our grids and iterate again cudaMemcpyToArray (d_grid, 0, 0, d_newGrid, bytes, cudaMemcpyDeviceToDevice); }//iter loop //Copy back results and sum cudaMemcpy(h_grid, d_newGrid, bytes, cudaMemcpyDeviceToHost); //Sum up alive cells and print results int total = 0; for (i = 0; i<dim; i++) { for (j = 0; j<dim; j++) { total += h_grid[i*dim+j]; } } printf("Total Alive: %d\n", total); cudaFree(d_grid); cudaFree(d_newGrid); free(h_grid); return 0; }
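// --- Reference sketch (added as an illustration; not part of the original
// sample) --- With normalized coordinates and cudaAddressModeWrap, the tex2D
// reads in GOL behave like modulo (toroidal) indexing into the grid. The
// kernel below shows the equivalent arithmetic on a plain int array; the name
// GOL_wrap_reference is invented for this sketch and it is not launched by the
// host code above.
__global__ void GOL_wrap_reference(int dim, const int* grid, int* newGrid)
{
    int iy = blockDim.y * blockIdx.y + threadIdx.y;
    int ix = blockDim.x * blockIdx.x + threadIdx.x;
    if (iy >= dim || ix >= dim) return;

    // Wrap-around neighbor indices, mirroring the texture's wrap address mode.
    int up    = (iy + dim - 1) % dim;
    int down  = (iy + 1) % dim;
    int left  = (ix + dim - 1) % dim;
    int right = (ix + 1) % dim;

    int numNeighbors =
          grid[up * dim + ix]     + grid[down * dim + ix]
        + grid[iy * dim + left]   + grid[iy * dim + right]
        + grid[up * dim + left]   + grid[up * dim + right]
        + grid[down * dim + left] + grid[down * dim + right];

    // Same Game of Life rules as in the texture-based kernel.
    int cell = grid[iy * dim + ix];
    int next = cell;
    if (cell == 1 && (numNeighbors < 2 || numNeighbors > 3)) next = 0;
    else if (cell == 0 && numNeighbors == 3)                 next = 1;
    newGrid[iy * dim + ix] = next;
}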
97f51728ca3e461ee088f97fbe2c975db1121c73.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. #include "paddle/fluid/operators/top_k_function_cuda.h" #include "paddle/phi/backends/gpu/gpu_context.h" #include "paddle/phi/core/kernel_registry.h" #include "paddle/phi/kernels/funcs/eigen/common.h" #include "paddle/phi/kernels/funcs/eigen/eigen_function.h" #include "paddle/phi/kernels/funcs/math_function.h" #include "paddle/phi/kernels/kthvalue_kernel.h" namespace phi { inline int getBlockSize(int col) { if (col > 512) return 1024; else if (col > 256 && col <= 512) return 512; else if (col > 128 && col <= 256) return 256; else if (col > 64 && col <= 128) return 128; else return 64; } template <typename T> bool SortKthvalue(const phi::GPUContext& dev_ctx, const DenseTensor* input_tensor, const int64_t num_cols, const int64_t num_rows, const int k, DenseTensor* out_tensor, DenseTensor* indices_tensor) { auto cu_stream = dev_ctx.stream(); DenseTensor input_indices; const std::vector<int64_t> dims = {num_rows, num_cols}; auto dim = phi::make_ddim(dims); input_indices.Resize(dim); dev_ctx.template Alloc<int64_t>(&input_indices); size_t temp_storage_bytes = -1; int block_size = getBlockSize(num_cols); unsigned int maxGridDimX = dev_ctx.GetCUDAMaxGridDimSize()[0]; unsigned int grid_size = num_rows < maxGridDimX ? 
static_cast<unsigned int>(num_rows) : maxGridDimX; hipLaunchKernelGGL(( paddle::operators::InitIndex<int64_t>) , dim3(grid_size), dim3(block_size), 0, cu_stream, input_indices.data<int64_t>(), num_rows, num_cols); hipcub::CountingInputIterator<int64_t> counting_iter(0); hipcub::TransformInputIterator<int64_t, paddle::operators::SegmentOffsetIter, hipcub::CountingInputIterator<int64_t>> segment_offsets_t(counting_iter, paddle::operators::SegmentOffsetIter(num_cols)); T* sorted_values_ptr; int64_t* sorted_indices_ptr; DenseTensor temp_values, temp_indices; const T* input = input_tensor->data<T>(); T* values = out_tensor->data<T>(); int64_t* indices = indices_tensor->mutable_data<int64_t>(dev_ctx.GetPlace()); temp_values.Resize(dim); temp_indices.Resize(dim); sorted_values_ptr = dev_ctx.template Alloc<T>(&temp_values); sorted_indices_ptr = dev_ctx.template Alloc<int64_t>(&temp_indices); auto err = hipcub::DeviceSegmentedRadixSort::SortPairs(nullptr, temp_storage_bytes, input, sorted_values_ptr, input_indices.data<int64_t>(), sorted_indices_ptr, num_cols * num_rows, num_rows, segment_offsets_t, segment_offsets_t + 1, 0, sizeof(T) * 8, cu_stream); #ifdef __HIPCC__ if (err != hipSuccess) { LOG(ERROR) << "KthvalueOP failed as could not launch " "hipcub::DeviceSegmentedRadixSort::SortPairs, status: " << hipGetErrorString(err); return false; } #else if (err != hipSuccess) { LOG(ERROR) << "KthvalueOP failed as could not launch " "hipcub::DeviceSegmentedRadixSort::SortPairs, status: " << hipGetErrorString(err); return false; } #endif DenseTensor temp_storage; temp_storage.Resize({static_cast<int>(temp_storage_bytes / sizeof(uint8_t))}); uint8_t* temp_storage_data = dev_ctx.template Alloc<uint8_t>(&temp_storage); err = hipcub::DeviceSegmentedRadixSort::SortPairs(temp_storage_data, temp_storage_bytes, input, sorted_values_ptr, input_indices.data<int64_t>(), sorted_indices_ptr, num_cols * num_rows, num_rows, segment_offsets_t, segment_offsets_t + 1, 0, sizeof(T) * 8, cu_stream); #ifdef __HIPCC__ if (err != hipSuccess) { LOG(ERROR) << "KthvalueOP failed as could not launch " "hipcub::DeviceSegmentedRadixSort::SortPairs, " << temp_storage_bytes << ", status: " << hipGetErrorString(err); return false; } #else if (err != hipSuccess) { LOG(ERROR) << "KthvalueOP failed as could not launch " "hipcub::DeviceSegmentedRadixSort::SortPairs, " << temp_storage_bytes << ", status: " << hipGetErrorString(err); return false; } #endif auto& dev = *dev_ctx.eigen_device(); const Eigen::DSizes<Eigen::DenseIndex, 2> slice_indices{0, k - 1}; const Eigen::DSizes<Eigen::DenseIndex, 2> slice_sizes{num_rows, 1}; auto e_indices = EigenMatrix<int64_t>::From(*indices_tensor, dim); auto e_tmp_indices = EigenMatrix<int64_t>::From(static_cast<const DenseTensor>(temp_indices)); std::vector<int> odims = {static_cast<int>(num_rows), static_cast<int>(1)}; dim = phi::make_ddim(odims); auto e_values = EigenMatrix<T>::From(*out_tensor, dim); auto e_tmp_values = EigenMatrix<T>::From(static_cast<const DenseTensor>(temp_values)); funcs::EigenSlice<std::decay_t<decltype(dev)>, int64_t, 2>::Eval( dev, e_indices, e_tmp_indices, slice_indices, slice_sizes); funcs::EigenSlice<std::decay_t<decltype(dev)>, T, 2>::Eval( dev, e_values, e_tmp_values, slice_indices, slice_sizes); return true; } template <typename T, typename Context> void KthvalueKernel(const Context& dev_ctx, const DenseTensor& x, int k, int axis, bool keepdim, DenseTensor* output, DenseTensor* indices) { const auto& in_dims = x.dims(); if (axis < 0) axis += in_dims.size(); auto out_dims 
= output->dims(); const T* input_data = x.data<T>(); T* output_data = dev_ctx.template Alloc<T>(output); int64_t* indices_data = dev_ctx.template Alloc<int64_t>(indices); if (axis == in_dims.size() - 1) { const int64_t& input_height = phi::product(phi::slice_ddim(in_dims, 0, in_dims.size() - 1)); const int64_t& input_width = in_dims[in_dims.size() - 1]; PADDLE_ENFORCE_EQ( SortKthvalue<T>( dev_ctx, &x, input_width, input_height, k, output, indices), true, phi::errors::External("KthvalueOP: Error when use cub sorting")); return; } else { std::vector<int> trans; for (int i = 0; i < axis; i++) { trans.emplace_back(i); } trans.emplace_back(in_dims.size() - 1); for (int i = axis + 1; i < in_dims.size() - 1; i++) { trans.emplace_back(i); } trans.emplace_back(axis); if (!keepdim) { std::vector<int> tmp_out_shape; for (int i = 0; i < axis; i++) { tmp_out_shape.emplace_back(in_dims[i]); } tmp_out_shape.emplace_back(1); for (int i = axis + 1; i < in_dims.size(); i++) { tmp_out_shape.emplace_back(in_dims[i]); } DDim tmp_out_dims = phi::make_ddim(tmp_out_shape); output->Resize(tmp_out_dims); indices->Resize(tmp_out_dims); } DDim trans_dims(in_dims); DDim trans_out_dims(in_dims); for (int i = 0; i < trans.size(); i++) { trans_dims[i] = in_dims[trans[i]]; trans_out_dims[i] = in_dims[trans[i]]; } trans_out_dims[in_dims.size() - 1] = 1; DenseTensor trans_input; trans_input.mutable_data<T>(trans_dims, dev_ctx.GetPlace()); int ndims = trans.size(); funcs::TransCompute<phi::GPUContext, T>( ndims, dev_ctx, x, &trans_input, trans); DenseTensor trans_ind, trans_out; trans_ind.mutable_data<int64_t>(trans_out_dims, dev_ctx.GetPlace()); trans_out.mutable_data<T>(trans_out_dims, dev_ctx.GetPlace()); const int64_t input_height = phi::product(phi::slice_ddim(trans_dims, 0, trans_dims.size() - 1)); const int64_t input_width = trans_dims[trans_dims.size() - 1]; PADDLE_ENFORCE_EQ( SortKthvalue<T>(dev_ctx, &trans_input, input_width, input_height, k, &trans_out, &trans_ind), true, phi::errors::External("KthvalueOP: Error when use cub sorting")); funcs::TransCompute<phi::GPUContext, int64_t>( ndims, dev_ctx, trans_ind, indices, trans); funcs::TransCompute<phi::GPUContext, T>( ndims, dev_ctx, trans_out, output, trans); if (!keepdim) { output->Resize(out_dims); indices->Resize(out_dims); } } } } // namespace phi PD_REGISTER_KERNEL(kthvalue, GPU, ALL_LAYOUT, phi::KthvalueKernel, float, double, int, int64_t) {}
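// --- Worked sketch (added for clarity; not part of the kernel source) ---
// The axis-handling branch above builds a permutation `trans` that moves the
// requested axis to the innermost position so the segmented sort always runs
// over the last dimension. The helper below reproduces that construction on
// the host; build_last_axis_perm is a name invented for this sketch.
#include <vector>
static std::vector<int> build_last_axis_perm(int rank, int axis) {
  std::vector<int> trans;
  for (int i = 0; i < axis; ++i) trans.push_back(i);  // leading dims unchanged
  trans.push_back(rank - 1);                          // old last dim takes axis' slot
  for (int i = axis + 1; i < rank - 1; ++i) trans.push_back(i);
  trans.push_back(axis);                              // axis becomes innermost
  return trans;
}
// Example: rank = 4, axis = 1 yields {0, 3, 2, 1}, matching the emplace_back
// sequence in KthvalueKernel; since this permutation is its own inverse, the
// trailing TransCompute calls can reuse it to transpose the result back.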
97f51728ca3e461ee088f97fbe2c975db1121c73.cu
// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. #include "paddle/fluid/operators/top_k_function_cuda.h" #include "paddle/phi/backends/gpu/gpu_context.h" #include "paddle/phi/core/kernel_registry.h" #include "paddle/phi/kernels/funcs/eigen/common.h" #include "paddle/phi/kernels/funcs/eigen/eigen_function.h" #include "paddle/phi/kernels/funcs/math_function.h" #include "paddle/phi/kernels/kthvalue_kernel.h" namespace phi { inline int getBlockSize(int col) { if (col > 512) return 1024; else if (col > 256 && col <= 512) return 512; else if (col > 128 && col <= 256) return 256; else if (col > 64 && col <= 128) return 128; else return 64; } template <typename T> bool SortKthvalue(const phi::GPUContext& dev_ctx, const DenseTensor* input_tensor, const int64_t num_cols, const int64_t num_rows, const int k, DenseTensor* out_tensor, DenseTensor* indices_tensor) { auto cu_stream = dev_ctx.stream(); DenseTensor input_indices; const std::vector<int64_t> dims = {num_rows, num_cols}; auto dim = phi::make_ddim(dims); input_indices.Resize(dim); dev_ctx.template Alloc<int64_t>(&input_indices); size_t temp_storage_bytes = -1; int block_size = getBlockSize(num_cols); unsigned int maxGridDimX = dev_ctx.GetCUDAMaxGridDimSize()[0]; unsigned int grid_size = num_rows < maxGridDimX ? 
static_cast<unsigned int>(num_rows) : maxGridDimX; paddle::operators::InitIndex<int64_t> <<<grid_size, block_size, 0, cu_stream>>>( input_indices.data<int64_t>(), num_rows, num_cols); cub::CountingInputIterator<int64_t> counting_iter(0); cub::TransformInputIterator<int64_t, paddle::operators::SegmentOffsetIter, cub::CountingInputIterator<int64_t>> segment_offsets_t(counting_iter, paddle::operators::SegmentOffsetIter(num_cols)); T* sorted_values_ptr; int64_t* sorted_indices_ptr; DenseTensor temp_values, temp_indices; const T* input = input_tensor->data<T>(); T* values = out_tensor->data<T>(); int64_t* indices = indices_tensor->mutable_data<int64_t>(dev_ctx.GetPlace()); temp_values.Resize(dim); temp_indices.Resize(dim); sorted_values_ptr = dev_ctx.template Alloc<T>(&temp_values); sorted_indices_ptr = dev_ctx.template Alloc<int64_t>(&temp_indices); auto err = cub::DeviceSegmentedRadixSort::SortPairs(nullptr, temp_storage_bytes, input, sorted_values_ptr, input_indices.data<int64_t>(), sorted_indices_ptr, num_cols * num_rows, num_rows, segment_offsets_t, segment_offsets_t + 1, 0, sizeof(T) * 8, cu_stream); #ifdef __HIPCC__ if (err != hipSuccess) { LOG(ERROR) << "KthvalueOP failed as could not launch " "hipcub::DeviceSegmentedRadixSort::SortPairs, status: " << hipGetErrorString(err); return false; } #else if (err != cudaSuccess) { LOG(ERROR) << "KthvalueOP failed as could not launch " "cub::DeviceSegmentedRadixSort::SortPairs, status: " << cudaGetErrorString(err); return false; } #endif DenseTensor temp_storage; temp_storage.Resize({static_cast<int>(temp_storage_bytes / sizeof(uint8_t))}); uint8_t* temp_storage_data = dev_ctx.template Alloc<uint8_t>(&temp_storage); err = cub::DeviceSegmentedRadixSort::SortPairs(temp_storage_data, temp_storage_bytes, input, sorted_values_ptr, input_indices.data<int64_t>(), sorted_indices_ptr, num_cols * num_rows, num_rows, segment_offsets_t, segment_offsets_t + 1, 0, sizeof(T) * 8, cu_stream); #ifdef __HIPCC__ if (err != hipSuccess) { LOG(ERROR) << "KthvalueOP failed as could not launch " "hipcub::DeviceSegmentedRadixSort::SortPairs, " << temp_storage_bytes << ", status: " << hipGetErrorString(err); return false; } #else if (err != cudaSuccess) { LOG(ERROR) << "KthvalueOP failed as could not launch " "cub::DeviceSegmentedRadixSort::SortPairs, " << temp_storage_bytes << ", status: " << cudaGetErrorString(err); return false; } #endif auto& dev = *dev_ctx.eigen_device(); const Eigen::DSizes<Eigen::DenseIndex, 2> slice_indices{0, k - 1}; const Eigen::DSizes<Eigen::DenseIndex, 2> slice_sizes{num_rows, 1}; auto e_indices = EigenMatrix<int64_t>::From(*indices_tensor, dim); auto e_tmp_indices = EigenMatrix<int64_t>::From(static_cast<const DenseTensor>(temp_indices)); std::vector<int> odims = {static_cast<int>(num_rows), static_cast<int>(1)}; dim = phi::make_ddim(odims); auto e_values = EigenMatrix<T>::From(*out_tensor, dim); auto e_tmp_values = EigenMatrix<T>::From(static_cast<const DenseTensor>(temp_values)); funcs::EigenSlice<std::decay_t<decltype(dev)>, int64_t, 2>::Eval( dev, e_indices, e_tmp_indices, slice_indices, slice_sizes); funcs::EigenSlice<std::decay_t<decltype(dev)>, T, 2>::Eval( dev, e_values, e_tmp_values, slice_indices, slice_sizes); return true; } template <typename T, typename Context> void KthvalueKernel(const Context& dev_ctx, const DenseTensor& x, int k, int axis, bool keepdim, DenseTensor* output, DenseTensor* indices) { const auto& in_dims = x.dims(); if (axis < 0) axis += in_dims.size(); auto out_dims = output->dims(); const T* input_data = 
x.data<T>(); T* output_data = dev_ctx.template Alloc<T>(output); int64_t* indices_data = dev_ctx.template Alloc<int64_t>(indices); if (axis == in_dims.size() - 1) { const int64_t& input_height = phi::product(phi::slice_ddim(in_dims, 0, in_dims.size() - 1)); const int64_t& input_width = in_dims[in_dims.size() - 1]; PADDLE_ENFORCE_EQ( SortKthvalue<T>( dev_ctx, &x, input_width, input_height, k, output, indices), true, phi::errors::External("KthvalueOP: Error when use cub sorting")); return; } else { std::vector<int> trans; for (int i = 0; i < axis; i++) { trans.emplace_back(i); } trans.emplace_back(in_dims.size() - 1); for (int i = axis + 1; i < in_dims.size() - 1; i++) { trans.emplace_back(i); } trans.emplace_back(axis); if (!keepdim) { std::vector<int> tmp_out_shape; for (int i = 0; i < axis; i++) { tmp_out_shape.emplace_back(in_dims[i]); } tmp_out_shape.emplace_back(1); for (int i = axis + 1; i < in_dims.size(); i++) { tmp_out_shape.emplace_back(in_dims[i]); } DDim tmp_out_dims = phi::make_ddim(tmp_out_shape); output->Resize(tmp_out_dims); indices->Resize(tmp_out_dims); } DDim trans_dims(in_dims); DDim trans_out_dims(in_dims); for (int i = 0; i < trans.size(); i++) { trans_dims[i] = in_dims[trans[i]]; trans_out_dims[i] = in_dims[trans[i]]; } trans_out_dims[in_dims.size() - 1] = 1; DenseTensor trans_input; trans_input.mutable_data<T>(trans_dims, dev_ctx.GetPlace()); int ndims = trans.size(); funcs::TransCompute<phi::GPUContext, T>( ndims, dev_ctx, x, &trans_input, trans); DenseTensor trans_ind, trans_out; trans_ind.mutable_data<int64_t>(trans_out_dims, dev_ctx.GetPlace()); trans_out.mutable_data<T>(trans_out_dims, dev_ctx.GetPlace()); const int64_t input_height = phi::product(phi::slice_ddim(trans_dims, 0, trans_dims.size() - 1)); const int64_t input_width = trans_dims[trans_dims.size() - 1]; PADDLE_ENFORCE_EQ( SortKthvalue<T>(dev_ctx, &trans_input, input_width, input_height, k, &trans_out, &trans_ind), true, phi::errors::External("KthvalueOP: Error when use cub sorting")); funcs::TransCompute<phi::GPUContext, int64_t>( ndims, dev_ctx, trans_ind, indices, trans); funcs::TransCompute<phi::GPUContext, T>( ndims, dev_ctx, trans_out, output, trans); if (!keepdim) { output->Resize(out_dims); indices->Resize(out_dims); } } } } // namespace phi PD_REGISTER_KERNEL(kthvalue, GPU, ALL_LAYOUT, phi::KthvalueKernel, float, double, int, int64_t) {}
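// --- Minimal sketch of the two-phase CUB call pattern used in SortKthvalue
// above (added for illustration; segmented_sort_pairs_sketch and its parameter
// names are invented here, and it assumes raw device pointers plus a device
// offset array of num_segments + 1 ints instead of the TransformInputIterator
// used by the real code). ---
#include <cub/cub.cuh>
template <typename KeyT, typename ValueT>
cudaError_t segmented_sort_pairs_sketch(
    const KeyT* keys_in, KeyT* keys_out,
    const ValueT* vals_in, ValueT* vals_out,
    int num_items, int num_segments, const int* d_offsets,
    cudaStream_t stream) {
  // Phase 1: with a null temp-storage pointer, SortPairs only reports how much
  // scratch memory it needs in temp_storage_bytes.
  size_t temp_storage_bytes = 0;
  cudaError_t err = cub::DeviceSegmentedRadixSort::SortPairs(
      nullptr, temp_storage_bytes, keys_in, keys_out, vals_in, vals_out,
      num_items, num_segments, d_offsets, d_offsets + 1,
      0, sizeof(KeyT) * 8, stream);
  if (err != cudaSuccess) return err;

  void* d_temp_storage = nullptr;
  err = cudaMalloc(&d_temp_storage, temp_storage_bytes);
  if (err != cudaSuccess) return err;

  // Phase 2: same arguments with real scratch space; the sort actually runs.
  err = cub::DeviceSegmentedRadixSort::SortPairs(
      d_temp_storage, temp_storage_bytes, keys_in, keys_out, vals_in, vals_out,
      num_items, num_segments, d_offsets, d_offsets + 1,
      0, sizeof(KeyT) * 8, stream);
  cudaFree(d_temp_storage);
  return err;
}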
883e2a71101f2e887ad82605885fecc9c7ca1c98.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include<stdio.h> #include "MNIST_for_C-master/mnist.h" #include<stdio.h> #include<string.h> #include <stdlib.h> #include <stdarg.h> #include<time.h> #include <math.h> #define CHECK(call) \ { \ const hipError_t error = call; \ if (error != hipSuccess) \ { \ printf("Error: %s:%d, ", __FILE__, __LINE__); \ printf("code:%d, reason: %s\n", error, hipGetErrorString(error)); \ exit(1); \ } \ } #define ITERATION_COUNT 100 #define BATCH_SIZE 32 #define BLOCK_X 32 #define BLOCK_Y 32 float learning_rate = 1.0e-6; #define INPUT_NODE_COUNT 784 #define HIDDEN_LAYER_NODE_COUNT 96 #define OUTPUT_NODE_COUNT 32 // All vectors/matrices are stored as this structure in the memory... struct Vector2D { // Whole vector/matrix data is stored in one dimensional array... // All numbers are floating point numbers.... //This pointer points where the vector/matrix data lyies.... float * data; // Row number of the vector/matrix... int height; // Column number of the vector/matrix... int width; int size; }; // We are defining a type from this structure definition... typedef struct Vector2D Vector2D; float * device_matrix_location; Vector2D * CreateVector2D(float * data, int height, int width, bool fill = true, bool store = false) { // A new structure is allocated in GPU memory for matrix/vector... Vector2D * temp ; CHECK(hipMalloc(&temp, sizeof(Vector2D))); float * temp2; CHECK(hipMalloc(&temp2, sizeof(float)*height*width)); if(fill == true) CHECK(hipMemcpy(temp2, data, sizeof(float)*height*width, hipMemcpyHostToDevice)); CHECK(hipMemcpy(&temp->data, &temp2, sizeof(float *), hipMemcpyHostToDevice)); CHECK(hipMemcpy(&temp->height, (void *)(&height), sizeof(int), hipMemcpyHostToDevice)); CHECK(hipMemcpy(&temp->width, (void *)(&width), sizeof(int), hipMemcpyHostToDevice)); //temp->height = height; //temp->width = width; if(store == true) device_matrix_location = temp2; hipDeviceSynchronize(); return temp; } __global__ void MatrixSubtract(Vector2D * __restrict__ result, Vector2D * __restrict__ vec1, Vector2D * __restrict__ vec2) { if((vec1->width != vec2->width) || (vec1->height != vec2->height)) { printf("\n\n**********Matrix Subtract diff dimension...."); return; } int tx = blockIdx.x*blockDim.x*4+ threadIdx.x; int ty = blockIdx.y*blockDim.y + threadIdx.y; int tid = ty*vec1->width+tx; if(tid ==0) { //printf("\nMatrixSubtractvec->width : %d vec->height : %d - x dim %d y dim %d\n", vec1->width, vec1->height, blockDim.x*gridDim.x, blockDim.y*gridDim.y); if((blockDim.y*gridDim.y < vec1->height) || (blockDim.x*gridDim.x<vec1->width/4)) { printf("\nMatrixSubtract\n"); printf("vec->width : %d vec->height : %d - x dim %d y dim %d", vec1->width, vec1->height, blockDim.x*gridDim.x, blockDim.y*gridDim.y); } } if(tid +3*blockDim.x< vec1->width*vec1->height) { result->data[tid] = vec1->data[tid] - vec2->data[tid]; result->data[tid+blockDim.x] = vec1->data[tid+blockDim.x] - vec2->data[tid+blockDim.x]; result->data[tid+2*blockDim.x] = vec1->data[tid+2*blockDim.x] - vec2->data[tid+2*blockDim.x]; result->data[tid+3*blockDim.x] = vec1->data[tid+3*blockDim.x] - vec2->data[tid+3*blockDim.x]; } } #define TILE_WIDTH BLOCK_X #define TILE_HEIGHT BLOCK_Y __global__ void MatrixProductShared( Vector2D * __restrict__ result, Vector2D * __restrict__ m1, Vector2D * __restrict__ m2 )//float *A, float *B, float *C ) { { __shared__ float A_tile[TILE_HEIGHT][TILE_WIDTH]; __shared__ float B_tile[TILE_HEIGHT][TILE_WIDTH+1]; int numARows = m1->height, numAColumns= 
m1->width, numBRows = m2->height, numBColumns = m2->width, numCRows = result->height, numCColumns = m2->width; float * A = m1->data, * B = m2->data, * C = result->data; float sum = 0.0; // where am I? // tx for thread_x or tile_x int tx = threadIdx.x; int ty = threadIdx.y; // cx for top left corner of tile in C int cx = blockIdx.x * blockDim.x; int cy = blockIdx.y * blockDim.y; // Cx for cell coordinates in C int Cx = cx + tx; int Cy = cy + ty; int total_tiles = (numAColumns + TILE_WIDTH - 1) / TILE_WIDTH; for (int tile_idx = 0; tile_idx < total_tiles; tile_idx++) { // the corresponding tiles' top left corners are: // for A: row = blockIdx.y * blockDim.y, col = tile_idx * TILE_WIDTH // for B: row = tile_idx * TILE_WIDTH, col = blockIdx.x * blockDim.x // loading tiles int Ax = tile_idx * TILE_WIDTH + tx; int Ay = cy + ty; int Bx = cx + tx; int By = tile_idx * TILE_WIDTH + ty; if (Ax < numAColumns && Ay < numARows) { A_tile[ty][tx] = A[Ay * numAColumns + Ax]; } else { A_tile[ty][tx] = 0.0; } if (Bx < numBColumns && By < numBRows) { B_tile[ty][tx] = B[By * numBColumns + Bx]; } else { B_tile[ty][tx] = 0.0; } __syncthreads(); // multiplying tiles #pragma unroll 4 for (int i = 0; i < TILE_WIDTH; i++) { sum += A_tile[ty][i] * B_tile[i][tx]; } __syncthreads(); } // saving result (discarded if we're in the wrong thread) if (Cx < numCColumns && Cy < numCRows) { C[Cy * numCColumns + Cx] = sum; } } __global__ void TransposeVector2DShared(Vector2D * __restrict__ res, Vector2D * __restrict__ m1) { int thx = blockIdx.x*blockDim.x+ threadIdx.x; int thy = blockIdx.y*blockDim.y+threadIdx.y; int tid = thx + thy*m1->width; __shared__ float ordered_data[BLOCK_Y][BLOCK_X+1]; __shared__ float transposed_data[BLOCK_Y][BLOCK_X+1]; int j = threadIdx.x+blockDim.x*blockIdx.y; int k = threadIdx.y + blockDim.y*blockIdx.x; int target = j + res->width*k; if(tid < m1->width*m1->height) { //padded ordered_data[threadIdx.y][threadIdx.x] = m1->data[tid] ; } __syncthreads(); //transposed_data[thy+thx*m1->height] = ordered_data[tid] ; if(thx < m1->width && thy< m1->height) { transposed_data[threadIdx.x][threadIdx.y] = ordered_data[threadIdx.y][threadIdx.x]; } __syncthreads(); if(thx < m1->width && thy< m1->height) { res->data [target] = transposed_data[threadIdx.y][threadIdx.x] ; //printf("idy : %d - idx : %d - blockdim x : %d - blockDim y : %d - gridDim.x - %d - gridDim.y : %d\n", thy, thx, blockDim.x, blockDim.y, gridDim.x, gridDim.y); } if(tid ==0) { //printf("\nTransposeVector2Dvec->width : %d vec->height : %d - x dim %d y dim %d\n", m1->width, m1->height, blockDim.x*gridDim.x, blockDim.y*gridDim.y); if((blockDim.y*gridDim.y < m1->height) || (blockDim.x*gridDim.x<m1->width)) { printf("\nTransposeVector2D\n"); printf("vec->width : %d vec->height : %d - x dim %d y dim %d", m1->width, m1->height, blockDim.x*gridDim.x, blockDim.y*gridDim.y); } } } __global__ void DisplayVector2D(Vector2D * vector) { printf("["); for(int h = 0; h < vector->height; h++) { printf("["); for( int w = 0; w < vector->width-1; w++) { printf("%f, ", vector->data[h*vector->width+w]); } printf("%f], \n", vector->data[h*vector->width+vector->width-1]); } printf("]\n"); printf("Row : %d - Width : %d \n\n", vector->height, vector->width); } __device__ float error_sum[BATCH_SIZE]; __global__ void Sum2D(Vector2D * __restrict__ vec) { int tid = threadIdx.y; int val = 0; int width = vec->width; #pragma unroll 4 for(int a = 0; a < width; a++) { val += vec->data[a+tid*width]; } error_sum[tid] = val; } __device__ int arg_max_result[BATCH_SIZE]; __global__ void 
ArgMax2D(Vector2D * __restrict__ vec1) { int tid = blockIdx.y*blockDim.y + threadIdx.y; if(tid ==0) { //printf("\nArgMax2Dvec->width : %d vec->height : %d - x dim %d y dim %d\n", vec1->width, vec1->height, blockDim.x*gridDim.x, blockDim.y*gridDim.y); if(blockDim.y*gridDim.y < vec1->height) { printf("\nArgMax2D\n"); printf("vec->width : %d vec->height : %d - x dim %d y dim %d", vec1->width, vec1->height, blockDim.x*gridDim.x, blockDim.y*gridDim.y); } } if(tid < vec1->height) { float max = -100000; int max_index = 0; #pragma unroll 4 for(int a = 0; a < vec1->width;a++) { if(vec1->data[tid*vec1->width+a]>max) { max = vec1->data[tid*vec1->width+a]; max_index = a; } } arg_max_result[tid] = max_index; } } __global__ void Softmax(Vector2D * __restrict__ result, Vector2D * __restrict__ vec1) { int tid = blockIdx.y*blockDim.y + threadIdx.y; if(tid ==0) { //printf("\nSoftmaxvec->width : %d vec->height : %d - x dim %d y dim %d\n", vec1->width, vec1->height, blockDim.x*gridDim.x, blockDim.y*gridDim.y); if(blockDim.y*gridDim.y < vec1->height) { printf("\nSoftmax\n"); printf("vec->width : %d vec->height : %d - x dim %d y dim %d", vec1->width, vec1->height, blockDim.x*gridDim.x, blockDim.y*gridDim.y); } } if(tid < vec1->height) { float toplam = 0; #pragma unroll 4 for(int a = 0; a < vec1->width;a++) { toplam += vec1->data[a+tid*vec1->width]; } for(int a = 0; a < vec1->width;a++) { result->data[a+tid*vec1->width] = vec1->data[a+tid*vec1->width]/toplam; } } } __global__ void PointerSet(Vector2D * f1, Vector2D * f2, int shift, int batch_size) { f1->width = f2->width; f1->height = batch_size; f1->data = f2->data + f2->width*shift; } float generate_uniform(float a, float b) { return rand() / (RAND_MAX + 1.0) * (b - a) + a; } Vector2D * CreateWeightMatrix(int input_count, int output_count) { float init_range = 0; Vector2D * temp = (Vector2D *)malloc(sizeof(Vector2D)); Vector2D * device_temp; CHECK(hipMalloc(&device_temp, sizeof(Vector2D))); temp->height = input_count; //For bias... temp->width = output_count; temp->data = (float * )malloc(sizeof(float)*(input_count)*output_count); init_range = sqrt(2.0 / input_count); for(int a=0; a<(input_count)*output_count; a++) { temp->data[a] = generate_uniform(-init_range, init_range); } float * temp2; CHECK(hipMalloc(&temp2, sizeof(float)*temp->height*temp->width)); CHECK(hipMemcpy(temp2, temp->data, sizeof(float)*temp->height*temp->width, hipMemcpyHostToDevice)); CHECK(hipMemcpy(&device_temp->data, &temp2, sizeof(float *), hipMemcpyHostToDevice)); CHECK(hipMemcpy(&device_temp->height, &(temp->height), sizeof(int), hipMemcpyHostToDevice)); CHECK(hipMemcpy(&device_temp->width, &(temp->width), sizeof(int), hipMemcpyHostToDevice)); return device_temp; } Vector2D * CreateVector2DCPU(float * data, int height, int width) { // A new structure is allocated in memory for matrix/vector... 
Vector2D * temp = (Vector2D *)malloc(sizeof(struct Vector2D)); temp->data = data; temp->height = height; temp->width = width; return temp; }; Vector2D * CreateOneHot(Vector2D * indexes, int vector_length) { Vector2D * one_hot_vector = (Vector2D*)malloc(sizeof(Vector2D)); one_hot_vector->height = indexes->height; one_hot_vector->width = vector_length; one_hot_vector->size = one_hot_vector->height; one_hot_vector->data = (float *)malloc(sizeof(float)*indexes->height*vector_length); memset(one_hot_vector->data, 0, sizeof(float)*indexes->height*vector_length); for(int i=0; i<one_hot_vector->height;i++) { one_hot_vector->data[i*vector_length+(int)indexes->data[i*indexes->width]] = 1.0; } return one_hot_vector; } void DisplayVector2DCPU(Vector2D * vector) { printf("["); for(int h = 0; h < vector->height; h++) { printf("["); for( int w = 0; w < vector->width-1; w++) { printf("%f, ", vector->data[h*vector->width+w]); } printf("%f], \n", vector->data[h*vector->width+vector->width-1]); } printf("\b\b\b]"); } __global__ void AddandSigmoid(Vector2D * __restrict__ result, Vector2D * __restrict__ vec1, Vector2D * __restrict__ vec2) { if((vec1->width != vec2->width) || (vec1->height != vec2->height)) { printf("\n\n**********Matrix add diff dimension...."); return; } int tx = blockIdx.x*blockDim.x*4+ threadIdx.x; int ty = blockIdx.y*blockDim.y + threadIdx.y; int tid = ty*vec1->width+tx; if(tid ==0) {//printf("\nMatrixAddvec->width : %d vec->height : %d - x dim %d y dim %d\n", vec1->width, vec1->height, blockDim.x*gridDim.x, blockDim.y*gridDim.y); if((blockDim.y*gridDim.y < vec1->height) || (blockDim.x*gridDim.x<vec1->width/4)) { printf("\AddandSigmoid\n"); printf("vec->width : %d vec->height : %d - x dim %d y dim %d", vec1->width, vec1->height, blockDim.x*gridDim.x, blockDim.y*gridDim.y); } } if(tid + blockDim.x*3 < vec1->width*vec1->height) { result->data[tid] = 1.0/(1+exp(-(vec1->data[tid] + vec2->data[tid]))); result->data[tid+blockDim.x] = 1.0/(1+exp(-(vec1->data[tid+blockDim.x] + vec2->data[tid+blockDim.x]))); result->data[tid+2*blockDim.x] = 1.0/(1+exp(-(vec1->data[tid+2*blockDim.x] + vec2->data[tid+2*blockDim.x]))); result->data[tid+3*blockDim.x] = 1.0/(1+exp(-(vec1->data[tid+3*blockDim.x] + vec2->data[tid+3*blockDim.x]))); } } __global__ void AddandExponential(Vector2D * __restrict__ result, Vector2D * __restrict__ vec1, Vector2D * __restrict__ vec2) { if((vec1->width != vec2->width) || (vec1->height != vec2->height)) { printf("\n\n**********Matrix add diff dimension...."); return; } int tx = blockIdx.x*blockDim.x+ threadIdx.x; int ty = blockIdx.y*blockDim.y + threadIdx.y; int tid = ty*vec1->width+tx; if(tid ==0) {//printf("\nMatrixAddvec->width : %d vec->height : %d - x dim %d y dim %d\n", vec1->width, vec1->height, blockDim.x*gridDim.x, blockDim.y*gridDim.y); if((blockDim.y*gridDim.y < vec1->height) || (blockDim.x*gridDim.x<vec1->width)) { printf("\AddandExponential\n"); printf("vec->width : %d vec->height : %d - x dim %d y dim %d", vec1->width, vec1->height, blockDim.x*gridDim.x, blockDim.y*gridDim.y); } } if(tid < vec1->width*vec1->height) { result->data[tid] = exp(vec1->data[tid] + vec2->data[tid]); } } //Combination of matrixpairwise-Scalarminus-matrixpairwise in backpropagte.... 
__global__ void LayerErrorCalculate(Vector2D * __restrict__ result, Vector2D * __restrict__ vec1, Vector2D * __restrict__ vec2) { if((vec1->width != vec2->width) || (vec1->height != vec2->height)) { printf("\n\n**********MatrixPairwiseProduct dimension...."); return; } int tx = blockIdx.x*blockDim.x*4+ threadIdx.x; int ty = blockIdx.y*blockDim.y + threadIdx.y; int tid = ty*vec1->width+tx; if(tid ==0) { //printf("\nMatrixPairwiseProductvec->width : %d vec->height : %d - x dim %d y dim %d\n", vec1->width, vec1->height, blockDim.x*gridDim.x, blockDim.y*gridDim.y); if((blockDim.y*gridDim.y < vec1->height) || (blockDim.x*gridDim.x<vec1->width/4)) { printf("\nMatrixPairwiseProduct\n"); printf("vec->width : %d vec->height : %d - x dim %d y dim %d", vec1->width, vec2->height, blockDim.x*gridDim.x, blockDim.y*gridDim.y); } } if(tid + 3*blockDim.x< vec1->width*vec1->height) { result->data[tid] = vec1->data[tid] * vec2->data[tid]*(1-vec2->data[tid]) ; result->data[tid+blockDim.x] = vec1->data[tid+blockDim.x] * vec2->data[tid+blockDim.x]*(1-vec2->data[tid+blockDim.x]) ; result->data[tid+2*blockDim.x] = vec1->data[tid+2*blockDim.x] * vec2->data[tid+2*blockDim.x]*(1-vec2->data[tid+2*blockDim.x]) ; result->data[tid+3*blockDim.x] = vec1->data[tid+3*blockDim.x] * vec2->data[tid+3*blockDim.x]*(1-vec2->data[tid+3*blockDim.x]) ; } } __global__ void ApplyWeightChange(Vector2D * __restrict__ result, float learning_rate, Vector2D * __restrict__ source) { if((result->width != source->width) || (result->height != source->height)) { printf("\n\n**********ScalarMatrixProduct dimension...."); return; } int tx = blockIdx.x*blockDim.x*4+ threadIdx.x; int ty = blockIdx.y*blockDim.y + threadIdx.y; int tid = ty*source->width+tx; if(tid ==0) { //printf("\nScalarMatrixProductvec->width : %d vec->height : %d - x dim %d y dim %d\n", vec1->width, vec1->height, blockDim.x*gridDim.x, blockDim.y*gridDim.y); if((blockDim.y*gridDim.y < source->height) || (blockDim.x*gridDim.x<source->width/4)) { printf("\nScalarMatrixProduct\n"); printf("vec->width : %d vec->height : %d - x dim %d y dim %d", source->width, source->height, blockDim.x*gridDim.x, blockDim.y*gridDim.y); } } if(tid +3*blockDim.x< source->width*source->height) { result->data[tid] += learning_rate*source->data[tid]; result->data[tid+blockDim.x] += learning_rate*source->data[tid+blockDim.x]; result->data[tid+2*blockDim.x] += learning_rate*source->data[tid+2*blockDim.x]; result->data[tid+3*blockDim.x] += learning_rate*source->data[tid+3*blockDim.x]; } } __global__ void Vector2DInfo(Vector2D * vec) { printf("\n\nWidth : %d - height : %d\n\n", vec->width, vec->height); } __global__ void calculateCrossEntropyLoss(Vector2D * __restrict__ result, Vector2D * __restrict__ vec1, Vector2D * __restrict__ vec2) { if((vec1->width != vec2->width) || (vec1->height != vec2->height)) { printf("\n\n**********MatrixPairwiseProduct dimension...."); return; } int tx = blockIdx.x*blockDim.x*4+ threadIdx.x; int ty = blockIdx.y*blockDim.y + threadIdx.y; int tid = ty*vec1->width+tx; if(tid ==0) { //printf("\nMatrixPairwiseProductvec->width : %d vec->height : %d - x dim %d y dim %d\n", vec1->width, vec1->height, blockDim.x*gridDim.x, blockDim.y*gridDim.y); if((blockDim.y*gridDim.y < vec1->height) || (blockDim.x*gridDim.x<vec1->width/4)) { printf("\nMatrixPairwiseProduct\n"); printf("vec->width : %d vec->height : %d - x dim %d y dim %d", vec1->width, vec2->height, blockDim.x*gridDim.x, blockDim.y*gridDim.y); } } if(tid +3*blockDim.x< vec1->width*vec1->height) { result->data[tid] = vec1->data[tid] * 
log(vec2->data[tid]); result->data[tid+blockDim.x] = vec1->data[tid+blockDim.x] * log(vec2->data[tid+blockDim.x]); result->data[tid+2*blockDim.x] = vec1->data[tid+2*blockDim.x] * log(vec2->data[tid+2*blockDim.x]); result->data[tid+3*blockDim.x] = vec1->data[tid+3*blockDim.x] * log(vec2->data[tid+3*blockDim.x]); } } #define EMPTY printf("\n\n"); Vector2D * w1, * w2, * b1, * b2; Vector2D * output_1, * output_2; Vector2D * bias_result_1, * bias_result_2; Vector2D * ones, * ones_transpose; void FeedForward(Vector2D * device_input, int batch_size) { //input * w1 dim3 block(BLOCK_X, BLOCK_Y); dim3 grid((HIDDEN_LAYER_NODE_COUNT+block.x-1)/block.x, (batch_size+block.y-1)/block.y); hipLaunchKernelGGL(( MatrixProductShared), dim3(grid), dim3(block), 0, 0, output_1, device_input, w1); hipDeviceSynchronize(); //transpose ones * b1 hipLaunchKernelGGL(( MatrixProductShared), dim3(grid), dim3(block), 0, 0, bias_result_1, ones_transpose, b1); hipDeviceSynchronize(); int temp = grid.x ; grid.x /=4; if(grid.x == 0)grid.x = 1; hipLaunchKernelGGL(( AddandSigmoid), dim3(grid), dim3(block), 0, 0, output_1, output_1, bias_result_1); hipDeviceSynchronize(); grid.x =temp; /* //bias1 + input*w1 MatrixAdd<<<grid, block>>>(output_1, output_1, bias_result_1); hipDeviceSynchronize(); // output of hidden layer... Sigmoid<<<grid, block>>>(output_1, output_1); hipDeviceSynchronize(); */ //output of hidden layer * w2 grid.x = (OUTPUT_NODE_COUNT+block.x-1)/block.x; grid.y = (batch_size+block.y-1)/block.y; hipLaunchKernelGGL(( MatrixProductShared), dim3(grid), dim3(block), 0, 0, output_2, output_1, w2); hipDeviceSynchronize(); //transpose ones * b2 hipLaunchKernelGGL(( MatrixProductShared), dim3(grid), dim3(block), 0, 0, bias_result_2, ones_transpose, b2); hipDeviceSynchronize(); hipLaunchKernelGGL(( AddandExponential), dim3(grid), dim3(block), 0, 0, output_2, output_2, bias_result_2); hipDeviceSynchronize(); /* //bias2 + output of hidden layer * w2 - final output.... MatrixAdd<<<grid, block>>>(output_2, output_2, bias_result_2); hipDeviceSynchronize(); Exponential<<<grid, block>>>(output_2, output_2); hipDeviceSynchronize(); */ grid.x = 1; block.x = 1; hipLaunchKernelGGL(( Softmax), dim3(grid), dim3(block), 0, 0, output_2, output_2); hipDeviceSynchronize(); } Vector2D * layer_2_error, * layer_1_error; Vector2D * w1_update, * w2_update, * b1_update, * b2_update; Vector2D * output_1_transpose, * input_transpose; Vector2D * label_data; Vector2D * device_whole_label; Vector2D * device_whole_data; Vector2D * device_input; Vector2D * w2_transpose; Vector2D * scalar_minus; Vector2D * batch_data; Vector2D * batch_label; void BackPropagate(Vector2D * data, Vector2D * label, int batch_size) { FeedForward(data, batch_size); int temp; //Output error calculation dim3 block(BLOCK_X, BLOCK_Y); dim3 grid((OUTPUT_NODE_COUNT+block.x-1)/block.x, (batch_size+block.y-1)/block.y); temp = grid.x ; grid.x /= 4; if(grid.x ==0 )grid.x = 1; hipLaunchKernelGGL(( MatrixSubtract), dim3(grid), dim3(block), 0, 0, layer_2_error, label, output_2); grid.x = temp; //output1 transpose dim3 grid2((HIDDEN_LAYER_NODE_COUNT+block.x-1)/block.x, (batch_size+block.y-1)/block.y); hipLaunchKernelGGL(( TransposeVector2DShared), dim3(grid2), dim3(block), 0, 0, output_1_transpose, output_1); hipDeviceSynchronize(); //W2 update... 
dim3 grid3((OUTPUT_NODE_COUNT+block.x-1)/block.x, (HIDDEN_LAYER_NODE_COUNT+block.y-1)/block.y); hipLaunchKernelGGL(( MatrixProductShared), dim3(grid3), dim3(block), 0, 0, w2_update, output_1_transpose, layer_2_error); //b2 update dim3 grid4((OUTPUT_NODE_COUNT+block.x-1)/block.x, (1+block.y-1)/block.y); hipLaunchKernelGGL(( MatrixProductShared), dim3(grid4), dim3(block), 0, 0, b2_update, ones, layer_2_error); //W2 transpose dim3 grid5((OUTPUT_NODE_COUNT+block.x-1)/block.x, (HIDDEN_LAYER_NODE_COUNT+block.y-1)/block.y); hipLaunchKernelGGL(( TransposeVector2DShared), dim3(grid5), dim3(block), 0, 0, w2_transpose, w2); hipDeviceSynchronize(); //Layer 1 error dim3 grid6((HIDDEN_LAYER_NODE_COUNT+block.x-1)/block.x, (batch_size+block.y-1)/block.y); hipLaunchKernelGGL(( MatrixProductShared), dim3(grid6), dim3(block), 0, 0, layer_1_error, layer_2_error, w2_transpose); hipDeviceSynchronize(); temp = grid.x; grid.x /=4; if(grid.x == 0)grid.x = 1; hipLaunchKernelGGL(( LayerErrorCalculate), dim3(grid6), dim3(block), 0, 0, layer_1_error, layer_1_error, output_1); grid.x =temp; /* MatrixPairwiseProduct<<<grid6, block>>>(layer_1_error, layer_1_error, output_1); hipDeviceSynchronize(); ScalarMinusVector2D<<<grid6, block>>>(scalar_minus, 1.0, output_1); hipDeviceSynchronize(); MatrixPairwiseProduct<<<grid6, block>>>(layer_1_error, layer_1_error, scalar_minus); hipDeviceSynchronize(); */ //Input transpose dim3 grid7((INPUT_NODE_COUNT+block.x-1)/block.x, (batch_size+block.y-1)/block.y); hipLaunchKernelGGL(( TransposeVector2DShared), dim3(grid7), dim3(block), 0, 0, input_transpose, data); hipDeviceSynchronize(); //w1 update.... dim3 grid8((HIDDEN_LAYER_NODE_COUNT+block.x-1)/block.x, (INPUT_NODE_COUNT+block.y-1)/block.y); hipLaunchKernelGGL(( MatrixProductShared), dim3(grid8), dim3(block), 0, 0, w1_update, input_transpose, layer_1_error); //b1 update dim3 grid9((HIDDEN_LAYER_NODE_COUNT+block.x-1)/block.x, (1+block.y-1)/block.y); hipLaunchKernelGGL(( MatrixProductShared), dim3(grid9), dim3(block), 0, 0, b1_update, ones, layer_1_error); hipDeviceSynchronize(); //Buras //w2_update * learning rate dim3 grid10((OUTPUT_NODE_COUNT+block.x-1)/block.x, (HIDDEN_LAYER_NODE_COUNT+block.y-1)/block.y); temp = grid10.x ; grid10.x /= 4; if(grid10.x ==0)grid10.x = 1; hipLaunchKernelGGL(( ApplyWeightChange), dim3(grid10), dim3(block), 0, 0, w2, learning_rate, w2_update); grid10.x = temp; /*ScalarMatrixProduct<<<grid10, block>>>(w2_update, learning_rate, w2_update); hipDeviceSynchronize(); //Apply w2 update MatrixAdd<<<grid10, block>>>(w2, w2, w2_update); hipDeviceSynchronize(); */ //b2_update * learning_rate dim3 grid11((OUTPUT_NODE_COUNT+block.x-1)/block.x, (1+block.y-1)/block.y); temp = grid11.x ; grid11.x /= 4; if(grid11.x == 0) grid11.x = 1; hipLaunchKernelGGL(( ApplyWeightChange), dim3(grid11), dim3(block), 0, 0, b2, learning_rate, b2_update); grid11.x = temp; /*ScalarMatrixProduct<<<grid11, block>>>(b2_update, learning_rate, b2_update); hipDeviceSynchronize(); //Apply b2 update MatrixAdd<<<grid11, block>>>(b2, b2, b2_update); hipDeviceSynchronize(); */ //w1_update * leraning_rate dim3 grid12((HIDDEN_LAYER_NODE_COUNT+block.x-1)/block.x, (INPUT_NODE_COUNT+block.y-1)/block.y); temp = grid12.x; grid12.x /= 4; if(grid12.x == 0)grid12.x = 1; hipLaunchKernelGGL(( ApplyWeightChange), dim3(grid12), dim3(block), 0, 0, w1, learning_rate, w1_update); /* ScalarMatrixProduct<<<grid12, block>>>(w1_update, learning_rate, w1_update); hipDeviceSynchronize(); //Apply w1 update MatrixAdd<<<grid12, block>>>(w1, w1, w1_update); 
hipDeviceSynchronize(); */ dim3 grid13((HIDDEN_LAYER_NODE_COUNT+block.x-1)/block.x, (1+block.y-1)/block.y); temp = grid13.x; grid13.x /= 4; if(grid13.x == 0)grid13.x = 1; hipLaunchKernelGGL(( ApplyWeightChange), dim3(grid13), dim3(block), 0, 0, b1, learning_rate, b1_update); /* ScalarMatrixProduct<<<grid13, block>>>(b1_update, learning_rate, b1_update); hipDeviceSynchronize(); //Apply b1 update MatrixAdd<<<grid13, block>>>(b1, b1, b1_update); hipDeviceSynchronize(); */ hipDeviceSynchronize(); } Vector2D * load_text_data() { FILE * dosya = fopen("text_data.dat", "rb"); int width, height; fread(&width, sizeof(int), 1, dosya); fread(&height, sizeof(int), 1, dosya); float * loaded_data = (float *)malloc(width*height*sizeof(float)); for(int a =0; a< width*height; a++) fread(&loaded_data[a], sizeof(float), 1, dosya); fclose(dosya); printf("Width : %d - Height : %d\n", width, height); Vector2D * vec = CreateVector2DCPU(loaded_data, height, width); return vec; } Vector2D * load_label_data() { FILE * dosya = fopen("label_data.dat", "rb"); int width, height; fread(&width, sizeof(int), 1, dosya); fread(&height, sizeof(int), 1, dosya); float * loaded_data = (float *)malloc(width*height*sizeof(float)); int value; for(int a =0; a< width*height; a++) { fread(&value, sizeof(int), 1, dosya); loaded_data[a] = value; } fclose(dosya); printf("Width : %d - Height : %d\n", width, height); Vector2D * vec = CreateVector2DCPU(loaded_data, height, width); return vec; } Vector2D * load_test_text_data() { FILE * dosya = fopen("test_text_data.dat", "rb"); int width, height; fread(&width, sizeof(int), 1, dosya); fread(&height, sizeof(int), 1, dosya); float * loaded_data = (float*)malloc(width*height*sizeof(float)); for(int a =0; a< width*height; a++) fread(&loaded_data[a], sizeof(float), 1, dosya); fclose(dosya); printf("Width : %d - Height : %d\n", width, height); Vector2D * vec = CreateVector2DCPU(loaded_data, height, width); return vec; } Vector2D * load_test_label_data() { FILE * dosya = fopen("test_label_data.dat", "rb"); int width, height; fread(&width, sizeof(int), 1, dosya); fread(&height, sizeof(int), 1, dosya); float * loaded_data = (float *)malloc(width*height*sizeof(float)); int value; for(int a =0; a< width*height; a++) { fread(&value, sizeof(int), 1, dosya); loaded_data[a] = value; } fclose(dosya); printf("Width : %d - Height : %d\n", width, height); Vector2D * vec = CreateVector2DCPU(loaded_data, height, width); return vec; } int train_count = 50000; int test_count = 10000; Vector2D * device_whole_test_data, * device_whole_test_label_data; int main() { clock_t error_calculation_start, error_calculation_end; clock_t train_start, train_end; clock_t program_start, program_end ; program_start = clock(); clock_t execution_start, execution_end; execution_start = clock(); srand(time(0)); int blockx = 32, blocky = 32; dim3 block(blockx, blocky); int batch_size = BATCH_SIZE; float * ones_ = (float *)malloc(sizeof(float)*batch_size); for(int a = 0; a< batch_size;a++)ones_[a] = 1.0; ones = CreateVector2D(ones_, 1, batch_size, true); ones_transpose = CreateVector2D(ones_, batch_size, 1, true); //first hidden layer 160 input 784 w1 = CreateWeightMatrix(INPUT_NODE_COUNT, HIDDEN_LAYER_NODE_COUNT); b1 = CreateWeightMatrix(1, HIDDEN_LAYER_NODE_COUNT); bias_result_1 = CreateVector2D(NULL, batch_size, HIDDEN_LAYER_NODE_COUNT, false); output_1 = CreateVector2D(NULL, batch_size, HIDDEN_LAYER_NODE_COUNT, false); output_1_transpose = CreateVector2D(NULL, HIDDEN_LAYER_NODE_COUNT, batch_size, false); w1_update = 
CreateVector2D(NULL, INPUT_NODE_COUNT, HIDDEN_LAYER_NODE_COUNT, false); b1_update = CreateVector2D(NULL, 1, HIDDEN_LAYER_NODE_COUNT, false); //output 10 nodes.... w2 = CreateWeightMatrix(HIDDEN_LAYER_NODE_COUNT, OUTPUT_NODE_COUNT); w2_transpose = CreateVector2D(NULL, OUTPUT_NODE_COUNT, HIDDEN_LAYER_NODE_COUNT, false); b2 = CreateWeightMatrix(1, OUTPUT_NODE_COUNT); bias_result_2 = CreateVector2D(NULL, batch_size, OUTPUT_NODE_COUNT, false); output_2 = CreateVector2D(NULL, batch_size, OUTPUT_NODE_COUNT, false); w2_update = CreateVector2D(NULL, HIDDEN_LAYER_NODE_COUNT, OUTPUT_NODE_COUNT, false); b2_update = CreateVector2D(NULL, 1, OUTPUT_NODE_COUNT, false); layer_2_error = CreateVector2D(NULL, batch_size, OUTPUT_NODE_COUNT, false); layer_1_error = CreateVector2D(NULL , batch_size, HIDDEN_LAYER_NODE_COUNT, false); scalar_minus = CreateVector2D(NULL, batch_size, HIDDEN_LAYER_NODE_COUNT, false); input_transpose = CreateVector2D(NULL, INPUT_NODE_COUNT, batch_size, false); load_mnist(); printf("SIZE : %d", SIZE); float * train_data = (float *)malloc(sizeof(float)*train_count*784); for(int h = 0; h < train_count; h++) { for(int w=0; w < 784; w++) { //if(h == 0)printf("%f ", train_image[h][w]); train_data[h*784+w] = train_image[h][w]; } } float * train_label_float = (float *)malloc(sizeof(float)*train_count); for(int h = 0; h < train_count; h++) { train_label_float[h] = train_label[h]; } float * test_data = (float *)malloc(sizeof(float)*test_count*784); for(int h = 0; h < test_count; h++) { for(int w=0; w < 784; w++) { test_data[h*784+w] = test_image[h][w]; } } float * test_label_float = (float *)malloc(sizeof(float)*test_count); for(int h = 0; h < test_count; h++) { test_label_float[h] = test_label[h]; } #define SIZE 784 Vector2D * data_set = CreateVector2DCPU(train_data, train_count, SIZE); Vector2D * image_labels = CreateVector2DCPU(train_label_float, train_count, 1 ); Vector2D * one_hot_labels = CreateOneHot(image_labels, OUTPUT_NODE_COUNT); Vector2D * test_samples = CreateVector2DCPU(test_data, test_count, SIZE); Vector2D * test_label_indices = CreateVector2DCPU(test_label_float, test_count, 1); Vector2D * test_labels = CreateOneHot(test_label_indices, OUTPUT_NODE_COUNT); device_whole_data = CreateVector2D(data_set->data, data_set->height, INPUT_NODE_COUNT); device_whole_label = CreateVector2D(one_hot_labels->data, data_set->height, OUTPUT_NODE_COUNT); batch_data = CreateVector2D(NULL, batch_size, INPUT_NODE_COUNT, false); batch_label = CreateVector2D(NULL, batch_size, OUTPUT_NODE_COUNT, false); device_whole_test_data = CreateVector2D(test_samples->data, test_samples->height, INPUT_NODE_COUNT); device_whole_test_label_data = CreateVector2D(test_labels->data, test_labels->height, OUTPUT_NODE_COUNT); Vector2D * batch_test_data, * batch_label_data; batch_test_data = CreateVector2D(NULL, batch_size, test_samples->width, false); batch_label_data = CreateVector2D(NULL, batch_size, test_labels->width, false); int predicted_labels[BATCH_SIZE]; int correct_number = 0, false_number = 0; double error_val[BATCH_SIZE]; double toplam = 0; #define ITERATION_COUNT 100 for(int iteration = 0 ; iteration < ITERATION_COUNT;iteration++) { error_calculation_start = clock(); /*for(int batch_index = 1; batch_index < data_set->height/batch_size; batch_index++) { hipLaunchKernelGGL(( PointerSet), dim3(1) ,dim3(1), 0, 0, batch_data, device_whole_data, (batch_index -1)*batch_size, batch_size); hipDeviceSynchronize(); hipLaunchKernelGGL(( PointerSet), dim3(1) ,dim3(1), 0, 0, batch_label, device_whole_label, 
(batch_index-1)*batch_size, batch_size); hipDeviceSynchronize(); FeedForward(batch_data, batch_size); } */ train_start = clock(); for(int batch_index = 1; batch_index < data_set->height/batch_size; batch_index++) { hipLaunchKernelGGL(( PointerSet), dim3(1) ,dim3(1), 0, 0, batch_data, device_whole_data, (batch_index -1)*batch_size, batch_size); hipDeviceSynchronize(); hipLaunchKernelGGL(( PointerSet), dim3(1) ,dim3(1), 0, 0, batch_label, device_whole_label, (batch_index-1)*batch_size, batch_size); hipDeviceSynchronize(); BackPropagate(batch_data, batch_label, batch_size); dim3 gridd((OUTPUT_NODE_COUNT+block.x-1)/block.x, (batch_size+block.y-1)/block.y); toplam = 0; int temp; temp = gridd.x ; gridd.x /= 4; if(gridd.x == 0)gridd.x = 1; hipLaunchKernelGGL(( calculateCrossEntropyLoss), dim3(gridd), dim3(block), 0, 0, layer_2_error, batch_label, output_2); gridd.x = temp; hipDeviceSynchronize(); dim3 k(1, BATCH_SIZE); hipLaunchKernelGGL(( Sum2D), dim3(1), dim3(k), 0, 0, layer_2_error); hipDeviceSynchronize(); hipMemcpyFromSymbol( &error_val, error_sum, sizeof(float)*BATCH_SIZE); //printf("\n\nIteration %d - Error : %f\n", iteration, error_val); hipDeviceSynchronize(); for(int a=0;a<BATCH_SIZE;a++) toplam += -error_val[a]; printf("\nITeration %d batch %d error %f \n", iteration, batch_index, toplam); printf("\n\nTest Accuracy : %f", (float(correct_number)/(correct_number+false_number)*100.0)); } correct_number = 0 ; false_number = 0; for(int batch_index = 0; batch_index < test_labels->height/batch_size; batch_index++) { hipLaunchKernelGGL(( PointerSet), dim3(1) ,dim3(1), 0, 0, batch_test_data, device_whole_test_data, (batch_index)*batch_size, batch_size); hipDeviceSynchronize(); hipLaunchKernelGGL(( PointerSet), dim3(1) ,dim3(1), 0, 0, batch_label_data, device_whole_test_label_data, (batch_index)*batch_size, batch_size); hipDeviceSynchronize(); //printf("\nFeed forward...\n"); FeedForward(batch_test_data, batch_size); //printf("\nArgmax2d\n"); dim3 block(1 , BATCH_SIZE); hipLaunchKernelGGL(( ArgMax2D), dim3(1), dim3(block), 0, 0, output_2); hipDeviceSynchronize(); hipMemcpyFromSymbol( &predicted_labels, arg_max_result, sizeof(int)*BATCH_SIZE); hipDeviceSynchronize(); for(int i = 0; i < BATCH_SIZE;i++) { if( abs(predicted_labels[i] - test_label_indices->data[i + BATCH_SIZE*batch_index*test_label_indices->width]) < 0.1) { correct_number ++; } else false_number ++; } /*printf("\nCorrect output : \n"); DisplayVector2D<<<1, 1>>>(batch_label_data); hipDeviceSynchronize(); */ } train_end = clock(); printf("\nIteration %d - Whole data train time : %f\n\n", iteration, (double)(train_end - train_start) / CLOCKS_PER_SEC); } printf("\nTraining has finished...\n"); execution_end = clock(); printf("\nWhole data train time : %f\n\n", (double)(execution_end - execution_start) / CLOCKS_PER_SEC); for(int batch_index = 0; batch_index < test_labels->height/batch_size; batch_index++) { hipLaunchKernelGGL(( PointerSet), dim3(1) ,dim3(1), 0, 0, batch_test_data, device_whole_test_data, (batch_index)*batch_size, batch_size); hipDeviceSynchronize(); hipLaunchKernelGGL(( PointerSet), dim3(1) ,dim3(1), 0, 0, batch_label_data, device_whole_test_label_data, (batch_index)*batch_size, batch_size); hipDeviceSynchronize(); //printf("\nFeed forward...\n"); FeedForward(batch_test_data, batch_size); //printf("\nArgmax2d\n"); dim3 block(1 , BATCH_SIZE); hipLaunchKernelGGL(( ArgMax2D), dim3(1), dim3(block), 0, 0, output_2); hipDeviceSynchronize(); hipMemcpyFromSymbol( &predicted_labels, arg_max_result, sizeof(int)*BATCH_SIZE); 
hipDeviceSynchronize(); for(int i = 0; i < BATCH_SIZE;i++) { if( abs(predicted_labels[i] - test_label_indices->data[i + BATCH_SIZE*batch_index*test_label_indices->width]) < 0.1) { correct_number ++; } else false_number ++; } /*printf("\nCorrect output : \n"); DisplayVector2D<<<1, 1>>>(batch_label_data); hipDeviceSynchronize(); */ } printf("\n\nAccuracy : %f", (float(correct_number)/(correct_number+false_number)*100.0)); printf("\nDone\n"); hipDeviceReset(); program_end = clock(); printf("\nProgram execution time : %f\n\n", (double)(program_end- program_start) / CLOCKS_PER_SEC); }
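The MatrixProductShared kernel above assumes row-major storage, i.e. element (r, c) of a height-by-width Vector2D lives at data[r*width + c], and zero-pads out-of-range tile elements. The following host-side reference is a minimal sketch under that same layout assumption and can be used to spot-check the kernel's output; the helper name cpu_matmul_rowmajor is illustrative and does not appear in either source file.

/* Minimal host-side reference for the row-major product computed by
   MatrixProductShared. Element (r, c) of a rows x cols matrix is stored
   at data[r*cols + c], matching the Vector2D convention used above. */
#include <stdio.h>

static void cpu_matmul_rowmajor(const float *A, const float *B, float *C,
                                int aRows, int aCols, int bCols)
{
    for (int r = 0; r < aRows; r++) {          /* row of C (and of A)    */
        for (int c = 0; c < bCols; c++) {      /* column of C (and of B) */
            float sum = 0.0f;
            for (int k = 0; k < aCols; k++)    /* shared inner dimension */
                sum += A[r * aCols + k] * B[k * bCols + c];
            C[r * bCols + c] = sum;
        }
    }
}

int main(void)
{
    /* 2x3 times 3x2 example in the same row-major convention. */
    float A[6] = {1, 2, 3, 4, 5, 6};
    float B[6] = {7, 8, 9, 10, 11, 12};
    float C[4];
    cpu_matmul_rowmajor(A, B, C, 2, 3, 2);
    printf("%.0f %.0f\n%.0f %.0f\n", C[0], C[1], C[2], C[3]); /* 58 64 / 139 154 */
    return 0;
}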
883e2a71101f2e887ad82605885fecc9c7ca1c98.cu
#include<stdio.h> #include "MNIST_for_C-master/mnist.h" #include<stdio.h> #include<string.h> #include <stdlib.h> #include <stdarg.h> #include<time.h> #include <math.h> #define CHECK(call) \ { \ const cudaError_t error = call; \ if (error != cudaSuccess) \ { \ printf("Error: %s:%d, ", __FILE__, __LINE__); \ printf("code:%d, reason: %s\n", error, cudaGetErrorString(error)); \ exit(1); \ } \ } #define ITERATION_COUNT 100 #define BATCH_SIZE 32 #define BLOCK_X 32 #define BLOCK_Y 32 float learning_rate = 1.0e-6; #define INPUT_NODE_COUNT 784 #define HIDDEN_LAYER_NODE_COUNT 96 #define OUTPUT_NODE_COUNT 32 // All vectors/matrices are stored as this structure in the memory... struct Vector2D { // Whole vector/matrix data is stored in one dimensional array... // All numbers are floating point numbers.... //This pointer points where the vector/matrix data lyies.... float * data; // Row number of the vector/matrix... int height; // Column number of the vector/matrix... int width; int size; }; // We are defining a type from this structure definition... typedef struct Vector2D Vector2D; float * device_matrix_location; Vector2D * CreateVector2D(float * data, int height, int width, bool fill = true, bool store = false) { // A new structure is allocated in GPU memory for matrix/vector... Vector2D * temp ; CHECK(cudaMalloc(&temp, sizeof(Vector2D))); float * temp2; CHECK(cudaMalloc(&temp2, sizeof(float)*height*width)); if(fill == true) CHECK(cudaMemcpy(temp2, data, sizeof(float)*height*width, cudaMemcpyHostToDevice)); CHECK(cudaMemcpy(&temp->data, &temp2, sizeof(float *), cudaMemcpyHostToDevice)); CHECK(cudaMemcpy(&temp->height, (void *)(&height), sizeof(int), cudaMemcpyHostToDevice)); CHECK(cudaMemcpy(&temp->width, (void *)(&width), sizeof(int), cudaMemcpyHostToDevice)); //temp->height = height; //temp->width = width; if(store == true) device_matrix_location = temp2; cudaDeviceSynchronize(); return temp; } __global__ void MatrixSubtract(Vector2D * __restrict__ result, Vector2D * __restrict__ vec1, Vector2D * __restrict__ vec2) { if((vec1->width != vec2->width) || (vec1->height != vec2->height)) { printf("\n\n**********Matrix Subtract diff dimension...."); return; } int tx = blockIdx.x*blockDim.x*4+ threadIdx.x; int ty = blockIdx.y*blockDim.y + threadIdx.y; int tid = ty*vec1->width+tx; if(tid ==0) { //printf("\nMatrixSubtractvec->width : %d vec->height : %d - x dim %d y dim %d\n", vec1->width, vec1->height, blockDim.x*gridDim.x, blockDim.y*gridDim.y); if((blockDim.y*gridDim.y < vec1->height) || (blockDim.x*gridDim.x<vec1->width/4)) { printf("\nMatrixSubtract\n"); printf("vec->width : %d vec->height : %d - x dim %d y dim %d", vec1->width, vec1->height, blockDim.x*gridDim.x, blockDim.y*gridDim.y); } } if(tid +3*blockDim.x< vec1->width*vec1->height) { result->data[tid] = vec1->data[tid] - vec2->data[tid]; result->data[tid+blockDim.x] = vec1->data[tid+blockDim.x] - vec2->data[tid+blockDim.x]; result->data[tid+2*blockDim.x] = vec1->data[tid+2*blockDim.x] - vec2->data[tid+2*blockDim.x]; result->data[tid+3*blockDim.x] = vec1->data[tid+3*blockDim.x] - vec2->data[tid+3*blockDim.x]; } } #define TILE_WIDTH BLOCK_X #define TILE_HEIGHT BLOCK_Y __global__ void MatrixProductShared( Vector2D * __restrict__ result, Vector2D * __restrict__ m1, Vector2D * __restrict__ m2 )//float *A, float *B, float *C ) { { __shared__ float A_tile[TILE_HEIGHT][TILE_WIDTH]; __shared__ float B_tile[TILE_HEIGHT][TILE_WIDTH+1]; int numARows = m1->height, numAColumns= m1->width, numBRows = m2->height, numBColumns = m2->width, numCRows = 
result->height, numCColumns = m2->width; float * A = m1->data, * B = m2->data, * C = result->data; float sum = 0.0; // where am I? // tx for thread_x or tile_x int tx = threadIdx.x; int ty = threadIdx.y; // cx for top left corner of tile in C int cx = blockIdx.x * blockDim.x; int cy = blockIdx.y * blockDim.y; // Cx for cell coordinates in C int Cx = cx + tx; int Cy = cy + ty; int total_tiles = (numAColumns + TILE_WIDTH - 1) / TILE_WIDTH; for (int tile_idx = 0; tile_idx < total_tiles; tile_idx++) { // the corresponding tiles' top left corners are: // for A: row = blockIdx.y * blockDim.y, col = tile_idx * TILE_WIDTH // for B: row = tile_idx * TILE_WIDTH, col = blockIdx.x * blockDim.x // loading tiles int Ax = tile_idx * TILE_WIDTH + tx; int Ay = cy + ty; int Bx = cx + tx; int By = tile_idx * TILE_WIDTH + ty; if (Ax < numAColumns && Ay < numARows) { A_tile[ty][tx] = A[Ay * numAColumns + Ax]; } else { A_tile[ty][tx] = 0.0; } if (Bx < numBColumns && By < numBRows) { B_tile[ty][tx] = B[By * numBColumns + Bx]; } else { B_tile[ty][tx] = 0.0; } __syncthreads(); // multiplying tiles #pragma unroll 4 for (int i = 0; i < TILE_WIDTH; i++) { sum += A_tile[ty][i] * B_tile[i][tx]; } __syncthreads(); } // saving result (discarded if we're in the wrong thread) if (Cx < numCColumns && Cy < numCRows) { C[Cy * numCColumns + Cx] = sum; } } __global__ void TransposeVector2DShared(Vector2D * __restrict__ res, Vector2D * __restrict__ m1) { int thx = blockIdx.x*blockDim.x+ threadIdx.x; int thy = blockIdx.y*blockDim.y+threadIdx.y; int tid = thx + thy*m1->width; __shared__ float ordered_data[BLOCK_Y][BLOCK_X+1]; __shared__ float transposed_data[BLOCK_Y][BLOCK_X+1]; int j = threadIdx.x+blockDim.x*blockIdx.y; int k = threadIdx.y + blockDim.y*blockIdx.x; int target = j + res->width*k; if(tid < m1->width*m1->height) { //padded ordered_data[threadIdx.y][threadIdx.x] = m1->data[tid] ; } __syncthreads(); //transposed_data[thy+thx*m1->height] = ordered_data[tid] ; if(thx < m1->width && thy< m1->height) { transposed_data[threadIdx.x][threadIdx.y] = ordered_data[threadIdx.y][threadIdx.x]; } __syncthreads(); if(thx < m1->width && thy< m1->height) { res->data [target] = transposed_data[threadIdx.y][threadIdx.x] ; //printf("idy : %d - idx : %d - blockdim x : %d - blockDim y : %d - gridDim.x - %d - gridDim.y : %d\n", thy, thx, blockDim.x, blockDim.y, gridDim.x, gridDim.y); } if(tid ==0) { //printf("\nTransposeVector2Dvec->width : %d vec->height : %d - x dim %d y dim %d\n", m1->width, m1->height, blockDim.x*gridDim.x, blockDim.y*gridDim.y); if((blockDim.y*gridDim.y < m1->height) || (blockDim.x*gridDim.x<m1->width)) { printf("\nTransposeVector2D\n"); printf("vec->width : %d vec->height : %d - x dim %d y dim %d", m1->width, m1->height, blockDim.x*gridDim.x, blockDim.y*gridDim.y); } } } __global__ void DisplayVector2D(Vector2D * vector) { printf("["); for(int h = 0; h < vector->height; h++) { printf("["); for( int w = 0; w < vector->width-1; w++) { printf("%f, ", vector->data[h*vector->width+w]); } printf("%f], \n", vector->data[h*vector->width+vector->width-1]); } printf("]\n"); printf("Row : %d - Width : %d \n\n", vector->height, vector->width); } __device__ float error_sum[BATCH_SIZE]; __global__ void Sum2D(Vector2D * __restrict__ vec) { int tid = threadIdx.y; int val = 0; int width = vec->width; #pragma unroll 4 for(int a = 0; a < width; a++) { val += vec->data[a+tid*width]; } error_sum[tid] = val; } __device__ int arg_max_result[BATCH_SIZE]; __global__ void ArgMax2D(Vector2D * __restrict__ vec1) { int tid = blockIdx.y*blockDim.y 
+ threadIdx.y; if(tid ==0) { //printf("\nArgMax2Dvec->width : %d vec->height : %d - x dim %d y dim %d\n", vec1->width, vec1->height, blockDim.x*gridDim.x, blockDim.y*gridDim.y); if(blockDim.y*gridDim.y < vec1->height) { printf("\nArgMax2D\n"); printf("vec->width : %d vec->height : %d - x dim %d y dim %d", vec1->width, vec1->height, blockDim.x*gridDim.x, blockDim.y*gridDim.y); } } if(tid < vec1->height) { float max = -100000; int max_index = 0; #pragma unroll 4 for(int a = 0; a < vec1->width;a++) { if(vec1->data[tid*vec1->width+a]>max) { max = vec1->data[tid*vec1->width+a]; max_index = a; } } arg_max_result[tid] = max_index; } } __global__ void Softmax(Vector2D * __restrict__ result, Vector2D * __restrict__ vec1) { int tid = blockIdx.y*blockDim.y + threadIdx.y; if(tid ==0) { //printf("\nSoftmaxvec->width : %d vec->height : %d - x dim %d y dim %d\n", vec1->width, vec1->height, blockDim.x*gridDim.x, blockDim.y*gridDim.y); if(blockDim.y*gridDim.y < vec1->height) { printf("\nSoftmax\n"); printf("vec->width : %d vec->height : %d - x dim %d y dim %d", vec1->width, vec1->height, blockDim.x*gridDim.x, blockDim.y*gridDim.y); } } if(tid < vec1->height) { float toplam = 0; #pragma unroll 4 for(int a = 0; a < vec1->width;a++) { toplam += vec1->data[a+tid*vec1->width]; } for(int a = 0; a < vec1->width;a++) { result->data[a+tid*vec1->width] = vec1->data[a+tid*vec1->width]/toplam; } } } __global__ void PointerSet(Vector2D * f1, Vector2D * f2, int shift, int batch_size) { f1->width = f2->width; f1->height = batch_size; f1->data = f2->data + f2->width*shift; } float generate_uniform(float a, float b) { return rand() / (RAND_MAX + 1.0) * (b - a) + a; } Vector2D * CreateWeightMatrix(int input_count, int output_count) { float init_range = 0; Vector2D * temp = (Vector2D *)malloc(sizeof(Vector2D)); Vector2D * device_temp; CHECK(cudaMalloc(&device_temp, sizeof(Vector2D))); temp->height = input_count; //For bias... temp->width = output_count; temp->data = (float * )malloc(sizeof(float)*(input_count)*output_count); init_range = sqrt(2.0 / input_count); for(int a=0; a<(input_count)*output_count; a++) { temp->data[a] = generate_uniform(-init_range, init_range); } float * temp2; CHECK(cudaMalloc(&temp2, sizeof(float)*temp->height*temp->width)); CHECK(cudaMemcpy(temp2, temp->data, sizeof(float)*temp->height*temp->width, cudaMemcpyHostToDevice)); CHECK(cudaMemcpy(&device_temp->data, &temp2, sizeof(float *), cudaMemcpyHostToDevice)); CHECK(cudaMemcpy(&device_temp->height, &(temp->height), sizeof(int), cudaMemcpyHostToDevice)); CHECK(cudaMemcpy(&device_temp->width, &(temp->width), sizeof(int), cudaMemcpyHostToDevice)); return device_temp; } Vector2D * CreateVector2DCPU(float * data, int height, int width) { // A new structure is allocated in memory for matrix/vector... 
Vector2D * temp = (Vector2D *)malloc(sizeof(struct Vector2D)); temp->data = data; temp->height = height; temp->width = width; return temp; }; Vector2D * CreateOneHot(Vector2D * indexes, int vector_length) { Vector2D * one_hot_vector = (Vector2D*)malloc(sizeof(Vector2D)); one_hot_vector->height = indexes->height; one_hot_vector->width = vector_length; one_hot_vector->size = one_hot_vector->height; one_hot_vector->data = (float *)malloc(sizeof(float)*indexes->height*vector_length); memset(one_hot_vector->data, 0, sizeof(float)*indexes->height*vector_length); for(int i=0; i<one_hot_vector->height;i++) { one_hot_vector->data[i*vector_length+(int)indexes->data[i*indexes->width]] = 1.0; } return one_hot_vector; } void DisplayVector2DCPU(Vector2D * vector) { printf("["); for(int h = 0; h < vector->height; h++) { printf("["); for( int w = 0; w < vector->width-1; w++) { printf("%f, ", vector->data[h*vector->width+w]); } printf("%f], \n", vector->data[h*vector->width+vector->width-1]); } printf("\b\b\b]"); } __global__ void AddandSigmoid(Vector2D * __restrict__ result, Vector2D * __restrict__ vec1, Vector2D * __restrict__ vec2) { if((vec1->width != vec2->width) || (vec1->height != vec2->height)) { printf("\n\n**********Matrix add diff dimension...."); return; } int tx = blockIdx.x*blockDim.x*4+ threadIdx.x; int ty = blockIdx.y*blockDim.y + threadIdx.y; int tid = ty*vec1->width+tx; if(tid ==0) {//printf("\nMatrixAddvec->width : %d vec->height : %d - x dim %d y dim %d\n", vec1->width, vec1->height, blockDim.x*gridDim.x, blockDim.y*gridDim.y); if((blockDim.y*gridDim.y < vec1->height) || (blockDim.x*gridDim.x<vec1->width/4)) { printf("\AddandSigmoid\n"); printf("vec->width : %d vec->height : %d - x dim %d y dim %d", vec1->width, vec1->height, blockDim.x*gridDim.x, blockDim.y*gridDim.y); } } if(tid + blockDim.x*3 < vec1->width*vec1->height) { result->data[tid] = 1.0/(1+exp(-(vec1->data[tid] + vec2->data[tid]))); result->data[tid+blockDim.x] = 1.0/(1+exp(-(vec1->data[tid+blockDim.x] + vec2->data[tid+blockDim.x]))); result->data[tid+2*blockDim.x] = 1.0/(1+exp(-(vec1->data[tid+2*blockDim.x] + vec2->data[tid+2*blockDim.x]))); result->data[tid+3*blockDim.x] = 1.0/(1+exp(-(vec1->data[tid+3*blockDim.x] + vec2->data[tid+3*blockDim.x]))); } } __global__ void AddandExponential(Vector2D * __restrict__ result, Vector2D * __restrict__ vec1, Vector2D * __restrict__ vec2) { if((vec1->width != vec2->width) || (vec1->height != vec2->height)) { printf("\n\n**********Matrix add diff dimension...."); return; } int tx = blockIdx.x*blockDim.x+ threadIdx.x; int ty = blockIdx.y*blockDim.y + threadIdx.y; int tid = ty*vec1->width+tx; if(tid ==0) {//printf("\nMatrixAddvec->width : %d vec->height : %d - x dim %d y dim %d\n", vec1->width, vec1->height, blockDim.x*gridDim.x, blockDim.y*gridDim.y); if((blockDim.y*gridDim.y < vec1->height) || (blockDim.x*gridDim.x<vec1->width)) { printf("\AddandExponential\n"); printf("vec->width : %d vec->height : %d - x dim %d y dim %d", vec1->width, vec1->height, blockDim.x*gridDim.x, blockDim.y*gridDim.y); } } if(tid < vec1->width*vec1->height) { result->data[tid] = exp(vec1->data[tid] + vec2->data[tid]); } } //Combination of matrixpairwise-Scalarminus-matrixpairwise in backpropagte.... 
__global__ void LayerErrorCalculate(Vector2D * __restrict__ result, Vector2D * __restrict__ vec1, Vector2D * __restrict__ vec2) { if((vec1->width != vec2->width) || (vec1->height != vec2->height)) { printf("\n\n**********MatrixPairwiseProduct dimension...."); return; } int tx = blockIdx.x*blockDim.x*4+ threadIdx.x; int ty = blockIdx.y*blockDim.y + threadIdx.y; int tid = ty*vec1->width+tx; if(tid ==0) { //printf("\nMatrixPairwiseProductvec->width : %d vec->height : %d - x dim %d y dim %d\n", vec1->width, vec1->height, blockDim.x*gridDim.x, blockDim.y*gridDim.y); if((blockDim.y*gridDim.y < vec1->height) || (blockDim.x*gridDim.x<vec1->width/4)) { printf("\nMatrixPairwiseProduct\n"); printf("vec->width : %d vec->height : %d - x dim %d y dim %d", vec1->width, vec2->height, blockDim.x*gridDim.x, blockDim.y*gridDim.y); } } if(tid + 3*blockDim.x< vec1->width*vec1->height) { result->data[tid] = vec1->data[tid] * vec2->data[tid]*(1-vec2->data[tid]) ; result->data[tid+blockDim.x] = vec1->data[tid+blockDim.x] * vec2->data[tid+blockDim.x]*(1-vec2->data[tid+blockDim.x]) ; result->data[tid+2*blockDim.x] = vec1->data[tid+2*blockDim.x] * vec2->data[tid+2*blockDim.x]*(1-vec2->data[tid+2*blockDim.x]) ; result->data[tid+3*blockDim.x] = vec1->data[tid+3*blockDim.x] * vec2->data[tid+3*blockDim.x]*(1-vec2->data[tid+3*blockDim.x]) ; } } __global__ void ApplyWeightChange(Vector2D * __restrict__ result, float learning_rate, Vector2D * __restrict__ source) { if((result->width != source->width) || (result->height != source->height)) { printf("\n\n**********ScalarMatrixProduct dimensionç...."); return; } int tx = blockIdx.x*blockDim.x*4+ threadIdx.x; int ty = blockIdx.y*blockDim.y + threadIdx.y; int tid = ty*source->width+tx; if(tid ==0) { //printf("\nScalarMatrixProductvec->width : %d vec->height : %d - x dim %d y dim %d\n", vec1->width, vec1->height, blockDim.x*gridDim.x, blockDim.y*gridDim.y); if((blockDim.y*gridDim.y < source->height) || (blockDim.x*gridDim.x<source->width/4)) { printf("\nScalarMatrixProduct\n"); printf("vec->width : %d vec->height : %d - x dim %d y dim %d", source->width, source->height, blockDim.x*gridDim.x, blockDim.y*gridDim.y); } } if(tid +3*blockDim.x< source->width*source->height) { result->data[tid] += learning_rate*source->data[tid]; result->data[tid+blockDim.x] += learning_rate*source->data[tid+blockDim.x]; result->data[tid+2*blockDim.x] += learning_rate*source->data[tid+2*blockDim.x]; result->data[tid+3*blockDim.x] += learning_rate*source->data[tid+3*blockDim.x]; } } __global__ void Vector2DInfo(Vector2D * vec) { printf("\n\nWidth : %d - height : %d\n\n", vec->width, vec->height); } __global__ void calculateCrossEntropyLoss(Vector2D * __restrict__ result, Vector2D * __restrict__ vec1, Vector2D * __restrict__ vec2) { if((vec1->width != vec2->width) || (vec1->height != vec2->height)) { printf("\n\n**********MatrixPairwiseProduct dimension...."); return; } int tx = blockIdx.x*blockDim.x*4+ threadIdx.x; int ty = blockIdx.y*blockDim.y + threadIdx.y; int tid = ty*vec1->width+tx; if(tid ==0) { //printf("\nMatrixPairwiseProductvec->width : %d vec->height : %d - x dim %d y dim %d\n", vec1->width, vec1->height, blockDim.x*gridDim.x, blockDim.y*gridDim.y); if((blockDim.y*gridDim.y < vec1->height) || (blockDim.x*gridDim.x<vec1->width/4)) { printf("\nMatrixPairwiseProduct\n"); printf("vec->width : %d vec->height : %d - x dim %d y dim %d", vec1->width, vec2->height, blockDim.x*gridDim.x, blockDim.y*gridDim.y); } } if(tid +3*blockDim.x< vec1->width*vec1->height) { result->data[tid] = vec1->data[tid] * 
log(vec2->data[tid]); result->data[tid+blockDim.x] = vec1->data[tid+blockDim.x] * log(vec2->data[tid+blockDim.x]); result->data[tid+2*blockDim.x] = vec1->data[tid+2*blockDim.x] * log(vec2->data[tid+2*blockDim.x]); result->data[tid+3*blockDim.x] = vec1->data[tid+3*blockDim.x] * log(vec2->data[tid+3*blockDim.x]); } } #define EMPTY printf("\n\n"); Vector2D * w1, * w2, * b1, * b2; Vector2D * output_1, * output_2; Vector2D * bias_result_1, * bias_result_2; Vector2D * ones, * ones_transpose; void FeedForward(Vector2D * device_input, int batch_size) { //input * w1 dim3 block(BLOCK_X, BLOCK_Y); dim3 grid((HIDDEN_LAYER_NODE_COUNT+block.x-1)/block.x, (batch_size+block.y-1)/block.y); MatrixProductShared<<<grid, block>>>(output_1, device_input, w1); cudaDeviceSynchronize(); //transpose ones * b1 MatrixProductShared<<<grid, block>>>(bias_result_1, ones_transpose, b1); cudaDeviceSynchronize(); int temp = grid.x ; grid.x /=4; if(grid.x == 0)grid.x = 1; AddandSigmoid<<<grid, block>>>(output_1, output_1, bias_result_1); cudaDeviceSynchronize(); grid.x =temp; /* //bias1 + input*w1 MatrixAdd<<<grid, block>>>(output_1, output_1, bias_result_1); cudaDeviceSynchronize(); // output of hidden layer... Sigmoid<<<grid, block>>>(output_1, output_1); cudaDeviceSynchronize(); */ //output of hidden layer * w2 grid.x = (OUTPUT_NODE_COUNT+block.x-1)/block.x; grid.y = (batch_size+block.y-1)/block.y; MatrixProductShared<<<grid, block>>>(output_2, output_1, w2); cudaDeviceSynchronize(); //transpose ones * b2 MatrixProductShared<<<grid, block>>>(bias_result_2, ones_transpose, b2); cudaDeviceSynchronize(); AddandExponential<<<grid, block>>>(output_2, output_2, bias_result_2); cudaDeviceSynchronize(); /* //bias2 + output of hidden layer * w2 - final output.... MatrixAdd<<<grid, block>>>(output_2, output_2, bias_result_2); cudaDeviceSynchronize(); Exponential<<<grid, block>>>(output_2, output_2); cudaDeviceSynchronize(); */ grid.x = 1; block.x = 1; Softmax<<<grid, block>>>(output_2, output_2); cudaDeviceSynchronize(); } Vector2D * layer_2_error, * layer_1_error; Vector2D * w1_update, * w2_update, * b1_update, * b2_update; Vector2D * output_1_transpose, * input_transpose; Vector2D * label_data; Vector2D * device_whole_label; Vector2D * device_whole_data; Vector2D * device_input; Vector2D * w2_transpose; Vector2D * scalar_minus; Vector2D * batch_data; Vector2D * batch_label; void BackPropagate(Vector2D * data, Vector2D * label, int batch_size) { FeedForward(data, batch_size); int temp; //Output error calculation dim3 block(BLOCK_X, BLOCK_Y); dim3 grid((OUTPUT_NODE_COUNT+block.x-1)/block.x, (batch_size+block.y-1)/block.y); temp = grid.x ; grid.x /= 4; if(grid.x ==0 )grid.x = 1; MatrixSubtract<<<grid, block>>>(layer_2_error, label, output_2); grid.x = temp; //output1 transpose dim3 grid2((HIDDEN_LAYER_NODE_COUNT+block.x-1)/block.x, (batch_size+block.y-1)/block.y); TransposeVector2DShared<<<grid2, block>>>(output_1_transpose, output_1); cudaDeviceSynchronize(); //W2 update... 
dim3 grid3((OUTPUT_NODE_COUNT+block.x-1)/block.x, (HIDDEN_LAYER_NODE_COUNT+block.y-1)/block.y); MatrixProductShared<<<grid3, block>>>(w2_update, output_1_transpose, layer_2_error); //b2 update dim3 grid4((OUTPUT_NODE_COUNT+block.x-1)/block.x, (1+block.y-1)/block.y); MatrixProductShared<<<grid4, block>>>(b2_update, ones, layer_2_error); //W2 transpose dim3 grid5((OUTPUT_NODE_COUNT+block.x-1)/block.x, (HIDDEN_LAYER_NODE_COUNT+block.y-1)/block.y); TransposeVector2DShared<<<grid5, block>>>(w2_transpose, w2); cudaDeviceSynchronize(); //Layer 1 error dim3 grid6((HIDDEN_LAYER_NODE_COUNT+block.x-1)/block.x, (batch_size+block.y-1)/block.y); MatrixProductShared<<<grid6, block>>>(layer_1_error, layer_2_error, w2_transpose); cudaDeviceSynchronize(); temp = grid.x; grid.x /=4; if(grid.x == 0)grid.x = 1; LayerErrorCalculate<<<grid6, block>>>(layer_1_error, layer_1_error, output_1); grid.x =temp; /* MatrixPairwiseProduct<<<grid6, block>>>(layer_1_error, layer_1_error, output_1); cudaDeviceSynchronize(); ScalarMinusVector2D<<<grid6, block>>>(scalar_minus, 1.0, output_1); cudaDeviceSynchronize(); MatrixPairwiseProduct<<<grid6, block>>>(layer_1_error, layer_1_error, scalar_minus); cudaDeviceSynchronize(); */ //Input transpose dim3 grid7((INPUT_NODE_COUNT+block.x-1)/block.x, (batch_size+block.y-1)/block.y); TransposeVector2DShared<<<grid7, block>>>(input_transpose, data); cudaDeviceSynchronize(); //w1 update.... dim3 grid8((HIDDEN_LAYER_NODE_COUNT+block.x-1)/block.x, (INPUT_NODE_COUNT+block.y-1)/block.y); MatrixProductShared<<<grid8, block>>>(w1_update, input_transpose, layer_1_error); //b1 update dim3 grid9((HIDDEN_LAYER_NODE_COUNT+block.x-1)/block.x, (1+block.y-1)/block.y); MatrixProductShared<<<grid9, block>>>(b1_update, ones, layer_1_error); cudaDeviceSynchronize(); //Burası //w2_update * learning rate dim3 grid10((OUTPUT_NODE_COUNT+block.x-1)/block.x, (HIDDEN_LAYER_NODE_COUNT+block.y-1)/block.y); temp = grid10.x ; grid10.x /= 4; if(grid10.x ==0)grid10.x = 1; ApplyWeightChange<<<grid10, block>>>(w2, learning_rate, w2_update); grid10.x = temp; /*ScalarMatrixProduct<<<grid10, block>>>(w2_update, learning_rate, w2_update); cudaDeviceSynchronize(); //Apply w2 update MatrixAdd<<<grid10, block>>>(w2, w2, w2_update); cudaDeviceSynchronize(); */ //b2_update * learning_rate dim3 grid11((OUTPUT_NODE_COUNT+block.x-1)/block.x, (1+block.y-1)/block.y); temp = grid11.x ; grid11.x /= 4; if(grid11.x == 0) grid11.x = 1; ApplyWeightChange<<<grid11, block>>>(b2, learning_rate, b2_update); grid11.x = temp; /*ScalarMatrixProduct<<<grid11, block>>>(b2_update, learning_rate, b2_update); cudaDeviceSynchronize(); //Apply b2 update MatrixAdd<<<grid11, block>>>(b2, b2, b2_update); cudaDeviceSynchronize(); */ //w1_update * leraning_rate dim3 grid12((HIDDEN_LAYER_NODE_COUNT+block.x-1)/block.x, (INPUT_NODE_COUNT+block.y-1)/block.y); temp = grid12.x; grid12.x /= 4; if(grid12.x == 0)grid12.x = 1; ApplyWeightChange<<<grid12, block>>>(w1, learning_rate, w1_update); /* ScalarMatrixProduct<<<grid12, block>>>(w1_update, learning_rate, w1_update); cudaDeviceSynchronize(); //Apply w1 update MatrixAdd<<<grid12, block>>>(w1, w1, w1_update); cudaDeviceSynchronize(); */ dim3 grid13((HIDDEN_LAYER_NODE_COUNT+block.x-1)/block.x, (1+block.y-1)/block.y); temp = grid13.x; grid13.x /= 4; if(grid13.x == 0)grid13.x = 1; ApplyWeightChange<<<grid13, block>>>(b1, learning_rate, b1_update); /* ScalarMatrixProduct<<<grid13, block>>>(b1_update, learning_rate, b1_update); cudaDeviceSynchronize(); //Apply b1 update MatrixAdd<<<grid13, block>>>(b1, b1, b1_update); 
cudaDeviceSynchronize(); */ cudaDeviceSynchronize(); } Vector2D * load_text_data() { FILE * dosya = fopen("text_data.dat", "rb"); int width, height; fread(&width, sizeof(int), 1, dosya); fread(&height, sizeof(int), 1, dosya); float * loaded_data = (float *)malloc(width*height*sizeof(float)); for(int a =0; a< width*height; a++) fread(&loaded_data[a], sizeof(float), 1, dosya); fclose(dosya); printf("Width : %d - Height : %d\n", width, height); Vector2D * vec = CreateVector2DCPU(loaded_data, height, width); return vec; } Vector2D * load_label_data() { FILE * dosya = fopen("label_data.dat", "rb"); int width, height; fread(&width, sizeof(int), 1, dosya); fread(&height, sizeof(int), 1, dosya); float * loaded_data = (float *)malloc(width*height*sizeof(float)); int value; for(int a =0; a< width*height; a++) { fread(&value, sizeof(int), 1, dosya); loaded_data[a] = value; } fclose(dosya); printf("Width : %d - Height : %d\n", width, height); Vector2D * vec = CreateVector2DCPU(loaded_data, height, width); return vec; } Vector2D * load_test_text_data() { FILE * dosya = fopen("test_text_data.dat", "rb"); int width, height; fread(&width, sizeof(int), 1, dosya); fread(&height, sizeof(int), 1, dosya); float * loaded_data = (float*)malloc(width*height*sizeof(float)); for(int a =0; a< width*height; a++) fread(&loaded_data[a], sizeof(float), 1, dosya); fclose(dosya); printf("Width : %d - Height : %d\n", width, height); Vector2D * vec = CreateVector2DCPU(loaded_data, height, width); return vec; } Vector2D * load_test_label_data() { FILE * dosya = fopen("test_label_data.dat", "rb"); int width, height; fread(&width, sizeof(int), 1, dosya); fread(&height, sizeof(int), 1, dosya); float * loaded_data = (float *)malloc(width*height*sizeof(float)); int value; for(int a =0; a< width*height; a++) { fread(&value, sizeof(int), 1, dosya); loaded_data[a] = value; } fclose(dosya); printf("Width : %d - Height : %d\n", width, height); Vector2D * vec = CreateVector2DCPU(loaded_data, height, width); return vec; } int train_count = 50000; int test_count = 10000; Vector2D * device_whole_test_data, * device_whole_test_label_data; int main() { clock_t error_calculation_start, error_calculation_end; clock_t train_start, train_end; clock_t program_start, program_end ; program_start = clock(); clock_t execution_start, execution_end; execution_start = clock(); srand(time(0)); int blockx = 32, blocky = 32; dim3 block(blockx, blocky); int batch_size = BATCH_SIZE; float * ones_ = (float *)malloc(sizeof(float)*batch_size); for(int a = 0; a< batch_size;a++)ones_[a] = 1.0; ones = CreateVector2D(ones_, 1, batch_size, true); ones_transpose = CreateVector2D(ones_, batch_size, 1, true); //first hidden layer 160 input 784 w1 = CreateWeightMatrix(INPUT_NODE_COUNT, HIDDEN_LAYER_NODE_COUNT); b1 = CreateWeightMatrix(1, HIDDEN_LAYER_NODE_COUNT); bias_result_1 = CreateVector2D(NULL, batch_size, HIDDEN_LAYER_NODE_COUNT, false); output_1 = CreateVector2D(NULL, batch_size, HIDDEN_LAYER_NODE_COUNT, false); output_1_transpose = CreateVector2D(NULL, HIDDEN_LAYER_NODE_COUNT, batch_size, false); w1_update = CreateVector2D(NULL, INPUT_NODE_COUNT, HIDDEN_LAYER_NODE_COUNT, false); b1_update = CreateVector2D(NULL, 1, HIDDEN_LAYER_NODE_COUNT, false); //output 10 nodes.... 
w2 = CreateWeightMatrix(HIDDEN_LAYER_NODE_COUNT, OUTPUT_NODE_COUNT); w2_transpose = CreateVector2D(NULL, OUTPUT_NODE_COUNT, HIDDEN_LAYER_NODE_COUNT, false); b2 = CreateWeightMatrix(1, OUTPUT_NODE_COUNT); bias_result_2 = CreateVector2D(NULL, batch_size, OUTPUT_NODE_COUNT, false); output_2 = CreateVector2D(NULL, batch_size, OUTPUT_NODE_COUNT, false); w2_update = CreateVector2D(NULL, HIDDEN_LAYER_NODE_COUNT, OUTPUT_NODE_COUNT, false); b2_update = CreateVector2D(NULL, 1, OUTPUT_NODE_COUNT, false); layer_2_error = CreateVector2D(NULL, batch_size, OUTPUT_NODE_COUNT, false); layer_1_error = CreateVector2D(NULL , batch_size, HIDDEN_LAYER_NODE_COUNT, false); scalar_minus = CreateVector2D(NULL, batch_size, HIDDEN_LAYER_NODE_COUNT, false); input_transpose = CreateVector2D(NULL, INPUT_NODE_COUNT, batch_size, false); load_mnist(); printf("SIZE : %d", SIZE); float * train_data = (float *)malloc(sizeof(float)*train_count*784); for(int h = 0; h < train_count; h++) { for(int w=0; w < 784; w++) { //if(h == 0)printf("%f ", train_image[h][w]); train_data[h*784+w] = train_image[h][w]; } } float * train_label_float = (float *)malloc(sizeof(float)*train_count); for(int h = 0; h < train_count; h++) { train_label_float[h] = train_label[h]; } float * test_data = (float *)malloc(sizeof(float)*test_count*784); for(int h = 0; h < test_count; h++) { for(int w=0; w < 784; w++) { test_data[h*784+w] = test_image[h][w]; } } float * test_label_float = (float *)malloc(sizeof(float)*test_count); for(int h = 0; h < test_count; h++) { test_label_float[h] = test_label[h]; } #define SIZE 784 Vector2D * data_set = CreateVector2DCPU(train_data, train_count, SIZE); Vector2D * image_labels = CreateVector2DCPU(train_label_float, train_count, 1 ); Vector2D * one_hot_labels = CreateOneHot(image_labels, OUTPUT_NODE_COUNT); Vector2D * test_samples = CreateVector2DCPU(test_data, test_count, SIZE); Vector2D * test_label_indices = CreateVector2DCPU(test_label_float, test_count, 1); Vector2D * test_labels = CreateOneHot(test_label_indices, OUTPUT_NODE_COUNT); device_whole_data = CreateVector2D(data_set->data, data_set->height, INPUT_NODE_COUNT); device_whole_label = CreateVector2D(one_hot_labels->data, data_set->height, OUTPUT_NODE_COUNT); batch_data = CreateVector2D(NULL, batch_size, INPUT_NODE_COUNT, false); batch_label = CreateVector2D(NULL, batch_size, OUTPUT_NODE_COUNT, false); device_whole_test_data = CreateVector2D(test_samples->data, test_samples->height, INPUT_NODE_COUNT); device_whole_test_label_data = CreateVector2D(test_labels->data, test_labels->height, OUTPUT_NODE_COUNT); Vector2D * batch_test_data, * batch_label_data; batch_test_data = CreateVector2D(NULL, batch_size, test_samples->width, false); batch_label_data = CreateVector2D(NULL, batch_size, test_labels->width, false); int predicted_labels[BATCH_SIZE]; int correct_number = 0, false_number = 0; double error_val[BATCH_SIZE]; double toplam = 0; #define ITERATION_COUNT 100 for(int iteration = 0 ; iteration < ITERATION_COUNT;iteration++) { error_calculation_start = clock(); /*for(int batch_index = 1; batch_index < data_set->height/batch_size; batch_index++) { PointerSet<<<1 ,1>>>(batch_data, device_whole_data, (batch_index -1)*batch_size, batch_size); cudaDeviceSynchronize(); PointerSet<<<1 ,1>>>(batch_label, device_whole_label, (batch_index-1)*batch_size, batch_size); cudaDeviceSynchronize(); FeedForward(batch_data, batch_size); } */ train_start = clock(); for(int batch_index = 1; batch_index < data_set->height/batch_size; batch_index++) { PointerSet<<<1 ,1>>>(batch_data, 
device_whole_data, (batch_index -1)*batch_size, batch_size); cudaDeviceSynchronize(); PointerSet<<<1 ,1>>>(batch_label, device_whole_label, (batch_index-1)*batch_size, batch_size); cudaDeviceSynchronize(); BackPropagate(batch_data, batch_label, batch_size); dim3 gridd((OUTPUT_NODE_COUNT+block.x-1)/block.x, (batch_size+block.y-1)/block.y); toplam = 0; int temp; temp = gridd.x ; gridd.x /= 4; if(gridd.x == 0)gridd.x = 1; calculateCrossEntropyLoss<<<gridd, block>>>(layer_2_error, batch_label, output_2); gridd.x = temp; cudaDeviceSynchronize(); dim3 k(1, BATCH_SIZE); Sum2D<<<1, k>>>(layer_2_error); cudaDeviceSynchronize(); cudaMemcpyFromSymbol( &error_val, error_sum, sizeof(float)*BATCH_SIZE); //printf("\n\nIteration %d - Error : %f\n", iteration, error_val); cudaDeviceSynchronize(); for(int a=0;a<BATCH_SIZE;a++) toplam += -error_val[a]; printf("\nITeration %d batch %d error %f \n", iteration, batch_index, toplam); printf("\n\nTest Accuracy : %f", (float(correct_number)/(correct_number+false_number)*100.0)); } correct_number = 0 ; false_number = 0; for(int batch_index = 0; batch_index < test_labels->height/batch_size; batch_index++) { PointerSet<<<1 ,1>>>(batch_test_data, device_whole_test_data, (batch_index)*batch_size, batch_size); cudaDeviceSynchronize(); PointerSet<<<1 ,1>>>(batch_label_data, device_whole_test_label_data, (batch_index)*batch_size, batch_size); cudaDeviceSynchronize(); //printf("\nFeed forward...\n"); FeedForward(batch_test_data, batch_size); //printf("\nArgmax2d\n"); dim3 block(1 , BATCH_SIZE); ArgMax2D<<<1, block>>>(output_2); cudaDeviceSynchronize(); cudaMemcpyFromSymbol( &predicted_labels, arg_max_result, sizeof(int)*BATCH_SIZE); cudaDeviceSynchronize(); for(int i = 0; i < BATCH_SIZE;i++) { if( abs(predicted_labels[i] - test_label_indices->data[i + BATCH_SIZE*batch_index*test_label_indices->width]) < 0.1) { correct_number ++; } else false_number ++; } /*printf("\nCorrect output : \n"); DisplayVector2D<<<1, 1>>>(batch_label_data); cudaDeviceSynchronize(); */ } train_end = clock(); printf("\nIteration %d - Whole data train time : %f\n\n", iteration, (double)(train_end - train_start) / CLOCKS_PER_SEC); } printf("\nTraining has finished...\n"); execution_end = clock(); printf("\nWhole data train time : %f\n\n", (double)(execution_end - execution_start) / CLOCKS_PER_SEC); for(int batch_index = 0; batch_index < test_labels->height/batch_size; batch_index++) { PointerSet<<<1 ,1>>>(batch_test_data, device_whole_test_data, (batch_index)*batch_size, batch_size); cudaDeviceSynchronize(); PointerSet<<<1 ,1>>>(batch_label_data, device_whole_test_label_data, (batch_index)*batch_size, batch_size); cudaDeviceSynchronize(); //printf("\nFeed forward...\n"); FeedForward(batch_test_data, batch_size); //printf("\nArgmax2d\n"); dim3 block(1 , BATCH_SIZE); ArgMax2D<<<1, block>>>(output_2); cudaDeviceSynchronize(); cudaMemcpyFromSymbol( &predicted_labels, arg_max_result, sizeof(int)*BATCH_SIZE); cudaDeviceSynchronize(); for(int i = 0; i < BATCH_SIZE;i++) { if( abs(predicted_labels[i] - test_label_indices->data[i + BATCH_SIZE*batch_index*test_label_indices->width]) < 0.1) { correct_number ++; } else false_number ++; } /*printf("\nCorrect output : \n"); DisplayVector2D<<<1, 1>>>(batch_label_data); cudaDeviceSynchronize(); */ } printf("\n\nAccuracy : %f", (float(correct_number)/(correct_number+false_number)*100.0)); printf("\nTamam\n"); cudaDeviceReset(); program_end = clock(); printf("\Program execution time : %f\n\n", (double)(program_end- program_start) / CLOCKS_PER_SEC); }
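// Illustrative sketch, separate from the trainer above: that program relies on an
// ArgMax2D kernel defined elsewhere in its project. The standalone kernel below is only
// a minimal per-row argmax over a row-major float matrix; the kernel name, the flat
// layout, and the sizes in main() are assumptions for illustration, not the original
// ArgMax2D implementation.
#include <cstdio>
#include <cuda_runtime.h>

__global__ void row_argmax(const float* data, int rows, int cols, int* out) {
    int r = blockIdx.x * blockDim.x + threadIdx.x;   // one thread per row
    if (r >= rows) return;
    int best = 0;
    float best_val = data[r * cols];
    for (int c = 1; c < cols; ++c) {
        float v = data[r * cols + c];
        if (v > best_val) { best_val = v; best = c; }
    }
    out[r] = best;
}

int main() {
    const int rows = 4, cols = 10;                   // e.g. a tiny batch of 10-class scores
    float h_data[rows * cols];
    for (int i = 0; i < rows * cols; ++i) h_data[i] = (float)((i * 7) % 13);
    float* d_data; int* d_out; int h_out[rows];
    cudaMalloc(&d_data, sizeof(h_data));
    cudaMalloc(&d_out, rows * sizeof(int));
    cudaMemcpy(d_data, h_data, sizeof(h_data), cudaMemcpyHostToDevice);
    row_argmax<<<1, rows>>>(d_data, rows, cols, d_out);
    cudaMemcpy(h_out, d_out, rows * sizeof(int), cudaMemcpyDeviceToHost);
    for (int r = 0; r < rows; ++r) printf("row %d -> argmax %d\n", r, h_out[r]);
    cudaFree(d_data); cudaFree(d_out);
    return 0;
}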
c76223d8739dc40dcf5fb97a966fb5b9dac0b922.hip
// !!! This is a file automatically generated by hipify!!! #include <stdbool.h> #include <stdio.h> #include <string.h> #include <getopt.h> #include <hiprand/hiprand_kernel.h> #include <stdlib.h> #include <hip/hip_runtime.h> #include <sys/time.h> #include "remove_white.cu" #include<chrono> #include<iostream> using namespace std; using namespace std::chrono; int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}}; int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}}; int main(int argc, char **argv) { hipSetDevice(0); char* p;int matrix_len=strtol(argv[1], &p, 10); for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){ for(int block_looper=0;block_looper<20;block_looper++){ int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1]; float *x = NULL; hipMalloc(&x, XSIZE*YSIZE); float *y = NULL; hipMalloc(&y, XSIZE*YSIZE); int size = XSIZE*YSIZE; int iXSIZE= XSIZE; int iYSIZE= YSIZE; while(iXSIZE%BLOCKX!=0) { iXSIZE++; } while(iYSIZE%BLOCKY!=0) { iYSIZE++; } dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY); dim3 threadBlock(BLOCKX, BLOCKY); hipFree(0);hipLaunchKernelGGL(( remove_white), dim3(gridBlock),dim3(threadBlock), 0, 0, x,y,size); hipDeviceSynchronize(); for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {hipLaunchKernelGGL(( remove_white), dim3(gridBlock),dim3(threadBlock), 0, 0, x,y,size); } auto start = steady_clock::now(); for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {hipLaunchKernelGGL(( remove_white), dim3(gridBlock),dim3(threadBlock), 0, 0, x,y,size); } auto end = steady_clock::now(); auto usecs = duration_cast<duration<float, microseconds::period> >(end - start); cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl; } }}
c76223d8739dc40dcf5fb97a966fb5b9dac0b922.cu
#include <stdbool.h> #include <stdio.h> #include <string.h> #include <getopt.h> #include <curand_kernel.h> #include <stdlib.h> #include <cuda.h> #include <sys/time.h> #include "remove_white.cu" #include<chrono> #include<iostream> using namespace std; using namespace std::chrono; int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}}; int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}}; int main(int argc, char **argv) { cudaSetDevice(0); char* p;int matrix_len=strtol(argv[1], &p, 10); for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){ for(int block_looper=0;block_looper<20;block_looper++){ int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1]; float *x = NULL; cudaMalloc(&x, XSIZE*YSIZE); float *y = NULL; cudaMalloc(&y, XSIZE*YSIZE); int size = XSIZE*YSIZE; int iXSIZE= XSIZE; int iYSIZE= YSIZE; while(iXSIZE%BLOCKX!=0) { iXSIZE++; } while(iYSIZE%BLOCKY!=0) { iYSIZE++; } dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY); dim3 threadBlock(BLOCKX, BLOCKY); cudaFree(0); remove_white<<<gridBlock,threadBlock>>>(x,y,size); cudaDeviceSynchronize(); for (int loop_counter = 0; loop_counter < 10; ++loop_counter) { remove_white<<<gridBlock,threadBlock>>>(x,y,size); } auto start = steady_clock::now(); for (int loop_counter = 0; loop_counter < 1000; loop_counter++) { remove_white<<<gridBlock,threadBlock>>>(x,y,size); } auto end = steady_clock::now(); auto usecs = duration_cast<duration<float, microseconds::period> >(end - start); cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl; } }}
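// Illustrative sketch, separate from the benchmark pair above: the benchmark rounds
// XSIZE/YSIZE up to a multiple of the block size with a while loop before building the
// grid. The helper below shows the equivalent closed-form ceiling division; the helper
// name is an assumption for illustration.
#include <cstdio>

static unsigned ceil_div(unsigned n, unsigned block) {
    return (n + block - 1) / block;   // smallest count so that count * block >= n
}

int main() {
    // Same results as the while-loop rounding used to build gridBlock above.
    printf("240 elems, block 32 -> %u blocks\n", ceil_div(240, 32));    // 8
    printf("1016 elems, block 24 -> %u blocks\n", ceil_div(1016, 24));  // 43
    return 0;
}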
0cc5db9ed311d66fbc79c193cd162a7a9848207e.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* * Parallel bitonic sort using CUDA. * Compile with * nvcc -arch=sm_11 bitonic_sort.cu * Based on http://www.tools-of-computing.com/tc/CS/Sorts/bitonic_sort.htm * License: BSD 3 */ #include <stdlib.h> #include <stdio.h> #include <time.h> #define THREADS 512 // 2^9 #define NIM1 13515141 #define NIM2 13515147 #define SWAP(x,y) t = x; x = y; y = t; char* input_path = "data/input"; char* output_path = "data/output"; FILE* input_file; FILE* output_file; const int up = 1; const int down = 0; int * array; int array_size; int NUM_VALS; int BLOCKS; void print_elapsed(clock_t start, clock_t stop) { double elapsed = ((double) (stop - start)) / CLOCKS_PER_SEC; printf("Elapsed time: %.0f microsecond\n", elapsed * 1000000.0); } int random_int() { return (int)rand(); } void array_print(int *arr, int length) { int i; for (i = 0; i < length; ++i) { printf("%d ", arr[i]); } printf("\n"); } void array_fill(int *arr, int length) { srand(13515147); int i; for (i = 0; i < length; ++i) { arr[i] = random_int(); } } __global__ void bitonic_sort_step(int *dev_values, int j, int k) { unsigned int i, ixj; /* Sorting partners: i and ixj */ i = threadIdx.x + blockDim.x * blockIdx.x; ixj = i^j; /* The threads with the lowest ids sort the array. */ if ((ixj)>i) { if ((i&k)==0) { /* Sort ascending */ if (dev_values[i]>dev_values[ixj]) { /* exchange(i,ixj); */ int temp = dev_values[i]; dev_values[i] = dev_values[ixj]; dev_values[ixj] = temp; } } if ((i&k)!=0) { /* Sort descending */ if (dev_values[i]<dev_values[ixj]) { /* exchange(i,ixj); */ int temp = dev_values[i]; dev_values[i] = dev_values[ixj]; dev_values[ixj] = temp; } } } } /** * Inplace bitonic sort using CUDA. */ void bitonic_sort(int *values) { int *dev_values; size_t size = NUM_VALS * sizeof(int); hipMalloc((void**) &dev_values, size); hipMemcpy(dev_values, values, size, hipMemcpyHostToDevice); dim3 blocks(BLOCKS,1); /* Number of blocks */ dim3 threads(THREADS,1); /* Number of threads */ int j, k; /* Major step */ for (k = 2; k <= NUM_VALS; k <<= 1) { /* Minor step */ for (j=k>>1; j>0; j=j>>1) { hipLaunchKernelGGL(( bitonic_sort_step), dim3(blocks), dim3(threads), 0, 0, dev_values, j, k); } } hipMemcpy(values, dev_values, size, hipMemcpyDeviceToHost); hipFree(dev_values); } void compare(int i, int j, int dir){ int t; if (dir == (array[i] > array[j])){ SWAP(array[i], array[j]); } } /** * Returns the greatest power of two number that is less than n */ int greatestPowerOfTwoLessThan(int n){ int k=1; while(k>0 && k<n) k=k<<1; return k>>1; } /** * Sorts a bitonic sequence in ascending order if dir=1 * otherwise in descending order */ void bitonicMerge(int low, int c, int dir){ int k, i; if (c > 1){ k = greatestPowerOfTwoLessThan(c); for (i = low;i < low+c-k ;i++) compare(i, i+k, dir); bitonicMerge(low, k, dir); bitonicMerge(low+k, c-k, dir); } } /** * Generates bitonic sequence by sorting recursively * two halves of the array in opposite sorting orders * bitonicMerge will merge the resultant array */ void recursiveBitonic(int low, int c, int dir){ int k; if (c > 1) { k = c / 2; recursiveBitonic(low, k, !dir); recursiveBitonic(low + k, c-k, dir); bitonicMerge(low, c, dir); } } /** * Sort array with serial bitonic sorting */ void sort_serial(){ recursiveBitonic(0, array_size, up); } /** * Check if global array is sorted */ int is_sorted() { int i; for (i=0; i<array_size-1; i++) { if (array[i] > array[i+1]) return 0; } return 1; } int main(int argc, char * argv[]) { input_file = 
fopen(input_path, "w"); output_file = fopen(output_path, "w"); clock_t start, stop; array_size = atoi(argv[1]); NUM_VALS=array_size; BLOCKS=NUM_VALS/THREADS; array = (int*) malloc( NUM_VALS * sizeof(int)); array_fill(array, NUM_VALS); int i; for (i = 0; i < array_size; i++){ fprintf(input_file, "%d\n", array[i]); } fclose(input_file); start = clock(); sort_serial(); stop = clock(); printf("[SERIAL]\n"); if (is_sorted()) { printf("Sorting successful\n"); } else { printf("Sorting failed\n"); } print_elapsed(start, stop); free(array); array = (int*) malloc( NUM_VALS * sizeof(int)); array_fill(array, NUM_VALS); start = clock(); bitonic_sort(array); /* Inplace */ stop = clock(); printf("[PARALLEL]\n"); if (is_sorted()) { printf("Sorting successful\n"); } else { printf("Sorting failed\n"); } for (i = 0; i < array_size; i++){ fprintf(output_file, "%d\n", array[i]); } fclose(output_file); print_elapsed(start, stop); }
0cc5db9ed311d66fbc79c193cd162a7a9848207e.cu
/* * Parallel bitonic sort using CUDA. * Compile with * nvcc -arch=sm_11 bitonic_sort.cu * Based on http://www.tools-of-computing.com/tc/CS/Sorts/bitonic_sort.htm * License: BSD 3 */ #include <stdlib.h> #include <stdio.h> #include <time.h> #define THREADS 512 // 2^9 #define NIM1 13515141 #define NIM2 13515147 #define SWAP(x,y) t = x; x = y; y = t; char* input_path = "data/input"; char* output_path = "data/output"; FILE* input_file; FILE* output_file; const int up = 1; const int down = 0; int * array; int array_size; int NUM_VALS; int BLOCKS; void print_elapsed(clock_t start, clock_t stop) { double elapsed = ((double) (stop - start)) / CLOCKS_PER_SEC; printf("Elapsed time: %.0f microsecond\n", elapsed * 1000000.0); } int random_int() { return (int)rand(); } void array_print(int *arr, int length) { int i; for (i = 0; i < length; ++i) { printf("%d ", arr[i]); } printf("\n"); } void array_fill(int *arr, int length) { srand(13515147); int i; for (i = 0; i < length; ++i) { arr[i] = random_int(); } } __global__ void bitonic_sort_step(int *dev_values, int j, int k) { unsigned int i, ixj; /* Sorting partners: i and ixj */ i = threadIdx.x + blockDim.x * blockIdx.x; ixj = i^j; /* The threads with the lowest ids sort the array. */ if ((ixj)>i) { if ((i&k)==0) { /* Sort ascending */ if (dev_values[i]>dev_values[ixj]) { /* exchange(i,ixj); */ int temp = dev_values[i]; dev_values[i] = dev_values[ixj]; dev_values[ixj] = temp; } } if ((i&k)!=0) { /* Sort descending */ if (dev_values[i]<dev_values[ixj]) { /* exchange(i,ixj); */ int temp = dev_values[i]; dev_values[i] = dev_values[ixj]; dev_values[ixj] = temp; } } } } /** * Inplace bitonic sort using CUDA. */ void bitonic_sort(int *values) { int *dev_values; size_t size = NUM_VALS * sizeof(int); cudaMalloc((void**) &dev_values, size); cudaMemcpy(dev_values, values, size, cudaMemcpyHostToDevice); dim3 blocks(BLOCKS,1); /* Number of blocks */ dim3 threads(THREADS,1); /* Number of threads */ int j, k; /* Major step */ for (k = 2; k <= NUM_VALS; k <<= 1) { /* Minor step */ for (j=k>>1; j>0; j=j>>1) { bitonic_sort_step<<<blocks, threads>>>(dev_values, j, k); } } cudaMemcpy(values, dev_values, size, cudaMemcpyDeviceToHost); cudaFree(dev_values); } void compare(int i, int j, int dir){ int t; if (dir == (array[i] > array[j])){ SWAP(array[i], array[j]); } } /** * Returns the greatest power of two number that is less than n */ int greatestPowerOfTwoLessThan(int n){ int k=1; while(k>0 && k<n) k=k<<1; return k>>1; } /** * Sorts a bitonic sequence in ascending order if dir=1 * otherwise in descending order */ void bitonicMerge(int low, int c, int dir){ int k, i; if (c > 1){ k = greatestPowerOfTwoLessThan(c); for (i = low;i < low+c-k ;i++) compare(i, i+k, dir); bitonicMerge(low, k, dir); bitonicMerge(low+k, c-k, dir); } } /** * Generates bitonic sequence by sorting recursively * two halves of the array in opposite sorting orders * bitonicMerge will merge the resultant array */ void recursiveBitonic(int low, int c, int dir){ int k; if (c > 1) { k = c / 2; recursiveBitonic(low, k, !dir); recursiveBitonic(low + k, c-k, dir); bitonicMerge(low, c, dir); } } /** * Sort array with serial bitonic sorting */ void sort_serial(){ recursiveBitonic(0, array_size, up); } /** * Check if global array is sorted */ int is_sorted() { int i; for (i=0; i<array_size-1; i++) { if (array[i] > array[i+1]) return 0; } return 1; } int main(int argc, char * argv[]) { input_file = fopen(input_path, "w"); output_file = fopen(output_path, "w"); clock_t start, stop; array_size = atoi(argv[1]); 
NUM_VALS=array_size; BLOCKS=NUM_VALS/THREADS; array = (int*) malloc( NUM_VALS * sizeof(int)); array_fill(array, NUM_VALS); int i; for (i = 0; i < array_size; i++){ fprintf(input_file, "%d\n", array[i]); } fclose(input_file); start = clock(); sort_serial(); stop = clock(); printf("[SERIAL]\n"); if (is_sorted()) { printf("Sorting successful\n"); } else { printf("Sorting failed\n"); } print_elapsed(start, stop); free(array); array = (int*) malloc( NUM_VALS * sizeof(int)); array_fill(array, NUM_VALS); start = clock(); bitonic_sort(array); /* Inplace */ stop = clock(); printf("[PARALLEL]\n"); if (is_sorted()) { printf("Sorting successful\n"); } else { printf("Sorting failed\n"); } for (i = 0; i < array_size; i++){ fprintf(output_file, "%d\n", array[i]); } fclose(output_file); print_elapsed(start, stop); }
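// Illustrative sketch, separate from the bitonic-sort pair above: bitonic_sort assumes
// NUM_VALS is a power of two and that BLOCKS*THREADS covers every element (for inputs
// smaller than THREADS the integer division yields zero blocks). This host-only sketch
// checks that precondition and prints the i^j compare partners for one minor step
// (k=4, j=2); it mirrors the pairing logic of bitonic_sort_step but is not part of it.
#include <cstdio>

static bool is_power_of_two(unsigned n) { return n && ((n & (n - 1)) == 0); }

int main() {
    unsigned num_vals = 16;
    if (!is_power_of_two(num_vals)) { printf("bitonic sort needs a power-of-two size\n"); return 1; }
    unsigned k = 4, j = 2;
    for (unsigned i = 0; i < num_vals; ++i) {
        unsigned partner = i ^ j;
        if (partner > i)  // only the lower index of each pair performs the exchange
            printf("i=%2u pairs with %2u (%s)\n", i, partner,
                   (i & k) == 0 ? "ascending" : "descending");
    }
    return 0;
}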
12e958a543831cf5a47b6bc6dd4ec05ab1ab5d5d.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /******************************************************************************* * Copyright (c) 2015-2018 Skymind, Inc. * * This program and the accompanying materials are made available under the * terms of the Apache License, Version 2.0 which is available at * https://www.apache.org/licenses/LICENSE-2.0. * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the * License for the specific language governing permissions and limitations * under the License. * * SPDX-License-Identifier: Apache-2.0 ******************************************************************************/ // // @author [email protected] // @author Yurii Shyrma ([email protected]) // #include <ops/declarable/helpers/adjust_hue.h> #include <helpers/ConstantTadHelper.h> #include <helpers/PointersManager.h> namespace sd { namespace ops { namespace helpers { /////////////////////////////////////////////////////////////////// template <typename T> static void _CUDA_G adjustHueCuda(const void* vx, const Nd4jLong* xShapeInfo, const Nd4jLong* xTadOffsets, void* vz, const Nd4jLong *zShapeInfo, const Nd4jLong* zTadOffsets, const Nd4jLong numOfTads, const T delta, const int dimC) { const T* x = reinterpret_cast<const T*>(vx); T* z = reinterpret_cast<T*>(vz); __shared__ int rank; __shared__ Nd4jLong xDimCstride, zDimCstride; if (threadIdx.x == 0) { rank = shape::rank(xShapeInfo); xDimCstride = shape::stride(xShapeInfo)[dimC]; zDimCstride = shape::stride(zShapeInfo)[dimC]; } __syncthreads(); const auto tid = blockIdx.x * blockDim.x + threadIdx.x; for (Nd4jLong i = tid; i < numOfTads; i += gridDim.x * blockDim.x) { const T* xTad = x + xTadOffsets[i]; T* zTad = z + zTadOffsets[i]; T h, s, v; rgbToHsv<T>(xTad[0], xTad[xDimCstride], xTad[2 * xDimCstride], h, s, v); h += delta ; if(h > 1) h -= 1; else if(h < 0) h += 1; hsvToRgb<T>(h, s, v, zTad[0], zTad[zDimCstride], zTad[2 * zDimCstride]); } } /////////////////////////////////////////////////////////////////// template<typename T> static _CUDA_H void adjustHueCudaLauncher(const int blocksPerGrid, const int threadsPerBlock, const hipStream_t *stream, const void* vx, const Nd4jLong* xShapeInfo, const Nd4jLong* xTadOffsets, void* vz, const Nd4jLong* zShapeInfo, const Nd4jLong* zTadOffsets, const Nd4jLong numOfTads, const NDArray* deltaScalarArr, const int dimC) { hipLaunchKernelGGL(( adjustHueCuda<T>), dim3(blocksPerGrid), dim3(threadsPerBlock), 256, *stream, vx, xShapeInfo, xTadOffsets, vz, zShapeInfo, zTadOffsets, numOfTads, deltaScalarArr->e<T>(0), dimC); } //////////////////////////////////////////////////////////////////////// void adjustHue(sd::LaunchContext* context, const NDArray *input, const NDArray* deltaScalarArr, NDArray *output, const int dimC) { auto packX = sd::ConstantTadHelper::getInstance()->tadForDimensions(input->shapeInfo(), {dimC}); auto packZ = sd::ConstantTadHelper::getInstance()->tadForDimensions(output->shapeInfo(), {dimC}); const Nd4jLong numOfTads = packX.numberOfTads(); const int threadsPerBlock = MAX_NUM_THREADS / 2; const int blocksPerGrid = (numOfTads + threadsPerBlock - 1) / threadsPerBlock; PointersManager manager(context, "adjustHue"); NDArray::prepareSpecialUse({output}, {input, deltaScalarArr}); BUILD_SINGLE_SELECTOR(input->dataType(), adjustHueCudaLauncher, (blocksPerGrid, threadsPerBlock, 
context->getCudaStream(), input->specialBuffer(), input->specialShapeInfo(), packX.platformOffsets(), output->specialBuffer(), output->specialShapeInfo(), packZ.platformOffsets(), numOfTads, deltaScalarArr, dimC), FLOAT_TYPES); NDArray::registerSpecialUse({output}, {input, deltaScalarArr}); manager.synchronize(); } /* template <typename T> static void _CUDA_G adjustHueSingleNHWCKernel(void *xBuffer, Nd4jLong *xShapeInfo, void *zBuffer, Nd4jLong *zShapeInfo, Nd4jLong tuples, float delta) { int numChannels = 3; auto tid = threadIdx.x + blockIdx.x * blockDim.x; auto bIn = reinterpret_cast<T*>(xBuffer); auto bOut = reinterpret_cast<T*>(zBuffer); static const int kChannelRange = 6; for (Nd4jLong e = tid; e < tuples; e += blockDim.x * gridDim.x) { auto i = bIn + e * numChannels; auto o = bOut + e * numChannels; T h, v_min, v_max; helpers::rgb_to_hv(i[0], i[1], i[2], &h, &v_min, &v_max); h += delta * kChannelRange; while (h < (T) 0.) h += (T) kChannelRange; while (h >= (T) kChannelRange) h -= (T) kChannelRange; helpers::hv_to_rgb(h, v_min, v_max, o, o + 1, o + 2); } } template <typename T> static void _CUDA_G adjustHueSingleNCHWKernel(void *xBuffer, Nd4jLong *xTadShapeInfo, Nd4jLong *xOffsets, void *zBuffer, Nd4jLong *zTadShapeInfo, Nd4jLong *zOffsets, Nd4jLong tadLength, Nd4jLong tuples, float delta) { int numChannels = 3; auto tid = threadIdx.x + blockIdx.x * blockDim.x; static const int kChannelRange = 6; auto bufferR = reinterpret_cast<T *>(xBuffer) + xOffsets[0]; auto bufferG = reinterpret_cast<T *>(xBuffer) + xOffsets[1]; auto bufferB = reinterpret_cast<T *>(xBuffer) + xOffsets[2]; auto outputR = reinterpret_cast<T *>(zBuffer) + zOffsets[0]; auto outputG = reinterpret_cast<T *>(zBuffer) + zOffsets[1]; auto outputB = reinterpret_cast<T *>(zBuffer) + zOffsets[2]; for (Nd4jLong e = tid; e < tuples; e += blockDim.x * gridDim.x) { auto _ri = bufferR + shape::getIndexOffset(e, xTadShapeInfo); auto _gi = bufferG + shape::getIndexOffset(e, xTadShapeInfo); auto _bi = bufferB + shape::getIndexOffset(e, xTadShapeInfo); auto _ro = outputR + shape::getIndexOffset(e, xTadShapeInfo); auto _go = outputG + shape::getIndexOffset(e, xTadShapeInfo); auto _bo = outputB + shape::getIndexOffset(e, xTadShapeInfo); T h, v_min, v_max; helpers::rgb_to_hv(_ri[0], _gi[0], _bi[0], &h, &v_min, &v_max); h += delta * kChannelRange; while (h < (T) 0) h += (T) kChannelRange; while (h >= (T) kChannelRange) h -= (T) kChannelRange; helpers::hv_to_rgb(h, v_min, v_max, _ro, _go, _bo); } } template <typename T> static void _adjust_hue_single(sd::LaunchContext * context, NDArray *array, NDArray *output, float delta, bool isNHWC) { // numChannels is always 3 auto tuples = array->lengthOf() / 3; if (isNHWC) { adjustHueSingleNHWCKernel<T><<<256, 256, 1024, *context->getCudaStream()>>>(array->specialBuffer(), array->specialShapeInfo(), output->specialBuffer(), output->specialShapeInfo(), tuples, delta); } else { // TODO: check this one auto packX = sd::ConstantTadHelper::getInstance()->tadForDimensions(array->shapeInfo(), {1, 2}); auto packZ = sd::ConstantTadHelper::getInstance()->tadForDimensions(output->shapeInfo(), {1, 2}); auto tadLength = shape::length(packX.primaryShapeInfo()); adjustHueSingleNCHWKernel<T><<<256, 256, 1024, *context->getCudaStream()>>>(array->specialBuffer(), packX.platformShapeInfo(), packX.platformOffsets(), output->specialBuffer(), packZ.platformShapeInfo(), packZ.platformOffsets(), tadLength, tuples, delta); } } template <typename T> static void _adjust_hue_batch(sd::LaunchContext * context, NDArray *array, 
NDArray *output, float delta, bool isNHWC) { auto xType = array->dataType(); // numChannels is always 3 auto tuples = array->lengthOf() / 3; if (isNHWC) { // in case of nhwc batch, we don't really care about examples: it's still bunch of RGB values BUILD_SINGLE_SELECTOR(xType, _adjust_hue_single, (context, array, output, delta, isNHWC);, FLOAT_TYPES); } else { // TODO: check this one auto packX = sd::ConstantTadHelper::getInstance()->tadForDimensions(array->shapeInfo(), {0, 2, 3}); auto packZ = sd::ConstantTadHelper::getInstance()->tadForDimensions(output->shapeInfo(), {0, 2, 3}); auto tadLength = shape::length(packX.primaryShapeInfo()); adjustHueSingleNCHWKernel<T><<<256, 256, 1024, *context->getCudaStream()>>>(array->specialBuffer(), packX.platformShapeInfo(), packX.platformOffsets(), output->specialBuffer(), packZ.platformShapeInfo(), packZ.platformOffsets(), tadLength, tuples, delta); } } void _adjust_hue(sd::LaunchContext * context, NDArray *array, NDArray *output, NDArray* delta, bool isNHWC) { auto xType = array->dataType(); float d = delta->e<float>(0); if (array->rankOf() == 4) { BUILD_SINGLE_SELECTOR(xType, _adjust_hue_batch, (context, array, output, d, isNHWC);, FLOAT_TYPES); } else { BUILD_SINGLE_SELECTOR(xType, _adjust_hue_single, (context, array, output, d, isNHWC);, FLOAT_TYPES); } } */ } } }
12e958a543831cf5a47b6bc6dd4ec05ab1ab5d5d.cu
/******************************************************************************* * Copyright (c) 2015-2018 Skymind, Inc. * * This program and the accompanying materials are made available under the * terms of the Apache License, Version 2.0 which is available at * https://www.apache.org/licenses/LICENSE-2.0. * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the * License for the specific language governing permissions and limitations * under the License. * * SPDX-License-Identifier: Apache-2.0 ******************************************************************************/ // // @author [email protected] // @author Yurii Shyrma ([email protected]) // #include <ops/declarable/helpers/adjust_hue.h> #include <helpers/ConstantTadHelper.h> #include <helpers/PointersManager.h> namespace sd { namespace ops { namespace helpers { /////////////////////////////////////////////////////////////////// template <typename T> static void _CUDA_G adjustHueCuda(const void* vx, const Nd4jLong* xShapeInfo, const Nd4jLong* xTadOffsets, void* vz, const Nd4jLong *zShapeInfo, const Nd4jLong* zTadOffsets, const Nd4jLong numOfTads, const T delta, const int dimC) { const T* x = reinterpret_cast<const T*>(vx); T* z = reinterpret_cast<T*>(vz); __shared__ int rank; __shared__ Nd4jLong xDimCstride, zDimCstride; if (threadIdx.x == 0) { rank = shape::rank(xShapeInfo); xDimCstride = shape::stride(xShapeInfo)[dimC]; zDimCstride = shape::stride(zShapeInfo)[dimC]; } __syncthreads(); const auto tid = blockIdx.x * blockDim.x + threadIdx.x; for (Nd4jLong i = tid; i < numOfTads; i += gridDim.x * blockDim.x) { const T* xTad = x + xTadOffsets[i]; T* zTad = z + zTadOffsets[i]; T h, s, v; rgbToHsv<T>(xTad[0], xTad[xDimCstride], xTad[2 * xDimCstride], h, s, v); h += delta ; if(h > 1) h -= 1; else if(h < 0) h += 1; hsvToRgb<T>(h, s, v, zTad[0], zTad[zDimCstride], zTad[2 * zDimCstride]); } } /////////////////////////////////////////////////////////////////// template<typename T> static _CUDA_H void adjustHueCudaLauncher(const int blocksPerGrid, const int threadsPerBlock, const cudaStream_t *stream, const void* vx, const Nd4jLong* xShapeInfo, const Nd4jLong* xTadOffsets, void* vz, const Nd4jLong* zShapeInfo, const Nd4jLong* zTadOffsets, const Nd4jLong numOfTads, const NDArray* deltaScalarArr, const int dimC) { adjustHueCuda<T><<<blocksPerGrid, threadsPerBlock, 256, *stream>>>(vx, xShapeInfo, xTadOffsets, vz, zShapeInfo, zTadOffsets, numOfTads, deltaScalarArr->e<T>(0), dimC); } //////////////////////////////////////////////////////////////////////// void adjustHue(sd::LaunchContext* context, const NDArray *input, const NDArray* deltaScalarArr, NDArray *output, const int dimC) { auto packX = sd::ConstantTadHelper::getInstance()->tadForDimensions(input->shapeInfo(), {dimC}); auto packZ = sd::ConstantTadHelper::getInstance()->tadForDimensions(output->shapeInfo(), {dimC}); const Nd4jLong numOfTads = packX.numberOfTads(); const int threadsPerBlock = MAX_NUM_THREADS / 2; const int blocksPerGrid = (numOfTads + threadsPerBlock - 1) / threadsPerBlock; PointersManager manager(context, "adjustHue"); NDArray::prepareSpecialUse({output}, {input, deltaScalarArr}); BUILD_SINGLE_SELECTOR(input->dataType(), adjustHueCudaLauncher, (blocksPerGrid, threadsPerBlock, context->getCudaStream(), input->specialBuffer(), input->specialShapeInfo(), packX.platformOffsets(), output->specialBuffer(), 
output->specialShapeInfo(), packZ.platformOffsets(), numOfTads, deltaScalarArr, dimC), FLOAT_TYPES); NDArray::registerSpecialUse({output}, {input, deltaScalarArr}); manager.synchronize(); } /* template <typename T> static void _CUDA_G adjustHueSingleNHWCKernel(void *xBuffer, Nd4jLong *xShapeInfo, void *zBuffer, Nd4jLong *zShapeInfo, Nd4jLong tuples, float delta) { int numChannels = 3; auto tid = threadIdx.x + blockIdx.x * blockDim.x; auto bIn = reinterpret_cast<T*>(xBuffer); auto bOut = reinterpret_cast<T*>(zBuffer); static const int kChannelRange = 6; for (Nd4jLong e = tid; e < tuples; e += blockDim.x * gridDim.x) { auto i = bIn + e * numChannels; auto o = bOut + e * numChannels; T h, v_min, v_max; helpers::rgb_to_hv(i[0], i[1], i[2], &h, &v_min, &v_max); h += delta * kChannelRange; while (h < (T) 0.) h += (T) kChannelRange; while (h >= (T) kChannelRange) h -= (T) kChannelRange; helpers::hv_to_rgb(h, v_min, v_max, o, o + 1, o + 2); } } template <typename T> static void _CUDA_G adjustHueSingleNCHWKernel(void *xBuffer, Nd4jLong *xTadShapeInfo, Nd4jLong *xOffsets, void *zBuffer, Nd4jLong *zTadShapeInfo, Nd4jLong *zOffsets, Nd4jLong tadLength, Nd4jLong tuples, float delta) { int numChannels = 3; auto tid = threadIdx.x + blockIdx.x * blockDim.x; static const int kChannelRange = 6; auto bufferR = reinterpret_cast<T *>(xBuffer) + xOffsets[0]; auto bufferG = reinterpret_cast<T *>(xBuffer) + xOffsets[1]; auto bufferB = reinterpret_cast<T *>(xBuffer) + xOffsets[2]; auto outputR = reinterpret_cast<T *>(zBuffer) + zOffsets[0]; auto outputG = reinterpret_cast<T *>(zBuffer) + zOffsets[1]; auto outputB = reinterpret_cast<T *>(zBuffer) + zOffsets[2]; for (Nd4jLong e = tid; e < tuples; e += blockDim.x * gridDim.x) { auto _ri = bufferR + shape::getIndexOffset(e, xTadShapeInfo); auto _gi = bufferG + shape::getIndexOffset(e, xTadShapeInfo); auto _bi = bufferB + shape::getIndexOffset(e, xTadShapeInfo); auto _ro = outputR + shape::getIndexOffset(e, xTadShapeInfo); auto _go = outputG + shape::getIndexOffset(e, xTadShapeInfo); auto _bo = outputB + shape::getIndexOffset(e, xTadShapeInfo); T h, v_min, v_max; helpers::rgb_to_hv(_ri[0], _gi[0], _bi[0], &h, &v_min, &v_max); h += delta * kChannelRange; while (h < (T) 0) h += (T) kChannelRange; while (h >= (T) kChannelRange) h -= (T) kChannelRange; helpers::hv_to_rgb(h, v_min, v_max, _ro, _go, _bo); } } template <typename T> static void _adjust_hue_single(sd::LaunchContext * context, NDArray *array, NDArray *output, float delta, bool isNHWC) { // numChannels is always 3 auto tuples = array->lengthOf() / 3; if (isNHWC) { adjustHueSingleNHWCKernel<T><<<256, 256, 1024, *context->getCudaStream()>>>(array->specialBuffer(), array->specialShapeInfo(), output->specialBuffer(), output->specialShapeInfo(), tuples, delta); } else { // TODO: check this one auto packX = sd::ConstantTadHelper::getInstance()->tadForDimensions(array->shapeInfo(), {1, 2}); auto packZ = sd::ConstantTadHelper::getInstance()->tadForDimensions(output->shapeInfo(), {1, 2}); auto tadLength = shape::length(packX.primaryShapeInfo()); adjustHueSingleNCHWKernel<T><<<256, 256, 1024, *context->getCudaStream()>>>(array->specialBuffer(), packX.platformShapeInfo(), packX.platformOffsets(), output->specialBuffer(), packZ.platformShapeInfo(), packZ.platformOffsets(), tadLength, tuples, delta); } } template <typename T> static void _adjust_hue_batch(sd::LaunchContext * context, NDArray *array, NDArray *output, float delta, bool isNHWC) { auto xType = array->dataType(); // numChannels is always 3 auto tuples = 
array->lengthOf() / 3; if (isNHWC) { // in case of nhwc batch, we don't really care about examples: it's still bunch of RGB values BUILD_SINGLE_SELECTOR(xType, _adjust_hue_single, (context, array, output, delta, isNHWC);, FLOAT_TYPES); } else { // TODO: check this one auto packX = sd::ConstantTadHelper::getInstance()->tadForDimensions(array->shapeInfo(), {0, 2, 3}); auto packZ = sd::ConstantTadHelper::getInstance()->tadForDimensions(output->shapeInfo(), {0, 2, 3}); auto tadLength = shape::length(packX.primaryShapeInfo()); adjustHueSingleNCHWKernel<T><<<256, 256, 1024, *context->getCudaStream()>>>(array->specialBuffer(), packX.platformShapeInfo(), packX.platformOffsets(), output->specialBuffer(), packZ.platformShapeInfo(), packZ.platformOffsets(), tadLength, tuples, delta); } } void _adjust_hue(sd::LaunchContext * context, NDArray *array, NDArray *output, NDArray* delta, bool isNHWC) { auto xType = array->dataType(); float d = delta->e<float>(0); if (array->rankOf() == 4) { BUILD_SINGLE_SELECTOR(xType, _adjust_hue_batch, (context, array, output, d, isNHWC);, FLOAT_TYPES); } else { BUILD_SINGLE_SELECTOR(xType, _adjust_hue_single, (context, array, output, d, isNHWC);, FLOAT_TYPES); } } */ } } }
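// Illustrative sketch, separate from the adjust_hue pair above: adjustHueCuda shifts the
// hue channel by delta and wraps it back into [0, 1). The host function below isolates
// that wrap step only; rgbToHsv/hsvToRgb are library helpers and are not reproduced, and
// the function name here is an assumption for illustration.
#include <cstdio>

static float wrap_hue(float h, float delta) {
    h += delta;                 // same single-wrap logic as the kernel:
    if (h > 1.0f) h -= 1.0f;    // delta is expected to stay within [-1, 1]
    else if (h < 0.0f) h += 1.0f;
    return h;
}

int main() {
    printf("%.2f\n", wrap_hue(0.9f, 0.3f));   // 1.2 wraps to 0.20
    printf("%.2f\n", wrap_hue(0.1f, -0.3f));  // -0.2 wraps to 0.80
    return 0;
}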
b5bdcf07007e36e97640cbfe9206968c50b96780.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "device_launch_parameters.h" #include"matrix.h" #include <cudnn.h> #include <vector> #include <chrono> #define IDX2C(i,j,ld) (((i)*(ld))+(j)) #define CUDA_CALL(f) { \ hipError_t err = (f); \ if (err != hipSuccess) { \ std::cout \ << __LINE__ <<":Error occurred: " << err<< std::endl; \ std::exit(1); \ } \ } #define CUDNN_CALL(f) { \ cudnnStatus_t err = (f); \ if (err != CUDNN_STATUS_SUCCESS) { \ std::cout \ << " Error occurred: " << err << std::endl; \ std::exit(1); \ } \ } template<typename T> void padding(vector<T*>& mat, int& col, int& row, int pad_size, int channels) { col += 2 * pad_size; row += 2 * pad_size; vector<T*>input(channels); #pragma omp parallel for for (int i = 0; i < channels; i++) { input[i] = new T[col * row]; memset(&input[i][0], 0, col * row * sizeof(T)); for (int k = pad_size; k < col - pad_size; k++) { for (int j = pad_size; j < row - pad_size; j++) { input[i][IDX2C(k, j, row)] = mat[i][IDX2C(k - pad_size, j - pad_size, row - 2 * pad_size)]; } } delete[]mat[i]; } mat = input; } template<typename T> __global__ void conv2d(T* mat, int* kernel, T* res, int col, int row, int stride, int kernel_size, int res_col, int res_row) { const int i = threadIdx.x + blockDim.x * blockIdx.x; const int j = threadIdx.y + blockDim.y * blockIdx.y; T sum = 0; if (i < res_col && j < res_row) { for (int x = 0; x < kernel_size; x++) { for (int y = 0; y < kernel_size; y++) { sum += mat[IDX2C(i * stride + x, j * stride + y, row)] * kernel[IDX2C(x, y, kernel_size)]; } } res[IDX2C(i, j, res_row)] = sum; } } int main(int argc, char* argv[]) { if (argc != 7) { fprintf(stderr, "usage: TARGET [in_channels] [height] [width] [stride] [thread.x] [thread.y]\n"); return -1; } int col = atoi(argv[2]), row = atoi(argv[3]), kernel_size = 3, stride = atoi(argv[4]), n_channels = atoi(argv[1]), thread_x = atoi(argv[5]), thread_y = atoi(argv[6]); int pad_size = 0; int res_col; int res_row; dim3 numBlocks; //pad_size = (kernel_size - 1) / 2; auto threadsPerBlock = dim3(thread_x, thread_y); auto input = getMat<float>(r, col, row, n_channels); vector<float*> d_input(n_channels, nullptr); vector<float*> output(n_channels, nullptr), d_output(n_channels, nullptr); vector<int*> d_kernel(n_channels, nullptr); vector<vector<int>>kernel(n_channels, vector<int>(kernel_size * kernel_size)); padding(input, col, row, pad_size, n_channels); res_col = (col - kernel_size) / stride + 1; res_row = (row - kernel_size) / stride + 1; for (int i = 0; i < n_channels; i++) { //print(std::cout, input[i], col, row ); numBlocks = dim3(res_col, res_row); output[i] = new float[res_col * res_row]; kernel[i] = { 0,1,0,1,-4,1,0,1,0 }; CUDA_CALL(hipMalloc(&d_input[i], col * row * sizeof(d_input[0][0]))); CUDA_CALL(hipMalloc(&d_output[i], res_col * res_row * sizeof(d_output[0][0]))); CUDA_CALL(hipMalloc(&d_kernel[i], kernel_size * kernel_size * sizeof(kernel[0][0]))); CUDA_CALL(hipMemcpy(d_input[i], input[i], sizeof(float) * col * row, hipMemcpyHostToDevice)); CUDA_CALL(hipMemcpy(d_kernel[i], &kernel[i][0], sizeof(int) * kernel_size * kernel_size, hipMemcpyHostToDevice)); } auto timeStart = std::chrono::high_resolution_clock::now(); for (int i = 0; i < n_channels; i++) { conv2d << <numBlocks, threadsPerBlock >> > (d_input[i], d_kernel[i], d_output[i], col, row, stride, kernel_size, res_col, res_row); } auto timeEnd = std::chrono::high_resolution_clock::now(); for (int i = 0; i < n_channels; i++) { CUDA_CALL(hipMemcpy(output[i], d_output[i], 
sizeof(float) * res_col * res_row, hipMemcpyDeviceToHost)); //print(std::cout, output[i], res_col, res_row); } auto passedTime = std::chrono::duration<float, std::milli>(timeEnd - timeStart).count(); fprintf(stdout, "Conv2d Done: %.5f (ms)\n", passedTime); /* release every channel's device buffers; hipFree takes the device pointer itself, not its address */ for (int i = 0; i < n_channels; i++) { hipFree(d_input[i]); hipFree(d_output[i]); hipFree(d_kernel[i]); } //save_img("2.png", output, res_col, res_row); }
b5bdcf07007e36e97640cbfe9206968c50b96780.cu
#include "cuda_runtime.h" #include "device_launch_parameters.h" #include"matrix.h" #include <cudnn.h> #include <vector> #include <chrono> #define IDX2C(i,j,ld) (((i)*(ld))+(j)) #define CUDA_CALL(f) { \ cudaError_t err = (f); \ if (err != cudaSuccess) { \ std::cout \ << __LINE__ <<":Error occurred: " << err<< std::endl; \ std::exit(1); \ } \ } #define CUDNN_CALL(f) { \ cudnnStatus_t err = (f); \ if (err != CUDNN_STATUS_SUCCESS) { \ std::cout \ << " Error occurred: " << err << std::endl; \ std::exit(1); \ } \ } template<typename T> void padding(vector<T*>& mat, int& col, int& row, int pad_size, int channels) { col += 2 * pad_size; row += 2 * pad_size; vector<T*>input(channels); #pragma omp parallel for for (int i = 0; i < channels; i++) { input[i] = new T[col * row]; memset(&input[i][0], 0, col * row * sizeof(T)); for (int k = pad_size; k < col - pad_size; k++) { for (int j = pad_size; j < row - pad_size; j++) { input[i][IDX2C(k, j, row)] = mat[i][IDX2C(k - pad_size, j - pad_size, row - 2 * pad_size)]; } } delete[]mat[i]; } mat = input; } template<typename T> __global__ void conv2d(T* mat, int* kernel, T* res, int col, int row, int stride, int kernel_size, int res_col, int res_row) { const int i = threadIdx.x + blockDim.x * blockIdx.x; const int j = threadIdx.y + blockDim.y * blockIdx.y; T sum = 0; if (i < res_col && j < res_row) { for (int x = 0; x < kernel_size; x++) { for (int y = 0; y < kernel_size; y++) { sum += mat[IDX2C(i * stride + x, j * stride + y, row)] * kernel[IDX2C(x, y, kernel_size)]; } } res[IDX2C(i, j, res_row)] = sum; } } int main(int argc, char* argv[]) { if (argc != 7) { fprintf(stderr, "usage: TARGET [in_channels] [height] [width] [stride] [thread.x] [thread.y]\n"); return -1; } int col = atoi(argv[2]), row = atoi(argv[3]), kernel_size = 3, stride = atoi(argv[4]), n_channels = atoi(argv[1]), thread_x = atoi(argv[5]), thread_y = atoi(argv[6]); int pad_size = 0; int res_col; int res_row; dim3 numBlocks; //pad_size = (kernel_size - 1) / 2; auto threadsPerBlock = dim3(thread_x, thread_y); auto input = getMat<float>(r, col, row, n_channels); vector<float*> d_input(n_channels, nullptr); vector<float*> output(n_channels, nullptr), d_output(n_channels, nullptr); vector<int*> d_kernel(n_channels, nullptr); vector<vector<int>>kernel(n_channels, vector<int>(kernel_size * kernel_size)); padding(input, col, row, pad_size, n_channels); res_col = (col - kernel_size) / stride + 1; res_row = (row - kernel_size) / stride + 1; for (int i = 0; i < n_channels; i++) { //print(std::cout, input[i], col, row ); numBlocks = dim3(res_col, res_row); output[i] = new float[res_col * res_row]; kernel[i] = { 0,1,0,1,-4,1,0,1,0 }; CUDA_CALL(cudaMalloc(&d_input[i], col * row * sizeof(d_input[0][0]))); CUDA_CALL(cudaMalloc(&d_output[i], res_col * res_row * sizeof(d_output[0][0]))); CUDA_CALL(cudaMalloc(&d_kernel[i], kernel_size * kernel_size * sizeof(kernel[0][0]))); CUDA_CALL(cudaMemcpy(d_input[i], input[i], sizeof(float) * col * row, cudaMemcpyHostToDevice)); CUDA_CALL(cudaMemcpy(d_kernel[i], &kernel[i][0], sizeof(int) * kernel_size * kernel_size, cudaMemcpyHostToDevice)); } auto timeStart = std::chrono::high_resolution_clock::now(); for (int i = 0; i < n_channels; i++) { conv2d << <numBlocks, threadsPerBlock >> > (d_input[i], d_kernel[i], d_output[i], col, row, stride, kernel_size, res_col, res_row); } auto timeEnd = std::chrono::high_resolution_clock::now(); for (int i = 0; i < n_channels; i++) { CUDA_CALL(cudaMemcpy(output[i], d_output[i], sizeof(float) * res_col * res_row, cudaMemcpyDeviceToHost)); 
//print(std::cout, output[i], res_col, res_row); } auto passedTime = std::chrono::duration<float, std::milli>(timeEnd - timeStart).count(); fprintf(stdout, "Conv2d Done: %.5f (ms)\n", passedTime); /* release every channel's device buffers; cudaFree takes the device pointer itself, not its address */ for (int i = 0; i < n_channels; i++) { cudaFree(d_input[i]); cudaFree(d_output[i]); cudaFree(d_kernel[i]); } //save_img("2.png", output, res_col, res_row); }
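// Illustrative sketch, separate from the conv2d benchmark pair above: the benchmark
// sizes its output as (in + 2*pad - kernel)/stride + 1 and computes one output element
// per thread. The CPU reference below runs an equivalent valid-convolution loop on the
// host and can be used to spot-check the kernel; the helper name and the tiny 4x4 input
// are assumptions for illustration.
#include <cstdio>

static void conv2d_cpu(const float* in, const int* k, float* out,
                       int rows, int cols, int ksize, int stride,
                       int out_rows, int out_cols) {
    for (int i = 0; i < out_rows; ++i)
        for (int j = 0; j < out_cols; ++j) {
            float sum = 0.0f;
            for (int x = 0; x < ksize; ++x)
                for (int y = 0; y < ksize; ++y)
                    sum += in[(i * stride + x) * cols + (j * stride + y)] * k[x * ksize + y];
            out[i * out_cols + j] = sum;
        }
}

int main() {
    const int rows = 4, cols = 4, ksize = 3, stride = 1, pad = 0;
    const int out_rows = (rows + 2 * pad - ksize) / stride + 1;   // = 2
    const int out_cols = (cols + 2 * pad - ksize) / stride + 1;   // = 2
    float in[rows * cols];
    for (int i = 0; i < rows * cols; ++i) in[i] = (float)i;
    int laplacian[ksize * ksize] = { 0, 1, 0, 1, -4, 1, 0, 1, 0 };  // same 3x3 kernel as above
    float out[out_rows * out_cols];
    conv2d_cpu(in, laplacian, out, rows, cols, ksize, stride, out_rows, out_cols);
    for (int i = 0; i < out_rows * out_cols; ++i) printf("%.1f ", out[i]);
    printf("\n");
    return 0;
}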
8dd7216941a20b74ebed5e5113f16016c4a9bc2c.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /******************************************************************************* * Copyright (c) 2015-2018 Skymind, Inc. * * This program and the accompanying materials are made available under the * terms of the Apache License, Version 2.0 which is available at * https://www.apache.org/licenses/LICENSE-2.0. * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the * License for the specific language governing permissions and limitations * under the License. * * SPDX-License-Identifier: Apache-2.0 ******************************************************************************/ // // @author [email protected] // @author Yurii Shyrma, created on 15.11.2018 // #include <loops/special_kernels.h> namespace nd4j { //////////////////////////////////////////////////////////////////////// template<typename T> __device__ void shuffleKernel(void **vdX, Nd4jLong **dxShapeInfo, void **vdZ, int N, int *shuffleMap, Nd4jLong **tadOnlyShapeInfo, Nd4jLong **tadOffsets) { // we assume that shuffle map for each X contains pair TAD Y auto dX = reinterpret_cast<T **>(vdX); auto dZ = reinterpret_cast<T **>(vdZ); __shared__ int tadLength; __shared__ int tadEWS; __shared__ int numTads; __shared__ Nd4jLong* xShapeInfo; __shared__ Nd4jLong xLength; for (int f = 0; f < N; f++) { auto x = reinterpret_cast<T *>(dX[f]); auto z = reinterpret_cast<T *>(dZ[f]); if (threadIdx.x == 0) { tadLength = shape::length(tadOnlyShapeInfo[f]); tadEWS = shape::elementWiseStride(tadOnlyShapeInfo[f]); xShapeInfo = dxShapeInfo[f]; xLength = shape::length(xShapeInfo); numTads = xLength / tadLength; } __syncthreads(); if (shape::rank(xShapeInfo) == 1) { uint tid = threadIdx.x + blockIdx.x * gridDim.x; for (uint r = tid; r < xLength; r += gridDim.x * blockDim.x) { auto swapIndex = shuffleMap[r]; if (swapIndex >= 0) { uint idx = r * tadEWS; uint swap = swapIndex * tadEWS; T oldX = x[idx]; z[idx] = x[swap]; z[swap] = oldX; } } } else { // we roll over the pairs of TADs, thus limit is numTads / 2 for (uint r = blockIdx.x; r < numTads; r += gridDim.x) { if (shuffleMap[r] >= 0) { auto oldOffset = tadOffsets[f][r]; auto newOffset = tadOffsets[f][shuffleMap[r]]; auto rX = x + oldOffset; auto rY = x + newOffset; auto zX = z + oldOffset; auto zY = z + newOffset; // so we're going to change TAD[oldOffset] with TAD[newOffset] if (tadEWS == 1) { for (Nd4jLong i = threadIdx.x; i < tadLength; i += blockDim.x) { T oldX = rX[i]; rX[i] = rY[i]; zY[i] = oldX; } } else { for (Nd4jLong i = threadIdx.x; i < tadLength; i += blockDim.x) { auto xOffset = shape::getIndexOffset(i, tadOnlyShapeInfo[f], tadLength); auto yOffset = newOffset + xOffset; xOffset += oldOffset; T oldX = x[xOffset]; z[xOffset] = x[yOffset]; z[yOffset] = oldX; } } } } } __syncthreads(); } } //////////////////////////////////////////////////////////////////////// template<typename T> __global__ void execShuffleKernel(void **vdX, Nd4jLong **xShapeInfo, void **vdZ, int N, int *shuffleMap, Nd4jLong **tadOnlyShapeInfo, Nd4jLong **tadOffsets) { shuffleKernel<T>(vdX, xShapeInfo, vdZ, N, shuffleMap, tadOnlyShapeInfo, tadOffsets); } //////////////////////////////////////////////////////////////////////// template<typename T> __host__ void shuffleKernelGeneric(dim3 &launchDims, hipStream_t *stream, void **vdX, Nd4jLong **xShapeInfo, void **vdZ, int N, int 
*shuffleMap, Nd4jLong **tadOnlyShapeInfo, Nd4jLong **tadOffsets) { hipLaunchKernelGGL(( execShuffleKernel<T>), dim3(launchDims.x), dim3(launchDims.y), launchDims.z, *stream, vdX, xShapeInfo, vdZ, N, shuffleMap, tadOnlyShapeInfo, tadOffsets); } BUILD_SINGLE_TEMPLATE(template void ND4J_EXPORT shuffleKernelGeneric, (dim3 & launchDims, hipStream_t * stream, void * *vdX, Nd4jLong * *xShapeInfo, void **vdZ, int N, int * shuffleMap, Nd4jLong * *tadOnlyShapeInfo, Nd4jLong * *tadOffsets), LIBND4J_TYPES); }
8dd7216941a20b74ebed5e5113f16016c4a9bc2c.cu
/******************************************************************************* * Copyright (c) 2015-2018 Skymind, Inc. * * This program and the accompanying materials are made available under the * terms of the Apache License, Version 2.0 which is available at * https://www.apache.org/licenses/LICENSE-2.0. * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the * License for the specific language governing permissions and limitations * under the License. * * SPDX-License-Identifier: Apache-2.0 ******************************************************************************/ // // @author [email protected] // @author Yurii Shyrma, created on 15.11.2018 // #include <loops/special_kernels.h> namespace nd4j { //////////////////////////////////////////////////////////////////////// template<typename T> __device__ void shuffleKernel(void **vdX, Nd4jLong **dxShapeInfo, void **vdZ, int N, int *shuffleMap, Nd4jLong **tadOnlyShapeInfo, Nd4jLong **tadOffsets) { // we assume that shuffle map for each X contains pair TAD Y auto dX = reinterpret_cast<T **>(vdX); auto dZ = reinterpret_cast<T **>(vdZ); __shared__ int tadLength; __shared__ int tadEWS; __shared__ int numTads; __shared__ Nd4jLong* xShapeInfo; __shared__ Nd4jLong xLength; for (int f = 0; f < N; f++) { auto x = reinterpret_cast<T *>(dX[f]); auto z = reinterpret_cast<T *>(dZ[f]); if (threadIdx.x == 0) { tadLength = shape::length(tadOnlyShapeInfo[f]); tadEWS = shape::elementWiseStride(tadOnlyShapeInfo[f]); xShapeInfo = dxShapeInfo[f]; xLength = shape::length(xShapeInfo); numTads = xLength / tadLength; } __syncthreads(); if (shape::rank(xShapeInfo) == 1) { uint tid = threadIdx.x + blockIdx.x * gridDim.x; for (uint r = tid; r < xLength; r += gridDim.x * blockDim.x) { auto swapIndex = shuffleMap[r]; if (swapIndex >= 0) { uint idx = r * tadEWS; uint swap = swapIndex * tadEWS; T oldX = x[idx]; z[idx] = x[swap]; z[swap] = oldX; } } } else { // we roll over the pairs of TADs, thus limit is numTads / 2 for (uint r = blockIdx.x; r < numTads; r += gridDim.x) { if (shuffleMap[r] >= 0) { auto oldOffset = tadOffsets[f][r]; auto newOffset = tadOffsets[f][shuffleMap[r]]; auto rX = x + oldOffset; auto rY = x + newOffset; auto zX = z + oldOffset; auto zY = z + newOffset; // so we're going to change TAD[oldOffset] with TAD[newOffset] if (tadEWS == 1) { for (Nd4jLong i = threadIdx.x; i < tadLength; i += blockDim.x) { T oldX = rX[i]; rX[i] = rY[i]; zY[i] = oldX; } } else { for (Nd4jLong i = threadIdx.x; i < tadLength; i += blockDim.x) { auto xOffset = shape::getIndexOffset(i, tadOnlyShapeInfo[f], tadLength); auto yOffset = newOffset + xOffset; xOffset += oldOffset; T oldX = x[xOffset]; z[xOffset] = x[yOffset]; z[yOffset] = oldX; } } } } } __syncthreads(); } } //////////////////////////////////////////////////////////////////////// template<typename T> __global__ void execShuffleKernel(void **vdX, Nd4jLong **xShapeInfo, void **vdZ, int N, int *shuffleMap, Nd4jLong **tadOnlyShapeInfo, Nd4jLong **tadOffsets) { shuffleKernel<T>(vdX, xShapeInfo, vdZ, N, shuffleMap, tadOnlyShapeInfo, tadOffsets); } //////////////////////////////////////////////////////////////////////// template<typename T> __host__ void shuffleKernelGeneric(dim3 &launchDims, cudaStream_t *stream, void **vdX, Nd4jLong **xShapeInfo, void **vdZ, int N, int *shuffleMap, Nd4jLong **tadOnlyShapeInfo, Nd4jLong **tadOffsets) { 
execShuffleKernel<T><<<launchDims.x, launchDims.y, launchDims.z, *stream>>>(vdX, xShapeInfo, vdZ, N, shuffleMap, tadOnlyShapeInfo, tadOffsets); } BUILD_SINGLE_TEMPLATE(template void ND4J_EXPORT shuffleKernelGeneric, (dim3 & launchDims, cudaStream_t * stream, void * *vdX, Nd4jLong * *xShapeInfo, void **vdZ, int N, int * shuffleMap, Nd4jLong * *tadOnlyShapeInfo, Nd4jLong * *tadOffsets), LIBND4J_TYPES); }
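// Illustrative sketch, separate from the shuffle-kernel pair above: shuffleKernel swaps
// TAD r with TAD shuffleMap[r] and skips negative entries. The host helper below shows
// one way such a map could be built: indices are randomly paired, one side of each pair
// records its partner and the other side is set to -1 so each pair is exchanged exactly
// once. This layout is an assumption for illustration, not the library's own generator.
#include <algorithm>
#include <cstdio>
#include <random>
#include <vector>

static std::vector<int> make_pairwise_shuffle_map(int num_tads, unsigned seed) {
    std::vector<int> order(num_tads), map(num_tads, -1);
    for (int i = 0; i < num_tads; ++i) order[i] = i;
    std::mt19937 rng(seed);
    std::shuffle(order.begin(), order.end(), rng);
    for (int i = 0; i + 1 < num_tads; i += 2)
        map[order[i]] = order[i + 1];   // order[i+1] keeps -1, so the swap runs only once
    return map;
}

int main() {
    auto map = make_pairwise_shuffle_map(8, 42);
    for (int r = 0; r < (int)map.size(); ++r) printf("tad %d -> %d\n", r, map[r]);
    return 0;
}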
7667c3111e01b915009c26ee85d33b90744446bd.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "engine.h" /* __global__ void gp_add_edge(double * d_grid, double * h_grid, double *x, double *y, double z, double *x1, double *y1,int col1,int col2,int row){ int index = blockIdx.x * blockDim.x + threadIdx.x; int col_size = col1 + col2; // if((index % col_size) < col1){ //printf("index:%d\n",index); //printf("h_grid-index:%f\n",h_grid[index]); //printf("ola:%d\n",(index / col_size * col1) + index % col1); //printf("index:%d\n",index % 2); d_grid[index] = h_grid[(index / col_size * col1) + index % col1]; //d_grid[(index / col1) * col_size + index % col1] = h_grid[index]; }else{ //printf("lala:%d\n",index / col_size); if(index / col_size == 0){ if(index % 2 == 0){ d_grid[index] = x[(index - col1) / 2]; }else{ d_grid[index] = x1[(index - col1) /2]; } }else if(index / col_size == 1){ if(index % 2 == 0){ d_grid[index] = y[(index - col1) / 2]; }else{ d_grid[index] = y1[(index - col1) /2]; } }else if(index / col_size == 2){ d_grid[index] = z; }else{ d_grid[index] = 1; } } //printf("index:%d\n",index); } struct Edge add_circle(struct Edge mx, double x, double y, double z, double r){ double t = 0; double xx = r * cos(t) + x; double yy = r * sin(t) + y; //t += t_step * (2 * M_PI); double * x1; double * y1; double * x2; double * y2; int i; while(t <= (2 * M_PI) + t_step){ double xxx = r * cos(t) + x; double yyy = r * sin(t) + y; //mx = ed_adde(mx,xx,yy,z,xxx,yyy,z); x1[i] = xx; y1[i] = yy; x2[i] = xxx; y2[i] = yyy; xx = xxx; yy = yyy; t = t + t_step * (2 * M_PI); } return mx; } /*I give up struct Matrix bezier(struct Matrix mx, double x1, double y1, double x2, double y2, double x3. double y3, double x4, double y4){ double t = 0; struct Matrix curve; curve = mx_init(4,4); mx_set(curve,0,0,1); mx_set(curve,1,3,1); mx_set(curve,2,0,-3); mx_set(curve,2,1,3); mx_set(curve,3,2,-3); mx_set(curve,3,3,3); } */ /* // The stupid way to do the bezier struct Edge add_bezier(struct Edge mx, double x1, double y1, double x2, double y2, double x3, double y3, double x4, double y4){ //printf("mx.col:%d\n",mx.col); double t = 0; int xx = x1; int yy = y1; double size = (1 + 2 * t_step)/t_step; //printf("size:%f\n",size); double d_size = size * sizeof(double); double col_size = mx.col * sizeof(double); int grid_size = (mx.row * mx.col) * sizeof(double); double * h_x; double * h_y; double * h_x1; double * h_y1; h_x = (double *)malloc(d_size); h_y = (double *)malloc(d_size); h_x1 = (double *)malloc(d_size); h_y1 = (double *)malloc(d_size); double * d_x; double * d_y; double * d_x1; double * d_y1; double * d_grid; double * h_grid; hipMalloc((void **)&d_x,d_size); hipMalloc((void **)&d_y,d_size); hipMalloc((void **)&d_x1,d_size); hipMalloc((void **)&d_y1,d_size); hipMalloc((void **)&h_grid,grid_size); hipMalloc((void **)&d_grid,(mx.col + 2 * size) * (mx.row) * sizeof(double)); int i = 0; int malloc_size = (mx.col + size) * (mx.row) * sizeof(double); while(t <= 1 + t_step){ double xxx = (1-t) * ((1-t) * ((1-t) * x1 + t * x2) + t * ((1-t) * x2 + t * x3)) + t * ((1-t) * ((1-t) * x2 + t * x3) + t * ((1-t) * x3 + t * x4)); double yyy = (1-t) * ((1-t) * ((1-t) * y1 + t * y2) + t * ((1-t) * y2 + t * y3)) + t * ((1-t) * ((1-t) * y2 + t * y3) + t * ((1-t) * y3 + t * y4)); //mx = ed_adde(mx,xx,yy,0,xxx,yyy,0); h_x[i] = xx; h_y[i] = yy; h_x1[i] = xxx; h_y1[i] = yyy; printf("h_x:%f\n",h_x[i]); printf("h_x1:%f\n",h_x1[i]); xx = xxx; yy = yyy; t = t + t_step; i++; } hipMemcpy(d_x,h_x,d_size,hipMemcpyHostToDevice); 
hipMemcpy(d_y,h_y,d_size,hipMemcpyHostToDevice); hipMemcpy(d_x1,h_x1,d_size,hipMemcpyHostToDevice); hipMemcpy(d_y1,h_y1,d_size,hipMemcpyHostToDevice); hipMemcpy(h_grid,mx.grid,grid_size,hipMemcpyHostToDevice); //hipMemcpy(d_grid,,col_size,hipMemcpyHostToDevice); gp_add_edge<<<mx.col + 2 * size,mx.row>>>(d_grid,h_grid, d_x, d_y, 0, d_x1, d_y1,mx.col,2 * size,mx.row); ed_free(mx); mx = ed_init(mx,mx.row,mx.col + (2 * size)); hipDeviceSynchronize(); hipMemcpy(mx.grid,d_grid,malloc_size,hipMemcpyDeviceToHost); ed_print(mx); //ed_print(mx); free(h_x); free(h_y); free(h_x1); free(h_y1); hipFree(d_x); hipFree(d_x1); hipFree(d_y); hipFree(d_y1); hipFree(d_grid); hipFree(h_grid); return mx; } //*/ /* struct Edge add_hermite(struct Edge mx, double x1, double y1, double x2, double y2, double x3, double y3, double x4, double y4){ double t = t_step; double xx = x1; double yy = y1; while(t <= 1 + t_step){ double xxx = x1 * (2 * pow(t,3) - 3 * pow(t,2) + 1) + x3 * (pow(t,3) -pow(t,2)) + x2 * (pow(t,3) - 2 * pow(t,2) + t) + x4 * (3 * pow(t,2) - 2 * pow(t,3)); double yyy = y1 * (2 * pow(t,3) - 3 * pow(t,2) + 1) + y3 * (pow(t,3) -pow(t,2)) + y2 * (pow(t,3) - 2 * pow(t,2) + t) + y4 * (3 * pow(t,2) - 2 * pow(t,3)); mx = ed_adde(mx,xx,yy,0,xxx,yyy,0); xx = xxx; yy = yyy; t += t_step; } return mx; } */
7667c3111e01b915009c26ee85d33b90744446bd.cu
#include "engine.h" /* __global__ void gp_add_edge(double * d_grid, double * h_grid, double *x, double *y, double z, double *x1, double *y1,int col1,int col2,int row){ int index = blockIdx.x * blockDim.x + threadIdx.x; int col_size = col1 + col2; // if((index % col_size) < col1){ //printf("index:%d\n",index); //printf("h_grid-index:%f\n",h_grid[index]); //printf("ola:%d\n",(index / col_size * col1) + index % col1); //printf("index:%d\n",index % 2); d_grid[index] = h_grid[(index / col_size * col1) + index % col1]; //d_grid[(index / col1) * col_size + index % col1] = h_grid[index]; }else{ //printf("lala:%d\n",index / col_size); if(index / col_size == 0){ if(index % 2 == 0){ d_grid[index] = x[(index - col1) / 2]; }else{ d_grid[index] = x1[(index - col1) /2]; } }else if(index / col_size == 1){ if(index % 2 == 0){ d_grid[index] = y[(index - col1) / 2]; }else{ d_grid[index] = y1[(index - col1) /2]; } }else if(index / col_size == 2){ d_grid[index] = z; }else{ d_grid[index] = 1; } } //printf("index:%d\n",index); } struct Edge add_circle(struct Edge mx, double x, double y, double z, double r){ double t = 0; double xx = r * cos(t) + x; double yy = r * sin(t) + y; //t += t_step * (2 * M_PI); double * x1; double * y1; double * x2; double * y2; int i; while(t <= (2 * M_PI) + t_step){ double xxx = r * cos(t) + x; double yyy = r * sin(t) + y; //mx = ed_adde(mx,xx,yy,z,xxx,yyy,z); x1[i] = xx; y1[i] = yy; x2[i] = xxx; y2[i] = yyy; xx = xxx; yy = yyy; t = t + t_step * (2 * M_PI); } return mx; } /*I give up struct Matrix bezier(struct Matrix mx, double x1, double y1, double x2, double y2, double x3. double y3, double x4, double y4){ double t = 0; struct Matrix curve; curve = mx_init(4,4); mx_set(curve,0,0,1); mx_set(curve,1,3,1); mx_set(curve,2,0,-3); mx_set(curve,2,1,3); mx_set(curve,3,2,-3); mx_set(curve,3,3,3); } */ /* // The stupid way to do the bezier struct Edge add_bezier(struct Edge mx, double x1, double y1, double x2, double y2, double x3, double y3, double x4, double y4){ //printf("mx.col:%d\n",mx.col); double t = 0; int xx = x1; int yy = y1; double size = (1 + 2 * t_step)/t_step; //printf("size:%f\n",size); double d_size = size * sizeof(double); double col_size = mx.col * sizeof(double); int grid_size = (mx.row * mx.col) * sizeof(double); double * h_x; double * h_y; double * h_x1; double * h_y1; h_x = (double *)malloc(d_size); h_y = (double *)malloc(d_size); h_x1 = (double *)malloc(d_size); h_y1 = (double *)malloc(d_size); double * d_x; double * d_y; double * d_x1; double * d_y1; double * d_grid; double * h_grid; cudaMalloc((void **)&d_x,d_size); cudaMalloc((void **)&d_y,d_size); cudaMalloc((void **)&d_x1,d_size); cudaMalloc((void **)&d_y1,d_size); cudaMalloc((void **)&h_grid,grid_size); cudaMalloc((void **)&d_grid,(mx.col + 2 * size) * (mx.row) * sizeof(double)); int i = 0; int malloc_size = (mx.col + size) * (mx.row) * sizeof(double); while(t <= 1 + t_step){ double xxx = (1-t) * ((1-t) * ((1-t) * x1 + t * x2) + t * ((1-t) * x2 + t * x3)) + t * ((1-t) * ((1-t) * x2 + t * x3) + t * ((1-t) * x3 + t * x4)); double yyy = (1-t) * ((1-t) * ((1-t) * y1 + t * y2) + t * ((1-t) * y2 + t * y3)) + t * ((1-t) * ((1-t) * y2 + t * y3) + t * ((1-t) * y3 + t * y4)); //mx = ed_adde(mx,xx,yy,0,xxx,yyy,0); h_x[i] = xx; h_y[i] = yy; h_x1[i] = xxx; h_y1[i] = yyy; printf("h_x:%f\n",h_x[i]); printf("h_x1:%f\n",h_x1[i]); xx = xxx; yy = yyy; t = t + t_step; i++; } cudaMemcpy(d_x,h_x,d_size,cudaMemcpyHostToDevice); cudaMemcpy(d_y,h_y,d_size,cudaMemcpyHostToDevice); cudaMemcpy(d_x1,h_x1,d_size,cudaMemcpyHostToDevice); 
cudaMemcpy(d_y1,h_y1,d_size,cudaMemcpyHostToDevice); cudaMemcpy(h_grid,mx.grid,grid_size,cudaMemcpyHostToDevice); //cudaMemcpy(d_grid,,col_size,cudaMemcpyHostToDevice); gp_add_edge<<<mx.col + 2 * size,mx.row>>>(d_grid,h_grid, d_x, d_y, 0, d_x1, d_y1,mx.col,2 * size,mx.row); ed_free(mx); mx = ed_init(mx,mx.row,mx.col + (2 * size)); cudaDeviceSynchronize(); cudaMemcpy(mx.grid,d_grid,malloc_size,cudaMemcpyDeviceToHost); ed_print(mx); //ed_print(mx); free(h_x); free(h_y); free(h_x1); free(h_y1); cudaFree(d_x); cudaFree(d_x1); cudaFree(d_y); cudaFree(d_y1); cudaFree(d_grid); cudaFree(h_grid); return mx; } //*/ /* struct Edge add_hermite(struct Edge mx, double x1, double y1, double x2, double y2, double x3, double y3, double x4, double y4){ double t = t_step; double xx = x1; double yy = y1; while(t <= 1 + t_step){ double xxx = x1 * (2 * pow(t,3) - 3 * pow(t,2) + 1) + x3 * (pow(t,3) -pow(t,2)) + x2 * (pow(t,3) - 2 * pow(t,2) + t) + x4 * (3 * pow(t,2) - 2 * pow(t,3)); double yyy = y1 * (2 * pow(t,3) - 3 * pow(t,2) + 1) + y3 * (pow(t,3) -pow(t,2)) + y2 * (pow(t,3) - 2 * pow(t,2) + t) + y4 * (3 * pow(t,2) - 2 * pow(t,3)); mx = ed_adde(mx,xx,yy,0,xxx,yyy,0); xx = xxx; yy = yyy; t += t_step; } return mx; } */
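// Editor's note: in the commented-out code above, add_circle stores points through
// uninitialized pointers (x1, y1, x2, y2) and an uninitialized index i. A minimal
// corrected sketch, assuming the ed_adde(mx, x0, y0, z0, x1, y1, z1) edge helper and
// the t_step constant referenced in this file (math.h / M_PI assumed via engine.h);
// add_circle_fixed is an illustrative name, not the author's function:
struct Edge add_circle_fixed(struct Edge mx, double x, double y, double z, double r) {
    double t = 0;
    double xx = r * cos(t) + x;            // previous point on the circle
    double yy = r * sin(t) + y;
    while (t <= (2 * M_PI) + t_step) {     // same loop bound as the original
        double xxx = r * cos(t) + x;       // next point on the circle
        double yyy = r * sin(t) + y;
        mx = ed_adde(mx, xx, yy, z, xxx, yyy, z);  // append the chord as an edge
        xx = xxx;
        yy = yyy;
        t += t_step * (2 * M_PI);
    }
    return mx;
}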
3833fb46f4419df1da395f4f52d833c85bfaeb65.hip
// !!! This is a file automatically generated by hipify!!!
#if __CUDACC_VER_MAJOR__ > 9 || (__CUDACC_VER_MAJOR__ == 9 && __CUDACC_VER_MINOR__ >= 2)
// generated by gen_cutlass_matrix_mul_kern_impls.py
// ignore warning of cutlass
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wunused-parameter"
#pragma GCC diagnostic ignored "-Wstrict-aliasing"
#pragma GCC diagnostic ignored "-Wuninitialized"
#pragma GCC diagnostic ignored "-Wmaybe-uninitialized"
#include "src/cuda/matrix_mul/fp32_simt/matrix_mul_float_simt_cutlass_wrapper.cuinl"

using LayoutA = cutlass::layout::ColumnMajor;
using LayoutB = cutlass::layout::RowMajor;
using ThreadBlockShape = cutlass::gemm::GemmShape<128, 128, 8>;
using WarpShape = cutlass::gemm::GemmShape<32, 64, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using EpilogueOp = cutlass::epilogue::thread::LinearCombination<float, 1, float, float>;
using Gemm = cutlass::gemm::device::Gemm<
        float, LayoutA, float, LayoutB, float, cutlass::layout::RowMajor, float,
        cutlass::arch::OpClassSimt, cutlass::arch::Sm50, ThreadBlockShape,
        WarpShape, InstructionShape, EpilogueOp,
        cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;

template void megdnn::cuda::cutlass_wrapper::cutlass_matrix_mul_wrapper<Gemm>(
        const typename Gemm::ElementA* d_A, size_t lda,
        const typename Gemm::ElementB* d_B, size_t ldb,
        typename Gemm::ElementC* d_C, size_t ldc, int* workspace,
        cutlass::gemm::GemmCoord const& problem_size,
        typename Gemm::EpilogueOutputOp::Params const& epilogue,
        hipStream_t stream, int split_k_slices);

#pragma GCC diagnostic pop
#endif
3833fb46f4419df1da395f4f52d833c85bfaeb65.cu
#if __CUDACC_VER_MAJOR__ > 9 || (__CUDACC_VER_MAJOR__ == 9 && __CUDACC_VER_MINOR__ >= 2)
// generated by gen_cutlass_matrix_mul_kern_impls.py
// ignore warning of cutlass
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wunused-parameter"
#pragma GCC diagnostic ignored "-Wstrict-aliasing"
#pragma GCC diagnostic ignored "-Wuninitialized"
#pragma GCC diagnostic ignored "-Wmaybe-uninitialized"
#include "src/cuda/matrix_mul/fp32_simt/matrix_mul_float_simt_cutlass_wrapper.cuinl"

using LayoutA = cutlass::layout::ColumnMajor;
using LayoutB = cutlass::layout::RowMajor;
using ThreadBlockShape = cutlass::gemm::GemmShape<128, 128, 8>;
using WarpShape = cutlass::gemm::GemmShape<32, 64, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using EpilogueOp = cutlass::epilogue::thread::LinearCombination<float, 1, float, float>;
using Gemm = cutlass::gemm::device::Gemm<
        float, LayoutA, float, LayoutB, float, cutlass::layout::RowMajor, float,
        cutlass::arch::OpClassSimt, cutlass::arch::Sm50, ThreadBlockShape,
        WarpShape, InstructionShape, EpilogueOp,
        cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>;

template void megdnn::cuda::cutlass_wrapper::cutlass_matrix_mul_wrapper<Gemm>(
        const typename Gemm::ElementA* d_A, size_t lda,
        const typename Gemm::ElementB* d_B, size_t ldb,
        typename Gemm::ElementC* d_C, size_t ldc, int* workspace,
        cutlass::gemm::GemmCoord const& problem_size,
        typename Gemm::EpilogueOutputOp::Params const& epilogue,
        cudaStream_t stream, int split_k_slices);

#pragma GCC diagnostic pop
#endif
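// Editor's note: a hedged host-side sketch of how the explicit instantiation above
// would typically be driven. The wrapper body lives in the included .cuinl; the
// function name run_simt_sgemm, the alpha/beta values, and the leading-dimension
// choices are illustrative assumptions, not part of the generated file. It relies
// on the Gemm alias and wrapper declaration defined in the file above.
void run_simt_sgemm(const float* d_A, const float* d_B, float* d_C,
                    int m, int n, int k, int* d_workspace, cudaStream_t stream) {
    // A is column-major (m x k), B is row-major (k x n), C is row-major (m x n),
    // matching LayoutA / LayoutB / the output layout chosen in the instantiation.
    cutlass::gemm::GemmCoord problem_size(m, n, k);
    Gemm::EpilogueOutputOp::Params epilogue(1.0f /*alpha*/, 0.0f /*beta*/);
    megdnn::cuda::cutlass_wrapper::cutlass_matrix_mul_wrapper<Gemm>(
            d_A, /*lda=*/m, d_B, /*ldb=*/n, d_C, /*ldc=*/n, d_workspace,
            problem_size, epilogue, stream, /*split_k_slices=*/1);
}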
3c61dcb409beba203ab2b1af90beb151758fe475.hip
// !!! This is a file automatically generated by hipify!!! /*The parallel CUDA code for 2D Ising Model simulation using Metropolis Monte Carlo algorithm In this implementation, the random numbers are generated on CPU side. When you install the CUDA environment, you can compile the CUDA code in linux terminal directly: nvcc ising1.cu -o ising1 */ #include "hip/hip_runtime.h" #include "device_launch_parameters.h" #include <hip/hip_runtime.h> #include <hiprand/hiprand_kernel.h> #include <stdio.h> #include <stdlib.h> #include <math.h> #include <time.h> #include <string.h> // the 2D block size #define BDIMX 8 #define BDIMY 1 // Monte Carlo sweeps: N Monte Carlo steps - one for each spins, on average #define sweeps1 6000 #define sweeps2 3000 // function create initial spins on a lattice void InitialSpins(int *spins, int N, float msg) { int i; float R; for (i = 0; i < N; i++) { R = rand() / (float)(RAND_MAX); if (R < msg) { spins[i] = 1; } else { spins[i] = -1; } } } // linSpace Temperature void linSpaceTemperature(float start, float end, int n, float *Temperature) { int i; float h = (end - start) / (n - 1); for (i = 0; i < n; i++) { Temperature[i] = start + i * h; } } // set the random number generator void RandGenerator(float *random, int N) { int i; for (i = 0; i < N; i++) { random[i] = rand() / (float)(RAND_MAX); } } /* declare global variable on GPU */ // variables for temporarily storing the properties of one step __device__ int d_m; __device__ int d_e; // variables for summing over all the properties of every step __device__ int d_M; __device__ int d_E; // variables for specific heat and magnetic susceptibility __device__ float d_M2; __device__ float d_E2; // calculate the properties __global__ void CalcProp(int *energy, int *spins, int size) { // map the threads to the global memory int ix = blockIdx.x * blockDim.x + threadIdx.x; int iy = blockIdx.y * blockDim.y + threadIdx.y; int nx = blockDim.x * gridDim.x; int idx = iy * nx + ix; // calculate the properties of the present configuration atomicAdd(&d_m, spins[idx]); atomicAdd(&d_e, energy[idx]); if (idx == 0) { d_M += abs(d_m); d_E += d_e; d_E2 += (((float)d_e)*d_e)/ (2.0f * 2.0f); d_M2 += (((float)d_m)*d_m); d_m = 0; d_e = 0; } } // reset the variables after every temperature iteration __global__ void reset() { // map the threads to the global memory int ix = blockIdx.x * blockDim.x + threadIdx.x; int iy = blockIdx.y * blockDim.y + threadIdx.y; int nx = blockDim.x * gridDim.x; int idx = iy * nx + ix; if (idx == 0) { d_M = 0; d_E = 0; d_M2 = 0.; d_E2 = 0.; } } // flip spins using Metropolis algorithm __global__ void MetropolisDevice_even(int *spins, int *energy, float *random, const float Beta) { // map the threads to the global memory int ix = blockIdx.x * blockDim.x + threadIdx.x; int iy = blockIdx.y * blockDim.y + threadIdx.y; int nx = blockDim.x * gridDim.x; int ny = blockDim.y * gridDim.y; int idx = iy * nx + ix; float rand = random[idx]; int dE; int left, right, up, down; // place the value to neighbours with boundary conditions if (ix == 0) { left = spins[idx + nx - 1]; } else { left = spins[idx - 1]; } if (ix == (ny - 1)) { right = spins[idx - nx + 1]; } else { right = spins[idx + 1]; } if (iy == 0) { up = spins[idx + (ny - 1) * nx]; } else { up = spins[idx - nx]; } if (iy == nx - 1) { down = spins[idx - (ny - 1) * nx]; } else { down = spins[idx + nx]; } if ((ix + iy) % 2 == 0) //flip even spins { energy[idx] = -spins[idx] * (left + right + up + down); dE = -2 * energy[idx]; if (dE < 0 || exp(-dE * Beta) > rand) { spins[idx] *= 
-1; energy[idx] *= -1; } } } __global__ void MetropolisDevice_odd(int *spins, int *energy, float *random, const float Beta) { // map the threads to the global memory int ix = blockIdx.x * blockDim.x + threadIdx.x; int iy = blockIdx.y * blockDim.y + threadIdx.y; int nx = blockDim.x * gridDim.x; int ny = blockDim.y * gridDim.y; int idx = iy * nx + ix; float rand = random[idx]; float dE; int left, right, up, down; // place the value to neighbours with boundary conditions if (ix == 0) { left = spins[idx + nx - 1]; } else { left = spins[idx - 1]; } if (ix == ny - 1) { right = spins[idx - nx + 1]; } else { right = spins[idx + 1]; } if (iy == 0) { up = spins[idx + (ny - 1) * nx]; } else { up = spins[idx - nx]; } if (iy == nx - 1) { down = spins[idx - (ny - 1) * nx]; } else { down = spins[idx + nx]; } if ((ix + iy) % 2 != 0) //flip odd spins { energy[idx] = -spins[idx] * (left + right + up + down); dE = -2 * (float)energy[idx]; if (dE < 0 || exp(-dE * Beta) > rand) { spins[idx] *= -1; energy[idx] *= -1; } } } int main() { //latice size int size = 8; printf("CUDA program\n"); printf("\n%d x %d size latice \n", size, size); printf("The random numbers are generated on CPU side\n"); int i, n; // iteration variables float Beta; // beta = 1/T, in this project set k = 1, J = 1. // massage to define the initial configuration. setting msg = 0.5 to random configuration. setting msg = 0 to orientated configuration. float msg = 0.5; //temperature intervel int numberTemperature = 45; // number of temperatures sampled float *Temperature = (float*)malloc(numberTemperature * sizeof(float)); linSpaceTemperature(0.5, 5.0, numberTemperature, Temperature); printf("\nTemperature range 0.5 to 5.0\n"); // averege energy and magnetization per spin float *avergEnergy = (float*)malloc(numberTemperature * sizeof(float)); float *avergMag = (float*)malloc(numberTemperature * sizeof(float)); // variables for calculate specific heat and magnetic susceptibility float *avergEnergy2 = (float*)malloc(numberTemperature * sizeof(float)); float *avergMag2 = (float*)malloc(numberTemperature * sizeof(float)); // specific heat and magnetic susceptibility float *heat = (float*)malloc(numberTemperature * sizeof(float)); float *sus = (float*)malloc(numberTemperature * sizeof(float)); // declare variables and allocate memory int *d_spins; int *h_spins; int *d_energy; int *h_energy; int *gpuRef; // results return from GPU float *h_random_numbers; float *d_random_numbers; int nxy = size * size; int nBytes = nxy * sizeof(int); int NBytes = nxy * sizeof(float); h_spins = (int *)malloc(nBytes); h_energy = (int *)malloc(nBytes); gpuRef = (int *)malloc(nBytes); h_random_numbers = (float *)malloc(NBytes); //set random number generator seed srand(123456); // initialize data at host side memset(gpuRef, 0, nBytes); memset(h_energy, 0, nBytes); InitialSpins(h_spins, nxy,msg); // malloc device global memory hipMalloc((void **)&d_spins, nBytes); hipMalloc((void **)&d_energy, nBytes); hipMalloc((void **)&d_random_numbers, NBytes); // transfer data from host to device int h_m = 0; int h_e = 0; int h_M = 0; int h_E = 0; float h_M2 = 0.0f; float h_E2 = 0.0f; hipMemcpy(d_spins, h_spins, nBytes, hipMemcpyHostToDevice); hipMemcpy(d_energy, h_energy, nBytes, hipMemcpyHostToDevice); hipMemcpyToSymbol(d_M, &h_M, sizeof(int)); hipMemcpyToSymbol(d_E, &h_E, sizeof(int)); hipMemcpyToSymbol(d_m, &h_m, sizeof(int)); hipMemcpyToSymbol(d_e, &h_e, sizeof(int)); hipMemcpyToSymbol(d_M2, &h_M2, sizeof(float)); hipMemcpyToSymbol(d_E2, &h_E2, sizeof(float)); // invoke kernel at 
host side dim3 block(BDIMX, BDIMY); dim3 grid(size / BDIMX, size / BDIMY); // timing clock_t d_start, d_end; double d_time_used; d_start = clock(); printf("\nMain loop starting...\n"); // main loop (loop over the temerature) for (n = 0; n < numberTemperature; n++) { Beta = 1 / Temperature[n]; // process for equilibrium for (i = 0; i < sweeps1; i++) { RandGenerator(h_random_numbers, nxy); hipMemcpy(d_random_numbers, h_random_numbers, nBytes, hipMemcpyHostToDevice); MetropolisDevice_even << <grid, block >> > (d_spins, d_energy, d_random_numbers, Beta); MetropolisDevice_odd << <grid, block >> > (d_spins, d_energy, d_random_numbers, Beta); } // process for calculating the properties for (i = 0; i < sweeps2; i++) { RandGenerator(h_random_numbers, nxy); hipMemcpy(d_random_numbers, h_random_numbers, nBytes, hipMemcpyHostToDevice); MetropolisDevice_even << <grid, block >> > (d_spins, d_energy, d_random_numbers, Beta); MetropolisDevice_odd << <grid, block >> > (d_spins, d_energy, d_random_numbers, Beta); //printf("Temperature %.3f Iteration %d\n", Temperature[n], i + 1); hipLaunchKernelGGL(( CalcProp) , dim3(grid), dim3(block) , 0, 0, d_energy, d_spins,size); //hipDeviceSynchronize(); } hipMemcpyFromSymbol(&h_M, d_M, sizeof(int)); hipMemcpyFromSymbol(&h_E, d_E, sizeof(int)); hipMemcpyFromSymbol(&h_M2, d_M2, sizeof(float)); hipMemcpyFromSymbol(&h_E2, d_E2, sizeof(float)); // calculate the average propeties per spin avergEnergy[n] = h_E / ((sweeps2 )*((float)(size*size))*2.0f); avergMag[n] = h_M / ((sweeps2 )*((float)(size*size))); avergEnergy2[n] = h_E2 / ((sweeps2 )); avergMag2[n] = h_M2 / ((sweeps2)); heat[n] = (avergEnergy2[n]/ ((float)(size*size)) - avergEnergy[n] * avergEnergy[n]*((size*size)))*Beta*Beta; sus[n] = (avergMag2[n]/ ((float)(size*size)) - avergMag[n] * avergMag[n]*(size*size))*Beta; reset << <grid, block >> > (); } d_end = clock(); hipMemcpy(gpuRef, d_spins, nBytes, hipMemcpyDeviceToHost); d_time_used = ((double)(d_end - d_start)) / CLOCKS_PER_SEC; printf("\nEnd main loop.\nTotal time using GPU %f s\n", d_time_used); // deallocate the GPU memory hipFree(d_random_numbers); hipFree(d_spins); hipFree(d_energy); hipDeviceReset(); FILE *properties; properties = fopen("Properties_CUDA1.txt", "a"); fprintf(properties, "%d x %d size lattice\n", size, size); fprintf(properties, "\nTemperature Energy Magnetization Specific heat Magnetic susceptibility (per spin)\n"); for (i = 0; i < numberTemperature; i++) { fprintf(properties, "%.2f %.3f %.3f %.3f %.3f \n", Temperature[i], avergEnergy[i], \ avergMag[i], heat[i], sus[i]); } fclose(properties); // print out the properties printf("\nTemperature Energy Magnetization Specific heat Magnetic susceptibility (per spin)\n"); for (i = 0; i < numberTemperature; i++) { printf("%.2f %.3f %.3f %.3f %.3f \n", \ Temperature[i], avergEnergy[i], \ avergMag[i], heat[i], sus[i]); } // deallocate the memory free(h_spins); free(h_random_numbers); free(Temperature); free(h_energy); printf("end\n"); return 0; }
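// Editor's note: the header comment of this file points out that the random numbers
// are generated on the CPU and copied to the device every sweep. A hedged sketch of
// a device-side alternative using the hipRAND host API (assuming hipRAND is
// available; error checking and generator reuse across sweeps omitted). The helper
// name fill_uniform_on_device is illustrative:
#include <hiprand/hiprand.h>

void fill_uniform_on_device(float *d_random_numbers, int nxy, unsigned long long seed) {
    hiprandGenerator_t gen;
    hiprandCreateGenerator(&gen, HIPRAND_RNG_PSEUDO_DEFAULT);
    hiprandSetPseudoRandomGeneratorSeed(gen, seed);
    // fills d_random_numbers with nxy uniform floats directly on the GPU,
    // removing the per-sweep hipMemcpy of h_random_numbers
    hiprandGenerateUniform(gen, d_random_numbers, nxy);
    hiprandDestroyGenerator(gen);
}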
3c61dcb409beba203ab2b1af90beb151758fe475.cu
/*The parallel CUDA code for 2D Ising Model simulation using Metropolis Monte Carlo algorithm In this implementation, the random numbers are generated on CPU side. When you install the CUDA environment, you can compile the CUDA code in linux terminal directly: nvcc ising1.cu -o ising1 */ #include "cuda_runtime.h" #include "device_launch_parameters.h" #include <cuda.h> #include <curand_kernel.h> #include <stdio.h> #include <stdlib.h> #include <math.h> #include <time.h> #include <string.h> // the 2D block size #define BDIMX 8 #define BDIMY 1 // Monte Carlo sweeps: N Monte Carlo steps - one for each spins, on average #define sweeps1 6000 #define sweeps2 3000 // function create initial spins on a lattice void InitialSpins(int *spins, int N, float msg) { int i; float R; for (i = 0; i < N; i++) { R = rand() / (float)(RAND_MAX); if (R < msg) { spins[i] = 1; } else { spins[i] = -1; } } } // linSpace Temperature void linSpaceTemperature(float start, float end, int n, float *Temperature) { int i; float h = (end - start) / (n - 1); for (i = 0; i < n; i++) { Temperature[i] = start + i * h; } } // set the random number generator void RandGenerator(float *random, int N) { int i; for (i = 0; i < N; i++) { random[i] = rand() / (float)(RAND_MAX); } } /* declare global variable on GPU */ // variables for temporarily storing the properties of one step __device__ int d_m; __device__ int d_e; // variables for summing over all the properties of every step __device__ int d_M; __device__ int d_E; // variables for specific heat and magnetic susceptibility __device__ float d_M2; __device__ float d_E2; // calculate the properties __global__ void CalcProp(int *energy, int *spins, int size) { // map the threads to the global memory int ix = blockIdx.x * blockDim.x + threadIdx.x; int iy = blockIdx.y * blockDim.y + threadIdx.y; int nx = blockDim.x * gridDim.x; int idx = iy * nx + ix; // calculate the properties of the present configuration atomicAdd(&d_m, spins[idx]); atomicAdd(&d_e, energy[idx]); if (idx == 0) { d_M += abs(d_m); d_E += d_e; d_E2 += (((float)d_e)*d_e)/ (2.0f * 2.0f); d_M2 += (((float)d_m)*d_m); d_m = 0; d_e = 0; } } // reset the variables after every temperature iteration __global__ void reset() { // map the threads to the global memory int ix = blockIdx.x * blockDim.x + threadIdx.x; int iy = blockIdx.y * blockDim.y + threadIdx.y; int nx = blockDim.x * gridDim.x; int idx = iy * nx + ix; if (idx == 0) { d_M = 0; d_E = 0; d_M2 = 0.; d_E2 = 0.; } } // flip spins using Metropolis algorithm __global__ void MetropolisDevice_even(int *spins, int *energy, float *random, const float Beta) { // map the threads to the global memory int ix = blockIdx.x * blockDim.x + threadIdx.x; int iy = blockIdx.y * blockDim.y + threadIdx.y; int nx = blockDim.x * gridDim.x; int ny = blockDim.y * gridDim.y; int idx = iy * nx + ix; float rand = random[idx]; int dE; int left, right, up, down; // place the value to neighbours with boundary conditions if (ix == 0) { left = spins[idx + nx - 1]; } else { left = spins[idx - 1]; } if (ix == (ny - 1)) { right = spins[idx - nx + 1]; } else { right = spins[idx + 1]; } if (iy == 0) { up = spins[idx + (ny - 1) * nx]; } else { up = spins[idx - nx]; } if (iy == nx - 1) { down = spins[idx - (ny - 1) * nx]; } else { down = spins[idx + nx]; } if ((ix + iy) % 2 == 0) //flip even spins { energy[idx] = -spins[idx] * (left + right + up + down); dE = -2 * energy[idx]; if (dE < 0 || exp(-dE * Beta) > rand) { spins[idx] *= -1; energy[idx] *= -1; } } } __global__ void MetropolisDevice_odd(int *spins, int 
*energy, float *random, const float Beta) { // map the threads to the global memory int ix = blockIdx.x * blockDim.x + threadIdx.x; int iy = blockIdx.y * blockDim.y + threadIdx.y; int nx = blockDim.x * gridDim.x; int ny = blockDim.y * gridDim.y; int idx = iy * nx + ix; float rand = random[idx]; float dE; int left, right, up, down; // place the value to neighbours with boundary conditions if (ix == 0) { left = spins[idx + nx - 1]; } else { left = spins[idx - 1]; } if (ix == ny - 1) { right = spins[idx - nx + 1]; } else { right = spins[idx + 1]; } if (iy == 0) { up = spins[idx + (ny - 1) * nx]; } else { up = spins[idx - nx]; } if (iy == nx - 1) { down = spins[idx - (ny - 1) * nx]; } else { down = spins[idx + nx]; } if ((ix + iy) % 2 != 0) //flip odd spins { energy[idx] = -spins[idx] * (left + right + up + down); dE = -2 * (float)energy[idx]; if (dE < 0 || exp(-dE * Beta) > rand) { spins[idx] *= -1; energy[idx] *= -1; } } } int main() { //latice size int size = 8; printf("CUDA program\n"); printf("\n%d x %d size latice \n", size, size); printf("The random numbers are generated on CPU side\n"); int i, n; // iteration variables float Beta; // beta = 1/T, in this project set k = 1, J = 1. // massage to define the initial configuration. setting msg = 0.5 to random configuration. setting msg = 0 to orientated configuration. float msg = 0.5; //temperature intervel int numberTemperature = 45; // number of temperatures sampled float *Temperature = (float*)malloc(numberTemperature * sizeof(float)); linSpaceTemperature(0.5, 5.0, numberTemperature, Temperature); printf("\nTemperature range 0.5 to 5.0\n"); // averege energy and magnetization per spin float *avergEnergy = (float*)malloc(numberTemperature * sizeof(float)); float *avergMag = (float*)malloc(numberTemperature * sizeof(float)); // variables for calculate specific heat and magnetic susceptibility float *avergEnergy2 = (float*)malloc(numberTemperature * sizeof(float)); float *avergMag2 = (float*)malloc(numberTemperature * sizeof(float)); // specific heat and magnetic susceptibility float *heat = (float*)malloc(numberTemperature * sizeof(float)); float *sus = (float*)malloc(numberTemperature * sizeof(float)); // declare variables and allocate memory int *d_spins; int *h_spins; int *d_energy; int *h_energy; int *gpuRef; // results return from GPU float *h_random_numbers; float *d_random_numbers; int nxy = size * size; int nBytes = nxy * sizeof(int); int NBytes = nxy * sizeof(float); h_spins = (int *)malloc(nBytes); h_energy = (int *)malloc(nBytes); gpuRef = (int *)malloc(nBytes); h_random_numbers = (float *)malloc(NBytes); //set random number generator seed srand(123456); // initialize data at host side memset(gpuRef, 0, nBytes); memset(h_energy, 0, nBytes); InitialSpins(h_spins, nxy,msg); // malloc device global memory cudaMalloc((void **)&d_spins, nBytes); cudaMalloc((void **)&d_energy, nBytes); cudaMalloc((void **)&d_random_numbers, NBytes); // transfer data from host to device int h_m = 0; int h_e = 0; int h_M = 0; int h_E = 0; float h_M2 = 0.0f; float h_E2 = 0.0f; cudaMemcpy(d_spins, h_spins, nBytes, cudaMemcpyHostToDevice); cudaMemcpy(d_energy, h_energy, nBytes, cudaMemcpyHostToDevice); cudaMemcpyToSymbol(d_M, &h_M, sizeof(int)); cudaMemcpyToSymbol(d_E, &h_E, sizeof(int)); cudaMemcpyToSymbol(d_m, &h_m, sizeof(int)); cudaMemcpyToSymbol(d_e, &h_e, sizeof(int)); cudaMemcpyToSymbol(d_M2, &h_M2, sizeof(float)); cudaMemcpyToSymbol(d_E2, &h_E2, sizeof(float)); // invoke kernel at host side dim3 block(BDIMX, BDIMY); dim3 grid(size / BDIMX, size / 
BDIMY); // timing clock_t d_start, d_end; double d_time_used; d_start = clock(); printf("\nMain loop starting...\n"); // main loop (loop over the temerature) for (n = 0; n < numberTemperature; n++) { Beta = 1 / Temperature[n]; // process for equilibrium for (i = 0; i < sweeps1; i++) { RandGenerator(h_random_numbers, nxy); cudaMemcpy(d_random_numbers, h_random_numbers, nBytes, cudaMemcpyHostToDevice); MetropolisDevice_even << <grid, block >> > (d_spins, d_energy, d_random_numbers, Beta); MetropolisDevice_odd << <grid, block >> > (d_spins, d_energy, d_random_numbers, Beta); } // process for calculating the properties for (i = 0; i < sweeps2; i++) { RandGenerator(h_random_numbers, nxy); cudaMemcpy(d_random_numbers, h_random_numbers, nBytes, cudaMemcpyHostToDevice); MetropolisDevice_even << <grid, block >> > (d_spins, d_energy, d_random_numbers, Beta); MetropolisDevice_odd << <grid, block >> > (d_spins, d_energy, d_random_numbers, Beta); //printf("Temperature %.3f Iteration %d\n", Temperature[n], i + 1); CalcProp <<<grid, block >>> (d_energy, d_spins,size); //cudaDeviceSynchronize(); } cudaMemcpyFromSymbol(&h_M, d_M, sizeof(int)); cudaMemcpyFromSymbol(&h_E, d_E, sizeof(int)); cudaMemcpyFromSymbol(&h_M2, d_M2, sizeof(float)); cudaMemcpyFromSymbol(&h_E2, d_E2, sizeof(float)); // calculate the average propeties per spin avergEnergy[n] = h_E / ((sweeps2 )*((float)(size*size))*2.0f); avergMag[n] = h_M / ((sweeps2 )*((float)(size*size))); avergEnergy2[n] = h_E2 / ((sweeps2 )); avergMag2[n] = h_M2 / ((sweeps2)); heat[n] = (avergEnergy2[n]/ ((float)(size*size)) - avergEnergy[n] * avergEnergy[n]*((size*size)))*Beta*Beta; sus[n] = (avergMag2[n]/ ((float)(size*size)) - avergMag[n] * avergMag[n]*(size*size))*Beta; reset << <grid, block >> > (); } d_end = clock(); cudaMemcpy(gpuRef, d_spins, nBytes, cudaMemcpyDeviceToHost); d_time_used = ((double)(d_end - d_start)) / CLOCKS_PER_SEC; printf("\nEnd main loop.\nTotal time using GPU %f s\n", d_time_used); // deallocate the GPU memory cudaFree(d_random_numbers); cudaFree(d_spins); cudaFree(d_energy); cudaDeviceReset(); FILE *properties; properties = fopen("Properties_CUDA1.txt", "a"); fprintf(properties, "%d x %d size lattice\n", size, size); fprintf(properties, "\nTemperature Energy Magnetization Specific heat Magnetic susceptibility (per spin)\n"); for (i = 0; i < numberTemperature; i++) { fprintf(properties, "%.2f %.3f %.3f %.3f %.3f \n", Temperature[i], avergEnergy[i], \ avergMag[i], heat[i], sus[i]); } fclose(properties); // print out the properties printf("\nTemperature Energy Magnetization Specific heat Magnetic susceptibility (per spin)\n"); for (i = 0; i < numberTemperature; i++) { printf("%.2f %.3f %.3f %.3f %.3f \n", \ Temperature[i], avergEnergy[i], \ avergMag[i], heat[i], sus[i]); } // deallocate the memory free(h_spins); free(h_random_numbers); free(Temperature); free(h_energy); printf("end\n"); return 0; }
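// Editor's note: both versions above size the random-number buffers with
// NBytes = nxy * sizeof(float) but upload them every sweep with nBytes =
// nxy * sizeof(int). The two only coincide because sizeof(int) == sizeof(float)
// on the targeted platforms; the intended per-sweep copy is presumably:
cudaMemcpy(d_random_numbers, h_random_numbers, NBytes, cudaMemcpyHostToDevice);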
5c3812ada25b3d0bb54aa7bcf63f643bc7640f28.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <kernelized_correlation_filters_gpu/cosine_convolution_kernel.h>

__host__ __forceinline__
void cuAssert(hipError_t code, char *file, int line, bool abort) {
    if (code != hipSuccess) {
        fprintf(stderr, "GPUassert: %s %s %d\n", hipGetErrorString(code), file, line);
        if (abort) {
            exit(code);
        }
    }
}

__host__ __device__ __align__(16)
int cuDivUp(int a, int b) {
    return ((a % b) != 0) ? (a / b + 1) : (a / b);
}

__global__ __forceinline__
void cosineConvolutionKernel(float *d_output, const float *d_cnn_codes,
                             const float *d_cos_window, const int data_count) {
    int t_idx = threadIdx.x + blockIdx.x * blockDim.x;
    int t_idy = threadIdx.y + blockIdx.y * blockDim.y;
    int offset = t_idx + t_idy * blockDim.x * gridDim.x;
    if (offset < data_count) {
        d_output[offset] = d_cnn_codes[offset] * d_cos_window[offset];
    }
}

float* cosineConvolutionGPU(const float *d_cnn_codes, const float *d_cos_window,
                            const int data_count, const int BYTE) {
    const int dimension = ::ceil(std::sqrt(data_count));
    dim3 grid_size(cuDivUp(dimension, GRID_SIZE), cuDivUp(dimension, GRID_SIZE));
    dim3 block_size(GRID_SIZE, GRID_SIZE);

    float *d_output;
    hipMalloc(reinterpret_cast<void**>(&d_output), BYTE);
    hipLaunchKernelGGL((cosineConvolutionKernel), dim3(grid_size), dim3(block_size), 0, 0,
                       d_output, d_cnn_codes, d_cos_window, data_count);
    return d_output;
}

/**
 * tx1 function
 */
bool cosineConvolutionGPU(float **d_output, const float *d_cnn_codes,
                          const float *d_cos_window, const int data_count) {
    const int dimension = ::ceil(std::sqrt(data_count));
    dim3 grid_size(cuDivUp(dimension, GRID_SIZE), cuDivUp(dimension, GRID_SIZE));
    dim3 block_size(GRID_SIZE, GRID_SIZE);
    // float *d_output;
    // hipMalloc(reinterpret_cast<void**>(&d_output), BYTE);
    hipLaunchKernelGGL((cosineConvolutionKernel), dim3(grid_size), dim3(block_size), 0, 0,
                       *d_output, d_cnn_codes, d_cos_window, data_count);
    return true;
}
5c3812ada25b3d0bb54aa7bcf63f643bc7640f28.cu
#include <kernelized_correlation_filters_gpu/cosine_convolution_kernel.h>

__host__ __forceinline__
void cuAssert(cudaError_t code, char *file, int line, bool abort) {
    if (code != cudaSuccess) {
        fprintf(stderr, "GPUassert: %s %s %d\n", cudaGetErrorString(code), file, line);
        if (abort) {
            exit(code);
        }
    }
}

__host__ __device__ __align__(16)
int cuDivUp(int a, int b) {
    return ((a % b) != 0) ? (a / b + 1) : (a / b);
}

__global__ __forceinline__
void cosineConvolutionKernel(float *d_output, const float *d_cnn_codes,
                             const float *d_cos_window, const int data_count) {
    int t_idx = threadIdx.x + blockIdx.x * blockDim.x;
    int t_idy = threadIdx.y + blockIdx.y * blockDim.y;
    int offset = t_idx + t_idy * blockDim.x * gridDim.x;
    if (offset < data_count) {
        d_output[offset] = d_cnn_codes[offset] * d_cos_window[offset];
    }
}

float* cosineConvolutionGPU(const float *d_cnn_codes, const float *d_cos_window,
                            const int data_count, const int BYTE) {
    const int dimension = std::ceil(std::sqrt(data_count));
    dim3 grid_size(cuDivUp(dimension, GRID_SIZE), cuDivUp(dimension, GRID_SIZE));
    dim3 block_size(GRID_SIZE, GRID_SIZE);

    float *d_output;
    cudaMalloc(reinterpret_cast<void**>(&d_output), BYTE);
    cosineConvolutionKernel<<<grid_size, block_size>>>(
        d_output, d_cnn_codes, d_cos_window, data_count);
    return d_output;
}

/**
 * tx1 function
 */
bool cosineConvolutionGPU(float **d_output, const float *d_cnn_codes,
                          const float *d_cos_window, const int data_count) {
    const int dimension = std::ceil(std::sqrt(data_count));
    dim3 grid_size(cuDivUp(dimension, GRID_SIZE), cuDivUp(dimension, GRID_SIZE));
    dim3 block_size(GRID_SIZE, GRID_SIZE);
    // float *d_output;
    // cudaMalloc(reinterpret_cast<void**>(&d_output), BYTE);
    cosineConvolutionKernel<<<grid_size, block_size>>>(
        *d_output, d_cnn_codes, d_cos_window, data_count);
    return true;
}
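// Editor's note: a hedged usage sketch for the two cosineConvolutionGPU overloads
// declared above. Buffer names and the wrapper function apply_cosine_window are
// illustrative only; error checking is omitted.
void apply_cosine_window(const float *d_cnn_codes, const float *d_cos_window,
                         int data_count) {
    const int bytes = data_count * sizeof(float);

    // overload 1: allocates the output on the device and returns it
    float *d_windowed = cosineConvolutionGPU(d_cnn_codes, d_cos_window,
                                             data_count, bytes);

    // overload 2 (the "tx1" variant): the caller owns the output buffer
    float *d_out = NULL;
    cudaMalloc(reinterpret_cast<void**>(&d_out), bytes);
    cosineConvolutionGPU(&d_out, d_cnn_codes, d_cos_window, data_count);

    cudaFree(d_windowed);
    cudaFree(d_out);
}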
2c21643bb7f683acf693987b00064924a3e1589e.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" //========================================================================================================================================================================================================200 // DEFINE/INCLUDE //========================================================================================================================================================================================================200 //======================================================================================================================================================150 // MAIN FUNCTION HEADER //======================================================================================================================================================150 #include "./../main.h" // (in the main program folder) needed to recognized input parameters //======================================================================================================================================================150 // UTILITIES //======================================================================================================================================================150 #include "./../util/device/device.h" // (in library path specified to compiler) needed by for device functions #include "./../util/timer/timer.h" // (in library path specified to compiler) needed by timer //======================================================================================================================================================150 // KERNEL_GPU_CUDA_WRAPPER FUNCTION HEADER //======================================================================================================================================================150 #include "./kernel_gpu_cuda_wrapper.h" // (in the current directory) //======================================================================================================================================================150 // KERNEL //======================================================================================================================================================150 #include "./kernel_gpu_cuda.cu" // (in the current directory) GPU kernel, cannot include with header file because of complications with passing of constant memory variables //========================================================================================================================================================================================================200 // KERNEL_GPU_CUDA_WRAPPER FUNCTION //========================================================================================================================================================================================================200 void kernel_gpu_cuda_wrapper(par_str par_cpu, dim_str dim_cpu, box_str* box_cpu, FOUR_VECTOR* rv_cpu, fp* qv_cpu, FOUR_VECTOR* fv_cpu) { //======================================================================================================================================================150 // CPU VARIABLES //======================================================================================================================================================150 // timer long long time0; long long time1; time0 = get_time(); //======================================================================================================================================================150 // GPU SETUP 
//======================================================================================================================================================150 //====================================================================================================100 // INITIAL DRIVER OVERHEAD //====================================================================================================100 hipDeviceSynchronize(); //====================================================================================================100 // VARIABLES //====================================================================================================100 box_str* d_box_gpu; FOUR_VECTOR* d_rv_gpu; fp* d_qv_gpu; FOUR_VECTOR* d_fv_gpu; hipStream_t stream1, stream2, stream3, stream4; hipStreamCreate(&stream1); hipStreamCreate(&stream2); hipStreamCreate(&stream3); hipStreamCreate(&stream4); dim3 threads; dim3 blocks; //====================================================================================================100 // EXECUTION PARAMETERS //====================================================================================================100 blocks.x = dim_cpu.number_boxes; blocks.y = 1; threads.x = NUMBER_THREADS; // define the number of threads in the block threads.y = 1; //======================================================================================================================================================150 // GPU MEMORY (MALLOC) //======================================================================================================================================================150 //====================================================================================================100 // GPU MEMORY (MALLOC) COPY IN //====================================================================================================100 //==================================================50 // boxes //==================================================50 hipMalloc( (void **)&d_box_gpu, dim_cpu.box_mem); //==================================================50 // rv //==================================================50 hipMalloc( (void **)&d_rv_gpu, dim_cpu.space_mem); //==================================================50 // qv //==================================================50 hipMalloc( (void **)&d_qv_gpu, dim_cpu.space_mem2); //====================================================================================================100 // GPU MEMORY (MALLOC) COPY //====================================================================================================100 //==================================================50 // fv //==================================================50 hipMalloc( (void **)&d_fv_gpu, dim_cpu.space_mem); //======================================================================================================================================================150 // GPU MEMORY COPY //======================================================================================================================================================150 //====================================================================================================100 // GPU MEMORY (MALLOC) COPY IN //====================================================================================================100 //==================================================50 // boxes //==================================================50 hipMemcpyAsync( d_box_gpu, box_cpu, dim_cpu.box_mem, hipMemcpyHostToDevice, stream1); 
//==================================================50 // rv //==================================================50 hipMemcpyAsync( d_rv_gpu, rv_cpu, dim_cpu.space_mem, hipMemcpyHostToDevice, stream2); //==================================================50 // qv //==================================================50 hipMemcpyAsync( d_qv_gpu, qv_cpu, dim_cpu.space_mem2, hipMemcpyHostToDevice, stream3); //====================================================================================================100 // GPU MEMORY (MALLOC) COPY //====================================================================================================100 //==================================================50 // fv //==================================================50 hipMemcpyAsync( d_fv_gpu, fv_cpu, dim_cpu.space_mem, hipMemcpyHostToDevice, stream4); hipDeviceSynchronize(); //======================================================================================================================================================150 // KERNEL //======================================================================================================================================================150 // launch kernel - all boxes hipLaunchKernelGGL(( kernel_gpu_cuda), dim3(blocks), dim3(threads), 0, 0, par_cpu, dim_cpu, d_box_gpu, d_rv_gpu, d_qv_gpu, d_fv_gpu); checkCUDAError("Start"); hipDeviceSynchronize(); //======================================================================================================================================================150 // GPU MEMORY COPY (CONTD.) //======================================================================================================================================================150 hipMemcpy( fv_cpu, d_fv_gpu, dim_cpu.space_mem, hipMemcpyDeviceToHost); //======================================================================================================================================================150 // GPU MEMORY DEALLOCATION //======================================================================================================================================================150 hipFree(d_rv_gpu); hipFree(d_qv_gpu); hipFree(d_fv_gpu); hipFree(d_box_gpu); time1 = get_time(); //======================================================================================================================================================150 // DISPLAY TIMING //======================================================================================================================================================150 printf("Total time:\n"); printf("%.12f s\n", (float) (time1-time0) / 1000000); hipStreamDestroy(stream1); hipStreamDestroy(stream2); hipStreamDestroy(stream3); hipStreamDestroy(stream4); }
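// Editor's note: the wrapper above illustrates the general rewrite hipify applies to
// a triple-chevron kernel launch. A minimal sketch with a hypothetical kernel
// (my_kernel and launch_example are illustrative names, not part of this benchmark):
__global__ void my_kernel(int *data, int n) {
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n) data[i] += 1;
}

void launch_example(int *d_data, int n, hipStream_t stream) {
    dim3 grid((n + 255) / 256), block(256);
    // CUDA form:  my_kernel<<<grid, block, 0, stream>>>(d_data, n);
    // HIP form produced by hipify:
    hipLaunchKernelGGL(my_kernel, grid, block, 0, stream, d_data, n);
}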
2c21643bb7f683acf693987b00064924a3e1589e.cu
//========================================================================================================================================================================================================200 // DEFINE/INCLUDE //========================================================================================================================================================================================================200 //======================================================================================================================================================150 // MAIN FUNCTION HEADER //======================================================================================================================================================150 #include "./../main.h" // (in the main program folder) needed to recognized input parameters //======================================================================================================================================================150 // UTILITIES //======================================================================================================================================================150 #include "./../util/device/device.h" // (in library path specified to compiler) needed by for device functions #include "./../util/timer/timer.h" // (in library path specified to compiler) needed by timer //======================================================================================================================================================150 // KERNEL_GPU_CUDA_WRAPPER FUNCTION HEADER //======================================================================================================================================================150 #include "./kernel_gpu_cuda_wrapper.h" // (in the current directory) //======================================================================================================================================================150 // KERNEL //======================================================================================================================================================150 #include "./kernel_gpu_cuda.cu" // (in the current directory) GPU kernel, cannot include with header file because of complications with passing of constant memory variables //========================================================================================================================================================================================================200 // KERNEL_GPU_CUDA_WRAPPER FUNCTION //========================================================================================================================================================================================================200 void kernel_gpu_cuda_wrapper(par_str par_cpu, dim_str dim_cpu, box_str* box_cpu, FOUR_VECTOR* rv_cpu, fp* qv_cpu, FOUR_VECTOR* fv_cpu) { //======================================================================================================================================================150 // CPU VARIABLES //======================================================================================================================================================150 // timer long long time0; long long time1; time0 = get_time(); //======================================================================================================================================================150 // GPU SETUP 
//======================================================================================================================================================150 //====================================================================================================100 // INITIAL DRIVER OVERHEAD //====================================================================================================100 cudaDeviceSynchronize(); //====================================================================================================100 // VARIABLES //====================================================================================================100 box_str* d_box_gpu; FOUR_VECTOR* d_rv_gpu; fp* d_qv_gpu; FOUR_VECTOR* d_fv_gpu; cudaStream_t stream1, stream2, stream3, stream4; cudaStreamCreate(&stream1); cudaStreamCreate(&stream2); cudaStreamCreate(&stream3); cudaStreamCreate(&stream4); dim3 threads; dim3 blocks; //====================================================================================================100 // EXECUTION PARAMETERS //====================================================================================================100 blocks.x = dim_cpu.number_boxes; blocks.y = 1; threads.x = NUMBER_THREADS; // define the number of threads in the block threads.y = 1; //======================================================================================================================================================150 // GPU MEMORY (MALLOC) //======================================================================================================================================================150 //====================================================================================================100 // GPU MEMORY (MALLOC) COPY IN //====================================================================================================100 //==================================================50 // boxes //==================================================50 cudaMalloc( (void **)&d_box_gpu, dim_cpu.box_mem); //==================================================50 // rv //==================================================50 cudaMalloc( (void **)&d_rv_gpu, dim_cpu.space_mem); //==================================================50 // qv //==================================================50 cudaMalloc( (void **)&d_qv_gpu, dim_cpu.space_mem2); //====================================================================================================100 // GPU MEMORY (MALLOC) COPY //====================================================================================================100 //==================================================50 // fv //==================================================50 cudaMalloc( (void **)&d_fv_gpu, dim_cpu.space_mem); //======================================================================================================================================================150 // GPU MEMORY COPY //======================================================================================================================================================150 //====================================================================================================100 // GPU MEMORY (MALLOC) COPY IN //====================================================================================================100 //==================================================50 // boxes //==================================================50 cudaMemcpyAsync( d_box_gpu, box_cpu, dim_cpu.box_mem, cudaMemcpyHostToDevice, 
stream1); //==================================================50 // rv //==================================================50 cudaMemcpyAsync( d_rv_gpu, rv_cpu, dim_cpu.space_mem, cudaMemcpyHostToDevice, stream2); //==================================================50 // qv //==================================================50 cudaMemcpyAsync( d_qv_gpu, qv_cpu, dim_cpu.space_mem2, cudaMemcpyHostToDevice, stream3); //====================================================================================================100 // GPU MEMORY (MALLOC) COPY //====================================================================================================100 //==================================================50 // fv //==================================================50 cudaMemcpyAsync( d_fv_gpu, fv_cpu, dim_cpu.space_mem, cudaMemcpyHostToDevice, stream4); cudaDeviceSynchronize(); //======================================================================================================================================================150 // KERNEL //======================================================================================================================================================150 // launch kernel - all boxes kernel_gpu_cuda<<<blocks, threads>>>( par_cpu, dim_cpu, d_box_gpu, d_rv_gpu, d_qv_gpu, d_fv_gpu); checkCUDAError("Start"); cudaDeviceSynchronize(); //======================================================================================================================================================150 // GPU MEMORY COPY (CONTD.) //======================================================================================================================================================150 cudaMemcpy( fv_cpu, d_fv_gpu, dim_cpu.space_mem, cudaMemcpyDeviceToHost); //======================================================================================================================================================150 // GPU MEMORY DEALLOCATION //======================================================================================================================================================150 cudaFree(d_rv_gpu); cudaFree(d_qv_gpu); cudaFree(d_fv_gpu); cudaFree(d_box_gpu); time1 = get_time(); //======================================================================================================================================================150 // DISPLAY TIMING //======================================================================================================================================================150 printf("Total time:\n"); printf("%.12f s\n", (float) (time1-time0) / 1000000); cudaStreamDestroy(stream1); cudaStreamDestroy(stream2); cudaStreamDestroy(stream3); cudaStreamDestroy(stream4); }
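// Editor's note: the four cudaMemcpyAsync calls above only overlap with other work
// when the host buffers are page-locked; from ordinary malloc'd memory the transfers
// are staged through a driver-managed pinned buffer and may not overlap at all. A
// hedged sketch of page-locking the existing host allocations before calling the
// wrapper (the helper name pin_lavamd_host_buffers is illustrative):
void pin_lavamd_host_buffers(box_str* box_cpu, FOUR_VECTOR* rv_cpu,
                             fp* qv_cpu, FOUR_VECTOR* fv_cpu, dim_str dim_cpu) {
    // cudaHostRegister page-locks memory that was allocated with plain malloc,
    // so the allocation code in the caller does not need to change.
    cudaHostRegister(box_cpu, dim_cpu.box_mem, cudaHostRegisterDefault);
    cudaHostRegister(rv_cpu, dim_cpu.space_mem, cudaHostRegisterDefault);
    cudaHostRegister(qv_cpu, dim_cpu.space_mem2, cudaHostRegisterDefault);
    cudaHostRegister(fv_cpu, dim_cpu.space_mem, cudaHostRegisterDefault);
    // ... call kernel_gpu_cuda_wrapper(...), then cudaHostUnregister each buffer.
}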
952ae7011fd78ec1746ddd878b27391207012245.hip
// !!! This is a file automatically generated by hipify!!! #include <hip/hip_runtime.h> #include <stdlib.h> #include <stdio.h> #include <math.h> #include <time.h> #include <string.h> #include <fstream> #include <limits.h> #include <float.h> #include <hip/hip_runtime_api.h> #include "helper.h" ///////////////////////////////////////////////////////////////////////// // Init ///////////////////////////////////////////////////////////////////////// uint64_t noOfTimeSeries = 20; uint64_t lenOfTimeSeries = 70; uint64_t noOfTestTimeSeries = 10; ///////////////////////////////////////////////////////////////////////// void usage(){ printf("********************************\n"); printf("************* USAGE ************\n"); printf("********************************\n"); printf("./knn [training-file] [number-of-time-series] [length-of-time-series] [testing-file] [number-of-times-series-in-test] [window_size]\n"); printf("eg. ./knn SonyAIBORobotSurface_TRAIN 20 70 \n"); printf("********************************\n"); } void readfile(char* inputFileName,float* _data,int* _class,uint64_t len) { std::ifstream in_file; in_file.open(inputFileName); if(!in_file) { printf("\nFile Not Found !"); exit(1); } float class_in; float data_in; long int i, j; for(i=0; i<len; i++) { in_file >> class_in; _class[i] = (int)class_in; //printf("class : %d\n",_class[i]); for (j=0; j<lenOfTimeSeries; j++) { in_file >> data_in; _data[i*lenOfTimeSeries+j] = data_in; //printf("%f, ",_data[i*lenOfTimeSeries+j]); } //printf("\n"); } in_file.close(); } //////////////////////////////////////////////////////////////// __global__ void NormalizeTimeSeries(float* series,const int totalTimeSeries, const int length, float* output){ int idx = blockIdx.x * blockDim.x + threadIdx.x; if(idx < totalTimeSeries){ int i=0; float sum = 0, sum_sqr = 0, mean = 0, mean_sqr = 0, variance = 0, std_dev = 0; int t = idx * length; //printf("Normalize Train Series #%d start from index : %d\n",idx,t); for(i=t; i<t+length; i++) { sum += series[i]; sum_sqr += series[i] * series[i]; } mean = sum / length; mean_sqr = mean*mean; variance = (sum_sqr/length) - mean_sqr; std_dev = sqrt(variance); i = 0; for(i= t; i<t + length; i++){ series[i] = (series[i]-mean) / std_dev; output[i] = (series[i]-mean) / std_dev; } } } __global__ void DTWDistance(float* test_data, float* train_data, int length, int window, const int numberToCalc, const int current, float* distance,int trainIdx, int testIdx){ int idx = blockIdx.x * blockDim.x + threadIdx.x; if(numberToCalc > idx){ float* seriesA = test_data + (length * testIdx); float* seriesB = train_data + (length * trainIdx); int k = 0; int start = numberToCalc - 1; if(current > length - 1){ start = current + (length-1) * (length - numberToCalc); } int arrayIdx = start + idx * (length - 1); int i = arrayIdx / length; int j = arrayIdx % length; float dist; start = max(0,i - window); int end = min(length, i + window); if(j >= start && j < end){ //printf("seriesA[%d] : %f seriesB[%d] : %f\n",i,seriesA[i],j, seriesB[j]); dist = pow(seriesA[i] - seriesB[j], 2); //printf("(%d,%d) - Distance %f\n",i,j, dist); float left = /*idx > 0 && j > 0*/ j > 0 ? distance[arrayIdx - 1] : INT_MAX; float top = arrayIdx - length >= 0 ? distance[arrayIdx - length] : INT_MAX; float diagonal = arrayIdx - length > 0 && j > 0 ? distance[arrayIdx - length - 1] : INT_MAX; diagonal = (i == 0 && j == 0) ? 
0 : diagonal; diagonal = min(diagonal,left); dist = dist + min(top,diagonal); distance[arrayIdx] = dist; //printf("%d %d %f %f %f Min Value : %f Final : %f\n",i,j, //left,top,diagonal, min(top,diagonal), distance[arrayIdx]); } } } __global__ void tempCheck(float* A, float* B, int length) { int i = 0; for(i = 0; i < length; i++){ printf("%f %f\n",A[i],B[i]); } } __global__ void LB_Keogh(float* test_data, float* train_data, int length, int window, int trainIdx, int testIdx, float* d_LB_dist){ int i = threadIdx.x, start_idx = 0, end_idx, j; extern __shared__ float output[]; if(i < length){ float* seriesA = test_data + (length * testIdx); float* seriesB = train_data + (length * trainIdx); float lower_bound = INT_MAX, upper_bound = INT_MIN, current; lower_bound = INT_MAX; upper_bound = INT_MIN; start_idx = i - window >= 0 ? i - window : 0; end_idx = min(i + window, length); for(j = start_idx; j < end_idx; j++){ if(seriesB[j] > upper_bound) upper_bound = seriesB[j]; if(seriesB[j] < lower_bound) lower_bound = seriesB[j]; } current = seriesA[i]; float val = 0; if (current>upper_bound) val = pow(current-upper_bound,2); else if (current<lower_bound) val = pow(current-lower_bound,2); output[i] =val; __syncthreads(); if (i == 0) { d_LB_dist[0] = 0; int k; for (k = 0; k < length; k++) { d_LB_dist[0] += output[k]; } d_LB_dist[0] = sqrt(d_LB_dist[0]); } } } //////////////////////////////////////////////////////////////// float DTWDistance_CPU(float* seriesA, float* seriesB, int length, int window){ int i = -1, j = -1; float* distances = (float*) malloc(length * length * sizeof(float)); float dist; //memset(distances,FLT_MAX ,(length) * (length)); uint64_t k; for(k = 0; k < length * length; k++){ distances[k] = FLT_MAX; } for(i = 0; i < length; i++){ int start = max(0,i - window); int end = min(length, i + window); for(j = start; j < end; j++){ //printf("seriesA[%d] : %f seriesB[%d] : %f\n",i,seriesA[i],j, seriesB[j]); dist = pow(seriesA[i] - seriesB[j], 2); //printf("(%d,%d) - Distance %f\n",i,j, dist); int idx = (i * length) + j; float left = /*idx > 0 && j > 0*/ j > start ? distances[idx - 1] : INT_MAX; float top = idx - length >= 0 ? distances[idx - length] : INT_MAX; float diagonal = idx - length > 0 && j > 0 ? distances[idx - length - 1] : INT_MAX; diagonal = (i == 0 && j == 0) ? 0 : diagonal; diagonal = min(diagonal,left); distances[idx] = dist + min(top,diagonal); //printf("%d %d %f %f %f Min Value : %f Final : %f\n",i,j, //left,top,diagonal, min(top,diagonal), distances[idx]); } } /*for(i = 0 ; i < length; i++){ for(j = 0; j < length; j++){ int idx = (i * length) + j; printf("%d %d %f | ",i,j, distances[idx]); } printf("\n"); }*/ float result = sqrt(distances[length * length - 1]); return result; } float LB_Keogh_CPU(float* seriesA, float* seriesB, int length, int window){ float LB_sum = 0; int i = 0, start_idx = 0, end_idx, j; float lower_bound = INT_MAX, upper_bound = INT_MIN, current; for(;i < length; i++){ lower_bound = INT_MAX; upper_bound = INT_MIN; start_idx = i - window >= 0 ? 
i - window : 0; end_idx = min(i + window, length); for(j = start_idx; j < end_idx; j++){ if(seriesB[j] > upper_bound) upper_bound = seriesB[j]; if(seriesB[j] < lower_bound) lower_bound = seriesB[j]; } current = seriesA[i]; //printf("%f %d %f %f %f\n", seriesA[i], i, lower_bound, upper_bound, LB_sum); float val = 0; if (current>upper_bound) val = pow(current-upper_bound,2); else if (current<lower_bound) val = pow(current-lower_bound,2); //printf("%d %f\n",i,val); LB_sum +=val; } return sqrt(LB_sum); } int main(int argc, char * argv[]) { clock_t start, end; fprintf(stderr, "Initializing ... \n"); char* inputFileName = argv[1]; int isDefault = 0; if(!inputFileName){ printf("No test file provided. Using default file : SonyAIBORobotSurface_TRAIN\n"); inputFileName = "SonyAIBORobotSurface_TRAIN"; isDefault = 1; } if(argc > 1){ noOfTimeSeries = atoi(argv[2]); }else{ if(isDefault == 0){ printf("Number of time series not provided. Exiting\n"); exit(0); } } if(argc > 2){ lenOfTimeSeries = atoi(argv[3]); } else{ if(isDefault == 0){ printf("Length of time series not provided. Exiting\n"); exit(0); } } uint64_t train_size = noOfTimeSeries * lenOfTimeSeries * sizeof(float); uint64_t test_size;// = noOfTestTimeSeries * lenOfTimeSeries * sizeof(float); //storage allocation for train data and train class labels float* train_data = (float*) malloc(train_size); int* train_class = (int *) malloc(noOfTimeSeries*sizeof(int)); //storage allocation for test data and test class labels float* test_data;// = (float*) malloc (test_size); int* test_class;// = (int *) malloc(noOfTestTimeSeries * sizeof(int)); //get training file printf("Reading train file\n"); //read training file readfile(inputFileName, train_data, train_class, noOfTimeSeries); printf("===================================================\n"); printf("Training File : %s\n",inputFileName); printf("Number of Time Series : %d\n",noOfTimeSeries); printf("Length of Time Series : %d\n",lenOfTimeSeries); int window_size = lenOfTimeSeries / 10; int LB_Keogh_param = 5; // If Testing File is provided if(argc > 5 || isDefault == 1){ char* testFileName; if(isDefault == 0){ testFileName = argv[4]; noOfTestTimeSeries = atoi(argv[5]); }else{ testFileName = "SonyAIBORobotSurface_TEST"; } if(argc > 6){ window_size = atoi(argv[6]); } if(argc > 7){ LB_Keogh_param = atoi(argv[7]); } printf("-----------------------------------------------------\n"); //get testing file printf("Reading test file\n"); test_size = noOfTestTimeSeries * lenOfTimeSeries * sizeof(float); test_data = (float*) malloc (test_size); test_class = (int *) malloc(noOfTestTimeSeries * sizeof(int)); //read test file readfile(testFileName, test_data, test_class, noOfTestTimeSeries); printf("Testing File : %s\n",testFileName); printf("Number of Time Series to validate: %d\n",noOfTestTimeSeries); printf("Window Size for kNN: %d\n",window_size); printf("LB Keogh Parameter: %d\n",LB_Keogh_param); } printf("===================================================\n"); //GPU number present in the system //int noOfGPUs; //checkCudaErrors(hipGetDeviceCount(&noOfGPUs)); //printf("Total GPUs on System : %d\n", noOfGPUs); //checkCudaErrors(hipSetDevice(0)); checkCudaErrors(hipDeviceReset()); hipProfilerStart(); int i = 0, j = 0, errorCount = 0; start = clock(); float* d_test_data; clock_t timer = clock(); checkCudaErrors(hipMalloc((void**)&d_test_data, test_size)); checkCudaErrors(hipMemcpy(d_test_data, test_data, test_size, hipMemcpyHostToDevice)); printf("Time to upload test data %f\n", (double)(clock() - timer) / 
((double)CLOCKS_PER_SEC)); // Transfer train data float* d_train_data = 0; timer = clock(); checkCudaErrors(hipMalloc((void**)&d_train_data, train_size)); checkCudaErrors(hipMemcpy(d_train_data, train_data, train_size, hipMemcpyHostToDevice)); printf("Time to upload train data %f\n", (double)(clock() - timer) / ((double)CLOCKS_PER_SEC)); int k; //float* d_norm_train_data = 0; //checkCudaErrors(hipMalloc((void**)&d_norm_train_data, train_size)); // Normalize Train Series int threadsPerBlock = min((int)ceil(noOfTimeSeries/(float)32)*32,1024); int noOfBlocks = ceil((noOfTimeSeries)/(float)threadsPerBlock); //NormalizeTimeSeries<<<noOfBlocks, threadsPerBlock>>>(d_train_data, noOfTimeSeries, lenOfTimeSeries,d_norm_train_data); //checkCudaErrors(hipDeviceSynchronize()); //checkCudaErrors(hipMemcpy(d_train_data, d_norm_train_data, train_size, hipMemcpyDeviceToDevice)); float* d_LB_dist; float min_dist, dist; //printf("Normalized Train Series\n"); //tempCheck<<<1,1>>>(d_norm_train_data,d_train_data,noOfTimeSeries * lenOfTimeSeries); int predictedIndex = 0; for(i = 0; i < noOfTestTimeSeries; i++){ min_dist = FLT_MAX; for(j = 0; j < noOfTimeSeries; j++){ float* LB_dist=(float*)malloc(sizeof(float)); checkCudaErrors(hipMalloc((void**)&d_LB_dist, sizeof(float))); threadsPerBlock = min((int)ceil(lenOfTimeSeries/(float)32)*32,1024); hipLaunchKernelGGL(( LB_Keogh), dim3(1),dim3(threadsPerBlock),lenOfTimeSeries*sizeof(float), 0, d_test_data,d_train_data,lenOfTimeSeries,LB_Keogh_param,j,i,d_LB_dist); checkCudaErrors(hipMemcpy(LB_dist, d_LB_dist, sizeof(float), hipMemcpyDeviceToHost)); checkCudaErrors(hipFree(d_LB_dist)); //printf("LB_Keogh between %d and %d is %f\n",i,j,LB_dist[0]); float* test = test_data + (lenOfTimeSeries * i); float* train = train_data + (lenOfTimeSeries * j); if(LB_dist[0] < min_dist){ float* distances = (float*) malloc(lenOfTimeSeries * lenOfTimeSeries * sizeof(float)); float* d_distances; checkCudaErrors(hipMalloc((void**)&d_distances, lenOfTimeSeries * lenOfTimeSeries * sizeof(float))); // init to FLT_MAX checkCudaErrors(hipMemset(d_distances,FLT_MAX, lenOfTimeSeries * lenOfTimeSeries * sizeof(float))); for(k = 0; k < (2 * lenOfTimeSeries - 1); k++){ if(k < lenOfTimeSeries){ threadsPerBlock = k + 1; }else{ threadsPerBlock = (2 * lenOfTimeSeries) - k - 1; } int temp = threadsPerBlock; threadsPerBlock = min((int)ceil(threadsPerBlock/(float)32)*32,1024); noOfBlocks = ceil((temp)/(float)threadsPerBlock); hipLaunchKernelGGL(( DTWDistance), dim3(noOfBlocks),dim3(threadsPerBlock), 0, 0, d_test_data,d_train_data,lenOfTimeSeries, window_size,temp,k,d_distances, j,i); hipDeviceSynchronize(); } checkCudaErrors(hipMemcpy(distances, d_distances,lenOfTimeSeries * lenOfTimeSeries * sizeof(float), hipMemcpyDeviceToHost)); checkCudaErrors(hipFree(d_distances)); dist = distances[lenOfTimeSeries * lenOfTimeSeries - 1]; dist = sqrt(dist); free(distances); if(dist < min_dist){ min_dist = dist; predictedIndex = j; //printf("Min Distance between %d and %d is %f\n",i,j,dist); } } } // Check for stuff int predictedClass = train_class[predictedIndex]; int correctClass = test_class[i]; //printf("Test Case : %d Predicted Class :%d Actual Class : %d\n",i, predictedClass, correctClass); if(predictedClass != correctClass){ errorCount++; } } end = clock() - start; hipProfilerStop(); checkCudaErrors(hipDeviceReset()); hipFree(d_LB_dist); hipFree(d_train_data); hipFree(d_test_data); double endtime = (double)end / ((double)CLOCKS_PER_SEC); printf("Total GPU Time %f\n", endtime); printf("GPU Accuracy is 
%f\n",(float)(noOfTestTimeSeries-errorCount)*(100.0/noOfTestTimeSeries)); //printf("--------------------------------------------\n"); //printf("CPU : \n"); errorCount = 0; start = clock(); for(i = 0; i < noOfTestTimeSeries; i++){ min_dist = FLT_MAX; for(j = 0; j < noOfTimeSeries; j++){ float* test = test_data + (lenOfTimeSeries * i); float* train = train_data + (lenOfTimeSeries * j); float LB_dist = LB_Keogh_CPU(test,train,lenOfTimeSeries,LB_Keogh_param); //printf("LB_Keogh between %d and %d is %f\n",i,j,LB_dist); if(LB_dist < min_dist){ dist = DTWDistance_CPU(test,train,lenOfTimeSeries, window_size); if(dist < min_dist){ min_dist = dist; predictedIndex = j; //printf("Min Distance between %d and %d is %f\n",i,j,dist); } } } // Check for stuff int predictedClass = train_class[predictedIndex]; int correctClass = test_class[i]; //printf("Test Case : %d Predicted Class :%d Actual Class : %d\n",i, predictedClass, correctClass); if(predictedClass != correctClass){ errorCount++; } } free(train_class); free(test_class); end = clock() - start; endtime = (double)end / ((double)CLOCKS_PER_SEC); //printf("Total CPU Time : %f\n", endtime); //printf("CPU Accuracy : %f\n",(float)(noOfTestTimeSeries-errorCount)*(100.0/noOfTestTimeSeries)); return 0; }
952ae7011fd78ec1746ddd878b27391207012245.cu
#include <cuda.h> #include <stdlib.h> #include <stdio.h> #include <math.h> #include <time.h> #include <string.h> #include <fstream> #include <limits.h> #include <float.h> #include <cuda_profiler_api.h> #include "helper.h" ///////////////////////////////////////////////////////////////////////// // Init ///////////////////////////////////////////////////////////////////////// uint64_t noOfTimeSeries = 20; uint64_t lenOfTimeSeries = 70; uint64_t noOfTestTimeSeries = 10; ///////////////////////////////////////////////////////////////////////// void usage(){ printf("********************************\n"); printf("************* USAGE ************\n"); printf("********************************\n"); printf("./knn [training-file] [number-of-time-series] [length-of-time-series] [testing-file] [number-of-times-series-in-test] [window_size]\n"); printf("eg. ./knn SonyAIBORobotSurface_TRAIN 20 70 \n"); printf("********************************\n"); } void readfile(char* inputFileName,float* _data,int* _class,uint64_t len) { std::ifstream in_file; in_file.open(inputFileName); if(!in_file) { printf("\nFile Not Found !"); exit(1); } float class_in; float data_in; long int i, j; for(i=0; i<len; i++) { in_file >> class_in; _class[i] = (int)class_in; //printf("class : %d\n",_class[i]); for (j=0; j<lenOfTimeSeries; j++) { in_file >> data_in; _data[i*lenOfTimeSeries+j] = data_in; //printf("%f, ",_data[i*lenOfTimeSeries+j]); } //printf("\n"); } in_file.close(); } //////////////////////////////////////////////////////////////// __global__ void NormalizeTimeSeries(float* series,const int totalTimeSeries, const int length, float* output){ int idx = blockIdx.x * blockDim.x + threadIdx.x; if(idx < totalTimeSeries){ int i=0; float sum = 0, sum_sqr = 0, mean = 0, mean_sqr = 0, variance = 0, std_dev = 0; int t = idx * length; //printf("Normalize Train Series #%d start from index : %d\n",idx,t); for(i=t; i<t+length; i++) { sum += series[i]; sum_sqr += series[i] * series[i]; } mean = sum / length; mean_sqr = mean*mean; variance = (sum_sqr/length) - mean_sqr; std_dev = sqrt(variance); i = 0; for(i= t; i<t + length; i++){ series[i] = (series[i]-mean) / std_dev; output[i] = (series[i]-mean) / std_dev; } } } __global__ void DTWDistance(float* test_data, float* train_data, int length, int window, const int numberToCalc, const int current, float* distance,int trainIdx, int testIdx){ int idx = blockIdx.x * blockDim.x + threadIdx.x; if(numberToCalc > idx){ float* seriesA = test_data + (length * testIdx); float* seriesB = train_data + (length * trainIdx); int k = 0; int start = numberToCalc - 1; if(current > length - 1){ start = current + (length-1) * (length - numberToCalc); } int arrayIdx = start + idx * (length - 1); int i = arrayIdx / length; int j = arrayIdx % length; float dist; start = max(0,i - window); int end = min(length, i + window); if(j >= start && j < end){ //printf("seriesA[%d] : %f seriesB[%d] : %f\n",i,seriesA[i],j, seriesB[j]); dist = pow(seriesA[i] - seriesB[j], 2); //printf("(%d,%d) - Distance %f\n",i,j, dist); float left = /*idx > 0 && j > 0*/ j > 0 ? distance[arrayIdx - 1] : INT_MAX; float top = arrayIdx - length >= 0 ? distance[arrayIdx - length] : INT_MAX; float diagonal = arrayIdx - length > 0 && j > 0 ? distance[arrayIdx - length - 1] : INT_MAX; diagonal = (i == 0 && j == 0) ? 
0 : diagonal; diagonal = min(diagonal,left); dist = dist + min(top,diagonal); distance[arrayIdx] = dist; //printf("%d %d %f %f %f Min Value : %f Final : %f\n",i,j, //left,top,diagonal, min(top,diagonal), distance[arrayIdx]); } } } __global__ void tempCheck(float* A, float* B, int length) { int i = 0; for(i = 0; i < length; i++){ printf("%f %f\n",A[i],B[i]); } } __global__ void LB_Keogh(float* test_data, float* train_data, int length, int window, int trainIdx, int testIdx, float* d_LB_dist){ int i = threadIdx.x, start_idx = 0, end_idx, j; extern __shared__ float output[]; if(i < length){ float* seriesA = test_data + (length * testIdx); float* seriesB = train_data + (length * trainIdx); float lower_bound = INT_MAX, upper_bound = INT_MIN, current; lower_bound = INT_MAX; upper_bound = INT_MIN; start_idx = i - window >= 0 ? i - window : 0; end_idx = min(i + window, length); for(j = start_idx; j < end_idx; j++){ if(seriesB[j] > upper_bound) upper_bound = seriesB[j]; if(seriesB[j] < lower_bound) lower_bound = seriesB[j]; } current = seriesA[i]; float val = 0; if (current>upper_bound) val = pow(current-upper_bound,2); else if (current<lower_bound) val = pow(current-lower_bound,2); output[i] =val; __syncthreads(); if (i == 0) { d_LB_dist[0] = 0; int k; for (k = 0; k < length; k++) { d_LB_dist[0] += output[k]; } d_LB_dist[0] = sqrt(d_LB_dist[0]); } } } //////////////////////////////////////////////////////////////// float DTWDistance_CPU(float* seriesA, float* seriesB, int length, int window){ int i = -1, j = -1; float* distances = (float*) malloc(length * length * sizeof(float)); float dist; //memset(distances,FLT_MAX ,(length) * (length)); uint64_t k; for(k = 0; k < length * length; k++){ distances[k] = FLT_MAX; } for(i = 0; i < length; i++){ int start = max(0,i - window); int end = min(length, i + window); for(j = start; j < end; j++){ //printf("seriesA[%d] : %f seriesB[%d] : %f\n",i,seriesA[i],j, seriesB[j]); dist = pow(seriesA[i] - seriesB[j], 2); //printf("(%d,%d) - Distance %f\n",i,j, dist); int idx = (i * length) + j; float left = /*idx > 0 && j > 0*/ j > start ? distances[idx - 1] : INT_MAX; float top = idx - length >= 0 ? distances[idx - length] : INT_MAX; float diagonal = idx - length > 0 && j > 0 ? distances[idx - length - 1] : INT_MAX; diagonal = (i == 0 && j == 0) ? 0 : diagonal; diagonal = min(diagonal,left); distances[idx] = dist + min(top,diagonal); //printf("%d %d %f %f %f Min Value : %f Final : %f\n",i,j, //left,top,diagonal, min(top,diagonal), distances[idx]); } } /*for(i = 0 ; i < length; i++){ for(j = 0; j < length; j++){ int idx = (i * length) + j; printf("%d %d %f | ",i,j, distances[idx]); } printf("\n"); }*/ float result = sqrt(distances[length * length - 1]); return result; } float LB_Keogh_CPU(float* seriesA, float* seriesB, int length, int window){ float LB_sum = 0; int i = 0, start_idx = 0, end_idx, j; float lower_bound = INT_MAX, upper_bound = INT_MIN, current; for(;i < length; i++){ lower_bound = INT_MAX; upper_bound = INT_MIN; start_idx = i - window >= 0 ? 
i - window : 0; end_idx = min(i + window, length); for(j = start_idx; j < end_idx; j++){ if(seriesB[j] > upper_bound) upper_bound = seriesB[j]; if(seriesB[j] < lower_bound) lower_bound = seriesB[j]; } current = seriesA[i]; //printf("%f %d %f %f %f\n", seriesA[i], i, lower_bound, upper_bound, LB_sum); float val = 0; if (current>upper_bound) val = pow(current-upper_bound,2); else if (current<lower_bound) val = pow(current-lower_bound,2); //printf("%d %f\n",i,val); LB_sum +=val; } return sqrt(LB_sum); } int main(int argc, char * argv[]) { clock_t start, end; fprintf(stderr, "Initializing ... \n"); char* inputFileName = argv[1]; int isDefault = 0; if(!inputFileName){ printf("No test file provided. Using default file : SonyAIBORobotSurface_TRAIN\n"); inputFileName = "SonyAIBORobotSurface_TRAIN"; isDefault = 1; } if(argc > 1){ noOfTimeSeries = atoi(argv[2]); }else{ if(isDefault == 0){ printf("Number of time series not provided. Exiting\n"); exit(0); } } if(argc > 2){ lenOfTimeSeries = atoi(argv[3]); } else{ if(isDefault == 0){ printf("Length of time series not provided. Exiting\n"); exit(0); } } uint64_t train_size = noOfTimeSeries * lenOfTimeSeries * sizeof(float); uint64_t test_size;// = noOfTestTimeSeries * lenOfTimeSeries * sizeof(float); //storage allocation for train data and train class labels float* train_data = (float*) malloc(train_size); int* train_class = (int *) malloc(noOfTimeSeries*sizeof(int)); //storage allocation for test data and test class labels float* test_data;// = (float*) malloc (test_size); int* test_class;// = (int *) malloc(noOfTestTimeSeries * sizeof(int)); //get training file printf("Reading train file\n"); //read training file readfile(inputFileName, train_data, train_class, noOfTimeSeries); printf("===================================================\n"); printf("Training File : %s\n",inputFileName); printf("Number of Time Series : %d\n",noOfTimeSeries); printf("Length of Time Series : %d\n",lenOfTimeSeries); int window_size = lenOfTimeSeries / 10; int LB_Keogh_param = 5; // If Testing File is provided if(argc > 5 || isDefault == 1){ char* testFileName; if(isDefault == 0){ testFileName = argv[4]; noOfTestTimeSeries = atoi(argv[5]); }else{ testFileName = "SonyAIBORobotSurface_TEST"; } if(argc > 6){ window_size = atoi(argv[6]); } if(argc > 7){ LB_Keogh_param = atoi(argv[7]); } printf("-----------------------------------------------------\n"); //get testing file printf("Reading test file\n"); test_size = noOfTestTimeSeries * lenOfTimeSeries * sizeof(float); test_data = (float*) malloc (test_size); test_class = (int *) malloc(noOfTestTimeSeries * sizeof(int)); //read test file readfile(testFileName, test_data, test_class, noOfTestTimeSeries); printf("Testing File : %s\n",testFileName); printf("Number of Time Series to validate: %d\n",noOfTestTimeSeries); printf("Window Size for kNN: %d\n",window_size); printf("LB Keogh Parameter: %d\n",LB_Keogh_param); } printf("===================================================\n"); //GPU number present in the system //int noOfGPUs; //checkCudaErrors(cudaGetDeviceCount(&noOfGPUs)); //printf("Total GPUs on System : %d\n", noOfGPUs); //checkCudaErrors(cudaSetDevice(0)); checkCudaErrors(cudaDeviceReset()); cudaProfilerStart(); int i = 0, j = 0, errorCount = 0; start = clock(); float* d_test_data; clock_t timer = clock(); checkCudaErrors(cudaMalloc((void**)&d_test_data, test_size)); checkCudaErrors(cudaMemcpy(d_test_data, test_data, test_size, cudaMemcpyHostToDevice)); printf("Time to upload test data %f\n", (double)(clock() - timer) / 
((double)CLOCKS_PER_SEC)); // Transfer train data float* d_train_data = 0; timer = clock(); checkCudaErrors(cudaMalloc((void**)&d_train_data, train_size)); checkCudaErrors(cudaMemcpy(d_train_data, train_data, train_size, cudaMemcpyHostToDevice)); printf("Time to upload train data %f\n", (double)(clock() - timer) / ((double)CLOCKS_PER_SEC)); int k; //float* d_norm_train_data = 0; //checkCudaErrors(cudaMalloc((void**)&d_norm_train_data, train_size)); // Normalize Train Series int threadsPerBlock = min((int)ceil(noOfTimeSeries/(float)32)*32,1024); int noOfBlocks = ceil((noOfTimeSeries)/(float)threadsPerBlock); //NormalizeTimeSeries<<<noOfBlocks, threadsPerBlock>>>(d_train_data, noOfTimeSeries, lenOfTimeSeries,d_norm_train_data); //checkCudaErrors(cudaDeviceSynchronize()); //checkCudaErrors(cudaMemcpy(d_train_data, d_norm_train_data, train_size, cudaMemcpyDeviceToDevice)); float* d_LB_dist; float min_dist, dist; //printf("Normalized Train Series\n"); //tempCheck<<<1,1>>>(d_norm_train_data,d_train_data,noOfTimeSeries * lenOfTimeSeries); int predictedIndex = 0; for(i = 0; i < noOfTestTimeSeries; i++){ min_dist = FLT_MAX; for(j = 0; j < noOfTimeSeries; j++){ float* LB_dist=(float*)malloc(sizeof(float)); checkCudaErrors(cudaMalloc((void**)&d_LB_dist, sizeof(float))); threadsPerBlock = min((int)ceil(lenOfTimeSeries/(float)32)*32,1024); LB_Keogh<<<1,threadsPerBlock,lenOfTimeSeries*sizeof(float)>>>(d_test_data,d_train_data,lenOfTimeSeries,LB_Keogh_param,j,i,d_LB_dist); checkCudaErrors(cudaMemcpy(LB_dist, d_LB_dist, sizeof(float), cudaMemcpyDeviceToHost)); checkCudaErrors(cudaFree(d_LB_dist)); //printf("LB_Keogh between %d and %d is %f\n",i,j,LB_dist[0]); float* test = test_data + (lenOfTimeSeries * i); float* train = train_data + (lenOfTimeSeries * j); if(LB_dist[0] < min_dist){ float* distances = (float*) malloc(lenOfTimeSeries * lenOfTimeSeries * sizeof(float)); float* d_distances; checkCudaErrors(cudaMalloc((void**)&d_distances, lenOfTimeSeries * lenOfTimeSeries * sizeof(float))); // init to FLT_MAX checkCudaErrors(cudaMemset(d_distances,FLT_MAX, lenOfTimeSeries * lenOfTimeSeries * sizeof(float))); for(k = 0; k < (2 * lenOfTimeSeries - 1); k++){ if(k < lenOfTimeSeries){ threadsPerBlock = k + 1; }else{ threadsPerBlock = (2 * lenOfTimeSeries) - k - 1; } int temp = threadsPerBlock; threadsPerBlock = min((int)ceil(threadsPerBlock/(float)32)*32,1024); noOfBlocks = ceil((temp)/(float)threadsPerBlock); DTWDistance<<<noOfBlocks,threadsPerBlock>>>(d_test_data,d_train_data,lenOfTimeSeries, window_size,temp,k,d_distances, j,i); cudaThreadSynchronize(); } checkCudaErrors(cudaMemcpy(distances, d_distances,lenOfTimeSeries * lenOfTimeSeries * sizeof(float), cudaMemcpyDeviceToHost)); checkCudaErrors(cudaFree(d_distances)); dist = distances[lenOfTimeSeries * lenOfTimeSeries - 1]; dist = sqrt(dist); free(distances); if(dist < min_dist){ min_dist = dist; predictedIndex = j; //printf("Min Distance between %d and %d is %f\n",i,j,dist); } } } // Check for stuff int predictedClass = train_class[predictedIndex]; int correctClass = test_class[i]; //printf("Test Case : %d Predicted Class :%d Actual Class : %d\n",i, predictedClass, correctClass); if(predictedClass != correctClass){ errorCount++; } } end = clock() - start; cudaProfilerStop(); checkCudaErrors(cudaDeviceReset()); cudaFree(d_LB_dist); cudaFree(d_train_data); cudaFree(d_test_data); double endtime = (double)end / ((double)CLOCKS_PER_SEC); printf("Total GPU Time %f\n", endtime); printf("GPU Accuracy is 
%f\n",(float)(noOfTestTimeSeries-errorCount)*(100.0/noOfTestTimeSeries)); //printf("--------------------------------------------\n"); //printf("CPU : \n"); errorCount = 0; start = clock(); for(i = 0; i < noOfTestTimeSeries; i++){ min_dist = FLT_MAX; for(j = 0; j < noOfTimeSeries; j++){ float* test = test_data + (lenOfTimeSeries * i); float* train = train_data + (lenOfTimeSeries * j); float LB_dist = LB_Keogh_CPU(test,train,lenOfTimeSeries,LB_Keogh_param); //printf("LB_Keogh between %d and %d is %f\n",i,j,LB_dist); if(LB_dist < min_dist){ dist = DTWDistance_CPU(test,train,lenOfTimeSeries, window_size); if(dist < min_dist){ min_dist = dist; predictedIndex = j; //printf("Min Distance between %d and %d is %f\n",i,j,dist); } } } // Check for stuff int predictedClass = train_class[predictedIndex]; int correctClass = test_class[i]; //printf("Test Case : %d Predicted Class :%d Actual Class : %d\n",i, predictedClass, correctClass); if(predictedClass != correctClass){ errorCount++; } } free(train_class); free(test_class); end = clock() - start; endtime = (double)end / ((double)CLOCKS_PER_SEC); //printf("Total CPU Time : %f\n", endtime); //printf("CPU Accuracy : %f\n",(float)(noOfTestTimeSeries-errorCount)*(100.0/noOfTestTimeSeries)); return 0; }
e4e58d82b0a290e0992a2e8d9e297bb0a127cfc0.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "device_launch_parameters.h" #include <stdio.h> #define NUM_BLOCKS 16 #define BLOCK_WIDTH 1 __global__ void hello() { printf("Hello world! I'm a thread in block %d\n",blockIdx.x); } __global__ void use_local_memory_GPU(float in) { float f; f = in; } __global__ void use_global_memory_GPU(float *arr) { arr[threadIdx.x] = 2.0f * float(threadIdx.x); } __global__ void use_shared_memory_GPU(float *arr) { int i, index = threadIdx.x; float average, sum = 0.0f; __shared__ float sh_arr[128]; sh_arr[index] = arr[index]; __syncthreads(); for (int i = 0; i < index; i++) { sum += sh_arr[i]; } average = sum/(index +1.0f); if(arr[index] > average){arr[index] = average;} sh_arr[index] = 3.14; } int main() { // launch the kernel //hello<<<NUM_BLOCKS, BLOCK_WIDTH>>>(); // force the printf()s to flush //hipDeviceSynchronize(); //use_local_memory_GPU<<<1,128>>>(2.0f); float h_arr[128]; for (int i = 0; i < 128; i++) h_arr[i] = float(i); float *d_arr; // allocate global memory on the device, place result in "d_arr" int array_bites = 128*sizeof(float); hipMalloc((void**)&d_arr, array_bites); // transfer the array to the GPU hipMemcpy(d_arr, h_arr, array_bites, hipMemcpyHostToDevice); // launch the kernal hipLaunchKernelGGL(( use_global_memory_GPU), dim3(1), dim3(128), 0, 0, d_arr); // copy back the result to the CPU hipMemcpy(h_arr, d_arr, array_bites, hipMemcpyDeviceToHost); for (int i = 0; i < 128; i++) printf("%f\n", h_arr[i]); hipLaunchKernelGGL(( use_shared_memory_GPU), dim3(1),dim3(128), 0, 0, d_arr); hipMemcpy(h_arr, d_arr, array_bites, hipMemcpyDeviceToHost); for (int i = 0; i < 128; i++) printf("%f\n", h_arr[i]); hipFree(d_arr); printf("That's all!\n"); return 0; }
e4e58d82b0a290e0992a2e8d9e297bb0a127cfc0.cu
#include "cuda_runtime.h" #include "device_launch_parameters.h" #include <stdio.h> #define NUM_BLOCKS 16 #define BLOCK_WIDTH 1 __global__ void hello() { printf("Hello world! I'm a thread in block %d\n",blockIdx.x); } __global__ void use_local_memory_GPU(float in) { float f; f = in; } __global__ void use_global_memory_GPU(float *arr) { arr[threadIdx.x] = 2.0f * float(threadIdx.x); } __global__ void use_shared_memory_GPU(float *arr) { int i, index = threadIdx.x; float average, sum = 0.0f; __shared__ float sh_arr[128]; sh_arr[index] = arr[index]; __syncthreads(); for (int i = 0; i < index; i++) { sum += sh_arr[i]; } average = sum/(index +1.0f); if(arr[index] > average){arr[index] = average;} sh_arr[index] = 3.14; } int main() { // launch the kernel //hello<<<NUM_BLOCKS, BLOCK_WIDTH>>>(); // force the printf()s to flush //cudaDeviceSynchronize(); //use_local_memory_GPU<<<1,128>>>(2.0f); float h_arr[128]; for (int i = 0; i < 128; i++) h_arr[i] = float(i); float *d_arr; // allocate global memory on the device, place result in "d_arr" int array_bites = 128*sizeof(float); cudaMalloc((void**)&d_arr, array_bites); // transfer the array to the GPU cudaMemcpy(d_arr, h_arr, array_bites, cudaMemcpyHostToDevice); // launch the kernal use_global_memory_GPU<<<1, 128>>>(d_arr); // copy back the result to the CPU cudaMemcpy(h_arr, d_arr, array_bites, cudaMemcpyDeviceToHost); for (int i = 0; i < 128; i++) printf("%f\n", h_arr[i]); use_shared_memory_GPU<<<1,128>>>(d_arr); cudaMemcpy(h_arr, d_arr, array_bites, cudaMemcpyDeviceToHost); for (int i = 0; i < 128; i++) printf("%f\n", h_arr[i]); cudaFree(d_arr); printf("That's all!\n"); return 0; }
d48d8c12667d1ed45f852c95dba5aa5ef3b93c53.hip
// !!! This is a file automatically generated by hipify!!! #include <hip/hip_runtime.h> #include <cmath> #include <cstdio> #include <iostream> #include <chrono> using namespace std; // Compute vector sum C = A + B void vecAddCPU(float *h_A, float* h_B, float* h_C, int n) { for (int i = 0; i < n; i++) { h_C[i] = h_A[i] + h_B[i]; } } // Compute vector sum C = A + B // Each thread performs onne pair-wise addition __global__ void vecAddKernel(float* A, float* B, float* C, int n) { int i = blockDim.x*blockIdx.x + threadIdx.x; if (i < n) { C[i] = A[i] + B[i]; } } void vecAddGPU(float* A, float* B, float* C, int n) { int size = n * sizeof(float); float *d_A, *d_B, *d_C; hipMalloc(&d_A, size); hipMemcpy(d_A, A, size, hipMemcpyHostToDevice); hipMalloc(&d_B, size); hipMemcpy(d_B, B, size, hipMemcpyHostToDevice); hipMalloc(&d_C, size); chrono::time_point<chrono::system_clock> GPU_Start, GPU_End; GPU_Start = chrono::system_clock::now(); hipLaunchKernelGGL(( vecAddKernel) , dim3(ceil(n/256.0)), dim3(256) , 0, 0, d_A, d_B, d_C, n); GPU_End = chrono::system_clock::now(); cout << "GPU: " << chrono::duration_cast<chrono::nanoseconds>(GPU_End - GPU_Start).count() << "ns." << endl; hipMemcpy(C, d_C, size, hipMemcpyDeviceToHost); //Free device memory for A, B, C hipFree(d_A); hipFree(d_B); hipFree(d_C); } int main() { // Memory allocation for h_A, h_B and h_C float *h_A, *h_B, *h_C; int n = 0; //int sum = 0; // I/O to read h_A and h_B, N elements each //cout << "Insert N:" << endl; //cin >> n; n = 100000000; h_A = (float*)malloc(n * sizeof(float)); h_B = (float*)malloc(n * sizeof(float)); h_C = (float*)malloc(n * sizeof(float)); for (int i = 0; i < n; i++) { h_A[i] = i; h_B[i] = i; h_C[i] = 0; } chrono::time_point<chrono::system_clock> CPU_Start, CPU_End; CPU_Start = chrono::system_clock::now(); vecAddCPU(h_A, h_B, h_C, n); CPU_End = chrono::system_clock::now(); cout << "CPU: " << chrono::duration_cast<chrono::nanoseconds>(CPU_End - CPU_Start).count() << "ns." << endl; //chrono::time_point<chrono::system_clock> GPU_Start, GPU_End; //GPU_Start = chrono::system_clock::now(); vecAddGPU(h_A, h_B, h_C, n); //GPU_End = chrono::system_clock::now(); //cout << "GPU: " << chrono::duration_cast<chrono::nanoseconds>(GPU_End - GPU_Start).count() << "ns." << endl; /* for (int i = 0; i < n; i++) { //cout << h_C[i] << " "; sum += h_C[i]; } cout << sum << endl; */ free(h_A); free(h_B); free(h_C); return 0; }
d48d8c12667d1ed45f852c95dba5aa5ef3b93c53.cu
#include <cuda.h> #include <cmath> #include <cstdio> #include <iostream> #include <chrono> using namespace std; // Compute vector sum C = A + B void vecAddCPU(float *h_A, float* h_B, float* h_C, int n) { for (int i = 0; i < n; i++) { h_C[i] = h_A[i] + h_B[i]; } } // Compute vector sum C = A + B // Each thread performs onne pair-wise addition __global__ void vecAddKernel(float* A, float* B, float* C, int n) { int i = blockDim.x*blockIdx.x + threadIdx.x; if (i < n) { C[i] = A[i] + B[i]; } } void vecAddGPU(float* A, float* B, float* C, int n) { int size = n * sizeof(float); float *d_A, *d_B, *d_C; cudaMalloc(&d_A, size); cudaMemcpy(d_A, A, size, cudaMemcpyHostToDevice); cudaMalloc(&d_B, size); cudaMemcpy(d_B, B, size, cudaMemcpyHostToDevice); cudaMalloc(&d_C, size); chrono::time_point<chrono::system_clock> GPU_Start, GPU_End; GPU_Start = chrono::system_clock::now(); vecAddKernel <<< ceil(n/256.0), 256 >>> (d_A, d_B, d_C, n); GPU_End = chrono::system_clock::now(); cout << "GPU: " << chrono::duration_cast<chrono::nanoseconds>(GPU_End - GPU_Start).count() << "ns." << endl; cudaMemcpy(C, d_C, size, cudaMemcpyDeviceToHost); //Free device memory for A, B, C cudaFree(d_A); cudaFree(d_B); cudaFree(d_C); } int main() { // Memory allocation for h_A, h_B and h_C float *h_A, *h_B, *h_C; int n = 0; //int sum = 0; // I/O to read h_A and h_B, N elements each //cout << "Insert N:" << endl; //cin >> n; n = 100000000; h_A = (float*)malloc(n * sizeof(float)); h_B = (float*)malloc(n * sizeof(float)); h_C = (float*)malloc(n * sizeof(float)); for (int i = 0; i < n; i++) { h_A[i] = i; h_B[i] = i; h_C[i] = 0; } chrono::time_point<chrono::system_clock> CPU_Start, CPU_End; CPU_Start = chrono::system_clock::now(); vecAddCPU(h_A, h_B, h_C, n); CPU_End = chrono::system_clock::now(); cout << "CPU: " << chrono::duration_cast<chrono::nanoseconds>(CPU_End - CPU_Start).count() << "ns." << endl; //chrono::time_point<chrono::system_clock> GPU_Start, GPU_End; //GPU_Start = chrono::system_clock::now(); vecAddGPU(h_A, h_B, h_C, n); //GPU_End = chrono::system_clock::now(); //cout << "GPU: " << chrono::duration_cast<chrono::nanoseconds>(GPU_End - GPU_Start).count() << "ns." << endl; /* for (int i = 0; i < n; i++) { //cout << h_C[i] << " "; sum += h_C[i]; } cout << sum << endl; */ free(h_A); free(h_B); free(h_C); return 0; }
558e36408f05438aedad2d4ba2851fafa15f2643.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "fp_gpu.cuh" __global__ void fp_conv_pool(int idx,bool flag) { int i,j,k,l,m; i=threadIdx.x+blockDim.x*blockIdx.x; j=threadIdx.y+blockDim.y*blockIdx.y; __shared__ float tile[CONV_W_NUM][CONV_SIZE][CONV_SIZE]; if(i<ROW&&j<COL) { if(flag) _input[idx%N_STREAM][i][j]=train_image[idx][i][j]; else _input[idx%N_STREAM][i][j]=test_image[idx][i][j]; __syncthreads(); } if(i<CONV_W_NUM&&j<CONV_SIZE) { for(k=0;k<CONV_SIZE;k++) { tile[i][j][k]=0; for(l=0;l<CONV_W_SIZE;l++) for(m=0;m<CONV_W_SIZE;m++) tile[i][j][k]+=_input[idx%N_STREAM][j+l][k+m]*conv_w[i][l][m]; tile[i][j][k]+=conv_b[i]; tile[i][j][k]=sigmoid(tile[i][j][k]); } __syncthreads(); } if(i<CONV_W_NUM&&j<POOL_SIZE) { for(k=0;k<POOL_SIZE;k++) { float _max=tile[i][j*2][k*2]; _pool_pos[idx%N_STREAM][i][j][k]=0; if(tile[i][j*2][k*2+1]>_max) { _max=tile[i][j*2][k*2+1]; _pool_pos[idx%N_STREAM][i][j][k]=1; } if(tile[i][j*2+1][k*2]>_max) { _max=tile[i][j*2+1][k*2]; _pool_pos[idx%N_STREAM][i][j][k]=2; } if(tile[i][j*2+1][k*2+1]>_max) { _max=tile[i][j*2+1][k*2+1]; _pool_pos[idx%N_STREAM][i][j][k]=3; } _pool[idx%N_STREAM][i][j][k]=_max; } __syncthreads(); } } void fp_conv_pool_gpu(int idx,bool flag) { dim3 block(32,32); dim3 grid(1,1); hipLaunchKernelGGL(( fp_conv_pool), dim3(grid),dim3(block),0,stream[idx%N_STREAM], idx,flag); } __global__ void fp_fc_answer(int idx,bool flag) { int i,j,k,l; i=threadIdx.x+blockDim.x*blockIdx.x; __shared__ float tile1[FC1_SIZE]; __shared__ float tile2[FC2_SIZE]; __shared__ int tile3[FC2_SIZE]; if(i<FC1_SIZE) { tile1[i]=0; for(j=0;j<CONV_W_NUM;j++) for(k=0;k<POOL_SIZE;k++) for(l=0;l<POOL_SIZE;l++) tile1[i]+=_pool[idx%N_STREAM][j][k][l]*fc1_w[i][j][k][l]; tile1[i]+=fc1_b[i]; tile1[i]=sigmoid(tile1[i]); _fc1_a[idx%N_STREAM][i]=tile1[i]; __syncthreads(); } if(i<FC2_SIZE) { tile2[i]=0; for(j=0;j<FC1_SIZE;j++) tile2[i]+=tile1[j]*fc2_w[i][j]; tile2[i]+=fc2_b[i]; tile2[i]=sigmoid(tile2[i]); _fc2_a[idx%N_STREAM][i]=tile2[i]; if(flag) tile3[i]=(train_label[idx]==i)?1:0; else tile3[i]=(test_label[idx]==i)?1:0; __syncthreads(); } if(i==0) { float _max=tile2[0]; int max_pos=0; for(i=0;i<FC2_SIZE;i++) { if(_max<tile2[i]) { _max=tile2[i]; max_pos=i; } } if(tile3[max_pos]) atomicAdd(&correct_cnt,1); for(i=0;i<FC2_SIZE;i++) { _C[idx%N_STREAM][i]=tile2[i]-tile3[i]; atomicExch(&avg_error,avg_error+_C[idx%N_STREAM][i]*_C[idx%N_STREAM][i]*0.5); } } } void fp_fc_answer_gpu(int idx,bool flag) { dim3 block(64); dim3 grid(1); hipLaunchKernelGGL(( fp_fc_answer), dim3(grid),dim3(block),0,stream[idx%N_STREAM], idx,flag); }
558e36408f05438aedad2d4ba2851fafa15f2643.cu
#include "fp_gpu.cuh" __global__ void fp_conv_pool(int idx,bool flag) { int i,j,k,l,m; i=threadIdx.x+blockDim.x*blockIdx.x; j=threadIdx.y+blockDim.y*blockIdx.y; __shared__ float tile[CONV_W_NUM][CONV_SIZE][CONV_SIZE]; if(i<ROW&&j<COL) { if(flag) _input[idx%N_STREAM][i][j]=train_image[idx][i][j]; else _input[idx%N_STREAM][i][j]=test_image[idx][i][j]; __syncthreads(); } if(i<CONV_W_NUM&&j<CONV_SIZE) { for(k=0;k<CONV_SIZE;k++) { tile[i][j][k]=0; for(l=0;l<CONV_W_SIZE;l++) for(m=0;m<CONV_W_SIZE;m++) tile[i][j][k]+=_input[idx%N_STREAM][j+l][k+m]*conv_w[i][l][m]; tile[i][j][k]+=conv_b[i]; tile[i][j][k]=sigmoid(tile[i][j][k]); } __syncthreads(); } if(i<CONV_W_NUM&&j<POOL_SIZE) { for(k=0;k<POOL_SIZE;k++) { float _max=tile[i][j*2][k*2]; _pool_pos[idx%N_STREAM][i][j][k]=0; if(tile[i][j*2][k*2+1]>_max) { _max=tile[i][j*2][k*2+1]; _pool_pos[idx%N_STREAM][i][j][k]=1; } if(tile[i][j*2+1][k*2]>_max) { _max=tile[i][j*2+1][k*2]; _pool_pos[idx%N_STREAM][i][j][k]=2; } if(tile[i][j*2+1][k*2+1]>_max) { _max=tile[i][j*2+1][k*2+1]; _pool_pos[idx%N_STREAM][i][j][k]=3; } _pool[idx%N_STREAM][i][j][k]=_max; } __syncthreads(); } } void fp_conv_pool_gpu(int idx,bool flag) { dim3 block(32,32); dim3 grid(1,1); fp_conv_pool<<<grid,block,0,stream[idx%N_STREAM]>>>(idx,flag); } __global__ void fp_fc_answer(int idx,bool flag) { int i,j,k,l; i=threadIdx.x+blockDim.x*blockIdx.x; __shared__ float tile1[FC1_SIZE]; __shared__ float tile2[FC2_SIZE]; __shared__ int tile3[FC2_SIZE]; if(i<FC1_SIZE) { tile1[i]=0; for(j=0;j<CONV_W_NUM;j++) for(k=0;k<POOL_SIZE;k++) for(l=0;l<POOL_SIZE;l++) tile1[i]+=_pool[idx%N_STREAM][j][k][l]*fc1_w[i][j][k][l]; tile1[i]+=fc1_b[i]; tile1[i]=sigmoid(tile1[i]); _fc1_a[idx%N_STREAM][i]=tile1[i]; __syncthreads(); } if(i<FC2_SIZE) { tile2[i]=0; for(j=0;j<FC1_SIZE;j++) tile2[i]+=tile1[j]*fc2_w[i][j]; tile2[i]+=fc2_b[i]; tile2[i]=sigmoid(tile2[i]); _fc2_a[idx%N_STREAM][i]=tile2[i]; if(flag) tile3[i]=(train_label[idx]==i)?1:0; else tile3[i]=(test_label[idx]==i)?1:0; __syncthreads(); } if(i==0) { float _max=tile2[0]; int max_pos=0; for(i=0;i<FC2_SIZE;i++) { if(_max<tile2[i]) { _max=tile2[i]; max_pos=i; } } if(tile3[max_pos]) atomicAdd(&correct_cnt,1); for(i=0;i<FC2_SIZE;i++) { _C[idx%N_STREAM][i]=tile2[i]-tile3[i]; atomicExch(&avg_error,avg_error+_C[idx%N_STREAM][i]*_C[idx%N_STREAM][i]*0.5); } } } void fp_fc_answer_gpu(int idx,bool flag) { dim3 block(64); dim3 grid(1); fp_fc_answer<<<grid,block,0,stream[idx%N_STREAM]>>>(idx,flag); }
192b4ebe5c266d97487e250cdfa1ce08b6280aea.hip
// !!! This is a file automatically generated by hipify!!! #include "heat3d.h" #define checkCuda(error) __checkCuda(error, __FILE__, __LINE__) //////////////////////////////////////////////////////////////////////////////// // A method for checking error in CUDA calls //////////////////////////////////////////////////////////////////////////////// inline void __checkCuda(hipError_t error, const char *file, const int line) { #if defined(DEBUG) || defined(_DEBUG) if (error != hipSuccess) { printf("checkCuda error at %s:%i: %s\n", file, line, hipGetErrorString(hipGetLastError())); exit(-1); } #endif return; } //////////////////////////////////////////////////////////////////////////////// // Checks if ECC is enabled on the devices(s) //////////////////////////////////////////////////////////////////////////////// extern "C" void ECCCheck(int number_of_devices) { hipDeviceProp_t properties; for (int i = 0; i < number_of_devices; i++) { checkCuda(hipSetDevice(i)); checkCuda(hipGetDeviceProperties(&properties, i)); if (properties.ECCEnabled == 1) { printf("ECC is turned on for device #%d\n", i); } else { printf("ECC is turned off for device #%d\n", i); } } } //////////////////////////////////////////////////////////////////////////////// // Computes the thread block size //////////////////////////////////////////////////////////////////////////////// extern "C" int getBlock(int n, int block) { return (n+2)/block + ((n+2)%block == 0?0:1); }
192b4ebe5c266d97487e250cdfa1ce08b6280aea.cu
#include "heat3d.h" #define checkCuda(error) __checkCuda(error, __FILE__, __LINE__) //////////////////////////////////////////////////////////////////////////////// // A method for checking error in CUDA calls //////////////////////////////////////////////////////////////////////////////// inline void __checkCuda(cudaError_t error, const char *file, const int line) { #if defined(DEBUG) || defined(_DEBUG) if (error != cudaSuccess) { printf("checkCuda error at %s:%i: %s\n", file, line, cudaGetErrorString(cudaGetLastError())); exit(-1); } #endif return; } //////////////////////////////////////////////////////////////////////////////// // Checks if ECC is enabled on the devices(s) //////////////////////////////////////////////////////////////////////////////// extern "C" void ECCCheck(int number_of_devices) { cudaDeviceProp properties; for (int i = 0; i < number_of_devices; i++) { checkCuda(cudaSetDevice(i)); checkCuda(cudaGetDeviceProperties(&properties, i)); if (properties.ECCEnabled == 1) { printf("ECC is turned on for device #%d\n", i); } else { printf("ECC is turned off for device #%d\n", i); } } } //////////////////////////////////////////////////////////////////////////////// // Computes the thread block size //////////////////////////////////////////////////////////////////////////////// extern "C" int getBlock(int n, int block) { return (n+2)/block + ((n+2)%block == 0?0:1); }
4d7838e33340562f27bb43416d11b3fac18412ac.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // // auto-generated by ops.py // __constant__ int xdim0_multidim_reduce_kernel; int xdim0_multidim_reduce_kernel_h = -1; __constant__ int ydim0_multidim_reduce_kernel; int ydim0_multidim_reduce_kernel_h = -1; #undef OPS_ACC_MD0 #define OPS_ACC_MD0(d,x,y) ((x)+(xdim0_multidim_reduce_kernel*(y))+(d)*xdim0_multidim_reduce_kernel*ydim0_multidim_reduce_kernel) //user function __device__ void multidim_reduce_kernel_gpu(const double *val, double *redu_dat1) { redu_dat1[0] = redu_dat1[0] + val[OPS_ACC_MD0(0,0,0)]; redu_dat1[1] = redu_dat1[1] + val[OPS_ACC_MD0(1,0,0)]; } #undef OPS_ACC_MD0 __global__ void ops_multidim_reduce_kernel( const double* __restrict arg0, double* __restrict arg1, int size0, int size1 ){ double arg1_l[2]; for (int d=0; d<2; d++) arg1_l[d] = ZERO_double; int idx_y = blockDim.y * blockIdx.y + threadIdx.y; int idx_x = blockDim.x * blockIdx.x + threadIdx.x; arg0 += idx_x * 1 + idx_y * 1 * xdim0_multidim_reduce_kernel; if (idx_x < size0 && idx_y < size1) { multidim_reduce_kernel_gpu(arg0, arg1_l); } for (int d=0; d<2; d++) ops_reduction_cuda<OPS_INC>(&arg1[d+(blockIdx.x + blockIdx.y*gridDim.x)*2],arg1_l[d]); } // host stub function #ifndef OPS_LAZY void ops_par_loop_multidim_reduce_kernel(char const *name, ops_block block, int dim, int* range, ops_arg arg0, ops_arg arg1) { #else void ops_par_loop_multidim_reduce_kernel_execute(ops_kernel_descriptor *desc) { int dim = desc->dim; int *range = desc->range; ops_arg arg0 = desc->args[0]; ops_arg arg1 = desc->args[1]; #endif //Timing double t1,t2,c1,c2; ops_arg args[2] = { arg0, arg1}; #if CHECKPOINTING && !OPS_LAZY if (!ops_checkpointing_before(args,2,range,2)) return; #endif if (OPS_diags > 1) { ops_timing_realloc(2,"multidim_reduce_kernel"); OPS_kernels[2].count++; ops_timers_core(&c1,&t1); } //compute locally allocated range for the sub-block int start[2]; int end[2]; #if OPS_MPI && !OPS_LAZY sub_block_list sb = OPS_sub_block_list[block->index]; if (!sb->owned) return; for ( int n=0; n<2; n++ ){ start[n] = sb->decomp_disp[n];end[n] = sb->decomp_disp[n]+sb->decomp_size[n]; if (start[n] >= range[2*n]) { start[n] = 0; } else { start[n] = range[2*n] - start[n]; } if (sb->id_m[n]==MPI_PROC_NULL && range[2*n] < 0) start[n] = range[2*n]; if (end[n] >= range[2*n+1]) { end[n] = range[2*n+1] - sb->decomp_disp[n]; } else { end[n] = sb->decomp_size[n]; } if (sb->id_p[n]==MPI_PROC_NULL && (range[2*n+1] > sb->decomp_disp[n]+sb->decomp_size[n])) end[n] += (range[2*n+1]-sb->decomp_disp[n]-sb->decomp_size[n]); } #else for ( int n=0; n<2; n++ ){ start[n] = range[2*n];end[n] = range[2*n+1]; } #endif int x_size = MAX(0,end[0]-start[0]); int y_size = MAX(0,end[1]-start[1]); int xdim0 = args[0].dat->size[0]; int ydim0 = args[0].dat->size[1]; if (xdim0 != xdim0_multidim_reduce_kernel_h || ydim0 != ydim0_multidim_reduce_kernel_h) { hipMemcpyToSymbol( xdim0_multidim_reduce_kernel, &xdim0, sizeof(int) ); xdim0_multidim_reduce_kernel_h = xdim0; hipMemcpyToSymbol( ydim0_multidim_reduce_kernel, &ydim0, sizeof(int) ); ydim0_multidim_reduce_kernel_h = ydim0; } #ifdef OPS_LAZY ops_block block = desc->block; #endif #ifdef OPS_MPI double *arg1h = (double *)(((ops_reduction)args[1].data)->data + ((ops_reduction)args[1].data)->size * block->index); #else double *arg1h = (double *)(((ops_reduction)args[1].data)->data); #endif dim3 grid( (x_size-1)/OPS_block_size_x+ 1, (y_size-1)/OPS_block_size_y + 1, 1); dim3 tblock(OPS_block_size_x,OPS_block_size_y,OPS_block_size_z); int nblocks 
= ((x_size-1)/OPS_block_size_x+ 1)*((y_size-1)/OPS_block_size_y + 1); int maxblocks = nblocks; int reduct_bytes = 0; int reduct_size = 0; reduct_bytes += ROUND_UP(maxblocks*2*sizeof(double)); reduct_size = MAX(reduct_size,sizeof(double)*2); reallocReductArrays(reduct_bytes); reduct_bytes = 0; arg1.data = OPS_reduct_h + reduct_bytes; arg1.data_d = OPS_reduct_d + reduct_bytes; for (int b=0; b<maxblocks; b++) for (int d=0; d<2; d++) ((double *)arg1.data)[d+b*2] = ZERO_double; reduct_bytes += ROUND_UP(maxblocks*2*sizeof(double)); mvReductArraysToDevice(reduct_bytes); int dat0 = (OPS_soa ? args[0].dat->type_size : args[0].dat->elem_size); char *p_a[2]; //set up initial pointers int base0 = args[0].dat->base_offset + dat0 * 1 * (start[0] * args[0].stencil->stride[0]); base0 = base0+ dat0 * args[0].dat->size[0] * (start[1] * args[0].stencil->stride[1]); p_a[0] = (char *)args[0].data_d + base0; #ifndef OPS_LAZY ops_H_D_exchanges_device(args, 2); ops_halo_exchanges(args,2,range); #endif if (OPS_diags > 1) { ops_timers_core(&c2,&t2); OPS_kernels[2].mpi_time += t2-t1; } int nshared = 0; int nthread = OPS_block_size_x*OPS_block_size_y*OPS_block_size_z; nshared = MAX(nshared,sizeof(double)*2); nshared = MAX(nshared*nthread,reduct_size*nthread); //call kernel wrapper function, passing in pointers to data if (x_size > 0 && y_size > 0) hipLaunchKernelGGL(( ops_multidim_reduce_kernel), dim3(grid), dim3(tblock), nshared , 0, (double *)p_a[0], (double *)arg1.data_d,x_size, y_size); cutilSafeCall(hipGetLastError()); mvReductArraysToHost(reduct_bytes); for ( int b=0; b<maxblocks; b++ ){ for ( int d=0; d<2; d++ ){ arg1h[d] = arg1h[d] + ((double *)arg1.data)[d+b*2]; } } arg1.data = (char *)arg1h; if (OPS_diags>1) { cutilSafeCall(hipDeviceSynchronize()); ops_timers_core(&c1,&t1); OPS_kernels[2].time += t1-t2; } #ifndef OPS_LAZY ops_set_dirtybit_device(args, 2); #endif if (OPS_diags > 1) { //Update kernel record ops_timers_core(&c2,&t2); OPS_kernels[2].mpi_time += t2-t1; OPS_kernels[2].transfer += ops_compute_transfer(dim, start, end, &arg0); } } #ifdef OPS_LAZY void ops_par_loop_multidim_reduce_kernel(char const *name, ops_block block, int dim, int* range, ops_arg arg0, ops_arg arg1) { ops_kernel_descriptor *desc = (ops_kernel_descriptor *)malloc(sizeof(ops_kernel_descriptor)); desc->name = name; desc->block = block; desc->dim = dim; desc->device = 1; desc->index = 2; desc->hash = 5381; desc->hash = ((desc->hash << 5) + desc->hash) + 2; for ( int i=0; i<4; i++ ){ desc->range[i] = range[i]; desc->orig_range[i] = range[i]; desc->hash = ((desc->hash << 5) + desc->hash) + range[i]; } desc->nargs = 2; desc->args = (ops_arg*)malloc(2*sizeof(ops_arg)); desc->args[0] = arg0; desc->hash = ((desc->hash << 5) + desc->hash) + arg0.dat->index; desc->args[1] = arg1; desc->function = ops_par_loop_multidim_reduce_kernel_execute; if (OPS_diags > 1) { ops_timing_realloc(2,"multidim_reduce_kernel"); } ops_enqueue_kernel(desc); } #endif
4d7838e33340562f27bb43416d11b3fac18412ac.cu
// // auto-generated by ops.py // __constant__ int xdim0_multidim_reduce_kernel; int xdim0_multidim_reduce_kernel_h = -1; __constant__ int ydim0_multidim_reduce_kernel; int ydim0_multidim_reduce_kernel_h = -1; #undef OPS_ACC_MD0 #define OPS_ACC_MD0(d,x,y) ((x)+(xdim0_multidim_reduce_kernel*(y))+(d)*xdim0_multidim_reduce_kernel*ydim0_multidim_reduce_kernel) //user function __device__ void multidim_reduce_kernel_gpu(const double *val, double *redu_dat1) { redu_dat1[0] = redu_dat1[0] + val[OPS_ACC_MD0(0,0,0)]; redu_dat1[1] = redu_dat1[1] + val[OPS_ACC_MD0(1,0,0)]; } #undef OPS_ACC_MD0 __global__ void ops_multidim_reduce_kernel( const double* __restrict arg0, double* __restrict arg1, int size0, int size1 ){ double arg1_l[2]; for (int d=0; d<2; d++) arg1_l[d] = ZERO_double; int idx_y = blockDim.y * blockIdx.y + threadIdx.y; int idx_x = blockDim.x * blockIdx.x + threadIdx.x; arg0 += idx_x * 1 + idx_y * 1 * xdim0_multidim_reduce_kernel; if (idx_x < size0 && idx_y < size1) { multidim_reduce_kernel_gpu(arg0, arg1_l); } for (int d=0; d<2; d++) ops_reduction_cuda<OPS_INC>(&arg1[d+(blockIdx.x + blockIdx.y*gridDim.x)*2],arg1_l[d]); } // host stub function #ifndef OPS_LAZY void ops_par_loop_multidim_reduce_kernel(char const *name, ops_block block, int dim, int* range, ops_arg arg0, ops_arg arg1) { #else void ops_par_loop_multidim_reduce_kernel_execute(ops_kernel_descriptor *desc) { int dim = desc->dim; int *range = desc->range; ops_arg arg0 = desc->args[0]; ops_arg arg1 = desc->args[1]; #endif //Timing double t1,t2,c1,c2; ops_arg args[2] = { arg0, arg1}; #if CHECKPOINTING && !OPS_LAZY if (!ops_checkpointing_before(args,2,range,2)) return; #endif if (OPS_diags > 1) { ops_timing_realloc(2,"multidim_reduce_kernel"); OPS_kernels[2].count++; ops_timers_core(&c1,&t1); } //compute locally allocated range for the sub-block int start[2]; int end[2]; #if OPS_MPI && !OPS_LAZY sub_block_list sb = OPS_sub_block_list[block->index]; if (!sb->owned) return; for ( int n=0; n<2; n++ ){ start[n] = sb->decomp_disp[n];end[n] = sb->decomp_disp[n]+sb->decomp_size[n]; if (start[n] >= range[2*n]) { start[n] = 0; } else { start[n] = range[2*n] - start[n]; } if (sb->id_m[n]==MPI_PROC_NULL && range[2*n] < 0) start[n] = range[2*n]; if (end[n] >= range[2*n+1]) { end[n] = range[2*n+1] - sb->decomp_disp[n]; } else { end[n] = sb->decomp_size[n]; } if (sb->id_p[n]==MPI_PROC_NULL && (range[2*n+1] > sb->decomp_disp[n]+sb->decomp_size[n])) end[n] += (range[2*n+1]-sb->decomp_disp[n]-sb->decomp_size[n]); } #else for ( int n=0; n<2; n++ ){ start[n] = range[2*n];end[n] = range[2*n+1]; } #endif int x_size = MAX(0,end[0]-start[0]); int y_size = MAX(0,end[1]-start[1]); int xdim0 = args[0].dat->size[0]; int ydim0 = args[0].dat->size[1]; if (xdim0 != xdim0_multidim_reduce_kernel_h || ydim0 != ydim0_multidim_reduce_kernel_h) { cudaMemcpyToSymbol( xdim0_multidim_reduce_kernel, &xdim0, sizeof(int) ); xdim0_multidim_reduce_kernel_h = xdim0; cudaMemcpyToSymbol( ydim0_multidim_reduce_kernel, &ydim0, sizeof(int) ); ydim0_multidim_reduce_kernel_h = ydim0; } #ifdef OPS_LAZY ops_block block = desc->block; #endif #ifdef OPS_MPI double *arg1h = (double *)(((ops_reduction)args[1].data)->data + ((ops_reduction)args[1].data)->size * block->index); #else double *arg1h = (double *)(((ops_reduction)args[1].data)->data); #endif dim3 grid( (x_size-1)/OPS_block_size_x+ 1, (y_size-1)/OPS_block_size_y + 1, 1); dim3 tblock(OPS_block_size_x,OPS_block_size_y,OPS_block_size_z); int nblocks = ((x_size-1)/OPS_block_size_x+ 1)*((y_size-1)/OPS_block_size_y + 1); int maxblocks = 
nblocks; int reduct_bytes = 0; int reduct_size = 0; reduct_bytes += ROUND_UP(maxblocks*2*sizeof(double)); reduct_size = MAX(reduct_size,sizeof(double)*2); reallocReductArrays(reduct_bytes); reduct_bytes = 0; arg1.data = OPS_reduct_h + reduct_bytes; arg1.data_d = OPS_reduct_d + reduct_bytes; for (int b=0; b<maxblocks; b++) for (int d=0; d<2; d++) ((double *)arg1.data)[d+b*2] = ZERO_double; reduct_bytes += ROUND_UP(maxblocks*2*sizeof(double)); mvReductArraysToDevice(reduct_bytes); int dat0 = (OPS_soa ? args[0].dat->type_size : args[0].dat->elem_size); char *p_a[2]; //set up initial pointers int base0 = args[0].dat->base_offset + dat0 * 1 * (start[0] * args[0].stencil->stride[0]); base0 = base0+ dat0 * args[0].dat->size[0] * (start[1] * args[0].stencil->stride[1]); p_a[0] = (char *)args[0].data_d + base0; #ifndef OPS_LAZY ops_H_D_exchanges_device(args, 2); ops_halo_exchanges(args,2,range); #endif if (OPS_diags > 1) { ops_timers_core(&c2,&t2); OPS_kernels[2].mpi_time += t2-t1; } int nshared = 0; int nthread = OPS_block_size_x*OPS_block_size_y*OPS_block_size_z; nshared = MAX(nshared,sizeof(double)*2); nshared = MAX(nshared*nthread,reduct_size*nthread); //call kernel wrapper function, passing in pointers to data if (x_size > 0 && y_size > 0) ops_multidim_reduce_kernel<<<grid, tblock, nshared >>> ( (double *)p_a[0], (double *)arg1.data_d,x_size, y_size); cutilSafeCall(cudaGetLastError()); mvReductArraysToHost(reduct_bytes); for ( int b=0; b<maxblocks; b++ ){ for ( int d=0; d<2; d++ ){ arg1h[d] = arg1h[d] + ((double *)arg1.data)[d+b*2]; } } arg1.data = (char *)arg1h; if (OPS_diags>1) { cutilSafeCall(cudaDeviceSynchronize()); ops_timers_core(&c1,&t1); OPS_kernels[2].time += t1-t2; } #ifndef OPS_LAZY ops_set_dirtybit_device(args, 2); #endif if (OPS_diags > 1) { //Update kernel record ops_timers_core(&c2,&t2); OPS_kernels[2].mpi_time += t2-t1; OPS_kernels[2].transfer += ops_compute_transfer(dim, start, end, &arg0); } } #ifdef OPS_LAZY void ops_par_loop_multidim_reduce_kernel(char const *name, ops_block block, int dim, int* range, ops_arg arg0, ops_arg arg1) { ops_kernel_descriptor *desc = (ops_kernel_descriptor *)malloc(sizeof(ops_kernel_descriptor)); desc->name = name; desc->block = block; desc->dim = dim; desc->device = 1; desc->index = 2; desc->hash = 5381; desc->hash = ((desc->hash << 5) + desc->hash) + 2; for ( int i=0; i<4; i++ ){ desc->range[i] = range[i]; desc->orig_range[i] = range[i]; desc->hash = ((desc->hash << 5) + desc->hash) + range[i]; } desc->nargs = 2; desc->args = (ops_arg*)malloc(2*sizeof(ops_arg)); desc->args[0] = arg0; desc->hash = ((desc->hash << 5) + desc->hash) + arg0.dat->index; desc->args[1] = arg1; desc->function = ops_par_loop_multidim_reduce_kernel_execute; if (OPS_diags > 1) { ops_timing_realloc(2,"multidim_reduce_kernel"); } ops_enqueue_kernel(desc); } #endif
850dcd154dac37d38446a9e0dd9a56815cc36ef8.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include<math.h>
#include<stdio.h>

const double EPSILON = 1.0e-15;
const double a = 1.23;
const double b = 2.34;
const double c = 3.57;
// the kernel definition below takes N, so declare (and launch) it with N as well
void __global__ add(const double *x, const double *y, double *z, const int N);
void check(const double *z, const int N);

int main()
{
    const int N = 100000000;
    const int M = sizeof(double) * N;   // size in bytes
    // M is a byte count, so allocate with malloc (matching the free() calls below)
    // rather than "new double[M]", which would allocate M doubles.
    double *h_x = (double *) malloc(M);
    double *h_y = (double *) malloc(M);
    double *h_z = (double *) malloc(M);
    for (int n = 0; n < N; n++)
    {
        h_x[n] = a;
        h_y[n] = b;
    }

    double *d_x, *d_y, *d_z;
    hipMalloc((void **) &d_x, M);
    hipMalloc((void **) &d_y, M);
    hipMalloc((void **) &d_z, M);
    hipMemcpy(d_x, h_x, M, hipMemcpyHostToDevice);
    hipMemcpy(d_y, h_y, M, hipMemcpyHostToDevice);

    const int block_size = 128;
    // const int grid_size = N / block_size;       // N divides evenly by block_size
    const int grid_size = (N - 1) / block_size + 1; // N does not divide evenly by block_size
    hipLaunchKernelGGL(( add), dim3(grid_size), dim3(block_size), 0, 0, d_x, d_y, d_z, N);

    hipMemcpy(h_z, d_z, M, hipMemcpyDeviceToHost);
    check(h_z, N);

    free(h_x);
    free(h_y);
    free(h_z);
    hipFree(d_x);
    hipFree(d_y);
    hipFree(d_z);
    return 0;
}

void __global__ add(const double *x, const double *y, double *z, const int N)
{
    // single instruction, multiple threads: one array element per thread
    const int n = blockDim.x * blockIdx.x + threadIdx.x;
    if (n < N)
    {
        // avoid illegal (out-of-bounds) memory accesses in the last block
        z[n] = x[n] + y[n];
    }
}

void check(const double *z, const int N)
{
    bool has_error = false;
    for (int n = 0; n < N; n++)
    {
        if (fabs(z[n] - c) > EPSILON)
        {
            has_error = true;
        }
    }
    printf("%s\n", has_error ? "Has errors" : "No errors");
}
850dcd154dac37d38446a9e0dd9a56815cc36ef8.cu
#include<math.h>
#include<stdio.h>

const double EPSILON = 1.0e-15;
const double a = 1.23;
const double b = 2.34;
const double c = 3.57;
// the kernel definition below takes N, so declare (and launch) it with N as well
void __global__ add(const double *x, const double *y, double *z, const int N);
void check(const double *z, const int N);

int main()
{
    const int N = 100000000;
    const int M = sizeof(double) * N;   // size in bytes
    // M is a byte count, so allocate with malloc (matching the free() calls below)
    // rather than "new double[M]", which would allocate M doubles.
    double *h_x = (double *) malloc(M);
    double *h_y = (double *) malloc(M);
    double *h_z = (double *) malloc(M);
    for (int n = 0; n < N; n++)
    {
        h_x[n] = a;
        h_y[n] = b;
    }

    double *d_x, *d_y, *d_z;
    cudaMalloc((void **) &d_x, M);
    cudaMalloc((void **) &d_y, M);
    cudaMalloc((void **) &d_z, M);
    cudaMemcpy(d_x, h_x, M, cudaMemcpyHostToDevice);
    cudaMemcpy(d_y, h_y, M, cudaMemcpyHostToDevice);

    const int block_size = 128;
    // const int grid_size = N / block_size;       // N divides evenly by block_size
    const int grid_size = (N - 1) / block_size + 1; // N does not divide evenly by block_size
    add<<<grid_size, block_size>>>(d_x, d_y, d_z, N);

    cudaMemcpy(h_z, d_z, M, cudaMemcpyDeviceToHost);
    check(h_z, N);

    free(h_x);
    free(h_y);
    free(h_z);
    cudaFree(d_x);
    cudaFree(d_y);
    cudaFree(d_z);
    return 0;
}

void __global__ add(const double *x, const double *y, double *z, const int N)
{
    // single instruction, multiple threads: one array element per thread
    const int n = blockDim.x * blockIdx.x + threadIdx.x;
    if (n < N)
    {
        // avoid illegal (out-of-bounds) memory accesses in the last block
        z[n] = x[n] + y[n];
    }
}

void check(const double *z, const int N)
{
    bool has_error = false;
    for (int n = 0; n < N; n++)
    {
        if (fabs(z[n] - c) > EPSILON)
        {
            has_error = true;
        }
    }
    printf("%s\n", has_error ? "Has errors" : "No errors");
}
1873eba89aa4f765236eb55a0b6728ce8b12ce29.hip
// !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h>  // HIP runtime header needed for hipMalloc/hipMemcpy (original kept <cuda_runtime.h>)
#include <stdlib.h>

int main(){
    int ** ptr;
    ptr = (int**)malloc(10*sizeof(int*));
    int ** tmp;
    hipMalloc((void**)&tmp, 10*sizeof(int*));   // was "&temp", which is never declared
    hipMemcpy(ptr, tmp, 10*sizeof(int*), hipMemcpyDeviceToHost);
    hipFree(tmp);
    free(ptr);
    return 0;
}
1873eba89aa4f765236eb55a0b6728ce8b12ce29.cu
#include<cuda_runtime.h>
#include<stdlib.h>

int main(){
    int ** ptr;
    ptr = (int**)malloc(10*sizeof(int*));
    int ** tmp;
    cudaMalloc((void**)&tmp, 10*sizeof(int*));   // was "&temp", which is never declared
    cudaMemcpy(ptr, tmp, 10*sizeof(int*), cudaMemcpyDeviceToHost);
    cudaFree(tmp);
    free(ptr);
    return 0;
}
daf97cd18d325bda6c10bcfb7af82a4c1381793f.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* * Copyright (c) 2023, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <defaults.hpp> #include <cuco/detail/utils.hpp> #include <cuco/hash_functions.cuh> #include <nvbench/nvbench.cuh> #include <thrust/device_vector.h> #include <cstdint> template <int32_t Words> struct large_key { constexpr __host__ __device__ large_key(int32_t seed) noexcept { #pragma unroll Words for (int32_t i = 0; i < Words; ++i) { data_[i] = seed; } } private: int32_t data_[Words]; }; template <int32_t BlockSize, typename Hasher, typename OutputIt> __global__ void hash_bench_kernel(Hasher hash, cuco::detail::index_type n, OutputIt out, bool materialize_result) { cuco::detail::index_type const gid = BlockSize * blockIdx.x + threadIdx.x; cuco::detail::index_type const loop_stride = gridDim.x * BlockSize; cuco::detail::index_type idx = gid; typename Hasher::result_type agg = 0; while (idx < n) { typename Hasher::argument_type key(idx); for (int32_t i = 0; i < 100; ++i) { // execute hash func 100 times agg += hash(key); } idx += loop_stride; } if (materialize_result) { out[gid] = agg; } } /** * @brief A benchmark evaluating performance of various hash functions */ template <typename Hash> void hash_eval(nvbench::state& state, nvbench::type_list<Hash>) { bool const materialize_result = false; constexpr auto block_size = 128; auto const num_keys = state.get_int64_or_default("NumInputs", cuco::benchmark::defaults::N * 10); auto const grid_size = SDIV(num_keys, block_size * 16); thrust::device_vector<typename Hash::result_type> hash_values((materialize_result) ? num_keys : 1); state.add_element_count(num_keys); state.exec([&](nvbench::launch& launch) { hipLaunchKernelGGL(( hash_bench_kernel<block_size>), dim3(grid_size), dim3(block_size), 0, launch.get_stream(), Hash{}, num_keys, hash_values.begin(), materialize_result); }); } NVBENCH_BENCH_TYPES( hash_eval, NVBENCH_TYPE_AXES(nvbench::type_list<cuco::murmurhash3_32<nvbench::int32_t>, cuco::murmurhash3_32<nvbench::int64_t>, cuco::murmurhash3_32<large_key<32>>, // 32*4bytes cuco::xxhash_32<nvbench::int32_t>, cuco::xxhash_32<nvbench::int64_t>, cuco::xxhash_32<large_key<32>>, cuco::xxhash_64<nvbench::int32_t>, cuco::xxhash_64<nvbench::int64_t>, cuco::xxhash_64<large_key<32>>, cuco::murmurhash3_fmix_32<nvbench::int32_t>, cuco::murmurhash3_fmix_64<nvbench::int64_t>>)) .set_name("hash_function_eval") .set_type_axes_names({"Hash"}) .set_max_noise(cuco::benchmark::defaults::MAX_NOISE);
daf97cd18d325bda6c10bcfb7af82a4c1381793f.cu
/* * Copyright (c) 2023, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <defaults.hpp> #include <cuco/detail/utils.hpp> #include <cuco/hash_functions.cuh> #include <nvbench/nvbench.cuh> #include <thrust/device_vector.h> #include <cstdint> template <int32_t Words> struct large_key { constexpr __host__ __device__ large_key(int32_t seed) noexcept { #pragma unroll Words for (int32_t i = 0; i < Words; ++i) { data_[i] = seed; } } private: int32_t data_[Words]; }; template <int32_t BlockSize, typename Hasher, typename OutputIt> __global__ void hash_bench_kernel(Hasher hash, cuco::detail::index_type n, OutputIt out, bool materialize_result) { cuco::detail::index_type const gid = BlockSize * blockIdx.x + threadIdx.x; cuco::detail::index_type const loop_stride = gridDim.x * BlockSize; cuco::detail::index_type idx = gid; typename Hasher::result_type agg = 0; while (idx < n) { typename Hasher::argument_type key(idx); for (int32_t i = 0; i < 100; ++i) { // execute hash func 100 times agg += hash(key); } idx += loop_stride; } if (materialize_result) { out[gid] = agg; } } /** * @brief A benchmark evaluating performance of various hash functions */ template <typename Hash> void hash_eval(nvbench::state& state, nvbench::type_list<Hash>) { bool const materialize_result = false; constexpr auto block_size = 128; auto const num_keys = state.get_int64_or_default("NumInputs", cuco::benchmark::defaults::N * 10); auto const grid_size = SDIV(num_keys, block_size * 16); thrust::device_vector<typename Hash::result_type> hash_values((materialize_result) ? num_keys : 1); state.add_element_count(num_keys); state.exec([&](nvbench::launch& launch) { hash_bench_kernel<block_size><<<grid_size, block_size, 0, launch.get_stream()>>>( Hash{}, num_keys, hash_values.begin(), materialize_result); }); } NVBENCH_BENCH_TYPES( hash_eval, NVBENCH_TYPE_AXES(nvbench::type_list<cuco::murmurhash3_32<nvbench::int32_t>, cuco::murmurhash3_32<nvbench::int64_t>, cuco::murmurhash3_32<large_key<32>>, // 32*4bytes cuco::xxhash_32<nvbench::int32_t>, cuco::xxhash_32<nvbench::int64_t>, cuco::xxhash_32<large_key<32>>, cuco::xxhash_64<nvbench::int32_t>, cuco::xxhash_64<nvbench::int64_t>, cuco::xxhash_64<large_key<32>>, cuco::murmurhash3_fmix_32<nvbench::int32_t>, cuco::murmurhash3_fmix_64<nvbench::int64_t>>)) .set_name("hash_function_eval") .set_type_axes_names({"Hash"}) .set_max_noise(cuco::benchmark::defaults::MAX_NOISE);
07aacefd8760ccaa35812bc4a90396b87264a4d6.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"

__global__ void transpose_v3(float* a,float* b, int n){
    int tx = threadIdx.x;
    int ty = threadIdx.y;
    int bx = blockIdx.x;
    int by = blockIdx.y;

    int i = bx*BX + tx;
    int j = by*BY + ty;

    __shared__ float tile[BY][BX+1]; //Very slight modification to avoid bank conflict in shared mem

    if(i >= n || j >= n) return;

    tile[ty][tx] = a[j*n+i];
    __syncthreads();

    i = by*BY + tx;
    j = bx*BX + ty;
    b[j*n+i] = tile[tx][ty];
}
07aacefd8760ccaa35812bc4a90396b87264a4d6.cu
#include "includes.h" __global__ void transpose_v3(float* a,float* b, int n){ int tx = threadIdx.x; int ty = threadIdx.y; int bx = blockIdx.x; int by = blockIdx.y; int i = bx*BX + tx; int j = by*BY + ty; __shared__ float tile[BY][BX+1]; //Very slight modification to avoid bank conflict in shared mem if(i >= n || j >= n) return; tile[ty][tx] = a[j*n+i]; __syncthreads(); i = by*BY + tx; j = bx*BX + ty; b[j*n+i] = tile[tx][ty]; }
4d074dabbe39c91e2ffa7781cbf11021d0b2746a.hip
// !!! This is a file automatically generated by hipify!!! /*------------------------------------------------------------------------- * * CUDA function for backrpojection using FDK weigts for CBCT * * * CODE by Ander Biguri * Optimized and modified by RB * --------------------------------------------------------------------------- * --------------------------------------------------------------------------- * Copyright (c) 2015, University of Bath and CERN- European Organization for * Nuclear Research * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its contributors * may be used to endorse or promote products derived from this software without * specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. 
* --------------------------------------------------------------------------- * * Contact: [email protected] * Codes : https://github.com/CERN/TIGRE * --------------------------------------------------------------------------- */ #define PI_2 1.57079632679489661923 #include <algorithm> #include <hip/hip_runtime_api.h> #include <hip/hip_runtime.h> #include "voxel_backprojection2.hpp" #include "mex.h" #include <math.h> // https://stackoverflow.com/questions/16282136/is-there-a-cuda-equivalent-of-perror #define cudaCheckErrors(msg) \ do { \ hipError_t __err = hipGetLastError(); \ if (__err != hipSuccess) { \ mexPrintf("%s \n",msg);\ mexErrMsgIdAndTxt("CBCT:CUDA:Atb",hipGetErrorString(__err));\ } \ } while (0) #define MAXTREADS 1024 /*GEOMETRY DEFINITION * * Detector plane, behind * |-----------------------------| * | | * | | * | | * | | * | +--------+ | * | / /| | * A Z | / / |*D | * | | +--------+ | | * | | | | | | * | | | *O | + | * *--->y | | | / | * / | | |/ | * V X | +--------+ | * |-----------------------------| * * *S * * * * * **/ texture<float, hipTextureType2DLayered , hipReadModeElementType> tex; __global__ void matrixConstantMultiply(const Geometry geo,float* image,float constant){ size_t idx = threadIdx.x + blockIdx.x * blockDim.x; for(; idx<geo.nVoxelX* geo.nVoxelY *geo.nVoxelZ; idx+=gridDim.x*blockDim.x) { image[idx]*=constant; } } //////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// // RB, 10/31/2016: Add constant memory arrays to store parameters for all projections to be analyzed during a single kernel call //////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// // The optimal values of two constants obtained by RB on NVIDIA Quadro K2200 (4 GB RAM, 640 CUDA cores) for 512^3 volume and 512^3 projections (512 proj, each 512 x 512) were: // PROJ_PER_KERNEL = 32 or 16 (very similar times) // VOXELS_PER_THREAD = 8 // Speedup of the entire FDK backprojection (not only kernel run, also memcpy etc.) was nearly 4x relative to the original (single projection, single voxel per thread) code. // (e.g. 16.2 s vs. ~62 s). const int PROJ_PER_KERNEL = 32; // Number of 2D projections to be analyzed by a single thread. This can be tweaked to see what works best. 32 was the optimal value in the paper by Zinsser and Keck. const int VOXELS_PER_THREAD = 8; // Number of voxels to be computed by s single thread. Can be tweaked to see what works best. 4 was the optimal value in the paper by Zinsser and Keck. // We have PROJ_PER_KERNEL projections and we need 6 parameters for each projection: // deltaX, deltaY, deltaZ, xyzOrigin, offOrig, offDetec // So we need to keep PROJ_PER_KERNEL*6 values in our deltas array FOR EACH CALL to our main kernel // (they will be updated in the main loop before each kernel call). 
__constant__ Point3D projParamsArray2Dev[7*PROJ_PER_KERNEL]; // Dev means it is on device // We also need a corresponding array on the host side to be filled before each kernel call, then copied to the device (array in constant memory above) Point3D projParamsArray2Host[7*PROJ_PER_KERNEL]; // Host means it is host memory // Now we also need to store sinAlpha and cosAlpha for each projection (two floats per projection) __constant__ float projSinCosArray2Dev[3*PROJ_PER_KERNEL]; float projSinCosArray2Host[3*PROJ_PER_KERNEL]; // //////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// // END RB, 10/31/2016: Add constant memory arrays to store parameters for all projections to be analyzed during a single kernel call //////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// //______________________________________________________________________________ // // Function: kernelPixelBackprojectionFDK // // Description: Main FDK backprojection kernel //______________________________________________________________________________ __global__ void kernelPixelBackprojection(const Geometry geo, float* image,const int currProjSetNumber, const int totalNoOfProjections) { // Old kernel call signature: //hipLaunchKernelGGL(( kernelPixelBackprojection), dim3(grid),dim3(block), 0, 0, geo,dimage,i,deltaX,deltaY,deltaZ,xyzOrigin,offOrig,offDetec,sinalpha,cosalpha); // We just read in most of the params from the constant memory instead of getting them from the param list. // This is because we now have MANY params, since single kernel processes more than one projection! /* __global__ void kernelPixelBackprojectionFDK(const Geometry geo, * float* image, * const int indAlpha, * const Point3D deltaX , * const Point3D deltaY, * const Point3D deltaZ, * const Point3D xyzOrigin, * const Point3D xyzOffset, * const Point3D uv0Offset, * const float sinalpha, * const float cosalpha){ */ unsigned long indY = blockIdx.y * blockDim.y + threadIdx.y; unsigned long indX = blockIdx.x * blockDim.x + threadIdx.x; // unsigned long startIndZ = blockIdx.z * blockDim.z + threadIdx.z; // This is only STARTING z index of the column of voxels that the thread will handle unsigned long startIndZ = blockIdx.z * VOXELS_PER_THREAD + threadIdx.z; // This is only STARTING z index of the column of voxels that the thread will handle //Make sure we dont go out of bounds if (indX>=geo.nVoxelX | indY>=geo.nVoxelY |startIndZ>=geo.nVoxelZ) return; // We'll keep a local auxiliary array of values of a column of voxels that this thread will update float voxelColumn[VOXELS_PER_THREAD]; // First we need to copy the curent 3D volume values from the column to our auxiliary array so that we can then // work on them (update them by computing values from multiple projections) locally - avoiding main memory reads/writes int colIdx; #pragma unroll for(colIdx=0; colIdx<VOXELS_PER_THREAD; colIdx++) { unsigned long indZ = startIndZ + colIdx; // If we are out of bounds, break the loop. The voxelColumn array will be updated partially, but it is OK, because we won't // be trying to copy the out of bounds values back to the 3D volume anyway (bounds checks will be done in the final loop where the updated values go back to the main volume) if(indZ>=geo.nVoxelZ) break; // break the loop. 
unsigned long long idx =indZ*geo.nVoxelX*geo.nVoxelY+indY*geo.nVoxelX + indX; voxelColumn[colIdx] = image[idx]; // Read the current volume value that we'll update by computing values from MULTIPLE projections (not just one) // We'll be updating the local (register) variable, avoiding reads/writes from the slow main memory. } // END copy 3D volume voxels to local array // Now iterate through projections #pragma unroll for(int projNumber=0; projNumber<PROJ_PER_KERNEL; projNumber++) { // Get the current parameters from parameter arrays in constant memory. int indAlpha = currProjSetNumber*PROJ_PER_KERNEL+projNumber; // This is the ABSOLUTE projection number in the projection array // Our currImageVal will be updated by hovewer many projections we had left in the "remainder" - that's OK. if(indAlpha>=totalNoOfProjections) break; Point3D deltaX = projParamsArray2Dev[7*projNumber]; // 6*projNumber because we have 6 Point3D values per projection Point3D deltaY = projParamsArray2Dev[7*projNumber+1]; Point3D deltaZ = projParamsArray2Dev[7*projNumber+2]; Point3D xyzOrigin = projParamsArray2Dev[7*projNumber+3]; Point3D xyzOffset = projParamsArray2Dev[7*projNumber+4]; Point3D uv0Offset = projParamsArray2Dev[7*projNumber+5]; Point3D S = projParamsArray2Dev[7*projNumber+6]; float sinalpha = projSinCosArray2Dev[3*projNumber]; // 2*projNumber because we have 2 float (sin or cos angle) values per projection float cosalpha = projSinCosArray2Dev[3*projNumber+1]; float COR = projSinCosArray2Dev[3*projNumber+2]; // Precomputations for the weights: //Real coords of Source // We already have S.x (geo.DSO), and S.y and S.z are always zero. we just need to rotate Point3D realS; realS.x= geo.DSO*cosalpha; realS.y=-geo.DSO*sinalpha; realS.z=0; // Now iterate through Z in our voxel column FOR A GIVEN PROJECTION #pragma unroll for(colIdx=0; colIdx<VOXELS_PER_THREAD; colIdx++) { unsigned long indZ = startIndZ + colIdx; // If we are out of bounds, break the loop. The voxelColumn array will be updated partially, but it is OK, because we won't // be trying to copy the out of bounds values anyway (bounds checks will be done in the final loop where the values go to the main volume) if(indZ>=geo.nVoxelZ) break; // break the loop. // "XYZ" in the scaled coordinate system of the current point. The image is rotated with the projection angles. Point3D P; P.x=(xyzOrigin.x+indX*deltaX.x+indY*deltaY.x+indZ*deltaZ.x); P.y=(xyzOrigin.y+indX*deltaX.y+indY*deltaY.y+indZ*deltaZ.y)-COR/geo.dDetecU; P.z=(xyzOrigin.z+indX*deltaX.z+indY*deltaY.z+indZ*deltaZ.z); // This is the vector defining the line from the source to the Voxel float vectX,vectY,vectZ; vectX=(P.x -S.x); vectY=(P.y -S.y); vectZ=(P.z -S.z); // Get the coordinates in the detector UV where the mid point of the voxel is projected. float t=(geo.DSO-geo.DSD /*-DOD*/ - S.x)/vectX; float y,z; y=vectY*t+S.y; z=vectZ*t+S.z; float u,v; u=y+geo.nDetecU/2; v=z+geo.nDetecV/2; float sample=tex2DLayered(tex, v , // u and v seem swaped, but this is due to the row/column major u , indAlpha); float weigth=0; // // // // IMPORTANT: The weights are almost 50% of the computational time. Is there a way of speeding this up?? // //Real coordinates of Voxel. Instead of reverting the tranformation, its less math (faster) to compute it from the indexes. 
Point3D realvoxel; realvoxel.x=-geo.sVoxelX/2+geo.dVoxelX/2 +indX*geo.dVoxelX +xyzOffset.x; realvoxel.y=-geo.sVoxelY/2+geo.dVoxelY/2 +indY*geo.dVoxelY +xyzOffset.y; realvoxel.z=-geo.sVoxelZ/2+geo.dVoxelZ/2 +indZ*geo.dVoxelZ +xyzOffset.z; // Real XYZ coordinates of Detector. Point3D realD, realDaux; // We know the index of the detector (u,v). Start from there. realDaux.x=-(geo.DSD-geo.DSO); realDaux.y=-geo.sDetecU/2+geo.dDetecU/2 + u*geo.dDetecU +uv0Offset.x; realD.z =-geo.sDetecV/2+geo.dDetecV/2 + v*geo.dDetecV +uv0Offset.y; //rotate the detector realD.x= realDaux.x*cosalpha + realDaux.y*sinalpha; //sin(-x)=-sin(x) , cos(-x)=cos(x) realD.y=-realDaux.x*sinalpha + realDaux.y*cosalpha; //sin(-x)=-sin(x) , cos(-x)=cos(x) float L=0,l=0; L = sqrt( (realS.x-realD.x)*(realS.x-realD.x)+ (realS.y-realD.y)*(realS.y-realD.y)+ (realD.z)*(realD.z)); // Sz=0 always. l = sqrt( (realS.x-realvoxel.x)*(realS.x-realvoxel.x) + (realS.y-realvoxel.y)*(realS.y-realvoxel.y) + (realS.z-realvoxel.z)*(realS.z-realvoxel.z)); weigth=L*L*L/(geo.DSD*l*l); // Get Value in the computed (U,V) and multiply by the corresponding weigth. // indAlpha is the ABSOLUTE number of projection in the projection array (NOT the current number of projection set!) voxelColumn[colIdx]+=sample* weigth; } // END iterating through column of voxels } // END iterating through multiple projections // And finally copy the updated local voxelColumn array back to our 3D volume (main memory) #pragma unroll for(colIdx=0; colIdx<VOXELS_PER_THREAD; colIdx++) { unsigned long indZ = startIndZ + colIdx; // If we are out of bounds, break the loop. The voxelColumn array will be updated partially, but it is OK, because we won't // be trying to copy the out of bounds values back to the 3D volume anyway (bounds checks will be done in the final loop where the values go to the main volume) if(indZ>=geo.nVoxelZ) break; // break the loop. unsigned long long idx =indZ*geo.nVoxelX*geo.nVoxelY+indY*geo.nVoxelX + indX; image[idx] = voxelColumn[colIdx]; // Read the current volume value that we'll update by computing values from MULTIPLE projections (not just one) // We'll be updating the local (register) variable, avoiding reads/writes from the slow main memory. // According to references (Papenhausen), doing = is better than +=, since += requires main memory read followed by a write. // We did all the reads into the local array at the BEGINNING of this kernel. According to Papenhausen, this type of read-write split is // better for avoiding memory congestion. 
} // END copy updated voxels from local array to our 3D volume } // END kernelPixelBackprojectionFDK //______________________________________________________________________________ // // Function: voxel_backprojection // // Description: Main host function for FDK backprojection (invokes the kernel) //______________________________________________________________________________ int voxel_backprojection2(float const * const projections, Geometry geo, float* result,float const * const alphas, int nalpha) { /* * Allocate texture memory on the device */ // copy data to CUDA memory hipArray *d_projectiondata = 0; const hipExtent extent = make_hipExtent(geo.nDetecV,geo.nDetecU,nalpha); hipChannelFormatDesc channelDesc = hipCreateChannelDesc<float>(); hipMalloc3DArray(&d_projectiondata, &channelDesc, extent,hipArrayLayered); cudaCheckErrors("hipMalloc3D error 3D tex"); hipMemcpy3DParms copyParams = { 0 }; copyParams.srcPtr = make_hipPitchedPtr((void*)projections, extent.width*sizeof(float), extent.width, extent.height); copyParams.dstArray = d_projectiondata; copyParams.extent = extent; copyParams.kind = hipMemcpyHostToDevice; hipMemcpy3D(&copyParams); cudaCheckErrors("hipMemcpy3D fail"); // Configure texture options tex.normalized = false; tex.filterMode = hipFilterModeLinear; tex.addressMode[0] = hipAddressModeBorder; tex.addressMode[1] = hipAddressModeBorder; tex.addressMode[2] = hipAddressModeBorder; hipBindTextureToArray(tex, d_projectiondata, channelDesc); cudaCheckErrors("3D texture memory bind fail"); // Allocate result image memory size_t num_bytes = geo.nVoxelX*geo.nVoxelY*geo.nVoxelZ * sizeof(float); float* dimage; hipMalloc((void**)&dimage, num_bytes); hipMemset(dimage,0,num_bytes); cudaCheckErrors("hipMalloc fail"); // If we are going to time bool timekernel=false; hipEvent_t start, stop; float elapsedTime; if (timekernel){ hipEventCreate(&start); hipEventRecord(start,0); } int divx,divy,divz; // RB: Use the optimal (in their tests) block size from paper by Zinsser and Keck (16 in x and 32 in y). // I tried different sizes and shapes of blocks (tiles), but it does not appear to significantly affect trhoughput, so // let's stick with the values from Zinsser and Keck. divx=16; divy=32; divz=VOXELS_PER_THREAD; // We now only have 32 x 16 threads per block (flat tile, see below), BUT each thread works on a Z column of VOXELS_PER_THREAD voxels, so we effectively need fewer blocks! dim3 grid((geo.nVoxelX+divx-1)/divx, (geo.nVoxelY+divy-1)/divy, (geo.nVoxelZ+divz-1)/divz); dim3 block(divx,divy,1); // Note that we have 1 in the Z size, not divz, since each thread works on a vertical set of VOXELS_PER_THREAD voxels (so we only need a "flat" tile of threads, with depth of 1) ////////////////////////////////////////////////////////////////////////////////////// // Main reconstruction loop: go through projections (rotation angles) and backproject ////////////////////////////////////////////////////////////////////////////////////// // Since we'll have multiple projections processed by a SINGLE kernel call, compute how many // kernel calls we'll need altogether. 
int noOfKernelCalls = (nalpha+PROJ_PER_KERNEL-1)/PROJ_PER_KERNEL; // We'll take care of bounds checking inside the loop if nalpha is not divisible by PROJ_PER_KERNEL for (unsigned int i=0; i<noOfKernelCalls; i++) { // Now we need to generate and copy all data for PROJ_PER_KERNEL projections to constant memory so that our kernel can use it int j; for(j=0; j<PROJ_PER_KERNEL; j++) { int currProjNumber=i*PROJ_PER_KERNEL+j; if(currProjNumber>=nalpha) break; // Exit the loop. Even when we leave the param arrays only partially filled, this is OK, since the kernel will check bounds anyway. Point3D deltaX,deltaY,deltaZ,xyzOrigin, offOrig, offDetec,source; float sinalpha,cosalpha; geo.alpha=-alphas[currProjNumber*3]; sinalpha=sin(geo.alpha); cosalpha=cos(geo.alpha); projSinCosArray2Host[3*j]=sinalpha; // 2*j because we have 2 float (sin or cos angle) values per projection projSinCosArray2Host[3*j+1]=cosalpha; projSinCosArray2Host[3*j+2]=geo.COR[currProjNumber]; computeDeltasCube(geo,geo.alpha,currProjNumber,&xyzOrigin,&deltaX,&deltaY,&deltaZ,&source); offOrig.x=geo.offOrigX[currProjNumber]; offOrig.y=geo.offOrigY[currProjNumber]; offOrig.y=geo.offOrigZ[currProjNumber]; offDetec.x=geo.offDetecU[currProjNumber]; offDetec.y=geo.offDetecV[currProjNumber]; projParamsArray2Host[7*j]=deltaX; // 7*j because we have 7 Point3D values per projection projParamsArray2Host[7*j+1]=deltaY; projParamsArray2Host[7*j+2]=deltaZ; projParamsArray2Host[7*j+3]=xyzOrigin; projParamsArray2Host[7*j+4]=offOrig; projParamsArray2Host[7*j+5]=offDetec; projParamsArray2Host[7*j+6]=source; } // END for (preparing params for kernel call) // Copy the prepared parameter arrays to constant memory to make it available for the kernel hipMemcpyToSymbol(projSinCosArray2Dev, projSinCosArray2Host, sizeof(float)*3*PROJ_PER_KERNEL); hipMemcpyToSymbol(projParamsArray2Dev, projParamsArray2Host, sizeof(Point3D)*7*PROJ_PER_KERNEL); hipLaunchKernelGGL(( kernelPixelBackprojection), dim3(grid),dim3(block), 0, 0, geo,dimage,i,nalpha); cudaCheckErrors("Kernel fail"); } // END for hipLaunchKernelGGL(( matrixConstantMultiply), dim3(60),dim3(MAXTREADS), 0, 0, geo,dimage,geo.dVoxelX*geo.dVoxelY*geo.dVoxelZ/(geo.dDetecU*geo.dDetecV)); ////////////////////////////////////////////////////////////////////////////////////// // END Main reconstruction loop: go through projections (rotation angles) and backproject ////////////////////////////////////////////////////////////////////////////////////// if (timekernel) { hipEventCreate(&stop); hipEventRecord(stop,0); hipEventSynchronize(stop); hipEventElapsedTime(&elapsedTime, start,stop); mexPrintf("%f\n" ,elapsedTime); cudaCheckErrors("cuda Timing fail"); } hipMemcpy(result, dimage, num_bytes, hipMemcpyDeviceToHost); cudaCheckErrors("hipMemcpy result fail"); hipUnbindTexture(tex); cudaCheckErrors("Unbind fail"); hipFree(dimage); hipFreeArray(d_projectiondata); cudaCheckErrors("hipFree d_imagedata fail"); hipDeviceReset(); return 0; } // END voxel_backprojection //______________________________________________________________________________ // // Function: computeDeltasCube // // Description: Computes relative increments for each projection (volume rotation). // Increments get passed to the backprojection kernel. 
//______________________________________________________________________________ #ifndef BACKPROJECTION_HPP void computeDeltasCube(Geometry geo, float alpha,int i, Point3D* xyzorigin, Point3D* deltaX, Point3D* deltaY, Point3D* deltaZ,Point3D* S) { Point3D P0, Px0,Py0,Pz0, source; // Get coords of Img(0,0,0) P0.x=-(geo.sVoxelX/2-geo.dVoxelX/2)+geo.offOrigX[i]; P0.y=-(geo.sVoxelY/2-geo.dVoxelY/2)+geo.offOrigY[i]; P0.z=-(geo.sVoxelZ/2-geo.dVoxelZ/2)+geo.offOrigZ[i]; // Get coors from next voxel in each direction Px0.x=P0.x+geo.dVoxelX; Py0.x=P0.x; Pz0.x=P0.x; Px0.y=P0.y; Py0.y=P0.y+geo.dVoxelY; Pz0.y=P0.y; Px0.z=P0.z; Py0.z=P0.z; Pz0.z=P0.z+geo.dVoxelZ; // Rotate image (this is equivalent of rotating the source and detector) Point3D P, Px,Py,Pz; // We need other auxiliar variables to be able to perform the rotation, or we would overwrite values! P.x =P0.x *cos(alpha)-P0.y *sin(alpha); P.y =P0.x *sin(alpha)+P0.y *cos(alpha); P.z =P0.z; Px.x=Px0.x*cos(alpha)-Px0.y*sin(alpha); Px.y=Px0.x*sin(alpha)+Px0.y*cos(alpha); Px.z=Px0.z; Py.x=Py0.x*cos(alpha)-Py0.y*sin(alpha); Py.y=Py0.x*sin(alpha)+Py0.y*cos(alpha); Py.z=Py0.z; Pz.x=Pz0.x*cos(alpha)-Pz0.y*sin(alpha); Pz.y=Pz0.x*sin(alpha)+Pz0.y*cos(alpha); Pz.z=Pz0.z; //detector offset P.z =P.z-geo.offDetecV[i]; P.y =P.y-geo.offDetecU[i]; Px.z =Px.z-geo.offDetecV[i]; Px.y =Px.y-geo.offDetecU[i]; Py.z =Py.z-geo.offDetecV[i]; Py.y =Py.y-geo.offDetecU[i]; Pz.z =Pz.z-geo.offDetecV[i]; Pz.y =Pz.y-geo.offDetecU[i]; //Detector Roll pitch Yaw // // // first, we need to offset everything so (0,0,0) is the center of the detector // Only X is required for that P.x=P.x+(geo.DSD-geo.DSO); Px.x=Px.x+(geo.DSD-geo.DSO); Py.x=Py.x+(geo.DSD-geo.DSO); Pz.x=Pz.x+(geo.DSD-geo.DSO); rollPitchYawT(geo,i,&P); rollPitchYawT(geo,i,&Px); rollPitchYawT(geo,i,&Py); rollPitchYawT(geo,i,&Pz); P.x=P.x-(geo.DSD-geo.DSO); Px.x=Px.x-(geo.DSD-geo.DSO); Py.x=Py.x-(geo.DSD-geo.DSO); Pz.x=Pz.x-(geo.DSD-geo.DSO); //Done for P, now source source.x=geo.DSD; //allready offseted for rotation of teh detector source.y=-geo.offDetecU[i]; source.z=-geo.offDetecV[i]; rollPitchYawT(geo,i,&source); source.x=source.x-(geo.DSD-geo.DSO); // Scale coords so detector pixels are 1x1 P.z =P.z /geo.dDetecV; P.y =P.y/geo.dDetecU; Px.z=Px.z/geo.dDetecV; Px.y=Px.y/geo.dDetecU; Py.z=Py.z/geo.dDetecV; Py.y=Py.y/geo.dDetecU; Pz.z=Pz.z/geo.dDetecV; Pz.y=Pz.y/geo.dDetecU; source.z=source.z/geo.dDetecV; source.y=source.y/geo.dDetecU; // get deltas of the changes in voxels deltaX->x=Px.x-P.x; deltaX->y=Px.y-P.y; deltaX->z=Px.z-P.z; deltaY->x=Py.x-P.x; deltaY->y=Py.y-P.y; deltaY->z=Py.z-P.z; deltaZ->x=Pz.x-P.x; deltaZ->y=Pz.y-P.y; deltaZ->z=Pz.z-P.z; *xyzorigin=P; *S=source; } // END computeDeltasCube void rollPitchYawT(Geometry geo,int i, Point3D* point){ Point3D auxPoint; auxPoint.x=point->x; auxPoint.y=point->y; auxPoint.z=point->z; point->x=cos(geo.dRoll[i])*cos(geo.dPitch[i])*auxPoint.x +sin(geo.dRoll[i])*cos(geo.dPitch[i])*auxPoint.y -sin(geo.dPitch[i])*auxPoint.z; point->y=(cos(geo.dRoll[i])*sin(geo.dPitch[i])*sin(geo.dYaw[i]) - sin(geo.dRoll[i])*cos(geo.dYaw[i]))*auxPoint.x +(sin(geo.dRoll[i])*sin(geo.dPitch[i])*sin(geo.dYaw[i]) + cos(geo.dRoll[i])*cos(geo.dYaw[i]))*auxPoint.y +cos(geo.dPitch[i])*sin(geo.dYaw[i])*auxPoint.z; point->z=(cos(geo.dRoll[i])*sin(geo.dPitch[i])*cos(geo.dYaw[i]) + sin(geo.dRoll[i])*sin(geo.dYaw[i]))*auxPoint.x +(sin(geo.dRoll[i])*sin(geo.dPitch[i])*cos(geo.dYaw[i]) - cos(geo.dRoll[i])*sin(geo.dYaw[i]))*auxPoint.y +cos(geo.dPitch[i])*cos(geo.dYaw[i])*auxPoint.z; } #endif
4d074dabbe39c91e2ffa7781cbf11021d0b2746a.cu
/*------------------------------------------------------------------------- * * CUDA function for backrpojection using FDK weigts for CBCT * * * CODE by Ander Biguri * Optimized and modified by RB * --------------------------------------------------------------------------- * --------------------------------------------------------------------------- * Copyright (c) 2015, University of Bath and CERN- European Organization for * Nuclear Research * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its contributors * may be used to endorse or promote products derived from this software without * specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. 
* --------------------------------------------------------------------------- * * Contact: [email protected] * Codes : https://github.com/CERN/TIGRE * --------------------------------------------------------------------------- */ #define PI_2 1.57079632679489661923 #include <algorithm> #include <cuda_runtime_api.h> #include <cuda.h> #include "voxel_backprojection2.hpp" #include "mex.h" #include <math.h> // https://stackoverflow.com/questions/16282136/is-there-a-cuda-equivalent-of-perror #define cudaCheckErrors(msg) \ do { \ cudaError_t __err = cudaGetLastError(); \ if (__err != cudaSuccess) { \ mexPrintf("%s \n",msg);\ mexErrMsgIdAndTxt("CBCT:CUDA:Atb",cudaGetErrorString(__err));\ } \ } while (0) #define MAXTREADS 1024 /*GEOMETRY DEFINITION * * Detector plane, behind * |-----------------------------| * | | * | | * | | * | | * | +--------+ | * | / /| | * A Z | / / |*D | * | | +--------+ | | * | | | | | | * | | | *O | + | * *--->y | | | / | * / | | |/ | * V X | +--------+ | * |-----------------------------| * * *S * * * * * **/ texture<float, cudaTextureType2DLayered , cudaReadModeElementType> tex; __global__ void matrixConstantMultiply(const Geometry geo,float* image,float constant){ size_t idx = threadIdx.x + blockIdx.x * blockDim.x; for(; idx<geo.nVoxelX* geo.nVoxelY *geo.nVoxelZ; idx+=gridDim.x*blockDim.x) { image[idx]*=constant; } } //////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// // RB, 10/31/2016: Add constant memory arrays to store parameters for all projections to be analyzed during a single kernel call //////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// // The optimal values of two constants obtained by RB on NVIDIA Quadro K2200 (4 GB RAM, 640 CUDA cores) for 512^3 volume and 512^3 projections (512 proj, each 512 x 512) were: // PROJ_PER_KERNEL = 32 or 16 (very similar times) // VOXELS_PER_THREAD = 8 // Speedup of the entire FDK backprojection (not only kernel run, also memcpy etc.) was nearly 4x relative to the original (single projection, single voxel per thread) code. // (e.g. 16.2 s vs. ~62 s). const int PROJ_PER_KERNEL = 32; // Number of 2D projections to be analyzed by a single thread. This can be tweaked to see what works best. 32 was the optimal value in the paper by Zinsser and Keck. const int VOXELS_PER_THREAD = 8; // Number of voxels to be computed by s single thread. Can be tweaked to see what works best. 4 was the optimal value in the paper by Zinsser and Keck. // We have PROJ_PER_KERNEL projections and we need 6 parameters for each projection: // deltaX, deltaY, deltaZ, xyzOrigin, offOrig, offDetec // So we need to keep PROJ_PER_KERNEL*6 values in our deltas array FOR EACH CALL to our main kernel // (they will be updated in the main loop before each kernel call). 
__constant__ Point3D projParamsArray2Dev[7*PROJ_PER_KERNEL]; // Dev means it is on device // We also need a corresponding array on the host side to be filled before each kernel call, then copied to the device (array in constant memory above) Point3D projParamsArray2Host[7*PROJ_PER_KERNEL]; // Host means it is host memory // Now we also need to store sinAlpha and cosAlpha for each projection (two floats per projection) __constant__ float projSinCosArray2Dev[3*PROJ_PER_KERNEL]; float projSinCosArray2Host[3*PROJ_PER_KERNEL]; // //////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// // END RB, 10/31/2016: Add constant memory arrays to store parameters for all projections to be analyzed during a single kernel call //////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// //______________________________________________________________________________ // // Function: kernelPixelBackprojectionFDK // // Description: Main FDK backprojection kernel //______________________________________________________________________________ __global__ void kernelPixelBackprojection(const Geometry geo, float* image,const int currProjSetNumber, const int totalNoOfProjections) { // Old kernel call signature: // kernelPixelBackprojection<<<grid,block>>>(geo,dimage,i,deltaX,deltaY,deltaZ,xyzOrigin,offOrig,offDetec,sinalpha,cosalpha); // We just read in most of the params from the constant memory instead of getting them from the param list. // This is because we now have MANY params, since single kernel processes more than one projection! /* __global__ void kernelPixelBackprojectionFDK(const Geometry geo, * float* image, * const int indAlpha, * const Point3D deltaX , * const Point3D deltaY, * const Point3D deltaZ, * const Point3D xyzOrigin, * const Point3D xyzOffset, * const Point3D uv0Offset, * const float sinalpha, * const float cosalpha){ */ unsigned long indY = blockIdx.y * blockDim.y + threadIdx.y; unsigned long indX = blockIdx.x * blockDim.x + threadIdx.x; // unsigned long startIndZ = blockIdx.z * blockDim.z + threadIdx.z; // This is only STARTING z index of the column of voxels that the thread will handle unsigned long startIndZ = blockIdx.z * VOXELS_PER_THREAD + threadIdx.z; // This is only STARTING z index of the column of voxels that the thread will handle //Make sure we dont go out of bounds if (indX>=geo.nVoxelX | indY>=geo.nVoxelY |startIndZ>=geo.nVoxelZ) return; // We'll keep a local auxiliary array of values of a column of voxels that this thread will update float voxelColumn[VOXELS_PER_THREAD]; // First we need to copy the curent 3D volume values from the column to our auxiliary array so that we can then // work on them (update them by computing values from multiple projections) locally - avoiding main memory reads/writes int colIdx; #pragma unroll for(colIdx=0; colIdx<VOXELS_PER_THREAD; colIdx++) { unsigned long indZ = startIndZ + colIdx; // If we are out of bounds, break the loop. The voxelColumn array will be updated partially, but it is OK, because we won't // be trying to copy the out of bounds values back to the 3D volume anyway (bounds checks will be done in the final loop where the updated values go back to the main volume) if(indZ>=geo.nVoxelZ) break; // break the loop. 
unsigned long long idx =indZ*geo.nVoxelX*geo.nVoxelY+indY*geo.nVoxelX + indX; voxelColumn[colIdx] = image[idx]; // Read the current volume value that we'll update by computing values from MULTIPLE projections (not just one) // We'll be updating the local (register) variable, avoiding reads/writes from the slow main memory. } // END copy 3D volume voxels to local array // Now iterate through projections #pragma unroll for(int projNumber=0; projNumber<PROJ_PER_KERNEL; projNumber++) { // Get the current parameters from parameter arrays in constant memory. int indAlpha = currProjSetNumber*PROJ_PER_KERNEL+projNumber; // This is the ABSOLUTE projection number in the projection array // Our currImageVal will be updated by hovewer many projections we had left in the "remainder" - that's OK. if(indAlpha>=totalNoOfProjections) break; Point3D deltaX = projParamsArray2Dev[7*projNumber]; // 6*projNumber because we have 6 Point3D values per projection Point3D deltaY = projParamsArray2Dev[7*projNumber+1]; Point3D deltaZ = projParamsArray2Dev[7*projNumber+2]; Point3D xyzOrigin = projParamsArray2Dev[7*projNumber+3]; Point3D xyzOffset = projParamsArray2Dev[7*projNumber+4]; Point3D uv0Offset = projParamsArray2Dev[7*projNumber+5]; Point3D S = projParamsArray2Dev[7*projNumber+6]; float sinalpha = projSinCosArray2Dev[3*projNumber]; // 2*projNumber because we have 2 float (sin or cos angle) values per projection float cosalpha = projSinCosArray2Dev[3*projNumber+1]; float COR = projSinCosArray2Dev[3*projNumber+2]; // Precomputations for the weights: //Real coords of Source // We already have S.x (geo.DSO), and S.y and S.z are always zero. we just need to rotate Point3D realS; realS.x= geo.DSO*cosalpha; realS.y=-geo.DSO*sinalpha; realS.z=0; // Now iterate through Z in our voxel column FOR A GIVEN PROJECTION #pragma unroll for(colIdx=0; colIdx<VOXELS_PER_THREAD; colIdx++) { unsigned long indZ = startIndZ + colIdx; // If we are out of bounds, break the loop. The voxelColumn array will be updated partially, but it is OK, because we won't // be trying to copy the out of bounds values anyway (bounds checks will be done in the final loop where the values go to the main volume) if(indZ>=geo.nVoxelZ) break; // break the loop. // "XYZ" in the scaled coordinate system of the current point. The image is rotated with the projection angles. Point3D P; P.x=(xyzOrigin.x+indX*deltaX.x+indY*deltaY.x+indZ*deltaZ.x); P.y=(xyzOrigin.y+indX*deltaX.y+indY*deltaY.y+indZ*deltaZ.y)-COR/geo.dDetecU; P.z=(xyzOrigin.z+indX*deltaX.z+indY*deltaY.z+indZ*deltaZ.z); // This is the vector defining the line from the source to the Voxel float vectX,vectY,vectZ; vectX=(P.x -S.x); vectY=(P.y -S.y); vectZ=(P.z -S.z); // Get the coordinates in the detector UV where the mid point of the voxel is projected. float t=(geo.DSO-geo.DSD /*-DOD*/ - S.x)/vectX; float y,z; y=vectY*t+S.y; z=vectZ*t+S.z; float u,v; u=y+geo.nDetecU/2; v=z+geo.nDetecV/2; float sample=tex2DLayered(tex, v , // u and v seem swaped, but this is due to the row/column major u , indAlpha); float weigth=0; // // // // IMPORTANT: The weights are almost 50% of the computational time. Is there a way of speeding this up?? // //Real coordinates of Voxel. Instead of reverting the tranformation, its less math (faster) to compute it from the indexes. 
Point3D realvoxel; realvoxel.x=-geo.sVoxelX/2+geo.dVoxelX/2 +indX*geo.dVoxelX +xyzOffset.x; realvoxel.y=-geo.sVoxelY/2+geo.dVoxelY/2 +indY*geo.dVoxelY +xyzOffset.y; realvoxel.z=-geo.sVoxelZ/2+geo.dVoxelZ/2 +indZ*geo.dVoxelZ +xyzOffset.z; // Real XYZ coordinates of Detector. Point3D realD, realDaux; // We know the index of the detector (u,v). Start from there. realDaux.x=-(geo.DSD-geo.DSO); realDaux.y=-geo.sDetecU/2+geo.dDetecU/2 + u*geo.dDetecU +uv0Offset.x; realD.z =-geo.sDetecV/2+geo.dDetecV/2 + v*geo.dDetecV +uv0Offset.y; //rotate the detector realD.x= realDaux.x*cosalpha + realDaux.y*sinalpha; //sin(-x)=-sin(x) , cos(-x)=cos(x) realD.y=-realDaux.x*sinalpha + realDaux.y*cosalpha; //sin(-x)=-sin(x) , cos(-x)=cos(x) float L=0,l=0; L = sqrt( (realS.x-realD.x)*(realS.x-realD.x)+ (realS.y-realD.y)*(realS.y-realD.y)+ (realD.z)*(realD.z)); // Sz=0 always. l = sqrt( (realS.x-realvoxel.x)*(realS.x-realvoxel.x) + (realS.y-realvoxel.y)*(realS.y-realvoxel.y) + (realS.z-realvoxel.z)*(realS.z-realvoxel.z)); weigth=L*L*L/(geo.DSD*l*l); // Get Value in the computed (U,V) and multiply by the corresponding weigth. // indAlpha is the ABSOLUTE number of projection in the projection array (NOT the current number of projection set!) voxelColumn[colIdx]+=sample* weigth; } // END iterating through column of voxels } // END iterating through multiple projections // And finally copy the updated local voxelColumn array back to our 3D volume (main memory) #pragma unroll for(colIdx=0; colIdx<VOXELS_PER_THREAD; colIdx++) { unsigned long indZ = startIndZ + colIdx; // If we are out of bounds, break the loop. The voxelColumn array will be updated partially, but it is OK, because we won't // be trying to copy the out of bounds values back to the 3D volume anyway (bounds checks will be done in the final loop where the values go to the main volume) if(indZ>=geo.nVoxelZ) break; // break the loop. unsigned long long idx =indZ*geo.nVoxelX*geo.nVoxelY+indY*geo.nVoxelX + indX; image[idx] = voxelColumn[colIdx]; // Read the current volume value that we'll update by computing values from MULTIPLE projections (not just one) // We'll be updating the local (register) variable, avoiding reads/writes from the slow main memory. // According to references (Papenhausen), doing = is better than +=, since += requires main memory read followed by a write. // We did all the reads into the local array at the BEGINNING of this kernel. According to Papenhausen, this type of read-write split is // better for avoiding memory congestion. 
} // END copy updated voxels from local array to our 3D volume } // END kernelPixelBackprojectionFDK //______________________________________________________________________________ // // Function: voxel_backprojection // // Description: Main host function for FDK backprojection (invokes the kernel) //______________________________________________________________________________ int voxel_backprojection2(float const * const projections, Geometry geo, float* result,float const * const alphas, int nalpha) { /* * Allocate texture memory on the device */ // copy data to CUDA memory cudaArray *d_projectiondata = 0; const cudaExtent extent = make_cudaExtent(geo.nDetecV,geo.nDetecU,nalpha); cudaChannelFormatDesc channelDesc = cudaCreateChannelDesc<float>(); cudaMalloc3DArray(&d_projectiondata, &channelDesc, extent,cudaArrayLayered); cudaCheckErrors("cudaMalloc3D error 3D tex"); cudaMemcpy3DParms copyParams = { 0 }; copyParams.srcPtr = make_cudaPitchedPtr((void*)projections, extent.width*sizeof(float), extent.width, extent.height); copyParams.dstArray = d_projectiondata; copyParams.extent = extent; copyParams.kind = cudaMemcpyHostToDevice; cudaMemcpy3D(&copyParams); cudaCheckErrors("cudaMemcpy3D fail"); // Configure texture options tex.normalized = false; tex.filterMode = cudaFilterModeLinear; tex.addressMode[0] = cudaAddressModeBorder; tex.addressMode[1] = cudaAddressModeBorder; tex.addressMode[2] = cudaAddressModeBorder; cudaBindTextureToArray(tex, d_projectiondata, channelDesc); cudaCheckErrors("3D texture memory bind fail"); // Allocate result image memory size_t num_bytes = geo.nVoxelX*geo.nVoxelY*geo.nVoxelZ * sizeof(float); float* dimage; cudaMalloc((void**)&dimage, num_bytes); cudaMemset(dimage,0,num_bytes); cudaCheckErrors("cudaMalloc fail"); // If we are going to time bool timekernel=false; cudaEvent_t start, stop; float elapsedTime; if (timekernel){ cudaEventCreate(&start); cudaEventRecord(start,0); } int divx,divy,divz; // RB: Use the optimal (in their tests) block size from paper by Zinsser and Keck (16 in x and 32 in y). // I tried different sizes and shapes of blocks (tiles), but it does not appear to significantly affect trhoughput, so // let's stick with the values from Zinsser and Keck. divx=16; divy=32; divz=VOXELS_PER_THREAD; // We now only have 32 x 16 threads per block (flat tile, see below), BUT each thread works on a Z column of VOXELS_PER_THREAD voxels, so we effectively need fewer blocks! dim3 grid((geo.nVoxelX+divx-1)/divx, (geo.nVoxelY+divy-1)/divy, (geo.nVoxelZ+divz-1)/divz); dim3 block(divx,divy,1); // Note that we have 1 in the Z size, not divz, since each thread works on a vertical set of VOXELS_PER_THREAD voxels (so we only need a "flat" tile of threads, with depth of 1) ////////////////////////////////////////////////////////////////////////////////////// // Main reconstruction loop: go through projections (rotation angles) and backproject ////////////////////////////////////////////////////////////////////////////////////// // Since we'll have multiple projections processed by a SINGLE kernel call, compute how many // kernel calls we'll need altogether. 
int noOfKernelCalls = (nalpha+PROJ_PER_KERNEL-1)/PROJ_PER_KERNEL; // We'll take care of bounds checking inside the loop if nalpha is not divisible by PROJ_PER_KERNEL for (unsigned int i=0; i<noOfKernelCalls; i++) { // Now we need to generate and copy all data for PROJ_PER_KERNEL projections to constant memory so that our kernel can use it int j; for(j=0; j<PROJ_PER_KERNEL; j++) { int currProjNumber=i*PROJ_PER_KERNEL+j; if(currProjNumber>=nalpha) break; // Exit the loop. Even when we leave the param arrays only partially filled, this is OK, since the kernel will check bounds anyway. Point3D deltaX,deltaY,deltaZ,xyzOrigin, offOrig, offDetec,source; float sinalpha,cosalpha; geo.alpha=-alphas[currProjNumber*3]; sinalpha=sin(geo.alpha); cosalpha=cos(geo.alpha); projSinCosArray2Host[3*j]=sinalpha; // 2*j because we have 2 float (sin or cos angle) values per projection projSinCosArray2Host[3*j+1]=cosalpha; projSinCosArray2Host[3*j+2]=geo.COR[currProjNumber]; computeDeltasCube(geo,geo.alpha,currProjNumber,&xyzOrigin,&deltaX,&deltaY,&deltaZ,&source); offOrig.x=geo.offOrigX[currProjNumber]; offOrig.y=geo.offOrigY[currProjNumber]; offOrig.y=geo.offOrigZ[currProjNumber]; offDetec.x=geo.offDetecU[currProjNumber]; offDetec.y=geo.offDetecV[currProjNumber]; projParamsArray2Host[7*j]=deltaX; // 7*j because we have 7 Point3D values per projection projParamsArray2Host[7*j+1]=deltaY; projParamsArray2Host[7*j+2]=deltaZ; projParamsArray2Host[7*j+3]=xyzOrigin; projParamsArray2Host[7*j+4]=offOrig; projParamsArray2Host[7*j+5]=offDetec; projParamsArray2Host[7*j+6]=source; } // END for (preparing params for kernel call) // Copy the prepared parameter arrays to constant memory to make it available for the kernel cudaMemcpyToSymbol(projSinCosArray2Dev, projSinCosArray2Host, sizeof(float)*3*PROJ_PER_KERNEL); cudaMemcpyToSymbol(projParamsArray2Dev, projParamsArray2Host, sizeof(Point3D)*7*PROJ_PER_KERNEL); kernelPixelBackprojection<<<grid,block>>>(geo,dimage,i,nalpha); cudaCheckErrors("Kernel fail"); } // END for matrixConstantMultiply<<<60,MAXTREADS>>>( geo,dimage,geo.dVoxelX*geo.dVoxelY*geo.dVoxelZ/(geo.dDetecU*geo.dDetecV)); ////////////////////////////////////////////////////////////////////////////////////// // END Main reconstruction loop: go through projections (rotation angles) and backproject ////////////////////////////////////////////////////////////////////////////////////// if (timekernel) { cudaEventCreate(&stop); cudaEventRecord(stop,0); cudaEventSynchronize(stop); cudaEventElapsedTime(&elapsedTime, start,stop); mexPrintf("%f\n" ,elapsedTime); cudaCheckErrors("cuda Timing fail"); } cudaMemcpy(result, dimage, num_bytes, cudaMemcpyDeviceToHost); cudaCheckErrors("cudaMemcpy result fail"); cudaUnbindTexture(tex); cudaCheckErrors("Unbind fail"); cudaFree(dimage); cudaFreeArray(d_projectiondata); cudaCheckErrors("cudaFree d_imagedata fail"); cudaDeviceReset(); return 0; } // END voxel_backprojection //______________________________________________________________________________ // // Function: computeDeltasCube // // Description: Computes relative increments for each projection (volume rotation). // Increments get passed to the backprojection kernel. 
//______________________________________________________________________________ #ifndef BACKPROJECTION_HPP void computeDeltasCube(Geometry geo, float alpha,int i, Point3D* xyzorigin, Point3D* deltaX, Point3D* deltaY, Point3D* deltaZ,Point3D* S) { Point3D P0, Px0,Py0,Pz0, source; // Get coords of Img(0,0,0) P0.x=-(geo.sVoxelX/2-geo.dVoxelX/2)+geo.offOrigX[i]; P0.y=-(geo.sVoxelY/2-geo.dVoxelY/2)+geo.offOrigY[i]; P0.z=-(geo.sVoxelZ/2-geo.dVoxelZ/2)+geo.offOrigZ[i]; // Get coors from next voxel in each direction Px0.x=P0.x+geo.dVoxelX; Py0.x=P0.x; Pz0.x=P0.x; Px0.y=P0.y; Py0.y=P0.y+geo.dVoxelY; Pz0.y=P0.y; Px0.z=P0.z; Py0.z=P0.z; Pz0.z=P0.z+geo.dVoxelZ; // Rotate image (this is equivalent of rotating the source and detector) Point3D P, Px,Py,Pz; // We need other auxiliar variables to be able to perform the rotation, or we would overwrite values! P.x =P0.x *cos(alpha)-P0.y *sin(alpha); P.y =P0.x *sin(alpha)+P0.y *cos(alpha); P.z =P0.z; Px.x=Px0.x*cos(alpha)-Px0.y*sin(alpha); Px.y=Px0.x*sin(alpha)+Px0.y*cos(alpha); Px.z=Px0.z; Py.x=Py0.x*cos(alpha)-Py0.y*sin(alpha); Py.y=Py0.x*sin(alpha)+Py0.y*cos(alpha); Py.z=Py0.z; Pz.x=Pz0.x*cos(alpha)-Pz0.y*sin(alpha); Pz.y=Pz0.x*sin(alpha)+Pz0.y*cos(alpha); Pz.z=Pz0.z; //detector offset P.z =P.z-geo.offDetecV[i]; P.y =P.y-geo.offDetecU[i]; Px.z =Px.z-geo.offDetecV[i]; Px.y =Px.y-geo.offDetecU[i]; Py.z =Py.z-geo.offDetecV[i]; Py.y =Py.y-geo.offDetecU[i]; Pz.z =Pz.z-geo.offDetecV[i]; Pz.y =Pz.y-geo.offDetecU[i]; //Detector Roll pitch Yaw // // // first, we need to offset everything so (0,0,0) is the center of the detector // Only X is required for that P.x=P.x+(geo.DSD-geo.DSO); Px.x=Px.x+(geo.DSD-geo.DSO); Py.x=Py.x+(geo.DSD-geo.DSO); Pz.x=Pz.x+(geo.DSD-geo.DSO); rollPitchYawT(geo,i,&P); rollPitchYawT(geo,i,&Px); rollPitchYawT(geo,i,&Py); rollPitchYawT(geo,i,&Pz); P.x=P.x-(geo.DSD-geo.DSO); Px.x=Px.x-(geo.DSD-geo.DSO); Py.x=Py.x-(geo.DSD-geo.DSO); Pz.x=Pz.x-(geo.DSD-geo.DSO); //Done for P, now source source.x=geo.DSD; //allready offseted for rotation of teh detector source.y=-geo.offDetecU[i]; source.z=-geo.offDetecV[i]; rollPitchYawT(geo,i,&source); source.x=source.x-(geo.DSD-geo.DSO); // Scale coords so detector pixels are 1x1 P.z =P.z /geo.dDetecV; P.y =P.y/geo.dDetecU; Px.z=Px.z/geo.dDetecV; Px.y=Px.y/geo.dDetecU; Py.z=Py.z/geo.dDetecV; Py.y=Py.y/geo.dDetecU; Pz.z=Pz.z/geo.dDetecV; Pz.y=Pz.y/geo.dDetecU; source.z=source.z/geo.dDetecV; source.y=source.y/geo.dDetecU; // get deltas of the changes in voxels deltaX->x=Px.x-P.x; deltaX->y=Px.y-P.y; deltaX->z=Px.z-P.z; deltaY->x=Py.x-P.x; deltaY->y=Py.y-P.y; deltaY->z=Py.z-P.z; deltaZ->x=Pz.x-P.x; deltaZ->y=Pz.y-P.y; deltaZ->z=Pz.z-P.z; *xyzorigin=P; *S=source; } // END computeDeltasCube void rollPitchYawT(Geometry geo,int i, Point3D* point){ Point3D auxPoint; auxPoint.x=point->x; auxPoint.y=point->y; auxPoint.z=point->z; point->x=cos(geo.dRoll[i])*cos(geo.dPitch[i])*auxPoint.x +sin(geo.dRoll[i])*cos(geo.dPitch[i])*auxPoint.y -sin(geo.dPitch[i])*auxPoint.z; point->y=(cos(geo.dRoll[i])*sin(geo.dPitch[i])*sin(geo.dYaw[i]) - sin(geo.dRoll[i])*cos(geo.dYaw[i]))*auxPoint.x +(sin(geo.dRoll[i])*sin(geo.dPitch[i])*sin(geo.dYaw[i]) + cos(geo.dRoll[i])*cos(geo.dYaw[i]))*auxPoint.y +cos(geo.dPitch[i])*sin(geo.dYaw[i])*auxPoint.z; point->z=(cos(geo.dRoll[i])*sin(geo.dPitch[i])*cos(geo.dYaw[i]) + sin(geo.dRoll[i])*sin(geo.dYaw[i]))*auxPoint.x +(sin(geo.dRoll[i])*sin(geo.dPitch[i])*cos(geo.dYaw[i]) - cos(geo.dRoll[i])*sin(geo.dYaw[i]))*auxPoint.y +cos(geo.dPitch[i])*cos(geo.dYaw[i])*auxPoint.z; } #endif
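A pattern worth noting in the backprojection code above is the batching of per-projection parameters into __constant__ arrays that are refilled with cudaMemcpyToSymbol before each kernel launch, so a single launch processes PROJ_PER_KERNEL projections. The stripped-down sketch below shows only that pattern; the parameter struct, batch size, kernel body, and fill values are illustrative stand-ins, not the TIGRE types.

#include <cuda_runtime.h>

const int BATCH = 32;                         // projections per kernel call (mirrors PROJ_PER_KERNEL)

struct Params { float sina, cosa; };          // hypothetical per-projection parameters

__constant__ Params d_params[BATCH];          // constant memory, visible to every thread
static Params h_params[BATCH];                // host-side staging buffer

__global__ void consume(float* out, int n, int batchStart, int total)
{
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i >= n) return;
    for (int p = 0; p < BATCH; ++p) {
        if (batchStart + p >= total) break;   // last batch may be partial
        out[i] += d_params[p].sina + d_params[p].cosa;
    }
}

void run_batches(float* d_out, int n, int totalProjections)
{
    for (int start = 0; start < totalProjections; start += BATCH) {
        for (int p = 0; p < BATCH && start + p < totalProjections; ++p)
            h_params[p] = Params{0.0f, 1.0f}; // real code would compute per-projection values here
        // refresh the constant-memory block, then launch once for the whole batch
        cudaMemcpyToSymbol(d_params, h_params, sizeof(Params) * BATCH);
        consume<<<(n + 255) / 256, 256>>>(d_out, n, start, totalProjections);
    }
    cudaDeviceSynchronize();
}

int main()
{
    const int n = 1 << 16;
    float* d_out;
    cudaMalloc(&d_out, n * sizeof(float));
    cudaMemset(d_out, 0, n * sizeof(float));
    run_batches(d_out, n, 100);               // e.g. 100 projections -> 4 kernel calls
    cudaFree(d_out);
    return 0;
}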
benchmark.hip
// !!! This is a file automatically generated by hipify!!!
#include "nbody.cuh"

#define ITERS 100

int main(int argc, char** argv){

    hipEvent_t start, stop;
    float elapsedTime;

    initCUDA();

    hipEventCreate(&start);
    hipEventRecord(start,0);

    // execute kernel
    for (int j = 0; j < ITERS; j++)
        runKernelNBodySimulation();
    hipDeviceSynchronize();

    hipEventCreate(&stop);
    hipEventRecord(stop,0);
    hipEventSynchronize(stop);
    hipEventElapsedTime(&elapsedTime, start,stop);

    printf("Running %d kernels\n", ITERS);
    printf("Elapsed time : %f ms\n" ,elapsedTime);

    double dSeconds = elapsedTime/(1000.0);
    double gflops = N_SIZE * N_SIZE/dSeconds/1.0e9 * ITERS ;
    printf("Throughput: %f GFLOPS\n" ,gflops);

    hipDeviceReset();
}
benchmark.cu
#include "nbody.cuh" #define ITERS 100 int main(int argc, char** argv){ cudaEvent_t start, stop; float elapsedTime; initCUDA(); cudaEventCreate(&start); cudaEventRecord(start,0); // execute kernel for (int j = 0; j < ITERS; j++) runKernelNBodySimulation(); cudaThreadSynchronize(); cudaEventCreate(&stop); cudaEventRecord(stop,0); cudaEventSynchronize(stop); cudaEventElapsedTime(&elapsedTime, start,stop); printf("Running %d kernels\n", ITERS); printf("Elapsed time : %f ms\n" ,elapsedTime); double dSeconds = elapsedTime/(1000.0); double gflops = N_SIZE * N_SIZE/dSeconds/1.0e9 * ITERS ; printf("Throughput: %f GFLOPS\n" ,gflops); cudaThreadExit(); }
9a990fa84828f31543befd0e6d838a5efbf9d3d4.hip
// !!! This is a file automatically generated by hipify!!!
// generated by gen_cuda_conv_bias_kern_impls.py
#include "../conv_bias_int8_implicit_gemm_cdiv4hwn4_ld_64bit.cuinl"

template void megdnn::cuda::conv_bias_int8::do_conv_bias_int8_implicit_gemm_cdiv4hwn4_ld_64bit<
        PerChannelBiasVisitor,
        IConvEpilogue<Activation<megdnn::param_enumv::ConvBias::NonlineMode::IDENTITY>>>(
    const int8_t* d_src, const int8_t* d_filter, PerChannelBiasVisitor bias,
    IConvEpilogue<Activation<megdnn::param_enumv::ConvBias::NonlineMode::IDENTITY>> epilogue,
    const ConvParam& param, float alpha, float beta, hipStream_t stream);
9a990fa84828f31543befd0e6d838a5efbf9d3d4.cu
// generated by gen_cuda_conv_bias_kern_impls.py
#include "../conv_bias_int8_implicit_gemm_cdiv4hwn4_ld_64bit.cuinl"

template void megdnn::cuda::conv_bias_int8::do_conv_bias_int8_implicit_gemm_cdiv4hwn4_ld_64bit<
        PerChannelBiasVisitor,
        IConvEpilogue<Activation<megdnn::param_enumv::ConvBias::NonlineMode::IDENTITY>>>(
    const int8_t* d_src, const int8_t* d_filter, PerChannelBiasVisitor bias,
    IConvEpilogue<Activation<megdnn::param_enumv::ConvBias::NonlineMode::IDENTITY>> epilogue,
    const ConvParam& param, float alpha, float beta, cudaStream_t stream);
f4c914f623c050c049cd979a227838f3e0e961fe.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* -- MAGMA (version 2.5.0) -- Univ. of Tennessee, Knoxville Univ. of California, Berkeley Univ. of Colorado, Denver @date January 2019 @precisions mixed zc -> ds @author Mark Gates */ #include "magma_internal.h" #define BLK_X 64 #define BLK_Y 32 /* Divides matrix into ceil( n/BLK_X ) x ceil( n/BLK_Y ) blocks. Each block has BLK_X threads. Each thread loops across one row, updating BLK_Y entries. Updates only the diagonal and below. Blocks that are fully above the diagonal exit immediately. Code similar to zlag2c and zlaset. */ __global__ void clat2z_lower( int n, const magmaFloatComplex *SA, int ldsa, magmaDoubleComplex *A, int lda ) { int ind = blockIdx.x*BLK_X + threadIdx.x; int iby = blockIdx.y*BLK_Y; /* check if full block-column && (below diag) */ bool full = (iby + BLK_Y <= n && (ind >= iby + BLK_Y)); /* do only rows inside matrix, and blocks not above diag */ if ( ind < n && ind + BLK_X > iby ) { A += ind + iby*lda; SA += ind + iby*ldsa; if ( full ) { // full block-column, off-diagonal block #pragma unroll for( int j=0; j < BLK_Y; ++j ) { A[j*lda] = MAGMA_Z_MAKE( MAGMA_C_REAL( SA[j*ldsa] ), MAGMA_C_IMAG( SA[j*ldsa] ) ); } } else { // either partial block-column or diagonal block for( int j=0; j < BLK_Y && iby+j < n && ind >= iby+j; ++j ) { A[j*lda] = MAGMA_Z_MAKE( MAGMA_C_REAL( SA[j*ldsa] ), MAGMA_C_IMAG( SA[j*ldsa] ) ); } } } } /* Similar to clat2z_full, but updates only the diagonal and above. Blocks that are fully below the diagonal exit immediately. Code similar to zlag2c and zlaset. */ __global__ void clat2z_upper( int n, const magmaFloatComplex *SA, int ldsa, magmaDoubleComplex *A, int lda ) { int ind = blockIdx.x*BLK_X + threadIdx.x; int iby = blockIdx.y*BLK_Y; /* check if full block-column && (above diag) */ bool full = (iby + BLK_Y <= n && (ind + BLK_X <= iby)); /* do only rows inside matrix, and blocks not below diag */ if ( ind < n && ind < iby + BLK_Y ) { A += ind + iby*lda; SA += ind + iby*ldsa; if ( full ) { // full block-column, off-diagonal block #pragma unroll for( int j=0; j < BLK_Y; ++j ) { A[j*lda] = MAGMA_Z_MAKE( MAGMA_C_REAL( SA[j*ldsa] ), MAGMA_C_IMAG( SA[j*ldsa] ) ); } } else { // either partial block-column or diagonal block for( int j=0; j < BLK_Y && iby+j < n; ++j ) { if ( ind <= iby+j ) { A[j*lda] = MAGMA_Z_MAKE( MAGMA_C_REAL( SA[j*ldsa] ), MAGMA_C_IMAG( SA[j*ldsa] ) ); } } } } } /***************************************************************************//** Purpose ------- CLAT2Z converts a single-complex matrix, SA, to a double-complex matrix, A. Note that while it is possible to overflow while converting from double to single, it is not possible to overflow when converting from single to double. Arguments --------- @param[in] uplo magma_uplo_t Specifies the part of the matrix A to be converted. - = MagmaUpper: Upper triangular part - = MagmaLower: Lower triangular part @param[in] n INTEGER The number of columns of the matrix A. n >= 0. @param[in] A COMPLEX_16 array, dimension (LDA,n) On entry, the n-by-n coefficient matrix A. @param[in] lda INTEGER The leading dimension of the array A. LDA >= max(1,n). @param[out] SA COMPLEX array, dimension (LDSA,n) On exit, if INFO=0, the n-by-n coefficient matrix SA; if INFO > 0, the content of SA is unspecified. @param[in] ldsa INTEGER The leading dimension of the array SA. LDSA >= max(1,n). @param[out] info INTEGER - = 0: successful exit. 
- < 0: if INFO = -i, the i-th argument had an illegal value @param[in] queue magma_queue_t Queue to execute in. @ingroup magma_lat2 *******************************************************************************/ extern "C" void magmablas_clat2z( magma_uplo_t uplo, magma_int_t n, magmaFloatComplex_const_ptr SA, magma_int_t ldsa, magmaDoubleComplex_ptr A, magma_int_t lda, magma_queue_t queue, magma_int_t *info ) { *info = 0; if ( uplo != MagmaLower && uplo != MagmaUpper ) *info = -1; else if ( n < 0 ) *info = -2; else if ( lda < max(1,n) ) *info = -4; else if ( ldsa < max(1,n) ) *info = -6; if (*info != 0) { magma_xerbla( __func__, -(*info) ); return; //*info; } /* quick return */ if ( n == 0 ) { return; } dim3 threads( BLK_X, 1 ); dim3 grid( magma_ceildiv( n, BLK_X ), magma_ceildiv( n, BLK_Y ) ); if (uplo == MagmaLower) { hipLaunchKernelGGL(( clat2z_lower), dim3(grid), dim3(threads), 0, queue->cuda_stream() , n, SA, ldsa, A, lda); } else if (uplo == MagmaUpper) { hipLaunchKernelGGL(( clat2z_upper), dim3(grid), dim3(threads), 0, queue->cuda_stream() , n, SA, ldsa, A, lda); } }
f4c914f623c050c049cd979a227838f3e0e961fe.cu
/* -- MAGMA (version 2.5.0) -- Univ. of Tennessee, Knoxville Univ. of California, Berkeley Univ. of Colorado, Denver @date January 2019 @precisions mixed zc -> ds @author Mark Gates */ #include "magma_internal.h" #define BLK_X 64 #define BLK_Y 32 /* Divides matrix into ceil( n/BLK_X ) x ceil( n/BLK_Y ) blocks. Each block has BLK_X threads. Each thread loops across one row, updating BLK_Y entries. Updates only the diagonal and below. Blocks that are fully above the diagonal exit immediately. Code similar to zlag2c and zlaset. */ __global__ void clat2z_lower( int n, const magmaFloatComplex *SA, int ldsa, magmaDoubleComplex *A, int lda ) { int ind = blockIdx.x*BLK_X + threadIdx.x; int iby = blockIdx.y*BLK_Y; /* check if full block-column && (below diag) */ bool full = (iby + BLK_Y <= n && (ind >= iby + BLK_Y)); /* do only rows inside matrix, and blocks not above diag */ if ( ind < n && ind + BLK_X > iby ) { A += ind + iby*lda; SA += ind + iby*ldsa; if ( full ) { // full block-column, off-diagonal block #pragma unroll for( int j=0; j < BLK_Y; ++j ) { A[j*lda] = MAGMA_Z_MAKE( MAGMA_C_REAL( SA[j*ldsa] ), MAGMA_C_IMAG( SA[j*ldsa] ) ); } } else { // either partial block-column or diagonal block for( int j=0; j < BLK_Y && iby+j < n && ind >= iby+j; ++j ) { A[j*lda] = MAGMA_Z_MAKE( MAGMA_C_REAL( SA[j*ldsa] ), MAGMA_C_IMAG( SA[j*ldsa] ) ); } } } } /* Similar to clat2z_full, but updates only the diagonal and above. Blocks that are fully below the diagonal exit immediately. Code similar to zlag2c and zlaset. */ __global__ void clat2z_upper( int n, const magmaFloatComplex *SA, int ldsa, magmaDoubleComplex *A, int lda ) { int ind = blockIdx.x*BLK_X + threadIdx.x; int iby = blockIdx.y*BLK_Y; /* check if full block-column && (above diag) */ bool full = (iby + BLK_Y <= n && (ind + BLK_X <= iby)); /* do only rows inside matrix, and blocks not below diag */ if ( ind < n && ind < iby + BLK_Y ) { A += ind + iby*lda; SA += ind + iby*ldsa; if ( full ) { // full block-column, off-diagonal block #pragma unroll for( int j=0; j < BLK_Y; ++j ) { A[j*lda] = MAGMA_Z_MAKE( MAGMA_C_REAL( SA[j*ldsa] ), MAGMA_C_IMAG( SA[j*ldsa] ) ); } } else { // either partial block-column or diagonal block for( int j=0; j < BLK_Y && iby+j < n; ++j ) { if ( ind <= iby+j ) { A[j*lda] = MAGMA_Z_MAKE( MAGMA_C_REAL( SA[j*ldsa] ), MAGMA_C_IMAG( SA[j*ldsa] ) ); } } } } } /***************************************************************************//** Purpose ------- CLAT2Z converts a single-complex matrix, SA, to a double-complex matrix, A. Note that while it is possible to overflow while converting from double to single, it is not possible to overflow when converting from single to double. Arguments --------- @param[in] uplo magma_uplo_t Specifies the part of the matrix A to be converted. - = MagmaUpper: Upper triangular part - = MagmaLower: Lower triangular part @param[in] n INTEGER The number of columns of the matrix A. n >= 0. @param[in] A COMPLEX_16 array, dimension (LDA,n) On entry, the n-by-n coefficient matrix A. @param[in] lda INTEGER The leading dimension of the array A. LDA >= max(1,n). @param[out] SA COMPLEX array, dimension (LDSA,n) On exit, if INFO=0, the n-by-n coefficient matrix SA; if INFO > 0, the content of SA is unspecified. @param[in] ldsa INTEGER The leading dimension of the array SA. LDSA >= max(1,n). @param[out] info INTEGER - = 0: successful exit. - < 0: if INFO = -i, the i-th argument had an illegal value @param[in] queue magma_queue_t Queue to execute in. 
@ingroup magma_lat2 *******************************************************************************/ extern "C" void magmablas_clat2z( magma_uplo_t uplo, magma_int_t n, magmaFloatComplex_const_ptr SA, magma_int_t ldsa, magmaDoubleComplex_ptr A, magma_int_t lda, magma_queue_t queue, magma_int_t *info ) { *info = 0; if ( uplo != MagmaLower && uplo != MagmaUpper ) *info = -1; else if ( n < 0 ) *info = -2; else if ( lda < max(1,n) ) *info = -4; else if ( ldsa < max(1,n) ) *info = -6; if (*info != 0) { magma_xerbla( __func__, -(*info) ); return; //*info; } /* quick return */ if ( n == 0 ) { return; } dim3 threads( BLK_X, 1 ); dim3 grid( magma_ceildiv( n, BLK_X ), magma_ceildiv( n, BLK_Y ) ); if (uplo == MagmaLower) { clat2z_lower<<< grid, threads, 0, queue->cuda_stream() >>> (n, SA, ldsa, A, lda); } else if (uplo == MagmaUpper) { clat2z_upper<<< grid, threads, 0, queue->cuda_stream() >>> (n, SA, ldsa, A, lda); } }
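/* A minimal host-side usage sketch for magmablas_clat2z, assuming the standard
   MAGMA 2.x helpers (magma_init, magma_queue_create, magma_cmalloc,
   magma_zmalloc, magma_free, magma_queue_destroy, magma_finalize).  The
   function name, the choice of device 0 and the size n are illustrative
   assumptions. */
static void example_clat2z_usage( magma_int_t n )
{
    magma_init();

    magma_queue_t queue;
    magma_queue_create( 0, &queue );          // queue on device 0

    magmaFloatComplex_ptr  dSA;               // single-complex input
    magmaDoubleComplex_ptr dA;                // double-complex output
    magma_cmalloc( &dSA, n*n );
    magma_zmalloc( &dA,  n*n );
    // ... fill dSA here, e.g. with magma_csetmatrix ...

    magma_int_t info = 0;
    magmablas_clat2z( MagmaLower, n, dSA, n, dA, n, queue, &info );
    magma_queue_sync( queue );                // wait for the conversion kernel
    // info < 0 would indicate an illegal argument (reported via xerbla)

    magma_free( dSA );
    magma_free( dA );
    magma_queue_destroy( queue );
    magma_finalize();
}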
347b512c6c960b00450c65883a39c44998482a07.hip
// !!! This is a file automatically generated by hipify!!! ////---------------------------------------- // two-dimensional Fokker-Planck equation // //---------------------------------------- // parameters of simulations are in "input_FP.par" // // compile: // nvcc fokker-planck_2d.cu -lcuda -lcufft -lcublas -O3 -arch sm_20 -o fp2d // // produces output files "n_#####.dat" with (i, j, n, S) columns // // // // uses complex.h // and semi-implicit algorithm #include<iostream> #include<fstream> #include<cstring> #include<stdlib.h> #include<math.h> #include <iomanip> #include <cmath> #include <queue> /* we need these includes for CUDA's random number stuff */ #include <unistd.h> #include <stdio.h> #include <hiprand/hiprand.h> #include <hiprand/hiprand_kernel.h> //#include "fileutils.h" //#include "stringutils.h" // #include<complex.h> // #include<fftw3.h> #include <hip/hip_runtime.h> #include <hipfft.h> #include <rocblas.h> #include <hip/hip_runtime.h> // #include <cutil_inline.h> // #include "reduction_kernel.cu" #define N 256// was 256 #define NX (N*N) using namespace std; //#define DBLPREC #ifdef DBLPREC #define MAXT 512 //was 512 typedef double REAL; #else #define MAXT 128 //was 1024 typedef float REAL; #endif typedef struct { REAL re; REAL im; } COMPLEX; /////////////////////////////////////////////////////////////////////////////////////////////// /////////////////////////////////////////////////////////////////////////////////////////////// /* this GPU kernel function is used to initialize the random states */ __global__ void init(unsigned int seed, hiprandState_t* states) { int idx=(blockIdx.y*gridDim.x+blockIdx.x)*blockDim.x+threadIdx.x; /* we have to initialize the state */ if(idx<NX) { hiprand_init(seed, /* the seed can be the same for each core, here we pass the time in from the CPU */ idx, /* the sequence number should be different for each core (unless you want all cores to get the same sequence of numbers for some reason - use thread id! 
*/ 0, /* the offset is how much extra we advance in the sequence for each call, can be 0 */ &states[idx]); } } typedef struct { // parameters REAL k1dt, k2dt, k3dt, k4dt, k5dt, k6dt, KM1, KM2, KM3, KM4, Df, alpha, beta, St; } sysvar; //=========================================================================== double rn() { return drand48(); } //=========================================================================== //CUDA Kernels //=========================================================================== __global__ void r2c(COMPLEX *cS, REAL *S, REAL *S1 ) { int idx=(blockIdx.y*gridDim.x+blockIdx.x)*blockDim.x+threadIdx.x; if(idx<NX) { cS[idx].re=S[idx]; cS[idx].im=S1[idx]; } }; __global__ void r2c1(COMPLEX *cS, REAL *S ) { int idx=(blockIdx.y*gridDim.x+blockIdx.x)*blockDim.x+threadIdx.x; if(idx<NX) { cS[idx].re=S[idx]; cS[idx].im=0.; } }; __global__ void c2r(COMPLEX *cS, REAL *S, REAL *S1 ) { int idx=(blockIdx.y*gridDim.x+blockIdx.x)*blockDim.x+threadIdx.x; if(idx<NX) { S[idx]=cS[idx].re; S1[idx]=cS[idx].im; } }; /* this GPU kernel takes an array of states, and an array of ints, and * puts a random int into each */ __global__ void randoms(hiprandState_t* states, REAL * numbers) { /* hiprand works like rand - except that it takes a state as a parameter */ int idx=(blockIdx.y*gridDim.x+blockIdx.x)*blockDim.x+threadIdx.x; if(idx<NX) { numbers[idx] = hiprand_uniform(&states[idx]); } } //--------------------------------------------------------------------------- //update P //--------------------------------------------------------------------------- __global__ void flux(REAL *H, REAL *S, REAL *P, REAL *P1, sysvar* cu_Vars) { REAL h, s, h2, s2, h4, s4; REAL Ahp, Asp, hp, sp, hp2, sp2, hp4, sp4; REAL Ahm, Asm, hm, sm, hm2, sm2, hm4, sm4; REAL Dhm, Dsm, Dhp, Dsp; int i,j; // read parameters REAL k1dt = cu_Vars[0].k1dt; REAL k2dt = cu_Vars[0].k2dt; REAL k3dt = cu_Vars[0].k3dt; REAL k4dt = cu_Vars[0].k4dt; REAL k5dt = cu_Vars[0].k5dt; REAL k6dt = cu_Vars[0].k6dt; REAL KM1 = cu_Vars[0].KM1; REAL KM2 = cu_Vars[0].KM2; REAL KM3 = cu_Vars[0].KM3; REAL KM4 = cu_Vars[0].KM4; REAL alpha = cu_Vars[0].alpha; REAL beta = cu_Vars[0].beta; REAL St = cu_Vars[0].St; REAL Df = cu_Vars[0].Df; int idx=(blockIdx.y*gridDim.x+blockIdx.x)*blockDim.x+threadIdx.x; REAL factor=St/15000; if(idx>=0 && idx<NX) { h=H[idx]; s=factor*S[idx]; h2=h*h; s2=s*s; h4=h2*h2; s4=s2*s2; i=(int)(idx/N); j=idx-i*N; hp=H[j+N*(i+1)]; hm=H[j+N*(i-1)]; hp2=hp*hp; hp4=hp2*hp2; hm2=hm*hm; hm4=hm2*hm2; sp=factor*S[j+1+N*i]; sm=factor*S[j-1+N*i]; sp2=sp*sp; sp4=sp2*sp2; sm2=sm*sm; sm4=sm2*sm2; if(i>0&&i<N-1){ Ahp=P[j+N*(i+1)]*(k1dt*((1-alpha)*s2+KM1*KM1)/(s2+KM1*KM1)*hp4/(hp4+KM2*KM2*KM2*KM2)+k2dt-k3dt*hp); Ahm=P[j+N*(i-1)]*(k1dt*((1-alpha)*s2+KM1*KM1)/(s2+KM1*KM1)*hm4/(hm4+KM2*KM2*KM2*KM2)+k2dt-k3dt*hm); Dhp=P[j+N*(i+1)]-P[j+N*i]; Dhm=P[j+N*(i-1)]-P[j+N*i]; } else if (i==0){ Ahp=P[j+N*(i+1)]*(k1dt*((1-alpha)*s2+KM1*KM1)/(s2+KM1*KM1)*hp4/(hp4+KM2*KM2*KM2*KM2)+k2dt-k3dt*hp); Ahm=-P[j+N*(i)]*(k1dt*((1-alpha)*s2+KM1*KM1)/(s2+KM1*KM1)*h4/(h4+KM2*KM2*KM2*KM2)+k2dt-k3dt*h); Dhp=P[j+N*(i+1)]-P[j+N*i]; Dhm=0; } else if (i==N-1){ Ahp=-P[j+N*(i)]*(k1dt*((1-alpha)*s2+KM1*KM1)/(s2+KM1*KM1)*h4/(h4+KM2*KM2*KM2*KM2)+k2dt-k3dt*h); Ahm=P[j+N*(i-1)]*(k1dt*((1-alpha)*s2+KM1*KM1)/(s2+KM1*KM1)*hm4/(hm4+KM2*KM2*KM2*KM2)+k2dt-k3dt*hm); Dhp=0; Dhm=P[j+N*(i-1)]-P[j+N*i]; } if(j>0&&j<N-1){ Asp=P[j+1+N*i]*(k4dt*((1-beta)*h2+KM3*KM3)/(h2+KM3*KM3)*sp4/(sp4+KM4*KM4*KM4*KM4)*(St-sp)+k5dt-k6dt*sp); 
Asm=P[j-1+N*i]*(k4dt*((1-beta)*h2+KM3*KM3)/(h2+KM3*KM3)*sm4/(sm4+KM4*KM4*KM4*KM4)*(St-sm)+k5dt-k6dt*sm); Dsp=P[j+1+N*i]-P[j+N*i]; Dsm=P[j-1+N*i]-P[j+N*i]; } else if (j==0) { Asp=P[j+1+N*i]*(k4dt*((1-beta)*h2+KM3*KM3)/(h2+KM3*KM3)*sp4/(sp4+KM4*KM4*KM4*KM4)*(St-sp)+k5dt-k6dt*sp); Asm=-P[j+N*i]*(k4dt*((1-beta)*h2+KM3*KM3)/(h2+KM3*KM3)*s4/(s4+KM4*KM4*KM4*KM4)*(St-s)+k5dt-k6dt*s); Dsp=P[j+1+N*i]-P[j+N*i]; Dsm=0; } else if (j==N-1) { Asp=-P[j+N*i]*(k4dt*((1-beta)*h2+KM3*KM3)/(h2+KM3*KM3)*s4/(s4+KM4*KM4*KM4*KM4)*(St-s)+k5dt-k6dt*s); Asm=P[j-1+N*i]*(k4dt*((1-beta)*h2+KM3*KM3)/(h2+KM3*KM3)*sm4/(sm4+KM4*KM4*KM4*KM4)*(St-sm)+k5dt-k6dt*sm); Dsp=0; Dsm=P[j-1+N*i]-P[j+N*i]; } //P1[idx]=P[idx]-Ahp+Ahm-Asp+Asm+Df*(Dhp+Dhm+Dsp+Dsm); P1[idx]=P[idx]-Ahp+Ahm-Asp+Asm+Df*(Dhp+Dhm)+0.01*Df*(Dsp+Dsm); if(P1[idx]<0) P1[idx]=0; } }; __global__ void combine(COMPLEX *GNdFx, COMPLEX *GNdFy, COMPLEX *GNdF) { int idx=(blockIdx.y*gridDim.x+blockIdx.x)*blockDim.x+threadIdx.x; if(idx<NX) { GNdF[idx].re=GNdFx[idx].re+GNdFy[idx].re; GNdF[idx].im=GNdFx[idx].im+GNdFy[idx].im; } }; __global__ void copy_P(REAL *n, REAL *na) { int idx=(blockIdx.y*gridDim.x+blockIdx.x)*blockDim.x+threadIdx.x; if(idx<NX) { na[idx]=n[idx]; } } //--------------------------------------------------------------------------- //--------------------------------------------------------------------------- int main(int argc, char** argv) { dim3 dGRID,dBLOCK; int GPUID=0; REAL *H,*GH; REAL *S,*GS; REAL *R,*GR; REAL *P, *P1, *GP, *GP1, *GCT1, *GCT2; COMPLEX *GcP, *GcFx, *GcFy, *GNcP, *GNcFx, *GNcFy, *GNdF, *GNdFx, *GNdFy, *GCT, *complexdummy; REAL *cor1,*Gcor1,*realdummy; COMPLEX *cor3x,*Gcor3x, *cor3y, *Gcor3y; hipfftHandle Gfftplan; // cublass stuff------------------------------------------------ hipblasHandle_t handle; //----------------------------------------------------------------- int memNXc,memNXr; REAL k1,k2,k3,k4,k5,k6,KM1,KM2,KM3,KM4,alpha,beta,St,D,Df; REAL totP,minP,maxP; REAL LX, dt; // double qx[N],qy[N],qsq; string ext; // for file I/O string file; char datname[200],filename[200],comd[200]; int steps,interval,zahl; REAL dx; //dx=L/N double dkx,scale=N*N; int i,j,k,k0; double tim=0.,timestart=0.0; string s; cor1 = new REAL[NX]; cor3x = new COMPLEX[NX]; cor3y = new COMPLEX[NX]; S = new REAL[NX]; H = new REAL[NX]; P = new REAL[NX]; R = new REAL[NX]; P1 = new REAL[NX]; realdummy = new REAL[NX]; complexdummy = new COMPLEX[NX]; hipDeviceReset(); hipSetDevice(GPUID); dBLOCK=dim3(MAXT,1); i=512; //x blocks, limited to 256^2 !!! 
k=i*dBLOCK.x; j=(NX+k-1)/k; //y blocks // printf("i=%d j=%d k=%d dBLOCK.x=%d\n",i,j,k,dBLOCK.x); dGRID=dim3(i,j); // Create CUDA FFT plan #ifdef DBLPREC hipfftPlan2d(&Gfftplan, N, N, HIPFFT_Z2Z) ; printf("double precision code\n"); #else hipfftPlan2d(&Gfftplan, N, N, HIPFFT_C2C) ; #endif hipblasCreate(&handle); memNXc=NX*sizeof(COMPLEX); memNXr=NX*sizeof(REAL); //complex arrays on GPU hipMalloc((void**)&GcP, memNXc); hipMalloc((void**)&GcFx, memNXc); hipMalloc((void**)&GcFy, memNXc); hipMalloc((void**)&GNcP, memNXc); hipMalloc((void**)&GNcFx, memNXc); hipMalloc((void**)&GNcFy, memNXc); hipMalloc((void**)&GNdF, memNXc); hipMalloc((void**)&GNdFx, memNXc); hipMalloc((void**)&GNdFy, memNXc); hipMalloc((void**)&GCT, memNXc); hipMalloc((void**)&Gcor3x, memNXc); hipMalloc((void**)&Gcor3y, memNXc); //real arrays on GPU hipMalloc((void**)&GP,memNXr); hipMalloc((void**)&GS,memNXr); hipMalloc((void**)&GH,memNXr); hipMalloc((void**)&GR,memNXr); hipMalloc((void**)&Gcor1, memNXr); hipMalloc((void**)&GCT1,memNXr); hipMalloc((void**)&GCT2,memNXr); hipMalloc((void**)&GP1,memNXr); //////////////////////////////////////////////////////////////////////////// //////////////////////////////////////////////////////////////////////////// // parameter input const int iNTRASH=256; char trashbuffer[iNTRASH]; ifstream fin("input_FP.par"); if (!fin.good()) { cerr << "Cannot find input_FP.par" << endl; return 1; } // if const int iINPUTSIZE = 17; REAL dFIP [iINPUTSIZE]; int iFileLine = 0; while ((iFileLine < iINPUTSIZE) && (fin.good())) { fin >> dFIP[iFileLine]; fin.getline(trashbuffer, iNTRASH); //cout << dFIP[iFileLine] << "\t" << trashbuffer << //endl; iFileLine++; } // while k1=dFIP[0]; k2=dFIP[1]; k3=dFIP[2]; k4=dFIP[3]; k5=dFIP[4]; k6=dFIP[5]; KM1=dFIP[6]; KM2=dFIP[7]; KM3=dFIP[8]; KM4=dFIP[9]; alpha=dFIP[10]; beta=dFIP[11]; St=dFIP[12]; D=dFIP[13]; dt=dFIP[14]; steps=dFIP[15]; interval=dFIP[16]; fin.getline(trashbuffer, iNTRASH); fin.close(); // LX=2*NX; //system size LX=15000; //system size dx=LX/N; dkx=2.*M_PI/LX; REAL coef=1./2./dx; Df=D*dt/dx/dx; cerr << "k1: \t" << k1 << "\n"; cerr << "k2: \t" << k2 << "\n"; cerr << "k3: \t" << k3 << "\n"; cerr << "k4: \t" << k4 << "\n"; cerr << "k5: \t" << k5 << "\n"; cerr << "k6: \t" << k6 << "\n"; cerr << "KM1: \t" << KM1 << "\n"; cerr << "KM2: \t" << KM2 << "\n"; cerr << "KM3: \t" << KM3 << "\n"; cerr << "KM4: \t" << KM4 << "\n"; cerr << "alpha: \t" <<alpha << "\n"; cerr << "beta: \t" <<beta << "\n"; cerr << "St: \t" <<St << "\n"; cerr << "D: \t" << D << "\n"; cerr << "LX: \t" << LX << "\n"; cerr << "dt: \t" << dt << "\n"; cerr << "steps: \t" << steps << "\n"; cerr << "interval: \t" << interval << "\n"; k1=coef*k1; k2=coef*k2; k3=coef*k3; k4=coef*k4; k5=coef*k5; k6=coef*k6; // sysvar sv_Vars [NX]; sysvar sv_Vars[1]; // parameters sv_Vars[0].k1dt = k1*dt; sv_Vars[0].k2dt = k2*dt; sv_Vars[0].k3dt = k3*dt; sv_Vars[0].k4dt = k4*dt; sv_Vars[0].k5dt = k5*dt; sv_Vars[0].k6dt = k6*dt; sv_Vars[0].KM1 = KM1; sv_Vars[0].KM2 = KM2; sv_Vars[0].KM3 = KM3; sv_Vars[0].KM4 = KM4; sv_Vars[0].alpha = alpha; sv_Vars[0].beta = beta; sv_Vars[0].St = St; sv_Vars[0].Df = Df; // allocate memory on the device sysvar* cu_Vars; size_t size_Vars = sizeof(sv_Vars); hipMalloc((void**)&cu_Vars, size_Vars); // copy to the device hipMemcpy(cu_Vars, sv_Vars, size_Vars, hipMemcpyHostToDevice); // hipMemcpy(sv_Vars, cu_Vars, size_Vars, hipMemcpyDeviceToHost); // cerr << "gamma1dt: \t" << sv_Vars[0].gamma1dt << "\n"; /* CUDA's random number library uses hiprandState_t to keep track of * the seed value we 
will store a random state for every thread */ hiprandState_t* states; /* allocate space on the GPU for the random states */ hipMalloc((void**) &states, NX * sizeof(hiprandState_t)); /* invoke the GPU to initialize all of the random states */ //init<<<dGRID, dBLOCK>>>(time(0), states); hipLaunchKernelGGL(( init), dim3(dGRID), dim3(dBLOCK), 0, 0, time(0), states); /* allocate an array of floats on the CPU and GPU */ // float cpu_nums[NX]; // float cpu_nums1[NX]; REAL *gpu_nums1; hipMalloc((void**) &gpu_nums1, NX * sizeof(REAL)); // randoms <<<dGRID, dBLOCK>>>(states, gpu_nums1); // hipMemcpy(cpu_nums, gpu_nums1, NX * sizeof(float), hipMemcpyDeviceToHost); // for(i=0;i<200;i++) // for(i=NX-20;i<NX;i++) // printf("test: %f\n",cpu_nums[i]); //new simulation for(i=0; i<N; i++) for(j=0; j<N; j++) { H[j+N*i]=dx*i; S[j+N*i]=dx*j; } // initial conditions mark int NEW; if(argc<2) NEW=1; else NEW=0; if(NEW==1){ k0=0; for(i=0; i<N; i++) for(i=0; i<N; i++){ P[j+N*i]=0; realdummy[j+N*i] = 0.; complexdummy[j+N*i].re = 0.; complexdummy[j+N*i].im = 0.; } REAL W=2000; totP=0; for(i=0; i<N; i++) for(j=0; j<N; j++) { REAL arg1=dx*(i-1.*N/4.)/W; REAL arg2=dx*(j-N/4.)/W; P[j+N*i]=exp(-(arg1*arg1+arg2*arg2)); totP+=P[j+N*i] ; realdummy[j+N*i] = 0.; complexdummy[j+N*i].re = 0.; complexdummy[j+N*i].im = 0.; } cout << "initial totP:" << "\t" << totP <<endl; } else { ifstream fin1("last_snapshot.dat"); if (!fin1.good()) { cerr << "Cannot find last_snapshot.dat" << endl; return 1; } // if i = 0; fin1 >> k0; fin1.getline(trashbuffer, iNTRASH); while ((i < NX) && (fin.good())) { fin1 >> P[i]; fin1.getline(trashbuffer, iNTRASH); i++; totP+=P[i] ; } // while fin1.close(); cout << "initial totP:" << "\t" << totP <<endl; } // //output params in file // file=ext+"/params.dat"; //add path to file file="params.dat"; //add path to file strcpy(filename,file.c_str()); //copy to cstring ofstream outsp; outsp.open (filename,ofstream::out ); //open file { outsp << "parameters" << endl << "----------" << endl << "grid size:\t" << N << endl << "steps :\t" << steps << endl << "L :\t" << LX << endl << "dt :\t" << dt << endl << "k1 :\t" << k1 << endl << "k2 :\t" << k2 << endl << "k3 :\t" << k3 << endl << "k4 :\t" << k4 << endl << "k5 :\t" << k5 << endl << "k6 :\t" << k6 << endl << "KM1 :\t" << KM1 << endl << "KM2 :\t" << KM2 << endl << "KM3 :\t" << KM3 << endl << "KM4 :\t" << KM4 << endl << "alpha :\t" << alpha << endl << "beta :\t" << beta << endl << "St :\t" << St << endl << "D :\t" << D << endl; } outsp.close(); //calculate q's for fourier operations for(i=0; i<=N/2; i++) { qx[i]=dkx*i; //dk/2*i; qy[i]=dkx*i; //dk/2*i; } for(i=1; i<N/2; i++) { qx[N-i]=-dkx*i; //-dk/2*i; qy[N-i]=-dkx*i; //-dk/2*i; } // qx[N/2]=0; (only for first derivatives) /* //cout << endl; cout << "----------------------------" << endl; cout << "qmax : " << qx[N/2] << endl; cout << "Deltaq : " << qx[1] << "\t" << dk << endl; cout << "sys-L : " << L << "\t" << N*dx << endl; cout << "dx : " << dx << endl; cout << "----------------------------" << endl; */ // cor matrix for(i=0; i<N; i++) for (j=0; j<N; j++) { qsq=qx[i]*qx[i]+qy[j]*qy[j]; cor1[j+N*i]=exp(-dt*D*qsq)/scale; cor3x[j+N*i].re=0; cor3x[j+N*i].im=qx[i]*exp(-0.1*D*dt*qsq)/scale; cor3y[j+N*i].re=0; cor3y[j+N*i].im=qy[j]*exp(-0.1*D*dt*qsq)/scale; } hipMemcpy(GS, S, memNXr, hipMemcpyHostToDevice); hipMemcpy(GH, H, memNXr, hipMemcpyHostToDevice); hipMemcpy(GP, P, memNXr, hipMemcpyHostToDevice); hipMemcpy(Gcor1, cor1, memNXr, hipMemcpyHostToDevice); hipMemcpy(Gcor3x, cor3x, memNXc, hipMemcpyHostToDevice); 
hipMemcpy(Gcor3y, cor3y, memNXc, hipMemcpyHostToDevice); //initialize all other GPU Arrays with dummy zeros - seems to be necessary for older //Graphic Cards like GeForce GTX 285 // hipMemcpy(GNcS, complexdummy, memNXc, hipMemcpyHostToDevice); // hipMemcpy(GNdS, complexdummy, memNXc, hipMemcpyHostToDevice); int iout=0; //timestep //-------------------------------------------------------------------------------------- for(k=k0; k<steps+1; k++) { tim+=dt; // plot output // if(k%interval==0 && k!=0) if(k%interval==0 ) { zahl=int(timestart)+int(k/interval); cout << "time=" << k*dt << endl; //---copy data from GPU to CPU ----------------- // hipMemcpy(S, GS, memNXr, hipMemcpyDeviceToHost); // hipMemcpy(H, GH, memNXr, hipMemcpyDeviceToHost); hipMemcpy(P, GP, memNXr, hipMemcpyDeviceToHost); hipMemcpy(R, GR, memNXr, hipMemcpyDeviceToHost); //output P in file sprintf(datname,"n_%5.5d.dat",zahl); // file=ext+"/"+datname; //add path to file file=datname; strcpy(filename,file.c_str()); //copy to cstring ofstream outsr; outsr.open (filename, ofstream::out ); for(i=0; i<NX; i++) { outsr // << i*dx << "\t" << P[i] // << "\t" << R[i] << endl; } outsr.close(); system("rm -f last_snapshot.dat"); file="last_snapshot.dat"; strcpy(filename,file.c_str()); //copy to cstring outsr.open (filename, ofstream::out ); outsr << k << endl; /* for(i=0; i<NX; i++) { outsr << P[i] << endl; } */ outsr.close(); sprintf(comd,"cat %s >> last_snapshot.dat",datname); system(comd); totP=0; minP=1000; maxP=-1000; for(i=0; i<N; i++) for(j=0; j<N; j++) { totP+=P[j+N*i] ; if (P[j+N*i]>maxP)maxP=P[j+N*i]; if (P[j+N*i]<minP)minP=P[j+N*i]; } cout << "current totP: " << "\t" << totP << " minP: " << minP << " maxP: " << maxP <<endl; iout++; }// end output //////////////////////////////////////////////////////////////////// // compute diffusion and drift terms int SH=0; REAL totPb=0; REAL totPa=0; if(SH==1){ hipMemcpy(P, GP, memNXr, hipMemcpyDeviceToHost); for(i=0; i<NX; i++) { totPb+=P[i] ; } // cout << "totP before:" << "\t" << totP <<endl; } hipLaunchKernelGGL(( flux), dim3(dGRID), dim3(dBLOCK), 0, 0, GH, GS, GP, GP1, cu_Vars); //compute d/dH(Ax(H,S)*P) and d/dS(Ay(H,S)*P) hipLaunchKernelGGL(( copy_P), dim3(dGRID), dim3(dBLOCK), 0, 0, GP1, GP); //copy GP1 into GP if(SH==1){ hipMemcpy(P1, GP1, memNXr, hipMemcpyDeviceToHost); for(i=0; i<NX; i++) { totPa+=P1[i] ; } //if(k%interval==0 ) if(k==0 ) cout << "totP diff=" << totPa-totPb <<endl; } /////////////////////////////////////////////////////////////////////////////////////////////// }// end timestep totP=0; minP=1000; maxP=-1000; for(i=0; i<NX; i++) { totP+=P[i] ; if (P[i]>maxP)maxP=P[i]; if (P[i]<minP)minP=P[i]; } cout << "current totP:" << "\t" << totP << "minP:" << minP << "maxP:" << maxP <<endl; /* ofstream outsr1; outsr1.open ("density.d", ofstream::out | ofstream::app ); outsr1 << St << "\t" << "\t" << totP << endl; outsr1.close(); */ // free up the allocated memory on the device delete[] P; delete[] cor1; delete[] realdummy; delete[] complexdummy; hipFree(GP); hipFree(GcP); hipFree(GcFx); hipFree(GcFy); hipFree(GNcP); hipFree(GNcFx); hipFree(GNcFy); hipFree(GNdF); hipFree(GNdFx); hipFree(GNdFy); hipFree(Gcor1); hipFree(Gcor3x); hipFree(Gcor3y); hipfftDestroy(Gfftplan); hipblasDestroy(handle); /* free the memory we allocated for the states and numbers */ hipFree(states); hipFree(gpu_nums1); hipError_t errcode = hipGetLastError(); return 0; }
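/* A minimal stability-check sketch: flux() above performs an explicit
   (forward-Euler) update, so the diffusion numbers it uses (Df along h and
   0.01*Df along s) must stay small.  The guard below applies the standard
   FTCS heuristic that their sum should not exceed 1/2; the drift terms make
   the true limit problem-dependent, so this is only a rough check, and the
   function name is an illustrative assumption. */
static void check_diffusion_numbers(REAL Df)
{
    REAL total = Df + (REAL)0.01*Df;     /* same coefficients as in flux() */
    if (total > (REAL)0.5) {
        fprintf(stderr,
                "warning: Df=%g gives Df+0.01*Df=%g > 0.5; explicit update may be unstable\n",
                (double)Df, (double)total);
    }
}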
347b512c6c960b00450c65883a39c44998482a07.cu
////---------------------------------------- // two-dimensional Fokker-Planck equation // //---------------------------------------- // parameters of simulations are in "input_FP.par" // // compile: // nvcc fokker-planck_2d.cu -lcuda -lcufft -lcublas -O3 -arch sm_20 -o fp2d // // produces output files "n_#####.dat" with (i, j, n, S) columns // // // // uses complex.h // and semi-implicit algorithm #include<iostream> #include<fstream> #include<cstring> #include<stdlib.h> #include<math.h> #include <iomanip> #include <cmath> #include <queue> /* we need these includes for CUDA's random number stuff */ #include <unistd.h> #include <stdio.h> #include <curand.h> #include <curand_kernel.h> //#include "fileutils.h" //#include "stringutils.h" // #include<complex.h> // #include<fftw3.h> #include <cuda.h> #include <cufft.h> #include <cublas_v2.h> #include <cuda_runtime.h> // #include <cutil_inline.h> // #include "reduction_kernel.cu" #define N 256// was 256 #define NX (N*N) using namespace std; //#define DBLPREC #ifdef DBLPREC #define MAXT 512 //was 512 typedef double REAL; #else #define MAXT 128 //was 1024 typedef float REAL; #endif typedef struct { REAL re; REAL im; } COMPLEX; /////////////////////////////////////////////////////////////////////////////////////////////// /////////////////////////////////////////////////////////////////////////////////////////////// /* this GPU kernel function is used to initialize the random states */ __global__ void init(unsigned int seed, curandState_t* states) { int idx=(blockIdx.y*gridDim.x+blockIdx.x)*blockDim.x+threadIdx.x; /* we have to initialize the state */ if(idx<NX) { curand_init(seed, /* the seed can be the same for each core, here we pass the time in from the CPU */ idx, /* the sequence number should be different for each core (unless you want all cores to get the same sequence of numbers for some reason - use thread id! 
*/ 0, /* the offset is how much extra we advance in the sequence for each call, can be 0 */ &states[idx]); } } typedef struct { // parameters REAL k1dt, k2dt, k3dt, k4dt, k5dt, k6dt, KM1, KM2, KM3, KM4, Df, alpha, beta, St; } sysvar; //=========================================================================== double rn() { return drand48(); } //=========================================================================== //CUDA Kernels //=========================================================================== __global__ void r2c(COMPLEX *cS, REAL *S, REAL *S1 ) { int idx=(blockIdx.y*gridDim.x+blockIdx.x)*blockDim.x+threadIdx.x; if(idx<NX) { cS[idx].re=S[idx]; cS[idx].im=S1[idx]; } }; __global__ void r2c1(COMPLEX *cS, REAL *S ) { int idx=(blockIdx.y*gridDim.x+blockIdx.x)*blockDim.x+threadIdx.x; if(idx<NX) { cS[idx].re=S[idx]; cS[idx].im=0.; } }; __global__ void c2r(COMPLEX *cS, REAL *S, REAL *S1 ) { int idx=(blockIdx.y*gridDim.x+blockIdx.x)*blockDim.x+threadIdx.x; if(idx<NX) { S[idx]=cS[idx].re; S1[idx]=cS[idx].im; } }; /* this GPU kernel takes an array of states, and an array of ints, and * puts a random int into each */ __global__ void randoms(curandState_t* states, REAL * numbers) { /* curand works like rand - except that it takes a state as a parameter */ int idx=(blockIdx.y*gridDim.x+blockIdx.x)*blockDim.x+threadIdx.x; if(idx<NX) { numbers[idx] = curand_uniform(&states[idx]); } } //--------------------------------------------------------------------------- //update P //--------------------------------------------------------------------------- __global__ void flux(REAL *H, REAL *S, REAL *P, REAL *P1, sysvar* cu_Vars) { REAL h, s, h2, s2, h4, s4; REAL Ahp, Asp, hp, sp, hp2, sp2, hp4, sp4; REAL Ahm, Asm, hm, sm, hm2, sm2, hm4, sm4; REAL Dhm, Dsm, Dhp, Dsp; int i,j; // read parameters REAL k1dt = cu_Vars[0].k1dt; REAL k2dt = cu_Vars[0].k2dt; REAL k3dt = cu_Vars[0].k3dt; REAL k4dt = cu_Vars[0].k4dt; REAL k5dt = cu_Vars[0].k5dt; REAL k6dt = cu_Vars[0].k6dt; REAL KM1 = cu_Vars[0].KM1; REAL KM2 = cu_Vars[0].KM2; REAL KM3 = cu_Vars[0].KM3; REAL KM4 = cu_Vars[0].KM4; REAL alpha = cu_Vars[0].alpha; REAL beta = cu_Vars[0].beta; REAL St = cu_Vars[0].St; REAL Df = cu_Vars[0].Df; int idx=(blockIdx.y*gridDim.x+blockIdx.x)*blockDim.x+threadIdx.x; REAL factor=St/15000; if(idx>=0 && idx<NX) { h=H[idx]; s=factor*S[idx]; h2=h*h; s2=s*s; h4=h2*h2; s4=s2*s2; i=(int)(idx/N); j=idx-i*N; hp=H[j+N*(i+1)]; hm=H[j+N*(i-1)]; hp2=hp*hp; hp4=hp2*hp2; hm2=hm*hm; hm4=hm2*hm2; sp=factor*S[j+1+N*i]; sm=factor*S[j-1+N*i]; sp2=sp*sp; sp4=sp2*sp2; sm2=sm*sm; sm4=sm2*sm2; if(i>0&&i<N-1){ Ahp=P[j+N*(i+1)]*(k1dt*((1-alpha)*s2+KM1*KM1)/(s2+KM1*KM1)*hp4/(hp4+KM2*KM2*KM2*KM2)+k2dt-k3dt*hp); Ahm=P[j+N*(i-1)]*(k1dt*((1-alpha)*s2+KM1*KM1)/(s2+KM1*KM1)*hm4/(hm4+KM2*KM2*KM2*KM2)+k2dt-k3dt*hm); Dhp=P[j+N*(i+1)]-P[j+N*i]; Dhm=P[j+N*(i-1)]-P[j+N*i]; } else if (i==0){ Ahp=P[j+N*(i+1)]*(k1dt*((1-alpha)*s2+KM1*KM1)/(s2+KM1*KM1)*hp4/(hp4+KM2*KM2*KM2*KM2)+k2dt-k3dt*hp); Ahm=-P[j+N*(i)]*(k1dt*((1-alpha)*s2+KM1*KM1)/(s2+KM1*KM1)*h4/(h4+KM2*KM2*KM2*KM2)+k2dt-k3dt*h); Dhp=P[j+N*(i+1)]-P[j+N*i]; Dhm=0; } else if (i==N-1){ Ahp=-P[j+N*(i)]*(k1dt*((1-alpha)*s2+KM1*KM1)/(s2+KM1*KM1)*h4/(h4+KM2*KM2*KM2*KM2)+k2dt-k3dt*h); Ahm=P[j+N*(i-1)]*(k1dt*((1-alpha)*s2+KM1*KM1)/(s2+KM1*KM1)*hm4/(hm4+KM2*KM2*KM2*KM2)+k2dt-k3dt*hm); Dhp=0; Dhm=P[j+N*(i-1)]-P[j+N*i]; } if(j>0&&j<N-1){ Asp=P[j+1+N*i]*(k4dt*((1-beta)*h2+KM3*KM3)/(h2+KM3*KM3)*sp4/(sp4+KM4*KM4*KM4*KM4)*(St-sp)+k5dt-k6dt*sp); 
Asm=P[j-1+N*i]*(k4dt*((1-beta)*h2+KM3*KM3)/(h2+KM3*KM3)*sm4/(sm4+KM4*KM4*KM4*KM4)*(St-sm)+k5dt-k6dt*sm); Dsp=P[j+1+N*i]-P[j+N*i]; Dsm=P[j-1+N*i]-P[j+N*i]; } else if (j==0) { Asp=P[j+1+N*i]*(k4dt*((1-beta)*h2+KM3*KM3)/(h2+KM3*KM3)*sp4/(sp4+KM4*KM4*KM4*KM4)*(St-sp)+k5dt-k6dt*sp); Asm=-P[j+N*i]*(k4dt*((1-beta)*h2+KM3*KM3)/(h2+KM3*KM3)*s4/(s4+KM4*KM4*KM4*KM4)*(St-s)+k5dt-k6dt*s); Dsp=P[j+1+N*i]-P[j+N*i]; Dsm=0; } else if (j==N-1) { Asp=-P[j+N*i]*(k4dt*((1-beta)*h2+KM3*KM3)/(h2+KM3*KM3)*s4/(s4+KM4*KM4*KM4*KM4)*(St-s)+k5dt-k6dt*s); Asm=P[j-1+N*i]*(k4dt*((1-beta)*h2+KM3*KM3)/(h2+KM3*KM3)*sm4/(sm4+KM4*KM4*KM4*KM4)*(St-sm)+k5dt-k6dt*sm); Dsp=0; Dsm=P[j-1+N*i]-P[j+N*i]; } //P1[idx]=P[idx]-Ahp+Ahm-Asp+Asm+Df*(Dhp+Dhm+Dsp+Dsm); P1[idx]=P[idx]-Ahp+Ahm-Asp+Asm+Df*(Dhp+Dhm)+0.01*Df*(Dsp+Dsm); if(P1[idx]<0) P1[idx]=0; } }; __global__ void combine(COMPLEX *GNdFx, COMPLEX *GNdFy, COMPLEX *GNdF) { int idx=(blockIdx.y*gridDim.x+blockIdx.x)*blockDim.x+threadIdx.x; if(idx<NX) { GNdF[idx].re=GNdFx[idx].re+GNdFy[idx].re; GNdF[idx].im=GNdFx[idx].im+GNdFy[idx].im; } }; __global__ void copy_P(REAL *n, REAL *na) { int idx=(blockIdx.y*gridDim.x+blockIdx.x)*blockDim.x+threadIdx.x; if(idx<NX) { na[idx]=n[idx]; } } //--------------------------------------------------------------------------- //--------------------------------------------------------------------------- int main(int argc, char** argv) { dim3 dGRID,dBLOCK; int GPUID=0; REAL *H,*GH; REAL *S,*GS; REAL *R,*GR; REAL *P, *P1, *GP, *GP1, *GCT1, *GCT2; COMPLEX *GcP, *GcFx, *GcFy, *GNcP, *GNcFx, *GNcFy, *GNdF, *GNdFx, *GNdFy, *GCT, *complexdummy; REAL *cor1,*Gcor1,*realdummy; COMPLEX *cor3x,*Gcor3x, *cor3y, *Gcor3y; cufftHandle Gfftplan; // cublass stuff------------------------------------------------ cublasHandle_t handle; //----------------------------------------------------------------- int memNXc,memNXr; REAL k1,k2,k3,k4,k5,k6,KM1,KM2,KM3,KM4,alpha,beta,St,D,Df; REAL totP,minP,maxP; REAL LX, dt; // double qx[N],qy[N],qsq; string ext; // for file I/O string file; char datname[200],filename[200],comd[200]; int steps,interval,zahl; REAL dx; //dx=L/N double dkx,scale=N*N; int i,j,k,k0; double tim=0.,timestart=0.0; string s; cor1 = new REAL[NX]; cor3x = new COMPLEX[NX]; cor3y = new COMPLEX[NX]; S = new REAL[NX]; H = new REAL[NX]; P = new REAL[NX]; R = new REAL[NX]; P1 = new REAL[NX]; realdummy = new REAL[NX]; complexdummy = new COMPLEX[NX]; cudaThreadExit(); cudaSetDevice(GPUID); dBLOCK=dim3(MAXT,1); i=512; //x blocks, limited to 256^2 !!! 
k=i*dBLOCK.x; j=(NX+k-1)/k; //y blocks // printf("i=%d j=%d k=%d dBLOCK.x=%d\n",i,j,k,dBLOCK.x); dGRID=dim3(i,j); // Create CUDA FFT plan #ifdef DBLPREC cufftPlan2d(&Gfftplan, N, N, CUFFT_Z2Z) ; printf("double precision code\n"); #else cufftPlan2d(&Gfftplan, N, N, CUFFT_C2C) ; #endif cublasCreate(&handle); memNXc=NX*sizeof(COMPLEX); memNXr=NX*sizeof(REAL); //complex arrays on GPU cudaMalloc((void**)&GcP, memNXc); cudaMalloc((void**)&GcFx, memNXc); cudaMalloc((void**)&GcFy, memNXc); cudaMalloc((void**)&GNcP, memNXc); cudaMalloc((void**)&GNcFx, memNXc); cudaMalloc((void**)&GNcFy, memNXc); cudaMalloc((void**)&GNdF, memNXc); cudaMalloc((void**)&GNdFx, memNXc); cudaMalloc((void**)&GNdFy, memNXc); cudaMalloc((void**)&GCT, memNXc); cudaMalloc((void**)&Gcor3x, memNXc); cudaMalloc((void**)&Gcor3y, memNXc); //real arrays on GPU cudaMalloc((void**)&GP,memNXr); cudaMalloc((void**)&GS,memNXr); cudaMalloc((void**)&GH,memNXr); cudaMalloc((void**)&GR,memNXr); cudaMalloc((void**)&Gcor1, memNXr); cudaMalloc((void**)&GCT1,memNXr); cudaMalloc((void**)&GCT2,memNXr); cudaMalloc((void**)&GP1,memNXr); //////////////////////////////////////////////////////////////////////////// //////////////////////////////////////////////////////////////////////////// // parameter input const int iNTRASH=256; char trashbuffer[iNTRASH]; ifstream fin("input_FP.par"); if (!fin.good()) { cerr << "Cannot find input_FP.par" << endl; return 1; } // if const int iINPUTSIZE = 17; REAL dFIP [iINPUTSIZE]; int iFileLine = 0; while ((iFileLine < iINPUTSIZE) && (fin.good())) { fin >> dFIP[iFileLine]; fin.getline(trashbuffer, iNTRASH); //cout << dFIP[iFileLine] << "\t" << trashbuffer << //endl; iFileLine++; } // while k1=dFIP[0]; k2=dFIP[1]; k3=dFIP[2]; k4=dFIP[3]; k5=dFIP[4]; k6=dFIP[5]; KM1=dFIP[6]; KM2=dFIP[7]; KM3=dFIP[8]; KM4=dFIP[9]; alpha=dFIP[10]; beta=dFIP[11]; St=dFIP[12]; D=dFIP[13]; dt=dFIP[14]; steps=dFIP[15]; interval=dFIP[16]; fin.getline(trashbuffer, iNTRASH); fin.close(); // LX=2*NX; //system size LX=15000; //system size dx=LX/N; dkx=2.*M_PI/LX; REAL coef=1./2./dx; Df=D*dt/dx/dx; cerr << "k1: \t" << k1 << "\n"; cerr << "k2: \t" << k2 << "\n"; cerr << "k3: \t" << k3 << "\n"; cerr << "k4: \t" << k4 << "\n"; cerr << "k5: \t" << k5 << "\n"; cerr << "k6: \t" << k6 << "\n"; cerr << "KM1: \t" << KM1 << "\n"; cerr << "KM2: \t" << KM2 << "\n"; cerr << "KM3: \t" << KM3 << "\n"; cerr << "KM4: \t" << KM4 << "\n"; cerr << "alpha: \t" <<alpha << "\n"; cerr << "beta: \t" <<beta << "\n"; cerr << "St: \t" <<St << "\n"; cerr << "D: \t" << D << "\n"; cerr << "LX: \t" << LX << "\n"; cerr << "dt: \t" << dt << "\n"; cerr << "steps: \t" << steps << "\n"; cerr << "interval: \t" << interval << "\n"; k1=coef*k1; k2=coef*k2; k3=coef*k3; k4=coef*k4; k5=coef*k5; k6=coef*k6; // sysvar sv_Vars [NX]; sysvar sv_Vars[1]; // parameters sv_Vars[0].k1dt = k1*dt; sv_Vars[0].k2dt = k2*dt; sv_Vars[0].k3dt = k3*dt; sv_Vars[0].k4dt = k4*dt; sv_Vars[0].k5dt = k5*dt; sv_Vars[0].k6dt = k6*dt; sv_Vars[0].KM1 = KM1; sv_Vars[0].KM2 = KM2; sv_Vars[0].KM3 = KM3; sv_Vars[0].KM4 = KM4; sv_Vars[0].alpha = alpha; sv_Vars[0].beta = beta; sv_Vars[0].St = St; sv_Vars[0].Df = Df; // allocate memory on the device sysvar* cu_Vars; size_t size_Vars = sizeof(sv_Vars); cudaMalloc((void**)&cu_Vars, size_Vars); // copy to the device cudaMemcpy(cu_Vars, sv_Vars, size_Vars, cudaMemcpyHostToDevice); // cudaMemcpy(sv_Vars, cu_Vars, size_Vars, cudaMemcpyDeviceToHost); // cerr << "gamma1dt: \t" << sv_Vars[0].gamma1dt << "\n"; /* CUDA's random number library uses curandState_t to keep track of * 
the seed value we will store a random state for every thread */ curandState_t* states; /* allocate space on the GPU for the random states */ cudaMalloc((void**) &states, NX * sizeof(curandState_t)); /* invoke the GPU to initialize all of the random states */ //init<<<dGRID, dBLOCK>>>(time(0), states); init<<<dGRID, dBLOCK>>>(time(0), states); /* allocate an array of floats on the CPU and GPU */ // float cpu_nums[NX]; // float cpu_nums1[NX]; REAL *gpu_nums1; cudaMalloc((void**) &gpu_nums1, NX * sizeof(REAL)); // randoms <<<dGRID, dBLOCK>>>(states, gpu_nums1); // cudaMemcpy(cpu_nums, gpu_nums1, NX * sizeof(float), cudaMemcpyDeviceToHost); // for(i=0;i<200;i++) // for(i=NX-20;i<NX;i++) // printf("test: %f\n",cpu_nums[i]); //new simulation for(i=0; i<N; i++) for(j=0; j<N; j++) { H[j+N*i]=dx*i; S[j+N*i]=dx*j; } // initial conditions mark int NEW; if(argc<2) NEW=1; else NEW=0; if(NEW==1){ k0=0; for(i=0; i<N; i++) for(i=0; i<N; i++){ P[j+N*i]=0; realdummy[j+N*i] = 0.; complexdummy[j+N*i].re = 0.; complexdummy[j+N*i].im = 0.; } REAL W=2000; totP=0; for(i=0; i<N; i++) for(j=0; j<N; j++) { REAL arg1=dx*(i-1.*N/4.)/W; REAL arg2=dx*(j-N/4.)/W; P[j+N*i]=exp(-(arg1*arg1+arg2*arg2)); totP+=P[j+N*i] ; realdummy[j+N*i] = 0.; complexdummy[j+N*i].re = 0.; complexdummy[j+N*i].im = 0.; } cout << "initial totP:" << "\t" << totP <<endl; } else { ifstream fin1("last_snapshot.dat"); if (!fin1.good()) { cerr << "Cannot find last_snapshot.dat" << endl; return 1; } // if i = 0; fin1 >> k0; fin1.getline(trashbuffer, iNTRASH); while ((i < NX) && (fin.good())) { fin1 >> P[i]; fin1.getline(trashbuffer, iNTRASH); i++; totP+=P[i] ; } // while fin1.close(); cout << "initial totP:" << "\t" << totP <<endl; } // //output params in file // file=ext+"/params.dat"; //add path to file file="params.dat"; //add path to file strcpy(filename,file.c_str()); //copy to cstring ofstream outsp; outsp.open (filename,ofstream::out ); //open file { outsp << "parameters" << endl << "----------" << endl << "grid size:\t" << N << endl << "steps :\t" << steps << endl << "L :\t" << LX << endl << "dt :\t" << dt << endl << "k1 :\t" << k1 << endl << "k2 :\t" << k2 << endl << "k3 :\t" << k3 << endl << "k4 :\t" << k4 << endl << "k5 :\t" << k5 << endl << "k6 :\t" << k6 << endl << "KM1 :\t" << KM1 << endl << "KM2 :\t" << KM2 << endl << "KM3 :\t" << KM3 << endl << "KM4 :\t" << KM4 << endl << "alpha :\t" << alpha << endl << "beta :\t" << beta << endl << "St :\t" << St << endl << "D :\t" << D << endl; } outsp.close(); //calculate q's for fourier operations for(i=0; i<=N/2; i++) { qx[i]=dkx*i; //dk/2*i; qy[i]=dkx*i; //dk/2*i; } for(i=1; i<N/2; i++) { qx[N-i]=-dkx*i; //-dk/2*i; qy[N-i]=-dkx*i; //-dk/2*i; } // qx[N/2]=0; (only for first derivatives) /* //cout << endl; cout << "----------------------------" << endl; cout << "qmax : " << qx[N/2] << endl; cout << "Deltaq : " << qx[1] << "\t" << dk << endl; cout << "sys-L : " << L << "\t" << N*dx << endl; cout << "dx : " << dx << endl; cout << "----------------------------" << endl; */ // cor matrix for(i=0; i<N; i++) for (j=0; j<N; j++) { qsq=qx[i]*qx[i]+qy[j]*qy[j]; cor1[j+N*i]=exp(-dt*D*qsq)/scale; cor3x[j+N*i].re=0; cor3x[j+N*i].im=qx[i]*exp(-0.1*D*dt*qsq)/scale; cor3y[j+N*i].re=0; cor3y[j+N*i].im=qy[j]*exp(-0.1*D*dt*qsq)/scale; } cudaMemcpy(GS, S, memNXr, cudaMemcpyHostToDevice); cudaMemcpy(GH, H, memNXr, cudaMemcpyHostToDevice); cudaMemcpy(GP, P, memNXr, cudaMemcpyHostToDevice); cudaMemcpy(Gcor1, cor1, memNXr, cudaMemcpyHostToDevice); cudaMemcpy(Gcor3x, cor3x, memNXc, cudaMemcpyHostToDevice); 
cudaMemcpy(Gcor3y, cor3y, memNXc, cudaMemcpyHostToDevice); //initialize all other GPU Arrays with dummy zeros - seems to be necessary for older //Graphic Cards like GeForce GTX 285 // cudaMemcpy(GNcS, complexdummy, memNXc, cudaMemcpyHostToDevice); // cudaMemcpy(GNdS, complexdummy, memNXc, cudaMemcpyHostToDevice); int iout=0; //timestep //-------------------------------------------------------------------------------------- for(k=k0; k<steps+1; k++) { tim+=dt; // plot output // if(k%interval==0 && k!=0) if(k%interval==0 ) { zahl=int(timestart)+int(k/interval); cout << "time=" << k*dt << endl; //---copy data from GPU to CPU ----------------- // cudaMemcpy(S, GS, memNXr, cudaMemcpyDeviceToHost); // cudaMemcpy(H, GH, memNXr, cudaMemcpyDeviceToHost); cudaMemcpy(P, GP, memNXr, cudaMemcpyDeviceToHost); cudaMemcpy(R, GR, memNXr, cudaMemcpyDeviceToHost); //output P in file sprintf(datname,"n_%5.5d.dat",zahl); // file=ext+"/"+datname; //add path to file file=datname; strcpy(filename,file.c_str()); //copy to cstring ofstream outsr; outsr.open (filename, ofstream::out ); for(i=0; i<NX; i++) { outsr // << i*dx << "\t" << P[i] // << "\t" << R[i] << endl; } outsr.close(); system("rm -f last_snapshot.dat"); file="last_snapshot.dat"; strcpy(filename,file.c_str()); //copy to cstring outsr.open (filename, ofstream::out ); outsr << k << endl; /* for(i=0; i<NX; i++) { outsr << P[i] << endl; } */ outsr.close(); sprintf(comd,"cat %s >> last_snapshot.dat",datname); system(comd); totP=0; minP=1000; maxP=-1000; for(i=0; i<N; i++) for(j=0; j<N; j++) { totP+=P[j+N*i] ; if (P[j+N*i]>maxP)maxP=P[j+N*i]; if (P[j+N*i]<minP)minP=P[j+N*i]; } cout << "current totP: " << "\t" << totP << " minP: " << minP << " maxP: " << maxP <<endl; iout++; }// end output //////////////////////////////////////////////////////////////////// // compute diffusion and drift terms int SH=0; REAL totPb=0; REAL totPa=0; if(SH==1){ cudaMemcpy(P, GP, memNXr, cudaMemcpyDeviceToHost); for(i=0; i<NX; i++) { totPb+=P[i] ; } // cout << "totP before:" << "\t" << totP <<endl; } flux<<<dGRID, dBLOCK>>>(GH, GS, GP, GP1, cu_Vars); //compute d/dH(Ax(H,S)*P) and d/dS(Ay(H,S)*P) copy_P<<<dGRID, dBLOCK>>>(GP1, GP); //copy GP1 into GP if(SH==1){ cudaMemcpy(P1, GP1, memNXr, cudaMemcpyDeviceToHost); for(i=0; i<NX; i++) { totPa+=P1[i] ; } //if(k%interval==0 ) if(k==0 ) cout << "totP diff=" << totPa-totPb <<endl; } /////////////////////////////////////////////////////////////////////////////////////////////// }// end timestep totP=0; minP=1000; maxP=-1000; for(i=0; i<NX; i++) { totP+=P[i] ; if (P[i]>maxP)maxP=P[i]; if (P[i]<minP)minP=P[i]; } cout << "current totP:" << "\t" << totP << "minP:" << minP << "maxP:" << maxP <<endl; /* ofstream outsr1; outsr1.open ("density.d", ofstream::out | ofstream::app ); outsr1 << St << "\t" << "\t" << totP << endl; outsr1.close(); */ // free up the allocated memory on the device delete[] P; delete[] cor1; delete[] realdummy; delete[] complexdummy; cudaFree(GP); cudaFree(GcP); cudaFree(GcFx); cudaFree(GcFy); cudaFree(GNcP); cudaFree(GNcFx); cudaFree(GNcFy); cudaFree(GNdF); cudaFree(GNdFx); cudaFree(GNdFy); cudaFree(Gcor1); cudaFree(Gcor3x); cudaFree(Gcor3y); cufftDestroy(Gfftplan); cublasDestroy(handle); /* free the memory we allocated for the states and numbers */ cudaFree(states); cudaFree(gpu_nums1); cudaError_t errcode = cudaGetLastError(); return 0; }
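/* A minimal sketch of the index arithmetic shared by all kernels above: the
   2D grid of blocks is flattened to one index idx, which is then split back
   into the (i, j) lattice coordinates used for H and S.  The kernel name and
   the two output arrays are illustrative assumptions. */
__global__ void show_index_mapping(int *row_out, int *col_out)
{
    int idx = (blockIdx.y*gridDim.x + blockIdx.x)*blockDim.x + threadIdx.x;
    if (idx < NX) {
        int i = idx / N;       // row index (H direction)
        int j = idx - i*N;     // column index (S direction)
        row_out[idx] = i;
        col_out[idx] = j;
    }
}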
d9386b5d8b3cd8a3c8187be53062256962ef9c88.hip
// !!! This is a file automatically generated by hipify!!! // Copyright 2007-2020 The scikit-learn developers. // Copyright 2020 Google LLC. // Copyright 2021 Teddy Koker. // All rights reserved. // // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are met: // // a. Redistributions of source code must retain the above copyright notice, // this list of conditions and the following disclaimer. // b. Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // c. Neither the name of the Scikit-learn Developers nor the names of // its contributors may be used to endorse or promote products // derived from this software without specific prior written // permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" // AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE // ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR // ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL // DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR // SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER // CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT // LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY // OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH // DAMAGE. #include <torch/extension.h> // #include <hip/hip_runtime.h> // #include <hip/hip_runtime.h> // #include <iostream> // Copied from fast-soft-sort (https://bit.ly/3r0gOav) with the following modifications: // - replace numpy functions with torch equivalents // - re-write in CUDA // - return solution in place // - added backward pass (vector jacobian product) // Copied from scikit-learn with the following modifications: // - use decreasing constraints by default, // - do not return solution in place, rather save in array `sol`, // - avoid some needless multiplications. // namespace { // Numerically stable log-add-exp template <typename scalar_t> __device__ __forceinline__ scalar_t log_add_exp(scalar_t x, scalar_t y) { scalar_t larger = max(x, y); scalar_t smaller = min(x, y); return larger + log1p(exp(smaller - larger)); } // Returns partition corresponding to solution. 
Expects sizes to be zeros template <typename scalar_t> __device__ void partition( torch::PackedTensorAccessor32<scalar_t, 2, torch::RestrictPtrTraits> solution, torch::PackedTensorAccessor32<scalar_t, 2, torch::RestrictPtrTraits> sizes, int n, int b) { const scalar_t eps = 1.0e-9; int tail = 1; if (n > 0) { sizes[b][0] = 1; } for (int i = 1; i < n; i++) { if (std::abs(solution[b][i] - solution[b][i - 1]) > eps) { tail += 1; } sizes[b][tail - 1] += 1; } } template <typename scalar_t> __global__ void isotonic_l2_kernel( torch::PackedTensorAccessor32<scalar_t, 2, torch::RestrictPtrTraits> s, torch::PackedTensorAccessor32<scalar_t, 2, torch::RestrictPtrTraits> sol, torch::PackedTensorAccessor32<scalar_t, 2, torch::RestrictPtrTraits> sums, torch::PackedTensorAccessor32<scalar_t, 2, torch::RestrictPtrTraits> target, torch::PackedTensorAccessor32<scalar_t, 2, torch::RestrictPtrTraits> c, int n, int batch) { const int b = blockIdx.x * blockDim.x + threadIdx.x; if (b >= batch) { // outside the batch return; } // target describes a list of blocks. at any time, if [i..j] (inclusive) is // an active block, then target[i] := j and target[j] := i. for (int i = 0; i < n; i++) { c[b][i] = 1.0; sol[b][i] = s[b][i]; sums[b][i] = s[b][i]; target[b][i] = i; } int i = 0; while (i < n) { auto k = target[b][i] + 1; if (k == n) { break; } if (sol[b][i] > sol[b][k]) { i = k; continue; } auto sum_y = sums[b][i]; auto sum_c = c[b][i]; while (true) { // We are within an increasing subsequence auto prev_y = sol[b][k]; sum_y += sums[b][k]; sum_c += c[b][k]; k = target[b][k] + 1; if ((k == n) || (prev_y > sol[b][k])) { // Non-singleton increasing subsequence is finished, // update first entry. sol[b][i] = sum_y / sum_c; sums[b][i] = sum_y; c[b][i] = sum_c; target[b][i] = k - 1; target[b][k - 1] = i; if (i > 0) { // Backtrack if we can. This makes the algorithm // single-pass and ensures O(n) complexity. i = target[b][i - 1]; } // Otherwise, restart from the same point break; } } } // Reconstruct the solution i = 0; while (i < n) { auto k = target[b][i] + 1; for (int j = i + 1; j < k; j++) { sol[b][j] = sol[b][i]; } i = k; } } template <typename scalar_t> __global__ void isotonic_kl_kernel( torch::PackedTensorAccessor32<scalar_t, 2, torch::RestrictPtrTraits> y, torch::PackedTensorAccessor32<scalar_t, 2, torch::RestrictPtrTraits> w, torch::PackedTensorAccessor32<scalar_t, 2, torch::RestrictPtrTraits> sol, torch::PackedTensorAccessor32<scalar_t, 2, torch::RestrictPtrTraits> lse_y_, torch::PackedTensorAccessor32<scalar_t, 2, torch::RestrictPtrTraits> lse_w_, torch::PackedTensorAccessor32<scalar_t, 2, torch::RestrictPtrTraits> target, int n, int batch) { const int b = blockIdx.x * blockDim.x + threadIdx.x; if (b >= batch) { // outside the batch return; } // target describes a list of blocks. At any time, if [i..j] (inclusive) is // an active block, then target[i] := j and target[j] := i. for (int i = 0; i < n; i++) { sol[b][i] = y[b][i] - w[b][i]; lse_y_[b][i] = y[b][i]; lse_w_[b][i] = w[b][i]; target[b][i] = i; } int i = 0; while (i < n) { auto k = target[b][i] + 1; if (k == n) { break; } if (sol[b][i] > sol[b][k]) { i = k; continue; } auto lse_y = lse_y_[b][i]; auto lse_w = lse_w_[b][i]; while (true) { // We are within an increasing subsequence auto prev_y = sol[b][k]; lse_y = log_add_exp(lse_y, lse_y_[b][k]); lse_w = log_add_exp(lse_w, lse_w_[b][k]); k = target[b][k] + 1; if ((k == n) || (prev_y > sol[b][k])) { // Non-singleton increasing subsequence is finished, // update first entry. 
sol[b][i] = lse_y - lse_w; lse_y_[b][i] = lse_y; lse_w_[b][i] = lse_w; target[b][i] = k - 1; target[b][k - 1] = i; if (i > 0) { // Backtrack if we can. This makes the algorithm // single-pass and ensures O(n) complexity. i = target[b][i - 1]; } // Otherwise, restart from the same point break; } } } // Reconstruct the solution i = 0; while (i < n) { auto k = target[b][i] + 1; for (int j = i + 1; j < k; j++) { sol[b][j] = sol[b][i]; } i = k; } } template <typename scalar_t> __global__ void isotonic_l2_backward_kernel( torch::PackedTensorAccessor32<scalar_t, 2, torch::RestrictPtrTraits> s, // not used torch::PackedTensorAccessor32<scalar_t, 2, torch::RestrictPtrTraits> sol, torch::PackedTensorAccessor32<scalar_t, 2, torch::RestrictPtrTraits> grad_input, torch::PackedTensorAccessor32<scalar_t, 2, torch::RestrictPtrTraits> ret, torch::PackedTensorAccessor32<scalar_t, 2, torch::RestrictPtrTraits> sizes, int n, int batch) { int end; scalar_t sum; scalar_t val; const int b = blockIdx.x * blockDim.x + threadIdx.x; if (b >= batch) { // outside the batch return; } int start = 0; partition(sol, sizes, n, b); for (int size = 0; (sizes[b][size] > 0 && size < n); size++) { end = start + sizes[b][size]; sum = 0; val = 1.0 / (scalar_t) sizes[b][size]; for (int i = start; i < end; i++) { sum += grad_input[b][i]; } for (int i = start; i < end; i++) { ret[b][i] = val * sum; } start = end; } } template <typename scalar_t> __global__ void isotonic_kl_backward_kernel( torch::PackedTensorAccessor32<scalar_t, 2, torch::RestrictPtrTraits> s, torch::PackedTensorAccessor32<scalar_t, 2, torch::RestrictPtrTraits> sol, torch::PackedTensorAccessor32<scalar_t, 2, torch::RestrictPtrTraits> grad_input, torch::PackedTensorAccessor32<scalar_t, 2, torch::RestrictPtrTraits> ret, torch::PackedTensorAccessor32<scalar_t, 2, torch::RestrictPtrTraits> sizes, int n, int batch) { int end; scalar_t sum; scalar_t softmax; const int b = blockIdx.x * blockDim.x + threadIdx.x; if (b >= batch) { // outside the batch return; } int start = 0; partition(sol, sizes, n, b); for (int size = 0; (sizes[b][size] > 0 && size < n); size++) { end = start + sizes[b][size]; sum = 0; softmax = 0; for (int i = start; i < end; i++) { softmax += ::exp(s[b][i]); sum += grad_input[b][i]; } for (int i = start; i < end; i++) { ret[b][i] = ::exp(s[b][i]) / softmax * sum; } start = end; } } // Solves an isotonic regression problem using PAV. // Formally, it solves argmin_{v_1 >= ... >= v_n} 0.5 ||v - y||^2. torch::Tensor isotonic_l2(torch::Tensor y) { auto batch = y.size(0); auto n = y.size(1); auto sol = torch::zeros_like(y); auto sums = torch::zeros_like(y); auto target = torch::zeros_like(y); auto c = torch::zeros_like(y); const int threads = 1024; const int blocks = (batch + threads - 1) / threads; AT_DISPATCH_FLOATING_TYPES_AND_HALF(y.scalar_type(), "isotonic_l2", ([&] { hipLaunchKernelGGL(( isotonic_l2_kernel<scalar_t>), dim3(blocks), dim3(threads), 0, 0, y.packed_accessor32<scalar_t, 2, torch::RestrictPtrTraits>(), sol.packed_accessor32<scalar_t, 2, torch::RestrictPtrTraits>(), sums.packed_accessor32<scalar_t, 2, torch::RestrictPtrTraits>(), target.packed_accessor32<scalar_t, 2, torch::RestrictPtrTraits>(), c.packed_accessor32<scalar_t, 2, torch::RestrictPtrTraits>(), n, batch); })); return sol; } // Solves isotonic optimization with KL divergence using PAV. // Formally, it solves argmin_{v_1 >= ... >= v_n} <e^{y-v}, 1> + <e^w, v>. 
torch::Tensor isotonic_kl(torch::Tensor y, torch::Tensor w) { auto batch = y.size(0); auto n = y.size(1); auto sol = torch::zeros_like(y); auto lse_y_ = torch::zeros_like(y); auto lse_w_ = torch::zeros_like(y); auto target = torch::zeros_like(y); const int threads = 1024; const int blocks = (batch + threads - 1) / threads; AT_DISPATCH_FLOATING_TYPES_AND_HALF(y.scalar_type(), "isotonic_kl", ([&] { hipLaunchKernelGGL(( isotonic_kl_kernel<scalar_t>), dim3(blocks), dim3(threads), 0, 0, y.packed_accessor32<scalar_t, 2, torch::RestrictPtrTraits>(), w.packed_accessor32<scalar_t, 2, torch::RestrictPtrTraits>(), sol.packed_accessor32<scalar_t, 2, torch::RestrictPtrTraits>(), lse_y_.packed_accessor32<scalar_t, 2, torch::RestrictPtrTraits>(), lse_w_.packed_accessor32<scalar_t, 2, torch::RestrictPtrTraits>(), target.packed_accessor32<scalar_t, 2, torch::RestrictPtrTraits>(), n, batch); })); return sol; } torch::Tensor isotonic_l2_backward(torch::Tensor s, torch::Tensor sol, torch::Tensor grad_input) { auto batch = sol.size(0); auto n = sol.size(1); auto ret = torch::zeros_like(sol); auto sizes = torch::zeros_like(sol); const int threads = 1024; const int blocks = (batch + threads - 1) / threads; AT_DISPATCH_FLOATING_TYPES_AND_HALF(sol.scalar_type(), "isotonic_l2_backward", ([&] { hipLaunchKernelGGL(( isotonic_l2_backward_kernel<scalar_t>), dim3(blocks), dim3(threads), 0, 0, s.packed_accessor32<scalar_t, 2, torch::RestrictPtrTraits>(), sol.packed_accessor32<scalar_t, 2, torch::RestrictPtrTraits>(), grad_input.packed_accessor32<scalar_t, 2, torch::RestrictPtrTraits>(), ret.packed_accessor32<scalar_t, 2, torch::RestrictPtrTraits>(), sizes.packed_accessor32<scalar_t, 2, torch::RestrictPtrTraits>(), n, batch); })); return ret; } torch::Tensor isotonic_kl_backward(torch::Tensor s, torch::Tensor sol, torch::Tensor grad_input) { auto batch = sol.size(0); auto n = sol.size(1); auto ret = torch::zeros_like(sol); auto sizes = torch::zeros_like(sol); const int threads = 1024; const int blocks = (batch + threads - 1) / threads; AT_DISPATCH_FLOATING_TYPES_AND_HALF(sol.scalar_type(), "isotonic_kl_backward", ([&] { hipLaunchKernelGGL(( isotonic_kl_backward_kernel<scalar_t>), dim3(blocks), dim3(threads), 0, 0, s.packed_accessor32<scalar_t, 2, torch::RestrictPtrTraits>(), sol.packed_accessor32<scalar_t, 2, torch::RestrictPtrTraits>(), grad_input.packed_accessor32<scalar_t, 2, torch::RestrictPtrTraits>(), ret.packed_accessor32<scalar_t, 2, torch::RestrictPtrTraits>(), sizes.packed_accessor32<scalar_t, 2, torch::RestrictPtrTraits>(), n, batch); })); return ret; } PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) { m.def("isotonic_l2", &isotonic_l2, "Isotonic L2"); m.def("isotonic_l2_backward", &isotonic_l2_backward, "Isotonic L2 Backward"); m.def("isotonic_kl", &isotonic_kl, "Isotonic KL"); m.def("isotonic_kl_backward", &isotonic_kl_backward, "Isotonic KL Backward"); }
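// A minimal host-side sketch of why log_add_exp above is written the way it
// is: log(e^x + e^y) = max(x,y) + log1p(e^{-|x-y|}) never overflows, while the
// naive form does for large arguments.  The function name and the test values
// are illustrative assumptions.
#include <cstdio>
#include <cmath>
#include <algorithm>

static void log_add_exp_demo()
{
    double x = 1000.0, y = 999.0;
    double naive   = std::log(std::exp(x) + std::exp(y));             // exp(1000) overflows -> inf
    double larger  = std::max(x, y);
    double smaller = std::min(x, y);
    double stable  = larger + std::log1p(std::exp(smaller - larger)); // ~1000.3133
    std::printf("naive = %g   stable = %g\n", naive, stable);
}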
d9386b5d8b3cd8a3c8187be53062256962ef9c88.cu
// Copyright 2007-2020 The scikit-learn developers. // Copyright 2020 Google LLC. // Copyright 2021 Teddy Koker. // All rights reserved. // // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are met: // // a. Redistributions of source code must retain the above copyright notice, // this list of conditions and the following disclaimer. // b. Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // c. Neither the name of the Scikit-learn Developers nor the names of // its contributors may be used to endorse or promote products // derived from this software without specific prior written // permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" // AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE // ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR // ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL // DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR // SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER // CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT // LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY // OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH // DAMAGE. #include <torch/extension.h> // #include <cuda.h> // #include <cuda_runtime.h> // #include <iostream> // Copied from fast-soft-sort (https://bit.ly/3r0gOav) with the following modifications: // - replace numpy functions with torch equivalents // - re-write in CUDA // - return solution in place // - added backward pass (vector jacobian product) // Copied from scikit-learn with the following modifications: // - use decreasing constraints by default, // - do not return solution in place, rather save in array `sol`, // - avoid some needless multiplications. // namespace { // Numerically stable log-add-exp template <typename scalar_t> __device__ __forceinline__ scalar_t log_add_exp(scalar_t x, scalar_t y) { scalar_t larger = max(x, y); scalar_t smaller = min(x, y); return larger + log1p(exp(smaller - larger)); } // Returns partition corresponding to solution. Expects sizes to be zeros template <typename scalar_t> __device__ void partition( torch::PackedTensorAccessor32<scalar_t, 2, torch::RestrictPtrTraits> solution, torch::PackedTensorAccessor32<scalar_t, 2, torch::RestrictPtrTraits> sizes, int n, int b) { const scalar_t eps = 1.0e-9; int tail = 1; if (n > 0) { sizes[b][0] = 1; } for (int i = 1; i < n; i++) { if (std::abs(solution[b][i] - solution[b][i - 1]) > eps) { tail += 1; } sizes[b][tail - 1] += 1; } } template <typename scalar_t> __global__ void isotonic_l2_kernel( torch::PackedTensorAccessor32<scalar_t, 2, torch::RestrictPtrTraits> s, torch::PackedTensorAccessor32<scalar_t, 2, torch::RestrictPtrTraits> sol, torch::PackedTensorAccessor32<scalar_t, 2, torch::RestrictPtrTraits> sums, torch::PackedTensorAccessor32<scalar_t, 2, torch::RestrictPtrTraits> target, torch::PackedTensorAccessor32<scalar_t, 2, torch::RestrictPtrTraits> c, int n, int batch) { const int b = blockIdx.x * blockDim.x + threadIdx.x; if (b >= batch) { // outside the batch return; } // target describes a list of blocks. 
at any time, if [i..j] (inclusive) is // an active block, then target[i] := j and target[j] := i. for (int i = 0; i < n; i++) { c[b][i] = 1.0; sol[b][i] = s[b][i]; sums[b][i] = s[b][i]; target[b][i] = i; } int i = 0; while (i < n) { auto k = target[b][i] + 1; if (k == n) { break; } if (sol[b][i] > sol[b][k]) { i = k; continue; } auto sum_y = sums[b][i]; auto sum_c = c[b][i]; while (true) { // We are within an increasing subsequence auto prev_y = sol[b][k]; sum_y += sums[b][k]; sum_c += c[b][k]; k = target[b][k] + 1; if ((k == n) || (prev_y > sol[b][k])) { // Non-singleton increasing subsequence is finished, // update first entry. sol[b][i] = sum_y / sum_c; sums[b][i] = sum_y; c[b][i] = sum_c; target[b][i] = k - 1; target[b][k - 1] = i; if (i > 0) { // Backtrack if we can. This makes the algorithm // single-pass and ensures O(n) complexity. i = target[b][i - 1]; } // Otherwise, restart from the same point break; } } } // Reconstruct the solution i = 0; while (i < n) { auto k = target[b][i] + 1; for (int j = i + 1; j < k; j++) { sol[b][j] = sol[b][i]; } i = k; } } template <typename scalar_t> __global__ void isotonic_kl_kernel( torch::PackedTensorAccessor32<scalar_t, 2, torch::RestrictPtrTraits> y, torch::PackedTensorAccessor32<scalar_t, 2, torch::RestrictPtrTraits> w, torch::PackedTensorAccessor32<scalar_t, 2, torch::RestrictPtrTraits> sol, torch::PackedTensorAccessor32<scalar_t, 2, torch::RestrictPtrTraits> lse_y_, torch::PackedTensorAccessor32<scalar_t, 2, torch::RestrictPtrTraits> lse_w_, torch::PackedTensorAccessor32<scalar_t, 2, torch::RestrictPtrTraits> target, int n, int batch) { const int b = blockIdx.x * blockDim.x + threadIdx.x; if (b >= batch) { // outside the batch return; } // target describes a list of blocks. At any time, if [i..j] (inclusive) is // an active block, then target[i] := j and target[j] := i. for (int i = 0; i < n; i++) { sol[b][i] = y[b][i] - w[b][i]; lse_y_[b][i] = y[b][i]; lse_w_[b][i] = w[b][i]; target[b][i] = i; } int i = 0; while (i < n) { auto k = target[b][i] + 1; if (k == n) { break; } if (sol[b][i] > sol[b][k]) { i = k; continue; } auto lse_y = lse_y_[b][i]; auto lse_w = lse_w_[b][i]; while (true) { // We are within an increasing subsequence auto prev_y = sol[b][k]; lse_y = log_add_exp(lse_y, lse_y_[b][k]); lse_w = log_add_exp(lse_w, lse_w_[b][k]); k = target[b][k] + 1; if ((k == n) || (prev_y > sol[b][k])) { // Non-singleton increasing subsequence is finished, // update first entry. sol[b][i] = lse_y - lse_w; lse_y_[b][i] = lse_y; lse_w_[b][i] = lse_w; target[b][i] = k - 1; target[b][k - 1] = i; if (i > 0) { // Backtrack if we can. This makes the algorithm // single-pass and ensures O(n) complexity. 
i = target[b][i - 1]; } // Otherwise, restart from the same point break; } } } // Reconstruct the solution i = 0; while (i < n) { auto k = target[b][i] + 1; for (int j = i + 1; j < k; j++) { sol[b][j] = sol[b][i]; } i = k; } } template <typename scalar_t> __global__ void isotonic_l2_backward_kernel( torch::PackedTensorAccessor32<scalar_t, 2, torch::RestrictPtrTraits> s, // not used torch::PackedTensorAccessor32<scalar_t, 2, torch::RestrictPtrTraits> sol, torch::PackedTensorAccessor32<scalar_t, 2, torch::RestrictPtrTraits> grad_input, torch::PackedTensorAccessor32<scalar_t, 2, torch::RestrictPtrTraits> ret, torch::PackedTensorAccessor32<scalar_t, 2, torch::RestrictPtrTraits> sizes, int n, int batch) { int end; scalar_t sum; scalar_t val; const int b = blockIdx.x * blockDim.x + threadIdx.x; if (b >= batch) { // outside the batch return; } int start = 0; partition(sol, sizes, n, b); for (int size = 0; (sizes[b][size] > 0 && size < n); size++) { end = start + sizes[b][size]; sum = 0; val = 1.0 / (scalar_t) sizes[b][size]; for (int i = start; i < end; i++) { sum += grad_input[b][i]; } for (int i = start; i < end; i++) { ret[b][i] = val * sum; } start = end; } } template <typename scalar_t> __global__ void isotonic_kl_backward_kernel( torch::PackedTensorAccessor32<scalar_t, 2, torch::RestrictPtrTraits> s, torch::PackedTensorAccessor32<scalar_t, 2, torch::RestrictPtrTraits> sol, torch::PackedTensorAccessor32<scalar_t, 2, torch::RestrictPtrTraits> grad_input, torch::PackedTensorAccessor32<scalar_t, 2, torch::RestrictPtrTraits> ret, torch::PackedTensorAccessor32<scalar_t, 2, torch::RestrictPtrTraits> sizes, int n, int batch) { int end; scalar_t sum; scalar_t softmax; const int b = blockIdx.x * blockDim.x + threadIdx.x; if (b >= batch) { // outside the batch return; } int start = 0; partition(sol, sizes, n, b); for (int size = 0; (sizes[b][size] > 0 && size < n); size++) { end = start + sizes[b][size]; sum = 0; softmax = 0; for (int i = start; i < end; i++) { softmax += std::exp(s[b][i]); sum += grad_input[b][i]; } for (int i = start; i < end; i++) { ret[b][i] = std::exp(s[b][i]) / softmax * sum; } start = end; } } // Solves an isotonic regression problem using PAV. // Formally, it solves argmin_{v_1 >= ... >= v_n} 0.5 ||v - y||^2. torch::Tensor isotonic_l2(torch::Tensor y) { auto batch = y.size(0); auto n = y.size(1); auto sol = torch::zeros_like(y); auto sums = torch::zeros_like(y); auto target = torch::zeros_like(y); auto c = torch::zeros_like(y); const int threads = 1024; const int blocks = (batch + threads - 1) / threads; AT_DISPATCH_FLOATING_TYPES_AND_HALF(y.scalar_type(), "isotonic_l2", ([&] { isotonic_l2_kernel<scalar_t><<<blocks, threads>>>( y.packed_accessor32<scalar_t, 2, torch::RestrictPtrTraits>(), sol.packed_accessor32<scalar_t, 2, torch::RestrictPtrTraits>(), sums.packed_accessor32<scalar_t, 2, torch::RestrictPtrTraits>(), target.packed_accessor32<scalar_t, 2, torch::RestrictPtrTraits>(), c.packed_accessor32<scalar_t, 2, torch::RestrictPtrTraits>(), n, batch); })); return sol; } // Solves isotonic optimization with KL divergence using PAV. // Formally, it solves argmin_{v_1 >= ... >= v_n} <e^{y-v}, 1> + <e^w, v>. 
torch::Tensor isotonic_kl(torch::Tensor y, torch::Tensor w) { auto batch = y.size(0); auto n = y.size(1); auto sol = torch::zeros_like(y); auto lse_y_ = torch::zeros_like(y); auto lse_w_ = torch::zeros_like(y); auto target = torch::zeros_like(y); const int threads = 1024; const int blocks = (batch + threads - 1) / threads; AT_DISPATCH_FLOATING_TYPES_AND_HALF(y.scalar_type(), "isotonic_kl", ([&] { isotonic_kl_kernel<scalar_t><<<blocks, threads>>>( y.packed_accessor32<scalar_t, 2, torch::RestrictPtrTraits>(), w.packed_accessor32<scalar_t, 2, torch::RestrictPtrTraits>(), sol.packed_accessor32<scalar_t, 2, torch::RestrictPtrTraits>(), lse_y_.packed_accessor32<scalar_t, 2, torch::RestrictPtrTraits>(), lse_w_.packed_accessor32<scalar_t, 2, torch::RestrictPtrTraits>(), target.packed_accessor32<scalar_t, 2, torch::RestrictPtrTraits>(), n, batch); })); return sol; } torch::Tensor isotonic_l2_backward(torch::Tensor s, torch::Tensor sol, torch::Tensor grad_input) { auto batch = sol.size(0); auto n = sol.size(1); auto ret = torch::zeros_like(sol); auto sizes = torch::zeros_like(sol); const int threads = 1024; const int blocks = (batch + threads - 1) / threads; AT_DISPATCH_FLOATING_TYPES_AND_HALF(sol.scalar_type(), "isotonic_l2_backward", ([&] { isotonic_l2_backward_kernel<scalar_t><<<blocks, threads>>>( s.packed_accessor32<scalar_t, 2, torch::RestrictPtrTraits>(), sol.packed_accessor32<scalar_t, 2, torch::RestrictPtrTraits>(), grad_input.packed_accessor32<scalar_t, 2, torch::RestrictPtrTraits>(), ret.packed_accessor32<scalar_t, 2, torch::RestrictPtrTraits>(), sizes.packed_accessor32<scalar_t, 2, torch::RestrictPtrTraits>(), n, batch); })); return ret; } torch::Tensor isotonic_kl_backward(torch::Tensor s, torch::Tensor sol, torch::Tensor grad_input) { auto batch = sol.size(0); auto n = sol.size(1); auto ret = torch::zeros_like(sol); auto sizes = torch::zeros_like(sol); const int threads = 1024; const int blocks = (batch + threads - 1) / threads; AT_DISPATCH_FLOATING_TYPES_AND_HALF(sol.scalar_type(), "isotonic_kl_backward", ([&] { isotonic_kl_backward_kernel<scalar_t><<<blocks, threads>>>( s.packed_accessor32<scalar_t, 2, torch::RestrictPtrTraits>(), sol.packed_accessor32<scalar_t, 2, torch::RestrictPtrTraits>(), grad_input.packed_accessor32<scalar_t, 2, torch::RestrictPtrTraits>(), ret.packed_accessor32<scalar_t, 2, torch::RestrictPtrTraits>(), sizes.packed_accessor32<scalar_t, 2, torch::RestrictPtrTraits>(), n, batch); })); return ret; } PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) { m.def("isotonic_l2", &isotonic_l2, "Isotonic L2"); m.def("isotonic_l2_backward", &isotonic_l2_backward, "Isotonic L2 Backward"); m.def("isotonic_kl", &isotonic_kl, "Isotonic KL"); m.def("isotonic_kl_backward", &isotonic_kl_backward, "Isotonic KL Backward"); }
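The isotonic_kl_kernel above relies on a log_add_exp helper that is defined earlier in this file, outside the excerpt shown here. For reference, a numerically stable device-side version of that operation typically looks like the sketch below; the template signature is an assumption inferred from the call sites, not a copy of the original definition.

// Hedged sketch (assumed, not the original definition): log(exp(a) + exp(b))
// computed without overflow by factoring out the larger argument, matching how
// isotonic_kl_kernel calls log_add_exp(lse_y, lse_y_[b][k]).
template <typename scalar_t>
__device__ __forceinline__ scalar_t log_add_exp(scalar_t a, scalar_t b) {
  const scalar_t larger  = a > b ? a : b;
  const scalar_t smaller = a > b ? b : a;
  // smaller - larger <= 0, so exp() stays in [0, 1] and log1p() remains accurate.
  return larger + log1p(exp(smaller - larger));
}

Factoring out the maximum keeps the exponential bounded, which matters here because lse_y and lse_w accumulate over entire pooled blocks during the PAV sweep.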
5bd2c8cc5e60a251216e290fcce03643e1c731b6.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// UCSD Phys244
// Spring 2018
// Andreas Goetz ([email protected])
// CUDA program to square matrix elements in parallel on the GPU
//
// /* FIXME */ COMMENTS NEED MODIFICATION
//
#include<stdio.h>

// define matrix size, number of blocks NBL and threads per block TPB
#define NROW 2048
#define NCOL 512
#define NBLX 32
#define NBLY 32
#define TPBX 16
#define TPBY 16

//
// CUDA device function that squares elements of a 2D array
//
__global__ void square(int *arr, int maxrow, int maxcol){

  int rowinit = /* FIXME */
  int colinit = threadIdx.y + blockDim.y * blockIdx.y;
  int rowstride = /* FIXME */
  int colstride = /* FIXME */
  int pos;

  // operate on all submatrices
  for (int row = rowinit; row < maxrow; row += rowstride) {
    for (int col = colinit; col < maxcol; col += colstride) {
      pos = row*maxcol + col;
      arr[pos] *= arr[pos];
    }
  }
}

//
// main program
//
int main(void){

  int h_a[NROW][NCOL];
  int *d_a;
  int size = NROW * NCOL * sizeof(int);
  int i, j, err;

  // allocate device memory
  hipMalloc((void **)&d_a, size);

  // initialize matrix
  for (i=0; i<NROW; i++){
    for (j=0; j<NCOL; j++){
      h_a[i][j] = i+j;
      // printf("Element (%d,%d) = %d\n",i,j,h_a[i][j]);
    }
  }

  // copy input data to device
  hipMemcpy(/* FIXME */);

  // add vectors by launching a sufficient number of blocks of the add() kernel
  printf("\nLaunching kernel to square matrix elements...\n");
  printf("Matrix elements = %d x %d = %d\n",NROW,NCOL,NROW*NCOL);
  printf("Blocks = %d x %d = %d\n",NBLX,NBLY,NBLX*NBLY);
  printf("Threads per block = %d x %d = %d\n",TPBX,TPBY,TPBX*TPBY);
  printf("Kernel copies = %d\n",NBLX*NBLY*TPBX*TPBY);
  square<<</* FIXME */>>>(d_a, NROW, NCOL);

  // copy results back to host
  hipMemcpy(/* FIXME */);

  // deallocate memory
  hipFree(d_a);

  // check results
  err = 0;
  for (i=0; i<NROW; i++){
    for (j=0; j<NCOL; j++){
      if (h_a[i][j] != (i+j)*(i+j)) err += 1;
      //printf("Element (%d,%d) = %d\n",i,j,h_a[i][j]);
    }
  }

  if (err != 0){
    printf("\n Error, %d elements do not match!\n\n", err);
  } else {
    printf("\n Success! All elements match.\n\n");
  }

  return 0;
}
5bd2c8cc5e60a251216e290fcce03643e1c731b6.cu
// UCSD Phys244
// Spring 2018
// Andreas Goetz ([email protected])
// CUDA program to square matrix elements in parallel on the GPU
//
// /* FIXME */ COMMENTS NEED MODIFICATION
//
#include<stdio.h>

// define matrix size, number of blocks NBL and threads per block TPB
#define NROW 2048
#define NCOL 512
#define NBLX 32
#define NBLY 32
#define TPBX 16
#define TPBY 16

//
// CUDA device function that squares elements of a 2D array
//
__global__ void square(int *arr, int maxrow, int maxcol){

  int rowinit = /* FIXME */
  int colinit = threadIdx.y + blockDim.y * blockIdx.y;
  int rowstride = /* FIXME */
  int colstride = /* FIXME */
  int pos;

  // operate on all submatrices
  for (int row = rowinit; row < maxrow; row += rowstride) {
    for (int col = colinit; col < maxcol; col += colstride) {
      pos = row*maxcol + col;
      arr[pos] *= arr[pos];
    }
  }
}

//
// main program
//
int main(void){

  int h_a[NROW][NCOL];
  int *d_a;
  int size = NROW * NCOL * sizeof(int);
  int i, j, err;

  // allocate device memory
  cudaMalloc((void **)&d_a, size);

  // initialize matrix
  for (i=0; i<NROW; i++){
    for (j=0; j<NCOL; j++){
      h_a[i][j] = i+j;
      // printf("Element (%d,%d) = %d\n",i,j,h_a[i][j]);
    }
  }

  // copy input data to device
  cudaMemcpy(/* FIXME */);

  // add vectors by launching a sufficient number of blocks of the add() kernel
  printf("\nLaunching kernel to square matrix elements...\n");
  printf("Matrix elements = %d x %d = %d\n",NROW,NCOL,NROW*NCOL);
  printf("Blocks = %d x %d = %d\n",NBLX,NBLY,NBLX*NBLY);
  printf("Threads per block = %d x %d = %d\n",TPBX,TPBY,TPBX*TPBY);
  printf("Kernel copies = %d\n",NBLX*NBLY*TPBX*TPBY);
  square<<</* FIXME */>>>(d_a, NROW, NCOL);

  // copy results back to host
  cudaMemcpy(/* FIXME */);

  // deallocate memory
  cudaFree(d_a);

  // check results
  err = 0;
  for (i=0; i<NROW; i++){
    for (j=0; j<NCOL; j++){
      if (h_a[i][j] != (i+j)*(i+j)) err += 1;
      //printf("Element (%d,%d) = %d\n",i,j,h_a[i][j]);
    }
  }

  if (err != 0){
    printf("\n Error, %d elements do not match!\n\n", err);
  } else {
    printf("\n Success! All elements match.\n\n");
  }

  return 0;
}
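Both copies of this exercise leave the index arithmetic, the host/device copies, and the launch configuration as /* FIXME */ placeholders. The sketch below shows one conventional way to complete them with a 2D grid-stride loop and the usual cudaMemcpy calls; it is an illustration of the pattern the comments describe, not the course's reference solution (the HIP copy is analogous with hipMemcpy and hipLaunchKernelGGL).

// Hedged sketch: one conventional completion of the /* FIXME */ placeholders.
// Kernel side: a 2D grid-stride loop so NBLX*NBLY blocks of TPBX*TPBY threads
// cover every element of the NROW x NCOL matrix.
__global__ void square_filled(int *arr, int maxrow, int maxcol){
  int rowinit   = threadIdx.x + blockDim.x * blockIdx.x;
  int colinit   = threadIdx.y + blockDim.y * blockIdx.y;
  int rowstride = blockDim.x * gridDim.x;
  int colstride = blockDim.y * gridDim.y;
  for (int row = rowinit; row < maxrow; row += rowstride) {
    for (int col = colinit; col < maxcol; col += colstride) {
      int pos = row*maxcol + col;
      arr[pos] *= arr[pos];
    }
  }
}

// Host side, the copies and the launch would then typically read:
//   cudaMemcpy(d_a, h_a, size, cudaMemcpyHostToDevice);
//   dim3 blocks(NBLX, NBLY);
//   dim3 threads(TPBX, TPBY);
//   square<<<blocks, threads>>>(d_a, NROW, NCOL);
//   cudaMemcpy(h_a, d_a, size, cudaMemcpyDeviceToHost);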
31c4c6dca41918768f1a3fe35d7484f190425a6f.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include"firstorder.h" int *Calculate_firstorder_rl(const int *image, int *Range, int MASK_VALUE, int *size, int *stride,int Ng, int bin_width, int batch_size) { //START_TIMER(time) // getting the max value of the image intensity int dev_bin = Ng * bin_width; int nbytes_image = sizeof(int) * size[0] * size[1] * batch_size; //int nbytes_mask = sizeof(int) * size[0] * size[1] * batch_size; int nbytes_firstorder = sizeof(int) * dev_bin * batch_size; //int *glcm = (int*)malloc(nbytes_glcm); int *dev_size = NULL; int *dev_stride = NULL; int *dev_P = NULL; int *dev_image = NULL; hipMalloc((void**)&dev_P, nbytes_firstorder); hipMalloc((void**)&dev_image, nbytes_image); dim3 grids_Pn(1, 1, batch_size); dim3 threads_Pn(bin_width, Ng); hipLaunchKernelGGL(( initialize), dim3(grids_Pn), dim3(threads_Pn), 0, 0, dev_P); hipMalloc((void**)&dev_size, sizeof(int) * 2); hipMalloc((void**)&dev_stride, sizeof(int) * 2); //ANDLE_ERROR(hipMemcpy((void*)dev_image, (void*)image, nbytes_image, hipMemcpyHostToDevice)); //HANDLE_ERROR(hipMemcpy((void*)dev_mask, (void*)mask, nbytes_mask, hipMemcpyHostToDevice)); //HANDLE_ERROR(hipMemcpy((void*)dev_glcm, (void*)glcm, nbytes_glcm, hipMemcpyHostToDevice)); hipMemcpy(dev_size, size, sizeof(int) * 2, hipMemcpyHostToDevice); hipMemcpy(dev_stride, stride, sizeof(int) * 2, hipMemcpyHostToDevice); //printf("copying: "); //PRINT_TIME(time) //printf("\n"); dim3 grids(size[0] / 8, size[1] / 8, batch_size); dim3 threads(64, 1); //START_TIMER(time) //START_TIMER(time) hipLaunchKernelGGL(( Preprocessing_image_firstorder), dim3(grids), dim3(threads), 0, 0, dev_image, image, Range[0], Range[1],MASK_VALUE); hipLaunchKernelGGL(( calculate_firstorder_kernel_rl), dim3(grids), dim3(threads), 0, 0, dev_image, dev_P, dev_size, dev_stride, dev_bin); hipDeviceSynchronize(); hipFree(dev_stride); hipFree(dev_size); hipFree(dev_image); return dev_P; //return dev_glcm; } void Calculate_firstorder_Property(PROPERTY_fo *Property_fo, float Epsilon, int bin_width, int Ng, int batch_size) { // getting the range of the image intensity int dev_bin = bin_width * Ng; if (Property_fo->Np != NULL) { delete Property_fo->Np; Property_fo->Np = NULL; //printf("Property_glcm->s != NULL! \n"); } if (Property_fo->Pn != NULL) { delete Property_fo->Pn; Property_fo->Pn = NULL; //printf("Property_glcm->Px != NULL! \n"); } if (Property_fo->PF != NULL) { delete Property_fo->PF; Property_fo->PF = NULL; //printf("Property_glcm->Px != NULL! \n"); } if (Property_fo->Pf != NULL) { delete Property_fo->Pf; Property_fo->Pf = NULL; //printf("Property_glcm->Px != NULL! \n"); } if (Property_fo->pn != NULL) { delete Property_fo->pn; Property_fo->pn = NULL; //printf("Property_glcm->Px != NULL! \n"); } if (Property_fo->P10 != NULL) { delete Property_fo->P10; Property_fo->P10 = NULL; //printf("Property_glcm->Py != NULL! \n"); } if (Property_fo->P25 != NULL) { delete Property_fo->P25; Property_fo->P25 = NULL; //printf("Property_glcm->Py != NULL! \n"); } if (Property_fo->P50 != NULL) { delete Property_fo->P50; Property_fo->P50 = NULL; //printf("Property_glcm->Py != NULL! \n"); } if (Property_fo->P75 != NULL) { delete Property_fo->P75; Property_fo->P75 = NULL; //printf("Property_glcm->Py != NULL! \n"); } if (Property_fo->P90 != NULL) { delete Property_fo->P90; Property_fo->P90 = NULL; //printf("Property_glcm->Py != NULL! \n"); } if (Property_fo->N1090 != NULL) { delete Property_fo->N1090; Property_fo->N1090 = NULL; //printf("Property_glcm->Py != NULL! 
\n"); } if (Property_fo->mP1090 != NULL) { delete Property_fo->mP1090; Property_fo->mP1090 = NULL; //printf("Property_glcm->Py != NULL! \n"); } if (Property_fo->Pmax != NULL) { delete Property_fo->Pmax; Property_fo->Pmax = NULL; //printf("Property_glcm->Py != NULL! \n"); } if (Property_fo->Pmin != NULL) { delete Property_fo->Pmin; Property_fo->Pmin = NULL; //printf("Property_glcm->Py != NULL! \n"); } if (Property_fo->Pm != NULL) { delete Property_fo->Pm; Property_fo->Pm = NULL; //printf("Property_glcm->Py != NULL! \n"); } if (Property_fo->Pv != NULL) { delete Property_fo->Pv; Property_fo->Pv = NULL; //printf("Property_glcm->Py != NULL! \n"); } hipDeviceSynchronize(); hipMalloc((void**)&(Property_fo->Np), sizeof(float) * batch_size); dim3 grids_Np(1, 1, batch_size); dim3 threads_Np(1, 1); hipLaunchKernelGGL(( initialize_tex), dim3(grids_Np), dim3(threads_Np), 0, 0, Property_fo->Np); hipMalloc((void**)&Property_fo->Pn, sizeof(float) * dev_bin * batch_size); dim3 grids_Pn(1, 1, batch_size); dim3 threads_Pn(Ng, bin_width); hipLaunchKernelGGL(( initialize_tex), dim3(grids_Pn), dim3(threads_Pn), 0, 0, Property_fo->Pn); hipMalloc((void**)&Property_fo->pn, sizeof(float) * Ng * batch_size); dim3 grids_pn(1, Ng, batch_size); dim3 threads_pn(1, 1); hipLaunchKernelGGL(( initialize_tex), dim3(grids_pn), dim3(threads_pn), 0, 0, Property_fo->pn); hipMalloc((void**)&Property_fo->PF, sizeof(int) * dev_bin * batch_size); dim3 grids_PF(1, 1, batch_size); dim3 threads_PF(Ng, bin_width); hipLaunchKernelGGL(( initialize), dim3(grids_PF), dim3(threads_PF), 0, 0, Property_fo->PF); hipMalloc((void**)&Property_fo->Pf, sizeof(float) * dev_bin * batch_size); dim3 grids_Pf(1, 1, batch_size); dim3 threads_Pf(Ng, bin_width); hipLaunchKernelGGL(( initialize_tex), dim3(grids_Pf), dim3(threads_Pf), 0, 0, Property_fo->Pf); hipMalloc((void**)&Property_fo->P25, sizeof(int) * batch_size); dim3 grids_P25(1, 1, batch_size); dim3 threads_P25(1, 1); hipLaunchKernelGGL(( initialize), dim3(grids_P25), dim3(threads_P25), 0, 0, Property_fo->P25); hipMalloc((void**)&Property_fo->P75, sizeof(int) * batch_size); dim3 grids_P75(1, 1, batch_size); dim3 threads_P75(1, 1); hipLaunchKernelGGL(( initialize), dim3(grids_P75), dim3(threads_P75), 0, 0, Property_fo->P75); hipMalloc((void**)&Property_fo->P50, sizeof(int) * batch_size); dim3 grids_P50(1, 1, batch_size); dim3 threads_P50(1, 1); hipLaunchKernelGGL(( initialize), dim3(grids_P50), dim3(threads_P50), 0, 0, Property_fo->P50); hipMalloc((void**)&Property_fo->P90, sizeof(int) * batch_size); dim3 grids_P90(1, 1, batch_size); dim3 threads_P90(1, 1); hipLaunchKernelGGL(( initialize), dim3(grids_P90), dim3(threads_P90), 0, 0, Property_fo->P90); hipMalloc((void**)&Property_fo->P10, sizeof(int) * batch_size); dim3 grids_P10(1, 1, batch_size); dim3 threads_P10(1, 1); hipLaunchKernelGGL(( initialize), dim3(grids_P10), dim3(threads_P10), 0, 0, Property_fo->P10); hipMalloc((void**)&Property_fo->N1090, sizeof(int) * batch_size); dim3 grids_N1090(1, 1, batch_size); dim3 threads_N1090(1, 1); hipLaunchKernelGGL(( initialize), dim3(grids_N1090), dim3(threads_N1090), 0, 0, Property_fo->N1090); hipMalloc((void**)&Property_fo->mP1090, sizeof(float) * batch_size); dim3 grids_mP1090(1, 1, batch_size); dim3 threads_mP1090(1, 1); hipLaunchKernelGGL(( initialize_tex), dim3(grids_mP1090), dim3(threads_mP1090), 0, 0, Property_fo->mP1090); hipMalloc((void**)&Property_fo->Pmin, sizeof(int) * batch_size); dim3 grids_Pmin(1, 1, batch_size); dim3 threads_Pmin(1, 1); hipLaunchKernelGGL(( initialize), dim3(grids_Pmin), 
dim3(threads_Pmin), 0, 0, Property_fo->Pmin); hipMalloc((void**)&Property_fo->Pmax, sizeof(int) * batch_size); dim3 grids_Pmax(1, 1, batch_size); dim3 threads_Pmax(1, 1); hipLaunchKernelGGL(( initialize), dim3(grids_Pmax), dim3(threads_Pmax), 0, 0, Property_fo->Pmax); hipMalloc((void**)&Property_fo->Pm, sizeof(float) * batch_size); dim3 grids_Pm(1, 1, batch_size); dim3 threads_Pm(1, 1); hipLaunchKernelGGL(( initialize_tex), dim3(grids_Pm), dim3(threads_Pm), 0, 0, Property_fo->Pm); hipMalloc((void**)&Property_fo->Pv, sizeof(float) * batch_size); dim3 grids_Pv(1, 1, batch_size); dim3 threads_Pv(1, 1); hipLaunchKernelGGL(( initialize_tex), dim3(grids_Pv), dim3(threads_Pv), 0, 0, Property_fo->Pv); hipDeviceSynchronize(); dim3 grids(1, 1, batch_size); dim3 threads(Ng, bin_width); hipLaunchKernelGGL(( firstorder_Np), dim3(grids), dim3(threads), 0, 0, Property_fo->P, Property_fo->Np, dev_bin); //printf("Get Property_glcm sum! \n"); //HANDLE_ERROR(hipMemcpy(Property_glcm->s, s, sizeof(float), hipMemcpyDeviceToHost)); hipDeviceSynchronize(); hipLaunchKernelGGL(( firstorder_Pn), dim3(grids), dim3(threads), 0, 0, Property_fo->Pn, Property_fo->P, Property_fo->Np, dev_bin); //printf("Get Property_glcm Pn! \n"); hipDeviceSynchronize(); hipLaunchKernelGGL(( firstorder_pn), dim3(grids), dim3(threads), 0, 0, Property_fo->pn, Property_fo->Pn, Ng, dev_bin, bin_width); hipLaunchKernelGGL(( firstorder_PF), dim3(grids), dim3(threads), 0, 0, Property_fo->PF, Property_fo->P, dev_bin); hipDeviceSynchronize(); hipLaunchKernelGGL(( firstorder_Pf), dim3(grids), dim3(threads), 0, 0, Property_fo->Pf, Property_fo->PF, Property_fo->Np, dev_bin); hipDeviceSynchronize(); hipLaunchKernelGGL(( firstorder_P10), dim3(grids), dim3(threads), 0, 0, Property_fo->P10, Property_fo->Pf, dev_bin); hipLaunchKernelGGL(( firstorder_P25), dim3(grids), dim3(threads), 0, 0, Property_fo->P25, Property_fo->Pf, dev_bin); hipLaunchKernelGGL(( firstorder_P50), dim3(grids), dim3(threads), 0, 0, Property_fo->P50, Property_fo->Pf, dev_bin); hipLaunchKernelGGL(( firstorder_P75), dim3(grids), dim3(threads), 0, 0, Property_fo->P75, Property_fo->Pf, dev_bin); hipLaunchKernelGGL(( firstorder_P90), dim3(grids), dim3(threads), 0, 0, Property_fo->P90, Property_fo->Pf, dev_bin); hipLaunchKernelGGL(( firstorder_Pmin), dim3(grids), dim3(threads), 0, 0, Property_fo->Pmin, Property_fo->PF, dev_bin); hipLaunchKernelGGL(( firstorder_Pmax), dim3(grids), dim3(threads), 0, 0, Property_fo->Pmax, Property_fo->PF, dev_bin); hipDeviceSynchronize(); hipLaunchKernelGGL(( firstorder_N1090), dim3(grids), dim3(threads), 0, 0, Property_fo->N1090, Property_fo->P, Property_fo->P10, Property_fo->P90, dev_bin); hipDeviceSynchronize(); hipLaunchKernelGGL(( firstorder_mP1090), dim3(grids), dim3(threads), 0, 0, Property_fo->mP1090, Property_fo->N1090, Property_fo->Pn, Property_fo->Np, Property_fo->P10, Property_fo->P90, dev_bin); hipLaunchKernelGGL(( firstorder_Pm), dim3(grids), dim3(threads), 0, 0, Property_fo->Pm, Property_fo->Pn, dev_bin); hipDeviceSynchronize(); hipLaunchKernelGGL(( firstorder_Pv), dim3(grids), dim3(threads), 0, 0, Property_fo->Pv, Property_fo->Pm, Property_fo->Pn, dev_bin); } void Calculate_firstorder_Texture_rl(PROPERTY_fo *Property_fo, float *texture_fo, float Epsilon, int Ng, int bin_width, int batch_size) { //START_TIMER(time) //float *Texture_fo = (float*)malloc(sizeof(float) * 19 * batch_size); //printf("Texture_glcm initialized! 
\n"); // getting the range of the image intensity int dev_bin = bin_width * Ng; dim3 grids1(1, 1, batch_size); dim3 threads1(18, 1); //hipMalloc((void**)&texture_glcm, sizeof(float) * NA * batch_size); hipLaunchKernelGGL(( initialize_tex), dim3(grids1), dim3(threads1), 0, 0, texture_fo); hipDeviceSynchronize(); dim3 grids(1, 1, batch_size); dim3 threads(Ng, bin_width); //printf("getting Texture_glcm CUDA \n"); //Calculate_GLCM_Texture_glcm_kernel<<<grids, threads>>>(Texture_glcm, Property_glcm, Ng); hipLaunchKernelGGL(( f1_Energy), dim3(grids), dim3(threads), 0, 0, &texture_fo[0 * batch_size], Property_fo->Pn, Property_fo->Np, dev_bin); //hipMemcpy(&Texture_fo[0 * batch_size], &texture_fo[0 * batch_size], sizeof(float) * batch_size, hipMemcpyDeviceToHost); hipDeviceSynchronize(); hipLaunchKernelGGL(( f3_Entropy), dim3(grids), dim3(threads), 0, 0, &texture_fo[1 * batch_size], Property_fo->pn, Property_fo->Np, dev_bin, bin_width, Epsilon); //hipMemcpy(&Texture_fo[1 * batch_size], &texture_fo[1 * batch_size], sizeof(float) * batch_size, hipMemcpyDeviceToHost); hipLaunchKernelGGL(( f4_Minimum), dim3(grids), dim3(threads), 0, 0, &texture_fo[2 * batch_size], Property_fo->Pmin, dev_bin); //hipMemcpy(&Texture_fo[2 * batch_size], &texture_fo[2 * batch_size], sizeof(float) * batch_size, hipMemcpyDeviceToHost); hipLaunchKernelGGL(( f5_TenthPercentile), dim3(grids), dim3(threads), 0, 0, &texture_fo[3 * batch_size], Property_fo->P10, dev_bin); //hipMemcpy(&Texture_fo[3 * batch_size], &texture_fo[3 * batch_size], sizeof(float) * batch_size, hipMemcpyDeviceToHost); hipLaunchKernelGGL(( f6_NinetiePercentile), dim3(grids), dim3(threads), 0, 0, &texture_fo[4 * batch_size], Property_fo->P90, dev_bin); //hipMemcpy(&Texture_fo[4 * batch_size], &texture_fo[4 * batch_size], sizeof(float) * batch_size, hipMemcpyDeviceToHost); hipLaunchKernelGGL(( f7_Maximum), dim3(grids), dim3(threads), 0, 0, &texture_fo[5 * batch_size], Property_fo->Pmax, dev_bin); //hipMemcpy(&Texture_fo[5 * batch_size], &texture_fo[5 * batch_size], sizeof(float) * batch_size, hipMemcpyDeviceToHost); hipLaunchKernelGGL(( f8_Mean), dim3(grids), dim3(threads), 0, 0, &texture_fo[6 * batch_size], Property_fo->Pm, dev_bin); //hipMemcpy(&Texture_fo[6 * batch_size], &texture_fo[6 * batch_size], sizeof(float) * batch_size, hipMemcpyDeviceToHost); hipLaunchKernelGGL(( f9_Median), dim3(grids), dim3(threads), 0, 0, &texture_fo[7 * batch_size], Property_fo->P50, dev_bin); //hipMemcpy(&Texture_fo[7 * batch_size], &texture_fo[7 * batch_size], sizeof(float) * batch_size, hipMemcpyDeviceToHost); hipLaunchKernelGGL(( f10_InterquartileRange), dim3(grids), dim3(threads), 0, 0, &texture_fo[8 * batch_size], Property_fo->P25, Property_fo->P75, dev_bin); //hipMemcpy(&Texture_fo[8 * batch_size], &texture_fo[8 * batch_size], sizeof(float) * batch_size, hipMemcpyDeviceToHost); hipLaunchKernelGGL(( f11_Range), dim3(grids), dim3(threads), 0, 0, &texture_fo[9 * batch_size], Property_fo->Pmin, Property_fo->Pmax, dev_bin); //hipMemcpy(&Texture_fo[9 * batch_size], &texture_fo[9 * batch_size], sizeof(float) * batch_size, hipMemcpyDeviceToHost); hipLaunchKernelGGL(( f12_MAD), dim3(grids), dim3(threads), 0, 0, &texture_fo[10 * batch_size], Property_fo->Pn, Property_fo->Pm, Property_fo->Np, dev_bin); //hipMemcpy(&Texture_fo[10 * batch_size], &texture_fo[10 * batch_size], sizeof(float) * batch_size, hipMemcpyDeviceToHost); hipLaunchKernelGGL(( f13_rMAD), dim3(grids), dim3(threads), 0, 0, &texture_fo[11 * batch_size], Property_fo->N1090, Property_fo->mP1090, Property_fo->P10, 
Property_fo->P90, Property_fo->Pn, Property_fo->Np, dev_bin); //hipMemcpy(&Texture_fo[11 * batch_size], &texture_fo[11 * batch_size], sizeof(float) * batch_size, hipMemcpyDeviceToHost); hipDeviceSynchronize(); hipLaunchKernelGGL(( f14_RMS), dim3(grids), dim3(threads), 0, 0, &texture_fo[12 * batch_size], &texture_fo[0 * batch_size], Property_fo->Np, dev_bin); //hipMemcpy(&Texture_fo[12 * batch_size], &texture_fo[12 * batch_size], sizeof(float) * batch_size, hipMemcpyDeviceToHost); hipLaunchKernelGGL(( f15_StandardDeviation), dim3(grids), dim3(threads), 0, 0, &texture_fo[13 * batch_size], Property_fo->Pv, dev_bin); //hipMemcpy(&Texture_fo[13 * batch_size], &texture_fo[13 * batch_size], sizeof(float) * batch_size, hipMemcpyDeviceToHost); hipLaunchKernelGGL(( f16_Skewness), dim3(grids), dim3(threads), 0, 0, &texture_fo[14 * batch_size], Property_fo->Pm, Property_fo->Pn, Property_fo->Pv, dev_bin, Epsilon); //hipMemcpy(&Texture_fo[14 * batch_size], &texture_fo[14 * batch_size], sizeof(float) * batch_size, hipMemcpyDeviceToHost); hipLaunchKernelGGL(( f17_Kurtosis), dim3(grids), dim3(threads), 0, 0, &texture_fo[15 * batch_size], Property_fo->Pm, Property_fo->Pn, Property_fo->Pv, dev_bin, Epsilon); //hipMemcpy(&Texture_fo[15 * batch_size], &texture_fo[15 * batch_size], sizeof(float) * batch_size, hipMemcpyDeviceToHost); hipLaunchKernelGGL(( f18_Variance), dim3(grids), dim3(threads), 0, 0, &texture_fo[16 * batch_size], Property_fo->Pv, dev_bin); //hipMemcpy(&Texture_fo[16 * batch_size], &texture_fo[16 * batch_size], sizeof(float) * batch_size, hipMemcpyDeviceToHost); hipLaunchKernelGGL(( f19_Uniformity), dim3(grids), dim3(threads), 0, 0, &texture_fo[17 * batch_size], Property_fo->pn, dev_bin, bin_width); //hipMemcpy(&Texture_fo[17 * batch_size], &texture_fo[17 * batch_size], sizeof(float) * batch_size, hipMemcpyDeviceToHost); //f20_Volume<<<grids, threads>>>(&texture_fo[18 * batch_size], Property_fo->Np, dev_bin); //hipMemcpy(&Texture_fo[18 * batch_size], &texture_fo[18* batch_size], sizeof(float) * batch_size, hipMemcpyDeviceToHost); hipDeviceSynchronize(); //STOP_TIMER(time) /* printf("getting Texture_glcm: \n"); printf("f1_Energy: %f \n", Texture_fo[0 * batch_size]); printf("f3_Entropy: %f \n", Texture_fo[1 * batch_size]); printf("f4_Minimum: %f \n", Texture_fo[2 * batch_size]); printf("f5_TenthPercentile: %f \n", Texture_fo[3 * batch_size]); printf("f6_NinetiethPercentile: %f \n", Texture_fo[4 * batch_size]); printf("f7_Maximum: %f \n", Texture_fo[5 * batch_size]); printf("f8_Mean: %f \n", Texture_fo[6 * batch_size]); printf("f9_Median: %f \n", Texture_fo[7 * batch_size]); printf("f10_InterquartileRange: %f \n", Texture_fo[8 * batch_size]); printf("f11_Range: %f \n", Texture_fo[9 * batch_size]); printf("f12_MAD: %f \n", Texture_fo[10 * batch_size]); printf("f13_rMAD: %f \n", Texture_fo[11 * batch_size]); printf("f14_RMS: %f \n", Texture_fo[12 * batch_size]); printf("f15_StandardDeviation: %f \n", Texture_fo[13 * batch_size]); printf("f16_SKewness: %f \n", Texture_fo[14 * batch_size]); printf("f17_Kurtosis: %f \n", Texture_fo[15 * batch_size]); printf("f18_Variance: %f \n", Texture_fo[16 * batch_size]); printf("f19_Uniformity: %f \n", Texture_fo[17 * batch_size]); printf("f20_Uniformity: %f \n", Texture_fo[18 * batch_size]); printf("CUDA Texture_glcm finished \n"); */ //delete Property_glcm; //free(Texture_fo); } __global__ void calculate_firstorder_kernel_rl(int *image, int *P, int *dev_size, int *dev_stride, int dev_bin) { int blocks = gridDim.x * gridDim.y * blockIdx.z + gridDim.x * blockIdx.y 
+ blockIdx.x; int threads = blockDim.x * threadIdx.y + threadIdx.x; int ip = blocks * blockDim.x * blockDim.y + threads; int j, P_idx, ix, iy; int img_ith, ipix; img_ith = ip / (dev_size[0] * dev_size[1]); ipix = ip % (dev_size[0] * dev_size[1]); ix = ipix / dev_stride[0]; iy = ipix % dev_stride[0]; P_idx = int(image[ip] > -1) * (image[ip] + img_ith * dev_bin); atomicAdd(&P[P_idx], int(1) * int(image[ip] > -1)); } __global__ void firstorder_Np(int *P, float *Np, int dev_bin){ int blocks = gridDim.x * gridDim.y * blockIdx.z + gridDim.x * blockIdx.y + blockIdx.x; int threads = blockDim.x * threadIdx.y + threadIdx.x; int ip = blocks * blockDim.x * blockDim.y + threads; int img_ith, ipix; img_ith = ip / dev_bin; ipix = ip % dev_bin; atomicAdd(&Np[img_ith], float(P[ip])); } // histogram of 0-255 __global__ void firstorder_Pn(float *Pn, int *P, float *Np, int dev_bin){ int blocks = gridDim.x * gridDim.y * blockIdx.z + gridDim.x * blockIdx.y + blockIdx.x; int threads = blockDim.x * threadIdx.y + threadIdx.x; int ip = blocks * blockDim.x * blockDim.y + threads; int img_ith; img_ith = ip / dev_bin; atomicExch(&Pn[ip], float(P[ip]) / Np[img_ith]); } __global__ void firstorder_pn(float *pn, float *Pn, int dev_ng, int dev_bin, int bin_width){ int blocks = gridDim.x * gridDim.y * blockIdx.z + gridDim.x * blockIdx.y + blockIdx.x; int threads = blockDim.x * threadIdx.y + threadIdx.x; int ip = blocks * blockDim.x * blockDim.y + threads; int ipix, img_ith; img_ith = ip / dev_bin; ipix = ip % dev_bin; atomicAdd(&pn[ipix / bin_width + img_ith * dev_ng], float(Pn[ip])); } __global__ void firstorder_PF(int *PF, int *P, int dev_bin){ int blocks = gridDim.x * gridDim.y * blockIdx.z + gridDim.x * blockIdx.y + blockIdx.x; int threads = blockDim.x * threadIdx.y + threadIdx.x; int ip = blocks * blockDim.x * blockDim.y + threads; int ipix, img_ith; img_ith = ip / dev_bin; ipix = ip % dev_bin; for(int i = 0; i < dev_bin; i++){ atomicAdd(&PF[ip], P[i + img_ith * dev_bin] * float(i <= ipix)); } } __global__ void firstorder_Pf(float *Pf, int *PF, float *Np, int dev_bin){ int blocks = gridDim.x * gridDim.y * blockIdx.z + gridDim.x * blockIdx.y + blockIdx.x; int threads = blockDim.x * threadIdx.y + threadIdx.x; int ip = blocks * blockDim.x * blockDim.y + threads; int ipix, img_ith; img_ith = ip / dev_bin; ipix = ip % dev_bin; atomicExch(&Pf[ip], float(PF[ip]) / Np[img_ith]); } __global__ void firstorder_P25(int *P25, float *Pf, int dev_bin){ int blocks = gridDim.x * gridDim.y * blockIdx.z + gridDim.x * blockIdx.y + blockIdx.x; int threads = blockDim.x * threadIdx.y + threadIdx.x; int ip = blocks * blockDim.x * blockDim.y + threads; int ipix, img_ith; img_ith = ip / dev_bin; ipix = ip % dev_bin; atomicMax(&P25[img_ith], float(ipix + 1) * float(Pf[ip] < 0.25)); } __global__ void firstorder_P50(int *P50, float *Pf, int dev_bin){ int blocks = gridDim.x * gridDim.y * blockIdx.z + gridDim.x * blockIdx.y + blockIdx.x; int threads = blockDim.x * threadIdx.y + threadIdx.x; int ip = blocks * blockDim.x * blockDim.y + threads; int ipix, img_ith; img_ith = ip / dev_bin; ipix = ip % dev_bin; atomicMax(&P50[img_ith], float(ipix + 1) * float(Pf[ip] < 0.5)); } __global__ void firstorder_P75(int *P75, float *Pf, int dev_bin){ int blocks = gridDim.x * gridDim.y * blockIdx.z + gridDim.x * blockIdx.y + blockIdx.x; int threads = blockDim.x * threadIdx.y + threadIdx.x; int ip = blocks * blockDim.x * blockDim.y + threads; int ipix, img_ith; img_ith = ip / dev_bin; ipix = ip % dev_bin; atomicMax(&P75[img_ith], float(ipix + 1) * float(Pf[ip] < 
0.75)); } __global__ void firstorder_P10(int *P10, float *Pf, int dev_bin){ int blocks = gridDim.x * gridDim.y * blockIdx.z + gridDim.x * blockIdx.y + blockIdx.x; int threads = blockDim.x * threadIdx.y + threadIdx.x; int ip = blocks * blockDim.x * blockDim.y + threads; int ipix, img_ith; img_ith = ip / dev_bin; ipix = ip % dev_bin; atomicMax(&P10[img_ith], int(ipix + 1) * int(Pf[ip] < 0.1)); } __global__ void firstorder_P90(int *P90, float *Pf, int dev_bin){ int blocks = gridDim.x * gridDim.y * blockIdx.z + gridDim.x * blockIdx.y + blockIdx.x; int threads = blockDim.x * threadIdx.y + threadIdx.x; int ip = blocks * blockDim.x * blockDim.y + threads; int ipix, img_ith; img_ith = ip / dev_bin; ipix = ip % dev_bin; atomicMax(&P90[img_ith], float(ipix + 1) * float(Pf[ip] < 0.9)); } __global__ void firstorder_Pmin(int *Pmin, int *PF, int dev_bin){ int blocks = gridDim.x * gridDim.y * blockIdx.z + gridDim.x * blockIdx.y + blockIdx.x; int threads = blockDim.x * threadIdx.y + threadIdx.x; int ip = blocks * blockDim.x * blockDim.y + threads; int ipix, img_ith; img_ith = ip / dev_bin; ipix = ip % dev_bin; atomicMin(&Pmin[img_ith], PF[ip] > 0? ipix:255); } __global__ void firstorder_Pmax(int *Pmax, int *PF, int dev_bin){ int blocks = gridDim.x * gridDim.y * blockIdx.z + gridDim.x * blockIdx.y + blockIdx.x; int threads = blockDim.x * threadIdx.y + threadIdx.x; int ip = blocks * blockDim.x * blockDim.y + threads; int ipix, img_ith; img_ith = ip / dev_bin; ipix = ip % dev_bin; atomicMax(&Pmax[img_ith], ipix * int(PF[ip] > 0)); } __global__ void firstorder_Pm(float *Pm, float *Pn, int dev_bin){ int blocks = gridDim.x * gridDim.y * blockIdx.z + gridDim.x * blockIdx.y + blockIdx.x; int threads = blockDim.x * threadIdx.y + threadIdx.x; int ip = blocks * blockDim.x * blockDim.y + threads; int ipix, img_ith; img_ith = ip / dev_bin; ipix = ip % dev_bin; atomicAdd(&Pm[img_ith], Pn[ip] * float(ipix)); } __global__ void firstorder_N1090(int *N1090, int *P, int *P10, int *P90, int dev_bin){ int blocks = gridDim.x * gridDim.y * blockIdx.z + gridDim.x * blockIdx.y + blockIdx.x; int threads = blockDim.x * threadIdx.y + threadIdx.x; int ip = blocks * blockDim.x * blockDim.y + threads; int ipix, img_ith; img_ith = ip / dev_bin; ipix = ip % dev_bin; atomicAdd(&N1090[img_ith], P[ip] * int(ipix >= P10[img_ith]) * int(ipix <= P90[img_ith])); } __global__ void firstorder_mP1090(float *mP1090, int *N1090, float *Pn, float *Np, int *P10, int *P90, int dev_bin){ int blocks = gridDim.x * gridDim.y * blockIdx.z + gridDim.x * blockIdx.y + blockIdx.x; int threads = blockDim.x * threadIdx.y + threadIdx.x; int ip = blocks * blockDim.x * blockDim.y + threads; int ipix, img_ith; img_ith = ip / dev_bin; ipix = ip % dev_bin; atomicAdd(&mP1090[img_ith], Pn[ip] * float(ipix) * Np[img_ith] * float(ipix >= P10[img_ith]) * float(ipix <= P90[img_ith]) / float(N1090[img_ith])); } __global__ void firstorder_Pv(float *Pv, float *Pm, float *Pn, int dev_bin){ int blocks = gridDim.x * gridDim.y * blockIdx.z + gridDim.x * blockIdx.y + blockIdx.x; int threads = blockDim.x * threadIdx.y + threadIdx.x; int ip = blocks * blockDim.x * blockDim.y + threads; int ipix, img_ith; img_ith = ip / dev_bin; ipix = ip % dev_bin; atomicAdd(&Pv[img_ith], powf(ipix - Pm[img_ith], 2) * Pn[ip]); } __global__ void f1_Energy(float *rst, float *Pn, float *Np, int dev_bin){ int blocks = gridDim.x * gridDim.y * blockIdx.z + gridDim.x * blockIdx.y + blockIdx.x; int threads = blockDim.x * threadIdx.y + threadIdx.x; int ip = blocks * blockDim.x * blockDim.y + threads; int ipix, 
img_ith; img_ith = ip / dev_bin; ipix = ip % dev_bin; atomicAdd(&rst[img_ith], powf(float(ipix), 2) * Np[img_ith] * Pn[ip]); } __global__ void f3_Entropy(float *rst, float *pn, float *Np, int dev_bin, int bin_width, float Epsilon){ int blocks = gridDim.x * gridDim.y * blockIdx.z + gridDim.x * blockIdx.y + blockIdx.x; int threads = blockDim.x * threadIdx.y + threadIdx.x; int ip = blocks * blockDim.x * blockDim.y + threads; int ipix, img_ith, ipig; img_ith = ip / dev_bin; ipix = ip % dev_bin; ipig = ipix / bin_width; atomicAdd(&rst[img_ith], -log2f(pn[ipig + img_ith * dev_bin / bin_width] + Epsilon) * pn[ipig + img_ith * dev_bin / bin_width] * float(ipix % bin_width == 0)); } __global__ void f4_Minimum(float *rst, int *Pmin, int dev_bin){ int blocks = gridDim.x * gridDim.y * blockIdx.z + gridDim.x * blockIdx.y + blockIdx.x; int threads = blockDim.x * threadIdx.y + threadIdx.x; int ip = blocks * blockDim.x * blockDim.y + threads; int ipix, img_ith; img_ith = ip / dev_bin; ipix = ip % dev_bin; atomicExch(&rst[img_ith], float(Pmin[img_ith])); } __global__ void f5_TenthPercentile(float *rst, int *P10, int dev_bin){ int blocks = gridDim.x * gridDim.y * blockIdx.z + gridDim.x * blockIdx.y + blockIdx.x; int threads = blockDim.x * threadIdx.y + threadIdx.x; int ip = blocks * blockDim.x * blockDim.y + threads; int img_ith; img_ith = ip / dev_bin; atomicExch(&rst[img_ith], float(P10[img_ith])); } __global__ void f6_NinetiePercentile(float *rst, int *P90, int dev_bin){ int blocks = gridDim.x * gridDim.y * blockIdx.z + gridDim.x * blockIdx.y + blockIdx.x; int threads = blockDim.x * threadIdx.y + threadIdx.x; int ip = blocks * blockDim.x * blockDim.y + threads; int img_ith; img_ith = ip / dev_bin; atomicExch(&rst[img_ith], float(P90[img_ith])); } __global__ void f7_Maximum(float *rst, int *Pmax, int dev_bin){ int blocks = gridDim.x * gridDim.y * blockIdx.z + gridDim.x * blockIdx.y + blockIdx.x; int threads = blockDim.x * threadIdx.y + threadIdx.x; int ip = blocks * blockDim.x * blockDim.y + threads; int ipix, img_ith; img_ith = ip / dev_bin; ipix = ip % dev_bin; atomicExch(&rst[img_ith], float(Pmax[img_ith])); } __global__ void f8_Mean(float *rst, float *Pm, int dev_bin){ int blocks = gridDim.x * gridDim.y * blockIdx.z + gridDim.x * blockIdx.y + blockIdx.x; int threads = blockDim.x * threadIdx.y + threadIdx.x; int ip = blocks * blockDim.x * blockDim.y + threads; int ipix, img_ith; img_ith = ip / dev_bin; ipix = ip % dev_bin; atomicExch(&rst[img_ith], Pm[img_ith]); } __global__ void f9_Median(float *rst, int *P50, int dev_bin){ int blocks = gridDim.x * gridDim.y * blockIdx.z + gridDim.x * blockIdx.y + blockIdx.x; int threads = blockDim.x * threadIdx.y + threadIdx.x; int ip = blocks * blockDim.x * blockDim.y + threads; int img_ith; img_ith = ip / dev_bin; atomicExch(&rst[img_ith], float(P50[img_ith])); } __global__ void f10_InterquartileRange(float *rst, int *P25, int *P75, int dev_bin){ int blocks = gridDim.x * gridDim.y * blockIdx.z + gridDim.x * blockIdx.y + blockIdx.x; int threads = blockDim.x * threadIdx.y + threadIdx.x; int ip = blocks * blockDim.x * blockDim.y + threads; int img_ith; img_ith = ip / dev_bin; atomicExch(&rst[img_ith], float(P75[img_ith] - P25[img_ith])); } __global__ void f11_Range(float *rst, int *Pmin, int *Pmax, int dev_bin){ int blocks = gridDim.x * gridDim.y * blockIdx.z + gridDim.x * blockIdx.y + blockIdx.x; int threads = blockDim.x * threadIdx.y + threadIdx.x; int ip = blocks * blockDim.x * blockDim.y + threads; int img_ith; img_ith = ip / dev_bin; atomicExch(&rst[img_ith], 
float(Pmax[img_ith] - Pmin[img_ith])); } __global__ void f12_MAD(float *rst, float *Pn, float *Pm, float *Np, int dev_bin){ int blocks = gridDim.x * gridDim.y * blockIdx.z + gridDim.x * blockIdx.y + blockIdx.x; int threads = blockDim.x * threadIdx.y + threadIdx.x; int ip = blocks * blockDim.x * blockDim.y + threads; int ipix, img_ith; img_ith = ip / dev_bin; ipix = ip % dev_bin; atomicAdd(&rst[img_ith], abs(float(ipix) - Pm[img_ith]) * Pn[ip]); } __global__ void f13_rMAD(float *rst, int *N1090, float *mP1090, int *P10, int *P90, float *Pn, float * Np, int dev_bin){ int blocks = gridDim.x * gridDim.y * blockIdx.z + gridDim.x * blockIdx.y + blockIdx.x; int threads = blockDim.x * threadIdx.y + threadIdx.x; int ip = blocks * blockDim.x * blockDim.y + threads; int ipix, img_ith; img_ith = ip / dev_bin; ipix = ip % dev_bin; atomicAdd(&rst[img_ith], float(ipix>=P10[img_ith]) * float(ipix<=P90[img_ith]) * abs(ipix - mP1090[img_ith]) * Pn[ip] * Np[img_ith] / (N1090[img_ith] + 1)); } __global__ void f14_RMS(float *rst, float *Energy, float *Np, int dev_bin){ int blocks = gridDim.x * gridDim.y * blockIdx.z + gridDim.x * blockIdx.y + blockIdx.x; int threads = blockDim.x * threadIdx.y + threadIdx.x; int ip = blocks * blockDim.x * blockDim.y + threads; int img_ith; img_ith = ip / dev_bin; atomicExch(&rst[img_ith], sqrt(abs(Energy[img_ith]) / Np[img_ith])); } __global__ void f18_Variance(float *rst, float *Pv, int dev_bin){ int blocks = gridDim.x * gridDim.y * blockIdx.z + gridDim.x * blockIdx.y + blockIdx.x; int threads = blockDim.x * threadIdx.y + threadIdx.x; int ip = blocks * blockDim.x * blockDim.y + threads; int ipix, img_ith; img_ith = ip / dev_bin; ipix = ip % dev_bin; atomicExch(&rst[img_ith], Pv[img_ith]); //powf(ipix - Pm[img_ith], 2) * Pn[ip]); } __global__ void f15_StandardDeviation(float *rst, float *Pv, int dev_bin){ int blocks = gridDim.x * gridDim.y * blockIdx.z + gridDim.x * blockIdx.y + blockIdx.x; int threads = blockDim.x * threadIdx.y + threadIdx.x; int ip = blocks * blockDim.x * blockDim.y + threads; int img_ith; img_ith = ip / dev_bin; atomicExch(&rst[img_ith], sqrtf(Pv[img_ith])); } __global__ void f16_Skewness(float *rst, float *Pm, float *Pn, float *Pv, int dev_bin, float Epsilon){ int blocks = gridDim.x * gridDim.y * blockIdx.z + gridDim.x * blockIdx.y + blockIdx.x; int threads = blockDim.x * threadIdx.y + threadIdx.x; int ip = blocks * blockDim.x * blockDim.y + threads; int ipix, img_ith; img_ith = ip / dev_bin; ipix = ip % dev_bin; atomicAdd(&rst[img_ith], powf(ipix - Pm[img_ith], 3) * Pn[ip] / (powf(Pv[img_ith], 1.5) + Epsilon)); } __global__ void f17_Kurtosis(float *rst, float *Pm, float *Pn, float *Pv, int dev_bin, float Epsilon){ int blocks = gridDim.x * gridDim.y * blockIdx.z + gridDim.x * blockIdx.y + blockIdx.x; int threads = blockDim.x * threadIdx.y + threadIdx.x; int ip = blocks * blockDim.x * blockDim.y + threads; int ipix, img_ith; img_ith = ip / dev_bin; ipix = ip % dev_bin; atomicAdd(&rst[img_ith], powf(ipix - Pm[img_ith], 4) * Pn[ip] / (powf(Pv[img_ith], 2) + Epsilon)); } __global__ void f19_Uniformity(float *rst, float *pn, int dev_bin, int bin_width){ int blocks = gridDim.x * gridDim.y * blockIdx.z + gridDim.x * blockIdx.y + blockIdx.x; int threads = blockDim.x * threadIdx.y + threadIdx.x; int ip = blocks * blockDim.x * blockDim.y + threads; int ipig, ipix, img_ith; img_ith = ip / dev_bin; ipix = ip % dev_bin; ipig = ipix / bin_width; atomicAdd(&rst[img_ith], powf(pn[ipig + img_ith * dev_bin / bin_width], 2) * float(ipix % bin_width == 0)); } __global__ void 
f20_Volume(float *rst, float *Np, int dev_bin){ int blocks = gridDim.x * gridDim.y * blockIdx.z + gridDim.x * blockIdx.y + blockIdx.x; int threads = blockDim.x * threadIdx.y + threadIdx.x; int ip = blocks * blockDim.x * blockDim.y + threads; int ipig, ipix, img_ith; img_ith = ip / dev_bin; atomicExch(&rst[img_ith], Np[img_ith]); }
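Every kernel in this first-order feature extractor recomputes the same flattened thread index from the 3D grid and 2D block, then splits it into a batch index and an intensity-bin index. The helper below restates that bookkeeping in one place; the function name and the refactoring are illustrative only, since the original kernels inline the arithmetic.

// Hedged sketch (not in the original file): the index bookkeeping shared by the
// firstorder_* and f*_ kernels above.
__device__ __forceinline__ int flat_thread_index() {
  // Linear block id over a (gridDim.x, gridDim.y, gridDim.z) launch.
  int block = gridDim.x * gridDim.y * blockIdx.z + gridDim.x * blockIdx.y + blockIdx.x;
  // Linear thread id inside a (blockDim.x, blockDim.y) block.
  int thread = blockDim.x * threadIdx.y + threadIdx.x;
  return block * blockDim.x * blockDim.y + thread;
}

// Each kernel then derives its coordinates from the flat index:
//   int ip      = flat_thread_index();
//   int img_ith = ip / dev_bin;   // which image in the batch
//   int ipix    = ip % dev_bin;   // which intensity bin, 0 .. dev_bin-1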
31c4c6dca41918768f1a3fe35d7484f190425a6f.cu
#include"firstorder.h" int *Calculate_firstorder_rl(const int *image, int *Range, int MASK_VALUE, int *size, int *stride,int Ng, int bin_width, int batch_size) { //START_TIMER(time) // getting the max value of the image intensity int dev_bin = Ng * bin_width; int nbytes_image = sizeof(int) * size[0] * size[1] * batch_size; //int nbytes_mask = sizeof(int) * size[0] * size[1] * batch_size; int nbytes_firstorder = sizeof(int) * dev_bin * batch_size; //int *glcm = (int*)malloc(nbytes_glcm); int *dev_size = NULL; int *dev_stride = NULL; int *dev_P = NULL; int *dev_image = NULL; cudaMalloc((void**)&dev_P, nbytes_firstorder); cudaMalloc((void**)&dev_image, nbytes_image); dim3 grids_Pn(1, 1, batch_size); dim3 threads_Pn(bin_width, Ng); initialize<<<grids_Pn, threads_Pn>>>(dev_P); cudaMalloc((void**)&dev_size, sizeof(int) * 2); cudaMalloc((void**)&dev_stride, sizeof(int) * 2); //ANDLE_ERROR(cudaMemcpy((void*)dev_image, (void*)image, nbytes_image, cudaMemcpyHostToDevice)); //HANDLE_ERROR(cudaMemcpy((void*)dev_mask, (void*)mask, nbytes_mask, cudaMemcpyHostToDevice)); //HANDLE_ERROR(cudaMemcpy((void*)dev_glcm, (void*)glcm, nbytes_glcm, cudaMemcpyHostToDevice)); cudaMemcpy(dev_size, size, sizeof(int) * 2, cudaMemcpyHostToDevice); cudaMemcpy(dev_stride, stride, sizeof(int) * 2, cudaMemcpyHostToDevice); //printf("copying: "); //PRINT_TIME(time) //printf("\n"); dim3 grids(size[0] / 8, size[1] / 8, batch_size); dim3 threads(64, 1); //START_TIMER(time) //START_TIMER(time) Preprocessing_image_firstorder<<<grids, threads>>>(dev_image, image, Range[0], Range[1],MASK_VALUE); calculate_firstorder_kernel_rl<<<grids, threads>>>(dev_image, dev_P, dev_size, dev_stride, dev_bin); cudaDeviceSynchronize(); cudaFree(dev_stride); cudaFree(dev_size); cudaFree(dev_image); return dev_P; //return dev_glcm; } void Calculate_firstorder_Property(PROPERTY_fo *Property_fo, float Epsilon, int bin_width, int Ng, int batch_size) { // getting the range of the image intensity int dev_bin = bin_width * Ng; if (Property_fo->Np != NULL) { delete Property_fo->Np; Property_fo->Np = NULL; //printf("Property_glcm->s != NULL! \n"); } if (Property_fo->Pn != NULL) { delete Property_fo->Pn; Property_fo->Pn = NULL; //printf("Property_glcm->Px != NULL! \n"); } if (Property_fo->PF != NULL) { delete Property_fo->PF; Property_fo->PF = NULL; //printf("Property_glcm->Px != NULL! \n"); } if (Property_fo->Pf != NULL) { delete Property_fo->Pf; Property_fo->Pf = NULL; //printf("Property_glcm->Px != NULL! \n"); } if (Property_fo->pn != NULL) { delete Property_fo->pn; Property_fo->pn = NULL; //printf("Property_glcm->Px != NULL! \n"); } if (Property_fo->P10 != NULL) { delete Property_fo->P10; Property_fo->P10 = NULL; //printf("Property_glcm->Py != NULL! \n"); } if (Property_fo->P25 != NULL) { delete Property_fo->P25; Property_fo->P25 = NULL; //printf("Property_glcm->Py != NULL! \n"); } if (Property_fo->P50 != NULL) { delete Property_fo->P50; Property_fo->P50 = NULL; //printf("Property_glcm->Py != NULL! \n"); } if (Property_fo->P75 != NULL) { delete Property_fo->P75; Property_fo->P75 = NULL; //printf("Property_glcm->Py != NULL! \n"); } if (Property_fo->P90 != NULL) { delete Property_fo->P90; Property_fo->P90 = NULL; //printf("Property_glcm->Py != NULL! \n"); } if (Property_fo->N1090 != NULL) { delete Property_fo->N1090; Property_fo->N1090 = NULL; //printf("Property_glcm->Py != NULL! \n"); } if (Property_fo->mP1090 != NULL) { delete Property_fo->mP1090; Property_fo->mP1090 = NULL; //printf("Property_glcm->Py != NULL! 
\n"); } if (Property_fo->Pmax != NULL) { delete Property_fo->Pmax; Property_fo->Pmax = NULL; //printf("Property_glcm->Py != NULL! \n"); } if (Property_fo->Pmin != NULL) { delete Property_fo->Pmin; Property_fo->Pmin = NULL; //printf("Property_glcm->Py != NULL! \n"); } if (Property_fo->Pm != NULL) { delete Property_fo->Pm; Property_fo->Pm = NULL; //printf("Property_glcm->Py != NULL! \n"); } if (Property_fo->Pv != NULL) { delete Property_fo->Pv; Property_fo->Pv = NULL; //printf("Property_glcm->Py != NULL! \n"); } cudaDeviceSynchronize(); cudaMalloc((void**)&(Property_fo->Np), sizeof(float) * batch_size); dim3 grids_Np(1, 1, batch_size); dim3 threads_Np(1, 1); initialize_tex<<<grids_Np, threads_Np>>>(Property_fo->Np); cudaMalloc((void**)&Property_fo->Pn, sizeof(float) * dev_bin * batch_size); dim3 grids_Pn(1, 1, batch_size); dim3 threads_Pn(Ng, bin_width); initialize_tex<<<grids_Pn, threads_Pn>>>(Property_fo->Pn); cudaMalloc((void**)&Property_fo->pn, sizeof(float) * Ng * batch_size); dim3 grids_pn(1, Ng, batch_size); dim3 threads_pn(1, 1); initialize_tex<<<grids_pn, threads_pn>>>(Property_fo->pn); cudaMalloc((void**)&Property_fo->PF, sizeof(int) * dev_bin * batch_size); dim3 grids_PF(1, 1, batch_size); dim3 threads_PF(Ng, bin_width); initialize<<<grids_PF, threads_PF>>>(Property_fo->PF); cudaMalloc((void**)&Property_fo->Pf, sizeof(float) * dev_bin * batch_size); dim3 grids_Pf(1, 1, batch_size); dim3 threads_Pf(Ng, bin_width); initialize_tex<<<grids_Pf, threads_Pf>>>(Property_fo->Pf); cudaMalloc((void**)&Property_fo->P25, sizeof(int) * batch_size); dim3 grids_P25(1, 1, batch_size); dim3 threads_P25(1, 1); initialize<<<grids_P25, threads_P25>>>(Property_fo->P25); cudaMalloc((void**)&Property_fo->P75, sizeof(int) * batch_size); dim3 grids_P75(1, 1, batch_size); dim3 threads_P75(1, 1); initialize<<<grids_P75, threads_P75>>>(Property_fo->P75); cudaMalloc((void**)&Property_fo->P50, sizeof(int) * batch_size); dim3 grids_P50(1, 1, batch_size); dim3 threads_P50(1, 1); initialize<<<grids_P50, threads_P50>>>(Property_fo->P50); cudaMalloc((void**)&Property_fo->P90, sizeof(int) * batch_size); dim3 grids_P90(1, 1, batch_size); dim3 threads_P90(1, 1); initialize<<<grids_P90, threads_P90>>>(Property_fo->P90); cudaMalloc((void**)&Property_fo->P10, sizeof(int) * batch_size); dim3 grids_P10(1, 1, batch_size); dim3 threads_P10(1, 1); initialize<<<grids_P10, threads_P10>>>(Property_fo->P10); cudaMalloc((void**)&Property_fo->N1090, sizeof(int) * batch_size); dim3 grids_N1090(1, 1, batch_size); dim3 threads_N1090(1, 1); initialize<<<grids_N1090, threads_N1090>>>(Property_fo->N1090); cudaMalloc((void**)&Property_fo->mP1090, sizeof(float) * batch_size); dim3 grids_mP1090(1, 1, batch_size); dim3 threads_mP1090(1, 1); initialize_tex<<<grids_mP1090, threads_mP1090>>>(Property_fo->mP1090); cudaMalloc((void**)&Property_fo->Pmin, sizeof(int) * batch_size); dim3 grids_Pmin(1, 1, batch_size); dim3 threads_Pmin(1, 1); initialize<<<grids_Pmin, threads_Pmin>>>(Property_fo->Pmin); cudaMalloc((void**)&Property_fo->Pmax, sizeof(int) * batch_size); dim3 grids_Pmax(1, 1, batch_size); dim3 threads_Pmax(1, 1); initialize<<<grids_Pmax, threads_Pmax>>>(Property_fo->Pmax); cudaMalloc((void**)&Property_fo->Pm, sizeof(float) * batch_size); dim3 grids_Pm(1, 1, batch_size); dim3 threads_Pm(1, 1); initialize_tex<<<grids_Pm, threads_Pm>>>(Property_fo->Pm); cudaMalloc((void**)&Property_fo->Pv, sizeof(float) * batch_size); dim3 grids_Pv(1, 1, batch_size); dim3 threads_Pv(1, 1); initialize_tex<<<grids_Pv, threads_Pv>>>(Property_fo->Pv); 
cudaDeviceSynchronize(); dim3 grids(1, 1, batch_size); dim3 threads(Ng, bin_width); firstorder_Np<<<grids, threads>>>(Property_fo->P, Property_fo->Np, dev_bin); //printf("Get Property_glcm sum! \n"); //HANDLE_ERROR(cudaMemcpy(Property_glcm->s, s, sizeof(float), cudaMemcpyDeviceToHost)); cudaDeviceSynchronize(); firstorder_Pn<<<grids, threads>>>(Property_fo->Pn, Property_fo->P, Property_fo->Np, dev_bin); //printf("Get Property_glcm Pn! \n"); cudaDeviceSynchronize(); firstorder_pn<<<grids, threads>>>(Property_fo->pn, Property_fo->Pn, Ng, dev_bin, bin_width); firstorder_PF<<<grids, threads>>>(Property_fo->PF, Property_fo->P, dev_bin); cudaDeviceSynchronize(); firstorder_Pf<<<grids, threads>>>(Property_fo->Pf, Property_fo->PF, Property_fo->Np, dev_bin); cudaDeviceSynchronize(); firstorder_P10<<<grids, threads>>>(Property_fo->P10, Property_fo->Pf, dev_bin); firstorder_P25<<<grids, threads>>>(Property_fo->P25, Property_fo->Pf, dev_bin); firstorder_P50<<<grids, threads>>>(Property_fo->P50, Property_fo->Pf, dev_bin); firstorder_P75<<<grids, threads>>>(Property_fo->P75, Property_fo->Pf, dev_bin); firstorder_P90<<<grids, threads>>>(Property_fo->P90, Property_fo->Pf, dev_bin); firstorder_Pmin<<<grids, threads>>>(Property_fo->Pmin, Property_fo->PF, dev_bin); firstorder_Pmax<<<grids, threads>>>(Property_fo->Pmax, Property_fo->PF, dev_bin); cudaDeviceSynchronize(); firstorder_N1090<<<grids, threads>>>(Property_fo->N1090, Property_fo->P, Property_fo->P10, Property_fo->P90, dev_bin); cudaDeviceSynchronize(); firstorder_mP1090<<<grids, threads>>>(Property_fo->mP1090, Property_fo->N1090, Property_fo->Pn, Property_fo->Np, Property_fo->P10, Property_fo->P90, dev_bin); firstorder_Pm<<<grids, threads>>>(Property_fo->Pm, Property_fo->Pn, dev_bin); cudaDeviceSynchronize(); firstorder_Pv<<<grids, threads>>>(Property_fo->Pv, Property_fo->Pm, Property_fo->Pn, dev_bin); } void Calculate_firstorder_Texture_rl(PROPERTY_fo *Property_fo, float *texture_fo, float Epsilon, int Ng, int bin_width, int batch_size) { //START_TIMER(time) //float *Texture_fo = (float*)malloc(sizeof(float) * 19 * batch_size); //printf("Texture_glcm initialized! 
\n"); // getting the range of the image intensity int dev_bin = bin_width * Ng; dim3 grids1(1, 1, batch_size); dim3 threads1(18, 1); //cudaMalloc((void**)&texture_glcm, sizeof(float) * NA * batch_size); initialize_tex<<<grids1, threads1>>>(texture_fo); cudaDeviceSynchronize(); dim3 grids(1, 1, batch_size); dim3 threads(Ng, bin_width); //printf("getting Texture_glcm CUDA \n"); //Calculate_GLCM_Texture_glcm_kernel<<<grids, threads>>>(Texture_glcm, Property_glcm, Ng); f1_Energy<<<grids, threads>>>(&texture_fo[0 * batch_size], Property_fo->Pn, Property_fo->Np, dev_bin); //cudaMemcpy(&Texture_fo[0 * batch_size], &texture_fo[0 * batch_size], sizeof(float) * batch_size, cudaMemcpyDeviceToHost); cudaDeviceSynchronize(); f3_Entropy<<<grids, threads>>>(&texture_fo[1 * batch_size], Property_fo->pn, Property_fo->Np, dev_bin, bin_width, Epsilon); //cudaMemcpy(&Texture_fo[1 * batch_size], &texture_fo[1 * batch_size], sizeof(float) * batch_size, cudaMemcpyDeviceToHost); f4_Minimum<<<grids, threads>>>(&texture_fo[2 * batch_size], Property_fo->Pmin, dev_bin); //cudaMemcpy(&Texture_fo[2 * batch_size], &texture_fo[2 * batch_size], sizeof(float) * batch_size, cudaMemcpyDeviceToHost); f5_TenthPercentile<<<grids, threads>>>(&texture_fo[3 * batch_size], Property_fo->P10, dev_bin); //cudaMemcpy(&Texture_fo[3 * batch_size], &texture_fo[3 * batch_size], sizeof(float) * batch_size, cudaMemcpyDeviceToHost); f6_NinetiePercentile<<<grids, threads>>>(&texture_fo[4 * batch_size], Property_fo->P90, dev_bin); //cudaMemcpy(&Texture_fo[4 * batch_size], &texture_fo[4 * batch_size], sizeof(float) * batch_size, cudaMemcpyDeviceToHost); f7_Maximum<<<grids, threads>>>(&texture_fo[5 * batch_size], Property_fo->Pmax, dev_bin); //cudaMemcpy(&Texture_fo[5 * batch_size], &texture_fo[5 * batch_size], sizeof(float) * batch_size, cudaMemcpyDeviceToHost); f8_Mean<<<grids, threads>>>(&texture_fo[6 * batch_size], Property_fo->Pm, dev_bin); //cudaMemcpy(&Texture_fo[6 * batch_size], &texture_fo[6 * batch_size], sizeof(float) * batch_size, cudaMemcpyDeviceToHost); f9_Median<<<grids, threads>>>(&texture_fo[7 * batch_size], Property_fo->P50, dev_bin); //cudaMemcpy(&Texture_fo[7 * batch_size], &texture_fo[7 * batch_size], sizeof(float) * batch_size, cudaMemcpyDeviceToHost); f10_InterquartileRange<<<grids, threads>>>(&texture_fo[8 * batch_size], Property_fo->P25, Property_fo->P75, dev_bin); //cudaMemcpy(&Texture_fo[8 * batch_size], &texture_fo[8 * batch_size], sizeof(float) * batch_size, cudaMemcpyDeviceToHost); f11_Range<<<grids, threads>>>(&texture_fo[9 * batch_size], Property_fo->Pmin, Property_fo->Pmax, dev_bin); //cudaMemcpy(&Texture_fo[9 * batch_size], &texture_fo[9 * batch_size], sizeof(float) * batch_size, cudaMemcpyDeviceToHost); f12_MAD<<<grids, threads>>>(&texture_fo[10 * batch_size], Property_fo->Pn, Property_fo->Pm, Property_fo->Np, dev_bin); //cudaMemcpy(&Texture_fo[10 * batch_size], &texture_fo[10 * batch_size], sizeof(float) * batch_size, cudaMemcpyDeviceToHost); f13_rMAD<<<grids, threads>>>(&texture_fo[11 * batch_size], Property_fo->N1090, Property_fo->mP1090, Property_fo->P10, Property_fo->P90, Property_fo->Pn, Property_fo->Np, dev_bin); //cudaMemcpy(&Texture_fo[11 * batch_size], &texture_fo[11 * batch_size], sizeof(float) * batch_size, cudaMemcpyDeviceToHost); cudaDeviceSynchronize(); f14_RMS<<<grids, threads>>>(&texture_fo[12 * batch_size], &texture_fo[0 * batch_size], Property_fo->Np, dev_bin); //cudaMemcpy(&Texture_fo[12 * batch_size], &texture_fo[12 * batch_size], sizeof(float) * batch_size, cudaMemcpyDeviceToHost); 
f15_StandardDeviation<<<grids, threads>>>(&texture_fo[13 * batch_size], Property_fo->Pv, dev_bin); //cudaMemcpy(&Texture_fo[13 * batch_size], &texture_fo[13 * batch_size], sizeof(float) * batch_size, cudaMemcpyDeviceToHost); f16_Skewness<<<grids, threads>>>(&texture_fo[14 * batch_size], Property_fo->Pm, Property_fo->Pn, Property_fo->Pv, dev_bin, Epsilon); //cudaMemcpy(&Texture_fo[14 * batch_size], &texture_fo[14 * batch_size], sizeof(float) * batch_size, cudaMemcpyDeviceToHost); f17_Kurtosis<<<grids, threads>>>(&texture_fo[15 * batch_size], Property_fo->Pm, Property_fo->Pn, Property_fo->Pv, dev_bin, Epsilon); //cudaMemcpy(&Texture_fo[15 * batch_size], &texture_fo[15 * batch_size], sizeof(float) * batch_size, cudaMemcpyDeviceToHost); f18_Variance<<<grids, threads>>>(&texture_fo[16 * batch_size], Property_fo->Pv, dev_bin); //cudaMemcpy(&Texture_fo[16 * batch_size], &texture_fo[16 * batch_size], sizeof(float) * batch_size, cudaMemcpyDeviceToHost); f19_Uniformity<<<grids, threads>>>(&texture_fo[17 * batch_size], Property_fo->pn, dev_bin, bin_width); //cudaMemcpy(&Texture_fo[17 * batch_size], &texture_fo[17 * batch_size], sizeof(float) * batch_size, cudaMemcpyDeviceToHost); //f20_Volume<<<grids, threads>>>(&texture_fo[18 * batch_size], Property_fo->Np, dev_bin); //cudaMemcpy(&Texture_fo[18 * batch_size], &texture_fo[18* batch_size], sizeof(float) * batch_size, cudaMemcpyDeviceToHost); cudaDeviceSynchronize(); //STOP_TIMER(time) /* printf("getting Texture_glcm: \n"); printf("f1_Energy: %f \n", Texture_fo[0 * batch_size]); printf("f3_Entropy: %f \n", Texture_fo[1 * batch_size]); printf("f4_Minimum: %f \n", Texture_fo[2 * batch_size]); printf("f5_TenthPercentile: %f \n", Texture_fo[3 * batch_size]); printf("f6_NinetiethPercentile: %f \n", Texture_fo[4 * batch_size]); printf("f7_Maximum: %f \n", Texture_fo[5 * batch_size]); printf("f8_Mean: %f \n", Texture_fo[6 * batch_size]); printf("f9_Median: %f \n", Texture_fo[7 * batch_size]); printf("f10_InterquartileRange: %f \n", Texture_fo[8 * batch_size]); printf("f11_Range: %f \n", Texture_fo[9 * batch_size]); printf("f12_MAD: %f \n", Texture_fo[10 * batch_size]); printf("f13_rMAD: %f \n", Texture_fo[11 * batch_size]); printf("f14_RMS: %f \n", Texture_fo[12 * batch_size]); printf("f15_StandardDeviation: %f \n", Texture_fo[13 * batch_size]); printf("f16_SKewness: %f \n", Texture_fo[14 * batch_size]); printf("f17_Kurtosis: %f \n", Texture_fo[15 * batch_size]); printf("f18_Variance: %f \n", Texture_fo[16 * batch_size]); printf("f19_Uniformity: %f \n", Texture_fo[17 * batch_size]); printf("f20_Uniformity: %f \n", Texture_fo[18 * batch_size]); printf("CUDA Texture_glcm finished \n"); */ //delete Property_glcm; //free(Texture_fo); } __global__ void calculate_firstorder_kernel_rl(int *image, int *P, int *dev_size, int *dev_stride, int dev_bin) { int blocks = gridDim.x * gridDim.y * blockIdx.z + gridDim.x * blockIdx.y + blockIdx.x; int threads = blockDim.x * threadIdx.y + threadIdx.x; int ip = blocks * blockDim.x * blockDim.y + threads; int j, P_idx, ix, iy; int img_ith, ipix; img_ith = ip / (dev_size[0] * dev_size[1]); ipix = ip % (dev_size[0] * dev_size[1]); ix = ipix / dev_stride[0]; iy = ipix % dev_stride[0]; P_idx = int(image[ip] > -1) * (image[ip] + img_ith * dev_bin); atomicAdd(&P[P_idx], int(1) * int(image[ip] > -1)); } __global__ void firstorder_Np(int *P, float *Np, int dev_bin){ int blocks = gridDim.x * gridDim.y * blockIdx.z + gridDim.x * blockIdx.y + blockIdx.x; int threads = blockDim.x * threadIdx.y + threadIdx.x; int ip = blocks * blockDim.x * 
blockDim.y + threads; int img_ith, ipix; img_ith = ip / dev_bin; ipix = ip % dev_bin; atomicAdd(&Np[img_ith], float(P[ip])); } // histogram of 0-255 __global__ void firstorder_Pn(float *Pn, int *P, float *Np, int dev_bin){ int blocks = gridDim.x * gridDim.y * blockIdx.z + gridDim.x * blockIdx.y + blockIdx.x; int threads = blockDim.x * threadIdx.y + threadIdx.x; int ip = blocks * blockDim.x * blockDim.y + threads; int img_ith; img_ith = ip / dev_bin; atomicExch(&Pn[ip], float(P[ip]) / Np[img_ith]); } __global__ void firstorder_pn(float *pn, float *Pn, int dev_ng, int dev_bin, int bin_width){ int blocks = gridDim.x * gridDim.y * blockIdx.z + gridDim.x * blockIdx.y + blockIdx.x; int threads = blockDim.x * threadIdx.y + threadIdx.x; int ip = blocks * blockDim.x * blockDim.y + threads; int ipix, img_ith; img_ith = ip / dev_bin; ipix = ip % dev_bin; atomicAdd(&pn[ipix / bin_width + img_ith * dev_ng], float(Pn[ip])); } __global__ void firstorder_PF(int *PF, int *P, int dev_bin){ int blocks = gridDim.x * gridDim.y * blockIdx.z + gridDim.x * blockIdx.y + blockIdx.x; int threads = blockDim.x * threadIdx.y + threadIdx.x; int ip = blocks * blockDim.x * blockDim.y + threads; int ipix, img_ith; img_ith = ip / dev_bin; ipix = ip % dev_bin; for(int i = 0; i < dev_bin; i++){ atomicAdd(&PF[ip], P[i + img_ith * dev_bin] * float(i <= ipix)); } } __global__ void firstorder_Pf(float *Pf, int *PF, float *Np, int dev_bin){ int blocks = gridDim.x * gridDim.y * blockIdx.z + gridDim.x * blockIdx.y + blockIdx.x; int threads = blockDim.x * threadIdx.y + threadIdx.x; int ip = blocks * blockDim.x * blockDim.y + threads; int ipix, img_ith; img_ith = ip / dev_bin; ipix = ip % dev_bin; atomicExch(&Pf[ip], float(PF[ip]) / Np[img_ith]); } __global__ void firstorder_P25(int *P25, float *Pf, int dev_bin){ int blocks = gridDim.x * gridDim.y * blockIdx.z + gridDim.x * blockIdx.y + blockIdx.x; int threads = blockDim.x * threadIdx.y + threadIdx.x; int ip = blocks * blockDim.x * blockDim.y + threads; int ipix, img_ith; img_ith = ip / dev_bin; ipix = ip % dev_bin; atomicMax(&P25[img_ith], float(ipix + 1) * float(Pf[ip] < 0.25)); } __global__ void firstorder_P50(int *P50, float *Pf, int dev_bin){ int blocks = gridDim.x * gridDim.y * blockIdx.z + gridDim.x * blockIdx.y + blockIdx.x; int threads = blockDim.x * threadIdx.y + threadIdx.x; int ip = blocks * blockDim.x * blockDim.y + threads; int ipix, img_ith; img_ith = ip / dev_bin; ipix = ip % dev_bin; atomicMax(&P50[img_ith], float(ipix + 1) * float(Pf[ip] < 0.5)); } __global__ void firstorder_P75(int *P75, float *Pf, int dev_bin){ int blocks = gridDim.x * gridDim.y * blockIdx.z + gridDim.x * blockIdx.y + blockIdx.x; int threads = blockDim.x * threadIdx.y + threadIdx.x; int ip = blocks * blockDim.x * blockDim.y + threads; int ipix, img_ith; img_ith = ip / dev_bin; ipix = ip % dev_bin; atomicMax(&P75[img_ith], float(ipix + 1) * float(Pf[ip] < 0.75)); } __global__ void firstorder_P10(int *P10, float *Pf, int dev_bin){ int blocks = gridDim.x * gridDim.y * blockIdx.z + gridDim.x * blockIdx.y + blockIdx.x; int threads = blockDim.x * threadIdx.y + threadIdx.x; int ip = blocks * blockDim.x * blockDim.y + threads; int ipix, img_ith; img_ith = ip / dev_bin; ipix = ip % dev_bin; atomicMax(&P10[img_ith], int(ipix + 1) * int(Pf[ip] < 0.1)); } __global__ void firstorder_P90(int *P90, float *Pf, int dev_bin){ int blocks = gridDim.x * gridDim.y * blockIdx.z + gridDim.x * blockIdx.y + blockIdx.x; int threads = blockDim.x * threadIdx.y + threadIdx.x; int ip = blocks * blockDim.x * blockDim.y + threads; 
int ipix, img_ith; img_ith = ip / dev_bin; ipix = ip % dev_bin; atomicMax(&P90[img_ith], float(ipix + 1) * float(Pf[ip] < 0.9)); } __global__ void firstorder_Pmin(int *Pmin, int *PF, int dev_bin){ int blocks = gridDim.x * gridDim.y * blockIdx.z + gridDim.x * blockIdx.y + blockIdx.x; int threads = blockDim.x * threadIdx.y + threadIdx.x; int ip = blocks * blockDim.x * blockDim.y + threads; int ipix, img_ith; img_ith = ip / dev_bin; ipix = ip % dev_bin; atomicMin(&Pmin[img_ith], PF[ip] > 0? ipix:255); } __global__ void firstorder_Pmax(int *Pmax, int *PF, int dev_bin){ int blocks = gridDim.x * gridDim.y * blockIdx.z + gridDim.x * blockIdx.y + blockIdx.x; int threads = blockDim.x * threadIdx.y + threadIdx.x; int ip = blocks * blockDim.x * blockDim.y + threads; int ipix, img_ith; img_ith = ip / dev_bin; ipix = ip % dev_bin; atomicMax(&Pmax[img_ith], ipix * int(PF[ip] > 0)); } __global__ void firstorder_Pm(float *Pm, float *Pn, int dev_bin){ int blocks = gridDim.x * gridDim.y * blockIdx.z + gridDim.x * blockIdx.y + blockIdx.x; int threads = blockDim.x * threadIdx.y + threadIdx.x; int ip = blocks * blockDim.x * blockDim.y + threads; int ipix, img_ith; img_ith = ip / dev_bin; ipix = ip % dev_bin; atomicAdd(&Pm[img_ith], Pn[ip] * float(ipix)); } __global__ void firstorder_N1090(int *N1090, int *P, int *P10, int *P90, int dev_bin){ int blocks = gridDim.x * gridDim.y * blockIdx.z + gridDim.x * blockIdx.y + blockIdx.x; int threads = blockDim.x * threadIdx.y + threadIdx.x; int ip = blocks * blockDim.x * blockDim.y + threads; int ipix, img_ith; img_ith = ip / dev_bin; ipix = ip % dev_bin; atomicAdd(&N1090[img_ith], P[ip] * int(ipix >= P10[img_ith]) * int(ipix <= P90[img_ith])); } __global__ void firstorder_mP1090(float *mP1090, int *N1090, float *Pn, float *Np, int *P10, int *P90, int dev_bin){ int blocks = gridDim.x * gridDim.y * blockIdx.z + gridDim.x * blockIdx.y + blockIdx.x; int threads = blockDim.x * threadIdx.y + threadIdx.x; int ip = blocks * blockDim.x * blockDim.y + threads; int ipix, img_ith; img_ith = ip / dev_bin; ipix = ip % dev_bin; atomicAdd(&mP1090[img_ith], Pn[ip] * float(ipix) * Np[img_ith] * float(ipix >= P10[img_ith]) * float(ipix <= P90[img_ith]) / float(N1090[img_ith])); } __global__ void firstorder_Pv(float *Pv, float *Pm, float *Pn, int dev_bin){ int blocks = gridDim.x * gridDim.y * blockIdx.z + gridDim.x * blockIdx.y + blockIdx.x; int threads = blockDim.x * threadIdx.y + threadIdx.x; int ip = blocks * blockDim.x * blockDim.y + threads; int ipix, img_ith; img_ith = ip / dev_bin; ipix = ip % dev_bin; atomicAdd(&Pv[img_ith], powf(ipix - Pm[img_ith], 2) * Pn[ip]); } __global__ void f1_Energy(float *rst, float *Pn, float *Np, int dev_bin){ int blocks = gridDim.x * gridDim.y * blockIdx.z + gridDim.x * blockIdx.y + blockIdx.x; int threads = blockDim.x * threadIdx.y + threadIdx.x; int ip = blocks * blockDim.x * blockDim.y + threads; int ipix, img_ith; img_ith = ip / dev_bin; ipix = ip % dev_bin; atomicAdd(&rst[img_ith], powf(float(ipix), 2) * Np[img_ith] * Pn[ip]); } __global__ void f3_Entropy(float *rst, float *pn, float *Np, int dev_bin, int bin_width, float Epsilon){ int blocks = gridDim.x * gridDim.y * blockIdx.z + gridDim.x * blockIdx.y + blockIdx.x; int threads = blockDim.x * threadIdx.y + threadIdx.x; int ip = blocks * blockDim.x * blockDim.y + threads; int ipix, img_ith, ipig; img_ith = ip / dev_bin; ipix = ip % dev_bin; ipig = ipix / bin_width; atomicAdd(&rst[img_ith], -log2f(pn[ipig + img_ith * dev_bin / bin_width] + Epsilon) * pn[ipig + img_ith * dev_bin / bin_width] * 
float(ipix % bin_width == 0)); } __global__ void f4_Minimum(float *rst, int *Pmin, int dev_bin){ int blocks = gridDim.x * gridDim.y * blockIdx.z + gridDim.x * blockIdx.y + blockIdx.x; int threads = blockDim.x * threadIdx.y + threadIdx.x; int ip = blocks * blockDim.x * blockDim.y + threads; int ipix, img_ith; img_ith = ip / dev_bin; ipix = ip % dev_bin; atomicExch(&rst[img_ith], float(Pmin[img_ith])); } __global__ void f5_TenthPercentile(float *rst, int *P10, int dev_bin){ int blocks = gridDim.x * gridDim.y * blockIdx.z + gridDim.x * blockIdx.y + blockIdx.x; int threads = blockDim.x * threadIdx.y + threadIdx.x; int ip = blocks * blockDim.x * blockDim.y + threads; int img_ith; img_ith = ip / dev_bin; atomicExch(&rst[img_ith], float(P10[img_ith])); } __global__ void f6_NinetiePercentile(float *rst, int *P90, int dev_bin){ int blocks = gridDim.x * gridDim.y * blockIdx.z + gridDim.x * blockIdx.y + blockIdx.x; int threads = blockDim.x * threadIdx.y + threadIdx.x; int ip = blocks * blockDim.x * blockDim.y + threads; int img_ith; img_ith = ip / dev_bin; atomicExch(&rst[img_ith], float(P90[img_ith])); } __global__ void f7_Maximum(float *rst, int *Pmax, int dev_bin){ int blocks = gridDim.x * gridDim.y * blockIdx.z + gridDim.x * blockIdx.y + blockIdx.x; int threads = blockDim.x * threadIdx.y + threadIdx.x; int ip = blocks * blockDim.x * blockDim.y + threads; int ipix, img_ith; img_ith = ip / dev_bin; ipix = ip % dev_bin; atomicExch(&rst[img_ith], float(Pmax[img_ith])); } __global__ void f8_Mean(float *rst, float *Pm, int dev_bin){ int blocks = gridDim.x * gridDim.y * blockIdx.z + gridDim.x * blockIdx.y + blockIdx.x; int threads = blockDim.x * threadIdx.y + threadIdx.x; int ip = blocks * blockDim.x * blockDim.y + threads; int ipix, img_ith; img_ith = ip / dev_bin; ipix = ip % dev_bin; atomicExch(&rst[img_ith], Pm[img_ith]); } __global__ void f9_Median(float *rst, int *P50, int dev_bin){ int blocks = gridDim.x * gridDim.y * blockIdx.z + gridDim.x * blockIdx.y + blockIdx.x; int threads = blockDim.x * threadIdx.y + threadIdx.x; int ip = blocks * blockDim.x * blockDim.y + threads; int img_ith; img_ith = ip / dev_bin; atomicExch(&rst[img_ith], float(P50[img_ith])); } __global__ void f10_InterquartileRange(float *rst, int *P25, int *P75, int dev_bin){ int blocks = gridDim.x * gridDim.y * blockIdx.z + gridDim.x * blockIdx.y + blockIdx.x; int threads = blockDim.x * threadIdx.y + threadIdx.x; int ip = blocks * blockDim.x * blockDim.y + threads; int img_ith; img_ith = ip / dev_bin; atomicExch(&rst[img_ith], float(P75[img_ith] - P25[img_ith])); } __global__ void f11_Range(float *rst, int *Pmin, int *Pmax, int dev_bin){ int blocks = gridDim.x * gridDim.y * blockIdx.z + gridDim.x * blockIdx.y + blockIdx.x; int threads = blockDim.x * threadIdx.y + threadIdx.x; int ip = blocks * blockDim.x * blockDim.y + threads; int img_ith; img_ith = ip / dev_bin; atomicExch(&rst[img_ith], float(Pmax[img_ith] - Pmin[img_ith])); } __global__ void f12_MAD(float *rst, float *Pn, float *Pm, float *Np, int dev_bin){ int blocks = gridDim.x * gridDim.y * blockIdx.z + gridDim.x * blockIdx.y + blockIdx.x; int threads = blockDim.x * threadIdx.y + threadIdx.x; int ip = blocks * blockDim.x * blockDim.y + threads; int ipix, img_ith; img_ith = ip / dev_bin; ipix = ip % dev_bin; atomicAdd(&rst[img_ith], abs(float(ipix) - Pm[img_ith]) * Pn[ip]); } __global__ void f13_rMAD(float *rst, int *N1090, float *mP1090, int *P10, int *P90, float *Pn, float * Np, int dev_bin){ int blocks = gridDim.x * gridDim.y * blockIdx.z + gridDim.x * blockIdx.y + 
blockIdx.x; int threads = blockDim.x * threadIdx.y + threadIdx.x; int ip = blocks * blockDim.x * blockDim.y + threads; int ipix, img_ith; img_ith = ip / dev_bin; ipix = ip % dev_bin; atomicAdd(&rst[img_ith], float(ipix>=P10[img_ith]) * float(ipix<=P90[img_ith]) * abs(ipix - mP1090[img_ith]) * Pn[ip] * Np[img_ith] / (N1090[img_ith] + 1)); } __global__ void f14_RMS(float *rst, float *Energy, float *Np, int dev_bin){ int blocks = gridDim.x * gridDim.y * blockIdx.z + gridDim.x * blockIdx.y + blockIdx.x; int threads = blockDim.x * threadIdx.y + threadIdx.x; int ip = blocks * blockDim.x * blockDim.y + threads; int img_ith; img_ith = ip / dev_bin; atomicExch(&rst[img_ith], sqrt(abs(Energy[img_ith]) / Np[img_ith])); } __global__ void f18_Variance(float *rst, float *Pv, int dev_bin){ int blocks = gridDim.x * gridDim.y * blockIdx.z + gridDim.x * blockIdx.y + blockIdx.x; int threads = blockDim.x * threadIdx.y + threadIdx.x; int ip = blocks * blockDim.x * blockDim.y + threads; int ipix, img_ith; img_ith = ip / dev_bin; ipix = ip % dev_bin; atomicExch(&rst[img_ith], Pv[img_ith]); //powf(ipix - Pm[img_ith], 2) * Pn[ip]); } __global__ void f15_StandardDeviation(float *rst, float *Pv, int dev_bin){ int blocks = gridDim.x * gridDim.y * blockIdx.z + gridDim.x * blockIdx.y + blockIdx.x; int threads = blockDim.x * threadIdx.y + threadIdx.x; int ip = blocks * blockDim.x * blockDim.y + threads; int img_ith; img_ith = ip / dev_bin; atomicExch(&rst[img_ith], sqrtf(Pv[img_ith])); } __global__ void f16_Skewness(float *rst, float *Pm, float *Pn, float *Pv, int dev_bin, float Epsilon){ int blocks = gridDim.x * gridDim.y * blockIdx.z + gridDim.x * blockIdx.y + blockIdx.x; int threads = blockDim.x * threadIdx.y + threadIdx.x; int ip = blocks * blockDim.x * blockDim.y + threads; int ipix, img_ith; img_ith = ip / dev_bin; ipix = ip % dev_bin; atomicAdd(&rst[img_ith], powf(ipix - Pm[img_ith], 3) * Pn[ip] / (powf(Pv[img_ith], 1.5) + Epsilon)); } __global__ void f17_Kurtosis(float *rst, float *Pm, float *Pn, float *Pv, int dev_bin, float Epsilon){ int blocks = gridDim.x * gridDim.y * blockIdx.z + gridDim.x * blockIdx.y + blockIdx.x; int threads = blockDim.x * threadIdx.y + threadIdx.x; int ip = blocks * blockDim.x * blockDim.y + threads; int ipix, img_ith; img_ith = ip / dev_bin; ipix = ip % dev_bin; atomicAdd(&rst[img_ith], powf(ipix - Pm[img_ith], 4) * Pn[ip] / (powf(Pv[img_ith], 2) + Epsilon)); } __global__ void f19_Uniformity(float *rst, float *pn, int dev_bin, int bin_width){ int blocks = gridDim.x * gridDim.y * blockIdx.z + gridDim.x * blockIdx.y + blockIdx.x; int threads = blockDim.x * threadIdx.y + threadIdx.x; int ip = blocks * blockDim.x * blockDim.y + threads; int ipig, ipix, img_ith; img_ith = ip / dev_bin; ipix = ip % dev_bin; ipig = ipix / bin_width; atomicAdd(&rst[img_ith], powf(pn[ipig + img_ith * dev_bin / bin_width], 2) * float(ipix % bin_width == 0)); } __global__ void f20_Volume(float *rst, float *Np, int dev_bin){ int blocks = gridDim.x * gridDim.y * blockIdx.z + gridDim.x * blockIdx.y + blockIdx.x; int threads = blockDim.x * threadIdx.y + threadIdx.x; int ip = blocks * blockDim.x * blockDim.y + threads; int ipig, ipix, img_ith; img_ith = ip / dev_bin; atomicExch(&rst[img_ith], Np[img_ith]); }
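// --- illustrative host driver (not from the original source) ----------------
// Minimal sketch of how the normalized-histogram chain above could be driven
// from the host. The kernel names and signatures are exactly the ones defined
// in this file; the launch geometry (one thread per image*gray-level entry,
// blockDim.y == 1 so `threads` reduces to threadIdx.x), dev_bin == 256, and
// the wrapper name firstorder_mean_variance are assumptions for this sketch.
// d_P holds the raw per-image histograms and d_Np the per-image voxel counts,
// both assumed to be filled in beforehand.
static void firstorder_mean_variance(int* d_P, float* d_Np, float* d_mean,
                                     float* d_var, int n_img, int dev_bin) {
    const int n = n_img * dev_bin;   // dev_bin must divide the launch exactly:
                                     // the kernels above have no bounds check.
    float *d_Pn = nullptr, *d_Pm = nullptr, *d_Pv = nullptr;
    cudaMalloc((void**)&d_Pn, n * sizeof(float));
    cudaMalloc((void**)&d_Pm, n_img * sizeof(float));
    cudaMalloc((void**)&d_Pv, n_img * sizeof(float));
    cudaMemset(d_Pm, 0, n_img * sizeof(float));   // accumulated with atomicAdd
    cudaMemset(d_Pv, 0, n_img * sizeof(float));

    dim3 block(dev_bin, 1);          // one block per image when dev_bin == 256
    dim3 grid(n / dev_bin, 1, 1);

    firstorder_Pn<<<grid, block>>>(d_Pn, d_P, d_Np, dev_bin);   // Pn = P / Np
    firstorder_Pm<<<grid, block>>>(d_Pm, d_Pn, dev_bin);        // mean = sum k * Pn
    firstorder_Pv<<<grid, block>>>(d_Pv, d_Pm, d_Pn, dev_bin);  // var  = sum (k - mean)^2 * Pn
    f8_Mean<<<grid, block>>>(d_mean, d_Pm, dev_bin);            // publish per-image mean
    f18_Variance<<<grid, block>>>(d_var, d_Pv, dev_bin);        // publish per-image variance
    // Same-stream ordering guarantees Pm is complete before firstorder_Pv reads it.

    cudaFree(d_Pn); cudaFree(d_Pm); cudaFree(d_Pv);
}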
0a27df746ec4e826f002831f5f2cb8f107c61a0f.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "culayer.hpp" #include <stdio.h> // https://developer.download.nvidia.com/assets/cuda/files/reduction.pdf #define DIV_UP(x,y) (1 + ((x - 1) / y)) __constant__ seed_t RNG_G = (seed_t)(6364136223846793005ull); __constant__ seed_t RNG_C = (seed_t)(1442695040888963407ull); __constant__ seed_t RNG_P = (seed_t)(1) << 63; __device__ __forceinline__ float cu_rnd_real(seed_t* seed) { float inv_RNG_P = (float)(1) / (float)(RNG_P); *seed = (RNG_G * *seed + RNG_C) % RNG_P; return (float)(*seed) * inv_RNG_P; } __global__ void particle_step_kernel(int n, Particle* particles, int steps, float const* const sigs_in, float const* const absorption_rates_in, float * const weights_absorbed_out, int min_index, int max_index, float dx) { extern __shared__ float sdata[]; int n_cells = max_index-min_index; float * const sigs = sdata; float * const absorption_rates = sdata + n_cells; float * const weights_absorbed = sdata + 2*n_cells; for (int j = 0; j < DIV_UP(n_cells, blockDim.x); j++){ int cpy_ind = j*blockDim.x + threadIdx.x; if (cpy_ind < n_cells){ sigs[cpy_ind] = sigs_in[cpy_ind]; absorption_rates[cpy_ind] = absorption_rates_in[cpy_ind]; weights_absorbed[cpy_ind] = 0; } } __syncthreads(); int i = blockIdx.x * blockDim.x + threadIdx.x; if (i < n){ Particle particle = particles[i]; for (int step = 0; step < steps; step++){ if (particle.index >= min_index && particle.index < max_index) { int local_index = particle.index - min_index; const float interaction_rate = 1.0 - absorption_rates[local_index]; const float sig_a = sigs[local_index] * absorption_rates[local_index]; const float sig_i = sigs[local_index] * interaction_rate; // calculate theoretic movement const float h = cu_rnd_real(&particle.seed); float di = MAXREAL; if (sig_i > EPS_PRECISION){ // This should always be true di = -log(h) / sig_i; } // -- possible new cell -- float mu_sign = copysignf(1.0, particle.mu); int index_new = __float2int_rn(mu_sign) + particle.index; float x_new_edge = particle.index * dx; if (mu_sign == 1){ x_new_edge += dx; } float di_edge = MAXREAL; if (particle.mu < -EPS_PRECISION || EPS_PRECISION < particle.mu){ di_edge = (x_new_edge - particle.x) / particle.mu; } if (di < di_edge) { /* move inside cell an draw new mu */ index_new = particle.index; particle.x += di * particle.mu; particle.mu = 2 * cu_rnd_real(&particle.seed) - 1; } else { /* set position to border */ di = di_edge; particle.x = x_new_edge; } // -- Calculate amount of absorbed energy -- const float dw = (1 - expf(-sig_a * di)) * particle.wmc; /* Weight removed from particle is added to the layer */ particle.wmc -= dw; atomicAdd(weights_absorbed + local_index, dw); particle.index = index_new; } } particles[i] = particle; } __syncthreads(); for (int j = 0; j < DIV_UP(n_cells, blockDim.x); j++){ int cpy_ind = j*blockDim.x + threadIdx.x; if (cpy_ind < n_cells){ atomicAdd(weights_absorbed_out + cpy_ind, weights_absorbed[cpy_ind]); } } }
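// --- illustrative launch (not part of the original file) --------------------
// particle_step_kernel partitions its extern __shared__ buffer into three
// n_cells-sized float arrays (sigs, absorption_rates, weights_absorbed), so a
// caller must pass 3 * n_cells * sizeof(float) bytes of dynamic shared memory.
// The wrapper name and the block size of 256 are assumptions for this sketch;
// Particle and seed_t are taken to come from culayer.hpp, as in the kernel
// above. weights_absorbed_out is accumulated with atomicAdd, so it should be
// zeroed by the caller before the first launch.
static void run_particle_steps(int n, Particle* d_particles, int steps,
                               const float* d_sigs,
                               const float* d_absorption_rates,
                               float* d_weights_absorbed,
                               int min_index, int max_index, float dx) {
    const int n_cells = max_index - min_index;
    const int block = 256;
    const int grid = DIV_UP(n, block);
    const size_t shmem = 3 * n_cells * sizeof(float);   // sigs + rates + weights
    hipLaunchKernelGGL(particle_step_kernel, dim3(grid), dim3(block), shmem, 0,
                       n, d_particles, steps, d_sigs, d_absorption_rates,
                       d_weights_absorbed, min_index, max_index, dx);
}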
0a27df746ec4e826f002831f5f2cb8f107c61a0f.cu
#include "culayer.hpp" #include <stdio.h> // https://developer.download.nvidia.com/assets/cuda/files/reduction.pdf #define DIV_UP(x,y) (1 + ((x - 1) / y)) __constant__ seed_t RNG_G = (seed_t)(6364136223846793005ull); __constant__ seed_t RNG_C = (seed_t)(1442695040888963407ull); __constant__ seed_t RNG_P = (seed_t)(1) << 63; __device__ __forceinline__ float cu_rnd_real(seed_t* seed) { float inv_RNG_P = (float)(1) / (float)(RNG_P); *seed = (RNG_G * *seed + RNG_C) % RNG_P; return (float)(*seed) * inv_RNG_P; } __global__ void particle_step_kernel(int n, Particle* particles, int steps, float const* const sigs_in, float const* const absorption_rates_in, float * const weights_absorbed_out, int min_index, int max_index, float dx) { extern __shared__ float sdata[]; int n_cells = max_index-min_index; float * const sigs = sdata; float * const absorption_rates = sdata + n_cells; float * const weights_absorbed = sdata + 2*n_cells; for (int j = 0; j < DIV_UP(n_cells, blockDim.x); j++){ int cpy_ind = j*blockDim.x + threadIdx.x; if (cpy_ind < n_cells){ sigs[cpy_ind] = sigs_in[cpy_ind]; absorption_rates[cpy_ind] = absorption_rates_in[cpy_ind]; weights_absorbed[cpy_ind] = 0; } } __syncthreads(); int i = blockIdx.x * blockDim.x + threadIdx.x; if (i < n){ Particle particle = particles[i]; for (int step = 0; step < steps; step++){ if (particle.index >= min_index && particle.index < max_index) { int local_index = particle.index - min_index; const float interaction_rate = 1.0 - absorption_rates[local_index]; const float sig_a = sigs[local_index] * absorption_rates[local_index]; const float sig_i = sigs[local_index] * interaction_rate; // calculate theoretic movement const float h = cu_rnd_real(&particle.seed); float di = MAXREAL; if (sig_i > EPS_PRECISION){ // This should always be true di = -log(h) / sig_i; } // -- possible new cell -- float mu_sign = copysignf(1.0, particle.mu); int index_new = __float2int_rn(mu_sign) + particle.index; float x_new_edge = particle.index * dx; if (mu_sign == 1){ x_new_edge += dx; } float di_edge = MAXREAL; if (particle.mu < -EPS_PRECISION || EPS_PRECISION < particle.mu){ di_edge = (x_new_edge - particle.x) / particle.mu; } if (di < di_edge) { /* move inside cell an draw new mu */ index_new = particle.index; particle.x += di * particle.mu; particle.mu = 2 * cu_rnd_real(&particle.seed) - 1; } else { /* set position to border */ di = di_edge; particle.x = x_new_edge; } // -- Calculate amount of absorbed energy -- const float dw = (1 - expf(-sig_a * di)) * particle.wmc; /* Weight removed from particle is added to the layer */ particle.wmc -= dw; atomicAdd(weights_absorbed + local_index, dw); particle.index = index_new; } } particles[i] = particle; } __syncthreads(); for (int j = 0; j < DIV_UP(n_cells, blockDim.x); j++){ int cpy_ind = j*blockDim.x + threadIdx.x; if (cpy_ind < n_cells){ atomicAdd(weights_absorbed_out + cpy_ind, weights_absorbed[cpy_ind]); } } }
997ca88979e0d9f122859d3d9d96c8c768a14f68.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /*! * \file lbmFlowAroundCylinder.cu * \brief Cuda version based on lbm_sailfish_hist and lbm_opt1. * \author Adrien Python * \date 28.01.2017 */ #include <stdlib.h> #include <stdio.h> #include <math.h> #include <stdbool.h> #include <lbm.h> #define RE 220.0 // Reynolds number #define NX 420 // Number of lattice nodes (width) #define NY 180 // Number of lattice nodes (height) #define NL ((NX)*(NY)) // Number of lattice nodes (total) #define LY ((NY) - 1) // Height of the domain in lattice units #define CX ((NX) / 4) // X coordinates of the cylinder #define CY ((NY) / 2) // Y coordinates of the cylinder #define R ((NY) / 9) // Cylinder radius #define ULB 0.04 // Velocity in lattice units #define NULB ((ULB) * (R) / (RE)) // Viscoscity in lattice units #define OMEGA ((double)1. / (3*(NULB)+0.5)) // Relaxation parameter #define BLOCK_SIZE 64 #define SQUARE(a) ((a)*(a)) #define GPU_SQUARE(a) __dmul_rn(a,a) #define INDEX_2D_FROM_1D(x, y, i) do { (y) = (i)/(NX), (x) = (i)%(NX); } while (0) #define IDX(x, y) ((x+NX)%(NX) + ( (y+NY)%(NY) )*(NX)) struct lbm_lattices{ double ne[NL], e[NL], se[NL], n[NL], c[NL], s[NL], nw[NL], w[NL], sw[NL]; }; struct lbm_u { double u0[NL]; double u1[NL]; }; typedef struct { bool obstacles[NL]; // Should reside in lbm_consts but is too big for constant memory double u0[NL]; double u1[NL]; lbm_lattices f0; lbm_lattices f1; } lbm_vars; typedef struct { double vel[NY]; } lbm_consts; __constant__ lbm_consts d_consts; #define HANDLE_ERROR(ans) (handleError((ans), __FILE__, __LINE__)) inline void handleError(hipError_t code, const char *file, int line) { if (code != hipSuccess) { fprintf(stderr,"CUDA assert: %s %s %d\n", hipGetErrorString(code), file, line); exit(EXIT_FAILURE); } } #define HANDLE_KERNEL_ERROR(...) \ do { \ __VA_ARGS__; \ /* HANDLE_ERROR( hipPeekAtLastError() ); */ \ /* HANDLE_ERROR( hipDeviceSynchronize() );*/ \ } while(0) /** * Setup: cylindrical obstacle and velocity inlet with perturbation * Creation of a mask with boolean values, defining the shape of the obstacle. */ static void initObstacles(bool* obstacles) { for (int x = 0; x < NX; x++) { for (int y = 0; y < NY; y++) { obstacles[IDX(x,y)] = SQUARE(x-CX) + SQUARE(y-CY) < SQUARE(R); } } } /** * Initial velocity profile: almost zero, with a slight perturbation to trigger * the instability. 
*/ static void initVelocity(double* vel) { for (int y = 0; y < NY; y++) { vel[y] = ULB * (1 + 0.0001 * sin( y / (double)LY * 2 * M_PI) ); } } __host__ static void h_equilibrium(lbm_lattices* f, int index, double rho, double u) { double usqr = 3./2 * ( SQUARE(u) ); double cu = 3 * u; f->ne[index] = rho * 1./36 * ( 1 + cu + 0.5 * SQUARE(cu) - usqr ); f->e [index] = rho * 1./9 * ( 1 + cu + 0.5 * SQUARE(cu) - usqr ); f->se[index] = rho * 1./36 * ( 1 + cu + 0.5 * SQUARE(cu) - usqr ); f->n [index] = rho * 1./9 * ( 1 - usqr ); f->c [index] = rho * 4./9 * ( 1 - usqr ); f->s [index] = rho * 1./9 * ( 1 - usqr ); f->nw[index] = rho * 1./36 * ( 1 - cu + 0.5 * SQUARE(cu) - usqr ); f->w [index] = rho * 1./9 * ( 1 - cu + 0.5 * SQUARE(cu) - usqr ); f->sw[index] = rho * 1./36 * ( 1 - cu + 0.5 * SQUARE(cu) - usqr ); } __device__ static void d_equilibrium(double* ne, double* e, double* se, double* n, double* c, double* s, double* nw, double* w, double* sw, double rho, double u0, double u1) { double usqr = __dmul_rn(3./2, __dadd_rn( GPU_SQUARE(u0), GPU_SQUARE(u1) )); double cu_ne = 3 * ( u0 + u1 ); double cu_se = 3 * ( u0 - u1 ); double cu_ns = 3 * u1; double cu_we = 3 * u0; double cu_nw = 3 * ( -u0 + u1 ); double cu_sw = 3 * ( -u0 - u1 ); *ne = __dmul_rn(__dmul_rn(rho, 1./36), __dadd_rn(__dadd_rn(__dadd_rn(1, cu_ne) , __dmul_rn(0.5, GPU_SQUARE(cu_ne))), - usqr) ); *e = __dmul_rn(__dmul_rn(rho, 1./9 ), __dadd_rn(__dadd_rn(__dadd_rn(1, cu_we) , __dmul_rn(0.5, GPU_SQUARE(cu_we))), - usqr) ); *se = __dmul_rn(__dmul_rn(rho, 1./36), __dadd_rn(__dadd_rn(__dadd_rn(1, cu_se) , __dmul_rn(0.5, GPU_SQUARE(cu_se))), - usqr) ); *n = __dmul_rn(__dmul_rn(rho, 1./9 ), __dadd_rn(__dadd_rn(__dadd_rn(1, cu_ns) , __dmul_rn(0.5, GPU_SQUARE(cu_ns))), - usqr) ); *c = __dmul_rn(__dmul_rn(rho, 4./9 ), __dadd_rn(1, - usqr) ); *s = __dmul_rn(__dmul_rn(rho, 1./9 ), __dadd_rn(__dadd_rn(__dadd_rn(1,-cu_ns ), __dmul_rn(0.5, GPU_SQUARE(cu_ns))), - usqr) ); *nw = __dmul_rn(__dmul_rn(rho, 1./36), __dadd_rn(__dadd_rn(__dadd_rn(1, cu_nw) , __dmul_rn(0.5, GPU_SQUARE(cu_nw))), - usqr) ); *w = __dmul_rn(__dmul_rn(rho, 1./9 ), __dadd_rn(__dadd_rn(__dadd_rn(1,-cu_we) , __dmul_rn(0.5, GPU_SQUARE(cu_we))), - usqr) ); *sw = __dmul_rn(__dmul_rn(rho, 1./36), __dadd_rn(__dadd_rn(__dadd_rn(1, cu_sw) , __dmul_rn(0.5, GPU_SQUARE(cu_sw))), - usqr) ); } __device__ static void macroscopic(double ne, double e, double se, double n, double c, double s, double nw, double w, double sw, double* rho, double* u0, double* u1) { *rho = ne + e + se + n + c + s + nw + w + sw; *u0 = (ne + e + se - nw - w - sw) / *rho; *u1 = (ne - se + n - s + nw - sw) / *rho; } __global__ void lbm_right_wall(lbm_lattices* f) { for (int y = blockIdx.y; y < NY; y+=gridDim.y) { // Right wall: outflow condition. 
f->nw[IDX(NX-1,y)] = f->nw[IDX(NX-2,y)]; f->w [IDX(NX-1,y)] = f->w [IDX(NX-2,y)]; f->sw[IDX(NX-1,y)] = f->sw[IDX(NX-2,y)]; } } __global__ void lbm_computation(lbm_vars *d_vars, lbm_lattices* f0, lbm_lattices* f1) { int tix = threadIdx.x; for (int y = blockIdx.y; y < NY; y+=gridDim.y) { for (int x = threadIdx.x + blockIdx.x * blockDim.x; x < NX; x += blockDim.x * gridDim.x) { size_t gi = IDX(x,y); double fin_ne, fin_e, fin_se, fin_n, fin_c, fin_s, fin_nw, fin_w, fin_sw; double fout_ne, fout_e, fout_se, fout_n, fout_c, fout_s, fout_nw, fout_w, fout_sw; fin_ne = f0->ne[gi]; fin_e = f0->e [gi]; fin_se = f0->se[gi]; fin_n = f0->n [gi]; fin_c = f0->c [gi]; fin_s = f0->s [gi]; fin_nw = f0->nw[gi]; fin_w = f0->w [gi]; fin_sw = f0->sw[gi]; // Compute macroscopic variables, density and velocity double rho, u0, u1; macroscopic(fin_ne, fin_e, fin_se, fin_n, fin_c, fin_s, fin_nw, fin_w, fin_sw, &rho, &u0, &u1); if (x == 0) { // Left wall: inflow condition u0 = d_consts.vel[y]; u1 = 0; // Calculate the density double s2 = fin_n + fin_c + fin_s; double s3 = fin_nw + fin_w + fin_sw;; rho = 1./(1 - u0) * (s2 + 2*s3); } // Compute equilibrium double feq_ne, feq_e, feq_se, feq_n, feq_c, feq_s, feq_nw, feq_w, feq_sw; d_equilibrium(&feq_ne, &feq_e, &feq_se, &feq_n, &feq_c, &feq_s, &feq_nw, &feq_w, &feq_sw, rho, u0, u1); if (x == 0) { fin_ne = feq_ne + fin_sw - feq_sw; fin_e = feq_e + fin_w - feq_w ; fin_se = feq_se + fin_nw - feq_nw; } if (d_vars->obstacles[IDX(x, y)]) { // Bounce-back condition for obstacle fout_ne = fin_sw; fout_e = fin_w ; fout_se = fin_nw; fout_n = fin_s ; fout_c = fin_c ; fout_s = fin_n ; fout_nw = fin_se; fout_w = fin_e ; fout_sw = fin_ne; } else { // Collision step fout_ne = __dadd_rn(__dmul_rn(-OMEGA, __dadd_rn(fin_ne, - feq_ne)), fin_ne); fout_e = __dadd_rn(__dmul_rn(-OMEGA, __dadd_rn(fin_e , - feq_e )), fin_e ); fout_se = __dadd_rn(__dmul_rn(-OMEGA, __dadd_rn(fin_se, - feq_se)), fin_se); fout_n = __dadd_rn(__dmul_rn(-OMEGA, __dadd_rn(fin_n , - feq_n )), fin_n ); fout_c = __dadd_rn(__dmul_rn(-OMEGA, __dadd_rn(fin_c , - feq_c )), fin_c ); fout_s = __dadd_rn(__dmul_rn(-OMEGA, __dadd_rn(fin_s , - feq_s )), fin_s ); fout_nw = __dadd_rn(__dmul_rn(-OMEGA, __dadd_rn(fin_nw, - feq_nw)), fin_nw); fout_w = __dadd_rn(__dmul_rn(-OMEGA, __dadd_rn(fin_w , - feq_w )), fin_w ); fout_sw = __dadd_rn(__dmul_rn(-OMEGA, __dadd_rn(fin_sw, - feq_sw)), fin_sw); } d_vars->u0[gi] = u0; d_vars->u1[gi] = u1; // STREAMING // shared variables for in-block propagation __shared__ double fo_E [BLOCK_SIZE]; __shared__ double fo_W [BLOCK_SIZE]; __shared__ double fo_SE[BLOCK_SIZE]; __shared__ double fo_SW[BLOCK_SIZE]; __shared__ double fo_NE[BLOCK_SIZE]; __shared__ double fo_NW[BLOCK_SIZE]; // Center 'propagation' (global memory) f1->c[gi] = fout_c; // N + S propagation (global memory) f1->s[IDX(x, y-1)] = fout_s; f1->n[IDX(x, y+1)] = fout_n; // E propagation in shared memory if (tix < blockDim.x-1 && x < NX-1) { fo_E [tix+1] = fout_e; fo_NE[tix+1] = fout_ne; fo_SE[tix+1] = fout_se; // E propagation in global memory (at block boundary) } else { f1->e [IDX(x+1, y )] = fout_e; f1->se[IDX(x+1, y-1)] = fout_se; f1->ne[IDX(x+1, y+1)] = fout_ne; } // W propagation in shared memory if (tix > 0) { fo_W [tix-1] = fout_w; fo_NW[tix-1] = fout_nw; fo_SW[tix-1] = fout_sw; // W propagation in global memory (at block boundary) } else { f1->w [IDX(x-1, y )] = fout_w; f1->sw[IDX(x-1, y-1)] = fout_sw; f1->nw[IDX(x-1, y+1)] = fout_nw; } __syncthreads(); // the leftmost thread is not updated in this block if (tix > 0) { f1->e [gi ] = fo_E 
[tix]; f1->se[IDX(x, y-1)] = fo_SE[tix]; f1->ne[IDX(x, y+1)] = fo_NE[tix]; } // the rightmost thread is not updated in this block if (tix < blockDim.x-1 && x < NX-1) { f1->w [gi ] = fo_W [tix]; f1->sw[IDX(x, y-1)] = fo_SW[tix]; f1->nw[IDX(x, y+1)] = fo_NW[tix]; } __syncthreads(); // only nessessary when NX % BLOCK_SIZE != 0 } } } struct lbm_simulation{ lbm_vars h_vars, *d_vars; dim3 dimComputationGrid, dimComputationBlock; dim3 dimRightWallGrid, dimRightWallBlock; size_t shared_mem_size; bool switch_f0_f1; }; lbm_simulation* lbm_simulation_create() { lbm_simulation* lbm_sim = (lbm_simulation*) malloc (sizeof(lbm_simulation)); lbm_consts h_consts; initVelocity(h_consts.vel); HANDLE_ERROR(hipMemcpyToSymbol(d_consts, &h_consts, sizeof(lbm_consts))); initObstacles(lbm_sim->h_vars.obstacles); // Initialization of the populations at equilibrium with the given velocity. lbm_sim->switch_f0_f1 = false; for (int y = 0; y < NY; y++) { for (int x = 0; x < NX; x++) { h_equilibrium(&lbm_sim->h_vars.f0, IDX(x,y), 1.0, h_consts.vel[y]); } } HANDLE_ERROR(hipMalloc(&lbm_sim->d_vars, sizeof(lbm_vars))); HANDLE_ERROR(hipMemcpy(lbm_sim->d_vars, &lbm_sim->h_vars, sizeof(lbm_vars), hipMemcpyHostToDevice)); hipDeviceProp_t prop; hipGetDeviceProperties(&prop, 0); dim3 dimComputationGrid(max(1, NX/BLOCK_SIZE), min(NY, prop.maxGridSize[1])); dim3 dimComputationBlock(BLOCK_SIZE); lbm_sim->dimComputationGrid = dimComputationGrid; lbm_sim->dimComputationBlock = dimComputationBlock; dim3 dimRightWallGrid(1, min(NY, prop.maxGridSize[1])); dim3 dimRightWallBlock(1); lbm_sim->dimRightWallGrid = dimRightWallGrid; lbm_sim->dimRightWallBlock = dimRightWallBlock; lbm_sim->shared_mem_size = 6 * sizeof(double) * BLOCK_SIZE; return lbm_sim; } void lbm_simulation_destroy(lbm_simulation* lbm_sim) { HANDLE_ERROR(hipFree(lbm_sim->d_vars)); free(lbm_sim); } void lbm_simulation_update(lbm_simulation* lbm_sim) { if (lbm_sim->switch_f0_f1) { hipLaunchKernelGGL(( HANDLE_KERNEL_ERROR(lbm_right_wall), dim3(lbm_sim->dimRightWallGrid), dim3(lbm_sim->dimRightWallBlock), 0, 0, &lbm_sim->d_vars->f1)); hipLaunchKernelGGL(( HANDLE_KERNEL_ERROR(lbm_computation), dim3(lbm_sim->dimComputationGrid), dim3(lbm_sim->dimComputationBlock), lbm_sim->shared_mem_size, 0, lbm_sim->d_vars, &lbm_sim->d_vars->f1, &lbm_sim->d_vars->f0)); } else { hipLaunchKernelGGL(( HANDLE_KERNEL_ERROR(lbm_right_wall), dim3(lbm_sim->dimRightWallGrid), dim3(lbm_sim->dimRightWallBlock), 0, 0, &lbm_sim->d_vars->f0)); hipLaunchKernelGGL(( HANDLE_KERNEL_ERROR(lbm_computation), dim3(lbm_sim->dimComputationGrid), dim3(lbm_sim->dimComputationBlock), lbm_sim->shared_mem_size, 0, lbm_sim->d_vars, &lbm_sim->d_vars->f0, &lbm_sim->d_vars->f1)); } lbm_sim->switch_f0_f1 = ! lbm_sim->switch_f0_f1; } void lbm_simulation_get_size(lbm_simulation* lbm_sim, size_t* width, size_t* height) { *width = NX; *height = NY; } void lbm_lattices_read(lbm_simulation* lbm_sim, lbm_lattices* lat) { lbm_lattices* d_lat = lbm_sim->switch_f0_f1 ? 
&lbm_sim->d_vars->f1 : &lbm_sim->d_vars->f0; HANDLE_ERROR(hipMemcpy(lat, d_lat, sizeof(lbm_lattices), hipMemcpyDeviceToHost)); } void lbm_u_read(lbm_simulation* lbm_sim, lbm_u* u) { HANDLE_ERROR(hipMemcpy(u->u0, lbm_sim->d_vars->u0, sizeof(double)*NL, hipMemcpyDeviceToHost)); HANDLE_ERROR(hipMemcpy(u->u1, lbm_sim->d_vars->u1, sizeof(double)*NL, hipMemcpyDeviceToHost)); } lbm_lattices* lbm_lattices_create() { return (lbm_lattices*) malloc(sizeof(lbm_lattices)); } void lbm_lattices_destroy(lbm_lattices* lat) { free(lat); } lbm_u* lbm_u_create() { return (lbm_u*) malloc(sizeof(lbm_u)); } void lbm_u_destroy(lbm_u* u) { free(u); } void lbm_lattices_at_index(lbm_lattice* lattice, lbm_lattices* lattices, int x, int y) { int gi = IDX(x,y); lbm_lattices* lat = (lbm_lattices*) lattices; lattice->ne = lat->ne[gi]; lattice->e = lat->e [gi]; lattice->se = lat->se[gi]; lattice->n = lat->n [gi]; lattice->c = lat->c [gi]; lattice->s = lat->s [gi]; lattice->nw = lat->nw[gi]; lattice->w = lat->w [gi]; lattice->sw = lat->sw[gi]; } void lbm_u_at_index(double* u0, double* u1, lbm_u* u, int x, int y) { int gi = IDX(x,y); *u0 = u->u0[gi]; *u1 = u->u1[gi]; }
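// --- illustrative usage (not part of the original file) ---------------------
// Minimal host driver built only from the public functions defined above (and
// declared in lbm.h). The iteration count and the probe point are arbitrary
// choices for the example.
#include <stdio.h>

int main(void) {
    lbm_simulation* sim = lbm_simulation_create();
    lbm_u* u = lbm_u_create();

    size_t w, h;
    lbm_simulation_get_size(sim, &w, &h);

    for (int it = 0; it < 1000; it++) {
        lbm_simulation_update(sim);   // one collide-and-stream step (f0 <-> f1)
    }

    lbm_u_read(sim, u);               // copy the velocity field back to the host
    double u0, u1;
    lbm_u_at_index(&u0, &u1, u, (int)w / 2, (int)h / 2);
    printf("u at domain centre: (%f, %f)\n", u0, u1);

    lbm_u_destroy(u);
    lbm_simulation_destroy(sim);
    return 0;
}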
997ca88979e0d9f122859d3d9d96c8c768a14f68.cu
/*! * \file lbmFlowAroundCylinder.cu * \brief Cuda version based on lbm_sailfish_hist and lbm_opt1. * \author Adrien Python * \date 28.01.2017 */ #include <stdlib.h> #include <stdio.h> #include <math.h> #include <stdbool.h> #include <lbm.h> #define RE 220.0 // Reynolds number #define NX 420 // Number of lattice nodes (width) #define NY 180 // Number of lattice nodes (height) #define NL ((NX)*(NY)) // Number of lattice nodes (total) #define LY ((NY) - 1) // Height of the domain in lattice units #define CX ((NX) / 4) // X coordinates of the cylinder #define CY ((NY) / 2) // Y coordinates of the cylinder #define R ((NY) / 9) // Cylinder radius #define ULB 0.04 // Velocity in lattice units #define NULB ((ULB) * (R) / (RE)) // Viscoscity in lattice units #define OMEGA ((double)1. / (3*(NULB)+0.5)) // Relaxation parameter #define BLOCK_SIZE 64 #define SQUARE(a) ((a)*(a)) #define GPU_SQUARE(a) __dmul_rn(a,a) #define INDEX_2D_FROM_1D(x, y, i) do { (y) = (i)/(NX), (x) = (i)%(NX); } while (0) #define IDX(x, y) ((x+NX)%(NX) + ( (y+NY)%(NY) )*(NX)) struct lbm_lattices{ double ne[NL], e[NL], se[NL], n[NL], c[NL], s[NL], nw[NL], w[NL], sw[NL]; }; struct lbm_u { double u0[NL]; double u1[NL]; }; typedef struct { bool obstacles[NL]; // Should reside in lbm_consts but is too big for constant memory double u0[NL]; double u1[NL]; lbm_lattices f0; lbm_lattices f1; } lbm_vars; typedef struct { double vel[NY]; } lbm_consts; __constant__ lbm_consts d_consts; #define HANDLE_ERROR(ans) (handleError((ans), __FILE__, __LINE__)) inline void handleError(cudaError_t code, const char *file, int line) { if (code != cudaSuccess) { fprintf(stderr,"CUDA assert: %s %s %d\n", cudaGetErrorString(code), file, line); exit(EXIT_FAILURE); } } #define HANDLE_KERNEL_ERROR(...) \ do { \ __VA_ARGS__; \ /* HANDLE_ERROR( cudaPeekAtLastError() ); */ \ /* HANDLE_ERROR( cudaDeviceSynchronize() );*/ \ } while(0) /** * Setup: cylindrical obstacle and velocity inlet with perturbation * Creation of a mask with boolean values, defining the shape of the obstacle. */ static void initObstacles(bool* obstacles) { for (int x = 0; x < NX; x++) { for (int y = 0; y < NY; y++) { obstacles[IDX(x,y)] = SQUARE(x-CX) + SQUARE(y-CY) < SQUARE(R); } } } /** * Initial velocity profile: almost zero, with a slight perturbation to trigger * the instability. 
*/ static void initVelocity(double* vel) { for (int y = 0; y < NY; y++) { vel[y] = ULB * (1 + 0.0001 * sin( y / (double)LY * 2 * M_PI) ); } } __host__ static void h_equilibrium(lbm_lattices* f, int index, double rho, double u) { double usqr = 3./2 * ( SQUARE(u) ); double cu = 3 * u; f->ne[index] = rho * 1./36 * ( 1 + cu + 0.5 * SQUARE(cu) - usqr ); f->e [index] = rho * 1./9 * ( 1 + cu + 0.5 * SQUARE(cu) - usqr ); f->se[index] = rho * 1./36 * ( 1 + cu + 0.5 * SQUARE(cu) - usqr ); f->n [index] = rho * 1./9 * ( 1 - usqr ); f->c [index] = rho * 4./9 * ( 1 - usqr ); f->s [index] = rho * 1./9 * ( 1 - usqr ); f->nw[index] = rho * 1./36 * ( 1 - cu + 0.5 * SQUARE(cu) - usqr ); f->w [index] = rho * 1./9 * ( 1 - cu + 0.5 * SQUARE(cu) - usqr ); f->sw[index] = rho * 1./36 * ( 1 - cu + 0.5 * SQUARE(cu) - usqr ); } __device__ static void d_equilibrium(double* ne, double* e, double* se, double* n, double* c, double* s, double* nw, double* w, double* sw, double rho, double u0, double u1) { double usqr = __dmul_rn(3./2, __dadd_rn( GPU_SQUARE(u0), GPU_SQUARE(u1) )); double cu_ne = 3 * ( u0 + u1 ); double cu_se = 3 * ( u0 - u1 ); double cu_ns = 3 * u1; double cu_we = 3 * u0; double cu_nw = 3 * ( -u0 + u1 ); double cu_sw = 3 * ( -u0 - u1 ); *ne = __dmul_rn(__dmul_rn(rho, 1./36), __dadd_rn(__dadd_rn(__dadd_rn(1, cu_ne) , __dmul_rn(0.5, GPU_SQUARE(cu_ne))), - usqr) ); *e = __dmul_rn(__dmul_rn(rho, 1./9 ), __dadd_rn(__dadd_rn(__dadd_rn(1, cu_we) , __dmul_rn(0.5, GPU_SQUARE(cu_we))), - usqr) ); *se = __dmul_rn(__dmul_rn(rho, 1./36), __dadd_rn(__dadd_rn(__dadd_rn(1, cu_se) , __dmul_rn(0.5, GPU_SQUARE(cu_se))), - usqr) ); *n = __dmul_rn(__dmul_rn(rho, 1./9 ), __dadd_rn(__dadd_rn(__dadd_rn(1, cu_ns) , __dmul_rn(0.5, GPU_SQUARE(cu_ns))), - usqr) ); *c = __dmul_rn(__dmul_rn(rho, 4./9 ), __dadd_rn(1, - usqr) ); *s = __dmul_rn(__dmul_rn(rho, 1./9 ), __dadd_rn(__dadd_rn(__dadd_rn(1,-cu_ns ), __dmul_rn(0.5, GPU_SQUARE(cu_ns))), - usqr) ); *nw = __dmul_rn(__dmul_rn(rho, 1./36), __dadd_rn(__dadd_rn(__dadd_rn(1, cu_nw) , __dmul_rn(0.5, GPU_SQUARE(cu_nw))), - usqr) ); *w = __dmul_rn(__dmul_rn(rho, 1./9 ), __dadd_rn(__dadd_rn(__dadd_rn(1,-cu_we) , __dmul_rn(0.5, GPU_SQUARE(cu_we))), - usqr) ); *sw = __dmul_rn(__dmul_rn(rho, 1./36), __dadd_rn(__dadd_rn(__dadd_rn(1, cu_sw) , __dmul_rn(0.5, GPU_SQUARE(cu_sw))), - usqr) ); } __device__ static void macroscopic(double ne, double e, double se, double n, double c, double s, double nw, double w, double sw, double* rho, double* u0, double* u1) { *rho = ne + e + se + n + c + s + nw + w + sw; *u0 = (ne + e + se - nw - w - sw) / *rho; *u1 = (ne - se + n - s + nw - sw) / *rho; } __global__ void lbm_right_wall(lbm_lattices* f) { for (int y = blockIdx.y; y < NY; y+=gridDim.y) { // Right wall: outflow condition. 
f->nw[IDX(NX-1,y)] = f->nw[IDX(NX-2,y)]; f->w [IDX(NX-1,y)] = f->w [IDX(NX-2,y)]; f->sw[IDX(NX-1,y)] = f->sw[IDX(NX-2,y)]; } } __global__ void lbm_computation(lbm_vars *d_vars, lbm_lattices* f0, lbm_lattices* f1) { int tix = threadIdx.x; for (int y = blockIdx.y; y < NY; y+=gridDim.y) { for (int x = threadIdx.x + blockIdx.x * blockDim.x; x < NX; x += blockDim.x * gridDim.x) { size_t gi = IDX(x,y); double fin_ne, fin_e, fin_se, fin_n, fin_c, fin_s, fin_nw, fin_w, fin_sw; double fout_ne, fout_e, fout_se, fout_n, fout_c, fout_s, fout_nw, fout_w, fout_sw; fin_ne = f0->ne[gi]; fin_e = f0->e [gi]; fin_se = f0->se[gi]; fin_n = f0->n [gi]; fin_c = f0->c [gi]; fin_s = f0->s [gi]; fin_nw = f0->nw[gi]; fin_w = f0->w [gi]; fin_sw = f0->sw[gi]; // Compute macroscopic variables, density and velocity double rho, u0, u1; macroscopic(fin_ne, fin_e, fin_se, fin_n, fin_c, fin_s, fin_nw, fin_w, fin_sw, &rho, &u0, &u1); if (x == 0) { // Left wall: inflow condition u0 = d_consts.vel[y]; u1 = 0; // Calculate the density double s2 = fin_n + fin_c + fin_s; double s3 = fin_nw + fin_w + fin_sw;; rho = 1./(1 - u0) * (s2 + 2*s3); } // Compute equilibrium double feq_ne, feq_e, feq_se, feq_n, feq_c, feq_s, feq_nw, feq_w, feq_sw; d_equilibrium(&feq_ne, &feq_e, &feq_se, &feq_n, &feq_c, &feq_s, &feq_nw, &feq_w, &feq_sw, rho, u0, u1); if (x == 0) { fin_ne = feq_ne + fin_sw - feq_sw; fin_e = feq_e + fin_w - feq_w ; fin_se = feq_se + fin_nw - feq_nw; } if (d_vars->obstacles[IDX(x, y)]) { // Bounce-back condition for obstacle fout_ne = fin_sw; fout_e = fin_w ; fout_se = fin_nw; fout_n = fin_s ; fout_c = fin_c ; fout_s = fin_n ; fout_nw = fin_se; fout_w = fin_e ; fout_sw = fin_ne; } else { // Collision step fout_ne = __dadd_rn(__dmul_rn(-OMEGA, __dadd_rn(fin_ne, - feq_ne)), fin_ne); fout_e = __dadd_rn(__dmul_rn(-OMEGA, __dadd_rn(fin_e , - feq_e )), fin_e ); fout_se = __dadd_rn(__dmul_rn(-OMEGA, __dadd_rn(fin_se, - feq_se)), fin_se); fout_n = __dadd_rn(__dmul_rn(-OMEGA, __dadd_rn(fin_n , - feq_n )), fin_n ); fout_c = __dadd_rn(__dmul_rn(-OMEGA, __dadd_rn(fin_c , - feq_c )), fin_c ); fout_s = __dadd_rn(__dmul_rn(-OMEGA, __dadd_rn(fin_s , - feq_s )), fin_s ); fout_nw = __dadd_rn(__dmul_rn(-OMEGA, __dadd_rn(fin_nw, - feq_nw)), fin_nw); fout_w = __dadd_rn(__dmul_rn(-OMEGA, __dadd_rn(fin_w , - feq_w )), fin_w ); fout_sw = __dadd_rn(__dmul_rn(-OMEGA, __dadd_rn(fin_sw, - feq_sw)), fin_sw); } d_vars->u0[gi] = u0; d_vars->u1[gi] = u1; // STREAMING // shared variables for in-block propagation __shared__ double fo_E [BLOCK_SIZE]; __shared__ double fo_W [BLOCK_SIZE]; __shared__ double fo_SE[BLOCK_SIZE]; __shared__ double fo_SW[BLOCK_SIZE]; __shared__ double fo_NE[BLOCK_SIZE]; __shared__ double fo_NW[BLOCK_SIZE]; // Center 'propagation' (global memory) f1->c[gi] = fout_c; // N + S propagation (global memory) f1->s[IDX(x, y-1)] = fout_s; f1->n[IDX(x, y+1)] = fout_n; // E propagation in shared memory if (tix < blockDim.x-1 && x < NX-1) { fo_E [tix+1] = fout_e; fo_NE[tix+1] = fout_ne; fo_SE[tix+1] = fout_se; // E propagation in global memory (at block boundary) } else { f1->e [IDX(x+1, y )] = fout_e; f1->se[IDX(x+1, y-1)] = fout_se; f1->ne[IDX(x+1, y+1)] = fout_ne; } // W propagation in shared memory if (tix > 0) { fo_W [tix-1] = fout_w; fo_NW[tix-1] = fout_nw; fo_SW[tix-1] = fout_sw; // W propagation in global memory (at block boundary) } else { f1->w [IDX(x-1, y )] = fout_w; f1->sw[IDX(x-1, y-1)] = fout_sw; f1->nw[IDX(x-1, y+1)] = fout_nw; } __syncthreads(); // the leftmost thread is not updated in this block if (tix > 0) { f1->e [gi ] = fo_E 
[tix]; f1->se[IDX(x, y-1)] = fo_SE[tix]; f1->ne[IDX(x, y+1)] = fo_NE[tix]; } // the rightmost thread is not updated in this block if (tix < blockDim.x-1 && x < NX-1) { f1->w [gi ] = fo_W [tix]; f1->sw[IDX(x, y-1)] = fo_SW[tix]; f1->nw[IDX(x, y+1)] = fo_NW[tix]; } __syncthreads(); // only nessessary when NX % BLOCK_SIZE != 0 } } } struct lbm_simulation{ lbm_vars h_vars, *d_vars; dim3 dimComputationGrid, dimComputationBlock; dim3 dimRightWallGrid, dimRightWallBlock; size_t shared_mem_size; bool switch_f0_f1; }; lbm_simulation* lbm_simulation_create() { lbm_simulation* lbm_sim = (lbm_simulation*) malloc (sizeof(lbm_simulation)); lbm_consts h_consts; initVelocity(h_consts.vel); HANDLE_ERROR(cudaMemcpyToSymbol(d_consts, &h_consts, sizeof(lbm_consts))); initObstacles(lbm_sim->h_vars.obstacles); // Initialization of the populations at equilibrium with the given velocity. lbm_sim->switch_f0_f1 = false; for (int y = 0; y < NY; y++) { for (int x = 0; x < NX; x++) { h_equilibrium(&lbm_sim->h_vars.f0, IDX(x,y), 1.0, h_consts.vel[y]); } } HANDLE_ERROR(cudaMalloc(&lbm_sim->d_vars, sizeof(lbm_vars))); HANDLE_ERROR(cudaMemcpy(lbm_sim->d_vars, &lbm_sim->h_vars, sizeof(lbm_vars), cudaMemcpyHostToDevice)); cudaDeviceProp prop; cudaGetDeviceProperties(&prop, 0); dim3 dimComputationGrid(max(1, NX/BLOCK_SIZE), min(NY, prop.maxGridSize[1])); dim3 dimComputationBlock(BLOCK_SIZE); lbm_sim->dimComputationGrid = dimComputationGrid; lbm_sim->dimComputationBlock = dimComputationBlock; dim3 dimRightWallGrid(1, min(NY, prop.maxGridSize[1])); dim3 dimRightWallBlock(1); lbm_sim->dimRightWallGrid = dimRightWallGrid; lbm_sim->dimRightWallBlock = dimRightWallBlock; lbm_sim->shared_mem_size = 6 * sizeof(double) * BLOCK_SIZE; return lbm_sim; } void lbm_simulation_destroy(lbm_simulation* lbm_sim) { HANDLE_ERROR(cudaFree(lbm_sim->d_vars)); free(lbm_sim); } void lbm_simulation_update(lbm_simulation* lbm_sim) { if (lbm_sim->switch_f0_f1) { HANDLE_KERNEL_ERROR(lbm_right_wall<<<lbm_sim->dimRightWallGrid, lbm_sim->dimRightWallBlock>>>(&lbm_sim->d_vars->f1)); HANDLE_KERNEL_ERROR(lbm_computation<<<lbm_sim->dimComputationGrid, lbm_sim->dimComputationBlock, lbm_sim->shared_mem_size>>>(lbm_sim->d_vars, &lbm_sim->d_vars->f1, &lbm_sim->d_vars->f0)); } else { HANDLE_KERNEL_ERROR(lbm_right_wall<<<lbm_sim->dimRightWallGrid, lbm_sim->dimRightWallBlock>>>(&lbm_sim->d_vars->f0)); HANDLE_KERNEL_ERROR(lbm_computation<<<lbm_sim->dimComputationGrid, lbm_sim->dimComputationBlock, lbm_sim->shared_mem_size>>>(lbm_sim->d_vars, &lbm_sim->d_vars->f0, &lbm_sim->d_vars->f1)); } lbm_sim->switch_f0_f1 = ! lbm_sim->switch_f0_f1; } void lbm_simulation_get_size(lbm_simulation* lbm_sim, size_t* width, size_t* height) { *width = NX; *height = NY; } void lbm_lattices_read(lbm_simulation* lbm_sim, lbm_lattices* lat) { lbm_lattices* d_lat = lbm_sim->switch_f0_f1 ? 
&lbm_sim->d_vars->f1 : &lbm_sim->d_vars->f0; HANDLE_ERROR(cudaMemcpy(lat, d_lat, sizeof(lbm_lattices), cudaMemcpyDeviceToHost)); } void lbm_u_read(lbm_simulation* lbm_sim, lbm_u* u) { HANDLE_ERROR(cudaMemcpy(u->u0, lbm_sim->d_vars->u0, sizeof(double)*NL, cudaMemcpyDeviceToHost)); HANDLE_ERROR(cudaMemcpy(u->u1, lbm_sim->d_vars->u1, sizeof(double)*NL, cudaMemcpyDeviceToHost)); } lbm_lattices* lbm_lattices_create() { return (lbm_lattices*) malloc(sizeof(lbm_lattices)); } void lbm_lattices_destroy(lbm_lattices* lat) { free(lat); } lbm_u* lbm_u_create() { return (lbm_u*) malloc(sizeof(lbm_u)); } void lbm_u_destroy(lbm_u* u) { free(u); } void lbm_lattices_at_index(lbm_lattice* lattice, lbm_lattices* lattices, int x, int y) { int gi = IDX(x,y); lbm_lattices* lat = (lbm_lattices*) lattices; lattice->ne = lat->ne[gi]; lattice->e = lat->e [gi]; lattice->se = lat->se[gi]; lattice->n = lat->n [gi]; lattice->c = lat->c [gi]; lattice->s = lat->s [gi]; lattice->nw = lat->nw[gi]; lattice->w = lat->w [gi]; lattice->sw = lat->sw[gi]; } void lbm_u_at_index(double* u0, double* u1, lbm_u* u, int x, int y) { int gi = IDX(x,y); *u0 = u->u0[gi]; *u1 = u->u1[gi]; }
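// --- host reference for validation (not part of the original file) ----------
// The device equilibrium above is the standard D2Q9 form
//   feq_i = rho * w_i * (1 + 3*ci.u + 4.5*(ci.u)^2 - 1.5*|u|^2),
// with weights 4/9 (centre), 1/9 (axis directions) and 1/36 (diagonals),
// spelled out with __dmul_rn/__dadd_rn to pin the rounding. A plain host
// version, handy for spot-checking a few nodes against d_equilibrium (the
// function name and the 9-element output ordering are choices made for this
// sketch):
static void ref_equilibrium(double feq[9], double rho, double u0, double u1) {
    // direction order: ne, e, se, n, c, s, nw, w, sw (matches lbm_lattices)
    const double cx[9] = { 1, 1,  1, 0, 0,  0, -1, -1, -1 };
    const double cy[9] = { 1, 0, -1, 1, 0, -1,  1,  0, -1 };
    const double w [9] = { 1./36, 1./9, 1./36, 1./9, 4./9,
                           1./9, 1./36, 1./9, 1./36 };
    const double usqr = 1.5 * (u0 * u0 + u1 * u1);
    for (int i = 0; i < 9; i++) {
        const double cu = 3.0 * (cx[i] * u0 + cy[i] * u1);
        feq[i] = rho * w[i] * (1.0 + cu + 0.5 * cu * cu - usqr);
    }
}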
125786e8da844f85699d0405c25bbf4ae94ec10f.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // // auto-generated by ops.py // __constant__ int xdim0_update_halo_kernel4_plus_2_back; int xdim0_update_halo_kernel4_plus_2_back_h = -1; __constant__ int ydim0_update_halo_kernel4_plus_2_back; int ydim0_update_halo_kernel4_plus_2_back_h = -1; __constant__ int xdim1_update_halo_kernel4_plus_2_back; int xdim1_update_halo_kernel4_plus_2_back_h = -1; __constant__ int ydim1_update_halo_kernel4_plus_2_back; int ydim1_update_halo_kernel4_plus_2_back_h = -1; #undef OPS_ACC0 #undef OPS_ACC1 #define OPS_ACC0(x,y,z) (x+xdim0_update_halo_kernel4_plus_2_back*(y)+xdim0_update_halo_kernel4_plus_2_back*ydim0_update_halo_kernel4_plus_2_back*(z)) #define OPS_ACC1(x,y,z) (x+xdim1_update_halo_kernel4_plus_2_back*(y)+xdim1_update_halo_kernel4_plus_2_back*ydim1_update_halo_kernel4_plus_2_back*(z)) //user function __device__ inline void update_halo_kernel4_plus_2_back_gpu(double *vol_flux_y, double *mass_flux_y, const int* fields) { if(fields[FIELD_VOL_FLUX_Y] == 1) vol_flux_y[OPS_ACC0(0,0,0)] = vol_flux_y[OPS_ACC0(0,0,2)]; if(fields[FIELD_MASS_FLUX_Y] == 1) mass_flux_y[OPS_ACC1(0,0,0)] = mass_flux_y[OPS_ACC1(0,0,2)]; } #undef OPS_ACC0 #undef OPS_ACC1 __global__ void ops_update_halo_kernel4_plus_2_back( double* __restrict arg0, double* __restrict arg1, const int* __restrict arg2, int size0, int size1, int size2 ){ int idx_z = blockDim.z * blockIdx.z + threadIdx.z; int idx_y = blockDim.y * blockIdx.y + threadIdx.y; int idx_x = blockDim.x * blockIdx.x + threadIdx.x; arg0 += idx_x * 1*1 + idx_y * 1*1 * xdim0_update_halo_kernel4_plus_2_back + idx_z * 1*1 * xdim0_update_halo_kernel4_plus_2_back * ydim0_update_halo_kernel4_plus_2_back; arg1 += idx_x * 1*1 + idx_y * 1*1 * xdim1_update_halo_kernel4_plus_2_back + idx_z * 1*1 * xdim1_update_halo_kernel4_plus_2_back * ydim1_update_halo_kernel4_plus_2_back; if (idx_x < size0 && idx_y < size1 && idx_z < size2) { update_halo_kernel4_plus_2_back_gpu(arg0, arg1, arg2); } } // host stub function #ifndef OPS_LAZY void ops_par_loop_update_halo_kernel4_plus_2_back(char const *name, ops_block block, int dim, int* range, ops_arg arg0, ops_arg arg1, ops_arg arg2) { #else void ops_par_loop_update_halo_kernel4_plus_2_back_execute(ops_kernel_descriptor *desc) { int dim = desc->dim; int *range = desc->range; ops_arg arg0 = desc->args[0]; ops_arg arg1 = desc->args[1]; ops_arg arg2 = desc->args[2]; #endif //Timing double t1,t2,c1,c2; ops_arg args[3] = { arg0, arg1, arg2}; #if CHECKPOINTING && !OPS_LAZY if (!ops_checkpointing_before(args,3,range,80)) return; #endif if (OPS_diags > 1) { ops_timing_realloc(80,"update_halo_kernel4_plus_2_back"); OPS_kernels[80].count++; ops_timers_core(&c1,&t1); } //compute locally allocated range for the sub-block int start[3]; int end[3]; #if OPS_MPI && !OPS_LAZY sub_block_list sb = OPS_sub_block_list[block->index]; if (!sb->owned) return; for ( int n=0; n<3; n++ ){ start[n] = sb->decomp_disp[n];end[n] = sb->decomp_disp[n]+sb->decomp_size[n]; if (start[n] >= range[2*n]) { start[n] = 0; } else { start[n] = range[2*n] - start[n]; } if (sb->id_m[n]==MPI_PROC_NULL && range[2*n] < 0) start[n] = range[2*n]; if (end[n] >= range[2*n+1]) { end[n] = range[2*n+1] - sb->decomp_disp[n]; } else { end[n] = sb->decomp_size[n]; } if (sb->id_p[n]==MPI_PROC_NULL && (range[2*n+1] > sb->decomp_disp[n]+sb->decomp_size[n])) end[n] += (range[2*n+1]-sb->decomp_disp[n]-sb->decomp_size[n]); } #else for ( int n=0; n<3; n++ ){ start[n] = range[2*n];end[n] = range[2*n+1]; } #endif int x_size = 
MAX(0,end[0]-start[0]); int y_size = MAX(0,end[1]-start[1]); int z_size = MAX(0,end[2]-start[2]); int xdim0 = args[0].dat->size[0]; int ydim0 = args[0].dat->size[1]; int xdim1 = args[1].dat->size[0]; int ydim1 = args[1].dat->size[1]; if (xdim0 != xdim0_update_halo_kernel4_plus_2_back_h || ydim0 != ydim0_update_halo_kernel4_plus_2_back_h || xdim1 != xdim1_update_halo_kernel4_plus_2_back_h || ydim1 != ydim1_update_halo_kernel4_plus_2_back_h) { hipMemcpyToSymbol( xdim0_update_halo_kernel4_plus_2_back, &xdim0, sizeof(int) ); xdim0_update_halo_kernel4_plus_2_back_h = xdim0; hipMemcpyToSymbol( ydim0_update_halo_kernel4_plus_2_back, &ydim0, sizeof(int) ); ydim0_update_halo_kernel4_plus_2_back_h = ydim0; hipMemcpyToSymbol( xdim1_update_halo_kernel4_plus_2_back, &xdim1, sizeof(int) ); xdim1_update_halo_kernel4_plus_2_back_h = xdim1; hipMemcpyToSymbol( ydim1_update_halo_kernel4_plus_2_back, &ydim1, sizeof(int) ); ydim1_update_halo_kernel4_plus_2_back_h = ydim1; } int *arg2h = (int *)arg2.data; dim3 grid( (x_size-1)/OPS_block_size_x+ 1, (y_size-1)/OPS_block_size_y + 1, (z_size-1)/OPS_block_size_z +1); dim3 tblock(OPS_block_size_x,OPS_block_size_y,OPS_block_size_z); int consts_bytes = 0; consts_bytes += ROUND_UP(NUM_FIELDS*sizeof(int)); reallocConstArrays(consts_bytes); consts_bytes = 0; arg2.data = OPS_consts_h + consts_bytes; arg2.data_d = OPS_consts_d + consts_bytes; for (int d=0; d<NUM_FIELDS; d++) ((int *)arg2.data)[d] = arg2h[d]; consts_bytes += ROUND_UP(NUM_FIELDS*sizeof(int)); mvConstArraysToDevice(consts_bytes); int dat0 = (OPS_soa ? args[0].dat->type_size : args[0].dat->elem_size); int dat1 = (OPS_soa ? args[1].dat->type_size : args[1].dat->elem_size); char *p_a[3]; //set up initial pointers int base0 = args[0].dat->base_offset + dat0 * 1 * (start[0] * args[0].stencil->stride[0]); base0 = base0+ dat0 * args[0].dat->size[0] * (start[1] * args[0].stencil->stride[1]); base0 = base0+ dat0 * args[0].dat->size[0] * args[0].dat->size[1] * (start[2] * args[0].stencil->stride[2]); p_a[0] = (char *)args[0].data_d + base0; int base1 = args[1].dat->base_offset + dat1 * 1 * (start[0] * args[1].stencil->stride[0]); base1 = base1+ dat1 * args[1].dat->size[0] * (start[1] * args[1].stencil->stride[1]); base1 = base1+ dat1 * args[1].dat->size[0] * args[1].dat->size[1] * (start[2] * args[1].stencil->stride[2]); p_a[1] = (char *)args[1].data_d + base1; #ifndef OPS_LAZY ops_H_D_exchanges_device(args, 3); ops_halo_exchanges(args,3,range); #endif if (OPS_diags > 1) { ops_timers_core(&c2,&t2); OPS_kernels[80].mpi_time += t2-t1; } //call kernel wrapper function, passing in pointers to data if (x_size > 0 && y_size > 0 && z_size > 0) hipLaunchKernelGGL(( ops_update_halo_kernel4_plus_2_back), dim3(grid), dim3(tblock) , 0, 0, (double *)p_a[0], (double *)p_a[1], (int *)arg2.data_d,x_size, y_size, z_size); cutilSafeCall(hipGetLastError()); if (OPS_diags>1) { cutilSafeCall(hipDeviceSynchronize()); ops_timers_core(&c1,&t1); OPS_kernels[80].time += t1-t2; } #ifndef OPS_LAZY ops_set_dirtybit_device(args, 3); ops_set_halo_dirtybit3(&args[0],range); ops_set_halo_dirtybit3(&args[1],range); #endif if (OPS_diags > 1) { //Update kernel record ops_timers_core(&c2,&t2); OPS_kernels[80].mpi_time += t2-t1; OPS_kernels[80].transfer += ops_compute_transfer(dim, start, end, &arg0); OPS_kernels[80].transfer += ops_compute_transfer(dim, start, end, &arg1); } } #ifdef OPS_LAZY void ops_par_loop_update_halo_kernel4_plus_2_back(char const *name, ops_block block, int dim, int* range, ops_arg arg0, ops_arg arg1, ops_arg arg2) { 
ops_kernel_descriptor *desc = (ops_kernel_descriptor *)malloc(sizeof(ops_kernel_descriptor)); desc->name = name; desc->block = block; desc->dim = dim; desc->device = 1; desc->index = 80; desc->hash = 5381; desc->hash = ((desc->hash << 5) + desc->hash) + 80; for ( int i=0; i<6; i++ ){ desc->range[i] = range[i]; desc->orig_range[i] = range[i]; desc->hash = ((desc->hash << 5) + desc->hash) + range[i]; } desc->nargs = 3; desc->args = (ops_arg*)malloc(3*sizeof(ops_arg)); desc->args[0] = arg0; desc->hash = ((desc->hash << 5) + desc->hash) + arg0.dat->index; desc->args[1] = arg1; desc->hash = ((desc->hash << 5) + desc->hash) + arg1.dat->index; desc->args[2] = arg2; char *tmp = (char*)malloc(NUM_FIELDS*sizeof(int)); memcpy(tmp, arg2.data,NUM_FIELDS*sizeof(int)); desc->args[2].data = tmp; desc->function = ops_par_loop_update_halo_kernel4_plus_2_back_execute; if (OPS_diags > 1) { ops_timing_realloc(80,"update_halo_kernel4_plus_2_back"); } ops_enqueue_kernel(desc); } #endif
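// --- index arithmetic note (not part of the generated file) -----------------
// OPS_ACC0(x,y,z) flattens a relative 3D offset as x + xdim0*y + xdim0*ydim0*z,
// so update_halo_kernel4_plus_2_back_gpu fills each back-face cell from the
// value two z-planes further in. A plain host-side version of the same offset
// computation, with hypothetical dimensions, just to make the stride explicit:
static inline int acc3d(int x, int y, int z, int xdim, int ydim) {
    return x + xdim * y + xdim * ydim * z;   // same layout as OPS_ACC0/OPS_ACC1
}
// e.g. with xdim = 8 and ydim = 4, acc3d(0, 0, 2, 8, 4) == 64, i.e. the halo
// cell at (x, y, 0) is read from the element 2 * xdim * ydim entries away.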
125786e8da844f85699d0405c25bbf4ae94ec10f.cu
// // auto-generated by ops.py // __constant__ int xdim0_update_halo_kernel4_plus_2_back; int xdim0_update_halo_kernel4_plus_2_back_h = -1; __constant__ int ydim0_update_halo_kernel4_plus_2_back; int ydim0_update_halo_kernel4_plus_2_back_h = -1; __constant__ int xdim1_update_halo_kernel4_plus_2_back; int xdim1_update_halo_kernel4_plus_2_back_h = -1; __constant__ int ydim1_update_halo_kernel4_plus_2_back; int ydim1_update_halo_kernel4_plus_2_back_h = -1; #undef OPS_ACC0 #undef OPS_ACC1 #define OPS_ACC0(x,y,z) (x+xdim0_update_halo_kernel4_plus_2_back*(y)+xdim0_update_halo_kernel4_plus_2_back*ydim0_update_halo_kernel4_plus_2_back*(z)) #define OPS_ACC1(x,y,z) (x+xdim1_update_halo_kernel4_plus_2_back*(y)+xdim1_update_halo_kernel4_plus_2_back*ydim1_update_halo_kernel4_plus_2_back*(z)) //user function __device__ inline void update_halo_kernel4_plus_2_back_gpu(double *vol_flux_y, double *mass_flux_y, const int* fields) { if(fields[FIELD_VOL_FLUX_Y] == 1) vol_flux_y[OPS_ACC0(0,0,0)] = vol_flux_y[OPS_ACC0(0,0,2)]; if(fields[FIELD_MASS_FLUX_Y] == 1) mass_flux_y[OPS_ACC1(0,0,0)] = mass_flux_y[OPS_ACC1(0,0,2)]; } #undef OPS_ACC0 #undef OPS_ACC1 __global__ void ops_update_halo_kernel4_plus_2_back( double* __restrict arg0, double* __restrict arg1, const int* __restrict arg2, int size0, int size1, int size2 ){ int idx_z = blockDim.z * blockIdx.z + threadIdx.z; int idx_y = blockDim.y * blockIdx.y + threadIdx.y; int idx_x = blockDim.x * blockIdx.x + threadIdx.x; arg0 += idx_x * 1*1 + idx_y * 1*1 * xdim0_update_halo_kernel4_plus_2_back + idx_z * 1*1 * xdim0_update_halo_kernel4_plus_2_back * ydim0_update_halo_kernel4_plus_2_back; arg1 += idx_x * 1*1 + idx_y * 1*1 * xdim1_update_halo_kernel4_plus_2_back + idx_z * 1*1 * xdim1_update_halo_kernel4_plus_2_back * ydim1_update_halo_kernel4_plus_2_back; if (idx_x < size0 && idx_y < size1 && idx_z < size2) { update_halo_kernel4_plus_2_back_gpu(arg0, arg1, arg2); } } // host stub function #ifndef OPS_LAZY void ops_par_loop_update_halo_kernel4_plus_2_back(char const *name, ops_block block, int dim, int* range, ops_arg arg0, ops_arg arg1, ops_arg arg2) { #else void ops_par_loop_update_halo_kernel4_plus_2_back_execute(ops_kernel_descriptor *desc) { int dim = desc->dim; int *range = desc->range; ops_arg arg0 = desc->args[0]; ops_arg arg1 = desc->args[1]; ops_arg arg2 = desc->args[2]; #endif //Timing double t1,t2,c1,c2; ops_arg args[3] = { arg0, arg1, arg2}; #if CHECKPOINTING && !OPS_LAZY if (!ops_checkpointing_before(args,3,range,80)) return; #endif if (OPS_diags > 1) { ops_timing_realloc(80,"update_halo_kernel4_plus_2_back"); OPS_kernels[80].count++; ops_timers_core(&c1,&t1); } //compute locally allocated range for the sub-block int start[3]; int end[3]; #if OPS_MPI && !OPS_LAZY sub_block_list sb = OPS_sub_block_list[block->index]; if (!sb->owned) return; for ( int n=0; n<3; n++ ){ start[n] = sb->decomp_disp[n];end[n] = sb->decomp_disp[n]+sb->decomp_size[n]; if (start[n] >= range[2*n]) { start[n] = 0; } else { start[n] = range[2*n] - start[n]; } if (sb->id_m[n]==MPI_PROC_NULL && range[2*n] < 0) start[n] = range[2*n]; if (end[n] >= range[2*n+1]) { end[n] = range[2*n+1] - sb->decomp_disp[n]; } else { end[n] = sb->decomp_size[n]; } if (sb->id_p[n]==MPI_PROC_NULL && (range[2*n+1] > sb->decomp_disp[n]+sb->decomp_size[n])) end[n] += (range[2*n+1]-sb->decomp_disp[n]-sb->decomp_size[n]); } #else for ( int n=0; n<3; n++ ){ start[n] = range[2*n];end[n] = range[2*n+1]; } #endif int x_size = MAX(0,end[0]-start[0]); int y_size = MAX(0,end[1]-start[1]); int z_size = 
MAX(0,end[2]-start[2]); int xdim0 = args[0].dat->size[0]; int ydim0 = args[0].dat->size[1]; int xdim1 = args[1].dat->size[0]; int ydim1 = args[1].dat->size[1]; if (xdim0 != xdim0_update_halo_kernel4_plus_2_back_h || ydim0 != ydim0_update_halo_kernel4_plus_2_back_h || xdim1 != xdim1_update_halo_kernel4_plus_2_back_h || ydim1 != ydim1_update_halo_kernel4_plus_2_back_h) { cudaMemcpyToSymbol( xdim0_update_halo_kernel4_plus_2_back, &xdim0, sizeof(int) ); xdim0_update_halo_kernel4_plus_2_back_h = xdim0; cudaMemcpyToSymbol( ydim0_update_halo_kernel4_plus_2_back, &ydim0, sizeof(int) ); ydim0_update_halo_kernel4_plus_2_back_h = ydim0; cudaMemcpyToSymbol( xdim1_update_halo_kernel4_plus_2_back, &xdim1, sizeof(int) ); xdim1_update_halo_kernel4_plus_2_back_h = xdim1; cudaMemcpyToSymbol( ydim1_update_halo_kernel4_plus_2_back, &ydim1, sizeof(int) ); ydim1_update_halo_kernel4_plus_2_back_h = ydim1; } int *arg2h = (int *)arg2.data; dim3 grid( (x_size-1)/OPS_block_size_x+ 1, (y_size-1)/OPS_block_size_y + 1, (z_size-1)/OPS_block_size_z +1); dim3 tblock(OPS_block_size_x,OPS_block_size_y,OPS_block_size_z); int consts_bytes = 0; consts_bytes += ROUND_UP(NUM_FIELDS*sizeof(int)); reallocConstArrays(consts_bytes); consts_bytes = 0; arg2.data = OPS_consts_h + consts_bytes; arg2.data_d = OPS_consts_d + consts_bytes; for (int d=0; d<NUM_FIELDS; d++) ((int *)arg2.data)[d] = arg2h[d]; consts_bytes += ROUND_UP(NUM_FIELDS*sizeof(int)); mvConstArraysToDevice(consts_bytes); int dat0 = (OPS_soa ? args[0].dat->type_size : args[0].dat->elem_size); int dat1 = (OPS_soa ? args[1].dat->type_size : args[1].dat->elem_size); char *p_a[3]; //set up initial pointers int base0 = args[0].dat->base_offset + dat0 * 1 * (start[0] * args[0].stencil->stride[0]); base0 = base0+ dat0 * args[0].dat->size[0] * (start[1] * args[0].stencil->stride[1]); base0 = base0+ dat0 * args[0].dat->size[0] * args[0].dat->size[1] * (start[2] * args[0].stencil->stride[2]); p_a[0] = (char *)args[0].data_d + base0; int base1 = args[1].dat->base_offset + dat1 * 1 * (start[0] * args[1].stencil->stride[0]); base1 = base1+ dat1 * args[1].dat->size[0] * (start[1] * args[1].stencil->stride[1]); base1 = base1+ dat1 * args[1].dat->size[0] * args[1].dat->size[1] * (start[2] * args[1].stencil->stride[2]); p_a[1] = (char *)args[1].data_d + base1; #ifndef OPS_LAZY ops_H_D_exchanges_device(args, 3); ops_halo_exchanges(args,3,range); #endif if (OPS_diags > 1) { ops_timers_core(&c2,&t2); OPS_kernels[80].mpi_time += t2-t1; } //call kernel wrapper function, passing in pointers to data if (x_size > 0 && y_size > 0 && z_size > 0) ops_update_halo_kernel4_plus_2_back<<<grid, tblock >>> ( (double *)p_a[0], (double *)p_a[1], (int *)arg2.data_d,x_size, y_size, z_size); cutilSafeCall(cudaGetLastError()); if (OPS_diags>1) { cutilSafeCall(cudaDeviceSynchronize()); ops_timers_core(&c1,&t1); OPS_kernels[80].time += t1-t2; } #ifndef OPS_LAZY ops_set_dirtybit_device(args, 3); ops_set_halo_dirtybit3(&args[0],range); ops_set_halo_dirtybit3(&args[1],range); #endif if (OPS_diags > 1) { //Update kernel record ops_timers_core(&c2,&t2); OPS_kernels[80].mpi_time += t2-t1; OPS_kernels[80].transfer += ops_compute_transfer(dim, start, end, &arg0); OPS_kernels[80].transfer += ops_compute_transfer(dim, start, end, &arg1); } } #ifdef OPS_LAZY void ops_par_loop_update_halo_kernel4_plus_2_back(char const *name, ops_block block, int dim, int* range, ops_arg arg0, ops_arg arg1, ops_arg arg2) { ops_kernel_descriptor *desc = (ops_kernel_descriptor *)malloc(sizeof(ops_kernel_descriptor)); desc->name = name; 
desc->block = block; desc->dim = dim; desc->device = 1; desc->index = 80; desc->hash = 5381; desc->hash = ((desc->hash << 5) + desc->hash) + 80; for ( int i=0; i<6; i++ ){ desc->range[i] = range[i]; desc->orig_range[i] = range[i]; desc->hash = ((desc->hash << 5) + desc->hash) + range[i]; } desc->nargs = 3; desc->args = (ops_arg*)malloc(3*sizeof(ops_arg)); desc->args[0] = arg0; desc->hash = ((desc->hash << 5) + desc->hash) + arg0.dat->index; desc->args[1] = arg1; desc->hash = ((desc->hash << 5) + desc->hash) + arg1.dat->index; desc->args[2] = arg2; char *tmp = (char*)malloc(NUM_FIELDS*sizeof(int)); memcpy(tmp, arg2.data,NUM_FIELDS*sizeof(int)); desc->args[2].data = tmp; desc->function = ops_par_loop_update_halo_kernel4_plus_2_back_execute; if (OPS_diags > 1) { ops_timing_realloc(80,"update_halo_kernel4_plus_2_back"); } ops_enqueue_kernel(desc); } #endif
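The OPS-generated host stub above keeps a host-side copy of each dat dimension (the *_h variables) and calls cudaMemcpyToSymbol only when a dimension differs from that cached value, so repeated invocations of the same loop skip the constant-memory update. Below is a minimal stand-alone sketch of that caching idiom; the symbol and function names are illustrative placeholders, not identifiers from the generated code.

#include <cstdio>
#include <cuda_runtime.h>

// Cached-constant idiom, sketched stand-alone. The host remembers the last value
// it pushed to the __constant__ symbol and skips cudaMemcpyToSymbol when nothing
// changed. Names here (xdim0_example, push_xdim_if_changed) are placeholders.
__constant__ int xdim0_example;        // read by kernels
static int xdim0_example_h = -1;       // host-side cache of the last pushed value

static void push_xdim_if_changed(int xdim0) {
  if (xdim0 != xdim0_example_h) {      // only pay for the transfer when the size changed
    cudaMemcpyToSymbol(xdim0_example, &xdim0, sizeof(int));
    xdim0_example_h = xdim0;
  }
}

__global__ void read_xdim(int* out) { out[0] = xdim0_example; }

int main() {
  int* d_out;
  cudaMalloc(&d_out, sizeof(int));
  push_xdim_if_changed(128);           // first call copies to constant memory
  push_xdim_if_changed(128);           // second call is a no-op
  read_xdim<<<1, 1>>>(d_out);
  int h_out = 0;
  cudaMemcpy(&h_out, d_out, sizeof(int), cudaMemcpyDeviceToHost);
  printf("xdim0 on device: %d\n", h_out);
  cudaFree(d_out);
  return 0;
}

The check costs a host integer compare, while the copy it avoids is a host-to-device transfer that would otherwise happen on every launch of the loop.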
87b4d093e36105a729530f7ae6d87dd452368c5a.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. #include "paddle/phi/kernels/cross_kernel.h" #include "paddle/phi/backends/gpu/gpu_context.h" #include "paddle/phi/backends/gpu/gpu_launch_config.h" #include "paddle/phi/core/dense_tensor.h" #include "paddle/phi/core/kernel_registry.h" #include "paddle/phi/kernels/funcs/reduce_function.h" namespace phi { using funcs::IndexCalculator; template <typename T> __global__ void Cross(const T* x, const T* y, T* out, const int stride, const int N, IndexCalculator index_calculator) { CUDA_KERNEL_LOOP(i, N) { int offset = index_calculator(i); auto pos0 = offset + 0 * stride; auto pos1 = offset + 1 * stride; auto pos2 = offset + 2 * stride; out[pos0] = x[pos1] * y[pos2] - x[pos2] * y[pos1]; out[pos1] = x[pos2] * y[pos0] - x[pos0] * y[pos2]; out[pos2] = x[pos0] * y[pos1] - x[pos1] * y[pos0]; } } template <typename T, typename Context> void CrossKernel(const Context& dev_ctx, const DenseTensor& x, const DenseTensor& y, int axis, DenseTensor* out) { auto& input_x = x; auto& input_y = y; auto* output = out; int dim = axis; auto input_x_dims = input_x.dims(); if (dim != DDim::kMaxRank) { PADDLE_ENFORCE_EQ( dim < input_x_dims.size() && dim >= (0 - input_x_dims.size()), true, phi::errors::OutOfRange( "Attr(dim) is out of range, It's expected " "to be in range of [-%d, %d]. But received Attr(dim) = %d.", input_x_dims.size(), input_x_dims.size() - 1, dim)); if (dim < 0) { dim += input_x_dims.size(); } PADDLE_ENFORCE_EQ( input_x_dims[dim] == 3, true, phi::errors::InvalidArgument( "Input(X/Y).dims[dim] must be equal to 3. But received: " "Input(X/Y).dims[dim] = [%d].", input_x_dims[dim])); } else { for (auto i = 0; i < input_x_dims.size(); i++) { if (input_x_dims[i] == 3) { dim = i; break; } } PADDLE_ENFORCE_EQ(dim == DDim::kMaxRank, false, phi::errors::InvalidArgument( "There must be at least one dimension 'd' so that " "Input(X/Y).dims()[d] is equal to 3. 
" "But received: Input(X/Y).dims() == [%s].", input_x_dims)); } std::vector<int> cal_dims; std::vector<int> left_strides; std::vector<int> full_strides; int dims0 = 1; int dims1 = 1; for (auto i = 0; i < input_x_dims.size(); i++) { full_strides.insert(full_strides.begin(), dims0); dims0 *= input_x_dims[input_x_dims.size() - i - 1]; if (i == dim) { continue; } cal_dims.push_back(i); left_strides.insert(left_strides.begin(), dims1); dims1 *= input_x_dims[input_x_dims.size() - i - 1]; } const auto* input_x_data = input_x.data<T>(); const auto* input_y_data = input_y.data<T>(); auto* out_data = dev_ctx.template Alloc<T>(out); auto index_calculator = IndexCalculator( input_x_dims.size() - 1, cal_dims, left_strides, full_strides); int64_t numel = x.numel(); backends::gpu::GpuLaunchConfig config = backends::gpu::GetGpuLaunchConfig1D(dev_ctx, numel / 3); hipLaunchKernelGGL(( Cross), dim3(config.block_per_grid), dim3(config.thread_per_block), 0, dev_ctx.stream(), input_x_data, input_y_data, out_data, full_strides[dim], numel / 3, index_calculator); } } // namespace phi PD_REGISTER_KERNEL( cross, GPU, ALL_LAYOUT, phi::CrossKernel, float, double, int, int64_t) {}
87b4d093e36105a729530f7ae6d87dd452368c5a.cu
// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. #include "paddle/phi/kernels/cross_kernel.h" #include "paddle/phi/backends/gpu/gpu_context.h" #include "paddle/phi/backends/gpu/gpu_launch_config.h" #include "paddle/phi/core/dense_tensor.h" #include "paddle/phi/core/kernel_registry.h" #include "paddle/phi/kernels/funcs/reduce_function.h" namespace phi { using funcs::IndexCalculator; template <typename T> __global__ void Cross(const T* x, const T* y, T* out, const int stride, const int N, IndexCalculator index_calculator) { CUDA_KERNEL_LOOP(i, N) { int offset = index_calculator(i); auto pos0 = offset + 0 * stride; auto pos1 = offset + 1 * stride; auto pos2 = offset + 2 * stride; out[pos0] = x[pos1] * y[pos2] - x[pos2] * y[pos1]; out[pos1] = x[pos2] * y[pos0] - x[pos0] * y[pos2]; out[pos2] = x[pos0] * y[pos1] - x[pos1] * y[pos0]; } } template <typename T, typename Context> void CrossKernel(const Context& dev_ctx, const DenseTensor& x, const DenseTensor& y, int axis, DenseTensor* out) { auto& input_x = x; auto& input_y = y; auto* output = out; int dim = axis; auto input_x_dims = input_x.dims(); if (dim != DDim::kMaxRank) { PADDLE_ENFORCE_EQ( dim < input_x_dims.size() && dim >= (0 - input_x_dims.size()), true, phi::errors::OutOfRange( "Attr(dim) is out of range, It's expected " "to be in range of [-%d, %d]. But received Attr(dim) = %d.", input_x_dims.size(), input_x_dims.size() - 1, dim)); if (dim < 0) { dim += input_x_dims.size(); } PADDLE_ENFORCE_EQ( input_x_dims[dim] == 3, true, phi::errors::InvalidArgument( "Input(X/Y).dims[dim] must be equal to 3. But received: " "Input(X/Y).dims[dim] = [%d].", input_x_dims[dim])); } else { for (auto i = 0; i < input_x_dims.size(); i++) { if (input_x_dims[i] == 3) { dim = i; break; } } PADDLE_ENFORCE_EQ(dim == DDim::kMaxRank, false, phi::errors::InvalidArgument( "There must be at least one dimension 'd' so that " "Input(X/Y).dims()[d] is equal to 3. 
" "But received: Input(X/Y).dims() == [%s].", input_x_dims)); } std::vector<int> cal_dims; std::vector<int> left_strides; std::vector<int> full_strides; int dims0 = 1; int dims1 = 1; for (auto i = 0; i < input_x_dims.size(); i++) { full_strides.insert(full_strides.begin(), dims0); dims0 *= input_x_dims[input_x_dims.size() - i - 1]; if (i == dim) { continue; } cal_dims.push_back(i); left_strides.insert(left_strides.begin(), dims1); dims1 *= input_x_dims[input_x_dims.size() - i - 1]; } const auto* input_x_data = input_x.data<T>(); const auto* input_y_data = input_y.data<T>(); auto* out_data = dev_ctx.template Alloc<T>(out); auto index_calculator = IndexCalculator( input_x_dims.size() - 1, cal_dims, left_strides, full_strides); int64_t numel = x.numel(); backends::gpu::GpuLaunchConfig config = backends::gpu::GetGpuLaunchConfig1D(dev_ctx, numel / 3); Cross<<<config.block_per_grid, config.thread_per_block, 0, dev_ctx.stream()>>>(input_x_data, input_y_data, out_data, full_strides[dim], numel / 3, index_calculator); } } // namespace phi PD_REGISTER_KERNEL( cross, GPU, ALL_LAYOUT, phi::CrossKernel, float, double, int, int64_t) {}
ea9ada20eef34ce92b85bab0a05cb786f41e5f97.hip
// !!! This is a file automatically generated by hipify!!! #include <hip/hip_runtime.h> #include <rocblas.h> #include <vector> #include <string> #include <iostream> #include <fstream> #include <cstring> #define IMAGE_SAVE #define IMAGE_LOAD #include "image.hpp" #include "autoencoder.hpp" #define HIDDEN 10 #define PS 160 #define SZ (PS*PS) int main() { using namespace ml::image; using ml::autoencoder::Autoencoder; hipInit(0); std::vector<Image *> images; std::string filename; std::ifstream ifs("files.txt"); while (std::getline(ifs, filename)) { printf("Loading %s\n", filename.c_str()); Image *img = load_image(filename.c_str(), 1); Image *rmg = img->resize(PS, PS); Image *data = edge_detection<64, 64>(rmg); delete rmg; delete img; images.push_back(data); } Autoencoder encoder(SZ, HIDDEN); Image *res = new Image(PS, PS, 1); float err; do { err = 0.0f; for (int i = 0; i < images.size(); ++i) { Image *data = images[i]; encoder.propagate(data->h_data); err += encoder.err() / images.size(); /*res->copyDevice(encoder.output_o);*/ /*res->download();*/ /*float test[10];*/ /*hipMemcpy(test, encoder.output_h, sizeof(float) * 10, hipMemcpyDeviceToHost);*/ /*for (int i = 0; i < 10; ++i) {*/ /*printf("%3.1f ", test[i]);*/ /*}*/ /*printf("\n");*/ /*for (int i = 0; i < PS; ++i) {*/ /*for (int j = 0; j < PS; ++j) {*/ /*const int idx = i * PS + j;*/ /*printf("%d", res->h_data[idx] > 0.1f);*/ /*}*/ /*printf("\n");*/ /*}*/ /*printf("\n");*/ /*printf("err: %f\n", err);*/ encoder.backpropagate(); } printf("ERROR: %e\n", err); for (int i = 0; i < HIDDEN; ++i) { char filename[256]; sprintf(filename, "res-%d.bmp", i); encoder.visualize(i, res->d_data); res->download(); res->write(filename); } } while (err > 1E-01); return 0; }
ea9ada20eef34ce92b85bab0a05cb786f41e5f97.cu
#include <cuda.h> #include <cublas_v2.h> #include <vector> #include <string> #include <iostream> #include <fstream> #include <cstring> #define IMAGE_SAVE #define IMAGE_LOAD #include "image.hpp" #include "autoencoder.hpp" #define HIDDEN 10 #define PS 160 #define SZ (PS*PS) int main() { using namespace ml::image; using ml::autoencoder::Autoencoder; cuInit(0); std::vector<Image *> images; std::string filename; std::ifstream ifs("files.txt"); while (std::getline(ifs, filename)) { printf("Loading %s\n", filename.c_str()); Image *img = load_image(filename.c_str(), 1); Image *rmg = img->resize(PS, PS); Image *data = edge_detection<64, 64>(rmg); delete rmg; delete img; images.push_back(data); } Autoencoder encoder(SZ, HIDDEN); Image *res = new Image(PS, PS, 1); float err; do { err = 0.0f; for (int i = 0; i < images.size(); ++i) { Image *data = images[i]; encoder.propagate(data->h_data); err += encoder.err() / images.size(); /*res->copyDevice(encoder.output_o);*/ /*res->download();*/ /*float test[10];*/ /*cudaMemcpy(test, encoder.output_h, sizeof(float) * 10, cudaMemcpyDeviceToHost);*/ /*for (int i = 0; i < 10; ++i) {*/ /*printf("%3.1f ", test[i]);*/ /*}*/ /*printf("\n");*/ /*for (int i = 0; i < PS; ++i) {*/ /*for (int j = 0; j < PS; ++j) {*/ /*const int idx = i * PS + j;*/ /*printf("%d", res->h_data[idx] > 0.1f);*/ /*}*/ /*printf("\n");*/ /*}*/ /*printf("\n");*/ /*printf("err: %f\n", err);*/ encoder.backpropagate(); } printf("ERROR: %e\n", err); for (int i = 0; i < HIDDEN; ++i) { char filename[256]; sprintf(filename, "res-%d.bmp", i); encoder.visualize(i, res->d_data); res->download(); res->write(filename); } } while (err > 1E-01); return 0; }
43ece79be7129e5d32a5ecb2af093d2fd32b2ff5.hip
// !!! This is a file automatically generated by hipify!!! //Multiple blocks, multiple threads (no loop; the grid and block configuration replaces it) #include <hip/hip_runtime.h> #include <time.h> #include <stdio.h> #include <stdlib.h> #include <math.h> #include <ctime> //---------------------------------------------- //Vector addition kernel (GPU) **prefixing a function with __global__ makes it a kernel; a kernel returns only void** __global__ void gpu_add(float* c, float* a, float* b, int n){ int j=blockIdx.x*blockDim.x+threadIdx.x; c[j]=a[j]+b[j]; } //---------------------------------------------- //Ordinary vector addition function (Host) void host_add(float* c, float* a, float* b, int n){ for(int k=0; k<n; k++){ c[k]=a[k]+b[k]; } } //---------------------------------------------- //Function for computing the error double diff(float* a, float* b, int n){ double s=0, r=0; for(int k=0; k<n; k++){ double w=a[k]-b[k]; s+=w*w; r+=a[k]*a[k]; } return sqrt(s/r); //relative error } //---------------------------------------------- //Timing function (returns milliseconds) double ms_time(){ return (double)clock()/CLOCKS_PER_SEC*1000.0; } //---------------------------------------------- //Main program int main(){ //set the vector size int n=1024*1024; int size=n*sizeof(float); //grid and block configuration int block=512; //blockDim (threads per block) int grid=n/block; //gridDim (blocks per grid) //number of calls (to measure average performance) int loop=100; //allocate host memory float *a,*b,*c,*d; a=(float*)malloc(size); b=(float*)malloc(size); c=(float*)malloc(size); d=(float*)malloc(size); //fill the input vectors with random numbers srand(time(0)); for(int k=0; k<n; k++){ a[k]=(float)rand()/RAND_MAX*2-1; b[k]=(float)rand()/RAND_MAX*2-1; } //allocate device (graphics card) memory float *ga,*gb,*gc; hipMalloc((void**)&ga, size); hipMalloc((void**)&gb, size); hipMalloc((void**)&gc, size); //load vectors a,b into device memory hipMemcpy(ga, a, size, hipMemcpyHostToDevice); hipMemcpy(gb, b, size, hipMemcpyHostToDevice); //---- part 1 : measure accuracy -------- //call the kernel to do the computation (GPU) hipLaunchKernelGGL(( gpu_add), dim3(grid), dim3(block), 0, 0, gc, ga, gb, n); //call the ordinary function to do the computation (Host) host_add(d, a, b, n); //copy the result back to the host hipMemcpy(c, gc, size, hipMemcpyDeviceToHost); //compare the difference between the two printf("vector add N(%d) elements, diff = %g\n", n, diff(c,d,n)); //---- part 2 : measure performance -------- //measure GPU kernel performance double gpu_dt = ms_time(); for(int w=0; w<loop; w++){ hipLaunchKernelGGL(( gpu_add), dim3(grid), dim3(block), 0, 0, gc, ga, gb, n); hipDeviceSynchronize(); //make sure the kernel has finished executing } gpu_dt = (ms_time()-gpu_dt)/loop; //average time //measure Host function performance double host_dt = ms_time(); for(int w=0; w<loop; w++){ host_add(d, a, b, n); } host_dt = (ms_time()-host_dt)/loop; //average time //print the average execution times printf("host time: %g ms\n", host_dt); printf("gpu time: %g ms\n", gpu_dt); //free host memory free(a); free(b); free(c); free(d); //free device memory hipFree(ga); hipFree(gb); hipFree(gc); return 0; }
43ece79be7129e5d32a5ecb2af093d2fd32b2ff5.cu
//Multiple blocks, multiple threads (no loop; the grid and block configuration replaces it) #include <cuda.h> #include <time.h> #include <stdio.h> #include <stdlib.h> #include <math.h> #include <ctime> //---------------------------------------------- //Vector addition kernel (GPU) **prefixing a function with __global__ makes it a kernel; a kernel returns only void** __global__ void gpu_add(float* c, float* a, float* b, int n){ int j=blockIdx.x*blockDim.x+threadIdx.x; c[j]=a[j]+b[j]; } //---------------------------------------------- //Ordinary vector addition function (Host) void host_add(float* c, float* a, float* b, int n){ for(int k=0; k<n; k++){ c[k]=a[k]+b[k]; } } //---------------------------------------------- //Function for computing the error double diff(float* a, float* b, int n){ double s=0, r=0; for(int k=0; k<n; k++){ double w=a[k]-b[k]; s+=w*w; r+=a[k]*a[k]; } return sqrt(s/r); //relative error } //---------------------------------------------- //Timing function (returns milliseconds) double ms_time(){ return (double)clock()/CLOCKS_PER_SEC*1000.0; } //---------------------------------------------- //Main program int main(){ //set the vector size int n=1024*1024; int size=n*sizeof(float); //grid and block configuration int block=512; //blockDim (threads per block) int grid=n/block; //gridDim (blocks per grid) //number of calls (to measure average performance) int loop=100; //allocate host memory float *a,*b,*c,*d; a=(float*)malloc(size); b=(float*)malloc(size); c=(float*)malloc(size); d=(float*)malloc(size); //fill the input vectors with random numbers srand(time(0)); for(int k=0; k<n; k++){ a[k]=(float)rand()/RAND_MAX*2-1; b[k]=(float)rand()/RAND_MAX*2-1; } //allocate device (graphics card) memory float *ga,*gb,*gc; cudaMalloc((void**)&ga, size); cudaMalloc((void**)&gb, size); cudaMalloc((void**)&gc, size); //load vectors a,b into device memory cudaMemcpy(ga, a, size, cudaMemcpyHostToDevice); cudaMemcpy(gb, b, size, cudaMemcpyHostToDevice); //---- part 1 : measure accuracy -------- //call the kernel to do the computation (GPU) gpu_add<<<grid, block>>>(gc, ga, gb, n); //call the ordinary function to do the computation (Host) host_add(d, a, b, n); //copy the result back to the host cudaMemcpy(c, gc, size, cudaMemcpyDeviceToHost); //compare the difference between the two printf("vector add N(%d) elements, diff = %g\n", n, diff(c,d,n)); //---- part 2 : measure performance -------- //measure GPU kernel performance double gpu_dt = ms_time(); for(int w=0; w<loop; w++){ gpu_add<<<grid, block>>>(gc, ga, gb, n); cudaThreadSynchronize(); //make sure the kernel has finished executing } gpu_dt = (ms_time()-gpu_dt)/loop; //average time //measure Host function performance double host_dt = ms_time(); for(int w=0; w<loop; w++){ host_add(d, a, b, n); } host_dt = (ms_time()-host_dt)/loop; //average time //print the average execution times printf("host time: %g ms\n", host_dt); printf("gpu time: %g ms\n", gpu_dt); //free host memory free(a); free(b); free(c); free(d); //free device memory cudaFree(ga); cudaFree(gb); cudaFree(gc); return 0; }
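The pair above is a direct hipify mapping: the triple-chevron launch gpu_add<<<grid, block>>>(...) in the .cu file becomes hipLaunchKernelGGL(( gpu_add), dim3(grid), dim3(block), 0, 0, ...) in the .hip file, and cudaThreadSynchronize() becomes hipDeviceSynchronize(). The benchmark itself times the kernel with the host clock() around a synchronize; the sketch below shows an alternative timing of the same loop with CUDA events, which measures only the device-side work. It is an illustration, not part of the dataset pair.

#include <cstdio>
#include <cuda_runtime.h>

// Event-based timing sketch (an alternative to the clock()+synchronize timing
// in the benchmark above). The arrays are left uninitialised because only the
// launch and kernel cost are being measured here.
__global__ void gpu_add(float* c, const float* a, const float* b, int n) {
  int j = blockIdx.x * blockDim.x + threadIdx.x;
  if (j < n) c[j] = a[j] + b[j];
}

int main() {
  const int n = 1024 * 1024, block = 512, grid = n / block, loop = 100;
  float *ga, *gb, *gc;
  cudaMalloc((void**)&ga, n * sizeof(float));
  cudaMalloc((void**)&gb, n * sizeof(float));
  cudaMalloc((void**)&gc, n * sizeof(float));
  cudaEvent_t start, stop;
  cudaEventCreate(&start);
  cudaEventCreate(&stop);
  cudaEventRecord(start);
  for (int w = 0; w < loop; w++) gpu_add<<<grid, block>>>(gc, ga, gb, n);
  cudaEventRecord(stop);
  cudaEventSynchronize(stop);                 // wait until the last kernel is done
  float ms = 0.0f;
  cudaEventElapsedTime(&ms, start, stop);     // milliseconds between the two events
  printf("gpu time: %g ms per launch\n", ms / loop);
  cudaEventDestroy(start); cudaEventDestroy(stop);
  cudaFree(ga); cudaFree(gb); cudaFree(gc);
  return 0;
}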
08e802bd0d345611b0df8e8ed5583190dbaf9040.hip
// !!! This is a file automatically generated by hipify!!! // nvcc -std=c++11 GPU_main.cu -o GPU_main -lrealsense2 -lboost_iostreams -lboost_system -lboost_filesystem `pkg-config opencv --cflags --libs` -lpthread -Wno-deprecated-gpu-targets #include <opencv2/opencv.hpp> #include <iostream> #include <fstream> #include <vector> #include <cmath> #include <unistd.h> #include <mutex> #include <thread> #include <atomic> #include <chrono> #include <time.h> #include <boost/tuple/tuple.hpp> #include "../include/Voxel.cuh" #include "../include/Logging.hpp" int main(int argc, char const *argv[]) { std::atomic_bool alive {true}; hipDeviceReset(); hipDeviceSetLimit(hipLimitPrintfFifoSize, 10ull*1024ull*1024ull); hipThreadSetLimit (hipLimitMallocHeapSize, 2048ull*1024ull*1024ull); /* Map Front End */ Map_FE * F = new GPU_FE(); /* Camera Initialization */ Camera C; Bool_Init bC = C.Init(); if (bC.t265 && bC.d435) std::cout << "Cameras initialized\n"; else std::cout << "Atleast one camera is not connected\n"; /* Logger Initialization */ Logger L; L.Init(); /* Thread for checking exit condition */ std::thread exit_check([&]() { while (alive) { if (std::cin.get() == ' ') { cv::destroyAllWindows(); alive = false; } } }); /* Thread for receiving frames and storing them as video and csv files */ std::thread rxFrame([&]() { while (alive) { auto sleep_start = std::chrono::high_resolution_clock::now(); auto tframe = C.pipelines[0].wait_for_frames(); auto dframe = C.pipelines[1].wait_for_frames(); auto t = tframe.first_or_default(RS2_STREAM_POSE); auto d = dframe.get_depth_frame(); if (!t || !d) continue; C.t_queue.enqueue(tframe); C.d_queue.enqueue(dframe); // sleep for remaining time auto time_sleep = std::chrono::high_resolution_clock::now() - sleep_start; double time_s = std::chrono::duration_cast<std::chrono::milliseconds>(time_sleep).count(); if ((1000.0/INPUT_RATE)-time_s > 0){ usleep((1000.0/INPUT_RATE-time_s) * 1000); } // std::cout << time_s << "\n"; } }); //bool en = false; rs2::frameset t_frameset, d_frameset; auto start = std::chrono::high_resolution_clock::now(); while (alive) { C.t_queue.poll_for_frame(&t_frameset); C.d_queue.poll_for_frame(&d_frameset); if (t_frameset && d_frameset) { auto depthFrame = d_frameset.get_depth_frame(); auto poseFrame = t_frameset.first_or_default(RS2_STREAM_POSE); cv::Mat depth(cv::Size(w, h), CV_16UC1, (void *)depthFrame.get_data(), cv::Mat::AUTO_STEP); auto pose = poseFrame.as<rs2::pose_frame>().get_pose_data(); /* update global map */ //if (!en) { F->Update (C, pose, depth); //en = true; //} /* */ auto elapsed = std::chrono::high_resolution_clock::now() - start; float microseconds = std::chrono::duration_cast<std::chrono::microseconds>(elapsed).count(); std::cout << microseconds << "\n"; L.Log(&C, &pose, &depth); } start = std::chrono::high_resolution_clock::now(); } rxFrame.join(); L.Close(&C, F); std::cout << "Program terminated sucessfully\n"; return 0; }
08e802bd0d345611b0df8e8ed5583190dbaf9040.cu
// nvcc -std=c++11 GPU_main.cu -o GPU_main -lrealsense2 -lboost_iostreams -lboost_system -lboost_filesystem `pkg-config opencv --cflags --libs` -lpthread -Wno-deprecated-gpu-targets #include <opencv2/opencv.hpp> #include <iostream> #include <fstream> #include <vector> #include <cmath> #include <unistd.h> #include <mutex> #include <thread> #include <atomic> #include <chrono> #include <time.h> #include <boost/tuple/tuple.hpp> #include "../include/Voxel.cuh" #include "../include/Logging.hpp" int main(int argc, char const *argv[]) { std::atomic_bool alive {true}; cudaDeviceReset(); cudaDeviceSetLimit(cudaLimitPrintfFifoSize, 10ull*1024ull*1024ull); cudaThreadSetLimit (cudaLimitMallocHeapSize, 2048ull*1024ull*1024ull); /* Map Front End */ Map_FE * F = new GPU_FE(); /* Camera Initialization */ Camera C; Bool_Init bC = C.Init(); if (bC.t265 && bC.d435) std::cout << "Cameras initialized\n"; else std::cout << "Atleast one camera is not connected\n"; /* Logger Initialization */ Logger L; L.Init(); /* Thread for checking exit condition */ std::thread exit_check([&]() { while (alive) { if (std::cin.get() == ' ') { cv::destroyAllWindows(); alive = false; } } }); /* Thread for receiving frames and storing them as video and csv files */ std::thread rxFrame([&]() { while (alive) { auto sleep_start = std::chrono::high_resolution_clock::now(); auto tframe = C.pipelines[0].wait_for_frames(); auto dframe = C.pipelines[1].wait_for_frames(); auto t = tframe.first_or_default(RS2_STREAM_POSE); auto d = dframe.get_depth_frame(); if (!t || !d) continue; C.t_queue.enqueue(tframe); C.d_queue.enqueue(dframe); // sleep for remaining time auto time_sleep = std::chrono::high_resolution_clock::now() - sleep_start; double time_s = std::chrono::duration_cast<std::chrono::milliseconds>(time_sleep).count(); if ((1000.0/INPUT_RATE)-time_s > 0){ usleep((1000.0/INPUT_RATE-time_s) * 1000); } // std::cout << time_s << "\n"; } }); //bool en = false; rs2::frameset t_frameset, d_frameset; auto start = std::chrono::high_resolution_clock::now(); while (alive) { C.t_queue.poll_for_frame(&t_frameset); C.d_queue.poll_for_frame(&d_frameset); if (t_frameset && d_frameset) { auto depthFrame = d_frameset.get_depth_frame(); auto poseFrame = t_frameset.first_or_default(RS2_STREAM_POSE); cv::Mat depth(cv::Size(w, h), CV_16UC1, (void *)depthFrame.get_data(), cv::Mat::AUTO_STEP); auto pose = poseFrame.as<rs2::pose_frame>().get_pose_data(); /* update global map */ //if (!en) { F->Update (C, pose, depth); //en = true; //} /* */ auto elapsed = std::chrono::high_resolution_clock::now() - start; float microseconds = std::chrono::duration_cast<std::chrono::microseconds>(elapsed).count(); std::cout << microseconds << "\n"; L.Log(&C, &pose, &depth); } start = std::chrono::high_resolution_clock::now(); } rxFrame.join(); L.Close(&C, F); std::cout << "Program terminated sucessfully\n"; return 0; }
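The rxFrame thread in the pair above rate-limits itself to INPUT_RATE by measuring how long the wait_for_frames calls took and sleeping only for the remainder of the frame period. Below is a small host-side sketch of that pacing pattern, using std::chrono throughout instead of the usleep call in the original; the rate constant and work function are placeholders.

#include <chrono>
#include <cstdio>
#include <thread>

// Pacing sketch: run do_work() at roughly TARGET_RATE_HZ iterations per second by
// sleeping only for whatever is left of each period, as the rxFrame thread above
// does around its frame grab. Names here are placeholders, not from the file above.
static const double TARGET_RATE_HZ = 30.0;

static void do_work(int i) { std::printf("iteration %d\n", i); }

int main() {
  using Clock = std::chrono::steady_clock;
  const auto period = std::chrono::duration<double, std::milli>(1000.0 / TARGET_RATE_HZ);
  for (int i = 0; i < 5; ++i) {
    const auto start = Clock::now();
    do_work(i);                                   // stands in for the frame grab + enqueue
    const auto elapsed = Clock::now() - start;
    if (elapsed < period)                         // sleep only for the remaining time
      std::this_thread::sleep_for(period - elapsed);
  }
  return 0;
}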
bd1c78597b34332284574229a3a89d7f44f0eecf.hip
// !!! This is a file automatically generated by hipify!!! ////////////////////////////////////////////////////////////////////////////////// /// /// @sandmanCUDA.cu /// @author Phil Bentley <[email protected] /// @version 1.0 /// /// @section LICENSE /// /// BSD 3-Clause License /// /// Copyright (c) 2016, Phil Bentley /// All rights reserved. /// /// Redistribution and use in source and binary forms, with or without /// modification, are permitted provided that the following conditions are met: /// /// * Redistributions of source code must retain the above copyright notice, this /// list of conditions and the following disclaimer. /// /// * Redistributions in binary form must reproduce the above copyright notice, /// this list of conditions and the following disclaimer in the documentation /// and/or other materials provided with the distribution. /// /// * Neither the name of the copyright holder nor the names of its /// contributors may be used to endorse or promote products derived from /// this software without specific prior written permission. /// /// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" /// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE /// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE /// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE /// FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL /// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR /// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER /// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, /// OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE /// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. /// /// /// /// @section DESCRIPTION /// /// Sandman is a ridiculously fast monte-carlo code for simulating /// polychromatic neutron beams. /// /// Sandman uses the math in neutron acceptance diagram shading /// (nads, which is monochromatic) to implement a monte-carlo method /// of ray tracing, by breaking up the simulation into two /// independent planes with finite phase space boundaries. This is /// significantly faster than 3D tracing plane intersections, even /// though it produces mathematically identical output. The /// limitation is that it can only simulate beams where the /// horizontal and vertical phase spaces are independent /// (e.g. rectangular neutron guides). /// /// This code provides to the user a shared library (.so) which you /// can install on your system. Thereafter, you create a sandman /// program that represents the instrument simulation and link this /// library. Calling the sandman class public functions creates a /// simulation of a neutron beam on an NVIDIA GPU using NVIDIA'S /// CUDA API. /// /// There are some fundamental differences between this code and /// existing codes at the time of writing. /// /// The geometry definition begins at the SAMPLE POSITION, and works /// backwards. This is for very good reason. Start with the phase /// space you need, and work from there. It's also orders of /// magnitude quicker in most cases to work like this. To handle /// this reverse tracing method, sandman's beam monitors and /// calculations have been specially written in a way to provide /// correct results in the backwards or forwards direction. 
For /// example, the beam monitor functions store a copy of the position /// of the neutrons, and mirror the divergence; then at the end of /// the calculation the statistical weight is calculated, so that /// the beam profile at that position matches the result that VITESS /// or MCSTAS would give you when simulating forwards. Nonetheless, /// there is nothing in the code that prevents you from doing a /// forwards simulation --- if you insist! Just define a sample /// with sandman that is the same size as the moderator, and a /// sandman moderator that is the same size as the instrument /// sample, and set the mirror image parameter in the relevant /// monitor functions to "false". /// ////////////////////////////////////////////////////////////////////////////////// #include <hiprand/hiprand.h> #include <helper_cuda.h> #include <hip/hip_runtime_api.h> #include <math.h> #include <fstream> #include <iostream> #include "../include/sandmanCUDA.h" #define DEBUG 1 // Physical Constants const static float thetaCritNickel=0.099138f; #define NICKEL_REFLECTIVITY 0.967f const static float thetaCritStandardLambda = 1.0f; const static int maxElements = 100000000; const static float deadWeight = 0.001f; const static float PI_FLOAT = 3.1415927f; // Define colours for terminal output text highlighting const static std::string color_red("\033[0;31m"); const static std::string color_green("\033[1;32m"); const static std::string color_yellow("\033[1;33m"); const static std::string color_cyan("\033[0;36m"); const static std::string color_magenta("\033[0;35m"); const static std::string color_reset("\033[0m"); std::string remove_extension(const std::string& filename) { size_t lastdot = filename.find_last_of("."); if (lastdot == std::string::npos) return filename; return (filename.substr(0, lastdot)); } __host__ __device__ static inline float radians2degrees(const float radians) { return(radians * 180.0f / PI_FLOAT); } __host__ __device__ static inline float degrees2radians(const float degrees) { return(degrees * PI_FLOAT / 180.0f); } __host__ __device__ static inline float square2circleFlux(const float num) { //Ratio of area of circle to area of square is PI/4 return ( num / (PI_FLOAT / 4.0f)); } __host__ __device__ static float elliptic_curve(const float xpos, const float fp1, const float fp2, const float maxWidth) { //An ellipse where the entrance width is specified //find centre of ellipse independent of order of fp1 and fp2 const float x0 = (fp1 + fp2) / 2.0f; //find the absolute focal point relative to x0 float f = fabsf((fp2 - fp1) / 2.0f); //b value is half the width float b = maxWidth / 2.0f; //return the y value of the curve float y2 = b*b - (xpos-x0)*(xpos-x0)*b*b/(f*f-b*b); return(sqrtf(fabsf(y2))); } __host__ __device__ static float parabolic_closing_curve(float xpos, float foc, float inw) { float x0; float ans; x0 = (2.0f * foc + sqrtf(4.0f * foc * foc + inw * inw)) / 4.0f; ans = 2.0f * sqrtf((foc - x0) * (xpos - x0)); return (ans); } __host__ __device__ static float parabolic_opening_curve(float xpos, float len, float foc, float outw) { float x0; float ans; x0 = (2.0f*foc + 2.0f*len - sqrtf(4.0f*foc*foc - 8.0f*foc*len + 4.0f*len*len + outw*outw))/4.0f; ans = 2.0f*sqrtf((foc - x0)*(-x0 + xpos)); return (ans); } __global__ static void global_countNeutrons0(float *numNeutrons, const float *weightH, const float *weightV, const int numElements) { __shared__ float sharedTotal; int i = blockIdx.x*blockDim.x + threadIdx.x; int tid = threadIdx.x; //Try doing this one neutron per thread, for fun - and 
simpler code ;) //Boss thread zeros the shared counter if(tid == 0) { sharedTotal=0.0f; } __syncthreads(); //Each thread adds the weight of its neutron to the shared total if(i<numElements) { //Any pair could actually be multiplied in this step, assuming //no horizontal and vertical correlations atomicAdd(&sharedTotal, weightH[i]*weightV[i]); } __syncthreads(); //Boss thread adds this total to the global total if(i<numElements); { if(tid == 0) { atomicAdd(&numNeutrons[0], sharedTotal); } } __syncthreads(); } // Array reduction routines. Tried many of these, some are faster than others. __device__ void blockReduce1(float *array) { // Interleaved addressing, reduction #1 from nvidia __shared__ float sharedTotal[512]; int tid = threadIdx.x; //Work in local shared memory copy sharedTotal[tid] = array[tid]; __syncthreads(); for(unsigned int s=1; s < SANDMAN_CUDA_THREADS; s*=2) { if(tid % (2*s) == 0) { sharedTotal[tid] += sharedTotal[tid +s]; } __syncthreads(); } //Write back to block master thread if(tid == 0) { array[0] = sharedTotal[0]; } } __device__ void blockReduce2(float *array) { // Interleaved addressing, reduction #2 from nvidia __shared__ float sharedTotal[512]; int tid = threadIdx.x; int index; //Work in local shared memory copy sharedTotal[tid] = array[tid]; __syncthreads(); for(unsigned int s=1; s < SANDMAN_CUDA_THREADS; s*=2) { index = 2 * s*tid; if(index < SANDMAN_CUDA_THREADS) { sharedTotal[index] += sharedTotal[index +s]; } __syncthreads(); } //Write back to block master thread if(tid == 0) { array[0] = sharedTotal[0]; } } __device__ void blockReduce3(float *array) { // Sequential addressing, reduction #3 from nvidia __shared__ float sharedTotal[512]; int tid = threadIdx.x; //Work in local shared memory copy sharedTotal[tid] = array[tid]; __syncthreads(); for(unsigned int s=SANDMAN_CUDA_THREADS/2; s > 0; s>>=1) { if(tid < s) { sharedTotal[tid] += sharedTotal[tid +s]; } __syncthreads(); } //Write back to block master thread if(tid == 0) { array[0] = sharedTotal[0]; } } __device__ void blockReduce4_DO_NOT_USE(float *array) { //DOES NOT WORK! There is a bug somewhere... 
// Sequential addressing plus uroll loops __shared__ float sharedTotal[512]; int tid = threadIdx.x; //Work in local shared memory copy sharedTotal[tid] = array[tid]; __syncthreads(); for(unsigned int s=SANDMAN_CUDA_THREADS/2; s > 32; s>>=1) { if(tid < s) { sharedTotal[tid] += sharedTotal[tid +s]; } __syncthreads(); } if(tid <= 32) { sharedTotal[tid] += sharedTotal[tid+32]; sharedTotal[tid] += sharedTotal[tid+16]; sharedTotal[tid] += sharedTotal[tid+8]; sharedTotal[tid] += sharedTotal[tid+4]; sharedTotal[tid] += sharedTotal[tid+2]; sharedTotal[tid] += sharedTotal[tid+1]; } //Write back to block master thread if(tid == 0) { array[0] = sharedTotal[0]; } __syncthreads(); } __global__ static void global_countNeutrons(float *numNeutrons, const float *weightH, const float *weightV, const float deltaLambda, const float *modFlux, const int numTraj) { //Shared memory per thread block We can just use 512 knowing that the number //of threads will be 128 or 256 or something __shared__ float sharedTotal[512]; int i = blockIdx.x*blockDim.x + threadIdx.x; int tid = threadIdx.x; const float nTraj = (float) numTraj; float element; //Try doing this one neutron per thread, for fun - and simpler code ;) //All threads zero their shared counter //sharedTotal[i]=0.0f; //__syncthreads(); //Probably not needed until the last moment before block reduction //Each thread adds the weight of its neutron to the shared total if(i<numTraj) { //sharedTotal[tid] = modFlux[i]*weightH[i]*weightV[i]/nElements; element = weightH[i] * weightV[i]; //in units of fractions of neutrons if(modFlux != NULL) { //Don't just calculate efficiency, user has specified a moderator. //In this case, each fractional neutron should be scaled by the //moderator brightness sampled by that trajectory, and then //normalised to the full simulation wavelength band and number of //trajectories if(nTraj > 0.0) element = element * deltaLambda * modFlux[i] / nTraj; else element = 0.0; } //if(isnan(element)) //element = 0.0; sharedTotal[tid] = element; __syncthreads(); // do block reduction on the shared memory using NVIDIA's tree method blockReduce1(sharedTotal); //Boss thread sums shared total and adds to the global total if(tid == 0) { atomicAdd(&numNeutrons[0], sharedTotal[0]); } __syncthreads(); } } __global__ static void global_countTrajectories(float *numNeutrons, const float *weightH, const float *weightV, const int numElements) { //Shared memory per thread block __shared__ float sharedTotal[512]; int i = blockIdx.x*blockDim.x + threadIdx.x; int tid = threadIdx.x; //Try doing this one neutron per thread, for fun - and simpler code ;) //All threads zero their shared counter //sharedTotal[i]=0.0f; //__syncthreads(); //Probably not needed until the last moment before block reduction //Each thread adds the weight of its neutron to the shared total if(i<numElements) { sharedTotal[tid] = weightH[i]*weightV[i]; //if(isnan(sharedTotal[tid])) //sharedTotal[tid] = 0.0; __syncthreads(); // do block reduction on the shared memory using NVIDIA's tree method blockReduce3(sharedTotal); //Boss thread sums shared total and adds to the global total if(tid == 0) { atomicAdd(&numNeutrons[0], sharedTotal[0]); } __syncthreads(); } } ///Compresses the beam into a single virtual channel of the bender, to ///model a multi-channel bender by a single channel process (n ///channels would have n branches otherwise). 
__global__ static void global_squeezeBenderChannel(float *ypos, float *channelNumber, const float width, const float channelWidth, const float waferThickness, const int numElements) { int i = blockIdx.x*blockDim.x + threadIdx.x; float relativeY; if(i<numElements) { relativeY = ypos[i] + width/2.0f; //The 'channelWidth' is defined as the empty space plus one //wafer thickness //Which channel does the neutron hit? Channel number starts at zero channelNumber[i] = floorf( relativeY / channelWidth ); //Then we adjust the position to be within a single channel of //the right thickness for the OPTICS ypos[i] = relativeY; ypos[i] = ypos[i] / (channelNumber[i]+1.0f); ypos[i] = ypos[i] - 0.5f * (channelWidth-waferThickness); } } ///Reverses the compression of the beam into a single channel of the ///bender, to model a multi-channel bender by a single channel process ///(n channels would have n branches otherwise). __global__ static void global_unSqueezeBenderChannel(float *ypos, const float *channelNumber, const float width, const float channelWidth, const float waferThickness, const int numElements) { int i = blockIdx.x*blockDim.x + threadIdx.x; //float preY=0.0; if(i<numElements) { //float preY = ypos[i]; //Device reverses the position adjustment of sandSqueeze... ypos[i] = ypos[i] + 0.5f * (channelWidth - waferThickness); ypos[i] = ypos[i] * (channelNumber[i]+1.0f); ypos[i] = ypos[i] - width/2.0f; } } __global__ static void global_copyArray(const float *source, float *destination, const int numElements, const bool invert) { // Copies the values from one array to another int i = blockIdx.x*blockDim.x + threadIdx.x; //These nested conditionals do not cause branching problems because all //threads evaluate the same path if(i<numElements) { if(invert) destination[i] = -source[i]; else destination[i] = source[i]; } } __host__ __device__ static float maxwellian(const float brightness0, const float tempK, const float lambda_A) { //Describes a maxwellian curve based on the given parameters. 
This //maxwellian is the same curve as used by existing codes, so it //should agree with those (nads, mcstas) //Defined __host__ __device__ so it can be unit tested if required const float h = 6.626076E-34; const float m = 1.6749284E-27; const float k = 1.380662E-23; const float a=(1.0E10*1.0E10*h*h)/(2.0*k*m*tempK); return( brightness0*2.0*a*a*exp(-a/(lambda_A*lambda_A))/pow(lambda_A,5.0) ); } __host__ __device__ static float psiMODERATOR(const float lambda_A) { //Defined __host__ __device__ so it can be unit tested if required return( maxwellian(4.035E12f, 103.97f, lambda_A) + maxwellian(2.503E12, 25.56f, lambda_A) + maxwellian(1.399E13, 298.411f, lambda_A) ); } __host__ __device__ static float illHCS(const float lambda_A) { //Defined __host__ __device__ so it can be unit tested if required return( maxwellian(2.78E13f, 40.1f, lambda_A) + maxwellian(3.44E13, 145.8f, lambda_A) + maxwellian(1.022E13, 413.5f, lambda_A) ); } __global__ static void global_sandILLHCSModerator(float *d_modFluxH, float *d_weightH, const float *d_lambdag, const float *d_pointsYH, const int numElements) { // Calculates the total emitted neutron current represented by this // trajectory, based on its interception with one moderator surace // characterised by a single temperature temp, width width, positioned with // an offset hoffset, and a brightness num float ymax, ymin; ymax = 0.206f/2.0f; ymin = -ymax; int i = blockIdx.x*blockDim.x + threadIdx.x; if(i < numElements) { //The sample module assigns the scaling factor related to solid angle, now we do moderator brightness if(d_modFluxH[i] < 10.0f) { //That check means we did not already calculate the flux, so we need to do it now: d_modFluxH[i] = d_modFluxH[i] * illHCS(d_lambdag[i]); } //Modify the weight if the neutron misses For one moderator, it is an easy //window For multiple moderators, we need to set the weight to the initial //value, then add multiples of that to an initially zeroed accumulator if(d_pointsYH[i] > ymax || d_pointsYH[i] < ymin) { d_weightH[i] = 0.0; } } } __global__ static void global_sandPSIModerator(float *d_modFluxH, float *d_weightH, const float *d_lambdag, const float *d_pointsYH, const int numElements) { // Calculates the total emitted neutron current represented by this // trajectory, based on its interception with one moderator surace // characterised by a single temperature temp, width width, positioned with // an offset hoffset, and a brightness num float ymax, ymin; ymax = 0.206f/2.0f; ymin = -ymax; int i = blockIdx.x*blockDim.x + threadIdx.x; if(i < numElements) { //The sample module assigns the scaling factor related to solid angle, now we do moderator brightness if(d_modFluxH[i] < 10.0f) { //That check means we did not already calculate the flux, so we need to do it now: d_modFluxH[i] = d_modFluxH[i] * psiMODERATOR(d_lambdag[i]); } //Modify the weight if the neutron misses For one moderator, it is an easy //window For multiple moderators, we need to set the weight to the initial //value, then add multiples of that to an initially zeroed accumulator if(d_pointsYH[i] > ymax || d_pointsYH[i] < ymin) { d_weightH[i] = 0.0; } } } __global__ static void global_sandModerator1(float *d_modFluxH, float *d_weightH, const float *d_lambdag, const float *d_pointsYH, const int numElements, const float width, const float hoffset, const float temp, const float num) { // Calculates the total emitted neutron current represented by this // trajectory, based on its interception with one moderator surace // characterised by a single temperature temp, 
width width, positioned with // an offset hoffset, and a brightness num float ymax, ymin; ymax = hoffset + width/2.0; ymin = hoffset - width/2.0; int i = blockIdx.x*blockDim.x + threadIdx.x; //The sample module assigns the scaling factor related to solid angle, now we do moderator brightness if(d_modFluxH[i] < 10.0f) { //That check means we did not already calculate the flux, so we need to do it now: d_modFluxH[i] = d_modFluxH[i] * maxwellian(num, temp, d_lambdag[i]); } //Modify the weight if the neutron misses For one moderator, it is an easy //window For multiple moderators, we need to set the weight to the initial //value, then add multiples of that to an initially zeroed accumulator if(d_pointsYH[i] > ymax || d_pointsYH[i] < ymin) { d_weightH[i] = 0.0; } } __global__ static void global_sandBrillianceTransferModerator(float *d_modFluxH, float *d_weightH, const float *d_lambdag, const float *d_pointsYH, const int numElements, const float width, const float hoffset) { // Simple brilliance transfer moderator float ymax, ymin; ymax = hoffset + width/2.0; ymin = hoffset - width/2.0; int i = blockIdx.x*blockDim.x + threadIdx.x; //Modify the weight if the neutron misses if(d_pointsYH[i] > ymax || d_pointsYH[i] < ymin) { d_weightH[i] = 0.0; } d_modFluxH[i] = 1.0; } __global__ static void global_sandSampleCUDA(float *d_pointsY, float *d_pointsTheta, float *d_weight, const float *d_r1, const float *d_r2, const float ox, const float oy, const float v1x, const float v2x, const float v2y, const int numElements) { int i = blockIdx.x*blockDim.x + threadIdx.x; if(i<numElements) { d_pointsY[i] = oy + d_r2[i]*v2y; d_pointsTheta[i] = ox + d_r1[i]*v1x + d_r2[i]*v2x; d_weight[i] = 1.0; } } __global__ static void global_initArray(float *d_array, const float value, const int numElements) { int i = blockIdx.x*blockDim.x + threadIdx.x; if(i<numElements) { d_array[i] = value; } } __global__ static void global_sandZeroHistogram1D(float d_histogram[100]) { int i = blockIdx.x*blockDim.x + threadIdx.x; if(i<100) { { d_histogram[i] = 0.0f; } } } __global__ static void global_sandZeroHistogram2D(float d_histogram[100][100]) { int i = blockIdx.x*blockDim.x + threadIdx.x; int j; if(i<100) { for(j=0; j<100; j++) { d_histogram[i][j] = 0.0f; } } } __global__ static void global_sandSkewCUDA(float *d_pointsY, const float *d_pointsTheta, const float distance_m, const int numElements) { int i = blockIdx.x*blockDim.x + threadIdx.x; if(i<numElements) { // Ignore dead neutrons //if(d_weight[i] > DEAD_WEIGHT) { d_pointsY[i] = d_pointsY[i] + distance_m * d_pointsTheta[i]; } } } __global__ static void global_rotation(float *d_pointsTheta, const float angle_radians, const int numElements) { int i = blockIdx.x*blockDim.x + threadIdx.x; if(i<numElements) { // Ignore dead neutrons //if(d_weight[i] > DEAD_WEIGHT) { d_pointsTheta[i] = d_pointsTheta[i] - angle_radians; } } } __global__ static void global_translation(float *d_pointsY, const float distance_m, const int numElements) { int i = blockIdx.x*blockDim.x + threadIdx.x; if(i<numElements) { // Ignore dead neutrons //if(d_weight[i] > DEAD_WEIGHT) { d_pointsY[i] = d_pointsY[i] - distance_m; } } } __global__ static void global_roll_phase_space(float *d_hY, float *d_hQ, float *d_hw, float *d_vY, float *d_vQ, float *d_vw, const float theta, const int numElements) { // Rotates and mixes the phase space to simulate a rotation of the coordinate system around the beam axis // Once thought to be impossible, now horizontal and vertical phase space is mixed. 
FTW int i = blockIdx.x*blockDim.x + threadIdx.x; const float thrad = degrees2radians(theta); const float cth = cosf(thrad); const float sth = sinf(thrad); const float c2 = cth*cth; const float s2 = sth*sth; float vposp =0.0; float hposp =0.0; float vthp = 0.0; float hthp = 0.0; float vwp = 0.0; float hwp = 0.0; if(i<numElements) { // Ignore dead neutrons //if(d_weight[i] > DEAD_WEIGHT) { //Calculate the new positions after the rotation vposp = d_vY[i] * cth - d_hY[i] * sth; hposp = d_hY[i] * cth + d_vY[i] * sth; //Calculate the new divergences after the rotation vthp = d_vQ[i] * cth - d_hQ[i] * sth; hthp = d_hQ[i] * cth + d_vQ[i] * sth; //Calculate the new weights after the rotation vwp = d_vw[i] * c2 + d_hw[i] * s2; hwp = d_hw[i] * c2 + d_vw[i] * s2; //Copy all the results back into the arrays in place d_vY[i] = vposp; d_hY[i] = hposp; d_vQ[i] = vthp; d_hQ[i] = hthp; d_vw[i] = vwp; d_hw[i] = hwp; } } } __device__ inline static float low_pass_filter(const float value, const float cutOff) { // This function uses approximation to heaviside function with approximate // tanh running on hardware to avoid a branching if statement. Important // for thread divergence. return( 0.5f + 0.5f*tanh(2000.0f*(-value+cutOff))); } __device__ inline static float high_pass_filter(const float value, const float cutOff) { // High pass filter. values greater than cutOff have > 0 return value // This function uses approximation to heaviside function with approximate // tanh running on hardware to avoid a branching if statement. Important // for thread divergence. return( 0.5f + 0.5f*tanh(2000.0f*(value-cutOff))); } __global__ static void global_collimation(float *d_weight, const float *d_pointsTheta, const float lower_angle, const float upper_angle, const int numElements) { int i = blockIdx.x*blockDim.x + threadIdx.x; if(i<numElements) { // Ignore dead neutrons //if(d_weight[i] > DEAD_WEIGHT) { //Filter off lower points d_weight[i] = d_weight[i] * high_pass_filter(d_pointsTheta[i], lower_angle); //Filter off higher points d_weight[i] = d_weight[i] * low_pass_filter(d_pointsTheta[i], upper_angle); } } } __global__ static void global_aperture(float *d_weight, const float *d_pointsY, const float lower_position, const float upper_position, const int numElements) { int i = blockIdx.x*blockDim.x + threadIdx.x; if(i<numElements) { // Ignore dead neutrons //if(d_weight[i] > DEAD_WEIGHT) { //Filter off lower points // d_weight[i] = d_weight[i] * high_pass_filter(d_pointsY[i], lower_position); if(d_pointsY[i] < lower_position) d_weight[i] = 0.0; //Filter off higher points //d_weight[i] = d_weight[i] * low_pass_filter(d_pointsY[i], upper_position); if(d_pointsY[i] > upper_position) d_weight[i] = 0.0; // if(isnan(d_weight[i])) //{ // printf("NaN encountered. d_pointsY[i] = %f, lower_pos=%f, upper_pos=%f\n", d_pointsY[i], lower_position, upper_position); //} } } } __global__ static void global_beamstop(float *d_weight, const float *d_pointsY, const float lower_position, const float upper_position, const int numElements) { int i = blockIdx.x*blockDim.x + threadIdx.x; if(i<numElements) { if(d_pointsY[i] > lower_position && d_pointsY[i] < upper_position) { d_weight[i] = 0.0; } } } __host__ __device__ inline float device_criticalReflectivity(float mValue) { //Data taken from swiss neutronics. approximates the correct m value using a quadratic fit to their data return(-0.01288f*mValue*mValue+0.98f); } __host__ __device__ inline float device_critical_theta(const float wavln, /**< Wavelength of incident neutrons. 
*/ const float mValue)/**< m value of the surface. */ { float ans; ans = wavln * mValue / thetaCritStandardLambda; ans = degrees2radians(ans); ans = ans * thetaCritNickel; return( ans); } __host__ __device__ float device_reflectivity_slow(const float theta_rads, /**< Angle of incidence in radians. */ const float lambda, const float mValue) /**< m value of reflecting surface. */ { //m=1 critical angle const float thetaCritM1 = device_critical_theta(lambda, 1.0f); //general critical angle const float thetaCrit = device_critical_theta(lambda, mValue); const float dist = fabsf(theta_rads); float attn0; float attnGrad; float ans; if(dist < thetaCritM1) { //Flat at low angles below m=1 ans = device_criticalReflectivity(1.0); } else if(dist < thetaCrit) { //linear decay to the knee value above m=1 attnGrad = (device_criticalReflectivity(mValue) - device_criticalReflectivity(1.0)) / (thetaCrit - thetaCritM1); attn0 = device_criticalReflectivity(1.0) - attnGrad*thetaCritM1; ans = attn0 + attnGrad * dist; } else { ans = 0.0f; } return(ans); } __device__ float device_reflectivity(const float theta_rads, /**< Angle of incidence in radians. */ const float lambda, const float mValue) /**< m value of reflecting surface. */ { //m=1 critical angle const float thetaCritM1 = device_critical_theta(lambda, 1.0f); //general critical angle const float thetaCrit = device_critical_theta(lambda, mValue); const float dist = fabsf(theta_rads); float attn0; float attnGrad; float ans=NICKEL_REFLECTIVITY; if(dist > thetaCritM1) { attnGrad=(device_criticalReflectivity(mValue) - NICKEL_REFLECTIVITY) / (thetaCrit - thetaCritM1); attn0 = NICKEL_REFLECTIVITY - attnGrad*thetaCritM1; ans = attn0 + attnGrad * dist; } //Multiply by low pass above thetaCrit if(dist > thetaCrit) { ans = 0.0f; } return(ans); } __device__ static float device_attenuate_alpha(const float valpha, const float lambda, const float theta, const float mValue) { //Attenuates the opacity of a vertex based on its divergence angle return (valpha * device_reflectivity(theta, lambda, mValue)); } __global__ static void global_sandAllocateWavelength(float *d_lambdaH, const float *d_r1g, const float lambda1, const float deltaLambda, const int numElements) { int i = blockIdx.x*blockDim.x + threadIdx.x; if(i<numElements) { d_lambdaH[i] = lambda1 + d_r1g[i]*deltaLambda; //FMA this later } } __global__ static void global_lambdaMonitor(float *lambdaHist, const float lambdaMin, const float dLambda, int histSize, const float *lambda, const float *weightH, const float *weightV, const float *d_modflux, const float sourceDeltaLambda, const int numElements) { __shared__ float sharedLambdaHist[100]; int targetBin; int j; int i = blockIdx.x*blockDim.x + threadIdx.x; int tid = threadIdx.x; float element; //Boss thread zeros the shared counter if(tid == 0) { for(j=0; j<100 && j<histSize; j++) { sharedLambdaHist[j] = 0.0f; } } __syncthreads(); //Each thread adds the weight of its neutron to the shared total if(i<numElements) { //targetBin = (int) roundf(-0.5f + (lambda[i] - lambdaMin)/dLambda ); //targetBin = (int) rintf(-0.5f + (lambda[i] - lambdaMin)/dLambda ); targetBin = (int) rintf((lambda[i] - lambdaMin)/dLambda); //This function agrees with VITESS "normalise with binsize = no" //be certain to send non-zero dLambda to this function! 
element = weightH[i] * weightV[i];// / dLambda; // in units of fractions of trajectory per angstrom //Normalise to wavelength range element = element * sourceDeltaLambda; //in units of fractions of trajectory if(d_modflux != NULL) { element = element * d_modflux[i] / (float)numElements; //in units of neutrons per second } //if(isnan(element)) //element = 0.0; if( (targetBin >= 0) && (targetBin < 100) && (targetBin < histSize) ) { atomicAdd(&sharedLambdaHist[targetBin], element); } } __syncthreads(); //Boss thread adds this total to the global total if(i<numElements); { if(tid == 0) { for(j=0; j<100 && j<histSize; j++) atomicAdd(&lambdaHist[j], sharedLambdaHist[j]); } } __syncthreads(); } __global__ static void global_arrayMinimum(const float *array, float globalMin[1], const int numElements) { __shared__ float sharedMin; // This function DOES NOT WORK YET! There is a race condition int i = blockIdx.x*blockDim.x + threadIdx.x; int tid = threadIdx.x; int bid = blockIdx.x; //Boss thread in first block initialises the global memory if(tid == 0 && bid == 0) { globalMin[0] = array[i]; } __syncthreads(); //Boss thread in warp initialises the shared memory if(tid == 0) { sharedMin = array[i]; } __syncthreads(); //Each thread checks it's value against the shared minimum, and overwrites it if necessary if(i < numElements) { //This has to be handled correctly, otherwise there is a race condition //at this point - the if statement is not synchronised and it overwrites if(array[i] < sharedMin) atomicExch(&sharedMin, array[i]); } __syncthreads(); //Boss thread overwrites global total if necessary if(i < numElements); { if(tid == 0) { if(sharedMin < globalMin[0]); atomicExch(&globalMin[0], sharedMin); } } __syncthreads(); } __global__ static void global_arrayMaximum(const float *array, float globalMax[1], const int numElements) { __shared__ float sharedMax; // This function DOES NOT WORK YET! 
There is a race condition int i = blockIdx.x*blockDim.x + threadIdx.x; int tid = threadIdx.x; int bid = blockIdx.x; //Boss thread in first block initialises the global memory if(tid == 0 && bid == 0) { globalMax[0] = array[i]; } __syncthreads(); //Boss thread initialises the shared counter if(tid == 0) { sharedMax = array[i]; } __syncthreads(); if(i < numElements) { //This has to be handled correctly, otherwise there is a race condition //at this point - the if statement is not synchronised and it overwrites if(array[i] > sharedMax) { __syncthreads(); atomicExch(&sharedMax, array[i]); } } __syncthreads(); //Boss thread in warp overwrites global total if necessary if(i < numElements); { if(tid == 0) { if(sharedMax > globalMax[0]); { __syncthreads(); atomicExch(&globalMax[0], sharedMax); } } } __syncthreads(); } __global__ static void global_Monitor1D(float *globalHist, const float min, const float dval, int histSize, const float *array, const float *weight, const int numElements) { __shared__ float sharedHist[100]; int targetBin; int j; int i = blockIdx.x*blockDim.x + threadIdx.x; int tid = threadIdx.x; //Try doing this one neutron per thread, for fun - and simpler code ;) // THIS IS SLOW, we need a faster, slightly more complex way //Boss thread zeros the shared counter if(tid == 0) { for(j=0; j<100 && j<histSize; j++) { sharedHist[j] = 0.0f; } } __syncthreads(); //Each thread adds the weight of its neutron to the shared total if(i<numElements) { //Add horizontal bit //targetBin = roundf( (array[i] - min)/dval ); targetBin = rintf( (array[i] - min)/dval ); atomicAdd(&sharedHist[targetBin], weight[i]); } __syncthreads(); //Boss thread adds this total to the global total if(i<numElements); { if(tid == 0) { for(j=0; j<100 && j<histSize; j++) atomicAdd(&globalHist[j], sharedHist[j]); } } __syncthreads(); } __global__ static void global_rebinnedPhaseSpace(float globalHist[100][100], const float *d_pointsY, const float *d_pointsTheta, const float yMin, const float dy, const float thetaMin, const float dtheta, int histSize, const float *d_weight, const int numElements) { int targetBinY, targetBinTheta; int i = blockIdx.x*blockDim.x + threadIdx.x; if(i < numElements) { //targetBinY = roundf( (d_pointsY[i] - yMin)/dy ); //targetBinTheta = roundf( (d_pointsTheta[i] - thetaMin)/dtheta ); targetBinY = rintf( (d_pointsY[i] - yMin)/dy ); targetBinTheta = rintf( (d_pointsTheta[i] - thetaMin)/dtheta ); if(targetBinY >= 0 && targetBinY < 100 && targetBinY < histSize) { if(targetBinTheta >= 0 && targetBinTheta < 100 && targetBinTheta < histSize) { //if(!isnan(d_weight[i])) atomicAdd(&globalHist[targetBinTheta][targetBinY], d_weight[i]); } } } } __global__ static void global_sandReflection(float *d_pointsY, float *d_pointsTheta, const float *d_lambda, float *d_weight, const float mirrorYtop, const float mirrorYbottom, const float mirrorAngleTop, const float mirrorAngleBottom, const float mTop, const float mBottom, const int numElements) { int i = blockIdx.x*blockDim.x + threadIdx.x; bool finished=false; float mval, mirrorAngle, mirrorY; // The next bit of code loops over all particles until they are no longer // reflected in the mirror(s). The way it is written at the moment is that // it keeps looping over the same particle until it is finished. An // alternative way might be that each thread handles a single reflection // case, and a shared bool keeps all threads going until no particles are // reflected. It might be the same speed, but I think this way is faster, // particularly with CUDA. 
if(i<numElements) { // Don't try to ignore dead neutrons here - it creates NaNs in d_pointsY[i] //if(d_weight[i] > deadWeight) //{ do { finished=true; /* Reflect in the upper plane? */ if(d_pointsY[i] > mirrorYtop) { mval = mTop; mirrorAngle = mirrorAngleTop; mirrorY = mirrorYtop; finished = false; } /* Are we in the lower plane? */ if(d_pointsY[i] < mirrorYbottom) { mval = mBottom; mirrorAngle = mirrorAngleBottom; mirrorY = mirrorYbottom; finished = false; } /* Do we need to do slow work? */ if(finished == false) { d_weight[i] = device_attenuate_alpha(d_weight[i], d_lambda[i], fabsf(d_pointsTheta[i] - mirrorAngle), mval); d_pointsTheta[i] = 2.0*mirrorAngle - d_pointsTheta[i]; /* reflection in Y */ /* pointsY[i] = mirrorY - (pointsY[i] - mirrorY); */ d_pointsY[i] = 2.0*mirrorY - d_pointsY[i]; } } while (finished == false); } } void Sandman::allocateArrays(void) { /// /// Private function to allocate arrays on the GPU for the instance of the /// sandman class. Must be called by constructors. /// std::cout << "\tAllocating arrays" << std::endl; //Initialise random number generator std::cout << "\t\tCreating random number generator on GPU" << std::endl; checkCudaErrors(hiprandCreateGenerator(&prngGPU, HIPRAND_RNG_PSEUDO_MTGP32)); checkCudaErrors(hiprandSetPseudoRandomGeneratorSeed(prngGPU, seed)); std::cout << "\t\tAllocating array pointers on device" << std::endl; //Allocate device memory for random numbers checkCudaErrors(hipMalloc((void **)&d_r1g, numElements * sizeof(float))); checkCudaErrors(hipMalloc((void **)&d_r2g, numElements * sizeof(float))); //Allocate device memory for horizontal phase space checkCudaErrors(hipMalloc((void **)&d_pointsYH, numElements * sizeof(float))); checkCudaErrors(hipMalloc((void **)&d_pointsThetaH, numElements * sizeof(float))); checkCudaErrors(hipMalloc((void **)&d_weightHg, numElements * sizeof(float))); checkCudaErrors(hipMalloc((void **)&d_lambdag, numElements * sizeof(float))); //Allocate device memory for vertical phase space checkCudaErrors(hipMalloc((void **)&d_pointsYV, numElements * sizeof(float))); checkCudaErrors(hipMalloc((void **)&d_pointsThetaV, numElements * sizeof(float))); checkCudaErrors(hipMalloc((void **)&d_weightVg, numElements * sizeof(float))); //Allocate device memory for temporary array (bender channel number, other funcs) checkCudaErrors(hipMalloc((void **)&d_tempArray, numElements * sizeof(float))); //Allocate arrays for histograms checkCudaErrors(hipMalloc((void **)&d_histogram2D, 100*100* sizeof(float))); checkCudaErrors(hipMalloc((void **)&d_histogram1D, 100* sizeof(float))); //Moderator brightness curve if(d_modFlux == NULL) checkCudaErrors(hipMalloc((void **)&d_modFlux, numElements * sizeof(float))); if(d_modFlux == NULL) { std::cerr << color_red << "ERROR:" << color_reset << " failure to allocate memory for moderator brightness curve" << std::endl; exit(1); } } Sandman::Sandman(const bool& verbose) { /// /// Constructor, which will generate 100 trajectories and use the standard /// random seed of 777. 
/// numElements = 100; int nDevices; flux = -1.0; eFlux = -1.0; traj = -1.0; eTraj = -1.0; showCUDAsteps=false; if(verbose) showCUDAsteps=true; displayWelcome(); std::cout << color_yellow << "INITIALISING" << color_reset << std::endl; hipGetDeviceCount(&nDevices); for (int i = 0; i < nDevices; i++) { hipDeviceProp_t prop; hipGetDeviceProperties(&prop, i); printf("\tDevice Number: %d\n", i); printf("\t\tDevice name: %s\n", prop.name); printf("\t\tMemory Clock Rate (KHz): %d\n", prop.memoryClockRate); printf("\t\tMemory Bus Width (bits): %d\n", prop.memoryBusWidth); printf("\t\tPeak Memory Bandwidth (GB/s): %f\n\n", 2.0*prop.memoryClockRate*(prop.memoryBusWidth/8)/1.0e6); } std::cout << "\tAllocating arrays" << std::endl; allocateArrays(); } Sandman::Sandman(const int nE, const bool& verbose) { /// /// Constructor, which will generate 100 trajectories and use the standard /// random seed of 777. /// @param nE an integer parameter to define how many /// trajectories should be generated. /// \todo Check that the number of /// trajectories does not exceed available GPU memory /// numElements = nE; int nDevices; flux = -1.0; eFlux = -1.0; traj = -1.0; eTraj = -1.0; showCUDAsteps=false; if(verbose) showCUDAsteps=true; displayWelcome(); std::cout << color_yellow << "INITIALISING" << color_reset << std::endl; hipGetDeviceCount(&nDevices); for (int i = 0; i < nDevices; i++) { hipDeviceProp_t prop; hipGetDeviceProperties(&prop, i); printf("\tDevice Number: %d\n", i); printf("\t\tDevice name: %s\n", prop.name); printf("\t\tMemory Clock Rate (KHz): %d\n", prop.memoryClockRate); printf("\t\tMemory Bus Width (bits): %d\n", prop.memoryBusWidth); printf("\t\tPeak Memory Bandwidth (GB/s): %f\n\n", 2.0*prop.memoryClockRate*(prop.memoryBusWidth/8)/1.0e6); } std::cout << "\tAllocating arrays" << std::endl; allocateArrays(); } Sandman::~Sandman(void) { /// /// Destructor /// First launches histogram code, then cleans up memory. /// std::cout << color_yellow << "CLEANING UP" << color_reset << std::endl; std::cout << "\tShutting down sandman." 
<< std::endl; if(d_lambdaMonHist != NULL) { executeLambdaMonitor(); checkCudaErrors(hipFree(d_lambdaMonHist)); } if(d_pointsThetaHsnapshot != NULL && d_pointsYHsnapshot != NULL) { executePhaseSpaceMapH(); checkCudaErrors(hipFree(d_pointsThetaHsnapshot)); checkCudaErrors(hipFree(d_pointsYHsnapshot)); } if(d_pointsThetaVsnapshot != NULL && d_pointsYVsnapshot != NULL) { executePhaseSpaceMapV(); checkCudaErrors(hipFree(d_pointsThetaVsnapshot)); checkCudaErrors(hipFree(d_pointsYVsnapshot)); } std::cout << "\tFreeing up device memory" << std::endl; if(d_r1g != NULL) checkCudaErrors(hipFree(d_r1g)); if(d_r2g != NULL) checkCudaErrors(hipFree(d_r2g)); if(d_pointsYH != NULL) checkCudaErrors(hipFree(d_pointsYH)); if(d_pointsThetaH != NULL) checkCudaErrors(hipFree(d_pointsThetaH)); if(d_pointsYV != NULL) checkCudaErrors(hipFree(d_pointsYV)); if(d_pointsThetaV != NULL) checkCudaErrors(hipFree(d_pointsThetaV)); if(d_lambdag != NULL) checkCudaErrors(hipFree(d_lambdag)); if(d_weightHg != NULL) checkCudaErrors(hipFree(d_weightHg)); if(d_weightVg != NULL) checkCudaErrors(hipFree(d_weightVg)); if(d_tempArray != NULL) checkCudaErrors(hipFree(d_tempArray)); if(d_histogram1D != NULL) checkCudaErrors(hipFree(d_histogram1D)); if(d_histogram2D != NULL) checkCudaErrors(hipFree(d_histogram2D)); if(d_modFlux != NULL) checkCudaErrors(hipFree(d_modFlux)); std::cout << "\tShutting down random generator" << std::endl; checkCudaErrors(hiprandDestroyGenerator(prngGPU)); report(); } void Sandman::report(void) { /// /// Generates report of results /// std::cout << color_yellow << "FINAL REPORT" << color_reset << std::endl; std::cout << "\tNeutron counter:" << std::endl; std::cout << "\t\tGot " << color_green << flux << color_reset << " neutrons per second total current" << std::endl; if(traj > 0.0f) { std::cout << "\tTrajectory counter:" << std::endl; std::cout << "\t\tGot " << traj << " trajectories having started with " << numElements << std::endl; std::cout << "\t\tEfficiency = " << 100*traj/numElements << "%" << std::endl; } } void Sandman::generateBothRandomArrays(void) { /// /// Generates random numbers on both array buffer. Use case: subsequent /// random generation of theta and y values in phase space map. /// generateRandomArray(d_r1g); generateRandomArray(d_r2g); } void Sandman::generateOneRandomArray(void) { /// /// Generates random numbers on only the first array buffers. Use case: /// subsequent generation of wavelength values. /// generateRandomArray(d_r1g); } void Sandman::sandCountNeutrons(void) { /// /// Integrates over all trajectories to estimate the total neutron current. 
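  ///
  /// \note Schematically, the counter is assumed to accumulate something of
  /// the form
  ///     flux ~ sum_i weightH_i * weightV_i * modFlux_i * sourceDeltaLambda / numElements
  /// i.e. the product of the horizontal and vertical statistical weights of
  /// each trajectory, scaled by its sampled moderator brightness; the exact
  /// normalisation lives in global_countNeutrons and is not reproduced here.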
/// /// @param nSum pointer to single host memory float to store answer /// @param nSumErr pointer to single host memory float for statistical /// error on total /// /// \todo Either provide or remove nSum nSumErr functionality /// float *d_nSum; float h_nSum[1]; //count, error that way we have one memory transfer for everything std::cout << color_yellow << "NEUTRON COUNTER" << color_reset << std::endl; checkCudaErrors(hipMalloc((void **)&d_nSum, sizeof(float))); int threadsPerBlock = SANDMAN_CUDA_THREADS; int blocksPerGrid =(numElements + threadsPerBlock - 1) / threadsPerBlock; if(showCUDAsteps) { printf("\tCUDA kernel count neutrons with %d blocks of %d threads\n", blocksPerGrid, threadsPerBlock); } // Zero the count on the host h_nSum[0] = 0.0f; // Copy the zero total to device memory checkCudaErrors(hipMemcpy(d_nSum, h_nSum, sizeof(float), hipMemcpyHostToDevice)); // static void global_countNeutrons(float *numNeutrons, const float *weightH, const float *weightV, const float *modFlux, const int numElements) hipLaunchKernelGGL(( global_countNeutrons), dim3(blocksPerGrid), dim3(threadsPerBlock), 0, 0, d_nSum, d_weightHg, d_weightVg, sourceDeltaLambda, d_modFlux, numElements); //Copy total out of device memory for host reporting checkCudaErrors(hipMemcpy(h_nSum, d_nSum, sizeof(float), hipMemcpyDeviceToHost)); flux = *h_nSum; // eFlux = *d_nSum; } void Sandman::sandCountNeutronsSquareCorrected() { /// /// Integrates over all trajectories to estimate the total neutron current, /// and divides by Pi/2 to normalise for square window beam area /// /// @param nSum pointer to single host memory float to store answer /// @param nSumErr pointer to single host memory float for statistical /// error on total /// /// \todo Either provide or remove nSum nSumErr functionality /// sandCountNeutrons(); flux = flux / (PI_FLOAT/4.0f); std::cout << "Square beam corrected neutron counter:" << std::endl; std::cout << " Got " << flux << " pseudo neutrons (weight product from both planes)" << std::endl; } void Sandman::sandCountNeutronsCircleCorrected() { /// /// Integrates over all trajectories to estimate the total neutron current, /// and divides by Pi/2 to normalise for square window beam area /// /// @param nSum pointer to single host memory float to store answer /// @param nSumErr pointer to single host memory float for statistical /// error on total /// /// \todo Either provide or remove nSum nSumErr functionality /// sandCountNeutrons(); flux = flux / (PI_FLOAT/2.0f); std::cout << "Circular beam corrected neutron counter:" << std::endl; std::cout << " Got " << flux << " pseudo neutrons (weight product from both planes)" << std::endl; } void Sandman::sandCountTrajectories(void) { /// /// Integrates over all trajectories to estimate the total neutron current. 
/// /// @param nSum pointer to single host memory float to store answer /// @param nSumErr pointer to single host memory float for statistical /// error on total /// /// \todo Either provide or remove nSum nSumErr functionality /// float *d_nSum; float h_nSum[1]; //count, error that way we have one memory transfer for everything checkCudaErrors(hipMalloc((void **)&d_nSum, sizeof(float))); int threadsPerBlock = SANDMAN_CUDA_THREADS; int blocksPerGrid =(numElements + threadsPerBlock - 1) / threadsPerBlock; if(showCUDAsteps) printf("\tCUDA kernel count neutrons with %d blocks of %d threads\n", blocksPerGrid, threadsPerBlock); // Zero the count on the host h_nSum[0] = 0.0f; // Copy the zero total to device memory checkCudaErrors(hipMemcpy(d_nSum, h_nSum, sizeof(float), hipMemcpyHostToDevice)); printf("Counting up phase space\n"); // static void global_countNeutrons(float *numNeutrons, const float *weightH, const float *weightV, const float *modFlux, const int numElements) hipLaunchKernelGGL(( global_countTrajectories), dim3(blocksPerGrid), dim3(threadsPerBlock), 0, 0, d_nSum, d_weightHg, d_weightVg, numElements); //Copy total out of device memory for host reporting checkCudaErrors(hipMemcpy(h_nSum, d_nSum, sizeof(float), hipMemcpyDeviceToHost)); traj = *h_nSum; // eFlux = *d_nSum; } void Sandman::lambdaMonitor(const std::string setFilename, const float setLambdaMin, const float setLambdaMax, int setLambdaHistSize) { /// /// Sets up a wavelength spectrum histogram to be completed by the destructor. /// /// @param setFilename std::string name of file to use for output of the histogram. /// @param setLambdaMin the minimum wavelength value to use /// @param setLambdaMax the maximum wavelength value to use /// @param setLambdaHistSize the number of bins in the histogram (max 100) /// std::string manipulatedFilename; lambdaMin = setLambdaMin; lambdaMax = setLambdaMax; if(abs(lambdaMax - lambdaMin) < 0.0001) { //That would produce an error, make wavelength band 1.0 angstroms lambdaMax = lambdaMin + 1.0f; } lambdaHistSize = setLambdaHistSize; if(lambdaHistSize > 100) { lambdaHistSize = 100; } manipulatedFilename = setFilename; manipulatedFilename = remove_extension(manipulatedFilename); manipulatedFilename = manipulatedFilename + "Lambda1D.csv"; lambdaFileName = manipulatedFilename; //Allocate arrays. The actual lambda monitor is called in the destructor //once the trajectory weights are known checkCudaErrors(hipMalloc((void **)&d_lambdaMonHist, 100* sizeof(float))); if(d_lambdaMonHist == NULL) { std::cerr << color_red << "ERROR:" << color_reset << " failure to allocate array d_lambdaMonHist" << std::endl; exit(1); } } void Sandman::executeLambdaMonitor(void) { /// /// Performs the wavelength histogram calculation set up by lambdaMonitor, /// when called by the destructor. 
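  ///
  /// The histogram uses lambdaHistSize bins of width
  /// dLambda = (lambdaMax - lambdaMin)/lambdaHistSize, and the monitor kernel
  /// bins each trajectory at index rint((lambda - lambdaMin)/dLambda), as in
  /// the other monitor kernels.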
/// float *h_lambdaHist=NULL; float runningLambda; float lambdaIntegral=0.0; if(lambdaHistSize > 100) lambdaHistSize = 100; int i; const float dLambda=(lambdaMax-lambdaMin) / (float)lambdaHistSize; std::ofstream outfile; std::cout << color_yellow << "LAMBDA HISTOGRAM CONSTRUCTION" << color_reset << std::endl; outfile.open(lambdaFileName.c_str()); if(outfile.fail()) { std::cerr << "ERROR opening file " << lambdaFileName << std::endl; return; } h_lambdaHist = (float*) malloc(lambdaHistSize*sizeof(float)); if(h_lambdaHist == NULL) { std::cerr << color_red << "ERROR:" << color_reset << " allocating host memory in executeLambdaMonitor" << std::endl; exit(1); } if(d_histogram1D == NULL) { std::cerr << color_red << "ERROR:" << color_reset << " device memory pointer is NULL in executeLambdaMonitor" << std::endl; exit(1); } #ifdef DEBUG hipError_t errSync = hipGetLastError(); hipError_t errAsync = hipDeviceSynchronize(); if (errSync != hipSuccess) std::cout << "Sync kernel error: " << hipGetErrorString(errSync) << std::endl; if (errAsync != hipSuccess) std::cout << "Async kernel error: " << hipGetErrorString(errAsync) << std::endl; #endif // Zero the count histogram zeroHistogram1D(); #ifdef DEBUG if (errSync != hipSuccess) std::cout << "Sync kernel error: " << hipGetErrorString(errSync) << std::endl; if (errAsync != hipSuccess) std::cout << "Async kernel error: " << hipGetErrorString(errAsync) << std::endl; #endif int threadsPerBlock = SANDMAN_CUDA_THREADS; int blocksPerGrid =(numElements + threadsPerBlock - 1) / threadsPerBlock; if(showCUDAsteps) std::cout << "\tCUDA kernel lambdamonitor[" << lambdaHistSize << "] with " << blocksPerGrid << " blocks of " << threadsPerBlock << " threads" << std::endl; //void global_lambdaMonitor(float *lambdaHist, const float lambdaMin, const float dLambda, int histSize, const float *lambdaH, const float *lambdaV, const float *weightH, const float *weightV, const int numElements) hipLaunchKernelGGL(( global_lambdaMonitor), dim3(blocksPerGrid), dim3(threadsPerBlock), 0, 0, d_histogram1D, lambdaMin, dLambda, lambdaHistSize, d_lambdag, d_weightHg, d_weightVg, d_modFlux, sourceDeltaLambda, numElements); //Copy total out of device memory for host reporting checkCudaErrors(hipMemcpy(h_lambdaHist, d_histogram1D, lambdaHistSize*sizeof(float), hipMemcpyDeviceToHost)); //Write out file from host memory runningLambda = lambdaMin; for(i=0; i < lambdaHistSize; i++) { outfile << runningLambda << "," << h_lambdaHist[i] << std::endl; runningLambda = runningLambda + dLambda; lambdaIntegral += h_lambdaHist[i]*dLambda; } outfile.close(); std::cout << "\tLambda monitor file written. Integral current = " << lambdaIntegral << " n/s" << std::endl; free(h_lambdaHist); } void Sandman::sandPosMonitorH(const std::string filename, const float min, const float max, int histSize) { /// /// Sets up a position histogram to be completed by the destructor. /// /// @param filename std::string name of file to use for output of the /// histogram. 
/// @param min the minimum position value to use /// @param max the maximum position value to use /// @param histSize the number of bins in the histogram (max 100) /// /// \todo Complete this function, like the lambdahistrogram function /// float *h_hist; float runningX; if(histSize > 100) histSize = 100; int i; const float dval=fabs(max-min) / (float)histSize; std::ofstream outfile; outfile.open(filename.c_str()); if(outfile.fail()) { std::cerr << "ERROR opening file " << filename << std::endl; return; } h_hist = (float*) malloc(histSize*sizeof(float)); int threadsPerBlock = SANDMAN_CUDA_THREADS; int blocksPerGrid =(numElements + threadsPerBlock - 1) / threadsPerBlock; if(showCUDAsteps) printf("CUDA posMonitorH with %d blocks of %d threads\n", blocksPerGrid, threadsPerBlock); printf("H position monitor\n"); // Zero the count histogram zeroHistogram1D(); //void global_lambdaMonitor(float *lambdaHist, const float lambdaMin, const float dLambda, int histSize, const float *lambdaH, const float *lambdaV, const float *weightH, const float *weightV, const int numElements) hipLaunchKernelGGL(( global_Monitor1D), dim3(blocksPerGrid), dim3(threadsPerBlock), 0, 0, d_histogram1D, min, dval, histSize, d_pointsYH, d_weightHg, numElements); //Copy total out of device memory for host reporting checkCudaErrors(hipMemcpy(h_hist, d_histogram1D, histSize*sizeof(float), hipMemcpyDeviceToHost)); //Write out file from host memory runningX = min; for(i=0; i<histSize; i++) { outfile << runningX << "," << h_hist[i] << std::endl; runningX = runningX + dval; } outfile.close(); free(h_hist); } void Sandman::phaseSpaceMapH(const char *filename, const float ymin, const float ymax, const float thetaMin, const float thetaMax) { //Create snapshots strcpy(filenameSnapshot, filename); yminSnapshot = ymin; ymaxSnapshot = ymax; thetaMinSnapshot = thetaMin; thetaMaxSnapshot = thetaMax; if( d_pointsThetaHsnapshot != NULL || d_pointsYHsnapshot != NULL ) { std::cout << color_red << "ERROR:" << color_reset << " only one type of beam monitor snapshot can be used at a time (lambda & horizontal phase space monitors use same snapshot arrays)" << std::endl; exit(1); } checkCudaErrors(hipMalloc((void **)&d_pointsThetaHsnapshot, numElements*sizeof(float))); if(d_pointsThetaHsnapshot == NULL) { std::cerr << color_red << "ERROR:" << color_reset << " failed to allocate device memory in setupPhaseSpaceMapH for theta" << std::endl; exit(1); } checkCudaErrors(hipMalloc((void **)&d_pointsYHsnapshot, numElements*sizeof(float))); if(d_pointsYHsnapshot == NULL) { std::cerr << color_red << "ERROR:" << color_reset << " failed to allocate device memory in setupPhaseSpaceMapH for Y" << std::endl; exit(1); } if(d_pointsYH == NULL || d_pointsThetaH == NULL) { std::cerr << "OMG: Copying from unallocated array" << std::endl; exit(1); } //If we get here, then the memory was allocated just fine. 
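  //The snapshot arrays are only filled here; the actual 2D histogram is
  //computed later by executePhaseSpaceMapH(), called from the destructor once
  //the final trajectory weights are known.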
int threadsPerBlock = SANDMAN_CUDA_THREADS; int blocksPerGrid =(numElements + threadsPerBlock - 1) / threadsPerBlock; if(showCUDAsteps) std::cout << "\tCUDA kernel copyArray for phaseSpaceMapH with " << blocksPerGrid << " blocks of " << threadsPerBlock << " threads" << std::endl; //Snapshot negative theta hipLaunchKernelGGL(( global_copyArray), dim3(blocksPerGrid), dim3(threadsPerBlock), 0, 0, d_pointsThetaH, d_pointsThetaHsnapshot, numElements, true); //Snapshot positive Y hipLaunchKernelGGL(( global_copyArray), dim3(blocksPerGrid), dim3(threadsPerBlock), 0, 0, d_pointsYH, d_pointsYHsnapshot, numElements, false); } void Sandman::phaseSpaceMapV(const char *filename, const float ymin, const float ymax, const float thetaMin, const float thetaMax) { //Create snapshots strcpy(filenameSnapshot, filename); yminSnapshot = ymin; ymaxSnapshot = ymax; thetaMinSnapshot = thetaMin; thetaMaxSnapshot = thetaMax; if( d_pointsThetaVsnapshot != NULL || d_pointsYVsnapshot != NULL ) { std::cout << color_red << "ERROR:" << color_reset << " only one type of 2D beam monitor snapshot can be created" << std::endl; exit(1); } checkCudaErrors(hipMalloc((void **)&d_pointsThetaVsnapshot, numElements*sizeof(float))); if(d_pointsThetaVsnapshot == NULL) { std::cerr << color_red << "ERROR:" << color_reset << " failed to allocate device memory in setupPhaseSpaceMapV for theta" << std::endl; exit(1); } checkCudaErrors(hipMalloc((void **)&d_pointsYVsnapshot, numElements*sizeof(float))); if(d_pointsYVsnapshot == NULL) { std::cerr << color_red << "ERROR:" << color_reset << " failed to allocate device memory in setupPhaseSpaceMapV for Y" << std::endl; exit(1); } if(d_pointsYH == NULL || d_pointsThetaH == NULL) { std::cerr << "OMG: Trying to copy from an unallocated array" << std::endl; exit(1); } //If we get here, then the memory was allocated just fine. int threadsPerBlock = SANDMAN_CUDA_THREADS; int blocksPerGrid =(numElements + threadsPerBlock - 1) / threadsPerBlock; if(showCUDAsteps) std::cout << "\tCUDA kernel copyArray for phaseSpaceMapV with " << blocksPerGrid << " blocks of " << threadsPerBlock << " threads" << std::endl; //Snapshot negative theta hipLaunchKernelGGL(( global_copyArray), dim3(blocksPerGrid), dim3(threadsPerBlock), 0, 0, d_pointsThetaV, d_pointsThetaVsnapshot, numElements, true); //Snapshot positive Y hipLaunchKernelGGL(( global_copyArray), dim3(blocksPerGrid), dim3(threadsPerBlock), 0, 0, d_pointsYV, d_pointsYVsnapshot, numElements, false); } void Sandman::executePhaseSpaceMapH(void) { /// /// Computes a full phase space map in the horizontal plane /// /// @param filename pointer to const char name of file to use for output of /// the histogram. 
/// /// @param ymin the minimum position value to use (m) /// /// @param ymax the maximum position value to use (m) /// /// @param thetaMin the minimum divergence value to use (radians) /// /// @param thetaMax the maximum divergence value to use (radians) /// float *h_histogram=NULL; float *d_boundary=NULL; float runningY = yminSnapshot; float runningTheta = thetaMinSnapshot; float dy = fabs(ymaxSnapshot-yminSnapshot)/100.0f; float dtheta = fabs(thetaMaxSnapshot - thetaMinSnapshot)/100.0f; std::cout << color_yellow << "HORIZONTAL ACCEPTANCE DIAGRAM CONSTRUCTION" << color_reset << std::endl; h_histogram = (float*) malloc(100*100*sizeof(float)); if(h_histogram == NULL) { std::cerr << "Error allocating host memory in phaseSpaceMapH" << std::endl; exit(1); } std::ofstream dataFile; int i,j; // Allocate device float for min, max etc checkCudaErrors(hipMalloc((void **)&d_boundary, sizeof(float))); if(d_boundary == NULL) { std::cerr << "Error allocating device memory in phaseSpaceMapH for d_boundary" << std::endl; exit(1); } // Zero the count histogram zeroHistogram2D(); #ifdef DEBUG hipError_t errSync = hipGetLastError(); hipError_t errAsync = hipDeviceSynchronize(); if (errSync != hipSuccess) std::cout << "Sync kernel error: " << hipGetErrorString(errSync) << std::endl; if (errAsync != hipSuccess) std::cout << "Async kernel error: " << hipGetErrorString(errAsync) << std::endl; #endif int threadsPerBlock = SANDMAN_CUDA_THREADS; int blocksPerGrid =(numElements + threadsPerBlock - 1) / threadsPerBlock; std::cout << "\tCUDA kernel rebinnedPhaseSpace with " << blocksPerGrid << " blocks of " << threadsPerBlock << " threads" << std::endl; //void global_rebinnedPhaseSpaceH(float globalHist[100][100], const float *d_pointsY, const float *d_pointsTheta, const float yMin, const float dy, const float thetaMin, const float dtheta, int histSize, const float *d_weight, const int numElements) hipLaunchKernelGGL(( global_rebinnedPhaseSpace), dim3(blocksPerGrid), dim3(threadsPerBlock), 0, 0, (float (*)[100])d_histogram2D, d_pointsYHsnapshot, d_pointsThetaHsnapshot, yminSnapshot, dy, thetaMinSnapshot, dtheta, 100, d_weightHg, numElements); #ifdef DEBUG if (errSync != hipSuccess) std::cout << "Sync kernel error: " << hipGetErrorString(errSync) << std::endl; if (errAsync != hipSuccess) std::cout << "Async kernel error: " << hipGetErrorString(errAsync) << std::endl; #endif //Get data from GPU checkCudaErrors(hipMemcpy(h_histogram, d_histogram2D, 100*100 * sizeof(float), hipMemcpyDeviceToHost)); dataFile.open(filenameSnapshot); if(!dataFile.good()) { std::cerr << "ERROR opening " << filenameSnapshot << " for writing" << std::endl; return; } else std::cout << "\tWriting 2D monitor file " << filenameSnapshot << std::endl; for(i=0; i<100; i++) { for(j=0; j<100; j++) { runningTheta = thetaMinSnapshot + dtheta * (float) j; runningY = yminSnapshot + dy * (float) i; //[theta][y] dataFile << runningTheta << "," << runningY << "," << h_histogram[j*100+i] << std::endl; } } dataFile.close(); free(h_histogram); if(d_boundary != NULL) checkCudaErrors(hipFree(d_boundary)); } void Sandman::executePhaseSpaceMapV(void) { /// /// Computes a full phase space map in the vertical plane /// /// @param filename pointer to const char name of file to use for output of /// the histogram. 
  ///
  /// @param ymin the minimum position value to use (m)
  ///
  /// @param ymax the maximum position value to use (m)
  ///
  /// @param thetaMin the minimum divergence value to use (radians)
  ///
  /// @param thetaMax the maximum divergence value to use (radians)
  ///

  float *h_histogram=NULL;
  float *d_boundary=NULL;

  float runningY = yminSnapshot;
  float runningTheta = thetaMinSnapshot;
  float dy = fabs(ymaxSnapshot-yminSnapshot)/100.0f;
  float dtheta = fabs(thetaMaxSnapshot - thetaMinSnapshot)/100.0f;

  std::cout << color_yellow << "VERTICAL ACCEPTANCE DIAGRAM CONSTRUCTION" << color_reset << std::endl;

  h_histogram = (float*) malloc(100*100*sizeof(float));
  if(h_histogram == NULL)
    {
      std::cerr << "Error allocating host memory in phaseSpaceMapV" << std::endl;
      exit(1);
    }

  std::ofstream dataFile;
  int i,j;

  // Allocate device float for min, max etc
  checkCudaErrors(hipMalloc((void **)&d_boundary, sizeof(float)));
  if(d_boundary == NULL)
    {
      std::cerr << "Error allocating device memory in phaseSpaceMapV for d_boundary" << std::endl;
      exit(1);
    }

  // Zero the count histogram
  zeroHistogram2D();

#ifdef DEBUG
  hipError_t errSync = hipGetLastError();
  hipError_t errAsync = hipDeviceSynchronize();
  if (errSync != hipSuccess)
    std::cout << "Sync kernel error: " << hipGetErrorString(errSync) << std::endl;
  if (errAsync != hipSuccess)
    std::cout << "Async kernel error: " << hipGetErrorString(errAsync) << std::endl;
#endif

  int threadsPerBlock = SANDMAN_CUDA_THREADS;
  int blocksPerGrid =(numElements + threadsPerBlock - 1) / threadsPerBlock;

  std::cout << "\tCUDA kernel rebinnedPhaseSpaceV with " << blocksPerGrid << " blocks of " << threadsPerBlock << " threads" << std::endl;

  //void global_rebinnedPhaseSpaceH(float globalHist[100][100], const float *d_pointsY, const float *d_pointsTheta, const float yMin, const float dy, const float thetaMin, const float dtheta, int histSize, const float *d_weight, const int numElements)

  hipLaunchKernelGGL(( global_rebinnedPhaseSpace), dim3(blocksPerGrid), dim3(threadsPerBlock), 0, 0, (float (*)[100])d_histogram2D, d_pointsYVsnapshot, d_pointsThetaVsnapshot, yminSnapshot, dy, thetaMinSnapshot, dtheta, 100, d_weightVg, numElements);

#ifdef DEBUG
  //Re-query the error state so that the launch above is reported, rather than
  //the stale, pre-launch status
  errSync = hipGetLastError();
  errAsync = hipDeviceSynchronize();
  if (errSync != hipSuccess)
    std::cout << "Sync kernel error: " << hipGetErrorString(errSync) << std::endl;
  if (errAsync != hipSuccess)
    std::cout << "Async kernel error: " << hipGetErrorString(errAsync) << std::endl;
#endif

  //Get data from GPU
  checkCudaErrors(hipMemcpy(h_histogram, d_histogram2D, 100*100 * sizeof(float), hipMemcpyDeviceToHost));

  dataFile.open(filenameSnapshot);

  if(!dataFile.good())
    {
      std::cerr << "ERROR opening " << filenameSnapshot << " for writing" << std::endl;
      return;
    }
  else
    std::cout << "\tWriting 2D monitor file " << filenameSnapshot << std::endl;

  for(i=0; i<100; i++)
    {
      for(j=0; j<100; j++)
        {
          runningTheta = thetaMinSnapshot + dtheta * (float) j;
          runningY = yminSnapshot + dy * (float) i;
          //[theta][y]
          dataFile << runningTheta << " " << runningY << " " << h_histogram[j*100+i] << std::endl;
        }
    }

  dataFile.close();

  free(h_histogram);

  if(d_boundary != NULL)
    checkCudaErrors(hipFree(d_boundary));
}


void Sandman::phaseSpaceMapH(const char *filename)
{
  ///
  /// Computes a full phase space map in the horizontal plane, autodetecting
  /// the boundaries.
  ///
  /// @param filename pointer to const char name of file to use for output of
  /// the histogram.
/// /// // float *h_histogram=NULL; float *d_boundary=NULL; // float runningY; // float runningTheta; float dy; float dtheta; float thLo, thHi, yLo, yHi; // h_histogram = (float*) malloc(100*100*sizeof(float)); // if(h_histogram == NULL) // { // std::cerr << "Error allocating host memory in phaseSpaceMapH" << std::endl; // exit(1); // } // std::ofstream dataFile; // int i,j; // Allocate device float for min, max etc checkCudaErrors(hipMalloc((void **)&d_boundary, sizeof(float))); if(d_boundary == NULL) { std::cerr << "Error allocating device memory in phaseSpaceMapH for d_boundary" << std::endl; exit(1); } //Autodetect minimum and maximum theta std::cout << " Phase space theta minimum:" << std::endl; thLo = arrayMinimum(d_pointsThetaH, d_boundary); std::cout << " Phase space theta maximum:" << std::endl; thHi = arrayMaximum(d_pointsThetaH, d_boundary); dtheta = fabs(thLo-thHi)/100.0f; //Pad by one bin thLo = thLo - dtheta; thHi = thHi + dtheta; //Autodetect minimum and maximum y std::cout << " Phase space Y minimum:" << std::endl; yLo = arrayMinimum(d_pointsYH, d_boundary); std::cout << " Phase space Y maximum:" << std::endl; yHi = arrayMaximum(d_pointsYH, d_boundary); //Pad by one bin dy = fabs(yHi - yLo)/100.0f; yLo = yLo - dy; yHi = yHi + dy; //Pipe this now through the other function //void Sandman::phaseSpaceMapH(const char *filename, const float ymin, const float ymax, const float thetaMin, const float thetaMax) phaseSpaceMapH(filename, yLo, yHi, thLo, thHi); // // Zero the count histogram // zeroHistogram2D(); // #ifdef DEBUG // hipError_t errSync = hipGetLastError(); // hipError_t errAsync = hipDeviceSynchronize(); // if (errSync != hipSuccess) // std::cout << "Sync kernel error: " << hipGetErrorString(errSync) << std::endl; // if (errAsync != hipSuccess) // std::cout << "Async kernel error: " << hipGetErrorString(errAsync) << std::endl; // #endif // printf("2D histogram phase space H...\n\n"); // int threadsPerBlock = 256; // int blocksPerGrid =(numElements + threadsPerBlock - 1) / threadsPerBlock; // std::cout << "CUDA kernel rebinnedPhaseSpaceH, auto boundary detect, with " << blocksPerGrid << " blocks of " << threadsPerBlock << " threads" << std::endl; // //void global_rebinnedPhaseSpaceH(float globalHist[100][100], const float *d_pointsY, const float *d_pointsTheta, const float yMin, const float dy, const float thetaMin, const float dtheta, int histSize, const float *d_weight, const int numElements) // // global_rebinnedPhaseSpaceH<<<blocksPerGrid, threadsPerBlock>>> // // ((float (*)[100])d_histogram2D, d_pointsYH, d_pointsThetaH, ymin, dy, thetaMin, dtheta, 100, d_weightHg, numElements); // global_rebinnedPhaseSpaceH<<<blocksPerGrid, threadsPerBlock>>> // ((float (*)[100])d_histogram2D, d_pointsYH, d_pointsThetaH, yLo, dy, thLo, dtheta, 100, d_weightHg, numElements); // #ifdef DEBUG // if (errSync != hipSuccess) // std::cout << "Sync kernel error: " << hipGetErrorString(errSync) << std::endl; // if (errAsync != hipSuccess) // std::cout << "Async kernel error: " << hipGetErrorString(errAsync) << std::endl; // #endif // //Get data from GPU // checkCudaErrors(hipMemcpy(h_histogram, d_histogram2D, 100*100 * sizeof(float), hipMemcpyDeviceToHost)); // dataFile.open(filename); // if(!dataFile.good()) // { // std::cerr << "ERROR opening " << filename << " for writing" << std::endl; // return; // } // else // std::cout << "Writing 2D monitor file " << filename << std::endl; // for(i=0; i<100; i++) // { // for(j=0; j<100; j++) // { // runningTheta = thLo + dtheta * (float) j; // 
runningY = yLo + dy * (float) i; // //[theta][y] // dataFile << runningTheta << " " << runningY << " " << h_histogram[j*100+i] << std::endl; // } // } // dataFile.close(); // free(h_histogram); if(d_boundary != NULL) checkCudaErrors(hipFree(d_boundary)); } void Sandman::phaseSpaceMapHCPU(const char *filename) { /// /// Computes a full phase space map in the horizontal plane, autodetecting /// the boundaries. This fuction runs on the CPU and requires the full /// phase space to be copied over to host Ram, so it is SLOOOOOW. /// /// @param filename pointer to const char name of file to use for output of /// the histogram. /// /// However, it is provided because it is probably very good for unit /// testing etc. /// float *h_pointsY=NULL; float *h_pointsTheta=NULL; float *h_weight=NULL; int dumped=0; h_pointsY = (float*) malloc(numElements*sizeof(float)); if(h_pointsY == NULL) { std::cerr << "phaseSpaceMapH cannot allocate memory for h_pointsY" << std::endl; exit(1); } h_pointsTheta = (float*) malloc(numElements*sizeof(float)); if(h_pointsTheta == NULL) { std::cerr << "phaseSpaceMapH cannot allocate memory for h_pointsTheta" << std::endl; exit(1); } h_weight = (float*) malloc(numElements*sizeof(float)); if(h_weight == NULL) { std::cerr << "phaseSpaceMapH cannot allocate memory for h_weight" << std::endl; exit(1); } std::ofstream dataFile; int i; //Get data from GPU sandGetPhaseSpaceH(h_pointsY, h_pointsTheta, h_weight); dataFile.open(filename); if(!dataFile.good()) { std::cerr << "ERROR opening " << filename << " for writing" << std::endl; return; } //Limit the output to 20000 points - this could be a shit load of data for(i=0; i<numElements && dumped<200000; i++) { if(h_weight[i] > deadWeight) { dataFile << h_pointsTheta[i]*180.0f/PI_FLOAT << "," << h_pointsY[i] << "," << h_weight[i] << std::endl; dumped++; } } dataFile.close(); free(h_pointsY); free(h_pointsTheta); free(h_weight); } void Sandman::phaseSpaceMapVCPU(const char *filename) { /// /// Computes a full phase space map in the vertical plane, autodetecting /// the boundaries. This fuction runs on the CPU and requires the full /// phase space to be copied over to host Ram, so it is SLOOOOOW. /// /// @param filename pointer to const char name of file to use for output of /// the histogram. /// /// However, it is provided because it is probably very good for unit /// testing etc. 
/// float *h_pointsY=NULL; float *h_pointsTheta=NULL; float *h_weight=NULL; h_pointsY = (float*) malloc(numElements*sizeof(float)); if(h_pointsY == NULL) { std::cerr << "phaseSpaceMapH cannot allocate memory for h_pointsY" << std::endl; exit(1); } h_pointsTheta = (float*) malloc(numElements*sizeof(float)); if(h_pointsTheta == NULL) { std::cerr << "phaseSpaceMapH cannot allocate memory for h_pointsTheta" << std::endl; exit(1); } h_weight = (float*) malloc(numElements*sizeof(float)); if(h_weight == NULL) { std::cerr << "phaseSpaceMapH cannot allocate memory for h_weight" << std::endl; exit(1); } std::ofstream dataFile; int i; //Get data from GPU sandGetPhaseSpaceV(h_pointsY, h_pointsTheta, h_weight); dataFile.open(filename); if(!dataFile.good()) { std::cerr << "ERROR opening " << filename << " for writing" << std::endl; return; } //Limit the output to 200000 points - this could be a shit load of data for(i=0; i<numElements && i<200000; i++) { if(h_weight[i] > deadWeight) dataFile << h_pointsTheta[i]*180.0f/PI_FLOAT << "," << h_pointsY[i] << "," << h_weight[i] << std::endl; } dataFile.close(); free(h_pointsY); free(h_pointsTheta); free(h_weight); } void Sandman::debugPosPosCPU(const char *filename) { /// /// Computes a full phase space map in the vertical plane, autodetecting /// the boundaries. This fuction runs on the CPU and requires the full /// phase space to be copied over to host Ram, so it is SLOOOOOW. /// /// @param filename pointer to const char name of file to use for output of /// the histogram. /// /// However, it is provided because it is probably very good for unit /// testing etc. /// float *h_pointsH=NULL; float *h_weightH=NULL; float *h_pointsV=NULL; float *h_weightV=NULL; h_pointsH = (float*) malloc(numElements*sizeof(float)); if(h_pointsH == NULL) { std::cerr << "DebugPosPosCPU cannot allocate memory for h_pointsH" << std::endl; exit(1); } h_pointsV = (float*) malloc(numElements*sizeof(float)); if(h_pointsV == NULL) { std::cerr << "DebugPosPosCPU cannot allocate memory for h_pointsV" << std::endl; exit(1); } h_weightH = (float*) malloc(numElements*sizeof(float)); if(h_weightH == NULL) { std::cerr << "DebugPosPosCPU cannot allocate memory for h_weightH" << std::endl; exit(1); } h_weightV = (float*) malloc(numElements*sizeof(float)); if(h_weightV == NULL) { std::cerr << "DebugPosPosCPU cannot allocate memory for h_weightV" << std::endl; exit(1); } std::ofstream dataFile; int i; int dumped=0; //Get data from GPU sandDebugPosPos(h_pointsH, h_weightH, h_pointsV, h_weightV); dataFile.open(filename); if(!dataFile.good()) { std::cerr << "ERROR opening " << filename << " for writing" << std::endl; return; } //Limit the function to considering 100000 points - this could be a shit load of data for(i=0; i<numElements && dumped < 100000; i++) { if(h_weightH[i] > deadWeight && h_weightV[i] > deadWeight) { dataFile << h_pointsH[i] << "\t" << h_pointsV[i] << "\t" << h_weightH[i]*h_weightV[i] << std::endl; dumped++; } } dataFile.close(); free(h_pointsH); free(h_pointsV); free(h_weightH); free(h_weightV); } void Sandman::sandSkewCUDA(const float distance_m) { /// /// Calls the CUDA kernels to compute a skew operation on both phase space /// maps to propagate the beam a certain distance within the small angle /// limit. /// /// @param distance_m the distance the beam must propagate in metres. 
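  ///
  /// In the small angle limit the propagation is a simple shear of phase
  /// space: each trajectory is assumed to be updated as
  ///     y -> y + theta * distance_m
  /// with theta unchanged, which is what the skew kernel launched below is
  /// expected to do per element (see global_sandSkewCUDA for the exact code).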
/// int threadsPerBlock = SANDMAN_CUDA_THREADS; int blocksPerGrid =(numElements + threadsPerBlock - 1) / threadsPerBlock; if(showCUDAsteps) printf("\tCUDA kernel skew with %d blocks of %d threads\n", blocksPerGrid, threadsPerBlock); //void device_sandSkewCUDA(float *d_pointsY, const float *d_pointsTheta, float *d_weight, const float distance_m, const int numElements) hipLaunchKernelGGL(( global_sandSkewCUDA), dim3(blocksPerGrid), dim3(threadsPerBlock), 0, 0, d_pointsYH, d_pointsThetaH, distance_m, numElements); hipLaunchKernelGGL(( global_sandSkewCUDA), dim3(blocksPerGrid), dim3(threadsPerBlock), 0, 0, d_pointsYV, d_pointsThetaV, distance_m, numElements); } void Sandman::sandCollimateCUDA(const float divergenceH, const float divergenceV) { /// /// Calls the CUDA kernels to compute a collimation operation, setting the /// weight to zero on trajectories falling outside the divergence window /// requested. /// /// @param divergenceH the half width divergence limit in the horizontal plane (radians) /// /// @param divergenceV the half width divergence limit in the vertical plane (radians) /// int threadsPerBlock = SANDMAN_CUDA_THREADS; int blocksPerGrid =(numElements + threadsPerBlock - 1) / threadsPerBlock; if(showCUDAsteps) printf("\tCUDA kernel collimation at %f and %f with %d blocks of %d threads\n", divergenceH, divergenceV, blocksPerGrid, threadsPerBlock); // void global_collimation(float *d_weight, const float *d_pointsTheta, const float lower_angle, const float upper_angle, const int numElements) hipLaunchKernelGGL(( global_collimation), dim3(blocksPerGrid), dim3(threadsPerBlock), 0, 0, d_weightHg, d_pointsThetaH, -fabs(divergenceH), fabs(divergenceH), numElements); hipLaunchKernelGGL(( global_collimation), dim3(blocksPerGrid), dim3(threadsPerBlock), 0, 0, d_weightVg, d_pointsThetaV, -fabs(divergenceV), fabs(divergenceV), numElements); } //////////////////////////////////////// // // Apertures // //////////////////////////////////////// void Sandman::sandApertureV(const float window_height) { /// /// Calls the CUDA kernels to compute an aperture operation, setting the /// weight to zero on trajectories falling outside the position window /// requested. /// /// @param window_height the full height of the window in metres. /// int threadsPerBlock = SANDMAN_CUDA_THREADS; int blocksPerGrid =(numElements + threadsPerBlock - 1) / threadsPerBlock; if(showCUDAsteps) printf("\tCUDA kernel aperture of height %f with %d blocks of %d threads\n", window_height, blocksPerGrid, threadsPerBlock); // void global_collimation(float *d_weight, const float *d_pointsTheta, const float lower_angle, const float upper_angle, const int numElements) hipLaunchKernelGGL(( global_aperture), dim3(blocksPerGrid), dim3(threadsPerBlock), 0, 0, d_weightVg, d_pointsYV, -fabs(window_height/2.0f), fabs(window_height/2.0f), numElements); } void Sandman::sandApertureH(const float window_width) { /// /// Calls the CUDA kernels to compute an aperture operation, setting the /// weight to zero on trajectories falling outside the position window /// requested. /// /// @param window_width the full width of the window in metres. 
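  ///
  /// Example (hypothetical dimensions): a 3 cm wide entrance slit would be
  /// modelled as sandApertureH(0.03f); all positions are in metres.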
/// int threadsPerBlock = SANDMAN_CUDA_THREADS; int blocksPerGrid =(numElements + threadsPerBlock - 1) / threadsPerBlock; if(showCUDAsteps) printf("\tCUDA kernel aperture of width %f with %d blocks of %d threads\n", window_width, blocksPerGrid, threadsPerBlock); // void global_collimation(float *d_weight, const float *d_pointsTheta, const float lower_angle, const float upper_angle, const int numElements) hipLaunchKernelGGL(( global_aperture), dim3(blocksPerGrid), dim3(threadsPerBlock), 0, 0, d_weightHg, d_pointsYH, -fabs(window_width/2.0f), fabs(window_width/2.0f), numElements); } void Sandman::sandApertureCUDA(const float window_width, const float window_height, bool silent) { /// /// Calls the CUDA kernels to compute an aperture operation, setting the /// weight to zero on trajectories falling outside the position window /// requested. /// /// @param window_width the full width of the window in metres. /// /// @param window_height the full height of the window in metres. /// if(!silent) { std::cout << color_yellow << "APERTURE MASK" << color_reset << std::endl; std::cout << "\twidth = " << window_width << std::endl; std::cout << "\theight = " << window_height << std::endl; } else { std::cout << "Optical unit entrance mask" << std::endl; std::cout << "\twidth = " << window_width << std::endl; std::cout << "\theight = " << window_height << std::endl; } int threadsPerBlock = SANDMAN_CUDA_THREADS; int blocksPerGrid =(numElements + threadsPerBlock - 1) / threadsPerBlock; if(showCUDAsteps) printf("\tCUDA kernel aperture of width %f and height %f with %d blocks of %d threads\n", window_width, window_height, blocksPerGrid, threadsPerBlock); // void global_collimation(float *d_weight, const float *d_pointsTheta, const float lower_angle, const float upper_angle, const int numElements) hipLaunchKernelGGL(( global_aperture), dim3(blocksPerGrid), dim3(threadsPerBlock), 0, 0, d_weightHg, d_pointsYH, -fabs(window_width/2.0f), fabs(window_width/2.0f), numElements); hipLaunchKernelGGL(( global_aperture), dim3(blocksPerGrid), dim3(threadsPerBlock), 0, 0, d_weightVg, d_pointsYV, -fabs(window_height/2.0f), fabs(window_height/2.0f), numElements); } ///////////////////////////////// // // Beamstops // ///////////////////////////////// void Sandman::sandBeamstopV(const float beamstop_height) { /// /// Calls the CUDA kernels to compute a beamstop operation, setting the /// weight to zero on trajectories falling inside the position window /// requested. /// /// @param beamstop_height the full height of the window in metres. /// int threadsPerBlock = SANDMAN_CUDA_THREADS; int blocksPerGrid =(numElements + threadsPerBlock - 1) / threadsPerBlock; if(showCUDAsteps) printf("\tCUDA kernel beamstop of height %f with %d blocks of %d threads\n", beamstop_height, blocksPerGrid, threadsPerBlock); // void global_collimation(float *d_weight, const float *d_pointsTheta, const float lower_angle, const float upper_angle, const int numElements) hipLaunchKernelGGL(( global_beamstop), dim3(blocksPerGrid), dim3(threadsPerBlock), 0, 0, d_weightVg, d_pointsYV, -fabs(beamstop_height/2.0f), fabs(beamstop_height/2.0f), numElements); } void Sandman::sandBeamstopH(const float beamstop_width) { /// /// Calls the CUDA kernels to compute a beamstop operation, setting the /// weight to zero on trajectories falling inside the position window /// requested. /// /// @param window_width the full width of the window in metres. 
/// int threadsPerBlock = SANDMAN_CUDA_THREADS; int blocksPerGrid =(numElements + threadsPerBlock - 1) / threadsPerBlock; if(showCUDAsteps) printf("\tCUDA kernel beamstop of width %f with %d blocks of %d threads\n", beamstop_width, blocksPerGrid, threadsPerBlock); // void global_collimation(float *d_weight, const float *d_pointsTheta, const float lower_angle, const float upper_angle, const int numElements) hipLaunchKernelGGL(( global_beamstop), dim3(blocksPerGrid), dim3(threadsPerBlock), 0, 0, d_weightHg, d_pointsYH, -fabs(beamstop_width/2.0f), fabs(beamstop_width/2.0f), numElements); } void Sandman::sandBeamstopCUDA(const float beamstop_width, const float beamstop_height) { /// /// Calls the CUDA kernels to compute a beamstop operation, setting the /// weight to zero on trajectories falling outside the position window /// requested. /// /// @param window_width the full width of the window in metres. /// /// @param window_height the full height of the window in metres. /// std::cout << color_yellow << "BEAMSTOP" << color_reset << std::endl; std::cout << "\twidth = " << beamstop_width << std::endl; std::cout << "\theight = " << beamstop_height << std::endl; int threadsPerBlock = SANDMAN_CUDA_THREADS; int blocksPerGrid =(numElements + threadsPerBlock - 1) / threadsPerBlock; if(showCUDAsteps) printf("\tCUDA kernel beamstop of width %f and height %f with %d blocks of %d threads\n", beamstop_width, beamstop_height, blocksPerGrid, threadsPerBlock); // void global_collimation(float *d_weight, const float *d_pointsTheta, const float lower_angle, const float upper_angle, const int numElements) hipLaunchKernelGGL(( global_beamstop), dim3(blocksPerGrid), dim3(threadsPerBlock), 0, 0, d_weightHg, d_pointsYH, -fabs(beamstop_width/2.0f), fabs(beamstop_width/2.0f), numElements); hipLaunchKernelGGL(( global_beamstop), dim3(blocksPerGrid), dim3(threadsPerBlock), 0, 0, d_weightVg, d_pointsYV, -fabs(beamstop_height/2.0f), fabs(beamstop_height/2.0f), numElements); } ///////////////////////////////// // // Moderators // ///////////////////////////////// void Sandman::sandModerator(const float width, const float height, const float hoffset, const float voffset, const float temp, const float num) { /// /// Calls the CUDA kernels to compute a single moderator window, which sets /// the weight to zero on trajectories falling outside the position window /// requested, and calculates the neutron current represented by the /// trajectory. /// /// @param width the width of the moderator in metres /// /// @param height the height of the moderator in metres /// /// @param hoffset the perpendicular horizontal offset of the moderator /// (left is positive, imagined from a view top down with the moderator at /// the bottom and the sample at the top, relative to the beam axis centre /// at the guide entrance. /// /// @param voffset the perpendicular vertical offset of the moderator (up is /// positive, imagined from a side view with the moderator on the left and /// the sample to the right, relative to the beam axis centre at the guide /// entrance. /// /// @param temp the characteristic temperature of the maxwellian distribution (kelvin) /// /// @param num the characteristic brightness of the maxwellian distribution /// (neutrons per second per cm2 per steradian per angstrom) /// /// @note the maxwellian distribution calculation is the same used in MCSTAS /// (and nads). VITESS uses a different definition of brightness and solid /// angle. 
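  ///
  /// \note For reference, the McStas-style Maxwellian brightness referred to
  /// above is assumed to take the usual form
  ///     B(lambda) = num * 2*a*a * exp(-a/lambda^2) / lambda^5,   a ~ 949/temp
  /// with lambda in angstroms and temp in kelvin; see the moderator kernel
  /// for the exact expression used.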
/// int threadsPerBlock = SANDMAN_CUDA_THREADS; int blocksPerGrid =(numElements + threadsPerBlock - 1) / threadsPerBlock; if(showCUDAsteps) printf("\tCUDA kernel sandModerator of width %f and height %f with %d blocks of %d threads\n", width, height, blocksPerGrid, threadsPerBlock); //static void global_sandModerator1(float *d_modFluxH, float *d_weightH, const float *d_lambdag, const float *d_pointsYH, const int numElements, const float width, const float hoffset, const float temp, const float num) hipLaunchKernelGGL(( global_sandModerator1), dim3(blocksPerGrid), dim3(threadsPerBlock), 0, 0, d_modFlux, d_weightHg, d_lambdag, d_pointsYH, numElements, width, hoffset, temp, num); hipLaunchKernelGGL(( global_sandModerator1), dim3(blocksPerGrid), dim3(threadsPerBlock), 0, 0, d_modFlux, d_weightVg, d_lambdag, d_pointsYV, numElements, width, hoffset, temp, num); } void Sandman::sandBrillianceTransferModerator(const float width, const float height, const float hoffset, const float voffset) { /// /// Calls the CUDA kernels to compute a single moderator window, /// which sets the weight to zero on trajectories falling outside /// the position window requested, and otherwise scores the neutron /// at its transmission weight. /// /// @param width the width of the moderator in metres /// /// @param height the height of the moderator in metres /// /// @param hoffset the perpendicular horizontal offset of the moderator /// (left is positive, imagined from a view top down with the moderator at /// the bottom and the sample at the top, relative to the beam axis centre /// at the guide entrance. /// /// @param voffset the perpendicular vertical offset of the moderator (up is /// positive, imagined from a side view with the moderator on the left and /// the sample to the right, relative to the beam axis centre at the guide /// entrance. /// int threadsPerBlock = SANDMAN_CUDA_THREADS; int blocksPerGrid =(numElements + threadsPerBlock - 1) / threadsPerBlock; if(showCUDAsteps) printf("\tCUDA kernel sandBrillianceTransferModerator of width %f and height %f with %d blocks of %d threads\n", width, height, blocksPerGrid, threadsPerBlock); //static void global_sandModerator1(float *d_modFluxH, float *d_weightH, const float *d_lambdag, const float *d_pointsYH, const int numElements, const float width, const float hoffset, const float temp, const float num) hipLaunchKernelGGL(( global_sandBrillianceTransferModerator), dim3(blocksPerGrid), dim3(threadsPerBlock), 0, 0, d_modFlux, d_weightHg, d_lambdag, d_pointsYH, numElements, width, hoffset); hipLaunchKernelGGL(( global_sandBrillianceTransferModerator), dim3(blocksPerGrid), dim3(threadsPerBlock), 0, 0, d_modFlux, d_weightVg, d_lambdag, d_pointsYV, numElements, width, hoffset); } void Sandman::sandILLHCSModerator(void) { /// /// A tool to call a standard moderator kernel providing a triple maxwellian /// moderator matching the ILL horizontal cold source dimensions, based on /// the work of E. Farhi in 2008-2009 to calculate the absolute brightness /// via extrapolation. This benchmark moderator was used in the NADS work, /// so is a useful cross-check. 
/// sandApertureCUDA(0.186, 0.186, true); int threadsPerBlock = SANDMAN_CUDA_THREADS; int blocksPerGrid =(numElements + threadsPerBlock - 1) / threadsPerBlock; if(showCUDAsteps) printf("\tCUDA kernel sandILLHCSModerator with %d blocks of %d threads\n", blocksPerGrid, threadsPerBlock); //Moderator brightness curve if(d_modFlux == NULL) checkCudaErrors(hipMalloc((void **)&d_modFlux, numElements * sizeof(float))); if(d_modFlux == NULL) { std::cerr << color_red << "ERROR:" << color_reset << " failure to allocate memory for moderator brightness curve" << std::endl; exit(1); } //global_sandILLHCSModerator(float *d_modFluxH, float *d_weightH, const float *d_lambdag, const float *d_pointsYH, const int numElements) hipLaunchKernelGGL(( global_sandILLHCSModerator), dim3(blocksPerGrid), dim3(threadsPerBlock), 0, 0, d_modFlux, d_weightHg, d_lambdag, d_pointsYH, numElements); hipLaunchKernelGGL(( global_sandILLHCSModerator), dim3(blocksPerGrid), dim3(threadsPerBlock), 0, 0, d_modFlux, d_weightVg, d_lambdag, d_pointsYV, numElements); } void Sandman::sandPSIModerator(void) { /// /// A tool to call a standard moderator kernel providing a triple maxwellian /// moderator matching the ILL horizontal cold source dimensions, based on /// the work of E. Farhi in 2008-2009 to calculate the absolute brightness /// via extrapolation. This benchmark moderator was used in the NADS work, /// so is a useful cross-check. /// sandApertureCUDA(0.3, 0.3, true); int threadsPerBlock = SANDMAN_CUDA_THREADS; int blocksPerGrid =(numElements + threadsPerBlock - 1) / threadsPerBlock; if(showCUDAsteps) printf("\tCUDA kernel sandILLHCSModerator with %d blocks of %d threads\n", blocksPerGrid, threadsPerBlock); //Moderator brightness curve if(d_modFlux == NULL) checkCudaErrors(hipMalloc((void **)&d_modFlux, numElements * sizeof(float))); if(d_modFlux == NULL) { std::cerr << color_red << "ERROR:" << color_reset << " failure to allocate memory for moderator brightness curve" << std::endl; exit(1); } //global_sandILLHCSModerator(float *d_modFluxH, float *d_weightH, const float *d_lambdag, const float *d_pointsYH, const int numElements) hipLaunchKernelGGL(( global_sandPSIModerator), dim3(blocksPerGrid), dim3(threadsPerBlock), 0, 0, d_modFlux, d_weightHg, d_lambdag, d_pointsYH, numElements); hipLaunchKernelGGL(( global_sandPSIModerator), dim3(blocksPerGrid), dim3(threadsPerBlock), 0, 0, d_modFlux, d_weightVg, d_lambdag, d_pointsYV, numElements); } void Sandman::sandReflectionH(const float mirrorYtop, const float mirrorYbottom, const float mirrorAngleTop, const float mirrorAngleBottom, const float mTop, const float mBottom) { /// /// Calls the CUDA kernels to compute a single channel guide reflection in /// the horizontal plane /// /// @param mirrorYtop upper mirror surface in phase space (since this is horizontal, top = left) in metres /// /// @param mirrorYbottom lower mirror surface in phase space (since this is horizontal, bottom = right) in metres /// /// @param mirrorAngleTop angle of inclination of upper mirror surface (radians) /// /// @param mirrorAngleBottom angle of inclination of lower mirror surface (radians) /// /// @param mTop supermirror m value of upper mirror /// /// @param mBottom supermirror m value of lower mirror /// /// /// @note the maths from this operation is a carbon copy of the nads code /// int threadsPerBlock = SANDMAN_CUDA_THREADS; int blocksPerGrid =(numElements + threadsPerBlock - 1) / threadsPerBlock; if(showCUDAsteps) printf("\tCUDA kernel reflection with %d blocks of %d threads\n", blocksPerGrid, 
threadsPerBlock); /* void device_sandReflection(float *d_pointsY, float *d_pointsTheta, const float *d_lambda, float *d_weight, const float mirrorY1, const float mirrorY2, const float mirrorAngle1, const float mirrorAngle2, const float mValue, const int numElements) */ hipLaunchKernelGGL(( global_sandReflection), dim3(blocksPerGrid), dim3(threadsPerBlock), 0, 0, d_pointsYH, d_pointsThetaH, d_lambdag, d_weightHg, mirrorYtop, mirrorYbottom, mirrorAngleTop, mirrorAngleBottom, mTop, mBottom, numElements); } void Sandman::sandReflectionV(const float mirrorYtop, const float mirrorYbottom, const float mirrorAngleTop, const float mirrorAngleBottom, const float mTop, const float mBottom) { /// /// Calls the CUDA kernels to compute a single channel guide reflection in /// the vertical plane /// /// @param mirrorYtop upper mirror surface in phase space in metres /// /// @param mirrorYbottom lower mirror surface in phase space in metres /// /// @param mirrorAngleTop angle of inclination of upper mirror surface (radians) /// /// @param mirrorAngleBottom angle of inclination of lower mirror surface (radians) /// /// @param mTop supermirror m value of upper mirror /// /// @param mBottom supermirror m value of lower mirror /// /// /// @note the maths from this operation is a carbon copy of the nads code /// int threadsPerBlock = SANDMAN_CUDA_THREADS; int blocksPerGrid =(numElements + threadsPerBlock - 1) / threadsPerBlock; if(showCUDAsteps) printf("\tCUDA kernel reflection with %d blocks of %d threads\n", blocksPerGrid, threadsPerBlock); /* void device_sandReflection(float *d_pointsY, float *d_pointsTheta, const float *d_lambda, float *d_weight, const float mirrorY1, const float mirrorY2, const float mirrorAngle1, const float mirrorAngle2, const float mValue, const int numElements) */ hipLaunchKernelGGL(( global_sandReflection), dim3(blocksPerGrid), dim3(threadsPerBlock), 0, 0, d_pointsYV, d_pointsThetaV, d_lambdag, d_weightVg, mirrorYtop, mirrorYbottom, mirrorAngleTop, mirrorAngleBottom, mTop, mBottom, numElements); } void Sandman::sandRotation(const float angleH, const float angleV) { /// /// Calls the CUDA kernels to shift both horizontal and vertical phase spaces in the theta plane (rotation of beam) /// /// @param angleH horizontal angle of beam rotation (radians) /// /// @param angleV vertical angle of beam rotation (radians) /// /// \todo Check in NADS and document the positive / negative axes of this function. /// int threadsPerBlock = SANDMAN_CUDA_THREADS; int blocksPerGrid =(numElements + threadsPerBlock - 1) / threadsPerBlock; if(showCUDAsteps) printf("\tCUDA kernel rotation with %d blocks of %d threads\n", blocksPerGrid, threadsPerBlock); //static void global_rotation(float *d_pointsTheta, const float angle_radians, const int numElements) hipLaunchKernelGGL(( global_rotation), dim3(blocksPerGrid), dim3(threadsPerBlock), 0, 0, d_pointsThetaH, angleH, numElements); hipLaunchKernelGGL(( global_rotation), dim3(blocksPerGrid), dim3(threadsPerBlock), 0, 0, d_pointsThetaV, angleV, numElements); } void Sandman::sandRotationH(const float angleH) { /// /// Calls the CUDA kernel to shift the horizontal phase space in the theta plane (rotation of beam) /// /// @param angleH horizontal angle of beam rotation (radians) /// /// \todo Check in NADS and document the positive / negative axes of this function. 
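  ///
  /// Schematically this adds a constant to every horizontal divergence,
  /// theta -> theta + angleH, leaving positions untouched (sign convention
  /// still to be confirmed, as per the todo above).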
/// int threadsPerBlock = SANDMAN_CUDA_THREADS; int blocksPerGrid =(numElements + threadsPerBlock - 1) / threadsPerBlock; if(showCUDAsteps) printf("\tCUDA kernel rotationH with %d blocks of %d threads\n", blocksPerGrid, threadsPerBlock); //static void global_rotation(float *d_pointsTheta, const float angle_radians, const int numElements) hipLaunchKernelGGL(( global_rotation), dim3(blocksPerGrid), dim3(threadsPerBlock), 0, 0, d_pointsThetaH, angleH, numElements); } void Sandman::sandRotationV(const float angleV) { /// /// Calls the CUDA kernel to shift the vertical phase space in the theta plane (rotation of beam) /// /// @param angleV vertical angle of beam rotation (radians) /// /// \todo Check in NADS and document the positive / negative axes of this function. /// int threadsPerBlock = SANDMAN_CUDA_THREADS; int blocksPerGrid =(numElements + threadsPerBlock - 1) / threadsPerBlock; if(showCUDAsteps) printf("\tCUDA kernel rotationV with %d blocks of %d threads\n", blocksPerGrid, threadsPerBlock); //static void global_rotation(float *d_pointsTheta, const float angle_radians, const int numElements) hipLaunchKernelGGL(( global_rotation), dim3(blocksPerGrid), dim3(threadsPerBlock), 0, 0, d_pointsThetaV, angleV, numElements); } void Sandman::sandTranslationH(const float distance) { /// /// Calls the CUDA kernel to shift the horizontal phase space in the y plane (shift of beam axis) /// /// @param distance horizontal shift of beam (metres) /// /// \todo Check in NADS and document the positive / negative axes of this function. /// int threadsPerBlock = SANDMAN_CUDA_THREADS; int blocksPerGrid =(numElements + threadsPerBlock - 1) / threadsPerBlock; if(showCUDAsteps) printf("\tCUDA kernel translationH with %d blocks of %d threads\n", blocksPerGrid, threadsPerBlock); //static void global_translation(float *d_pointsY, const float distance_m, const int numElements) hipLaunchKernelGGL(( global_translation), dim3(blocksPerGrid), dim3(threadsPerBlock), 0, 0, d_pointsYH, distance, numElements); } void Sandman::sandTranslationV(const float distance) { /// /// Calls the CUDA kernel to shift the vertical phase space in the y plane (shift of beam axis) /// /// @param distance vertical shift of beam (metres) /// int threadsPerBlock = SANDMAN_CUDA_THREADS; int blocksPerGrid =(numElements + threadsPerBlock - 1) / threadsPerBlock; if(showCUDAsteps) printf("\tCUDA kernel translationV with %d blocks of %d threads\n", blocksPerGrid, threadsPerBlock); //static void global_translation(float *d_pointsY, const float distance_m, const int numElements) hipLaunchKernelGGL(( global_translation), dim3(blocksPerGrid), dim3(threadsPerBlock), 0, 0, d_pointsYV, distance, numElements); } void Sandman::sandRollPhaseSpace(const float theta) { /// /// Calls the CUDA kernel to rotate the beam around its own axis, mixing the phase space in both planes /// /// @param theta rotation angle (degrees) /// int threadsPerBlock = SANDMAN_CUDA_THREADS; int blocksPerGrid =(numElements + threadsPerBlock - 1) / threadsPerBlock; if(showCUDAsteps) printf("\tCUDA kernel rollPhaseSpace with %d blocks of %d threads\n", blocksPerGrid, threadsPerBlock); //static void global_translation(float *d_pointsY, const float distance_m, const int numElements) hipLaunchKernelGGL(( global_roll_phase_space), dim3(blocksPerGrid), dim3(threadsPerBlock), 0, 0, d_pointsYH, d_pointsThetaH, d_weightHg, d_pointsYV, d_pointsThetaV, d_weightVg, theta, numElements); } void Sandman::sandFreeSpaceCUDA(const float distance, const bool& verbose) { /// /// Free space is another name 
for skew operation. This models the flight /// of a neutron beam in the small angle limit by skewing the phase space. /// /// @param distance distance to transport the neutron beam (metres) /// //This could be a sub-module, so only display if the user explicitly calls //this function without flagging the verbose option if(verbose) std::cout << color_yellow << "FREE SPACE" << color_reset << std::endl; sandSkewCUDA(distance); } void Sandman::sandGuideElementCUDA( const float length, const float entr_width, const float exit_width, const float exit_offset_h, const float mLeft, const float mRight, const float entr_height, const float exit_height, const float exit_offset_v, const float mTop, const float mBottom ) { /// /// Models a single piece of neutron guide by calling associated class /// functions, which in turn call cuda kernels. /// /// @param length length of guide element in metres /// /// @param entr_width width of entrance of guide element in metres /// /// @param exit_width width of exit of guide element in metres /// /// @param exit_offset_h horizontal offset of beam centre at the exit, relative to the entrance, in metres. /// /// @param mLeft the supermirror m value of the left side of the guide (left when looking at sample from neutron point of view) /// /// @param mRight the supermirror m value of the right side of the guide (right when looking at sample from neutron point of view) /// /// @param entr_height height of entrance of guide element in metres /// /// @param exit_height height of exit of guide element in metres /// /// @param exit_offset_v vertical offset of beam centre at the exit, relative to the entrance, in metres. /// /// @param mTop the supermirror m value of the top side of the guide /// /// @param mBottom the supermirror m value of the bottom side of the guide /// const float guideAngleTop = atan( (exit_offset_v + 0.5*(exit_height - entr_height)) / length); const float guideAngleBot = atan( (exit_offset_v + 0.5*(entr_height - exit_height)) / length); const float guideAngleLeft = atan( (exit_offset_h + 0.5*(exit_width - entr_width)) / length); const float guideAngleRight = atan( (exit_offset_h + 0.5*(entr_width - exit_width)) / length); //Propagate the neutrons to the end of the guide first sandSkewCUDA(length); //Reflect the vertical plane //sandReflectionH(const float mirrorY1, const float mirrorY2, const float mirrorAngle1, const float mirrorAngle2, const float mTop, const float mBottom, const int numElements) // sandReflectionH( // 0.5f*exit_width + exit_offset_h, // mirror top // -0.5f*exit_width + exit_offset_h,// mirror bottom // guideAngleTop, // guideAngleBot, // mTop, // mBottom); // sandReflectionV( // 0.5f*exit_height + exit_offset_v, mirror top // -0.5f*exit_height + exit_offset_v,mirror bottom // guideAngleLeft, // guideAngleRight, // mLeft, // mRight); //ERROR - this was H, width, top, bottom! sandReflectionV( 0.5f*exit_height + exit_offset_v, //mirror top -0.5f*exit_height + exit_offset_v,//mirror bottom guideAngleTop, guideAngleBot, mTop, mBottom); //ERROR - this was V, height, left right! 
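      //(Corrected pairing, for clarity: the vertical-plane reflection above
      // takes the exit height with the top/bottom guide angles and
      // mTop/mBottom, while the horizontal-plane reflection below takes the
      // exit width with the left/right guide angles and mLeft/mRight.)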
sandReflectionH( 0.5f*exit_width + exit_offset_h, //mirror top -0.5f*exit_width + exit_offset_h,//mirror bottom guideAngleLeft, guideAngleRight, mLeft, mRight); } void Sandman::sandSimpleStraightGuide( const float length, const float width, const float height, const float mval ) { /// /// A simple utility function for a straight guide of constant cross section /// and a single m value /// /// @param length length of guide in metres /// /// @param width width of guide in metres /// /// @param height height of guide in metres /// /// @param mval the supermirror m value of all surfaces /// //Before we do anything else, kill neutrons missing the entrance of the guide. sandApertureCUDA(width, height, true); std::cout << color_yellow << "STRAIGHT GUIDE" << color_reset << std::endl; std::cout << "\twidth = " << width << std::endl; std::cout << "\theight = " << height << std::endl; std::cout << "\tlength = " << length << std::endl; std::cout << "\t m = " << mval << std::endl; sandGuideElementCUDA(length, width, width, 0.0, mval, mval, height, height, 0.0, mval, mval); std::cout << "\tStraight guide finished" << std::endl; } void Sandman::sandTaperedStraightGuide( const float length, const float entranceWidth, const float entranceHeight, const float exitWidth, const float exitHeight, const float mval ) { /// /// A simple utility function for a straight guide of linearly changing cross section /// and a single m value /// /// @param length length of guide in metres /// /// @param entranceWidth width of the entrance of the guide in metres /// /// @param entranceHeight height of the entrance of the guide in metres /// /// @param exitWidth width of the exit of the guide in metres /// /// @param exitHeight height of the exit of the guide in metres /// /// @param mval the supermirror m value of all surfaces /// //Before we do anything else, kill neutrons missing the entrance of the guide. sandApertureCUDA(entranceWidth, entranceHeight, true); std::cout << color_yellow << "STRAIGHT TAPERED GUIDE" << color_reset << std::endl; std::cout << "\tentrance width = " << entranceWidth << std::endl; std::cout << "\tentrance height = " << entranceHeight << std::endl; std::cout << "\t exit width = " << exitWidth << std::endl; std::cout << "\t exit height = " << exitHeight << std::endl; std::cout << "\t length = " << length << std::endl; std::cout << "\t m = " << mval << std::endl; sandGuideElementCUDA(length, entranceWidth, exitWidth, 0.0, mval, mval, entranceHeight, exitHeight, 0.0, mval, mval); std::cout << "\tStraight tapered guide finished" << std::endl; } void Sandman::sandCurvedGuide( const float length, const float sectionLength, const float width, const float height, const float mval, const float radius ) { /// /// A simple utility function for a curved guide of constant cross section /// and a single m value /// /// @param length length of guide in metres /// /// @param sectionLength length of guide sections in metres (typically 0.5, /// 1, or 2 metres in practice) /// /// @param width width of guide in metres /// /// @param height height of guide in metres /// /// @param mval the supermirror m value of all surfaces /// /// @param radius the radius of curvature of the guide in metres /// int i=0; //Before we do anything else, kill neutrons missing the entrance of the guide. 
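  //After the entrance aperture below, the arc is approximated by numSections
  //straight guide elements with a horizontal rotation of
  //2*asin(0.5*sectionLength/radius) between consecutive elements.  As a purely
  //illustrative example (hypothetical values): sectionLength = 0.5 m and
  //radius = 1000 m give roughly 2*asin(0.00025) = 5.0e-4 rad per section.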
sandApertureCUDA(width, height, true); std::cout << color_yellow << "CURVED GUIDE CHANNEL" << color_reset << std::endl; std::cout << "\tradius " << radius << " width " << width << " length " << length << " sectionLength " << sectionLength << std::endl; if(radius != 0.0) { //Break into sections int numSections = (int) round(length / sectionLength); float sectionAngle; //Special case - one section. //This is two tweaks of rotation surrounding a short, straight guide piece //the piece plane at the centre lies along the tangent of the curve at that point if(2.0*sectionLength > length) { sectionAngle = asin(0.5*length / radius); std::cout << "\tsection " << i+1 << " "; sandRotationH(sectionAngle); //sandSimpleStraightGuide(length, width, height, mval); sandGuideElementCUDA(length, width, width, 0.0, mval, mval, height, height, 0.0, mval, mval); sandRotationH(sectionAngle); std::cout << "\tCurved guide channel finished" << std::endl; return; } //Otherwise we do normal curved guide sectionAngle = 2.0*asin(0.5*sectionLength / radius); //Normal case, many sections of finite length for(i=0; i<numSections; i++) { if(i != numSections-1) //if we are not doing the last iteration so do a normal straight guide plus rotation { //sandSimpleStraightGuide(sectionLength, width, height, mval); sandGuideElementCUDA(sectionLength, width, width, 0.0, mval, mval, height, height, 0.0, mval, mval); std::cout << "\tsection " << i+1 << std::endl; sandRotationH(sectionAngle); } else //This is the last section, so take care with the length if it's not an integer multiple of sections //also, there is no rotation. The next module axis is aligned with this last piece, just as the //entrance is aligned with the previous axis { float lastPiece = length - (float)i * sectionLength; if(lastPiece <= 0.0) //i don't think that this can happen, but never mind break; std::cout << "\tsection " << i+1 << std::endl; //sandSimpleStraightGuide(lastPiece, width, height, mval); sandGuideElementCUDA(lastPiece, width, width, 0.0, mval, mval, height, height, 0.0, mval, mval); } } std::cout << "\tcurved guide channel finished" << std::endl; } } void Sandman::sandVerticallyCurvedGuide( const float length, const float sectionLength, const float width, const float height, const float mval, const float radius ) { /// /// A simple utility function for a curved guide of constant cross section /// and a single m value /// /// @param length length of guide in metres /// /// @param sectionLength length of guide sections in metres (typically 0.5, /// 1, or 2 metres in practice) /// /// @param width width of guide in metres /// /// @param height height of guide in metres /// /// @param mval the supermirror m value of all surfaces /// /// @param radius the radius of curvature of the guide in metres /// int i=0; //Before we do anything else, kill neutrons missing the entrance of the guide. sandApertureCUDA(width, height, true); std::cout << color_yellow << "VERTICALLY CURVED GUIDE CHANNEL" << color_reset << std::endl; if(radius != 0.0) { //Break into sections int numSections = (int) round(length / sectionLength); float sectionAngle; //Special case - one section. 
//This is two tweaks of rotation surrounding a short, straight guide piece //the piece plane at the centre lies along the tangent of the curve at that point if(2.0*sectionLength > length) { sectionAngle = asin(0.5*length / radius); std::cout << "\tsection " << i+1 << " "; sandRotationH(sectionAngle); //sandSimpleStraightGuide(length, width, height, mval); sandGuideElementCUDA(length, width, width, 0.0, mval, mval, height, height, 0.0, mval, mval); sandRotationV(sectionAngle); std::cout << "\tCurved guide channel finished" << std::endl; return; } //Otherwise we do normal curved guide sectionAngle = 2.0*asin(0.5*sectionLength / radius); //Normal case, many sections of finite length for(i=0; i<numSections; i++) { if(i != numSections-1) //if we are not doing the last iteration so do a normal straight guide plus rotation { //sandSimpleStraightGuide(sectionLength, width, height, mval); sandGuideElementCUDA(sectionLength, width, width, 0.0, mval, mval, height, height, 0.0, mval, mval); std::cout << "\tsection " << i+1 << std::endl; sandRotationV(sectionAngle); } else //This is the last section, so take care with the length if it's not an integer multiple of sections //also, there is no rotation. The next module axis is aligned with this last piece, just as the //entrance is aligned with the previous axis { float lastPiece = length - (float)i * sectionLength; if(lastPiece <= 0.0) //i don't think that this can happen, but never mind break; std::cout << "\tsection " << i+1 << std::endl; //sandSimpleStraightGuide(lastPiece, width, height, mval); sandGuideElementCUDA(lastPiece, width, width, 0.0, mval, mval, height, height, 0.0, mval, mval); } } std::cout << "\tvertically curved guide channel finished" << std::endl; } } void Sandman::ellipticOpeningGuide(const float length, const float exitWidth, const float exitHeight, const float focalPoint1H, const float focalPoint2H, const float focalPoint1V, const float focalPoint2V, const float mNumber, const int numSections) { //Models a focussing elliptic guide by using straight sections //Focal lengths are defined relative to the entrance plane float section_length; float pieceEntrWidth; float pieceExitWidth; float pieceEntrHeight; float pieceExitHeight; float pieceStartx; float pieceEndx; //float focalpoint1V, focalpoint2V; //float focalpoint1H, focalpoint2H; const char* filenameH = "hEllipseOpeningProfile.csv"; const char* filenameV = "vEllipseOpeningProfile.csv"; std::cout << color_yellow << "OPENING HALF ELLIPSE" << color_reset << std::endl; std::ofstream dataFileH; dataFileH.open(filenameH); if(dataFileH.fail()) { std::cerr << "ERROR opening file " << filenameH << std::endl; return; } std::ofstream dataFileV; dataFileV.open(filenameV); if(dataFileV.fail()) { std::cerr << "ERROR opening file " << filenameV << std::endl; return; } int i; //Break guide into sections section_length = length / (float) numSections; std::cout << "\tLength " << length << " m and exit width = " << exitWidth << " m focus2 at " << focalPoint2H << " " << focalPoint2V << " and focus1 at " << focalPoint1H << " " << focalPoint1V << " formed by " << numSections << " sections of " << section_length << " m" << std::endl; //Loop over converging guide approximations printing out the widths #ifdef DEBUG std::cout << "\tProfile:" << std::endl; std::cout << "\txpos width" << std::endl; #endif for (i = 0; i < numSections; i++) { pieceStartx = section_length * (float) i; pieceEndx = section_length * (float) (i + 1); //Take JNADS curves and put these into two dimensions pieceEntrWidth = 2.0f * 
elliptic_curve(pieceStartx, focalPoint1H, focalPoint2H, exitWidth); pieceExitWidth = 2.0f * elliptic_curve(pieceEndx, focalPoint1H, focalPoint2H, exitWidth); #ifdef DEBUG std::cout << "\t" << pieceStartx << " " << pieceEntrWidth << " H" << std::endl; #endif dataFileH << pieceStartx << "," << pieceEntrWidth << std::endl; pieceEntrHeight = 2.0f * elliptic_curve(pieceStartx, focalPoint1V, focalPoint2V, exitHeight); pieceExitHeight = 2.0f * elliptic_curve(pieceEndx, focalPoint1V, focalPoint2V, exitHeight); #ifdef DEBUG std::cout << "\t" << pieceStartx << " " << pieceEntrHeight << " V" << std::endl; #endif dataFileV << pieceStartx << "," << pieceEntrHeight << std::endl; if (i == (numSections - 1)) { #ifdef DEBUG std::cout << "\t" << pieceEndx << " exit " << pieceExitWidth << std::endl; std::cout << "\t" << pieceEndx << " exit " << pieceExitHeight << std::endl; #endif dataFileH << pieceEndx << "," << pieceExitWidth << std::endl; dataFileV << pieceEndx << "," << pieceExitHeight << std::endl; } sandGuideElementCUDA( section_length, pieceEntrWidth, pieceExitWidth, 0.0f, mNumber, mNumber, pieceEntrHeight, pieceExitHeight, 0.0f, mNumber, mNumber ); } std::cout << "\tElliptic opening guide finished" << std::endl; dataFileH.close(); dataFileV.close(); } void Sandman::ellipticClosingGuide(const float length, const float entrWidth, const float entrHeight, const float focalPoint1H, const float focalPoint2H, const float focalPoint1V, const float focalPoint2V, const float mNumber, const int numSections) { //Models a focussing elliptic guide by using straight sections float section_length; float pieceEntrWidth; float pieceExitWidth; float pieceEntrHeight; float pieceExitHeight; float pieceStartx; float pieceEndx; // float focalpoint1V, focalpoint2V; //float focalpoint1H, focalpoint2H; const char* filenameH = "hEllipseClosingProfile.csv"; const char* filenameV = "vEllipseClosingProfile.csv"; std::cout << color_yellow << "CLOSING HALF ELLIPSE" << color_reset << std::endl; std::ofstream dataFileH; dataFileH.open(filenameH); if(dataFileH.fail()) { std::cerr << "ERROR opening file " << filenameH << std::endl; return; } std::ofstream dataFileV; dataFileV.open(filenameV); if(dataFileV.fail()) { std::cerr << "ERROR opening file " << filenameV << std::endl; return; } int i; //Break guide into sections section_length = length / (float) numSections; std::cout << "\tLength " << length << " m and focus2 at " << focalPoint2H << " " << focalPoint2V << " and focus1 at " << focalPoint1H << " " << focalPoint1V << " formed by " << numSections << " sections of m=" << mNumber; //Loop over converging guide approximations printing out the widths #ifdef DEBUG std::cout << "\tProfile:" << std::endl; std::cout << "\txpos width" << std::endl; #endif for (i = 0; i < numSections; i++) { pieceStartx = section_length * (float) i; pieceEndx = section_length * (float) (i + 1); //Take JNADS curves and put these into two dimensions pieceEntrWidth = 2.0f * elliptic_curve(pieceStartx, focalPoint1H, focalPoint2H, entrWidth); pieceExitWidth = 2.0f * elliptic_curve(pieceEndx, focalPoint1H, focalPoint2H, entrWidth); #ifdef DEBUG std::cout << "\t" << pieceStartx << " " << pieceEntrWidth << " H" << std::endl; #endif dataFileH << pieceStartx << "," << pieceEntrWidth << std::endl; pieceEntrHeight = 2.0f * elliptic_curve(pieceStartx, focalPoint1V, focalPoint2V, entrHeight); pieceExitHeight = 2.0f * elliptic_curve(pieceEndx, focalPoint1V, focalPoint2V, entrHeight); #ifdef DEBUG std::cout << "\t" << pieceStartx << " " << pieceEntrHeight << " V" << 
std::endl; #endif dataFileV << pieceStartx << "," << pieceEntrHeight << std::endl; if (i == numSections - 1) { #ifdef DEBUG std::cout << "\t" << pieceEndx << " " << pieceExitWidth << std::endl; std::cout << "\t" << pieceEndx << " " << pieceExitHeight << std::endl; #endif dataFileH << pieceEndx << "," << pieceExitWidth << std::endl; dataFileV << pieceEndx << "," << pieceExitHeight << std::endl; } sandGuideElementCUDA(section_length, pieceEntrWidth, pieceExitWidth, 0.0f, mNumber, mNumber, pieceEntrHeight, pieceExitHeight, 0.0f, mNumber, mNumber ); } std::cout << "\tElliptic closing guide finished" << std::endl; dataFileH.close(); dataFileV.close(); } void Sandman::parabolicOpeningGuide(const float length, const float exitWidth, const float exitHeight, const float focalPointH, const float focalPointV, const float mNumber, const int numSections) { //Models a focussing parabolic guide by using straight sections //Focal lengths are defined relative to the entrance plane float section_length; float pieceEntrWidth; float pieceExitWidth; float pieceEntrHeight; float pieceExitHeight; float pieceStartx; float pieceEndx; // float focalpoint1V, focalpoint2V; //float focalpoint1H, focalpoint2H; const char* filenameH = "hParabolaOpeningProfile.csv"; const char* filenameV = "vParabolaOpeningProfile.csv"; std::cout << color_yellow << "OPENING PARABOLA" << color_reset << std::endl; std::ofstream dataFileH; dataFileH.open(filenameH); if(dataFileH.fail()) { std::cerr << "ERROR opening file " << filenameH << std::endl; return; } std::ofstream dataFileV; dataFileV.open(filenameV); if(dataFileV.fail()) { std::cerr << "ERROR opening file " << filenameV << std::endl; return; } int i; //Break guide into sections section_length = length / (float) numSections; std::cout << "\tLength " << length << " m and exit width = " << exitWidth << " m focus H at " << focalPointH << " and focus V at " << focalPointV << " formed by " << numSections << " sections of " << section_length << " m" << std::endl; //Loop over converging guide approximations printing out the widths #ifdef DEBUG std::cout << "\tProfile:" << std::endl; std::cout << "\txpos width" << std::endl; #endif for (i = 0; i < numSections; i++) { pieceStartx = section_length * (float) i; pieceEndx = section_length * (float) (i + 1); //Take JNADS curves and put these into two dimensions pieceEntrWidth = 2.0f * parabolic_opening_curve(pieceStartx, length, focalPointH, exitWidth); pieceExitWidth = 2.0f * parabolic_opening_curve(pieceEndx, length, focalPointH, exitWidth); #ifdef DEBUG std::cout << "\t" << pieceStartx << " " << pieceEntrWidth << " H" << std::endl; #endif dataFileH << pieceStartx << "," << pieceEntrWidth << std::endl; pieceEntrHeight = 2.0f * parabolic_opening_curve(pieceStartx, length, focalPointV, exitHeight); pieceExitHeight = 2.0f * parabolic_opening_curve(pieceEndx, length, focalPointV, exitHeight); #ifdef DEBUG std::cout << "\t" << pieceStartx << " " << pieceEntrHeight << " V" << std::endl; #endif dataFileV << pieceStartx << "," << pieceEntrHeight << std::endl; if (i == (numSections - 1)) { #ifdef DEBUG std::cout << "\t" << pieceEndx << " exit " << pieceExitWidth << std::endl; std::cout << "\t" << pieceEndx << " exit " << pieceExitHeight << std::endl; #endif dataFileH << pieceEndx << "," << pieceExitWidth << std::endl; dataFileV << pieceEndx << "," << pieceExitHeight << std::endl; } sandGuideElementCUDA( section_length, pieceEntrWidth, pieceExitWidth, 0.0f, mNumber, mNumber, pieceEntrHeight, pieceExitHeight, 0.0f, mNumber, mNumber ); } std::cout << 
"\tParabolic opening guide finished" << std::endl; dataFileH.close(); dataFileV.close(); } void Sandman::parabolicClosingGuide(const float length, const float entrWidth, const float entrHeight, const float focalPointH, const float focalPointV, const float mNumber, const int numSections) { //Models a focussing parabolic guide by using straight sections float section_length; float pieceEntrWidth; float pieceExitWidth; float pieceEntrHeight; float pieceExitHeight; float pieceStartx; float pieceEndx; // float focalpoint1V, focalpoint2V; //float focalpoint1H, focalpoint2H; const char* filenameH = "hParabolaClosingProfile.csv"; const char* filenameV = "vParabolaClosingProfile.csv"; std::cout << color_yellow << "CLOSING PARABOLA" << color_reset << std::endl; std::ofstream dataFileH; dataFileH.open(filenameH); if(dataFileH.fail()) { std::cerr << "ERROR opening file " << filenameH << std::endl; return; } std::ofstream dataFileV; dataFileV.open(filenameV); if(dataFileV.fail()) { std::cerr << "ERROR opening file " << filenameV << std::endl; return; } int i; //Break guide into sections section_length = length / (float) numSections; std::cout << "\tLength " << length << " m and focus H at " << focalPointH << " " << " and focus V at " << focalPointV << " " << " formed by " << numSections << " sections of m=" << mNumber; //Loop over converging guide approximations printing out the widths #ifdef DEBUG std::cout << "\tProfile:" << std::endl; std::cout << "\txpos width" << std::endl; #endif for (i = 0; i < numSections; i++) { pieceStartx = section_length * (float) i; pieceEndx = section_length * (float) (i + 1); //Take JNADS curves and put these into two dimensions pieceEntrWidth = 2.0f * parabolic_closing_curve(pieceStartx, focalPointH, entrWidth); pieceExitWidth = 2.0f * parabolic_closing_curve(pieceEndx, focalPointH, entrWidth); #ifdef DEBUG std::cout << "\t" << pieceStartx << " " << pieceEntrWidth << " H" << std::endl; #endif dataFileH << pieceStartx << "," << pieceEntrWidth << std::endl; pieceEntrHeight = 2.0f * parabolic_closing_curve(pieceStartx, focalPointV, entrHeight); pieceExitHeight = 2.0f * parabolic_closing_curve(pieceEndx, focalPointV, entrHeight); #ifdef DEBUG std::cout << "\t" << pieceStartx << " " << pieceEntrHeight << " V" << std::endl; #endif dataFileV << pieceStartx << "," << pieceEntrHeight << std::endl; if (i == numSections - 1) { #ifdef DEBUG std::cout << "\t" << pieceEndx << " " << pieceExitWidth << std::endl; std::cout << "\t" << pieceEndx << " " << pieceExitHeight << std::endl; #endif dataFileH << pieceEndx << "," << pieceExitWidth << std::endl; dataFileV << pieceEndx << "," << pieceExitHeight << std::endl; } sandGuideElementCUDA(section_length, pieceEntrWidth, pieceExitWidth, 0.0f, mNumber, mNumber, pieceEntrHeight, pieceExitHeight, 0.0f, mNumber, mNumber ); } std::cout << "\tParabolic closing guide finished" << std::endl; dataFileH.close(); dataFileV.close(); } void Sandman::sandHorizontalBender( const float length, const float width, const float height, const int numChannels, const float waferThickness, const float radius, const float mval ) { //This is a one-off, but malloc is expensive to use repetitively, so use //array of dedicated channel number floats const float nChannels = (float) numChannels; std::cout << color_yellow << "MULTI-CHANNEL HORIZONTAL BENDER" << color_reset << std::endl; if(nChannels < 1.0) { std::cerr << color_red << "ERROR:" << color_reset << " attempt to use horizontal bender with < 1 channels" << std::endl; exit(1); } //Find the width of the empty space 
in a single channel const float opticalWidth = (width / nChannels) - 0.5*waferThickness; if(opticalWidth < 0.001) { std::cerr << color_red << "ERROR:" << color_reset << " optical width is less than 1 mm in horizontal bender module (value is " << opticalWidth << ")" << std::endl; std::cerr << "\t width = " << width << "; nChannels = " << nChannels << std::endl; exit(1); } std::cout << nChannels << " channel bender " << width << " wide and of length " << length << " from wafers of thickness " << waferThickness << " and channels " << opticalWidth << " wide" << std::endl; //Kill neutrons missing the entrance of the system sandApertureCUDA(width,height, true); //First squeeze the neutrons into the channel sandSqueezeHorizontalBenderChannels(width, nChannels, waferThickness); //Propagate a normal curved guide with shorter, 20 cm long pieces sandCurvedGuide(length, 0.2f, opticalWidth, height, mval, radius); //UnSqueeze the neutrons out of the channel sandUnSqueezeHorizontalBenderChannels(width, nChannels, waferThickness); } void Sandman::sandVerticalBender( const float length, const float width, const float height, const int numChannels, const float waferThickness, const float radius, const float mval ) { //This is a one-off, but malloc is expensive to use repetitively, so use //array of dedicated channel number floats const float nChannels = (float) numChannels; std::cout << color_yellow << "MULTI-CHANNEL VERTICAL BENDER" << color_reset << std::endl; if(nChannels < 1.0) { std::cerr << color_red << "ERROR:" << color_reset << " attempt to use vertical bender with < 1 channels" << std::endl; exit(1); } const float opticalHeight = (height / nChannels) - 0.5*waferThickness; if(opticalHeight < 0.001) { std::cerr << color_red << "ERROR:" << color_reset << " optical height is less than 1 mm in vertical bender module (value is " << opticalHeight << ")" << std::endl; std::cerr << "\t height = " << height << "; nChannels = " << nChannels << std::endl; exit(1); } std::cout << nChannels << " channel bender " << height << " tall and of length " << length << " from wafers of thickness " << waferThickness << std::endl; //Kill neutrons missing the entrance of the system sandApertureCUDA(width, height, true); //First squeeze the neutrons into the channel sandSqueezeVerticalBenderChannels(height, nChannels, waferThickness); //Propagate a normal curved guide with 20 cm long pieces sandVerticallyCurvedGuide(length, 0.2f, width, opticalHeight, mval, radius); //UnSqueeze the neutrons out of the channel sandUnSqueezeVerticalBenderChannels(height, nChannels, waferThickness); } void Sandman::sample( const float width, const float height, const float win_width, const float win_height, const float hoffset, const float voffset, const float win_dist, const float lambdaMin, const float lambdaMax, const std::string& monitorNameStem) { /// /// Generates the initial beam phase space from the given requirements. 
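  ///
  /// A minimal illustrative call (the instance name and all parameter values
  /// below are hypothetical, not defaults):
  ///
  ///   beam.sample(0.01f, 0.01f,   // 1 cm x 1 cm sample
  ///               0.03f, 0.03f,   // 3 cm x 3 cm guide exit window
  ///               0.0f, 0.0f,     // no horizontal or vertical offset
  ///               0.5f,           // 0.5 m from guide exit to sample
  ///               2.0f, 6.0f,     // 2 - 6 angstrom wavelength band
  ///               "");            // empty stem: no sample-position monitor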
/// /// @param width width of sample in metres /// /// @param height height of sample in metres /// /// @param win_width the width of the beam at the exit of the guide in metres /// /// @param win_height the height of the beam at the exit of the guide in metres /// /// @param hoffset the horizontal offset of the sample relative to the beam /// axis (metres) positive is left as viewed from the guide exit --- this is /// almost certainly zero in most cases /// /// @param vertical offset of the sample relative to the beam axis (metres) /// positive being up --- this is almost certainly zero in most cases /// /// @param win_dist the distance from the guide exit to the sample position /// /// @param lambdaMin the minimum neutron wavelength needed at the sample position /// /// @param lambdaMax the maximum neutron wavelength needed at the sample position /// const float yMaxH = hoffset + 0.5*win_width; const float yMinH = hoffset - 0.5*win_width; const float yMaxV = voffset + 0.5*win_height; const float yMinV = voffset - 0.5*win_height; const float thetaMaxH = atan( (0.5*width + 0.5*win_width + hoffset) / win_dist); const float thetaMinH = atan( (-0.5*width - 0.5*win_width + hoffset) / win_dist); const float thetaMaxV = atan( (0.5*height + 0.5*win_height + voffset) / win_dist); const float thetaMinV = atan( (-0.5*height - 0.5*win_height + voffset) / win_dist); const float thetaMaxPrimeH = atan( (0.5*width - 0.5*win_width + hoffset) / win_dist); const float thetaMinPrimeH = atan( (-0.5*width + 0.5*win_width + hoffset) / win_dist); const float thetaMaxPrimeV = atan( (0.5*height - 0.5*win_height + voffset) / win_dist); const float thetaMinPrimeV = atan( (-0.5*height + 0.5*win_height + voffset) / win_dist); // The next part comes from // http://mathworld.wolfram.com/TrianglePointPicking.html // v1 is along x // (theta) axis, v2 is up the right diagonal line const float oxH = thetaMinH; const float oyH = yMinH; const float v1xH = thetaMaxPrimeH - thetaMinH; // v1y is zero const float v2xH = thetaMaxH - thetaMaxPrimeH; const float v2yH = yMaxH - yMinH; const float oxV = thetaMinV; const float oyV = yMinV; const float v1xV = thetaMaxPrimeV - thetaMinV; // v1y is zero const float v2xV = thetaMaxV - thetaMaxPrimeV; const float v2yV = yMaxV - yMinV; //Normalisation of solid angle (NOTE: moderator units are per cm2!) const float a1 = 100.0f * 100.0f * width * height; const float a2 = 100.0f * 100.0f * win_width * win_height; const float deltaAdeltaO = a1 * a2 / (100.0f * 100.0f * win_dist*win_dist); std::cout << "\tSolid angle normalisation: " << deltaAdeltaO << std::endl; deltaLambdag = fabs(lambdaMax-lambdaMin); sourceDeltaLambda = deltaLambdag; if(deltaLambdag < 0.0001) // Zero wavelength band is going to screw up the // maths. Put in an artificial, small band // hidden from the user deltaLambdag = 0.01; /// \todo Replace this maxElements with the memory-dependent check if(numElements > maxElements) { std::cerr << "\tMaximum number of elements exceeded." 
<< std::endl; exit(1); } //Generate 1 array of random numbers for wavelength generateOneRandomArray(); int threadsPerBlock = SANDMAN_CUDA_THREADS; int blocksPerGrid =(numElements + threadsPerBlock - 1) / threadsPerBlock; if(showCUDAsteps) printf("\tCUDA kernel sample wavelength allocation with %d blocks of %d threads\n", blocksPerGrid, threadsPerBlock); hipLaunchKernelGGL(( global_sandAllocateWavelength), dim3(blocksPerGrid), dim3(threadsPerBlock), 0, 0, d_lambdag, d_r1g, lambdaMin, deltaLambdag, numElements); // printf("CUDA kernel sample Vertical wavelength allocation with %d blocks of %d threads\n", blocksPerGrid, // threadsPerBlock); // global_sandAllocateWavelength<<<blocksPerGrid, threadsPerBlock>>> // (d_lambdaVg, d_r2g, lambdaMin, deltaLambdag, numElements); // Report to user the memory usage for the work size_t freeMemBytes, totalMemBytes; checkCudaErrors(hipMemGetInfo( &freeMemBytes, &totalMemBytes)) ; int freeMem = (int)freeMemBytes ; int totalMem = (int)totalMemBytes ; int allocMem = totalMem - freeMem ; printf("\tGPU mem: alloc = %i MB, free = %i MB, tot = %i MB\n", allocMem/1024/1024, freeMem/1024/1024, totalMem/1024/1024); printf("\t-------------------------\n"); printf("\tMemory used: %i percent\n", 100*allocMem/totalMem); printf("\t-------------------------\n"); //Generate 2 arrays of random numbers generateBothRandomArrays(); if(showCUDAsteps) printf("\tCUDA kernel sample Horizontal with %d blocks of %d threads\n", blocksPerGrid, threadsPerBlock); hipLaunchKernelGGL(( global_sandSampleCUDA), dim3(blocksPerGrid), dim3(threadsPerBlock), 0, 0, d_pointsYH, d_pointsThetaH, d_weightHg, d_r1g, d_r2g, oxH, oyH, v1xH, v2xH, v2yH, numElements); //Generate 2 new arrays of random numbers generateBothRandomArrays(); if(showCUDAsteps) printf("\tCUDA kernel sample Vertical with %d blocks of %d threads\n", blocksPerGrid, threadsPerBlock); hipLaunchKernelGGL(( global_sandSampleCUDA), dim3(blocksPerGrid), dim3(threadsPerBlock), 0, 0, d_pointsYV, d_pointsThetaV, d_weightVg, d_r1g, d_r2g, oxV, oyV, v1xV, v2xV, v2yV, numElements); // Initialise trajectory brightness with the solid angle calculation if(showCUDAsteps) std::cout << "\tCUDA kernel initArray on moderator flux with " << blocksPerGrid << " blocks of " << threadsPerBlock << " threads" << std::endl; hipLaunchKernelGGL(( global_initArray), dim3(blocksPerGrid), dim3(threadsPerBlock), 0, 0, d_modFlux, deltaAdeltaO, numElements); //Perhaps the user wants to get a phase space map at the sample position. 
  //If that is the case, we need to do a little back and forth fudgery
  if(!monitorNameStem.empty())
    {
      //Here we go backwards to the sample plane, snapshot a monitor, then
      //go forwards back to the guide entrance, and take an insignificant
      //rounding error hit
      std::cout << "\tGenerating snapshot for sample position monitor" << std::endl;

      //Go backwards
      sandFreeSpaceCUDA(-win_dist);

      //Create phase space snapshots
      phaseSpaceMapH( (monitorNameStem + "Horizontal2D.csv").c_str(),
                      yMinH, yMaxH, thetaMinH, thetaMaxH );

      //Go forwards again before continuing with the rest
      sandFreeSpaceCUDA(win_dist);
    }
}



//////////////////////////////////////////
//                                      //
//          Private Functions           //
//                                      //
//                 and                  //
//                                      //
//               kernels                //
//                                      //
//////////////////////////////////////////



///
/// Unit test setup function to seed the Y values
///
/// @param ypoints pointer to host memory that needs to be copied over
///
void Sandman::unitTestInitPhaseSpace(const float *ypoints, const float *pointsTheta, const float *weight)
{
  //Copy the host arrays to the device (using the horizontal plane) to overwrite its state
  checkCudaErrors(hipMemcpy(d_pointsYH, ypoints, 32*sizeof(float), hipMemcpyHostToDevice));
  checkCudaErrors(hipMemcpy(d_pointsThetaH, pointsTheta, 32*sizeof(float), hipMemcpyHostToDevice));
  checkCudaErrors(hipMemcpy(d_weightHg, weight, 32*sizeof(float), hipMemcpyHostToDevice));
}


void Sandman::unitTestGetPhaseSpace(float *ypoints, float *pointsTheta, float *weight)
{
  //Copy the device arrays (horizontal plane) back to the host for inspection
  checkCudaErrors(hipMemcpy(ypoints, d_pointsYH, 32*sizeof(float), hipMemcpyDeviceToHost));
  checkCudaErrors(hipMemcpy(pointsTheta, d_pointsThetaH, 32*sizeof(float), hipMemcpyDeviceToHost));
  checkCudaErrors(hipMemcpy(weight, d_weightHg, 32*sizeof(float), hipMemcpyDeviceToHost));
}


void Sandman::displayWelcome(void)
{
  ///
  /// Presents welcome message when called by constructor.
  ///

  std::cout << "****************************************" << std::endl;
  std::cout << "* *" << std::endl;
  std::cout << "* " << color_red << "SANDMAN" << color_reset << " *" << std::endl;
  std::cout << "* *" << std::endl;
  std::cout << "* Implementation of Neutron beam *" << std::endl;
  std::cout << "* transport on GPU in C++ and CUDA *" << std::endl;
  std::cout << "* *" << std::endl;
  std::cout << "* " << color_yellow << "[email protected]" << color_reset << " 2016 *" << std::endl;
  std::cout << "* *" << std::endl;
  std::cout << "* Released under BSD license *" << std::endl;
  std::cout << "* *" << std::endl;
  std::cout << "****************************************" << std::endl;
}


void Sandman::generateRandomArray(float *array)
{
  ///
  /// Fills the given device array with uniformly distributed random numbers on the GPU.
/// #ifdef DEBUG hipError_t errSync = hipGetLastError(); hipError_t errAsync = hipDeviceSynchronize(); if (errSync != hipSuccess) std::cout << "Sync kernel error: " << hipGetErrorString(errSync) << std::endl; if (errAsync != hipSuccess) std::cout << "Async kernel error: " << hipGetErrorString(errAsync) << std::endl; #endif printf("\tGenerating random numbers on GPU\n"); checkCudaErrors(hiprandGenerateUniform(prngGPU, (float *) array, numElements)); #ifdef DEBUG if (errSync != hipSuccess) std::cout << "Sync kernel error: " << hipGetErrorString(errSync) << std::endl; if (errAsync != hipSuccess) std::cout << "Async kernel error: " << hipGetErrorString(errAsync) << std::endl; #endif } void Sandman::zeroHistogram1D(void) { printf("\tZeroing 1D histogram\n"); int threadsPerBlock = SANDMAN_CUDA_THREADS; int blocksPerGrid =(100 + threadsPerBlock - 1) / threadsPerBlock; if(showCUDAsteps) std::cout << "\tCUDA kernel zero 1d histogram[" << 100 << "] with " << blocksPerGrid << " blocks of " << threadsPerBlock << " threads" << std::endl; //void global_sandZeroHistogram1D(float *d_histogram, const int numElements) hipLaunchKernelGGL(( global_sandZeroHistogram1D), dim3(blocksPerGrid), dim3(threadsPerBlock), 0, 0, d_histogram1D); #ifdef DEBUG hipError_t errSync = hipGetLastError(); hipError_t errAsync = hipDeviceSynchronize(); if (errSync != hipSuccess) std::cout << "Sync kernel error: " << hipGetErrorString(errSync) << std::endl; if (errAsync != hipSuccess) std::cout << "Async kernel error: " << hipGetErrorString(errAsync) << std::endl; #endif } void Sandman::zeroHistogram2D(void) { printf("\tZeroing 2D histogram.\n"); int threadsPerBlock = SANDMAN_CUDA_THREADS; int blocksPerGrid =(100*100 + threadsPerBlock - 1) / threadsPerBlock; if(showCUDAsteps) printf("\tCUDA kernel zeroHistogram2D with %d blocks of %d threads\n", blocksPerGrid, threadsPerBlock); //void global_sandZeroHistogram1D(float *d_histogram, const int numElements) hipLaunchKernelGGL(( global_sandZeroHistogram2D), dim3(blocksPerGrid), dim3(threadsPerBlock), 0, 0, (float (*)[100])d_histogram2D); } float Sandman::arrayMinimum(const float *d_array, float *d_answer) { float h_answer[1]; int threadsPerBlock = SANDMAN_CUDA_THREADS; int blocksPerGrid =(numElements + threadsPerBlock - 1) / threadsPerBlock; if(showCUDAsteps) printf("\tCUDA kernel arrayMin %d blocks of %d threads\n", blocksPerGrid, threadsPerBlock); // // Zero the count on the host // h_answer[0] = 10000.0f; // // Copy the zero total to device memory // checkCudaErrors(hipMemcpy(d_answer, h_answer, sizeof(float), hipMemcpyHostToDevice)); #ifdef DEBUG hipError_t errSync = hipGetLastError(); hipError_t errAsync = hipDeviceSynchronize(); if (errSync != hipSuccess) std::cout << "Sync kernel error: " << hipGetErrorString(errSync) << std::endl; if (errAsync != hipSuccess) std::cout << "Async kernel error: " << hipGetErrorString(errAsync) << std::endl; #endif printf("\tCounting up phase space\n"); //void global_countNeutrons(float *numNeutrons, const float *weight, const int numElements) hipLaunchKernelGGL(( global_arrayMinimum), dim3(blocksPerGrid), dim3(threadsPerBlock), 0, 0, d_array, d_answer, numElements); #ifdef DEBUG if (errSync != hipSuccess) std::cout << "Sync kernel error: " << hipGetErrorString(errSync) << std::endl; if (errAsync != hipSuccess) std::cout << "Async kernel error: " << hipGetErrorString(errAsync) << std::endl; #endif //Copy answer out of device memory for host reporting checkCudaErrors(hipMemcpy(h_answer, d_answer, sizeof(float), hipMemcpyDeviceToHost)); #ifdef DEBUG 
if (errSync != hipSuccess) std::cout << "Sync kernel error: " << hipGetErrorString(errSync) << std::endl; if (errAsync != hipSuccess) std::cout << "Async kernel error: " << hipGetErrorString(errAsync) << std::endl; #endif printf("Got %f minimum\n", h_answer[0]); return(h_answer[0]); } float Sandman::arrayMaximum(const float *d_array, float *d_answer) { float h_answer[1]; //for debugging int threadsPerBlock = SANDMAN_CUDA_THREADS; int blocksPerGrid =(numElements + threadsPerBlock - 1) / threadsPerBlock; if(showCUDAsteps) printf("\tCUDA kernel arrayMax %d blocks of %d threads\n", blocksPerGrid, threadsPerBlock); // // Zero the count on the host // h_answer[0] = -10000.0f; // // Copy the zero total to device memory // checkCudaErrors(hipMemcpy(d_answer, h_answer, sizeof(float), hipMemcpyHostToDevice)); hipLaunchKernelGGL(( global_arrayMaximum), dim3(blocksPerGrid), dim3(threadsPerBlock), 0, 0, d_array, d_answer, numElements); //Copy total out of device memory for host reporting checkCudaErrors(hipMemcpy(h_answer, d_answer, sizeof(float), hipMemcpyDeviceToHost)); printf("Got %f maximum\n", h_answer[0]); return(h_answer[0]); } void Sandman::sandGetPhaseSpaceH(float *h_pointsY, float *h_pointsTheta, float *h_weight) { // Copy the data off the card to make sure it makes sense back at the host checkCudaErrors(hipMemcpy(h_pointsY, d_pointsYH, numElements * sizeof(float), hipMemcpyDeviceToHost)); checkCudaErrors(hipMemcpy(h_pointsTheta, d_pointsThetaH, numElements * sizeof(float), hipMemcpyDeviceToHost)); checkCudaErrors(hipMemcpy(h_weight, d_weightHg, numElements * sizeof(float), hipMemcpyDeviceToHost)); } void Sandman::sandGetPhaseSpaceV(float *h_pointsY, float *h_pointsTheta, float *h_weight) { // Copy the data off the card to make sure it makes sense back at the host checkCudaErrors(hipMemcpy(h_pointsY, d_pointsYV, numElements * sizeof(float), hipMemcpyDeviceToHost)); checkCudaErrors(hipMemcpy(h_pointsTheta, d_pointsThetaV, numElements * sizeof(float), hipMemcpyDeviceToHost)); checkCudaErrors(hipMemcpy(h_weight, d_weightVg, numElements * sizeof(float), hipMemcpyDeviceToHost)); } void Sandman::sandDebugPosPos(float *h_pointsH, float *h_weightH, float *h_pointsV, float *h_weightV) { // Copy the data off the card to make sure it makes sense back at the host checkCudaErrors(hipMemcpy(h_pointsH, d_pointsYH, numElements * sizeof(float), hipMemcpyDeviceToHost)); checkCudaErrors(hipMemcpy(h_weightH, d_weightHg, numElements * sizeof(float), hipMemcpyDeviceToHost)); checkCudaErrors(hipMemcpy(h_pointsV, d_pointsYV, numElements * sizeof(float), hipMemcpyDeviceToHost)); checkCudaErrors(hipMemcpy(h_weightV, d_weightVg, numElements * sizeof(float), hipMemcpyDeviceToHost)); } ///Calculates which channel number (left to right) the neutron sits in, then ///shifts all phase space to fit in a single version of that channel so that ///the curved guide module can be used to do the transport for a multi-channel ///bender. Then the opposite function "unSqueeze..." 
reverses this process void Sandman::sandSqueezeHorizontalBenderChannels(const float width, const float numChannels, const float waferThickness) { //Channel width in each case includes one wafer thickness, which is attenuated if the neutron hits it float channelWidth = (width + waferThickness) / numChannels; //(this calc has last channel wafer inside the guide substrate mathematically) //Device now computes //relativeY = ypos[i] + width/2.0; //channelNumber = roundf( relativeY / channelwidth ); //That is stored in tempArray //Then we adjust the position to be within a single channel of the right thickness for the OPTICS //ypos[i] = ypos[i] + width/2.0f; //ypos[i] = ypos[i] / channelNumber; //ypos[i] = ypos[i] - 0.5f * (channelWidth-waferThickness); int threadsPerBlock = SANDMAN_CUDA_THREADS; int blocksPerGrid =(numElements + threadsPerBlock - 1) / threadsPerBlock; if(showCUDAsteps) printf("\tCUDA kernel squeeze h bender with %d blocks of %d threads\n", blocksPerGrid, threadsPerBlock); hipLaunchKernelGGL(( global_squeezeBenderChannel), dim3(blocksPerGrid), dim3(threadsPerBlock), 0, 0, d_pointsYH, d_tempArray, width, channelWidth, waferThickness, numElements); } ///"Squeeze...() calculates which channel number (left to right) the neutron ///sits in, then shifts all phase space to fit in a single version of that ///channel so that the curved guide module can be used to do the transport for ///a multi-channel bender. This function "unSqueeze..." reverses this process ///after the bender has been done void Sandman::sandUnSqueezeHorizontalBenderChannels(const float width, const float numChannels, const float waferThickness) { //Channel width in this case includes one wafer on the far side float channelWidth = (width + waferThickness) / numChannels; //(this calc has last channel wafer inside the guide substrate mathematically) //Device reverses the position adjustment of sandSqueeze... //ypos[i] = ypos[i] + 0.5f * (channelWidth - waferThickness); //ypos[i] = ypos[i] * channelNumber; //ypos[i] = ypos[i] - width/2.0f; int threadsPerBlock = SANDMAN_CUDA_THREADS; int blocksPerGrid =(numElements + threadsPerBlock - 1) / threadsPerBlock; if(showCUDAsteps) printf("\tCUDA kernel unsqueeze h bender with %d blocks of %d threads\n", blocksPerGrid, threadsPerBlock); hipLaunchKernelGGL(( global_unSqueezeBenderChannel), dim3(blocksPerGrid), dim3(threadsPerBlock), 0, 0, d_pointsYH, d_tempArray, width, channelWidth, waferThickness, numElements); } ///Calculates which channel number (left to right) the neutron sits in, then ///shifts all phase space to fit in a single version of that channel so that ///the curved guide module can be used to do the transport for a multi-channel ///bender. Then the opposite function "unSqueeze..." 
reverses this process void Sandman::sandSqueezeVerticalBenderChannels(const float height, const float numChannels, const float waferThickness) { //Channel width in this case includes one wafer on the far side float channelHeight = (height + waferThickness) / numChannels; //(this calc has last channel wafer inside the guide substrate mathematically) //Device now computes //relativeY = ypos[i] + width/2.0; //channelNumber = roundf( relativeY / channelwidth ); //That is stored in tempArray //Then we adjust the position to be within a single channel of the right thickness for the OPTICS //ypos[i] = ypos[i] + width/2.0f; //ypos[i] = ypos[i] / channelNumber; //ypos[i] = ypos[i] - 0.5f * (channelWidth-waferThickness); int threadsPerBlock = SANDMAN_CUDA_THREADS; int blocksPerGrid =(numElements + threadsPerBlock - 1) / threadsPerBlock; if(showCUDAsteps) printf("\tCUDA kernel squeeze v bender with %d blocks of %d threads\n", blocksPerGrid, threadsPerBlock); hipLaunchKernelGGL(( global_squeezeBenderChannel), dim3(blocksPerGrid), dim3(threadsPerBlock), 0, 0, d_pointsYV, d_tempArray, height, channelHeight, waferThickness, numElements); } ///"Squeeze...() calculates which channel number (left to right) the neutron ///sits in, then shifts all phase space to fit in a single version of that ///channel so that the curved guide module can be used to do the transport for ///a multi-channel bender. This function "unSqueeze..." reverses this process ///after the bender has been done void Sandman::sandUnSqueezeVerticalBenderChannels(const float height, const float numChannels, const float waferThickness) { //Channel width in this case includes one wafer on the far side float channelHeight = (height + waferThickness) / numChannels; //(this calc has last channel wafer inside the guide substrate mathematically) //Device reverses the position adjustment of sandSqueeze... //ypos[i] = ypos[i] + 0.5f * (channelWidth - waferThickness); //ypos[i] = ypos[i] * channelNumber; //ypos[i] = ypos[i] - width/2.0f; int threadsPerBlock = SANDMAN_CUDA_THREADS; int blocksPerGrid =(numElements + threadsPerBlock - 1) / threadsPerBlock; if(showCUDAsteps) printf("\tCUDA kernel unsqueeze v bender with %d blocks of %d threads\n", blocksPerGrid, threadsPerBlock); hipLaunchKernelGGL(( global_unSqueezeBenderChannel), dim3(blocksPerGrid), dim3(threadsPerBlock), 0, 0, d_pointsYV, d_tempArray, height, channelHeight, waferThickness, numElements); }
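

// Illustrative usage sketch (not part of the library; the instance name and
// all numerical values below are hypothetical).  It shows how the
// multi-channel bender above, which internally composes the channel squeeze,
// a standard curved-guide transport and the matching unsqueeze, is driven
// from user code:
//
//   Sandman beam;                          // hypothetical construction
//   ...
//   beam.sandHorizontalBender(3.0f,        // length (m)
//                             0.06f,       // total width (m)
//                             0.12f,       // height (m)
//                             5,           // number of channels
//                             0.0005f,     // wafer thickness (m)
//                             1200.0f,     // radius of curvature (m)
//                             2.0f);       // supermirror m value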
bd1c78597b34332284574229a3a89d7f44f0eecf.cu
////////////////////////////////////////////////////////////////////////////////// /// /// @sandmanCUDA.cu /// @author Phil Bentley <[email protected] /// @version 1.0 /// /// @section LICENSE /// /// BSD 3-Clause License /// /// Copyright (c) 2016, Phil Bentley /// All rights reserved. /// /// Redistribution and use in source and binary forms, with or without /// modification, are permitted provided that the following conditions are met: /// /// * Redistributions of source code must retain the above copyright notice, this /// list of conditions and the following disclaimer. /// /// * Redistributions in binary form must reproduce the above copyright notice, /// this list of conditions and the following disclaimer in the documentation /// and/or other materials provided with the distribution. /// /// * Neither the name of the copyright holder nor the names of its /// contributors may be used to endorse or promote products derived from /// this software without specific prior written permission. /// /// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" /// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE /// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE /// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE /// FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL /// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR /// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER /// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, /// OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE /// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. /// /// /// /// @section DESCRIPTION /// /// Sandman is a ridiculously fast monte-carlo code for simulating /// polychromatic neutron beams. /// /// Sandman uses the math in neutron acceptance diagram shading /// (nads, which is monochromatic) to implement a monte-carlo method /// of ray tracing, by breaking up the simulation into two /// independent planes with finite phase space boundaries. This is /// significantly faster than 3D tracing plane intersections, even /// though it produces mathematically identical output. The /// limitation is that it can only simulate beams where the /// horizontal and vertical phase spaces are independent /// (e.g. rectangular neutron guides). /// /// This code provides to the user a shared library (.so) which you /// can install on your system. Thereafter, you create a sandman /// program that represents the instrument simulation and link this /// library. Calling the sandman class public functions creates a /// simulation of a neutron beam on an NVIDIA GPU using NVIDIA'S /// CUDA API. /// /// There are some fundamental differences between this code and /// existing codes at the time of writing. /// /// The geometry definition begins at the SAMPLE POSITION, and works /// backwards. This is for very good reason. Start with the phase /// space you need, and work from there. It's also orders of /// magnitude quicker in most cases to work like this. To handle /// this reverse tracing method, sandman's beam monitors and /// calculations have been specially written in a way to provide /// correct results in the backwards or forwards direction. 
For /// example, the beam monitor functions store a copy of the position /// of the neutrons, and mirror the divergence; then at the end of /// the calculation the statistical weight is calculated, so that /// the beam profile at that position matches the result that VITESS /// or MCSTAS would give you when simulating forwards. Nonetheless, /// there is nothing in the code that prevents you from doing a /// forwards simulation --- if you insist! Just define a sample /// with sandman that is the same size as the moderator, and a /// sandman moderator that is the same size as the instrument /// sample, and set the mirror image parameter in the relevant /// monitor functions to "false". /// ////////////////////////////////////////////////////////////////////////////////// #include <curand.h> #include <helper_cuda.h> #include <cuda_runtime_api.h> #include <math.h> #include <fstream> #include <iostream> #include "../include/sandmanCUDA.h" #define DEBUG 1 // Physical Constants const static float thetaCritNickel=0.099138f; #define NICKEL_REFLECTIVITY 0.967f const static float thetaCritStandardLambda = 1.0f; const static int maxElements = 100000000; const static float deadWeight = 0.001f; const static float PI_FLOAT = 3.1415927f; // Define colours for terminal output text highlighting const static std::string color_red("\033[0;31m"); const static std::string color_green("\033[1;32m"); const static std::string color_yellow("\033[1;33m"); const static std::string color_cyan("\033[0;36m"); const static std::string color_magenta("\033[0;35m"); const static std::string color_reset("\033[0m"); std::string remove_extension(const std::string& filename) { size_t lastdot = filename.find_last_of("."); if (lastdot == std::string::npos) return filename; return (filename.substr(0, lastdot)); } __host__ __device__ static inline float radians2degrees(const float radians) { return(radians * 180.0f / PI_FLOAT); } __host__ __device__ static inline float degrees2radians(const float degrees) { return(degrees * PI_FLOAT / 180.0f); } __host__ __device__ static inline float square2circleFlux(const float num) { //Ratio of area of circle to area of square is PI/4 return ( num / (PI_FLOAT / 4.0f)); } __host__ __device__ static float elliptic_curve(const float xpos, const float fp1, const float fp2, const float maxWidth) { //An ellipse where the entrance width is specified //find centre of ellipse independent of order of fp1 and fp2 const float x0 = (fp1 + fp2) / 2.0f; //find the absolute focal point relative to x0 float f = fabsf((fp2 - fp1) / 2.0f); //b value is half the width float b = maxWidth / 2.0f; //return the y value of the curve float y2 = b*b - (xpos-x0)*(xpos-x0)*b*b/(f*f-b*b); return(sqrtf(fabsf(y2))); } __host__ __device__ static float parabolic_closing_curve(float xpos, float foc, float inw) { float x0; float ans; x0 = (2.0f * foc + sqrtf(4.0f * foc * foc + inw * inw)) / 4.0f; ans = 2.0f * sqrtf((foc - x0) * (xpos - x0)); return (ans); } __host__ __device__ static float parabolic_opening_curve(float xpos, float len, float foc, float outw) { float x0; float ans; x0 = (2.0f*foc + 2.0f*len - sqrtf(4.0f*foc*foc - 8.0f*foc*len + 4.0f*len*len + outw*outw))/4.0f; ans = 2.0f*sqrtf((foc - x0)*(-x0 + xpos)); return (ans); } __global__ static void global_countNeutrons0(float *numNeutrons, const float *weightH, const float *weightV, const int numElements) { __shared__ float sharedTotal; int i = blockIdx.x*blockDim.x + threadIdx.x; int tid = threadIdx.x; //Try doing this one neutron per thread, for fun - and simpler code 
;) //Boss thread zeros the shared counter if(tid == 0) { sharedTotal=0.0f; } __syncthreads(); //Each thread adds the weight of its neutron to the shared total if(i<numElements) { //Any pair could actually be multiplied in this step, assuming //no horizontal and vertical correlations atomicAdd(&sharedTotal, weightH[i]*weightV[i]); } __syncthreads(); //Boss thread adds this total to the global total if(i<numElements); { if(tid == 0) { atomicAdd(&numNeutrons[0], sharedTotal); } } __syncthreads(); } // Array reduction routines. Tried many of these, some are faster than others. __device__ void blockReduce1(float *array) { // Interleaved addressing, reduction #1 from nvidia __shared__ float sharedTotal[512]; int tid = threadIdx.x; //Work in local shared memory copy sharedTotal[tid] = array[tid]; __syncthreads(); for(unsigned int s=1; s < SANDMAN_CUDA_THREADS; s*=2) { if(tid % (2*s) == 0) { sharedTotal[tid] += sharedTotal[tid +s]; } __syncthreads(); } //Write back to block master thread if(tid == 0) { array[0] = sharedTotal[0]; } } __device__ void blockReduce2(float *array) { // Interleaved addressing, reduction #2 from nvidia __shared__ float sharedTotal[512]; int tid = threadIdx.x; int index; //Work in local shared memory copy sharedTotal[tid] = array[tid]; __syncthreads(); for(unsigned int s=1; s < SANDMAN_CUDA_THREADS; s*=2) { index = 2 * s*tid; if(index < SANDMAN_CUDA_THREADS) { sharedTotal[index] += sharedTotal[index +s]; } __syncthreads(); } //Write back to block master thread if(tid == 0) { array[0] = sharedTotal[0]; } } __device__ void blockReduce3(float *array) { // Sequential addressing, reduction #3 from nvidia __shared__ float sharedTotal[512]; int tid = threadIdx.x; //Work in local shared memory copy sharedTotal[tid] = array[tid]; __syncthreads(); for(unsigned int s=SANDMAN_CUDA_THREADS/2; s > 0; s>>=1) { if(tid < s) { sharedTotal[tid] += sharedTotal[tid +s]; } __syncthreads(); } //Write back to block master thread if(tid == 0) { array[0] = sharedTotal[0]; } } __device__ void blockReduce4_DO_NOT_USE(float *array) { //DOES NOT WORK! There is a bug somewhere... 
// Sequential addressing plus uroll loops __shared__ float sharedTotal[512]; int tid = threadIdx.x; //Work in local shared memory copy sharedTotal[tid] = array[tid]; __syncthreads(); for(unsigned int s=SANDMAN_CUDA_THREADS/2; s > 32; s>>=1) { if(tid < s) { sharedTotal[tid] += sharedTotal[tid +s]; } __syncthreads(); } if(tid <= 32) { sharedTotal[tid] += sharedTotal[tid+32]; sharedTotal[tid] += sharedTotal[tid+16]; sharedTotal[tid] += sharedTotal[tid+8]; sharedTotal[tid] += sharedTotal[tid+4]; sharedTotal[tid] += sharedTotal[tid+2]; sharedTotal[tid] += sharedTotal[tid+1]; } //Write back to block master thread if(tid == 0) { array[0] = sharedTotal[0]; } __syncthreads(); } __global__ static void global_countNeutrons(float *numNeutrons, const float *weightH, const float *weightV, const float deltaLambda, const float *modFlux, const int numTraj) { //Shared memory per thread block We can just use 512 knowing that the number //of threads will be 128 or 256 or something __shared__ float sharedTotal[512]; int i = blockIdx.x*blockDim.x + threadIdx.x; int tid = threadIdx.x; const float nTraj = (float) numTraj; float element; //Try doing this one neutron per thread, for fun - and simpler code ;) //All threads zero their shared counter //sharedTotal[i]=0.0f; //__syncthreads(); //Probably not needed until the last moment before block reduction //Each thread adds the weight of its neutron to the shared total if(i<numTraj) { //sharedTotal[tid] = modFlux[i]*weightH[i]*weightV[i]/nElements; element = weightH[i] * weightV[i]; //in units of fractions of neutrons if(modFlux != NULL) { //Don't just calculate efficiency, user has specified a moderator. //In this case, each fractional neutron should be scaled by the //moderator brightness sampled by that trajectory, and then //normalised to the full simulation wavelength band and number of //trajectories if(nTraj > 0.0) element = element * deltaLambda * modFlux[i] / nTraj; else element = 0.0; } //if(isnan(element)) //element = 0.0; sharedTotal[tid] = element; __syncthreads(); // do block reduction on the shared memory using NVIDIA's tree method blockReduce1(sharedTotal); //Boss thread sums shared total and adds to the global total if(tid == 0) { atomicAdd(&numNeutrons[0], sharedTotal[0]); } __syncthreads(); } } __global__ static void global_countTrajectories(float *numNeutrons, const float *weightH, const float *weightV, const int numElements) { //Shared memory per thread block __shared__ float sharedTotal[512]; int i = blockIdx.x*blockDim.x + threadIdx.x; int tid = threadIdx.x; //Try doing this one neutron per thread, for fun - and simpler code ;) //All threads zero their shared counter //sharedTotal[i]=0.0f; //__syncthreads(); //Probably not needed until the last moment before block reduction //Each thread adds the weight of its neutron to the shared total if(i<numElements) { sharedTotal[tid] = weightH[i]*weightV[i]; //if(isnan(sharedTotal[tid])) //sharedTotal[tid] = 0.0; __syncthreads(); // do block reduction on the shared memory using NVIDIA's tree method blockReduce3(sharedTotal); //Boss thread sums shared total and adds to the global total if(tid == 0) { atomicAdd(&numNeutrons[0], sharedTotal[0]); } __syncthreads(); } } ///Compresses the beam into a single virtual channel of the bender, to ///model a multi-channel bender by a single channel process (n ///channels would have n branches otherwise). 
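///The per-trajectory channel index is written to the channelNumber array
///(d_tempArray on the host side), so that global_unSqueezeBenderChannel can
///invert the transform, up to floating-point rounding, after the curved-guide
///transport.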
__global__ static void global_squeezeBenderChannel(float *ypos, float *channelNumber, const float width, const float channelWidth, const float waferThickness, const int numElements) { int i = blockIdx.x*blockDim.x + threadIdx.x; float relativeY; if(i<numElements) { relativeY = ypos[i] + width/2.0f; //The 'channelWidth' is defined as the empty space plus one //wafer thickness //Which channel does the neutron hit? Channel number starts at zero channelNumber[i] = floorf( relativeY / channelWidth ); //Then we adjust the position to be within a single channel of //the right thickness for the OPTICS ypos[i] = relativeY; ypos[i] = ypos[i] / (channelNumber[i]+1.0f); ypos[i] = ypos[i] - 0.5f * (channelWidth-waferThickness); } } ///Reverses the compression of the beam into a single channel of the ///bender, to model a multi-channel bender by a single channel process ///(n channels would have n branches otherwise). __global__ static void global_unSqueezeBenderChannel(float *ypos, const float *channelNumber, const float width, const float channelWidth, const float waferThickness, const int numElements) { int i = blockIdx.x*blockDim.x + threadIdx.x; //float preY=0.0; if(i<numElements) { //float preY = ypos[i]; //Device reverses the position adjustment of sandSqueeze... ypos[i] = ypos[i] + 0.5f * (channelWidth - waferThickness); ypos[i] = ypos[i] * (channelNumber[i]+1.0f); ypos[i] = ypos[i] - width/2.0f; } } __global__ static void global_copyArray(const float *source, float *destination, const int numElements, const bool invert) { // Copies the values from one array to another int i = blockIdx.x*blockDim.x + threadIdx.x; //These nested conditionals do not cause branching problems because all //threads evaluate the same path if(i<numElements) { if(invert) destination[i] = -source[i]; else destination[i] = source[i]; } } __host__ __device__ static float maxwellian(const float brightness0, const float tempK, const float lambda_A) { //Describes a maxwellian curve based on the given parameters. 
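  //(Explicitly: with lambda_A in angstroms and tempK in kelvin this evaluates
  // phi(lambda) = brightness0 * 2*a*a * exp(-a/lambda^2) / lambda^5,
  // where a = 1e20 * h*h / (2*k*m*tempK), as written out in the return below.)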
This //maxwellian is the same curve as used by existing codes, so it //should agree with those (nads, mcstas) //Defined __host__ __device__ so it can be unit tested if required const float h = 6.626076E-34; const float m = 1.6749284E-27; const float k = 1.380662E-23; const float a=(1.0E10*1.0E10*h*h)/(2.0*k*m*tempK); return( brightness0*2.0*a*a*exp(-a/(lambda_A*lambda_A))/pow(lambda_A,5.0) ); } __host__ __device__ static float psiMODERATOR(const float lambda_A) { //Defined __host__ __device__ so it can be unit tested if required return( maxwellian(4.035E12f, 103.97f, lambda_A) + maxwellian(2.503E12, 25.56f, lambda_A) + maxwellian(1.399E13, 298.411f, lambda_A) ); } __host__ __device__ static float illHCS(const float lambda_A) { //Defined __host__ __device__ so it can be unit tested if required return( maxwellian(2.78E13f, 40.1f, lambda_A) + maxwellian(3.44E13, 145.8f, lambda_A) + maxwellian(1.022E13, 413.5f, lambda_A) ); } __global__ static void global_sandILLHCSModerator(float *d_modFluxH, float *d_weightH, const float *d_lambdag, const float *d_pointsYH, const int numElements) { // Calculates the total emitted neutron current represented by this // trajectory, based on its interception with one moderator surace // characterised by a single temperature temp, width width, positioned with // an offset hoffset, and a brightness num float ymax, ymin; ymax = 0.206f/2.0f; ymin = -ymax; int i = blockIdx.x*blockDim.x + threadIdx.x; if(i < numElements) { //The sample module assigns the scaling factor related to solid angle, now we do moderator brightness if(d_modFluxH[i] < 10.0f) { //That check means we did not already calculate the flux, so we need to do it now: d_modFluxH[i] = d_modFluxH[i] * illHCS(d_lambdag[i]); } //Modify the weight if the neutron misses For one moderator, it is an easy //window For multiple moderators, we need to set the weight to the initial //value, then add multiples of that to an initially zeroed accumulator if(d_pointsYH[i] > ymax || d_pointsYH[i] < ymin) { d_weightH[i] = 0.0; } } } __global__ static void global_sandPSIModerator(float *d_modFluxH, float *d_weightH, const float *d_lambdag, const float *d_pointsYH, const int numElements) { // Calculates the total emitted neutron current represented by this // trajectory, based on its interception with one moderator surace // characterised by a single temperature temp, width width, positioned with // an offset hoffset, and a brightness num float ymax, ymin; ymax = 0.206f/2.0f; ymin = -ymax; int i = blockIdx.x*blockDim.x + threadIdx.x; if(i < numElements) { //The sample module assigns the scaling factor related to solid angle, now we do moderator brightness if(d_modFluxH[i] < 10.0f) { //That check means we did not already calculate the flux, so we need to do it now: d_modFluxH[i] = d_modFluxH[i] * psiMODERATOR(d_lambdag[i]); } //Modify the weight if the neutron misses For one moderator, it is an easy //window For multiple moderators, we need to set the weight to the initial //value, then add multiples of that to an initially zeroed accumulator if(d_pointsYH[i] > ymax || d_pointsYH[i] < ymin) { d_weightH[i] = 0.0; } } } __global__ static void global_sandModerator1(float *d_modFluxH, float *d_weightH, const float *d_lambdag, const float *d_pointsYH, const int numElements, const float width, const float hoffset, const float temp, const float num) { // Calculates the total emitted neutron current represented by this // trajectory, based on its interception with one moderator surace // characterised by a single temperature temp, 
  // width width, positioned with an offset hoffset, and a brightness num

  float ymax, ymin;

  ymax = hoffset + width/2.0;
  ymin = hoffset - width/2.0;

  int i = blockIdx.x*blockDim.x + threadIdx.x;

  if(i < numElements)
    {
      //The sample module assigns the scaling factor related to solid angle, now we do moderator brightness
      if(d_modFluxH[i] < 10.0f)
	{
	  //That check means we did not already calculate the flux, so we need to do it now:
	  d_modFluxH[i] = d_modFluxH[i] * maxwellian(num, temp, d_lambdag[i]);
	}

      //Modify the weight if the neutron misses.  For one moderator, it is an easy
      //window.  For multiple moderators, we need to set the weight to the initial
      //value, then add multiples of that to an initially zeroed accumulator
      if(d_pointsYH[i] > ymax || d_pointsYH[i] < ymin)
	{
	  d_weightH[i] = 0.0;
	}
    }
}

__global__ static void global_sandBrillianceTransferModerator(float *d_modFluxH, float *d_weightH, const float *d_lambdag, const float *d_pointsYH, const int numElements, const float width, const float hoffset)
{
  // Simple brilliance transfer moderator

  float ymax, ymin;

  ymax = hoffset + width/2.0;
  ymin = hoffset - width/2.0;

  int i = blockIdx.x*blockDim.x + threadIdx.x;

  if(i < numElements)
    {
      //Modify the weight if the neutron misses
      if(d_pointsYH[i] > ymax || d_pointsYH[i] < ymin)
	{
	  d_weightH[i] = 0.0;
	}

      d_modFluxH[i] = 1.0;
    }
}

__global__ static void global_sandSampleCUDA(float *d_pointsY, float *d_pointsTheta, float *d_weight, const float *d_r1, const float *d_r2, const float ox, const float oy, const float v1x, const float v2x, const float v2y, const int numElements)
{
  int i = blockIdx.x*blockDim.x + threadIdx.x;

  if(i < numElements)
    {
      d_pointsY[i]     = oy + d_r2[i]*v2y;
      d_pointsTheta[i] = ox + d_r1[i]*v1x + d_r2[i]*v2x;
      d_weight[i]      = 1.0;
    }
}

__global__ static void global_initArray(float *d_array, const float value, const int numElements)
{
  int i = blockIdx.x*blockDim.x + threadIdx.x;

  if(i < numElements)
    {
      d_array[i] = value;
    }
}

__global__ static void global_sandZeroHistogram1D(float d_histogram[100])
{
  int i = blockIdx.x*blockDim.x + threadIdx.x;

  if(i < 100)
    {
      d_histogram[i] = 0.0f;
    }
}

__global__ static void global_sandZeroHistogram2D(float d_histogram[100][100])
{
  int i = blockIdx.x*blockDim.x + threadIdx.x;
  int j;

  if(i < 100)
    {
      for(j=0; j<100; j++)
	{
	  d_histogram[i][j] = 0.0f;
	}
    }
}

__global__ static void global_sandSkewCUDA(float *d_pointsY, const float *d_pointsTheta, const float distance_m, const int numElements)
{
  int i = blockIdx.x*blockDim.x + threadIdx.x;

  if(i < numElements)
    {
      // Ignore dead neutrons
      //if(d_weight[i] > DEAD_WEIGHT)
      {
	d_pointsY[i] = d_pointsY[i] + distance_m * d_pointsTheta[i];
      }
    }
}

__global__ static void global_rotation(float *d_pointsTheta, const float angle_radians, const int numElements)
{
  int i = blockIdx.x*blockDim.x + threadIdx.x;

  if(i < numElements)
    {
      // Ignore dead neutrons
      //if(d_weight[i] > DEAD_WEIGHT)
      {
	d_pointsTheta[i] = d_pointsTheta[i] - angle_radians;
      }
    }
}

__global__ static void global_translation(float *d_pointsY, const float distance_m, const int numElements)
{
  int i = blockIdx.x*blockDim.x + threadIdx.x;

  if(i < numElements)
    {
      // Ignore dead neutrons
      //if(d_weight[i] > DEAD_WEIGHT)
      {
	d_pointsY[i] = d_pointsY[i] - distance_m;
      }
    }
}

__global__ static void global_roll_phase_space(float *d_hY, float *d_hQ, float *d_hw, float *d_vY, float *d_vQ, float *d_vw, const float theta, const int numElements)
{
  // Rotates and mixes the phase space to simulate a rotation of the coordinate system around the beam axis
  // Once thought to be impossible, now horizontal and vertical phase space is mixed.
FTW int i = blockIdx.x*blockDim.x + threadIdx.x; const float thrad = degrees2radians(theta); const float cth = cosf(thrad); const float sth = sinf(thrad); const float c2 = cth*cth; const float s2 = sth*sth; float vposp =0.0; float hposp =0.0; float vthp = 0.0; float hthp = 0.0; float vwp = 0.0; float hwp = 0.0; if(i<numElements) { // Ignore dead neutrons //if(d_weight[i] > DEAD_WEIGHT) { //Calculate the new positions after the rotation vposp = d_vY[i] * cth - d_hY[i] * sth; hposp = d_hY[i] * cth + d_vY[i] * sth; //Calculate the new divergences after the rotation vthp = d_vQ[i] * cth - d_hQ[i] * sth; hthp = d_hQ[i] * cth + d_vQ[i] * sth; //Calculate the new weights after the rotation vwp = d_vw[i] * c2 + d_hw[i] * s2; hwp = d_hw[i] * c2 + d_vw[i] * s2; //Copy all the results back into the arrays in place d_vY[i] = vposp; d_hY[i] = hposp; d_vQ[i] = vthp; d_hQ[i] = hthp; d_vw[i] = vwp; d_hw[i] = hwp; } } } __device__ inline static float low_pass_filter(const float value, const float cutOff) { // This function uses approximation to heaviside function with approximate // tanh running on hardware to avoid a branching if statement. Important // for thread divergence. return( 0.5f + 0.5f*tanh(2000.0f*(-value+cutOff))); } __device__ inline static float high_pass_filter(const float value, const float cutOff) { // High pass filter. values greater than cutOff have > 0 return value // This function uses approximation to heaviside function with approximate // tanh running on hardware to avoid a branching if statement. Important // for thread divergence. return( 0.5f + 0.5f*tanh(2000.0f*(value-cutOff))); } __global__ static void global_collimation(float *d_weight, const float *d_pointsTheta, const float lower_angle, const float upper_angle, const int numElements) { int i = blockIdx.x*blockDim.x + threadIdx.x; if(i<numElements) { // Ignore dead neutrons //if(d_weight[i] > DEAD_WEIGHT) { //Filter off lower points d_weight[i] = d_weight[i] * high_pass_filter(d_pointsTheta[i], lower_angle); //Filter off higher points d_weight[i] = d_weight[i] * low_pass_filter(d_pointsTheta[i], upper_angle); } } } __global__ static void global_aperture(float *d_weight, const float *d_pointsY, const float lower_position, const float upper_position, const int numElements) { int i = blockIdx.x*blockDim.x + threadIdx.x; if(i<numElements) { // Ignore dead neutrons //if(d_weight[i] > DEAD_WEIGHT) { //Filter off lower points // d_weight[i] = d_weight[i] * high_pass_filter(d_pointsY[i], lower_position); if(d_pointsY[i] < lower_position) d_weight[i] = 0.0; //Filter off higher points //d_weight[i] = d_weight[i] * low_pass_filter(d_pointsY[i], upper_position); if(d_pointsY[i] > upper_position) d_weight[i] = 0.0; // if(isnan(d_weight[i])) //{ // printf("NaN encountered. d_pointsY[i] = %f, lower_pos=%f, upper_pos=%f\n", d_pointsY[i], lower_position, upper_position); //} } } } __global__ static void global_beamstop(float *d_weight, const float *d_pointsY, const float lower_position, const float upper_position, const int numElements) { int i = blockIdx.x*blockDim.x + threadIdx.x; if(i<numElements) { if(d_pointsY[i] > lower_position && d_pointsY[i] < upper_position) { d_weight[i] = 0.0; } } } __host__ __device__ inline float device_criticalReflectivity(float mValue) { //Data taken from swiss neutronics. approximates the correct m value using a quadratic fit to their data return(-0.01288f*mValue*mValue+0.98f); } __host__ __device__ inline float device_critical_theta(const float wavln, /**< Wavelength of incident neutrons. 
*/ const float mValue)/**< m value of the surface. */ { float ans; ans = wavln * mValue / thetaCritStandardLambda; ans = degrees2radians(ans); ans = ans * thetaCritNickel; return( ans); } __host__ __device__ float device_reflectivity_slow(const float theta_rads, /**< Angle of incidence in radians. */ const float lambda, const float mValue) /**< m value of reflecting surface. */ { //m=1 critical angle const float thetaCritM1 = device_critical_theta(lambda, 1.0f); //general critical angle const float thetaCrit = device_critical_theta(lambda, mValue); const float dist = fabsf(theta_rads); float attn0; float attnGrad; float ans; if(dist < thetaCritM1) { //Flat at low angles below m=1 ans = device_criticalReflectivity(1.0); } else if(dist < thetaCrit) { //linear decay to the knee value above m=1 attnGrad = (device_criticalReflectivity(mValue) - device_criticalReflectivity(1.0)) / (thetaCrit - thetaCritM1); attn0 = device_criticalReflectivity(1.0) - attnGrad*thetaCritM1; ans = attn0 + attnGrad * dist; } else { ans = 0.0f; } return(ans); } __device__ float device_reflectivity(const float theta_rads, /**< Angle of incidence in radians. */ const float lambda, const float mValue) /**< m value of reflecting surface. */ { //m=1 critical angle const float thetaCritM1 = device_critical_theta(lambda, 1.0f); //general critical angle const float thetaCrit = device_critical_theta(lambda, mValue); const float dist = fabsf(theta_rads); float attn0; float attnGrad; float ans=NICKEL_REFLECTIVITY; if(dist > thetaCritM1) { attnGrad=(device_criticalReflectivity(mValue) - NICKEL_REFLECTIVITY) / (thetaCrit - thetaCritM1); attn0 = NICKEL_REFLECTIVITY - attnGrad*thetaCritM1; ans = attn0 + attnGrad * dist; } //Multiply by low pass above thetaCrit if(dist > thetaCrit) { ans = 0.0f; } return(ans); } __device__ static float device_attenuate_alpha(const float valpha, const float lambda, const float theta, const float mValue) { //Attenuates the opacity of a vertex based on its divergence angle return (valpha * device_reflectivity(theta, lambda, mValue)); } __global__ static void global_sandAllocateWavelength(float *d_lambdaH, const float *d_r1g, const float lambda1, const float deltaLambda, const int numElements) { int i = blockIdx.x*blockDim.x + threadIdx.x; if(i<numElements) { d_lambdaH[i] = lambda1 + d_r1g[i]*deltaLambda; //FMA this later } } __global__ static void global_lambdaMonitor(float *lambdaHist, const float lambdaMin, const float dLambda, int histSize, const float *lambda, const float *weightH, const float *weightV, const float *d_modflux, const float sourceDeltaLambda, const int numElements) { __shared__ float sharedLambdaHist[100]; int targetBin; int j; int i = blockIdx.x*blockDim.x + threadIdx.x; int tid = threadIdx.x; float element; //Boss thread zeros the shared counter if(tid == 0) { for(j=0; j<100 && j<histSize; j++) { sharedLambdaHist[j] = 0.0f; } } __syncthreads(); //Each thread adds the weight of its neutron to the shared total if(i<numElements) { //targetBin = (int) roundf(-0.5f + (lambda[i] - lambdaMin)/dLambda ); //targetBin = (int) rintf(-0.5f + (lambda[i] - lambdaMin)/dLambda ); targetBin = (int) rintf((lambda[i] - lambdaMin)/dLambda); //This function agrees with VITESS "normalise with binsize = no" //be certain to send non-zero dLambda to this function! 
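	  //Each trajectory deposits
	  //   weightH * weightV * sourceDeltaLambda * modFlux / numElements
	  //neutrons per second into its bin; the modFlux factor is skipped
	  //when no moderator brightness curve has been supplied.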
	  element = weightH[i] * weightV[i];  // / dLambda;   // in units of fractions of trajectory per angstrom

	  //Normalise to wavelength range
	  element = element * sourceDeltaLambda;   //in units of fractions of trajectory

	  if(d_modflux != NULL)
	    {
	      element = element * d_modflux[i] / (float)numElements;   //in units of neutrons per second
	    }

	  //if(isnan(element))
	  //  element = 0.0;

	  if( (targetBin >= 0) && (targetBin < 100) && (targetBin < histSize) )
	    {
	      atomicAdd(&sharedLambdaHist[targetBin], element);
	    }
	}

      __syncthreads();

      //Boss thread adds this total to the global total
      if(i < numElements)
	{
	  if(tid == 0)
	    {
	      for(j=0; j<100 && j<histSize; j++)
		atomicAdd(&lambdaHist[j], sharedLambdaHist[j]);
	    }
	}
      __syncthreads();
    }

__global__ static void global_arrayMinimum(const float *array, float globalMin[1], const int numElements)
{
  __shared__ float sharedMin;

  // This function DOES NOT WORK YET!  There is a race condition

  int i = blockIdx.x*blockDim.x + threadIdx.x;
  int tid = threadIdx.x;
  int bid = blockIdx.x;

  //Boss thread in first block initialises the global memory
  if(tid == 0 && bid == 0)
    {
      globalMin[0] = array[i];
    }

  __syncthreads();

  //Boss thread in warp initialises the shared memory
  if(tid == 0)
    {
      sharedMin = array[i];
    }

  __syncthreads();

  //Each thread checks its value against the shared minimum, and overwrites it if necessary
  if(i < numElements)
    {
      //This has to be handled correctly, otherwise there is a race condition
      //at this point - the if statement is not synchronised and it overwrites
      if(array[i] < sharedMin)
	atomicExch(&sharedMin, array[i]);
    }

  __syncthreads();

  //Boss thread overwrites global total if necessary
  if(i < numElements)
    {
      if(tid == 0)
	{
	  if(sharedMin < globalMin[0])
	    atomicExch(&globalMin[0], sharedMin);
	}
    }

  __syncthreads();
}

__global__ static void global_arrayMaximum(const float *array, float globalMax[1], const int numElements)
{
  __shared__ float sharedMax;

  // This function DOES NOT WORK YET!
There is a race condition int i = blockIdx.x*blockDim.x + threadIdx.x; int tid = threadIdx.x; int bid = blockIdx.x; //Boss thread in first block initialises the global memory if(tid == 0 && bid == 0) { globalMax[0] = array[i]; } __syncthreads(); //Boss thread initialises the shared counter if(tid == 0) { sharedMax = array[i]; } __syncthreads(); if(i < numElements) { //This has to be handled correctly, otherwise there is a race condition //at this point - the if statement is not synchronised and it overwrites if(array[i] > sharedMax) { __syncthreads(); atomicExch(&sharedMax, array[i]); } } __syncthreads(); //Boss thread in warp overwrites global total if necessary if(i < numElements); { if(tid == 0) { if(sharedMax > globalMax[0]); { __syncthreads(); atomicExch(&globalMax[0], sharedMax); } } } __syncthreads(); } __global__ static void global_Monitor1D(float *globalHist, const float min, const float dval, int histSize, const float *array, const float *weight, const int numElements) { __shared__ float sharedHist[100]; int targetBin; int j; int i = blockIdx.x*blockDim.x + threadIdx.x; int tid = threadIdx.x; //Try doing this one neutron per thread, for fun - and simpler code ;) // THIS IS SLOW, we need a faster, slightly more complex way //Boss thread zeros the shared counter if(tid == 0) { for(j=0; j<100 && j<histSize; j++) { sharedHist[j] = 0.0f; } } __syncthreads(); //Each thread adds the weight of its neutron to the shared total if(i<numElements) { //Add horizontal bit //targetBin = roundf( (array[i] - min)/dval ); targetBin = rintf( (array[i] - min)/dval ); atomicAdd(&sharedHist[targetBin], weight[i]); } __syncthreads(); //Boss thread adds this total to the global total if(i<numElements); { if(tid == 0) { for(j=0; j<100 && j<histSize; j++) atomicAdd(&globalHist[j], sharedHist[j]); } } __syncthreads(); } __global__ static void global_rebinnedPhaseSpace(float globalHist[100][100], const float *d_pointsY, const float *d_pointsTheta, const float yMin, const float dy, const float thetaMin, const float dtheta, int histSize, const float *d_weight, const int numElements) { int targetBinY, targetBinTheta; int i = blockIdx.x*blockDim.x + threadIdx.x; if(i < numElements) { //targetBinY = roundf( (d_pointsY[i] - yMin)/dy ); //targetBinTheta = roundf( (d_pointsTheta[i] - thetaMin)/dtheta ); targetBinY = rintf( (d_pointsY[i] - yMin)/dy ); targetBinTheta = rintf( (d_pointsTheta[i] - thetaMin)/dtheta ); if(targetBinY >= 0 && targetBinY < 100 && targetBinY < histSize) { if(targetBinTheta >= 0 && targetBinTheta < 100 && targetBinTheta < histSize) { //if(!isnan(d_weight[i])) atomicAdd(&globalHist[targetBinTheta][targetBinY], d_weight[i]); } } } } __global__ static void global_sandReflection(float *d_pointsY, float *d_pointsTheta, const float *d_lambda, float *d_weight, const float mirrorYtop, const float mirrorYbottom, const float mirrorAngleTop, const float mirrorAngleBottom, const float mTop, const float mBottom, const int numElements) { int i = blockIdx.x*blockDim.x + threadIdx.x; bool finished=false; float mval, mirrorAngle, mirrorY; // The next bit of code loops over all particles until they are no longer // reflected in the mirror(s). The way it is written at the moment is that // it keeps looping over the same particle until it is finished. An // alternative way might be that each thread handles a single reflection // case, and a shared bool keeps all threads going until no particles are // reflected. It might be the same speed, but I think this way is faster, // particularly with CUDA. 
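  //
  // Per bounce, the loop below applies (in the plane of this mirror pair):
  //   weight *= R(|theta - mirrorAngle|, lambda, m)    attenuation by the supermirror reflectivity
  //   theta   = 2*mirrorAngle - theta                  specular reflection
  //   y       = 2*mirrorY - y                          fold the position back inside the mirror
  // and it repeats until the trajectory lies between the two mirror planes.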
if(i<numElements) { // Don't try to ignore dead neutrons here - it creates NaNs in d_pointsY[i] //if(d_weight[i] > deadWeight) //{ do { finished=true; /* Reflect in the upper plane? */ if(d_pointsY[i] > mirrorYtop) { mval = mTop; mirrorAngle = mirrorAngleTop; mirrorY = mirrorYtop; finished = false; } /* Are we in the lower plane? */ if(d_pointsY[i] < mirrorYbottom) { mval = mBottom; mirrorAngle = mirrorAngleBottom; mirrorY = mirrorYbottom; finished = false; } /* Do we need to do slow work? */ if(finished == false) { d_weight[i] = device_attenuate_alpha(d_weight[i], d_lambda[i], fabsf(d_pointsTheta[i] - mirrorAngle), mval); d_pointsTheta[i] = 2.0*mirrorAngle - d_pointsTheta[i]; /* reflection in Y */ /* pointsY[i] = mirrorY - (pointsY[i] - mirrorY); */ d_pointsY[i] = 2.0*mirrorY - d_pointsY[i]; } } while (finished == false); } } void Sandman::allocateArrays(void) { /// /// Private function to allocate arrays on the GPU for the instance of the /// sandman class. Must be called by constructors. /// std::cout << "\tAllocating arrays" << std::endl; //Initialise random number generator std::cout << "\t\tCreating random number generator on GPU" << std::endl; checkCudaErrors(curandCreateGenerator(&prngGPU, CURAND_RNG_PSEUDO_MTGP32)); checkCudaErrors(curandSetPseudoRandomGeneratorSeed(prngGPU, seed)); std::cout << "\t\tAllocating array pointers on device" << std::endl; //Allocate device memory for random numbers checkCudaErrors(cudaMalloc((void **)&d_r1g, numElements * sizeof(float))); checkCudaErrors(cudaMalloc((void **)&d_r2g, numElements * sizeof(float))); //Allocate device memory for horizontal phase space checkCudaErrors(cudaMalloc((void **)&d_pointsYH, numElements * sizeof(float))); checkCudaErrors(cudaMalloc((void **)&d_pointsThetaH, numElements * sizeof(float))); checkCudaErrors(cudaMalloc((void **)&d_weightHg, numElements * sizeof(float))); checkCudaErrors(cudaMalloc((void **)&d_lambdag, numElements * sizeof(float))); //Allocate device memory for vertical phase space checkCudaErrors(cudaMalloc((void **)&d_pointsYV, numElements * sizeof(float))); checkCudaErrors(cudaMalloc((void **)&d_pointsThetaV, numElements * sizeof(float))); checkCudaErrors(cudaMalloc((void **)&d_weightVg, numElements * sizeof(float))); //Allocate device memory for temporary array (bender channel number, other funcs) checkCudaErrors(cudaMalloc((void **)&d_tempArray, numElements * sizeof(float))); //Allocate arrays for histograms checkCudaErrors(cudaMalloc((void **)&d_histogram2D, 100*100* sizeof(float))); checkCudaErrors(cudaMalloc((void **)&d_histogram1D, 100* sizeof(float))); //Moderator brightness curve if(d_modFlux == NULL) checkCudaErrors(cudaMalloc((void **)&d_modFlux, numElements * sizeof(float))); if(d_modFlux == NULL) { std::cerr << color_red << "ERROR:" << color_reset << " failure to allocate memory for moderator brightness curve" << std::endl; exit(1); } } Sandman::Sandman(const bool& verbose) { /// /// Constructor, which will generate 100 trajectories and use the standard /// random seed of 777. 
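  ///
  /// A minimal usage sketch (illustrative only - the argument values and the
  /// exact sequence of optics calls are invented for this example):
  ///
  ///   Sandman beamline(1000000, true);               // 1e6 trajectories, verbose
  ///   beamline.sandApertureCUDA(0.03f, 0.09f, false);
  ///   beamline.sandSkewCUDA(1.5f);                   // propagate 1.5 m
  ///   beamline.sandCountNeutrons();                  // reported via report() at destruction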
/// numElements = 100; int nDevices; flux = -1.0; eFlux = -1.0; traj = -1.0; eTraj = -1.0; showCUDAsteps=false; if(verbose) showCUDAsteps=true; displayWelcome(); std::cout << color_yellow << "INITIALISING" << color_reset << std::endl; cudaGetDeviceCount(&nDevices); for (int i = 0; i < nDevices; i++) { cudaDeviceProp prop; cudaGetDeviceProperties(&prop, i); printf("\tDevice Number: %d\n", i); printf("\t\tDevice name: %s\n", prop.name); printf("\t\tMemory Clock Rate (KHz): %d\n", prop.memoryClockRate); printf("\t\tMemory Bus Width (bits): %d\n", prop.memoryBusWidth); printf("\t\tPeak Memory Bandwidth (GB/s): %f\n\n", 2.0*prop.memoryClockRate*(prop.memoryBusWidth/8)/1.0e6); } std::cout << "\tAllocating arrays" << std::endl; allocateArrays(); } Sandman::Sandman(const int nE, const bool& verbose) { /// /// Constructor, which will generate 100 trajectories and use the standard /// random seed of 777. /// @param nE an integer parameter to define how many /// trajectories should be generated. /// \todo Check that the number of /// trajectories does not exceed available GPU memory /// numElements = nE; int nDevices; flux = -1.0; eFlux = -1.0; traj = -1.0; eTraj = -1.0; showCUDAsteps=false; if(verbose) showCUDAsteps=true; displayWelcome(); std::cout << color_yellow << "INITIALISING" << color_reset << std::endl; cudaGetDeviceCount(&nDevices); for (int i = 0; i < nDevices; i++) { cudaDeviceProp prop; cudaGetDeviceProperties(&prop, i); printf("\tDevice Number: %d\n", i); printf("\t\tDevice name: %s\n", prop.name); printf("\t\tMemory Clock Rate (KHz): %d\n", prop.memoryClockRate); printf("\t\tMemory Bus Width (bits): %d\n", prop.memoryBusWidth); printf("\t\tPeak Memory Bandwidth (GB/s): %f\n\n", 2.0*prop.memoryClockRate*(prop.memoryBusWidth/8)/1.0e6); } std::cout << "\tAllocating arrays" << std::endl; allocateArrays(); } Sandman::~Sandman(void) { /// /// Destructor /// First launches histogram code, then cleans up memory. /// std::cout << color_yellow << "CLEANING UP" << color_reset << std::endl; std::cout << "\tShutting down sandman." 
<< std::endl; if(d_lambdaMonHist != NULL) { executeLambdaMonitor(); checkCudaErrors(cudaFree(d_lambdaMonHist)); } if(d_pointsThetaHsnapshot != NULL && d_pointsYHsnapshot != NULL) { executePhaseSpaceMapH(); checkCudaErrors(cudaFree(d_pointsThetaHsnapshot)); checkCudaErrors(cudaFree(d_pointsYHsnapshot)); } if(d_pointsThetaVsnapshot != NULL && d_pointsYVsnapshot != NULL) { executePhaseSpaceMapV(); checkCudaErrors(cudaFree(d_pointsThetaVsnapshot)); checkCudaErrors(cudaFree(d_pointsYVsnapshot)); } std::cout << "\tFreeing up device memory" << std::endl; if(d_r1g != NULL) checkCudaErrors(cudaFree(d_r1g)); if(d_r2g != NULL) checkCudaErrors(cudaFree(d_r2g)); if(d_pointsYH != NULL) checkCudaErrors(cudaFree(d_pointsYH)); if(d_pointsThetaH != NULL) checkCudaErrors(cudaFree(d_pointsThetaH)); if(d_pointsYV != NULL) checkCudaErrors(cudaFree(d_pointsYV)); if(d_pointsThetaV != NULL) checkCudaErrors(cudaFree(d_pointsThetaV)); if(d_lambdag != NULL) checkCudaErrors(cudaFree(d_lambdag)); if(d_weightHg != NULL) checkCudaErrors(cudaFree(d_weightHg)); if(d_weightVg != NULL) checkCudaErrors(cudaFree(d_weightVg)); if(d_tempArray != NULL) checkCudaErrors(cudaFree(d_tempArray)); if(d_histogram1D != NULL) checkCudaErrors(cudaFree(d_histogram1D)); if(d_histogram2D != NULL) checkCudaErrors(cudaFree(d_histogram2D)); if(d_modFlux != NULL) checkCudaErrors(cudaFree(d_modFlux)); std::cout << "\tShutting down random generator" << std::endl; checkCudaErrors(curandDestroyGenerator(prngGPU)); report(); } void Sandman::report(void) { /// /// Generates report of results /// std::cout << color_yellow << "FINAL REPORT" << color_reset << std::endl; std::cout << "\tNeutron counter:" << std::endl; std::cout << "\t\tGot " << color_green << flux << color_reset << " neutrons per second total current" << std::endl; if(traj > 0.0f) { std::cout << "\tTrajectory counter:" << std::endl; std::cout << "\t\tGot " << traj << " trajectories having started with " << numElements << std::endl; std::cout << "\t\tEfficiency = " << 100*traj/numElements << "%" << std::endl; } } void Sandman::generateBothRandomArrays(void) { /// /// Generates random numbers on both array buffer. Use case: subsequent /// random generation of theta and y values in phase space map. /// generateRandomArray(d_r1g); generateRandomArray(d_r2g); } void Sandman::generateOneRandomArray(void) { /// /// Generates random numbers on only the first array buffers. Use case: /// subsequent generation of wavelength values. /// generateRandomArray(d_r1g); } void Sandman::sandCountNeutrons(void) { /// /// Integrates over all trajectories to estimate the total neutron current. 
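  ///
  /// The quantity accumulated on the device (see global_countNeutrons) is
  ///   flux = deltaLambda * sum_i( weightH_i * weightV_i * modFlux_i ) / numTraj,
  /// i.e. each trajectory's weight product scaled by the moderator brightness
  /// it sampled, normalised to the simulated wavelength band and to the
  /// number of generated trajectories.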
/// /// @param nSum pointer to single host memory float to store answer /// @param nSumErr pointer to single host memory float for statistical /// error on total /// /// \todo Either provide or remove nSum nSumErr functionality /// float *d_nSum; float h_nSum[1]; //count, error that way we have one memory transfer for everything std::cout << color_yellow << "NEUTRON COUNTER" << color_reset << std::endl; checkCudaErrors(cudaMalloc((void **)&d_nSum, sizeof(float))); int threadsPerBlock = SANDMAN_CUDA_THREADS; int blocksPerGrid =(numElements + threadsPerBlock - 1) / threadsPerBlock; if(showCUDAsteps) { printf("\tCUDA kernel count neutrons with %d blocks of %d threads\n", blocksPerGrid, threadsPerBlock); } // Zero the count on the host h_nSum[0] = 0.0f; // Copy the zero total to device memory checkCudaErrors(cudaMemcpy(d_nSum, h_nSum, sizeof(float), cudaMemcpyHostToDevice)); // static void global_countNeutrons(float *numNeutrons, const float *weightH, const float *weightV, const float *modFlux, const int numElements) global_countNeutrons<<<blocksPerGrid, threadsPerBlock>>> (d_nSum, d_weightHg, d_weightVg, sourceDeltaLambda, d_modFlux, numElements); //Copy total out of device memory for host reporting checkCudaErrors(cudaMemcpy(h_nSum, d_nSum, sizeof(float), cudaMemcpyDeviceToHost)); flux = *h_nSum; // eFlux = *d_nSum; } void Sandman::sandCountNeutronsSquareCorrected() { /// /// Integrates over all trajectories to estimate the total neutron current, /// and divides by Pi/2 to normalise for square window beam area /// /// @param nSum pointer to single host memory float to store answer /// @param nSumErr pointer to single host memory float for statistical /// error on total /// /// \todo Either provide or remove nSum nSumErr functionality /// sandCountNeutrons(); flux = flux / (PI_FLOAT/4.0f); std::cout << "Square beam corrected neutron counter:" << std::endl; std::cout << " Got " << flux << " pseudo neutrons (weight product from both planes)" << std::endl; } void Sandman::sandCountNeutronsCircleCorrected() { /// /// Integrates over all trajectories to estimate the total neutron current, /// and divides by Pi/2 to normalise for square window beam area /// /// @param nSum pointer to single host memory float to store answer /// @param nSumErr pointer to single host memory float for statistical /// error on total /// /// \todo Either provide or remove nSum nSumErr functionality /// sandCountNeutrons(); flux = flux / (PI_FLOAT/2.0f); std::cout << "Circular beam corrected neutron counter:" << std::endl; std::cout << " Got " << flux << " pseudo neutrons (weight product from both planes)" << std::endl; } void Sandman::sandCountTrajectories(void) { /// /// Integrates over all trajectories to estimate the total neutron current. 
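  ///
  /// Unlike sandCountNeutrons(), the device sum here is simply
  /// sum_i( weightH_i * weightV_i ) with no brightness weighting
  /// (see global_countTrajectories); report() later quotes traj/numElements
  /// as a percentage transport efficiency.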
/// /// @param nSum pointer to single host memory float to store answer /// @param nSumErr pointer to single host memory float for statistical /// error on total /// /// \todo Either provide or remove nSum nSumErr functionality /// float *d_nSum; float h_nSum[1]; //count, error that way we have one memory transfer for everything checkCudaErrors(cudaMalloc((void **)&d_nSum, sizeof(float))); int threadsPerBlock = SANDMAN_CUDA_THREADS; int blocksPerGrid =(numElements + threadsPerBlock - 1) / threadsPerBlock; if(showCUDAsteps) printf("\tCUDA kernel count neutrons with %d blocks of %d threads\n", blocksPerGrid, threadsPerBlock); // Zero the count on the host h_nSum[0] = 0.0f; // Copy the zero total to device memory checkCudaErrors(cudaMemcpy(d_nSum, h_nSum, sizeof(float), cudaMemcpyHostToDevice)); printf("Counting up phase space\n"); // static void global_countNeutrons(float *numNeutrons, const float *weightH, const float *weightV, const float *modFlux, const int numElements) global_countTrajectories<<<blocksPerGrid, threadsPerBlock>>> (d_nSum, d_weightHg, d_weightVg, numElements); //Copy total out of device memory for host reporting checkCudaErrors(cudaMemcpy(h_nSum, d_nSum, sizeof(float), cudaMemcpyDeviceToHost)); traj = *h_nSum; // eFlux = *d_nSum; } void Sandman::lambdaMonitor(const std::string setFilename, const float setLambdaMin, const float setLambdaMax, int setLambdaHistSize) { /// /// Sets up a wavelength spectrum histogram to be completed by the destructor. /// /// @param setFilename std::string name of file to use for output of the histogram. /// @param setLambdaMin the minimum wavelength value to use /// @param setLambdaMax the maximum wavelength value to use /// @param setLambdaHistSize the number of bins in the histogram (max 100) /// std::string manipulatedFilename; lambdaMin = setLambdaMin; lambdaMax = setLambdaMax; if(abs(lambdaMax - lambdaMin) < 0.0001) { //That would produce an error, make wavelength band 1.0 angstroms lambdaMax = lambdaMin + 1.0f; } lambdaHistSize = setLambdaHistSize; if(lambdaHistSize > 100) { lambdaHistSize = 100; } manipulatedFilename = setFilename; manipulatedFilename = remove_extension(manipulatedFilename); manipulatedFilename = manipulatedFilename + "Lambda1D.csv"; lambdaFileName = manipulatedFilename; //Allocate arrays. The actual lambda monitor is called in the destructor //once the trajectory weights are known checkCudaErrors(cudaMalloc((void **)&d_lambdaMonHist, 100* sizeof(float))); if(d_lambdaMonHist == NULL) { std::cerr << color_red << "ERROR:" << color_reset << " failure to allocate array d_lambdaMonHist" << std::endl; exit(1); } } void Sandman::executeLambdaMonitor(void) { /// /// Performs the wavelength histogram calculation set up by lambdaMonitor, /// when called by the destructor. 
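  ///
  /// Binning note: dLambda = (lambdaMax - lambdaMin)/lambdaHistSize and each
  /// trajectory falls in bin rintf((lambda - lambdaMin)/dLambda), so the
  /// wavelength written in the first CSV column is the centre of each bin,
  /// and the quoted integral is sum(counts)*dLambda.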
/// float *h_lambdaHist=NULL; float runningLambda; float lambdaIntegral=0.0; if(lambdaHistSize > 100) lambdaHistSize = 100; int i; const float dLambda=(lambdaMax-lambdaMin) / (float)lambdaHistSize; std::ofstream outfile; std::cout << color_yellow << "LAMBDA HISTOGRAM CONSTRUCTION" << color_reset << std::endl; outfile.open(lambdaFileName.c_str()); if(outfile.fail()) { std::cerr << "ERROR opening file " << lambdaFileName << std::endl; return; } h_lambdaHist = (float*) malloc(lambdaHistSize*sizeof(float)); if(h_lambdaHist == NULL) { std::cerr << color_red << "ERROR:" << color_reset << " allocating host memory in executeLambdaMonitor" << std::endl; exit(1); } if(d_histogram1D == NULL) { std::cerr << color_red << "ERROR:" << color_reset << " device memory pointer is NULL in executeLambdaMonitor" << std::endl; exit(1); } #ifdef DEBUG cudaError_t errSync = cudaGetLastError(); cudaError_t errAsync = cudaDeviceSynchronize(); if (errSync != cudaSuccess) std::cout << "Sync kernel error: " << cudaGetErrorString(errSync) << std::endl; if (errAsync != cudaSuccess) std::cout << "Async kernel error: " << cudaGetErrorString(errAsync) << std::endl; #endif // Zero the count histogram zeroHistogram1D(); #ifdef DEBUG if (errSync != cudaSuccess) std::cout << "Sync kernel error: " << cudaGetErrorString(errSync) << std::endl; if (errAsync != cudaSuccess) std::cout << "Async kernel error: " << cudaGetErrorString(errAsync) << std::endl; #endif int threadsPerBlock = SANDMAN_CUDA_THREADS; int blocksPerGrid =(numElements + threadsPerBlock - 1) / threadsPerBlock; if(showCUDAsteps) std::cout << "\tCUDA kernel lambdamonitor[" << lambdaHistSize << "] with " << blocksPerGrid << " blocks of " << threadsPerBlock << " threads" << std::endl; //void global_lambdaMonitor(float *lambdaHist, const float lambdaMin, const float dLambda, int histSize, const float *lambdaH, const float *lambdaV, const float *weightH, const float *weightV, const int numElements) global_lambdaMonitor<<<blocksPerGrid, threadsPerBlock>>> (d_histogram1D, lambdaMin, dLambda, lambdaHistSize, d_lambdag, d_weightHg, d_weightVg, d_modFlux, sourceDeltaLambda, numElements); //Copy total out of device memory for host reporting checkCudaErrors(cudaMemcpy(h_lambdaHist, d_histogram1D, lambdaHistSize*sizeof(float), cudaMemcpyDeviceToHost)); //Write out file from host memory runningLambda = lambdaMin; for(i=0; i < lambdaHistSize; i++) { outfile << runningLambda << "," << h_lambdaHist[i] << std::endl; runningLambda = runningLambda + dLambda; lambdaIntegral += h_lambdaHist[i]*dLambda; } outfile.close(); std::cout << "\tLambda monitor file written. Integral current = " << lambdaIntegral << " n/s" << std::endl; free(h_lambdaHist); } void Sandman::sandPosMonitorH(const std::string filename, const float min, const float max, int histSize) { /// /// Sets up a position histogram to be completed by the destructor. /// /// @param filename std::string name of file to use for output of the /// histogram. 
/// @param min the minimum position value to use /// @param max the maximum position value to use /// @param histSize the number of bins in the histogram (max 100) /// /// \todo Complete this function, like the lambdahistrogram function /// float *h_hist; float runningX; if(histSize > 100) histSize = 100; int i; const float dval=fabs(max-min) / (float)histSize; std::ofstream outfile; outfile.open(filename.c_str()); if(outfile.fail()) { std::cerr << "ERROR opening file " << filename << std::endl; return; } h_hist = (float*) malloc(histSize*sizeof(float)); int threadsPerBlock = SANDMAN_CUDA_THREADS; int blocksPerGrid =(numElements + threadsPerBlock - 1) / threadsPerBlock; if(showCUDAsteps) printf("CUDA posMonitorH with %d blocks of %d threads\n", blocksPerGrid, threadsPerBlock); printf("H position monitor\n"); // Zero the count histogram zeroHistogram1D(); //void global_lambdaMonitor(float *lambdaHist, const float lambdaMin, const float dLambda, int histSize, const float *lambdaH, const float *lambdaV, const float *weightH, const float *weightV, const int numElements) global_Monitor1D<<<blocksPerGrid, threadsPerBlock>>> (d_histogram1D, min, dval, histSize, d_pointsYH, d_weightHg, numElements); //Copy total out of device memory for host reporting checkCudaErrors(cudaMemcpy(h_hist, d_histogram1D, histSize*sizeof(float), cudaMemcpyDeviceToHost)); //Write out file from host memory runningX = min; for(i=0; i<histSize; i++) { outfile << runningX << "," << h_hist[i] << std::endl; runningX = runningX + dval; } outfile.close(); free(h_hist); } void Sandman::phaseSpaceMapH(const char *filename, const float ymin, const float ymax, const float thetaMin, const float thetaMax) { //Create snapshots strcpy(filenameSnapshot, filename); yminSnapshot = ymin; ymaxSnapshot = ymax; thetaMinSnapshot = thetaMin; thetaMaxSnapshot = thetaMax; if( d_pointsThetaHsnapshot != NULL || d_pointsYHsnapshot != NULL ) { std::cout << color_red << "ERROR:" << color_reset << " only one type of beam monitor snapshot can be used at a time (lambda & horizontal phase space monitors use same snapshot arrays)" << std::endl; exit(1); } checkCudaErrors(cudaMalloc((void **)&d_pointsThetaHsnapshot, numElements*sizeof(float))); if(d_pointsThetaHsnapshot == NULL) { std::cerr << color_red << "ERROR:" << color_reset << " failed to allocate device memory in setupPhaseSpaceMapH for theta" << std::endl; exit(1); } checkCudaErrors(cudaMalloc((void **)&d_pointsYHsnapshot, numElements*sizeof(float))); if(d_pointsYHsnapshot == NULL) { std::cerr << color_red << "ERROR:" << color_reset << " failed to allocate device memory in setupPhaseSpaceMapH for Y" << std::endl; exit(1); } if(d_pointsYH == NULL || d_pointsThetaH == NULL) { std::cerr << "OMG: Copying from unallocated array" << std::endl; exit(1); } //If we get here, then the memory was allocated just fine. 
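  //The two copies below take the snapshot that executePhaseSpaceMapH() later
  //histograms: theta is copied with a sign flip (invert = true) and Y is
  //copied unchanged - the flip looks like an acceptance-diagram plotting
  //convention rather than a physics operation, so the live arrays are left
  //untouched.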
int threadsPerBlock = SANDMAN_CUDA_THREADS; int blocksPerGrid =(numElements + threadsPerBlock - 1) / threadsPerBlock; if(showCUDAsteps) std::cout << "\tCUDA kernel copyArray for phaseSpaceMapH with " << blocksPerGrid << " blocks of " << threadsPerBlock << " threads" << std::endl; //Snapshot negative theta global_copyArray<<<blocksPerGrid, threadsPerBlock>>> (d_pointsThetaH, d_pointsThetaHsnapshot, numElements, true); //Snapshot positive Y global_copyArray<<<blocksPerGrid, threadsPerBlock>>> (d_pointsYH, d_pointsYHsnapshot, numElements, false); } void Sandman::phaseSpaceMapV(const char *filename, const float ymin, const float ymax, const float thetaMin, const float thetaMax) { //Create snapshots strcpy(filenameSnapshot, filename); yminSnapshot = ymin; ymaxSnapshot = ymax; thetaMinSnapshot = thetaMin; thetaMaxSnapshot = thetaMax; if( d_pointsThetaVsnapshot != NULL || d_pointsYVsnapshot != NULL ) { std::cout << color_red << "ERROR:" << color_reset << " only one type of 2D beam monitor snapshot can be created" << std::endl; exit(1); } checkCudaErrors(cudaMalloc((void **)&d_pointsThetaVsnapshot, numElements*sizeof(float))); if(d_pointsThetaVsnapshot == NULL) { std::cerr << color_red << "ERROR:" << color_reset << " failed to allocate device memory in setupPhaseSpaceMapV for theta" << std::endl; exit(1); } checkCudaErrors(cudaMalloc((void **)&d_pointsYVsnapshot, numElements*sizeof(float))); if(d_pointsYVsnapshot == NULL) { std::cerr << color_red << "ERROR:" << color_reset << " failed to allocate device memory in setupPhaseSpaceMapV for Y" << std::endl; exit(1); } if(d_pointsYH == NULL || d_pointsThetaH == NULL) { std::cerr << "OMG: Trying to copy from an unallocated array" << std::endl; exit(1); } //If we get here, then the memory was allocated just fine. int threadsPerBlock = SANDMAN_CUDA_THREADS; int blocksPerGrid =(numElements + threadsPerBlock - 1) / threadsPerBlock; if(showCUDAsteps) std::cout << "\tCUDA kernel copyArray for phaseSpaceMapV with " << blocksPerGrid << " blocks of " << threadsPerBlock << " threads" << std::endl; //Snapshot negative theta global_copyArray<<<blocksPerGrid, threadsPerBlock>>> (d_pointsThetaV, d_pointsThetaVsnapshot, numElements, true); //Snapshot positive Y global_copyArray<<<blocksPerGrid, threadsPerBlock>>> (d_pointsYV, d_pointsYVsnapshot, numElements, false); } void Sandman::executePhaseSpaceMapH(void) { /// /// Computes a full phase space map in the horizontal plane /// /// @param filename pointer to const char name of file to use for output of /// the histogram. 
/// /// @param ymin the minimum position value to use (m) /// /// @param ymax the maximum position value to use (m) /// /// @param thetaMin the minimum divergence value to use (radians) /// /// @param thetaMax the maximum divergence value to use (radians) /// float *h_histogram=NULL; float *d_boundary=NULL; float runningY = yminSnapshot; float runningTheta = thetaMinSnapshot; float dy = fabs(ymaxSnapshot-yminSnapshot)/100.0f; float dtheta = fabs(thetaMaxSnapshot - thetaMinSnapshot)/100.0f; std::cout << color_yellow << "HORIZONTAL ACCEPTANCE DIAGRAM CONSTRUCTION" << color_reset << std::endl; h_histogram = (float*) malloc(100*100*sizeof(float)); if(h_histogram == NULL) { std::cerr << "Error allocating host memory in phaseSpaceMapH" << std::endl; exit(1); } std::ofstream dataFile; int i,j; // Allocate device float for min, max etc checkCudaErrors(cudaMalloc((void **)&d_boundary, sizeof(float))); if(d_boundary == NULL) { std::cerr << "Error allocating device memory in phaseSpaceMapH for d_boundary" << std::endl; exit(1); } // Zero the count histogram zeroHistogram2D(); #ifdef DEBUG cudaError_t errSync = cudaGetLastError(); cudaError_t errAsync = cudaDeviceSynchronize(); if (errSync != cudaSuccess) std::cout << "Sync kernel error: " << cudaGetErrorString(errSync) << std::endl; if (errAsync != cudaSuccess) std::cout << "Async kernel error: " << cudaGetErrorString(errAsync) << std::endl; #endif int threadsPerBlock = SANDMAN_CUDA_THREADS; int blocksPerGrid =(numElements + threadsPerBlock - 1) / threadsPerBlock; std::cout << "\tCUDA kernel rebinnedPhaseSpace with " << blocksPerGrid << " blocks of " << threadsPerBlock << " threads" << std::endl; //void global_rebinnedPhaseSpaceH(float globalHist[100][100], const float *d_pointsY, const float *d_pointsTheta, const float yMin, const float dy, const float thetaMin, const float dtheta, int histSize, const float *d_weight, const int numElements) global_rebinnedPhaseSpace<<<blocksPerGrid, threadsPerBlock>>> ((float (*)[100])d_histogram2D, d_pointsYHsnapshot, d_pointsThetaHsnapshot, yminSnapshot, dy, thetaMinSnapshot, dtheta, 100, d_weightHg, numElements); #ifdef DEBUG if (errSync != cudaSuccess) std::cout << "Sync kernel error: " << cudaGetErrorString(errSync) << std::endl; if (errAsync != cudaSuccess) std::cout << "Async kernel error: " << cudaGetErrorString(errAsync) << std::endl; #endif //Get data from GPU checkCudaErrors(cudaMemcpy(h_histogram, d_histogram2D, 100*100 * sizeof(float), cudaMemcpyDeviceToHost)); dataFile.open(filenameSnapshot); if(!dataFile.good()) { std::cerr << "ERROR opening " << filenameSnapshot << " for writing" << std::endl; return; } else std::cout << "\tWriting 2D monitor file " << filenameSnapshot << std::endl; for(i=0; i<100; i++) { for(j=0; j<100; j++) { runningTheta = thetaMinSnapshot + dtheta * (float) j; runningY = yminSnapshot + dy * (float) i; //[theta][y] dataFile << runningTheta << "," << runningY << "," << h_histogram[j*100+i] << std::endl; } } dataFile.close(); free(h_histogram); if(d_boundary != NULL) checkCudaErrors(cudaFree(d_boundary)); } void Sandman::executePhaseSpaceMapV(void) { /// /// Computes a full phase space map in the vertical plane /// /// @param filename pointer to const char name of file to use for output of /// the histogram. 
/// /// @param ymin the minimum position value to use (m) /// /// @param ymax the maximum position value to use (m) /// /// @param thetaMin the minimum divergence value to use (radians) /// /// @param thetaMax the maximum divergence value to use (radians) /// float *h_histogram=NULL; float *d_boundary=NULL; float runningY = yminSnapshot; float runningTheta = thetaMinSnapshot; float dy = fabs(ymaxSnapshot-yminSnapshot)/100.0f; float dtheta = fabs(thetaMaxSnapshot - thetaMinSnapshot)/100.0f; std::cout << color_yellow << "HORIZONTAL ACCEPTANCE DIAGRAM CONSTRUCTION" << color_reset << std::endl; h_histogram = (float*) malloc(100*100*sizeof(float)); if(h_histogram == NULL) { std::cerr << "Error allocating host memory in phaseSpaceMapV" << std::endl; exit(1); } std::ofstream dataFile; int i,j; // Allocate device float for min, max etc checkCudaErrors(cudaMalloc((void **)&d_boundary, sizeof(float))); if(d_boundary == NULL) { std::cerr << "Error allocating device memory in phaseSpaceMapV for d_boundary" << std::endl; exit(1); } // Zero the count histogram zeroHistogram2D(); #ifdef DEBUG cudaError_t errSync = cudaGetLastError(); cudaError_t errAsync = cudaDeviceSynchronize(); if (errSync != cudaSuccess) std::cout << "Sync kernel error: " << cudaGetErrorString(errSync) << std::endl; if (errAsync != cudaSuccess) std::cout << "Async kernel error: " << cudaGetErrorString(errAsync) << std::endl; #endif int threadsPerBlock = SANDMAN_CUDA_THREADS; int blocksPerGrid =(numElements + threadsPerBlock - 1) / threadsPerBlock; std::cout << "\tCUDA kernel rebinnedPhaseSpaceV with " << blocksPerGrid << " blocks of " << threadsPerBlock << " threads" << std::endl; //void global_rebinnedPhaseSpaceH(float globalHist[100][100], const float *d_pointsY, const float *d_pointsTheta, const float yMin, const float dy, const float thetaMin, const float dtheta, int histSize, const float *d_weight, const int numElements) global_rebinnedPhaseSpace<<<blocksPerGrid, threadsPerBlock>>> ((float (*)[100])d_histogram2D, d_pointsYVsnapshot, d_pointsThetaVsnapshot, yminSnapshot, dy, thetaMinSnapshot, dtheta, 100, d_weightVg, numElements); #ifdef DEBUG if (errSync != cudaSuccess) std::cout << "Sync kernel error: " << cudaGetErrorString(errSync) << std::endl; if (errAsync != cudaSuccess) std::cout << "Async kernel error: " << cudaGetErrorString(errAsync) << std::endl; #endif //Get data from GPU checkCudaErrors(cudaMemcpy(h_histogram, d_histogram2D, 100*100 * sizeof(float), cudaMemcpyDeviceToHost)); dataFile.open(filenameSnapshot); if(!dataFile.good()) { std::cerr << "ERROR opening " << filenameSnapshot << " for writing" << std::endl; return; } else std::cout << "\tWriting 2D monitor file " << filenameSnapshot << std::endl; for(i=0; i<100; i++) { for(j=0; j<100; j++) { runningTheta = thetaMinSnapshot + dtheta * (float) j; runningY = yminSnapshot + dy * (float) i; //[theta][y] dataFile << runningTheta << " " << runningY << " " << h_histogram[j*100+i] << std::endl; } } dataFile.close(); free(h_histogram); if(d_boundary != NULL) checkCudaErrors(cudaFree(d_boundary)); } void Sandman::phaseSpaceMapH(const char *filename) { /// /// Computes a full phase space map in the horizontal plane, autodetecting /// the boundaries. /// /// @param filename pointer to const char name of file to use for output of /// the histogram. 
/// /// // float *h_histogram=NULL; float *d_boundary=NULL; // float runningY; // float runningTheta; float dy; float dtheta; float thLo, thHi, yLo, yHi; // h_histogram = (float*) malloc(100*100*sizeof(float)); // if(h_histogram == NULL) // { // std::cerr << "Error allocating host memory in phaseSpaceMapH" << std::endl; // exit(1); // } // std::ofstream dataFile; // int i,j; // Allocate device float for min, max etc checkCudaErrors(cudaMalloc((void **)&d_boundary, sizeof(float))); if(d_boundary == NULL) { std::cerr << "Error allocating device memory in phaseSpaceMapH for d_boundary" << std::endl; exit(1); } //Autodetect minimum and maximum theta std::cout << " Phase space theta minimum:" << std::endl; thLo = arrayMinimum(d_pointsThetaH, d_boundary); std::cout << " Phase space theta maximum:" << std::endl; thHi = arrayMaximum(d_pointsThetaH, d_boundary); dtheta = fabs(thLo-thHi)/100.0f; //Pad by one bin thLo = thLo - dtheta; thHi = thHi + dtheta; //Autodetect minimum and maximum y std::cout << " Phase space Y minimum:" << std::endl; yLo = arrayMinimum(d_pointsYH, d_boundary); std::cout << " Phase space Y maximum:" << std::endl; yHi = arrayMaximum(d_pointsYH, d_boundary); //Pad by one bin dy = fabs(yHi - yLo)/100.0f; yLo = yLo - dy; yHi = yHi + dy; //Pipe this now through the other function //void Sandman::phaseSpaceMapH(const char *filename, const float ymin, const float ymax, const float thetaMin, const float thetaMax) phaseSpaceMapH(filename, yLo, yHi, thLo, thHi); // // Zero the count histogram // zeroHistogram2D(); // #ifdef DEBUG // cudaError_t errSync = cudaGetLastError(); // cudaError_t errAsync = cudaDeviceSynchronize(); // if (errSync != cudaSuccess) // std::cout << "Sync kernel error: " << cudaGetErrorString(errSync) << std::endl; // if (errAsync != cudaSuccess) // std::cout << "Async kernel error: " << cudaGetErrorString(errAsync) << std::endl; // #endif // printf("2D histogram phase space H...\n\n"); // int threadsPerBlock = 256; // int blocksPerGrid =(numElements + threadsPerBlock - 1) / threadsPerBlock; // std::cout << "CUDA kernel rebinnedPhaseSpaceH, auto boundary detect, with " << blocksPerGrid << " blocks of " << threadsPerBlock << " threads" << std::endl; // //void global_rebinnedPhaseSpaceH(float globalHist[100][100], const float *d_pointsY, const float *d_pointsTheta, const float yMin, const float dy, const float thetaMin, const float dtheta, int histSize, const float *d_weight, const int numElements) // // global_rebinnedPhaseSpaceH<<<blocksPerGrid, threadsPerBlock>>> // // ((float (*)[100])d_histogram2D, d_pointsYH, d_pointsThetaH, ymin, dy, thetaMin, dtheta, 100, d_weightHg, numElements); // global_rebinnedPhaseSpaceH<<<blocksPerGrid, threadsPerBlock>>> // ((float (*)[100])d_histogram2D, d_pointsYH, d_pointsThetaH, yLo, dy, thLo, dtheta, 100, d_weightHg, numElements); // #ifdef DEBUG // if (errSync != cudaSuccess) // std::cout << "Sync kernel error: " << cudaGetErrorString(errSync) << std::endl; // if (errAsync != cudaSuccess) // std::cout << "Async kernel error: " << cudaGetErrorString(errAsync) << std::endl; // #endif // //Get data from GPU // checkCudaErrors(cudaMemcpy(h_histogram, d_histogram2D, 100*100 * sizeof(float), cudaMemcpyDeviceToHost)); // dataFile.open(filename); // if(!dataFile.good()) // { // std::cerr << "ERROR opening " << filename << " for writing" << std::endl; // return; // } // else // std::cout << "Writing 2D monitor file " << filename << std::endl; // for(i=0; i<100; i++) // { // for(j=0; j<100; j++) // { // runningTheta = thLo + dtheta * 
(float) j; // runningY = yLo + dy * (float) i; // //[theta][y] // dataFile << runningTheta << " " << runningY << " " << h_histogram[j*100+i] << std::endl; // } // } // dataFile.close(); // free(h_histogram); if(d_boundary != NULL) checkCudaErrors(cudaFree(d_boundary)); } void Sandman::phaseSpaceMapHCPU(const char *filename) { /// /// Computes a full phase space map in the horizontal plane, autodetecting /// the boundaries. This fuction runs on the CPU and requires the full /// phase space to be copied over to host Ram, so it is SLOOOOOW. /// /// @param filename pointer to const char name of file to use for output of /// the histogram. /// /// However, it is provided because it is probably very good for unit /// testing etc. /// float *h_pointsY=NULL; float *h_pointsTheta=NULL; float *h_weight=NULL; int dumped=0; h_pointsY = (float*) malloc(numElements*sizeof(float)); if(h_pointsY == NULL) { std::cerr << "phaseSpaceMapH cannot allocate memory for h_pointsY" << std::endl; exit(1); } h_pointsTheta = (float*) malloc(numElements*sizeof(float)); if(h_pointsTheta == NULL) { std::cerr << "phaseSpaceMapH cannot allocate memory for h_pointsTheta" << std::endl; exit(1); } h_weight = (float*) malloc(numElements*sizeof(float)); if(h_weight == NULL) { std::cerr << "phaseSpaceMapH cannot allocate memory for h_weight" << std::endl; exit(1); } std::ofstream dataFile; int i; //Get data from GPU sandGetPhaseSpaceH(h_pointsY, h_pointsTheta, h_weight); dataFile.open(filename); if(!dataFile.good()) { std::cerr << "ERROR opening " << filename << " for writing" << std::endl; return; } //Limit the output to 20000 points - this could be a shit load of data for(i=0; i<numElements && dumped<200000; i++) { if(h_weight[i] > deadWeight) { dataFile << h_pointsTheta[i]*180.0f/PI_FLOAT << "," << h_pointsY[i] << "," << h_weight[i] << std::endl; dumped++; } } dataFile.close(); free(h_pointsY); free(h_pointsTheta); free(h_weight); } void Sandman::phaseSpaceMapVCPU(const char *filename) { /// /// Computes a full phase space map in the vertical plane, autodetecting /// the boundaries. This fuction runs on the CPU and requires the full /// phase space to be copied over to host Ram, so it is SLOOOOOW. /// /// @param filename pointer to const char name of file to use for output of /// the histogram. /// /// However, it is provided because it is probably very good for unit /// testing etc. 
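  ///
  /// Output format: one comma-separated line per trajectory whose weight is
  /// above deadWeight, at most 200000 lines, written as
  ///   theta_in_degrees,y_in_metres,weight
  /// (the divergence is converted from radians to degrees before writing).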
/// float *h_pointsY=NULL; float *h_pointsTheta=NULL; float *h_weight=NULL; h_pointsY = (float*) malloc(numElements*sizeof(float)); if(h_pointsY == NULL) { std::cerr << "phaseSpaceMapH cannot allocate memory for h_pointsY" << std::endl; exit(1); } h_pointsTheta = (float*) malloc(numElements*sizeof(float)); if(h_pointsTheta == NULL) { std::cerr << "phaseSpaceMapH cannot allocate memory for h_pointsTheta" << std::endl; exit(1); } h_weight = (float*) malloc(numElements*sizeof(float)); if(h_weight == NULL) { std::cerr << "phaseSpaceMapH cannot allocate memory for h_weight" << std::endl; exit(1); } std::ofstream dataFile; int i; //Get data from GPU sandGetPhaseSpaceV(h_pointsY, h_pointsTheta, h_weight); dataFile.open(filename); if(!dataFile.good()) { std::cerr << "ERROR opening " << filename << " for writing" << std::endl; return; } //Limit the output to 200000 points - this could be a shit load of data for(i=0; i<numElements && i<200000; i++) { if(h_weight[i] > deadWeight) dataFile << h_pointsTheta[i]*180.0f/PI_FLOAT << "," << h_pointsY[i] << "," << h_weight[i] << std::endl; } dataFile.close(); free(h_pointsY); free(h_pointsTheta); free(h_weight); } void Sandman::debugPosPosCPU(const char *filename) { /// /// Computes a full phase space map in the vertical plane, autodetecting /// the boundaries. This fuction runs on the CPU and requires the full /// phase space to be copied over to host Ram, so it is SLOOOOOW. /// /// @param filename pointer to const char name of file to use for output of /// the histogram. /// /// However, it is provided because it is probably very good for unit /// testing etc. /// float *h_pointsH=NULL; float *h_weightH=NULL; float *h_pointsV=NULL; float *h_weightV=NULL; h_pointsH = (float*) malloc(numElements*sizeof(float)); if(h_pointsH == NULL) { std::cerr << "DebugPosPosCPU cannot allocate memory for h_pointsH" << std::endl; exit(1); } h_pointsV = (float*) malloc(numElements*sizeof(float)); if(h_pointsV == NULL) { std::cerr << "DebugPosPosCPU cannot allocate memory for h_pointsV" << std::endl; exit(1); } h_weightH = (float*) malloc(numElements*sizeof(float)); if(h_weightH == NULL) { std::cerr << "DebugPosPosCPU cannot allocate memory for h_weightH" << std::endl; exit(1); } h_weightV = (float*) malloc(numElements*sizeof(float)); if(h_weightV == NULL) { std::cerr << "DebugPosPosCPU cannot allocate memory for h_weightV" << std::endl; exit(1); } std::ofstream dataFile; int i; int dumped=0; //Get data from GPU sandDebugPosPos(h_pointsH, h_weightH, h_pointsV, h_weightV); dataFile.open(filename); if(!dataFile.good()) { std::cerr << "ERROR opening " << filename << " for writing" << std::endl; return; } //Limit the function to considering 100000 points - this could be a shit load of data for(i=0; i<numElements && dumped < 100000; i++) { if(h_weightH[i] > deadWeight && h_weightV[i] > deadWeight) { dataFile << h_pointsH[i] << "\t" << h_pointsV[i] << "\t" << h_weightH[i]*h_weightV[i] << std::endl; dumped++; } } dataFile.close(); free(h_pointsH); free(h_pointsV); free(h_weightH); free(h_weightV); } void Sandman::sandSkewCUDA(const float distance_m) { /// /// Calls the CUDA kernels to compute a skew operation on both phase space /// maps to propagate the beam a certain distance within the small angle /// limit. /// /// @param distance_m the distance the beam must propagate in metres. 
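  ///
  /// In the small-angle approximation used throughout, each plane is updated
  /// independently as
  ///   y -> y + distance_m * theta,    theta unchanged,
  /// which is what global_sandSkewCUDA applies to the horizontal and vertical
  /// phase spaces in turn.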
/// int threadsPerBlock = SANDMAN_CUDA_THREADS; int blocksPerGrid =(numElements + threadsPerBlock - 1) / threadsPerBlock; if(showCUDAsteps) printf("\tCUDA kernel skew with %d blocks of %d threads\n", blocksPerGrid, threadsPerBlock); //void device_sandSkewCUDA(float *d_pointsY, const float *d_pointsTheta, float *d_weight, const float distance_m, const int numElements) global_sandSkewCUDA<<<blocksPerGrid, threadsPerBlock>>> (d_pointsYH, d_pointsThetaH, distance_m, numElements); global_sandSkewCUDA<<<blocksPerGrid, threadsPerBlock>>> (d_pointsYV, d_pointsThetaV, distance_m, numElements); } void Sandman::sandCollimateCUDA(const float divergenceH, const float divergenceV) { /// /// Calls the CUDA kernels to compute a collimation operation, setting the /// weight to zero on trajectories falling outside the divergence window /// requested. /// /// @param divergenceH the half width divergence limit in the horizontal plane (radians) /// /// @param divergenceV the half width divergence limit in the vertical plane (radians) /// int threadsPerBlock = SANDMAN_CUDA_THREADS; int blocksPerGrid =(numElements + threadsPerBlock - 1) / threadsPerBlock; if(showCUDAsteps) printf("\tCUDA kernel collimation at %f and %f with %d blocks of %d threads\n", divergenceH, divergenceV, blocksPerGrid, threadsPerBlock); // void global_collimation(float *d_weight, const float *d_pointsTheta, const float lower_angle, const float upper_angle, const int numElements) global_collimation<<<blocksPerGrid, threadsPerBlock>>> (d_weightHg, d_pointsThetaH, -fabs(divergenceH), fabs(divergenceH), numElements); global_collimation<<<blocksPerGrid, threadsPerBlock>>> (d_weightVg, d_pointsThetaV, -fabs(divergenceV), fabs(divergenceV), numElements); } //////////////////////////////////////// // // Apertures // //////////////////////////////////////// void Sandman::sandApertureV(const float window_height) { /// /// Calls the CUDA kernels to compute an aperture operation, setting the /// weight to zero on trajectories falling outside the position window /// requested. /// /// @param window_height the full height of the window in metres. /// int threadsPerBlock = SANDMAN_CUDA_THREADS; int blocksPerGrid =(numElements + threadsPerBlock - 1) / threadsPerBlock; if(showCUDAsteps) printf("\tCUDA kernel aperture of height %f with %d blocks of %d threads\n", window_height, blocksPerGrid, threadsPerBlock); // void global_collimation(float *d_weight, const float *d_pointsTheta, const float lower_angle, const float upper_angle, const int numElements) global_aperture<<<blocksPerGrid, threadsPerBlock>>> (d_weightVg, d_pointsYV, -fabs(window_height/2.0f), fabs(window_height/2.0f), numElements); } void Sandman::sandApertureH(const float window_width) { /// /// Calls the CUDA kernels to compute an aperture operation, setting the /// weight to zero on trajectories falling outside the position window /// requested. /// /// @param window_width the full width of the window in metres. 
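  ///
  /// Collimation and aperture cuts share the same structure; a hedged sketch
  /// of the weight mask that global_collimation / global_aperture are assumed
  /// to apply (illustrative name and signature only):
  /// @code
  ///   __global__ void windowMaskSketch(float *d_weight, const float *d_coord,
  ///                                    const float lo, const float hi,
  ///                                    const int numElements)
  ///   {
  ///     int i = blockDim.x * blockIdx.x + threadIdx.x;
  ///     if (i < numElements && (d_coord[i] < lo || d_coord[i] > hi))
  ///       d_weight[i] = 0.0f;  // trajectory falls outside the accepted window
  ///   }
  /// @endcode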
/// int threadsPerBlock = SANDMAN_CUDA_THREADS; int blocksPerGrid =(numElements + threadsPerBlock - 1) / threadsPerBlock; if(showCUDAsteps) printf("\tCUDA kernel aperture of width %f with %d blocks of %d threads\n", window_width, blocksPerGrid, threadsPerBlock); // void global_collimation(float *d_weight, const float *d_pointsTheta, const float lower_angle, const float upper_angle, const int numElements) global_aperture<<<blocksPerGrid, threadsPerBlock>>> (d_weightHg, d_pointsYH, -fabs(window_width/2.0f), fabs(window_width/2.0f), numElements); } void Sandman::sandApertureCUDA(const float window_width, const float window_height, bool silent) { /// /// Calls the CUDA kernels to compute an aperture operation, setting the /// weight to zero on trajectories falling outside the position window /// requested. /// /// @param window_width the full width of the window in metres. /// /// @param window_height the full height of the window in metres. /// if(!silent) { std::cout << color_yellow << "APERTURE MASK" << color_reset << std::endl; std::cout << "\twidth = " << window_width << std::endl; std::cout << "\theight = " << window_height << std::endl; } else { std::cout << "Optical unit entrance mask" << std::endl; std::cout << "\twidth = " << window_width << std::endl; std::cout << "\theight = " << window_height << std::endl; } int threadsPerBlock = SANDMAN_CUDA_THREADS; int blocksPerGrid =(numElements + threadsPerBlock - 1) / threadsPerBlock; if(showCUDAsteps) printf("\tCUDA kernel aperture of width %f and height %f with %d blocks of %d threads\n", window_width, window_height, blocksPerGrid, threadsPerBlock); // void global_collimation(float *d_weight, const float *d_pointsTheta, const float lower_angle, const float upper_angle, const int numElements) global_aperture<<<blocksPerGrid, threadsPerBlock>>> (d_weightHg, d_pointsYH, -fabs(window_width/2.0f), fabs(window_width/2.0f), numElements); global_aperture<<<blocksPerGrid, threadsPerBlock>>> (d_weightVg, d_pointsYV, -fabs(window_height/2.0f), fabs(window_height/2.0f), numElements); } ///////////////////////////////// // // Beamstops // ///////////////////////////////// void Sandman::sandBeamstopV(const float beamstop_height) { /// /// Calls the CUDA kernels to compute a beamstop operation, setting the /// weight to zero on trajectories falling inside the position window /// requested. /// /// @param beamstop_height the full height of the window in metres. /// int threadsPerBlock = SANDMAN_CUDA_THREADS; int blocksPerGrid =(numElements + threadsPerBlock - 1) / threadsPerBlock; if(showCUDAsteps) printf("\tCUDA kernel beamstop of height %f with %d blocks of %d threads\n", beamstop_height, blocksPerGrid, threadsPerBlock); // void global_collimation(float *d_weight, const float *d_pointsTheta, const float lower_angle, const float upper_angle, const int numElements) global_beamstop<<<blocksPerGrid, threadsPerBlock>>> (d_weightVg, d_pointsYV, -fabs(beamstop_height/2.0f), fabs(beamstop_height/2.0f), numElements); } void Sandman::sandBeamstopH(const float beamstop_width) { /// /// Calls the CUDA kernels to compute a beamstop operation, setting the /// weight to zero on trajectories falling inside the position window /// requested. /// /// @param window_width the full width of the window in metres. 
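  ///
  /// A beamstop is assumed to be the logical inverse of the aperture mask
  /// sketched earlier: the weight is zeroed when the trajectory lies inside
  /// the blocked window, i.e. roughly
  /// @code
  ///   if (i < numElements && d_coord[i] > lo && d_coord[i] < hi)
  ///     d_weight[i] = 0.0f;  // trajectory hits the beamstop
  /// @endcode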
/// int threadsPerBlock = SANDMAN_CUDA_THREADS; int blocksPerGrid =(numElements + threadsPerBlock - 1) / threadsPerBlock; if(showCUDAsteps) printf("\tCUDA kernel beamstop of width %f with %d blocks of %d threads\n", beamstop_width, blocksPerGrid, threadsPerBlock); // void global_collimation(float *d_weight, const float *d_pointsTheta, const float lower_angle, const float upper_angle, const int numElements) global_beamstop<<<blocksPerGrid, threadsPerBlock>>> (d_weightHg, d_pointsYH, -fabs(beamstop_width/2.0f), fabs(beamstop_width/2.0f), numElements); } void Sandman::sandBeamstopCUDA(const float beamstop_width, const float beamstop_height) { /// /// Calls the CUDA kernels to compute a beamstop operation, setting the /// weight to zero on trajectories falling outside the position window /// requested. /// /// @param window_width the full width of the window in metres. /// /// @param window_height the full height of the window in metres. /// std::cout << color_yellow << "BEAMSTOP" << color_reset << std::endl; std::cout << "\twidth = " << beamstop_width << std::endl; std::cout << "\theight = " << beamstop_height << std::endl; int threadsPerBlock = SANDMAN_CUDA_THREADS; int blocksPerGrid =(numElements + threadsPerBlock - 1) / threadsPerBlock; if(showCUDAsteps) printf("\tCUDA kernel beamstop of width %f and height %f with %d blocks of %d threads\n", beamstop_width, beamstop_height, blocksPerGrid, threadsPerBlock); // void global_collimation(float *d_weight, const float *d_pointsTheta, const float lower_angle, const float upper_angle, const int numElements) global_beamstop<<<blocksPerGrid, threadsPerBlock>>> (d_weightHg, d_pointsYH, -fabs(beamstop_width/2.0f), fabs(beamstop_width/2.0f), numElements); global_beamstop<<<blocksPerGrid, threadsPerBlock>>> (d_weightVg, d_pointsYV, -fabs(beamstop_height/2.0f), fabs(beamstop_height/2.0f), numElements); } ///////////////////////////////// // // Moderators // ///////////////////////////////// void Sandman::sandModerator(const float width, const float height, const float hoffset, const float voffset, const float temp, const float num) { /// /// Calls the CUDA kernels to compute a single moderator window, which sets /// the weight to zero on trajectories falling outside the position window /// requested, and calculates the neutron current represented by the /// trajectory. /// /// @param width the width of the moderator in metres /// /// @param height the height of the moderator in metres /// /// @param hoffset the perpendicular horizontal offset of the moderator /// (left is positive, imagined from a view top down with the moderator at /// the bottom and the sample at the top, relative to the beam axis centre /// at the guide entrance. /// /// @param voffset the perpendicular vertical offset of the moderator (up is /// positive, imagined from a side view with the moderator on the left and /// the sample to the right, relative to the beam axis centre at the guide /// entrance. /// /// @param temp the characteristic temperature of the maxwellian distribution (kelvin) /// /// @param num the characteristic brightness of the maxwellian distribution /// (neutrons per second per cm2 per steradian per angstrom) /// /// @note the maxwellian distribution calculation is the same used in MCSTAS /// (and nads). VITESS uses a different definition of brightness and solid /// angle. 
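  ///
  /// For reference, a hedged sketch of the McStas-style Maxwellian brightness
  /// term the kernel is assumed to evaluate at each trajectory wavelength,
  /// with a = h^2 / (2 m_n k_B T), roughly 949 / T(K) in Angstrom^2:
  /// @code
  ///   __device__ float maxwellianSketch(const float lambda, const float temp,
  ///                                     const float num)
  ///   {
  ///     const float a = 949.0f / temp;  // Angstrom^2
  ///     return num * 2.0f * a * a * expf(-a / (lambda * lambda))
  ///                / powf(lambda, 5.0f);
  ///   }
  /// @endcode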
  ///

  int threadsPerBlock = SANDMAN_CUDA_THREADS;
  int blocksPerGrid =(numElements + threadsPerBlock - 1) / threadsPerBlock;

  if(showCUDAsteps)
    printf("\tCUDA kernel sandModerator of width %f and height %f with %d blocks of %d threads\n",
           width, height, blocksPerGrid, threadsPerBlock);

  //static void global_sandModerator1(float *d_modFluxH, float *d_weightH, const float *d_lambdag, const float *d_pointsYH, const int numElements, const float width, const float hoffset, const float temp, const float num)

  //Horizontal plane uses the moderator width and horizontal offset
  global_sandModerator1<<<blocksPerGrid, threadsPerBlock>>>
    (d_modFlux, d_weightHg, d_lambdag, d_pointsYH, numElements, width, hoffset, temp, num);

  //Vertical plane uses the moderator height and vertical offset
  global_sandModerator1<<<blocksPerGrid, threadsPerBlock>>>
    (d_modFlux, d_weightVg, d_lambdag, d_pointsYV, numElements, height, voffset, temp, num);
}


void Sandman::sandBrillianceTransferModerator(const float width, const float height, const float hoffset, const float voffset)
{
  ///
  /// Calls the CUDA kernels to compute a single moderator window,
  /// which sets the weight to zero on trajectories falling outside
  /// the position window requested, and otherwise scores the neutron
  /// at its transmission weight.
  ///
  /// @param width the width of the moderator in metres
  ///
  /// @param height the height of the moderator in metres
  ///
  /// @param hoffset the perpendicular horizontal offset of the moderator
  /// (left is positive, imagined from a view top down with the moderator at
  /// the bottom and the sample at the top, relative to the beam axis centre
  /// at the guide entrance).
  ///
  /// @param voffset the perpendicular vertical offset of the moderator (up is
  /// positive, imagined from a side view with the moderator on the left and
  /// the sample to the right, relative to the beam axis centre at the guide
  /// entrance).
  ///

  int threadsPerBlock = SANDMAN_CUDA_THREADS;
  int blocksPerGrid =(numElements + threadsPerBlock - 1) / threadsPerBlock;

  if(showCUDAsteps)
    printf("\tCUDA kernel sandBrillianceTransferModerator of width %f and height %f with %d blocks of %d threads\n",
           width, height, blocksPerGrid, threadsPerBlock);

  //static void global_sandModerator1(float *d_modFluxH, float *d_weightH, const float *d_lambdag, const float *d_pointsYH, const int numElements, const float width, const float hoffset, const float temp, const float num)

  //Horizontal plane uses the moderator width and horizontal offset
  global_sandBrillianceTransferModerator<<<blocksPerGrid, threadsPerBlock>>>
    (d_modFlux, d_weightHg, d_lambdag, d_pointsYH, numElements, width, hoffset);

  //Vertical plane uses the moderator height and vertical offset
  global_sandBrillianceTransferModerator<<<blocksPerGrid, threadsPerBlock>>>
    (d_modFlux, d_weightVg, d_lambdag, d_pointsYV, numElements, height, voffset);
}


void Sandman::sandILLHCSModerator(void)
{
  ///
  /// A tool to call a standard moderator kernel providing a triple maxwellian
  /// moderator matching the ILL horizontal cold source dimensions, based on
  /// the work of E. Farhi in 2008-2009 to calculate the absolute brightness
  /// via extrapolation.  This benchmark moderator was used in the NADS work,
  /// so is a useful cross-check.
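  ///
  /// In outline, a triple-maxwellian brightness is a sum of three
  /// single-temperature terms of the form sketched for sandModerator() above;
  /// the coefficients below are purely hypothetical placeholders, not the
  /// values used by global_sandILLHCSModerator:
  /// @code
  ///   // flux = maxwellianSketch(lambda, T1, n1)
  ///   //      + maxwellianSketch(lambda, T2, n2)
  ///   //      + maxwellianSketch(lambda, T3, n3);
  /// @endcode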
/// sandApertureCUDA(0.186, 0.186, true); int threadsPerBlock = SANDMAN_CUDA_THREADS; int blocksPerGrid =(numElements + threadsPerBlock - 1) / threadsPerBlock; if(showCUDAsteps) printf("\tCUDA kernel sandILLHCSModerator with %d blocks of %d threads\n", blocksPerGrid, threadsPerBlock); //Moderator brightness curve if(d_modFlux == NULL) checkCudaErrors(cudaMalloc((void **)&d_modFlux, numElements * sizeof(float))); if(d_modFlux == NULL) { std::cerr << color_red << "ERROR:" << color_reset << " failure to allocate memory for moderator brightness curve" << std::endl; exit(1); } //global_sandILLHCSModerator(float *d_modFluxH, float *d_weightH, const float *d_lambdag, const float *d_pointsYH, const int numElements) global_sandILLHCSModerator<<<blocksPerGrid, threadsPerBlock>>> (d_modFlux, d_weightHg, d_lambdag, d_pointsYH, numElements); global_sandILLHCSModerator<<<blocksPerGrid, threadsPerBlock>>> (d_modFlux, d_weightVg, d_lambdag, d_pointsYV, numElements); } void Sandman::sandPSIModerator(void) { /// /// A tool to call a standard moderator kernel providing a triple maxwellian /// moderator matching the ILL horizontal cold source dimensions, based on /// the work of E. Farhi in 2008-2009 to calculate the absolute brightness /// via extrapolation. This benchmark moderator was used in the NADS work, /// so is a useful cross-check. /// sandApertureCUDA(0.3, 0.3, true); int threadsPerBlock = SANDMAN_CUDA_THREADS; int blocksPerGrid =(numElements + threadsPerBlock - 1) / threadsPerBlock; if(showCUDAsteps) printf("\tCUDA kernel sandILLHCSModerator with %d blocks of %d threads\n", blocksPerGrid, threadsPerBlock); //Moderator brightness curve if(d_modFlux == NULL) checkCudaErrors(cudaMalloc((void **)&d_modFlux, numElements * sizeof(float))); if(d_modFlux == NULL) { std::cerr << color_red << "ERROR:" << color_reset << " failure to allocate memory for moderator brightness curve" << std::endl; exit(1); } //global_sandILLHCSModerator(float *d_modFluxH, float *d_weightH, const float *d_lambdag, const float *d_pointsYH, const int numElements) global_sandPSIModerator<<<blocksPerGrid, threadsPerBlock>>> (d_modFlux, d_weightHg, d_lambdag, d_pointsYH, numElements); global_sandPSIModerator<<<blocksPerGrid, threadsPerBlock>>> (d_modFlux, d_weightVg, d_lambdag, d_pointsYV, numElements); } void Sandman::sandReflectionH(const float mirrorYtop, const float mirrorYbottom, const float mirrorAngleTop, const float mirrorAngleBottom, const float mTop, const float mBottom) { /// /// Calls the CUDA kernels to compute a single channel guide reflection in /// the horizontal plane /// /// @param mirrorYtop upper mirror surface in phase space (since this is horizontal, top = left) in metres /// /// @param mirrorYbottom lower mirror surface in phase space (since this is horizontal, bottom = right) in metres /// /// @param mirrorAngleTop angle of inclination of upper mirror surface (radians) /// /// @param mirrorAngleBottom angle of inclination of lower mirror surface (radians) /// /// @param mTop supermirror m value of upper mirror /// /// @param mBottom supermirror m value of lower mirror /// /// /// @note the maths from this operation is a carbon copy of the nads code /// int threadsPerBlock = SANDMAN_CUDA_THREADS; int blocksPerGrid =(numElements + threadsPerBlock - 1) / threadsPerBlock; if(showCUDAsteps) printf("\tCUDA kernel reflection with %d blocks of %d threads\n", blocksPerGrid, threadsPerBlock); /* void device_sandReflection(float *d_pointsY, float *d_pointsTheta, const float *d_lambda, float *d_weight, const float 
mirrorY1, const float mirrorY2, const float mirrorAngle1, const float mirrorAngle2, const float mValue, const int numElements) */ global_sandReflection<<<blocksPerGrid, threadsPerBlock>>> (d_pointsYH, d_pointsThetaH, d_lambdag, d_weightHg, mirrorYtop, mirrorYbottom, mirrorAngleTop, mirrorAngleBottom, mTop, mBottom, numElements); } void Sandman::sandReflectionV(const float mirrorYtop, const float mirrorYbottom, const float mirrorAngleTop, const float mirrorAngleBottom, const float mTop, const float mBottom) { /// /// Calls the CUDA kernels to compute a single channel guide reflection in /// the vertical plane /// /// @param mirrorYtop upper mirror surface in phase space in metres /// /// @param mirrorYbottom lower mirror surface in phase space in metres /// /// @param mirrorAngleTop angle of inclination of upper mirror surface (radians) /// /// @param mirrorAngleBottom angle of inclination of lower mirror surface (radians) /// /// @param mTop supermirror m value of upper mirror /// /// @param mBottom supermirror m value of lower mirror /// /// /// @note the maths from this operation is a carbon copy of the nads code /// int threadsPerBlock = SANDMAN_CUDA_THREADS; int blocksPerGrid =(numElements + threadsPerBlock - 1) / threadsPerBlock; if(showCUDAsteps) printf("\tCUDA kernel reflection with %d blocks of %d threads\n", blocksPerGrid, threadsPerBlock); /* void device_sandReflection(float *d_pointsY, float *d_pointsTheta, const float *d_lambda, float *d_weight, const float mirrorY1, const float mirrorY2, const float mirrorAngle1, const float mirrorAngle2, const float mValue, const int numElements) */ global_sandReflection<<<blocksPerGrid, threadsPerBlock>>> (d_pointsYV, d_pointsThetaV, d_lambdag, d_weightVg, mirrorYtop, mirrorYbottom, mirrorAngleTop, mirrorAngleBottom, mTop, mBottom, numElements); } void Sandman::sandRotation(const float angleH, const float angleV) { /// /// Calls the CUDA kernels to shift both horizontal and vertical phase spaces in the theta plane (rotation of beam) /// /// @param angleH horizontal angle of beam rotation (radians) /// /// @param angleV vertical angle of beam rotation (radians) /// /// \todo Check in NADS and document the positive / negative axes of this function. /// int threadsPerBlock = SANDMAN_CUDA_THREADS; int blocksPerGrid =(numElements + threadsPerBlock - 1) / threadsPerBlock; if(showCUDAsteps) printf("\tCUDA kernel rotation with %d blocks of %d threads\n", blocksPerGrid, threadsPerBlock); //static void global_rotation(float *d_pointsTheta, const float angle_radians, const int numElements) global_rotation<<<blocksPerGrid, threadsPerBlock>>> (d_pointsThetaH, angleH, numElements); global_rotation<<<blocksPerGrid, threadsPerBlock>>> (d_pointsThetaV, angleV, numElements); } void Sandman::sandRotationH(const float angleH) { /// /// Calls the CUDA kernel to shift the horizontal phase space in the theta plane (rotation of beam) /// /// @param angleH horizontal angle of beam rotation (radians) /// /// \todo Check in NADS and document the positive / negative axes of this function. 
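  ///
  /// A hedged sketch of the per-trajectory update that global_rotation is
  /// assumed to perform (a beam rotation is just a constant shift of the
  /// divergence coordinate):
  /// @code
  ///   int i = blockDim.x * blockIdx.x + threadIdx.x;
  ///   if (i < numElements)
  ///     d_pointsTheta[i] += angle_radians;
  /// @endcode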
/// int threadsPerBlock = SANDMAN_CUDA_THREADS; int blocksPerGrid =(numElements + threadsPerBlock - 1) / threadsPerBlock; if(showCUDAsteps) printf("\tCUDA kernel rotationH with %d blocks of %d threads\n", blocksPerGrid, threadsPerBlock); //static void global_rotation(float *d_pointsTheta, const float angle_radians, const int numElements) global_rotation<<<blocksPerGrid, threadsPerBlock>>> (d_pointsThetaH, angleH, numElements); } void Sandman::sandRotationV(const float angleV) { /// /// Calls the CUDA kernel to shift the vertical phase space in the theta plane (rotation of beam) /// /// @param angleV vertical angle of beam rotation (radians) /// /// \todo Check in NADS and document the positive / negative axes of this function. /// int threadsPerBlock = SANDMAN_CUDA_THREADS; int blocksPerGrid =(numElements + threadsPerBlock - 1) / threadsPerBlock; if(showCUDAsteps) printf("\tCUDA kernel rotationV with %d blocks of %d threads\n", blocksPerGrid, threadsPerBlock); //static void global_rotation(float *d_pointsTheta, const float angle_radians, const int numElements) global_rotation<<<blocksPerGrid, threadsPerBlock>>> (d_pointsThetaV, angleV, numElements); } void Sandman::sandTranslationH(const float distance) { /// /// Calls the CUDA kernel to shift the horizontal phase space in the y plane (shift of beam axis) /// /// @param distance horizontal shift of beam (metres) /// /// \todo Check in NADS and document the positive / negative axes of this function. /// int threadsPerBlock = SANDMAN_CUDA_THREADS; int blocksPerGrid =(numElements + threadsPerBlock - 1) / threadsPerBlock; if(showCUDAsteps) printf("\tCUDA kernel translationH with %d blocks of %d threads\n", blocksPerGrid, threadsPerBlock); //static void global_translation(float *d_pointsY, const float distance_m, const int numElements) global_translation<<<blocksPerGrid, threadsPerBlock>>> (d_pointsYH, distance, numElements); } void Sandman::sandTranslationV(const float distance) { /// /// Calls the CUDA kernel to shift the vertical phase space in the y plane (shift of beam axis) /// /// @param distance vertical shift of beam (metres) /// int threadsPerBlock = SANDMAN_CUDA_THREADS; int blocksPerGrid =(numElements + threadsPerBlock - 1) / threadsPerBlock; if(showCUDAsteps) printf("\tCUDA kernel translationV with %d blocks of %d threads\n", blocksPerGrid, threadsPerBlock); //static void global_translation(float *d_pointsY, const float distance_m, const int numElements) global_translation<<<blocksPerGrid, threadsPerBlock>>> (d_pointsYV, distance, numElements); } void Sandman::sandRollPhaseSpace(const float theta) { /// /// Calls the CUDA kernel to rotate the beam around its own axis, mixing the phase space in both planes /// /// @param theta rotation angle (degrees) /// int threadsPerBlock = SANDMAN_CUDA_THREADS; int blocksPerGrid =(numElements + threadsPerBlock - 1) / threadsPerBlock; if(showCUDAsteps) printf("\tCUDA kernel rollPhaseSpace with %d blocks of %d threads\n", blocksPerGrid, threadsPerBlock); //static void global_translation(float *d_pointsY, const float distance_m, const int numElements) global_roll_phase_space<<<blocksPerGrid, threadsPerBlock>>> (d_pointsYH, d_pointsThetaH, d_weightHg, d_pointsYV, d_pointsThetaV, d_weightVg, theta, numElements); } void Sandman::sandFreeSpaceCUDA(const float distance, const bool& verbose) { /// /// Free space is another name for skew operation. This models the flight /// of a neutron beam in the small angle limit by skewing the phase space. 
/// /// @param distance distance to transport the neutron beam (metres) /// //This could be a sub-module, so only display if the user explicitly calls //this function without flagging the verbose option if(verbose) std::cout << color_yellow << "FREE SPACE" << color_reset << std::endl; sandSkewCUDA(distance); } void Sandman::sandGuideElementCUDA( const float length, const float entr_width, const float exit_width, const float exit_offset_h, const float mLeft, const float mRight, const float entr_height, const float exit_height, const float exit_offset_v, const float mTop, const float mBottom ) { /// /// Models a single piece of neutron guide by calling associated class /// functions, which in turn call cuda kernels. /// /// @param length length of guide element in metres /// /// @param entr_width width of entrance of guide element in metres /// /// @param exit_width width of exit of guide element in metres /// /// @param exit_offset_h horizontal offset of beam centre at the exit, relative to the entrance, in metres. /// /// @param mLeft the supermirror m value of the left side of the guide (left when looking at sample from neutron point of view) /// /// @param mRight the supermirror m value of the right side of the guide (right when looking at sample from neutron point of view) /// /// @param entr_height height of entrance of guide element in metres /// /// @param exit_height height of exit of guide element in metres /// /// @param exit_offset_v vertical offset of beam centre at the exit, relative to the entrance, in metres. /// /// @param mTop the supermirror m value of the top side of the guide /// /// @param mBottom the supermirror m value of the bottom side of the guide /// const float guideAngleTop = atan( (exit_offset_v + 0.5*(exit_height - entr_height)) / length); const float guideAngleBot = atan( (exit_offset_v + 0.5*(entr_height - exit_height)) / length); const float guideAngleLeft = atan( (exit_offset_h + 0.5*(exit_width - entr_width)) / length); const float guideAngleRight = atan( (exit_offset_h + 0.5*(entr_width - exit_width)) / length); //Propagate the neutrons to the end of the guide first sandSkewCUDA(length); //Reflect the vertical plane //sandReflectionH(const float mirrorY1, const float mirrorY2, const float mirrorAngle1, const float mirrorAngle2, const float mTop, const float mBottom, const int numElements) // sandReflectionH( // 0.5f*exit_width + exit_offset_h, // mirror top // -0.5f*exit_width + exit_offset_h,// mirror bottom // guideAngleTop, // guideAngleBot, // mTop, // mBottom); // sandReflectionV( // 0.5f*exit_height + exit_offset_v, mirror top // -0.5f*exit_height + exit_offset_v,mirror bottom // guideAngleLeft, // guideAngleRight, // mLeft, // mRight); //ERROR - this was H, width, top, bottom! sandReflectionV( 0.5f*exit_height + exit_offset_v, //mirror top -0.5f*exit_height + exit_offset_v,//mirror bottom guideAngleTop, guideAngleBot, mTop, mBottom); //ERROR - this was V, height, left right! 
sandReflectionH( 0.5f*exit_width + exit_offset_h, //mirror top -0.5f*exit_width + exit_offset_h,//mirror bottom guideAngleLeft, guideAngleRight, mLeft, mRight); } void Sandman::sandSimpleStraightGuide( const float length, const float width, const float height, const float mval ) { /// /// A simple utility function for a straight guide of constant cross section /// and a single m value /// /// @param length length of guide in metres /// /// @param width width of guide in metres /// /// @param height height of guide in metres /// /// @param mval the supermirror m value of all surfaces /// //Before we do anything else, kill neutrons missing the entrance of the guide. sandApertureCUDA(width, height, true); std::cout << color_yellow << "STRAIGHT GUIDE" << color_reset << std::endl; std::cout << "\twidth = " << width << std::endl; std::cout << "\theight = " << height << std::endl; std::cout << "\tlength = " << length << std::endl; std::cout << "\t m = " << mval << std::endl; sandGuideElementCUDA(length, width, width, 0.0, mval, mval, height, height, 0.0, mval, mval); std::cout << "\tStraight guide finished" << std::endl; } void Sandman::sandTaperedStraightGuide( const float length, const float entranceWidth, const float entranceHeight, const float exitWidth, const float exitHeight, const float mval ) { /// /// A simple utility function for a straight guide of linearly changing cross section /// and a single m value /// /// @param length length of guide in metres /// /// @param entranceWidth width of the entrance of the guide in metres /// /// @param entranceHeight height of the entrance of the guide in metres /// /// @param exitWidth width of the exit of the guide in metres /// /// @param exitHeight height of the exit of the guide in metres /// /// @param mval the supermirror m value of all surfaces /// //Before we do anything else, kill neutrons missing the entrance of the guide. sandApertureCUDA(entranceWidth, entranceHeight, true); std::cout << color_yellow << "STRAIGHT TAPERED GUIDE" << color_reset << std::endl; std::cout << "\tentrance width = " << entranceWidth << std::endl; std::cout << "\tentrance height = " << entranceHeight << std::endl; std::cout << "\t exit width = " << exitWidth << std::endl; std::cout << "\t exit height = " << exitHeight << std::endl; std::cout << "\t length = " << length << std::endl; std::cout << "\t m = " << mval << std::endl; sandGuideElementCUDA(length, entranceWidth, exitWidth, 0.0, mval, mval, entranceHeight, exitHeight, 0.0, mval, mval); std::cout << "\tStraight tapered guide finished" << std::endl; } void Sandman::sandCurvedGuide( const float length, const float sectionLength, const float width, const float height, const float mval, const float radius ) { /// /// A simple utility function for a curved guide of constant cross section /// and a single m value /// /// @param length length of guide in metres /// /// @param sectionLength length of guide sections in metres (typically 0.5, /// 1, or 2 metres in practice) /// /// @param width width of guide in metres /// /// @param height height of guide in metres /// /// @param mval the supermirror m value of all surfaces /// /// @param radius the radius of curvature of the guide in metres /// int i=0; //Before we do anything else, kill neutrons missing the entrance of the guide. 
sandApertureCUDA(width, height, true); std::cout << color_yellow << "CURVED GUIDE CHANNEL" << color_reset << std::endl; std::cout << "\tradius " << radius << " width " << width << " length " << length << " sectionLength " << sectionLength << std::endl; if(radius != 0.0) { //Break into sections int numSections = (int) round(length / sectionLength); float sectionAngle; //Special case - one section. //This is two tweaks of rotation surrounding a short, straight guide piece //the piece plane at the centre lies along the tangent of the curve at that point if(2.0*sectionLength > length) { sectionAngle = asin(0.5*length / radius); std::cout << "\tsection " << i+1 << " "; sandRotationH(sectionAngle); //sandSimpleStraightGuide(length, width, height, mval); sandGuideElementCUDA(length, width, width, 0.0, mval, mval, height, height, 0.0, mval, mval); sandRotationH(sectionAngle); std::cout << "\tCurved guide channel finished" << std::endl; return; } //Otherwise we do normal curved guide sectionAngle = 2.0*asin(0.5*sectionLength / radius); //Normal case, many sections of finite length for(i=0; i<numSections; i++) { if(i != numSections-1) //if we are not doing the last iteration so do a normal straight guide plus rotation { //sandSimpleStraightGuide(sectionLength, width, height, mval); sandGuideElementCUDA(sectionLength, width, width, 0.0, mval, mval, height, height, 0.0, mval, mval); std::cout << "\tsection " << i+1 << std::endl; sandRotationH(sectionAngle); } else //This is the last section, so take care with the length if it's not an integer multiple of sections //also, there is no rotation. The next module axis is aligned with this last piece, just as the //entrance is aligned with the previous axis { float lastPiece = length - (float)i * sectionLength; if(lastPiece <= 0.0) //i don't think that this can happen, but never mind break; std::cout << "\tsection " << i+1 << std::endl; //sandSimpleStraightGuide(lastPiece, width, height, mval); sandGuideElementCUDA(lastPiece, width, width, 0.0, mval, mval, height, height, 0.0, mval, mval); } } std::cout << "\tcurved guide channel finished" << std::endl; } } void Sandman::sandVerticallyCurvedGuide( const float length, const float sectionLength, const float width, const float height, const float mval, const float radius ) { /// /// A simple utility function for a curved guide of constant cross section /// and a single m value /// /// @param length length of guide in metres /// /// @param sectionLength length of guide sections in metres (typically 0.5, /// 1, or 2 metres in practice) /// /// @param width width of guide in metres /// /// @param height height of guide in metres /// /// @param mval the supermirror m value of all surfaces /// /// @param radius the radius of curvature of the guide in metres /// int i=0; //Before we do anything else, kill neutrons missing the entrance of the guide. sandApertureCUDA(width, height, true); std::cout << color_yellow << "VERTICALLY CURVED GUIDE CHANNEL" << color_reset << std::endl; if(radius != 0.0) { //Break into sections int numSections = (int) round(length / sectionLength); float sectionAngle; //Special case - one section. 
      //This is two tweaks of rotation surrounding a short, straight guide piece
      //the piece plane at the centre lies along the tangent of the curve at that point
      if(2.0*sectionLength > length)
        {
          sectionAngle = asin(0.5*length / radius);

          std::cout << "\tsection " << i+1 << " ";

          sandRotationV(sectionAngle);

          //sandSimpleStraightGuide(length, width, height, mval);
          sandGuideElementCUDA(length, width, width, 0.0, mval, mval, height, height, 0.0, mval, mval);

          sandRotationV(sectionAngle);

          std::cout << "\tVertically curved guide channel finished" << std::endl;
          return;
        }

      //Otherwise we do normal curved guide
      //(a chord of length sectionLength on a circle of radius R subtends an angle 2*asin(sectionLength/(2R)))
      sectionAngle = 2.0*asin(0.5*sectionLength / radius);

      //Normal case, many sections of finite length
      for(i=0; i<numSections; i++)
        {
          if(i != numSections-1) //if we are not doing the last iteration so do a normal straight guide plus rotation
            {
              //sandSimpleStraightGuide(sectionLength, width, height, mval);
              sandGuideElementCUDA(sectionLength, width, width, 0.0, mval, mval, height, height, 0.0, mval, mval);
              std::cout << "\tsection " << i+1 << std::endl;
              sandRotationV(sectionAngle);
            }
          else
            //This is the last section, so take care with the length if it's not an integer multiple of sections
            //also, there is no rotation.  The next module axis is aligned with this last piece, just as the
            //entrance is aligned with the previous axis
            {
              float lastPiece = length - (float)i * sectionLength;
              if(lastPiece <= 0.0) //i don't think that this can happen, but never mind
                break;

              std::cout << "\tsection " << i+1 << std::endl;
              //sandSimpleStraightGuide(lastPiece, width, height, mval);
              sandGuideElementCUDA(lastPiece, width, width, 0.0, mval, mval, height, height, 0.0, mval, mval);
            }
        }

      std::cout << "\tvertically curved guide channel finished" << std::endl;
    }
}


void Sandman::ellipticOpeningGuide(const float length, const float exitWidth, const float exitHeight, const float focalPoint1H, const float focalPoint2H, const float focalPoint1V, const float focalPoint2V, const float mNumber, const int numSections)
{
  //Models a focussing elliptic guide by using straight sections
  //Focal lengths are defined relative to the entrance plane

  float section_length;
  float pieceEntrWidth;
  float pieceExitWidth;
  float pieceEntrHeight;
  float pieceExitHeight;

  float pieceStartx;
  float pieceEndx;

  //float focalpoint1V, focalpoint2V;
  //float focalpoint1H, focalpoint2H;

  const char* filenameH = "hEllipseOpeningProfile.csv";
  const char* filenameV = "vEllipseOpeningProfile.csv";

  std::cout << color_yellow << "OPENING HALF ELLIPSE" << color_reset << std::endl;

  std::ofstream dataFileH;
  dataFileH.open(filenameH);
  if(dataFileH.fail())
    {
      std::cerr << "ERROR opening file " << filenameH << std::endl;
      return;
    }

  std::ofstream dataFileV;
  dataFileV.open(filenameV);
  if(dataFileV.fail())
    {
      std::cerr << "ERROR opening file " << filenameV << std::endl;
      return;
    }

  int i;

  //Break guide into sections
  section_length = length / (float) numSections;

  std::cout << "\tLength " << length << " m and exit width = " << exitWidth << " m focus2 at " << focalPoint2H << " " << focalPoint2V << " and focus1 at " << focalPoint1H << " " << focalPoint1V << " formed by " << numSections << " sections of " << section_length << " m" << std::endl;

  //Loop over converging guide approximations printing out the widths
#ifdef DEBUG
  std::cout << "\tProfile:" << std::endl;
  std::cout << "\txpos width" << std::endl;
#endif

  for (i = 0; i < numSections; i++)
    {
      pieceStartx = section_length * (float) i;
      pieceEndx   = section_length * (float) (i + 1);

      //Take JNADS curves and put these into two dimensions
      pieceEntrWidth = 2.0f *
elliptic_curve(pieceStartx, focalPoint1H, focalPoint2H, exitWidth); pieceExitWidth = 2.0f * elliptic_curve(pieceEndx, focalPoint1H, focalPoint2H, exitWidth); #ifdef DEBUG std::cout << "\t" << pieceStartx << " " << pieceEntrWidth << " H" << std::endl; #endif dataFileH << pieceStartx << "," << pieceEntrWidth << std::endl; pieceEntrHeight = 2.0f * elliptic_curve(pieceStartx, focalPoint1V, focalPoint2V, exitHeight); pieceExitHeight = 2.0f * elliptic_curve(pieceEndx, focalPoint1V, focalPoint2V, exitHeight); #ifdef DEBUG std::cout << "\t" << pieceStartx << " " << pieceEntrHeight << " V" << std::endl; #endif dataFileV << pieceStartx << "," << pieceEntrHeight << std::endl; if (i == (numSections - 1)) { #ifdef DEBUG std::cout << "\t" << pieceEndx << " exit " << pieceExitWidth << std::endl; std::cout << "\t" << pieceEndx << " exit " << pieceExitHeight << std::endl; #endif dataFileH << pieceEndx << "," << pieceExitWidth << std::endl; dataFileV << pieceEndx << "," << pieceExitHeight << std::endl; } sandGuideElementCUDA( section_length, pieceEntrWidth, pieceExitWidth, 0.0f, mNumber, mNumber, pieceEntrHeight, pieceExitHeight, 0.0f, mNumber, mNumber ); } std::cout << "\tElliptic opening guide finished" << std::endl; dataFileH.close(); dataFileV.close(); } void Sandman::ellipticClosingGuide(const float length, const float entrWidth, const float entrHeight, const float focalPoint1H, const float focalPoint2H, const float focalPoint1V, const float focalPoint2V, const float mNumber, const int numSections) { //Models a focussing elliptic guide by using straight sections float section_length; float pieceEntrWidth; float pieceExitWidth; float pieceEntrHeight; float pieceExitHeight; float pieceStartx; float pieceEndx; // float focalpoint1V, focalpoint2V; //float focalpoint1H, focalpoint2H; const char* filenameH = "hEllipseClosingProfile.csv"; const char* filenameV = "vEllipseClosingProfile.csv"; std::cout << color_yellow << "CLOSING HALF ELLIPSE" << color_reset << std::endl; std::ofstream dataFileH; dataFileH.open(filenameH); if(dataFileH.fail()) { std::cerr << "ERROR opening file " << filenameH << std::endl; return; } std::ofstream dataFileV; dataFileV.open(filenameV); if(dataFileV.fail()) { std::cerr << "ERROR opening file " << filenameV << std::endl; return; } int i; //Break guide into sections section_length = length / (float) numSections; std::cout << "\tLength " << length << " m and focus2 at " << focalPoint2H << " " << focalPoint2V << " and focus1 at " << focalPoint1H << " " << focalPoint1V << " formed by " << numSections << " sections of m=" << mNumber; //Loop over converging guide approximations printing out the widths #ifdef DEBUG std::cout << "\tProfile:" << std::endl; std::cout << "\txpos width" << std::endl; #endif for (i = 0; i < numSections; i++) { pieceStartx = section_length * (float) i; pieceEndx = section_length * (float) (i + 1); //Take JNADS curves and put these into two dimensions pieceEntrWidth = 2.0f * elliptic_curve(pieceStartx, focalPoint1H, focalPoint2H, entrWidth); pieceExitWidth = 2.0f * elliptic_curve(pieceEndx, focalPoint1H, focalPoint2H, entrWidth); #ifdef DEBUG std::cout << "\t" << pieceStartx << " " << pieceEntrWidth << " H" << std::endl; #endif dataFileH << pieceStartx << "," << pieceEntrWidth << std::endl; pieceEntrHeight = 2.0f * elliptic_curve(pieceStartx, focalPoint1V, focalPoint2V, entrHeight); pieceExitHeight = 2.0f * elliptic_curve(pieceEndx, focalPoint1V, focalPoint2V, entrHeight); #ifdef DEBUG std::cout << "\t" << pieceStartx << " " << pieceEntrHeight << " V" << 
std::endl; #endif dataFileV << pieceStartx << "," << pieceEntrHeight << std::endl; if (i == numSections - 1) { #ifdef DEBUG std::cout << "\t" << pieceEndx << " " << pieceExitWidth << std::endl; std::cout << "\t" << pieceEndx << " " << pieceExitHeight << std::endl; #endif dataFileH << pieceEndx << "," << pieceExitWidth << std::endl; dataFileV << pieceEndx << "," << pieceExitHeight << std::endl; } sandGuideElementCUDA(section_length, pieceEntrWidth, pieceExitWidth, 0.0f, mNumber, mNumber, pieceEntrHeight, pieceExitHeight, 0.0f, mNumber, mNumber ); } std::cout << "\tElliptic closing guide finished" << std::endl; dataFileH.close(); dataFileV.close(); } void Sandman::parabolicOpeningGuide(const float length, const float exitWidth, const float exitHeight, const float focalPointH, const float focalPointV, const float mNumber, const int numSections) { //Models a focussing parabolic guide by using straight sections //Focal lengths are defined relative to the entrance plane float section_length; float pieceEntrWidth; float pieceExitWidth; float pieceEntrHeight; float pieceExitHeight; float pieceStartx; float pieceEndx; // float focalpoint1V, focalpoint2V; //float focalpoint1H, focalpoint2H; const char* filenameH = "hParabolaOpeningProfile.csv"; const char* filenameV = "vParabolaOpeningProfile.csv"; std::cout << color_yellow << "OPENING PARABOLA" << color_reset << std::endl; std::ofstream dataFileH; dataFileH.open(filenameH); if(dataFileH.fail()) { std::cerr << "ERROR opening file " << filenameH << std::endl; return; } std::ofstream dataFileV; dataFileV.open(filenameV); if(dataFileV.fail()) { std::cerr << "ERROR opening file " << filenameV << std::endl; return; } int i; //Break guide into sections section_length = length / (float) numSections; std::cout << "\tLength " << length << " m and exit width = " << exitWidth << " m focus H at " << focalPointH << " and focus V at " << focalPointV << " formed by " << numSections << " sections of " << section_length << " m" << std::endl; //Loop over converging guide approximations printing out the widths #ifdef DEBUG std::cout << "\tProfile:" << std::endl; std::cout << "\txpos width" << std::endl; #endif for (i = 0; i < numSections; i++) { pieceStartx = section_length * (float) i; pieceEndx = section_length * (float) (i + 1); //Take JNADS curves and put these into two dimensions pieceEntrWidth = 2.0f * parabolic_opening_curve(pieceStartx, length, focalPointH, exitWidth); pieceExitWidth = 2.0f * parabolic_opening_curve(pieceEndx, length, focalPointH, exitWidth); #ifdef DEBUG std::cout << "\t" << pieceStartx << " " << pieceEntrWidth << " H" << std::endl; #endif dataFileH << pieceStartx << "," << pieceEntrWidth << std::endl; pieceEntrHeight = 2.0f * parabolic_opening_curve(pieceStartx, length, focalPointV, exitHeight); pieceExitHeight = 2.0f * parabolic_opening_curve(pieceEndx, length, focalPointV, exitHeight); #ifdef DEBUG std::cout << "\t" << pieceStartx << " " << pieceEntrHeight << " V" << std::endl; #endif dataFileV << pieceStartx << "," << pieceEntrHeight << std::endl; if (i == (numSections - 1)) { #ifdef DEBUG std::cout << "\t" << pieceEndx << " exit " << pieceExitWidth << std::endl; std::cout << "\t" << pieceEndx << " exit " << pieceExitHeight << std::endl; #endif dataFileH << pieceEndx << "," << pieceExitWidth << std::endl; dataFileV << pieceEndx << "," << pieceExitHeight << std::endl; } sandGuideElementCUDA( section_length, pieceEntrWidth, pieceExitWidth, 0.0f, mNumber, mNumber, pieceEntrHeight, pieceExitHeight, 0.0f, mNumber, mNumber ); } std::cout << 
"\tParabolic opening guide finished" << std::endl; dataFileH.close(); dataFileV.close(); } void Sandman::parabolicClosingGuide(const float length, const float entrWidth, const float entrHeight, const float focalPointH, const float focalPointV, const float mNumber, const int numSections) { //Models a focussing parabolic guide by using straight sections float section_length; float pieceEntrWidth; float pieceExitWidth; float pieceEntrHeight; float pieceExitHeight; float pieceStartx; float pieceEndx; // float focalpoint1V, focalpoint2V; //float focalpoint1H, focalpoint2H; const char* filenameH = "hParabolaClosingProfile.csv"; const char* filenameV = "vParabolaClosingProfile.csv"; std::cout << color_yellow << "CLOSING PARABOLA" << color_reset << std::endl; std::ofstream dataFileH; dataFileH.open(filenameH); if(dataFileH.fail()) { std::cerr << "ERROR opening file " << filenameH << std::endl; return; } std::ofstream dataFileV; dataFileV.open(filenameV); if(dataFileV.fail()) { std::cerr << "ERROR opening file " << filenameV << std::endl; return; } int i; //Break guide into sections section_length = length / (float) numSections; std::cout << "\tLength " << length << " m and focus H at " << focalPointH << " " << " and focus V at " << focalPointV << " " << " formed by " << numSections << " sections of m=" << mNumber; //Loop over converging guide approximations printing out the widths #ifdef DEBUG std::cout << "\tProfile:" << std::endl; std::cout << "\txpos width" << std::endl; #endif for (i = 0; i < numSections; i++) { pieceStartx = section_length * (float) i; pieceEndx = section_length * (float) (i + 1); //Take JNADS curves and put these into two dimensions pieceEntrWidth = 2.0f * parabolic_closing_curve(pieceStartx, focalPointH, entrWidth); pieceExitWidth = 2.0f * parabolic_closing_curve(pieceEndx, focalPointH, entrWidth); #ifdef DEBUG std::cout << "\t" << pieceStartx << " " << pieceEntrWidth << " H" << std::endl; #endif dataFileH << pieceStartx << "," << pieceEntrWidth << std::endl; pieceEntrHeight = 2.0f * parabolic_closing_curve(pieceStartx, focalPointV, entrHeight); pieceExitHeight = 2.0f * parabolic_closing_curve(pieceEndx, focalPointV, entrHeight); #ifdef DEBUG std::cout << "\t" << pieceStartx << " " << pieceEntrHeight << " V" << std::endl; #endif dataFileV << pieceStartx << "," << pieceEntrHeight << std::endl; if (i == numSections - 1) { #ifdef DEBUG std::cout << "\t" << pieceEndx << " " << pieceExitWidth << std::endl; std::cout << "\t" << pieceEndx << " " << pieceExitHeight << std::endl; #endif dataFileH << pieceEndx << "," << pieceExitWidth << std::endl; dataFileV << pieceEndx << "," << pieceExitHeight << std::endl; } sandGuideElementCUDA(section_length, pieceEntrWidth, pieceExitWidth, 0.0f, mNumber, mNumber, pieceEntrHeight, pieceExitHeight, 0.0f, mNumber, mNumber ); } std::cout << "\tParabolic closing guide finished" << std::endl; dataFileH.close(); dataFileV.close(); } void Sandman::sandHorizontalBender( const float length, const float width, const float height, const int numChannels, const float waferThickness, const float radius, const float mval ) { //This is a one-off, but malloc is expensive to use repetitively, so use //array of dedicated channel number floats const float nChannels = (float) numChannels; std::cout << color_yellow << "MULTI-CHANNEL HORIZONTAL BENDER" << color_reset << std::endl; if(nChannels < 1.0) { std::cerr << color_red << "ERROR:" << color_reset << " attempt to use horizontal bender with < 1 channels" << std::endl; exit(1); } //Find the width of the empty space 
in a single channel const float opticalWidth = (width / nChannels) - 0.5*waferThickness; if(opticalWidth < 0.001) { std::cerr << color_red << "ERROR:" << color_reset << " optical width is less than 1 mm in horizontal bender module (value is " << opticalWidth << ")" << std::endl; std::cerr << "\t width = " << width << "; nChannels = " << nChannels << std::endl; exit(1); } std::cout << nChannels << " channel bender " << width << " wide and of length " << length << " from wafers of thickness " << waferThickness << " and channels " << opticalWidth << " wide" << std::endl; //Kill neutrons missing the entrance of the system sandApertureCUDA(width,height, true); //First squeeze the neutrons into the channel sandSqueezeHorizontalBenderChannels(width, nChannels, waferThickness); //Propagate a normal curved guide with shorter, 20 cm long pieces sandCurvedGuide(length, 0.2f, opticalWidth, height, mval, radius); //UnSqueeze the neutrons out of the channel sandUnSqueezeHorizontalBenderChannels(width, nChannels, waferThickness); } void Sandman::sandVerticalBender( const float length, const float width, const float height, const int numChannels, const float waferThickness, const float radius, const float mval ) { //This is a one-off, but malloc is expensive to use repetitively, so use //array of dedicated channel number floats const float nChannels = (float) numChannels; std::cout << color_yellow << "MULTI-CHANNEL VERTICAL BENDER" << color_reset << std::endl; if(nChannels < 1.0) { std::cerr << color_red << "ERROR:" << color_reset << " attempt to use vertical bender with < 1 channels" << std::endl; exit(1); } const float opticalHeight = (height / nChannels) - 0.5*waferThickness; if(opticalHeight < 0.001) { std::cerr << color_red << "ERROR:" << color_reset << " optical height is less than 1 mm in vertical bender module (value is " << opticalHeight << ")" << std::endl; std::cerr << "\t height = " << height << "; nChannels = " << nChannels << std::endl; exit(1); } std::cout << nChannels << " channel bender " << height << " tall and of length " << length << " from wafers of thickness " << waferThickness << std::endl; //Kill neutrons missing the entrance of the system sandApertureCUDA(width, height, true); //First squeeze the neutrons into the channel sandSqueezeVerticalBenderChannels(height, nChannels, waferThickness); //Propagate a normal curved guide with 20 cm long pieces sandVerticallyCurvedGuide(length, 0.2f, width, opticalHeight, mval, radius); //UnSqueeze the neutrons out of the channel sandUnSqueezeVerticalBenderChannels(height, nChannels, waferThickness); } void Sandman::sample( const float width, const float height, const float win_width, const float win_height, const float hoffset, const float voffset, const float win_dist, const float lambdaMin, const float lambdaMax, const std::string& monitorNameStem) { /// /// Generates the initial beam phase space from the given requirements. 
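  ///
  /// Example call (a sketch only, with hypothetical numbers: a 1 cm x 1 cm
  /// sample, a 6 cm x 6 cm guide exit 0.5 m away, a 2 to 6 Angstrom band and
  /// no sample-position monitor), assuming a Sandman instance named sm:
  /// @code
  ///   sm.sample(0.01f, 0.01f, 0.06f, 0.06f, 0.0f, 0.0f, 0.5f, 2.0f, 6.0f, "");
  /// @endcode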
/// /// @param width width of sample in metres /// /// @param height height of sample in metres /// /// @param win_width the width of the beam at the exit of the guide in metres /// /// @param win_height the height of the beam at the exit of the guide in metres /// /// @param hoffset the horizontal offset of the sample relative to the beam /// axis (metres) positive is left as viewed from the guide exit --- this is /// almost certainly zero in most cases /// /// @param vertical offset of the sample relative to the beam axis (metres) /// positive being up --- this is almost certainly zero in most cases /// /// @param win_dist the distance from the guide exit to the sample position /// /// @param lambdaMin the minimum neutron wavelength needed at the sample position /// /// @param lambdaMax the maximum neutron wavelength needed at the sample position /// const float yMaxH = hoffset + 0.5*win_width; const float yMinH = hoffset - 0.5*win_width; const float yMaxV = voffset + 0.5*win_height; const float yMinV = voffset - 0.5*win_height; const float thetaMaxH = atan( (0.5*width + 0.5*win_width + hoffset) / win_dist); const float thetaMinH = atan( (-0.5*width - 0.5*win_width + hoffset) / win_dist); const float thetaMaxV = atan( (0.5*height + 0.5*win_height + voffset) / win_dist); const float thetaMinV = atan( (-0.5*height - 0.5*win_height + voffset) / win_dist); const float thetaMaxPrimeH = atan( (0.5*width - 0.5*win_width + hoffset) / win_dist); const float thetaMinPrimeH = atan( (-0.5*width + 0.5*win_width + hoffset) / win_dist); const float thetaMaxPrimeV = atan( (0.5*height - 0.5*win_height + voffset) / win_dist); const float thetaMinPrimeV = atan( (-0.5*height + 0.5*win_height + voffset) / win_dist); // The next part comes from // http://mathworld.wolfram.com/TrianglePointPicking.html // v1 is along x // (theta) axis, v2 is up the right diagonal line const float oxH = thetaMinH; const float oyH = yMinH; const float v1xH = thetaMaxPrimeH - thetaMinH; // v1y is zero const float v2xH = thetaMaxH - thetaMaxPrimeH; const float v2yH = yMaxH - yMinH; const float oxV = thetaMinV; const float oyV = yMinV; const float v1xV = thetaMaxPrimeV - thetaMinV; // v1y is zero const float v2xV = thetaMaxV - thetaMaxPrimeV; const float v2yV = yMaxV - yMinV; //Normalisation of solid angle (NOTE: moderator units are per cm2!) const float a1 = 100.0f * 100.0f * width * height; const float a2 = 100.0f * 100.0f * win_width * win_height; const float deltaAdeltaO = a1 * a2 / (100.0f * 100.0f * win_dist*win_dist); std::cout << "\tSolid angle normalisation: " << deltaAdeltaO << std::endl; deltaLambdag = fabs(lambdaMax-lambdaMin); sourceDeltaLambda = deltaLambdag; if(deltaLambdag < 0.0001) // Zero wavelength band is going to screw up the // maths. Put in an artificial, small band // hidden from the user deltaLambdag = 0.01; /// \todo Replace this maxElements with the memory-dependent check if(numElements > maxElements) { std::cerr << "\tMaximum number of elements exceeded." 
<< std::endl; exit(1); } //Generate 1 array of random numbers for wavelength generateOneRandomArray(); int threadsPerBlock = SANDMAN_CUDA_THREADS; int blocksPerGrid =(numElements + threadsPerBlock - 1) / threadsPerBlock; if(showCUDAsteps) printf("\tCUDA kernel sample wavelength allocation with %d blocks of %d threads\n", blocksPerGrid, threadsPerBlock); global_sandAllocateWavelength<<<blocksPerGrid, threadsPerBlock>>> (d_lambdag, d_r1g, lambdaMin, deltaLambdag, numElements); // printf("CUDA kernel sample Vertical wavelength allocation with %d blocks of %d threads\n", blocksPerGrid, // threadsPerBlock); // global_sandAllocateWavelength<<<blocksPerGrid, threadsPerBlock>>> // (d_lambdaVg, d_r2g, lambdaMin, deltaLambdag, numElements); // Report to user the memory usage for the work size_t freeMemBytes, totalMemBytes; checkCudaErrors(cudaMemGetInfo( &freeMemBytes, &totalMemBytes)) ; int freeMem = (int)freeMemBytes ; int totalMem = (int)totalMemBytes ; int allocMem = totalMem - freeMem ; printf("\tGPU mem: alloc = %i MB, free = %i MB, tot = %i MB\n", allocMem/1024/1024, freeMem/1024/1024, totalMem/1024/1024); printf("\t-------------------------\n"); printf("\tMemory used: %i percent\n", 100*allocMem/totalMem); printf("\t-------------------------\n"); //Generate 2 arrays of random numbers generateBothRandomArrays(); if(showCUDAsteps) printf("\tCUDA kernel sample Horizontal with %d blocks of %d threads\n", blocksPerGrid, threadsPerBlock); global_sandSampleCUDA<<<blocksPerGrid, threadsPerBlock>>> (d_pointsYH, d_pointsThetaH, d_weightHg, d_r1g, d_r2g, oxH, oyH, v1xH, v2xH, v2yH, numElements); //Generate 2 new arrays of random numbers generateBothRandomArrays(); if(showCUDAsteps) printf("\tCUDA kernel sample Vertical with %d blocks of %d threads\n", blocksPerGrid, threadsPerBlock); global_sandSampleCUDA<<<blocksPerGrid, threadsPerBlock>>> (d_pointsYV, d_pointsThetaV, d_weightVg, d_r1g, d_r2g, oxV, oyV, v1xV, v2xV, v2yV, numElements); // Initialise trajectory brightness with the solid angle calculation if(showCUDAsteps) std::cout << "\tCUDA kernel initArray on moderator flux with " << blocksPerGrid << " blocks of " << threadsPerBlock << " threads" << std::endl; global_initArray<<<blocksPerGrid, threadsPerBlock>>> (d_modFlux, deltaAdeltaO, numElements); //Perhaps the user wants to get a phase space map at the sample position. 
  //If that is the case, we need to do a little back and forth fudgery
  if(!monitorNameStem.empty())
    {
      //Here we go backwards to the sample plane, snapshot a monitor, then
      //go forwards back to the guide entrance, and take an insignificant
      //rounding error hit

      std::cout << "\tGenerating snapshot for sample position monitor" << std::endl;

      //Go backwards
      sandFreeSpaceCUDA(-win_dist);

      //Create phase space snapshots
      phaseSpaceMapH( (monitorNameStem + "Horizontal2D.csv").c_str(), yMinH, yMaxH, thetaMinH, thetaMaxH );

      //Go forwards again before continuing with the rest
      sandFreeSpaceCUDA(win_dist);
    }
}




//////////////////////////////////////////
//                                      //
//          Private Functions           //
//                                      //
//                 and                  //
//                                      //
//               kernels                //
//                                      //
//////////////////////////////////////////



///
/// Unit test setup function to seed the Y values
///
/// @param ypoints pointer to host memory that needs to be copied over
///
void Sandman::unitTestInitPhaseSpace(const float *ypoints, const float *pointsTheta, const float *weight)
{
  //Copy to device (let's use the horizontal plane) to overwrite
  checkCudaErrors(cudaMemcpy(d_pointsYH, ypoints, 32*sizeof(float), cudaMemcpyHostToDevice));
  checkCudaErrors(cudaMemcpy(d_pointsThetaH, pointsTheta, 32*sizeof(float), cudaMemcpyHostToDevice));
  checkCudaErrors(cudaMemcpy(d_weightHg, weight, 32*sizeof(float), cudaMemcpyHostToDevice));
}


void Sandman::unitTestGetPhaseSpace(float *ypoints, float *pointsTheta, float *weight)
{
  //Copy back from the device (horizontal plane) to the host buffers
  checkCudaErrors(cudaMemcpy(ypoints, d_pointsYH, 32*sizeof(float), cudaMemcpyDeviceToHost));
  checkCudaErrors(cudaMemcpy(pointsTheta, d_pointsThetaH, 32*sizeof(float), cudaMemcpyDeviceToHost));
  checkCudaErrors(cudaMemcpy(weight, d_weightHg, 32*sizeof(float), cudaMemcpyDeviceToHost));
}


void Sandman::displayWelcome(void)
{
  ///
  /// Presents welcome message when called by constructor.
  ///

  std::cout << "****************************************" << std::endl;
  std::cout << "*                                      *" << std::endl;
  std::cout << "*               " << color_red << "SANDMAN" << color_reset << "                *" << std::endl;
  std::cout << "*                                      *" << std::endl;
  std::cout << "*    Implementation of Neutron beam    *" << std::endl;
  std::cout << "*   transport on GPU in C++ and CUDA   *" << std::endl;
  std::cout << "*                                      *" << std::endl;
  std::cout << "*       " << color_yellow << "[email protected]" << color_reset << " 2016           *" << std::endl;
  std::cout << "*                                      *" << std::endl;
  std::cout << "*      Released under BSD license      *" << std::endl;
  std::cout << "*                                      *" << std::endl;
  std::cout << "****************************************" << std::endl;
}


void Sandman::generateRandomArray(float *array)
{
  ///
  /// Fills the given device array with uniformly distributed random numbers
  /// using the cuRAND generator.
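  ///
  /// The deviates produced here feed kernels such as global_sandSampleCUDA,
  /// which (following the triangle point picking construction described in
  /// sample() above) is assumed to map a pair of uniform numbers r1, r2 onto
  /// the acceptance region roughly as:
  /// @code
  ///   // theta = ox + r1 * v1x + r2 * v2x;   (v1 lies along the theta axis)
  ///   // y     = oy            + r2 * v2y;   (v1y is zero by construction)
  /// @endcode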
/// #ifdef DEBUG cudaError_t errSync = cudaGetLastError(); cudaError_t errAsync = cudaDeviceSynchronize(); if (errSync != cudaSuccess) std::cout << "Sync kernel error: " << cudaGetErrorString(errSync) << std::endl; if (errAsync != cudaSuccess) std::cout << "Async kernel error: " << cudaGetErrorString(errAsync) << std::endl; #endif printf("\tGenerating random numbers on GPU\n"); checkCudaErrors(curandGenerateUniform(prngGPU, (float *) array, numElements)); #ifdef DEBUG if (errSync != cudaSuccess) std::cout << "Sync kernel error: " << cudaGetErrorString(errSync) << std::endl; if (errAsync != cudaSuccess) std::cout << "Async kernel error: " << cudaGetErrorString(errAsync) << std::endl; #endif } void Sandman::zeroHistogram1D(void) { printf("\tZeroing 1D histogram\n"); int threadsPerBlock = SANDMAN_CUDA_THREADS; int blocksPerGrid =(100 + threadsPerBlock - 1) / threadsPerBlock; if(showCUDAsteps) std::cout << "\tCUDA kernel zero 1d histogram[" << 100 << "] with " << blocksPerGrid << " blocks of " << threadsPerBlock << " threads" << std::endl; //void global_sandZeroHistogram1D(float *d_histogram, const int numElements) global_sandZeroHistogram1D<<<blocksPerGrid, threadsPerBlock>>> (d_histogram1D); #ifdef DEBUG cudaError_t errSync = cudaGetLastError(); cudaError_t errAsync = cudaDeviceSynchronize(); if (errSync != cudaSuccess) std::cout << "Sync kernel error: " << cudaGetErrorString(errSync) << std::endl; if (errAsync != cudaSuccess) std::cout << "Async kernel error: " << cudaGetErrorString(errAsync) << std::endl; #endif } void Sandman::zeroHistogram2D(void) { printf("\tZeroing 2D histogram.\n"); int threadsPerBlock = SANDMAN_CUDA_THREADS; int blocksPerGrid =(100*100 + threadsPerBlock - 1) / threadsPerBlock; if(showCUDAsteps) printf("\tCUDA kernel zeroHistogram2D with %d blocks of %d threads\n", blocksPerGrid, threadsPerBlock); //void global_sandZeroHistogram1D(float *d_histogram, const int numElements) global_sandZeroHistogram2D<<<blocksPerGrid, threadsPerBlock>>> ((float (*)[100])d_histogram2D); } float Sandman::arrayMinimum(const float *d_array, float *d_answer) { float h_answer[1]; int threadsPerBlock = SANDMAN_CUDA_THREADS; int blocksPerGrid =(numElements + threadsPerBlock - 1) / threadsPerBlock; if(showCUDAsteps) printf("\tCUDA kernel arrayMin %d blocks of %d threads\n", blocksPerGrid, threadsPerBlock); // // Zero the count on the host // h_answer[0] = 10000.0f; // // Copy the zero total to device memory // checkCudaErrors(cudaMemcpy(d_answer, h_answer, sizeof(float), cudaMemcpyHostToDevice)); #ifdef DEBUG cudaError_t errSync = cudaGetLastError(); cudaError_t errAsync = cudaDeviceSynchronize(); if (errSync != cudaSuccess) std::cout << "Sync kernel error: " << cudaGetErrorString(errSync) << std::endl; if (errAsync != cudaSuccess) std::cout << "Async kernel error: " << cudaGetErrorString(errAsync) << std::endl; #endif printf("\tCounting up phase space\n"); //void global_countNeutrons(float *numNeutrons, const float *weight, const int numElements) global_arrayMinimum<<<blocksPerGrid, threadsPerBlock>>> (d_array, d_answer, numElements); #ifdef DEBUG if (errSync != cudaSuccess) std::cout << "Sync kernel error: " << cudaGetErrorString(errSync) << std::endl; if (errAsync != cudaSuccess) std::cout << "Async kernel error: " << cudaGetErrorString(errAsync) << std::endl; #endif //Copy answer out of device memory for host reporting checkCudaErrors(cudaMemcpy(h_answer, d_answer, sizeof(float), cudaMemcpyDeviceToHost)); #ifdef DEBUG if (errSync != cudaSuccess) std::cout << "Sync kernel error: " << 
cudaGetErrorString(errSync) << std::endl; if (errAsync != cudaSuccess) std::cout << "Async kernel error: " << cudaGetErrorString(errAsync) << std::endl; #endif printf("Got %f minimum\n", h_answer[0]); return(h_answer[0]); } float Sandman::arrayMaximum(const float *d_array, float *d_answer) { float h_answer[1]; //for debugging int threadsPerBlock = SANDMAN_CUDA_THREADS; int blocksPerGrid =(numElements + threadsPerBlock - 1) / threadsPerBlock; if(showCUDAsteps) printf("\tCUDA kernel arrayMax %d blocks of %d threads\n", blocksPerGrid, threadsPerBlock); // // Zero the count on the host // h_answer[0] = -10000.0f; // // Copy the zero total to device memory // checkCudaErrors(cudaMemcpy(d_answer, h_answer, sizeof(float), cudaMemcpyHostToDevice)); global_arrayMaximum<<<blocksPerGrid, threadsPerBlock>>> (d_array, d_answer, numElements); //Copy total out of device memory for host reporting checkCudaErrors(cudaMemcpy(h_answer, d_answer, sizeof(float), cudaMemcpyDeviceToHost)); printf("Got %f maximum\n", h_answer[0]); return(h_answer[0]); } void Sandman::sandGetPhaseSpaceH(float *h_pointsY, float *h_pointsTheta, float *h_weight) { // Copy the data off the card to make sure it makes sense back at the host checkCudaErrors(cudaMemcpy(h_pointsY, d_pointsYH, numElements * sizeof(float), cudaMemcpyDeviceToHost)); checkCudaErrors(cudaMemcpy(h_pointsTheta, d_pointsThetaH, numElements * sizeof(float), cudaMemcpyDeviceToHost)); checkCudaErrors(cudaMemcpy(h_weight, d_weightHg, numElements * sizeof(float), cudaMemcpyDeviceToHost)); } void Sandman::sandGetPhaseSpaceV(float *h_pointsY, float *h_pointsTheta, float *h_weight) { // Copy the data off the card to make sure it makes sense back at the host checkCudaErrors(cudaMemcpy(h_pointsY, d_pointsYV, numElements * sizeof(float), cudaMemcpyDeviceToHost)); checkCudaErrors(cudaMemcpy(h_pointsTheta, d_pointsThetaV, numElements * sizeof(float), cudaMemcpyDeviceToHost)); checkCudaErrors(cudaMemcpy(h_weight, d_weightVg, numElements * sizeof(float), cudaMemcpyDeviceToHost)); } void Sandman::sandDebugPosPos(float *h_pointsH, float *h_weightH, float *h_pointsV, float *h_weightV) { // Copy the data off the card to make sure it makes sense back at the host checkCudaErrors(cudaMemcpy(h_pointsH, d_pointsYH, numElements * sizeof(float), cudaMemcpyDeviceToHost)); checkCudaErrors(cudaMemcpy(h_weightH, d_weightHg, numElements * sizeof(float), cudaMemcpyDeviceToHost)); checkCudaErrors(cudaMemcpy(h_pointsV, d_pointsYV, numElements * sizeof(float), cudaMemcpyDeviceToHost)); checkCudaErrors(cudaMemcpy(h_weightV, d_weightVg, numElements * sizeof(float), cudaMemcpyDeviceToHost)); } ///Calculates which channel number (left to right) the neutron sits in, then ///shifts all phase space to fit in a single version of that channel so that ///the curved guide module can be used to do the transport for a multi-channel ///bender. Then the opposite function "unSqueeze..." 
reverses this process void Sandman::sandSqueezeHorizontalBenderChannels(const float width, const float numChannels, const float waferThickness) { //Channel width in each case includes one wafer thickness, which is attenuated if the neutron hits it float channelWidth = (width + waferThickness) / numChannels; //(this calc has last channel wafer inside the guide substrate mathematically) //Device now computes //relativeY = ypos[i] + width/2.0; //channelNumber = roundf( relativeY / channelwidth ); //That is stored in tempArray //Then we adjust the position to be within a single channel of the right thickness for the OPTICS //ypos[i] = ypos[i] + width/2.0f; //ypos[i] = ypos[i] / channelNumber; //ypos[i] = ypos[i] - 0.5f * (channelWidth-waferThickness); int threadsPerBlock = SANDMAN_CUDA_THREADS; int blocksPerGrid =(numElements + threadsPerBlock - 1) / threadsPerBlock; if(showCUDAsteps) printf("\tCUDA kernel squeeze h bender with %d blocks of %d threads\n", blocksPerGrid, threadsPerBlock); global_squeezeBenderChannel<<<blocksPerGrid, threadsPerBlock>>> (d_pointsYH, d_tempArray, width, channelWidth, waferThickness, numElements); } ///"Squeeze...() calculates which channel number (left to right) the neutron ///sits in, then shifts all phase space to fit in a single version of that ///channel so that the curved guide module can be used to do the transport for ///a multi-channel bender. This function "unSqueeze..." reverses this process ///after the bender has been done void Sandman::sandUnSqueezeHorizontalBenderChannels(const float width, const float numChannels, const float waferThickness) { //Channel width in this case includes one wafer on the far side float channelWidth = (width + waferThickness) / numChannels; //(this calc has last channel wafer inside the guide substrate mathematically) //Device reverses the position adjustment of sandSqueeze... //ypos[i] = ypos[i] + 0.5f * (channelWidth - waferThickness); //ypos[i] = ypos[i] * channelNumber; //ypos[i] = ypos[i] - width/2.0f; int threadsPerBlock = SANDMAN_CUDA_THREADS; int blocksPerGrid =(numElements + threadsPerBlock - 1) / threadsPerBlock; if(showCUDAsteps) printf("\tCUDA kernel unsqueeze h bender with %d blocks of %d threads\n", blocksPerGrid, threadsPerBlock); global_unSqueezeBenderChannel<<<blocksPerGrid, threadsPerBlock>>> (d_pointsYH, d_tempArray, width, channelWidth, waferThickness, numElements); } ///Calculates which channel number (left to right) the neutron sits in, then ///shifts all phase space to fit in a single version of that channel so that ///the curved guide module can be used to do the transport for a multi-channel ///bender. Then the opposite function "unSqueeze..." 
reverses this process void Sandman::sandSqueezeVerticalBenderChannels(const float height, const float numChannels, const float waferThickness) { //Channel width in this case includes one wafer on the far side float channelHeight = (height + waferThickness) / numChannels; //(this calc has last channel wafer inside the guide substrate mathematically) //Device now computes //relativeY = ypos[i] + width/2.0; //channelNumber = roundf( relativeY / channelwidth ); //That is stored in tempArray //Then we adjust the position to be within a single channel of the right thickness for the OPTICS //ypos[i] = ypos[i] + width/2.0f; //ypos[i] = ypos[i] / channelNumber; //ypos[i] = ypos[i] - 0.5f * (channelWidth-waferThickness); int threadsPerBlock = SANDMAN_CUDA_THREADS; int blocksPerGrid =(numElements + threadsPerBlock - 1) / threadsPerBlock; if(showCUDAsteps) printf("\tCUDA kernel squeeze v bender with %d blocks of %d threads\n", blocksPerGrid, threadsPerBlock); global_squeezeBenderChannel<<<blocksPerGrid, threadsPerBlock>>> (d_pointsYV, d_tempArray, height, channelHeight, waferThickness, numElements); } ///"Squeeze...() calculates which channel number (left to right) the neutron ///sits in, then shifts all phase space to fit in a single version of that ///channel so that the curved guide module can be used to do the transport for ///a multi-channel bender. This function "unSqueeze..." reverses this process ///after the bender has been done void Sandman::sandUnSqueezeVerticalBenderChannels(const float height, const float numChannels, const float waferThickness) { //Channel width in this case includes one wafer on the far side float channelHeight = (height + waferThickness) / numChannels; //(this calc has last channel wafer inside the guide substrate mathematically) //Device reverses the position adjustment of sandSqueeze... //ypos[i] = ypos[i] + 0.5f * (channelWidth - waferThickness); //ypos[i] = ypos[i] * channelNumber; //ypos[i] = ypos[i] - width/2.0f; int threadsPerBlock = SANDMAN_CUDA_THREADS; int blocksPerGrid =(numElements + threadsPerBlock - 1) / threadsPerBlock; if(showCUDAsteps) printf("\tCUDA kernel unsqueeze v bender with %d blocks of %d threads\n", blocksPerGrid, threadsPerBlock); global_unSqueezeBenderChannel<<<blocksPerGrid, threadsPerBlock>>> (d_pointsYV, d_tempArray, height, channelHeight, waferThickness, numElements); }
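The squeeze/unsqueeze pair above is a pure per-neutron coordinate transform; since the device kernels themselves are not shown here, the following is only a host-side C++ sketch of the arithmetic spelled out in the comments (the helper names and the example values are illustrative and not part of the Sandman API):

#include <math.h>
#include <stdio.h>

// Mirrors the pseudocode documented for global_squeezeBenderChannel: the
// channel index is kept alongside the squeezed coordinate (on the device it
// lives in d_tempArray).
struct SqueezedY { float y; float channelNumber; };

static SqueezedY squeezeY(float y, float width, float numChannels, float waferThickness)
{
  const float channelWidth  = (width + waferThickness) / numChannels;
  const float relativeY     = y + width / 2.0f;
  const float channelNumber = roundf(relativeY / channelWidth);
  float yOut = (y + width / 2.0f) / channelNumber;
  yOut -= 0.5f * (channelWidth - waferThickness);
  SqueezedY s = { yOut, channelNumber };
  return s;
}

// Mirrors the pseudocode documented for global_unSqueezeBenderChannel.
static float unSqueezeY(SqueezedY s, float width, float numChannels, float waferThickness)
{
  const float channelWidth = (width + waferThickness) / numChannels;
  float y = s.y + 0.5f * (channelWidth - waferThickness);
  y *= s.channelNumber;
  return y - width / 2.0f;
}

int main(void)
{
  const float width = 0.06f, numChannels = 5.0f, wafer = 0.0005f;
  const float y0 = 0.021f;  // a trajectory well inside the guide, away from channel 0
  SqueezedY s = squeezeY(y0, width, numChannels, wafer);
  printf("channel %g, squeezed y = %g, restored y = %g\n",
         s.channelNumber, s.y, unSqueezeY(s, width, numChannels, wafer));
  return 0;
}

Applying unSqueezeY to the output of squeezeY recovers the original coordinate, which is what lets the single-channel curved guide module transport a multi-channel bender.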
f7ac0d3b0924a6f2b6f54c37c9d02cd3258c2636.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* * Copyright 2015 Netherlands eScience Center, VU University Amsterdam, and Netherlands Forensic Institute * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ /** * This file contains CUDA kernels for applying a Wiener filter to a * PRNU pattern, as proposed by: * M. Chen et al. "Determining image origin and integrity using sensor * noise", IEEE Trans. Inf. Forensics Secur. 3 (2008) 74-90. * * The Wiener filter is used to remove JPEG artifacts from a PRNU pattern. * * To apply the complete filter: * apply Fourier transform to the input image * call computeSquaredMagnitudes() on the frequencies * call computeVarianceEstimates() on the squared magnitudes * call computeVarianceZeroMean() on the squared magnitudes * call scaleWithVariances() scaling the frequencies using the local and global variance * apply inverse Fourier transform * normalize result by calling normalizeToReal() * * @author Ben van Werkhoven <[email protected]> * @version 0.1 */ #ifndef block_size_x #define block_size_x 32 #endif #ifndef block_size_y #define block_size_y 16 #endif #ifndef reuse_computation #define reuse_computation 1 #endif //set the number and size of filters, also adjust max_border #define FILTERS 4 #define FILTER_SIZES {3, 5, 7, 9} #define MAX_BORDER 4 //the largest (filter size/2) #define FLT_MAX 3.40282347e+38f //function interfaces to prevent C++ garbling the kernel names extern "C" { __global__ void computeSquaredMagnitudes(int h, int w, float* output, float* frequencies); __global__ void scaleWithVariances(int h, int w, float* output, float* input, float* varianceEstimates, float* variance); __global__ void toComplex(int h, int w, float* complex, float* input); __global__ void toReal(int h, int w, float* output, float* complex); __global__ void computeVarianceZeroMean(int n, float* output, float *input); __global__ void computeVarianceEstimates(int h, int w, float* varest, float* input); __global__ void computeVarianceEstimates_naive(int h, int w, float* varest, float* input); __global__ void normalizeToReal(int h, int w, float* output, float* complex); __global__ void normalize(int h, int w, float* output, float* complex); __global__ void sumFloats(float *output, float *input, int n); } /** * Computes the square of each frequency and stores the result as a real. */ __global__ void computeSquaredMagnitudes(int h, int w, float* output, float* frequencies) { int i = threadIdx.y + blockIdx.y * block_size_y; int j = threadIdx.x + blockIdx.x * block_size_x; if (j < w && i < h) { float re = frequencies[i*2*w+(2 * j)]; float im = frequencies[i*2*w+(2 * j + 1)]; output[i*w+j] = (re * re) + (im * im); } } /** * This kernel scales the frequencies in input with a combination of the global variance and an estimate * for the local variance at that position. Effectively this cleans the input pattern from low frequency * noise. 
*/ __global__ void scaleWithVariances(int h, int w, float* output, float* input, float* varianceEstimates, float* variance) { int i = threadIdx.y + blockIdx.y * block_size_y; int j = threadIdx.x + blockIdx.x * block_size_x; float var = variance[0]; if (j < w && i < h) { float scale = var / max(var, varianceEstimates[i*w+j]); output[i*2*w+(j * 2)] = input[i*2*w+(j*2)] * scale; output[i*2*w+(j * 2 + 1)] = input[i*2*w+(j * 2 + 1)] * scale; } } /** * Simple helper kernel to convert an array of real values to an array of complex values */ __global__ void toComplex(int h, int w, float* complex, float* input) { int i = threadIdx.y + blockIdx.y * block_size_y; int j = threadIdx.x + blockIdx.x * block_size_x; if (i < h && j < w) { complex[i * w * 2 + 2 * j] = input[i * w + j]; complex[i * w * 2 + (2 * j + 1)] = 0.0f; } } /** * Simple helper kernel to convert a complex array to an array of real values */ __global__ void toReal(int h, int w, float* output, float* complex) { int i = threadIdx.y + blockIdx.y * block_size_y; int j = threadIdx.x + blockIdx.x * block_size_x; if (i < h && j < w) { output[i*w+j] = complex[i * w * 2 + 2 * j]; } } /** * This kernel normalizes the input by dividing it by the number of pixels in the image. * It takes an array of complex numbers as input, but only stores the real values. */ __global__ void normalizeToReal(int h, int w, float* output, float* complex) { int i = threadIdx.y + blockIdx.y * block_size_y; int j = threadIdx.x + blockIdx.x * block_size_x; if (i < h && j < w) { output[i*w+j] = (complex[i * w * 2 + 2 * j] / (float)(w * h)); } } /** * This kernel normalizes the complex input by dividing it by the number of pixels in the image. */ __global__ void normalize(int h, int w, float* complex_out, float* complex_in) { int i = threadIdx.y + blockIdx.y * block_size_y; int j = threadIdx.x + blockIdx.x * block_size_x; if (i < h && j < w) { complex_out[i*w*2+2*j ] = (complex_in[i*w*2+2*j ] / (float)(w*h)); complex_out[i*w*2+2*j+1] = (complex_in[i*w*2+2*j+1] / (float)(w*h)); } } /** * computeVarianceEstimates uses a number of simple filters to compute a minimum local variance * * Instead of using multiple arrays with zeroed borders around them, the loading phase of this * kernel writes a zero to shared memory instead of loading a border value from global memory. * The filters can then be performed as normal on the data in shared memory. Because of this * MAX_BORDER needs to be set accordingly. 
* */ __global__ void computeVarianceEstimates(int h, int w, float* varest, float* input) { int ty = threadIdx.y; int tx = threadIdx.x; int i = blockIdx.y * block_size_y; int j = blockIdx.x * block_size_x; __shared__ float shinput[block_size_y+2*MAX_BORDER][block_size_x+2*MAX_BORDER]; //the loading phase of the kernel, which writes 0.0f to shared memory if the index //is outside the input int yEnd = block_size_y+2*MAX_BORDER; int xEnd = block_size_x+2*MAX_BORDER; for (int y=ty; y < yEnd; y+= block_size_y) { for (int x=tx; x < xEnd; x+= block_size_x) { float in = 0.0f; int indexy = i+y-MAX_BORDER; int indexx = j+x-MAX_BORDER; if (indexy >= 0 && indexy < h) { if (indexx >= 0 && indexx < w) { in = input[indexy*w+indexx]; } } shinput[y][x] = in; } } __syncthreads(); const int filter[FILTERS] = FILTER_SIZES; float res = FLT_MAX; #if reuse_computation == 0 //perform filtering without reusing the sum from smaller filters for (int f = 0; f < FILTERS; f++) { int filterSize = filter[f]; int offset = MAX_BORDER-(filterSize/2); //do a convolution float sum = 0.0f; for (int fi = 0; fi < filterSize; fi++) { for (int fj = 0; fj < filterSize; fj++) { sum += shinput[ty+fi+offset][tx+fj+offset]; } } sum /= (float)(filterSize * filterSize); //store minimum res = sum < res ? sum : res; } #elif reuse_computation == 1 //perform filtering while reusing the sum from smaller filters //start from center pixel float sum = shinput[ty+MAX_BORDER][tx+MAX_BORDER]; //add sides of the square filter to sum and store minimum average for (int f = 0; f < FILTERS; f++) { int filterSize = filter[f]; int offset = MAX_BORDER-(filterSize/2); //top and bottom row for (int fj=0; fj<filterSize; fj++) { sum += shinput[ty+0+offset][tx+fj+offset]; sum += shinput[ty+filterSize-1+offset][tx+fj+offset]; } //two sides (between top and bottom rows) for (int fi=1; fi<filterSize-1; fi++) { sum += shinput[ty+fi+offset][tx+0+offset]; sum += shinput[ty+fi+offset][tx+filterSize-1+offset]; } //store minimum float avg = sum / (filterSize*filterSize); res = avg < res ? avg : res; } #endif //write output varest[(i+ty)*w+(j+tx)] = res; } /** * This method is a naive implementation of computeVarianceEstimates used for correctness checks */ __global__ void computeVarianceEstimates_naive(int h, int w, float* varest, float* input) { int i = threadIdx.y + blockIdx.y * block_size_y; int j = threadIdx.x + blockIdx.x * block_size_x; float res = FLT_MAX; if (i < h && j < w) { const int filter[FILTERS] = FILTER_SIZES; for (int f = 0; f < FILTERS; f++) { int filterSize = filter[f]; int border = filterSize/2; //do a convolution float sum = 0.0f; for (int fi = 0; fi < filterSize; fi++) { for (int fj = 0; fj < filterSize; fj++) { //original //sum += input[(i + fi)*(w+border*2)+(j + fj)]; int row = i+fi-border; int col = j+fj-border; //the following ifs are a hack to save redundant copying if (row >= 0 && row < h) { if (col >= 0 && col < w) { sum += input[row*w + col]; } } } } sum /= (float)(filterSize * filterSize); if (sum < res) { res = sum; } } //write output varest[i*w+j] = res; } } /* * This method computes the variance of an input array, assuming the mean is equal to zero * * Thread block size should be power of two because of the reduction. 
* The implementation currently assumes only one thread block is used for the entire input array * * In case of multiple thread blocks initialize output to zero and use atomic add or another kernel * * block_size_x power of 2 */ #ifndef grid_size_x //hack to see if the Kernel Tuner is being used #undef block_size_x #define block_size_x 128 #endif __global__ void computeVarianceZeroMean(int n, float *output, float *input) { int x = blockIdx.x * block_size_x + threadIdx.x; int ti = threadIdx.x; int step_size = block_size_x * gridDim.x; float sum = 0.0f; if (x < n) { //compute thread-local sums of squares for (int i=x; i < n; i+=step_size) { sum += input[i]*input[i]; } } //store local sums in shared memory __shared__ float shmem[block_size_x]; shmem[ti] = sum; __syncthreads(); //reduce local sums for (unsigned int s=block_size_x/2; s>0; s>>=1) { if (ti < s) { shmem[ti] += shmem[ti + s]; } __syncthreads(); } //write result if (ti == 0) { output[blockIdx.x] = ( shmem[0] * n ) / ( n - 1 ); //in case of multiple threadblocks write back using atomicAdd } } /* * Simple CUDA Helper function to reduce the output of a * reduction kernel with multiple thread blocks to a single value * * This function performs a sum of an array of floats * * This function is to be called with only a single thread block */ __global__ void sumFloats(float *output, float *input, int n) { int ti = threadIdx.x; __shared__ float shmem[block_size_x]; //compute thread-local sums float sum = 0.0f; for (int i=ti; i < n; i+=block_size_x) { sum += input[i]; } //store local sums in shared memory shmem[ti] = sum; __syncthreads(); //reduce local sums for (unsigned int s=block_size_x/2; s>0; s>>=1) { if (ti < s) { shmem[ti] += shmem[ti + s]; } __syncthreads(); } //write result if (ti == 0) { output[0] = shmem[0]; } }
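The comment above notes that computeVarianceZeroMean, as written, expects a single thread block, and that a multi-block run needs either an atomicAdd or a second kernel; sumFloats is such a kernel. A hypothetical HIP host-side sketch of the two-step variant, assuming it is compiled in the same translation unit as the kernels above and uses the default block_size_x of 128 selected by the #ifndef grid_size_x fallback (the function and buffer names here are illustrative):

// d_partials must hold at least numBlocks floats; d_variance receives the final value.
static void varianceZeroMeanMultiBlock(float *d_input, float *d_partials,
                                       float *d_variance, int n, int numBlocks)
{
  // Each block strides over the input and writes its scaled partial sum of
  // squares to d_partials[blockIdx.x].
  hipLaunchKernelGGL(computeVarianceZeroMean, dim3(numBlocks), dim3(128), 0, 0,
                     n, d_partials, d_input);

  // A single 128-thread block then folds the per-block partials into d_variance[0].
  hipLaunchKernelGGL(sumFloats, dim3(1), dim3(128), 0, 0,
                     d_variance, d_partials, numBlocks);

  hipDeviceSynchronize();
}

Because every block applies the same n/(n-1) scaling to its partial sum of squares, adding the per-block outputs reproduces the value a single-block launch would compute.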
f7ac0d3b0924a6f2b6f54c37c9d02cd3258c2636.cu
/* * Copyright 2015 Netherlands eScience Center, VU University Amsterdam, and Netherlands Forensic Institute * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ /** * This file contains CUDA kernels for applying a Wiener filter to a * PRNU pattern, as proposed by: * M. Chen et al. "Determining image origin and integrity using sensor * noise", IEEE Trans. Inf. Forensics Secur. 3 (2008) 74-90. * * The Wiener filter is used to remove JPEG artifacts from a PRNU pattern. * * To apply the complete filter: * apply Fourier transform to the input image * call computeSquaredMagnitudes() on the frequencies * call computeVarianceEstimates() on the squared magnitudes * call computeVarianceZeroMean() on the squared magnitudes * call scaleWithVariances() scaling the frequencies using the local and global variance * apply inverse Fourier transform * normalize result by calling normalizeToReal() * * @author Ben van Werkhoven <[email protected]> * @version 0.1 */ #ifndef block_size_x #define block_size_x 32 #endif #ifndef block_size_y #define block_size_y 16 #endif #ifndef reuse_computation #define reuse_computation 1 #endif //set the number and size of filters, also adjust max_border #define FILTERS 4 #define FILTER_SIZES {3, 5, 7, 9} #define MAX_BORDER 4 //the largest (filter size/2) #define FLT_MAX 3.40282347e+38f //function interfaces to prevent C++ garbling the kernel names extern "C" { __global__ void computeSquaredMagnitudes(int h, int w, float* output, float* frequencies); __global__ void scaleWithVariances(int h, int w, float* output, float* input, float* varianceEstimates, float* variance); __global__ void toComplex(int h, int w, float* complex, float* input); __global__ void toReal(int h, int w, float* output, float* complex); __global__ void computeVarianceZeroMean(int n, float* output, float *input); __global__ void computeVarianceEstimates(int h, int w, float* varest, float* input); __global__ void computeVarianceEstimates_naive(int h, int w, float* varest, float* input); __global__ void normalizeToReal(int h, int w, float* output, float* complex); __global__ void normalize(int h, int w, float* output, float* complex); __global__ void sumFloats(float *output, float *input, int n); } /** * Computes the square of each frequency and stores the result as a real. */ __global__ void computeSquaredMagnitudes(int h, int w, float* output, float* frequencies) { int i = threadIdx.y + blockIdx.y * block_size_y; int j = threadIdx.x + blockIdx.x * block_size_x; if (j < w && i < h) { float re = frequencies[i*2*w+(2 * j)]; float im = frequencies[i*2*w+(2 * j + 1)]; output[i*w+j] = (re * re) + (im * im); } } /** * This kernel scales the frequencies in input with a combination of the global variance and an estimate * for the local variance at that position. Effectively this cleans the input pattern from low frequency * noise. 
*/ __global__ void scaleWithVariances(int h, int w, float* output, float* input, float* varianceEstimates, float* variance) { int i = threadIdx.y + blockIdx.y * block_size_y; int j = threadIdx.x + blockIdx.x * block_size_x; float var = variance[0]; if (j < w && i < h) { float scale = var / max(var, varianceEstimates[i*w+j]); output[i*2*w+(j * 2)] = input[i*2*w+(j*2)] * scale; output[i*2*w+(j * 2 + 1)] = input[i*2*w+(j * 2 + 1)] * scale; } } /** * Simple helper kernel to convert an array of real values to an array of complex values */ __global__ void toComplex(int h, int w, float* complex, float* input) { int i = threadIdx.y + blockIdx.y * block_size_y; int j = threadIdx.x + blockIdx.x * block_size_x; if (i < h && j < w) { complex[i * w * 2 + 2 * j] = input[i * w + j]; complex[i * w * 2 + (2 * j + 1)] = 0.0f; } } /** * Simple helper kernel to convert a complex array to an array of real values */ __global__ void toReal(int h, int w, float* output, float* complex) { int i = threadIdx.y + blockIdx.y * block_size_y; int j = threadIdx.x + blockIdx.x * block_size_x; if (i < h && j < w) { output[i*w+j] = complex[i * w * 2 + 2 * j]; } } /** * This kernel normalizes the input by dividing it by the number of pixels in the image. * It takes an array of complex numbers as input, but only stores the real values. */ __global__ void normalizeToReal(int h, int w, float* output, float* complex) { int i = threadIdx.y + blockIdx.y * block_size_y; int j = threadIdx.x + blockIdx.x * block_size_x; if (i < h && j < w) { output[i*w+j] = (complex[i * w * 2 + 2 * j] / (float)(w * h)); } } /** * This kernel normalizes the complex input by dividing it by the number of pixels in the image. */ __global__ void normalize(int h, int w, float* complex_out, float* complex_in) { int i = threadIdx.y + blockIdx.y * block_size_y; int j = threadIdx.x + blockIdx.x * block_size_x; if (i < h && j < w) { complex_out[i*w*2+2*j ] = (complex_in[i*w*2+2*j ] / (float)(w*h)); complex_out[i*w*2+2*j+1] = (complex_in[i*w*2+2*j+1] / (float)(w*h)); } } /** * computeVarianceEstimates uses a number of simple filters to compute a minimum local variance * * Instead of using multiple arrays with zeroed borders around them, the loading phase of this * kernel writes a zero to shared memory instead of loading a border value from global memory. * The filters can then be performed as normal on the data in shared memory. Because of this * MAX_BORDER needs to be set accordingly. 
* */ __global__ void computeVarianceEstimates(int h, int w, float* varest, float* input) { int ty = threadIdx.y; int tx = threadIdx.x; int i = blockIdx.y * block_size_y; int j = blockIdx.x * block_size_x; __shared__ float shinput[block_size_y+2*MAX_BORDER][block_size_x+2*MAX_BORDER]; //the loading phase of the kernel, which writes 0.0f to shared memory if the index //is outside the input int yEnd = block_size_y+2*MAX_BORDER; int xEnd = block_size_x+2*MAX_BORDER; for (int y=ty; y < yEnd; y+= block_size_y) { for (int x=tx; x < xEnd; x+= block_size_x) { float in = 0.0f; int indexy = i+y-MAX_BORDER; int indexx = j+x-MAX_BORDER; if (indexy >= 0 && indexy < h) { if (indexx >= 0 && indexx < w) { in = input[indexy*w+indexx]; } } shinput[y][x] = in; } } __syncthreads(); const int filter[FILTERS] = FILTER_SIZES; float res = FLT_MAX; #if reuse_computation == 0 //perform filtering without reusing the sum from smaller filters for (int f = 0; f < FILTERS; f++) { int filterSize = filter[f]; int offset = MAX_BORDER-(filterSize/2); //do a convolution float sum = 0.0f; for (int fi = 0; fi < filterSize; fi++) { for (int fj = 0; fj < filterSize; fj++) { sum += shinput[ty+fi+offset][tx+fj+offset]; } } sum /= (float)(filterSize * filterSize); //store minimum res = sum < res ? sum : res; } #elif reuse_computation == 1 //perform filtering while reusing the sum from smaller filters //start from center pixel float sum = shinput[ty+MAX_BORDER][tx+MAX_BORDER]; //add sides of the square filter to sum and store minimum average for (int f = 0; f < FILTERS; f++) { int filterSize = filter[f]; int offset = MAX_BORDER-(filterSize/2); //top and bottom row for (int fj=0; fj<filterSize; fj++) { sum += shinput[ty+0+offset][tx+fj+offset]; sum += shinput[ty+filterSize-1+offset][tx+fj+offset]; } //two sides (between top and bottom rows) for (int fi=1; fi<filterSize-1; fi++) { sum += shinput[ty+fi+offset][tx+0+offset]; sum += shinput[ty+fi+offset][tx+filterSize-1+offset]; } //store minimum float avg = sum / (filterSize*filterSize); res = avg < res ? avg : res; } #endif //write output varest[(i+ty)*w+(j+tx)] = res; } /** * This method is a naive implementation of computeVarianceEstimates used for correctness checks */ __global__ void computeVarianceEstimates_naive(int h, int w, float* varest, float* input) { int i = threadIdx.y + blockIdx.y * block_size_y; int j = threadIdx.x + blockIdx.x * block_size_x; float res = FLT_MAX; if (i < h && j < w) { const int filter[FILTERS] = FILTER_SIZES; for (int f = 0; f < FILTERS; f++) { int filterSize = filter[f]; int border = filterSize/2; //do a convolution float sum = 0.0f; for (int fi = 0; fi < filterSize; fi++) { for (int fj = 0; fj < filterSize; fj++) { //original //sum += input[(i + fi)*(w+border*2)+(j + fj)]; int row = i+fi-border; int col = j+fj-border; //the following ifs are a hack to save redundant copying if (row >= 0 && row < h) { if (col >= 0 && col < w) { sum += input[row*w + col]; } } } } sum /= (float)(filterSize * filterSize); if (sum < res) { res = sum; } } //write output varest[i*w+j] = res; } } /* * This method computes the variance of an input array, assuming the mean is equal to zero * * Thread block size should be power of two because of the reduction. 
* The implementation currently assumes only one thread block is used for the entire input array * * In case of multiple thread blocks initialize output to zero and use atomic add or another kernel * * block_size_x power of 2 */ #ifndef grid_size_x //hack to see if the Kernel Tuner is being used #undef block_size_x #define block_size_x 128 #endif __global__ void computeVarianceZeroMean(int n, float *output, float *input) { int x = blockIdx.x * block_size_x + threadIdx.x; int ti = threadIdx.x; int step_size = block_size_x * gridDim.x; float sum = 0.0f; if (x < n) { //compute thread-local sums of squares for (int i=x; i < n; i+=step_size) { sum += input[i]*input[i]; } } //store local sums in shared memory __shared__ float shmem[block_size_x]; shmem[ti] = sum; __syncthreads(); //reduce local sums for (unsigned int s=block_size_x/2; s>0; s>>=1) { if (ti < s) { shmem[ti] += shmem[ti + s]; } __syncthreads(); } //write result if (ti == 0) { output[blockIdx.x] = ( shmem[0] * n ) / ( n - 1 ); //in case of multiple threadblocks write back using atomicAdd } } /* * Simple CUDA Helper function to reduce the output of a * reduction kernel with multiple thread blocks to a single value * * This function performs a sum of an array of floats * * This function is to be called with only a single thread block */ __global__ void sumFloats(float *output, float *input, int n) { int ti = threadIdx.x; __shared__ float shmem[block_size_x]; //compute thread-local sums float sum = 0.0f; for (int i=ti; i < n; i+=block_size_x) { sum += input[i]; } //store local sums in shared memory shmem[ti] = sum; __syncthreads(); //reduce local sums for (unsigned int s=block_size_x/2; s>0; s>>=1) { if (ti < s) { shmem[ti] += shmem[ti + s]; } __syncthreads(); } //write result if (ti == 0) { output[0] = shmem[0]; } }
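The header comment lists the order in which these kernels are combined with a forward and an inverse Fourier transform. The sketch below is only a hypothetical host-side rendering of that sequence: it assumes it is compiled together with the kernels above, that the default 32x16 thread blocks (and 128 threads for the reduction) are in effect, and that cuFFT performs the transforms on the same interleaved real/imaginary layout these kernels use; the buffer names are illustrative:

#include <cuda_runtime.h>
#include <cufft.h>

void applyWienerFilter(int h, int w,
                       float *d_input,     // h*w reals: the PRNU pattern
                       float *d_complex,   // h*w*2 floats: interleaved frequencies
                       float *d_sqmag,     // h*w reals: squared magnitudes
                       float *d_varest,    // h*w reals: local variance estimates
                       float *d_variance,  // 1 float: global variance
                       float *d_output)    // h*w reals: filtered pattern
{
  // Launch geometry must match the block_size_x/block_size_y the kernels were built with.
  dim3 threads(32, 16);
  dim3 grid((w + 31) / 32, (h + 15) / 16);

  cufftHandle plan;
  cufftPlan2d(&plan, h, w, CUFFT_C2C);

  // Forward transform of the pattern.
  toComplex<<<grid, threads>>>(h, w, d_complex, d_input);
  cufftExecC2C(plan, (cufftComplex *)d_complex, (cufftComplex *)d_complex, CUFFT_FORWARD);

  // Local and global variances of the squared magnitudes (order as in the header comment).
  computeSquaredMagnitudes<<<grid, threads>>>(h, w, d_sqmag, d_complex);
  computeVarianceEstimates<<<grid, threads>>>(h, w, d_varest, d_sqmag);
  computeVarianceZeroMean<<<1, 128>>>(h * w, d_variance, d_sqmag);

  // Scale the frequencies, transform back, and keep the normalised real part.
  scaleWithVariances<<<grid, threads>>>(h, w, d_complex, d_complex, d_varest, d_variance);
  cufftExecC2C(plan, (cufftComplex *)d_complex, (cufftComplex *)d_complex, CUFFT_INVERSE);
  normalizeToReal<<<grid, threads>>>(h, w, d_output, d_complex);

  cufftDestroy(plan);
}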
582ba2e16bc214c951c96d824a10cb083c6e0900.hip
// !!! This is a file automatically generated by hipify!!! // do not modify this file // call each kernel implemented in the kernel.cu // generates timing info // tests for functional verification #include <thrust/sort.h> #include <thrust/execution_policy.h> #include <thrust/device_ptr.h> #include <hip/hip_runtime.h> #include<stdlib.h> #include <wb.h> #include "kernel.hip" #define NUM_BINS 4096 #define CUDA_CHECK(ans) \ { gpuAssert((ans), __FILE__, __LINE__); } inline void gpuAssert(hipError_t code, const char *file, int line, bool abort = true) { if (code != hipSuccess) { fprintf(stderr, "GPUassert: %s %s %d\n", hipGetErrorString(code), file, line); if (abort) exit(code); } } void histogram(unsigned int *input, unsigned int *bins, unsigned int num_elements, unsigned int num_bins, int kernel_version) { if (kernel_version == 0) { // zero out bins CUDA_CHECK(hipMemset(bins, 0, num_bins * sizeof(unsigned int))); // Launch histogram kernel on the bins { dim3 blockDim(512), gridDim(30); hipLaunchKernelGGL(( histogram_global_kernel), dim3(gridDim), dim3(blockDim), num_bins * sizeof(unsigned int), 0, input, bins, num_elements, num_bins); CUDA_CHECK(hipGetLastError()); CUDA_CHECK(hipDeviceSynchronize()); } // Make sure bin values are not too large { dim3 blockDim(512); dim3 gridDim((num_bins + blockDim.x - 1) / blockDim.x); hipLaunchKernelGGL(( convert_kernel), dim3(gridDim), dim3(blockDim), 0, 0, bins, num_bins); CUDA_CHECK(hipGetLastError()); CUDA_CHECK(hipDeviceSynchronize()); } } else if (kernel_version==1) { // zero out bins CUDA_CHECK(hipMemset(bins, 0, num_bins * sizeof(unsigned int))); // Launch histogram kernel on the bins { dim3 blockDim(512), gridDim(30); hipLaunchKernelGGL(( histogram_shared_kernel), dim3(gridDim), dim3(blockDim), num_bins * sizeof(unsigned int), 0, input, bins, num_elements, num_bins); CUDA_CHECK(hipGetLastError()); CUDA_CHECK(hipDeviceSynchronize()); } // Make sure bin values are not too large { dim3 blockDim(512); dim3 gridDim((num_bins + blockDim.x - 1) / blockDim.x); hipLaunchKernelGGL(( convert_kernel), dim3(gridDim), dim3(blockDim), 0, 0, bins, num_bins); CUDA_CHECK(hipGetLastError()); CUDA_CHECK(hipDeviceSynchronize()); } } else if (kernel_version==2) { // zero out bins CUDA_CHECK(hipMemset(bins, 0, num_bins * sizeof(unsigned int))); // Launch histogram kernel on the bins { dim3 blockDim(512), gridDim(30); hipLaunchKernelGGL(( histogram_shared_accumulate_kernel), dim3(gridDim), dim3(blockDim), num_bins * sizeof(unsigned int), 0, input, bins, num_elements, num_bins); CUDA_CHECK(hipGetLastError()); CUDA_CHECK(hipDeviceSynchronize()); } // Make sure bin values are not too large { dim3 blockDim(512); dim3 gridDim((num_bins + blockDim.x - 1) / blockDim.x); hipLaunchKernelGGL(( convert_kernel), dim3(gridDim), dim3(blockDim), 0, 0, bins, num_bins); CUDA_CHECK(hipGetLastError()); CUDA_CHECK(hipDeviceSynchronize()); } } } int main(int argc, char *argv[]) { wbArg_t args; int inputLength; int version; // kernel version global or shared unsigned int *hostInput; unsigned int *hostBins; unsigned int *deviceInput; unsigned int *deviceBins; hipEvent_t astartEvent, astopEvent; float aelapsedTime; hipEventCreate(&astartEvent); hipEventCreate(&astopEvent); args = wbArg_read(argc, argv); wbTime_start(Generic, "Importing data and creating memory on host"); hostInput = (unsigned int *)wbImport(wbArg_getInputFile(args, 0), &inputLength, "Integer"); hostBins = (unsigned int *)malloc(NUM_BINS * sizeof(unsigned int)); wbTime_stop(Generic, "Importing data and creating memory on host"); 
wbLog(TRACE, "The input length is ", inputLength); wbLog(TRACE, "The number of bins is ", NUM_BINS); wbTime_start(GPU, "Allocating GPU memory."); //@@ Allocate GPU memory here CUDA_CHECK(hipMalloc((void **)&deviceInput, inputLength * sizeof(unsigned int))); CUDA_CHECK( hipMalloc((void **)&deviceBins, NUM_BINS * sizeof(unsigned int))); CUDA_CHECK(hipDeviceSynchronize()); wbTime_stop(GPU, "Allocating GPU memory."); wbTime_start(GPU, "Copying input memory to the GPU."); //@@ Copy memory to the GPU here CUDA_CHECK(hipMemcpy(deviceInput, hostInput, inputLength * sizeof(unsigned int), hipMemcpyHostToDevice)); CUDA_CHECK(hipDeviceSynchronize()); wbTime_stop(GPU, "Copying input memory to the GPU."); // Launch kernel // ---------------------------------------------------------- // wbTime_start(Compute, "Performing CUDA computation"); version = atoi(argv[5]); hipEventRecord(astartEvent, 0); histogram(deviceInput, deviceBins, inputLength, NUM_BINS,version); // wbTime_stop(Compute, "Performing CUDA computation"); hipEventRecord(astopEvent, 0); hipEventSynchronize(astopEvent); hipEventElapsedTime(&aelapsedTime, astartEvent, astopEvent); printf("\n"); printf("Total compute time (ms) %f for version %d\n",aelapsedTime,version); printf("\n"); wbTime_start(Copy, "Copying output memory to the CPU"); //@@ Copy the GPU memory back to the CPU here CUDA_CHECK(hipMemcpy(hostBins, deviceBins, NUM_BINS * sizeof(unsigned int), hipMemcpyDeviceToHost)); CUDA_CHECK(hipDeviceSynchronize()); wbTime_stop(Copy, "Copying output memory to the CPU"); // Verify correctness // ----------------------------------------------------- printf ("running version %d\n", version); if (version == 0 ) wbLog(TRACE, "Checking global memory only kernel"); else if (version == 1) wbLog(TRACE, "Launching shared memory kernel"); else if (version == 2) wbLog(TRACE, "Launching accumulator kernel"); wbSolution(args, hostBins, NUM_BINS); wbTime_start(GPU, "Freeing GPU Memory"); //@@ Free the GPU memory here CUDA_CHECK(hipFree(deviceInput)); CUDA_CHECK(hipFree(deviceBins)); wbTime_stop(GPU, "Freeing GPU Memory"); free(hostBins); free(hostInput); return 0; }
582ba2e16bc214c951c96d824a10cb083c6e0900.cu
// do not modify this file // call each kernel implemented in the kernel.cu // generates timing info // tests for functional verification #include <thrust/sort.h> #include <thrust/execution_policy.h> #include <thrust/device_ptr.h> #include <cuda_runtime.h> #include<stdlib.h> #include <wb.h> #include "kernel.cu" #define NUM_BINS 4096 #define CUDA_CHECK(ans) \ { gpuAssert((ans), __FILE__, __LINE__); } inline void gpuAssert(cudaError_t code, const char *file, int line, bool abort = true) { if (code != cudaSuccess) { fprintf(stderr, "GPUassert: %s %s %d\n", cudaGetErrorString(code), file, line); if (abort) exit(code); } } void histogram(unsigned int *input, unsigned int *bins, unsigned int num_elements, unsigned int num_bins, int kernel_version) { if (kernel_version == 0) { // zero out bins CUDA_CHECK(cudaMemset(bins, 0, num_bins * sizeof(unsigned int))); // Launch histogram kernel on the bins { dim3 blockDim(512), gridDim(30); histogram_global_kernel<<<gridDim, blockDim, num_bins * sizeof(unsigned int)>>>( input, bins, num_elements, num_bins); CUDA_CHECK(cudaGetLastError()); CUDA_CHECK(cudaDeviceSynchronize()); } // Make sure bin values are not too large { dim3 blockDim(512); dim3 gridDim((num_bins + blockDim.x - 1) / blockDim.x); convert_kernel<<<gridDim, blockDim>>>(bins, num_bins); CUDA_CHECK(cudaGetLastError()); CUDA_CHECK(cudaDeviceSynchronize()); } } else if (kernel_version==1) { // zero out bins CUDA_CHECK(cudaMemset(bins, 0, num_bins * sizeof(unsigned int))); // Launch histogram kernel on the bins { dim3 blockDim(512), gridDim(30); histogram_shared_kernel<<<gridDim, blockDim, num_bins * sizeof(unsigned int)>>>( input, bins, num_elements, num_bins); CUDA_CHECK(cudaGetLastError()); CUDA_CHECK(cudaDeviceSynchronize()); } // Make sure bin values are not too large { dim3 blockDim(512); dim3 gridDim((num_bins + blockDim.x - 1) / blockDim.x); convert_kernel<<<gridDim, blockDim>>>(bins, num_bins); CUDA_CHECK(cudaGetLastError()); CUDA_CHECK(cudaDeviceSynchronize()); } } else if (kernel_version==2) { // zero out bins CUDA_CHECK(cudaMemset(bins, 0, num_bins * sizeof(unsigned int))); // Launch histogram kernel on the bins { dim3 blockDim(512), gridDim(30); histogram_shared_accumulate_kernel<<<gridDim, blockDim, num_bins * sizeof(unsigned int)>>>( input, bins, num_elements, num_bins); CUDA_CHECK(cudaGetLastError()); CUDA_CHECK(cudaDeviceSynchronize()); } // Make sure bin values are not too large { dim3 blockDim(512); dim3 gridDim((num_bins + blockDim.x - 1) / blockDim.x); convert_kernel<<<gridDim, blockDim>>>(bins, num_bins); CUDA_CHECK(cudaGetLastError()); CUDA_CHECK(cudaDeviceSynchronize()); } } } int main(int argc, char *argv[]) { wbArg_t args; int inputLength; int version; // kernel version global or shared unsigned int *hostInput; unsigned int *hostBins; unsigned int *deviceInput; unsigned int *deviceBins; cudaEvent_t astartEvent, astopEvent; float aelapsedTime; cudaEventCreate(&astartEvent); cudaEventCreate(&astopEvent); args = wbArg_read(argc, argv); wbTime_start(Generic, "Importing data and creating memory on host"); hostInput = (unsigned int *)wbImport(wbArg_getInputFile(args, 0), &inputLength, "Integer"); hostBins = (unsigned int *)malloc(NUM_BINS * sizeof(unsigned int)); wbTime_stop(Generic, "Importing data and creating memory on host"); wbLog(TRACE, "The input length is ", inputLength); wbLog(TRACE, "The number of bins is ", NUM_BINS); wbTime_start(GPU, "Allocating GPU memory."); //@@ Allocate GPU memory here CUDA_CHECK(cudaMalloc((void **)&deviceInput, inputLength * sizeof(unsigned 
int))); CUDA_CHECK( cudaMalloc((void **)&deviceBins, NUM_BINS * sizeof(unsigned int))); CUDA_CHECK(cudaDeviceSynchronize()); wbTime_stop(GPU, "Allocating GPU memory."); wbTime_start(GPU, "Copying input memory to the GPU."); //@@ Copy memory to the GPU here CUDA_CHECK(cudaMemcpy(deviceInput, hostInput, inputLength * sizeof(unsigned int), cudaMemcpyHostToDevice)); CUDA_CHECK(cudaDeviceSynchronize()); wbTime_stop(GPU, "Copying input memory to the GPU."); // Launch kernel // ---------------------------------------------------------- // wbTime_start(Compute, "Performing CUDA computation"); version = atoi(argv[5]); cudaEventRecord(astartEvent, 0); histogram(deviceInput, deviceBins, inputLength, NUM_BINS,version); // wbTime_stop(Compute, "Performing CUDA computation"); cudaEventRecord(astopEvent, 0); cudaEventSynchronize(astopEvent); cudaEventElapsedTime(&aelapsedTime, astartEvent, astopEvent); printf("\n"); printf("Total compute time (ms) %f for version %d\n",aelapsedTime,version); printf("\n"); wbTime_start(Copy, "Copying output memory to the CPU"); //@@ Copy the GPU memory back to the CPU here CUDA_CHECK(cudaMemcpy(hostBins, deviceBins, NUM_BINS * sizeof(unsigned int), cudaMemcpyDeviceToHost)); CUDA_CHECK(cudaDeviceSynchronize()); wbTime_stop(Copy, "Copying output memory to the CPU"); // Verify correctness // ----------------------------------------------------- printf ("running version %d\n", version); if (version == 0 ) wbLog(TRACE, "Checking global memory only kernel"); else if (version == 1) wbLog(TRACE, "Launching shared memory kernel"); else if (version == 2) wbLog(TRACE, "Launching accumulator kernel"); wbSolution(args, hostBins, NUM_BINS); wbTime_start(GPU, "Freeing GPU Memory"); //@@ Free the GPU memory here CUDA_CHECK(cudaFree(deviceInput)); CUDA_CHECK(cudaFree(deviceBins)); wbTime_stop(GPU, "Freeing GPU Memory"); free(hostBins); free(hostInput); return 0; }
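The driver above depends on histogram_global_kernel, histogram_shared_kernel, histogram_shared_accumulate_kernel and convert_kernel from kernel.cu, which is not included in this file. As a rough illustration only (this is not the actual kernel.cu), the launch configuration, a fixed 30-block grid plus num_bins * sizeof(unsigned int) bytes of dynamic shared memory, points at a privatized, grid-stride histogram of roughly this shape:

__global__ void histogram_shared_kernel_sketch(unsigned int *input, unsigned int *bins,
                                               unsigned int num_elements,
                                               unsigned int num_bins)
{
  extern __shared__ unsigned int bins_s[];  // one private counter per bin, per block

  // Zero this block's private copy of the histogram.
  for (unsigned int b = threadIdx.x; b < num_bins; b += blockDim.x)
    bins_s[b] = 0;
  __syncthreads();

  // Grid-stride loop over the input, accumulating into shared memory.
  for (unsigned int i = blockIdx.x * blockDim.x + threadIdx.x;
       i < num_elements; i += blockDim.x * gridDim.x)
    atomicAdd(&bins_s[input[i] % num_bins], 1u);  // modulo only as a defensive bound
  __syncthreads();

  // Merge the private histogram into the global one.
  for (unsigned int b = threadIdx.x; b < num_bins; b += blockDim.x)
    atomicAdd(&bins[b], bins_s[b]);
}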
7a2d0bad415e2c29dc1def47ad5cf1636ad88566.hip
// !!! This is a file automatically generated by hipify!!! /** * Copyright 1993-2015 NVIDIA Corporation. All rights reserved. * * Please refer to the NVIDIA end user license agreement (EULA) associated * with this source code for terms and conditions that govern your use of * this software. Any use, reproduction, disclosure, or distribution of * this software and related documentation outside the terms of the EULA * is strictly prohibited. * */ /** * Vector addition: C = A + B. * * This sample is a very basic sample that implements element by element * vector addition. It is the same as the sample illustrating Chapter 2 * of the programming guide with some additions like error checking. */ #include <stdio.h> #include "../../mem_alloc/mem_alloc.h" // For the CUDA runtime routines (prefixed with "cuda_") #include <hip/hip_runtime.h> class BaseClass { public: virtual __device__ void doTheMath(float &c, float a, int numCompute) = 0; }; #define Derived(A) \ class Class##A : public BaseClass { \ public: \ virtual __device__ void doTheMath(float &c, float a, int numCompute) { \ for (int l = 0; l < numCompute; l++) c = c + a; \ } \ } Derived(0); Derived(1); Derived(2); Derived(3); Derived(4); Derived(5); Derived(6); Derived(7); Derived(8); Derived(9); Derived(10); Derived(11); Derived(12); Derived(13); Derived(14); Derived(15); Derived(16); Derived(17); Derived(18); Derived(19); Derived(20); Derived(21); Derived(22); Derived(23); Derived(24); Derived(25); Derived(26); Derived(27); Derived(28); Derived(29); Derived(30); Derived(31); #define ObjCase_cpu(A) \ case A: \ if (numElements > i) { \ array[i] = (BaseClass *)alloc->my_new<Class##A>(); \ break; \ } #define ObjCase(A) \ case A: \ if (numElements > i) { \ new (array[i]) Class##A(); \ break; \ } __managed__ range_tree_node *range_tree; __managed__ unsigned tree_size_g; __managed__ void *temp_ubench; void initialize_0(BaseClass **pointerArray, int numElements, int numClasses, int threadsPerBlock, obj_alloc *alloc) { int i; int threadIdx; BaseClass **array = pointerArray; for (i = 0; i < numElements; i++) { threadIdx = i ;/// threadsPerBlock; switch (threadIdx % numClasses) { ObjCase_cpu(0); ObjCase_cpu(1); ObjCase_cpu(2); ObjCase_cpu(3); ObjCase_cpu(4); ObjCase_cpu(5); ObjCase_cpu(6); ObjCase_cpu(7); ObjCase_cpu(8); ObjCase_cpu(9); ObjCase_cpu(10); ObjCase_cpu(11); ObjCase_cpu(12); ObjCase_cpu(13); ObjCase_cpu(14); ObjCase_cpu(15); ObjCase_cpu(16); ObjCase_cpu(17); ObjCase_cpu(18); ObjCase_cpu(19); ObjCase_cpu(20); ObjCase_cpu(21); ObjCase_cpu(22); ObjCase_cpu(23); ObjCase_cpu(24); ObjCase_cpu(25); ObjCase_cpu(26); ObjCase_cpu(27); ObjCase_cpu(28); ObjCase_cpu(29); ObjCase_cpu(30); ObjCase_cpu(31); } } } __global__ void initialize_1(BaseClass **pointerArray, int numElements, int numClasses) { int i = blockDim.x * blockIdx.x + threadIdx.x; BaseClass **array = pointerArray; switch (threadIdx.x % numClasses) { ObjCase(0); ObjCase(1); ObjCase(2); ObjCase(3); ObjCase(4); ObjCase(5); ObjCase(6); ObjCase(7); ObjCase(8); ObjCase(9); ObjCase(10); ObjCase(11); ObjCase(12); ObjCase(13); ObjCase(14); ObjCase(15); ObjCase(16); ObjCase(17); ObjCase(18); ObjCase(19); ObjCase(20); ObjCase(21); ObjCase(22); ObjCase(23); ObjCase(24); ObjCase(25); ObjCase(26); ObjCase(27); ObjCase(28); ObjCase(29); ObjCase(30); ObjCase(31); } } /** * CUDA Kernel Device code * * Computes the vector addition of A and B into C. The 3 vectors have the same * number of elements numElements. 
*/ __global__ void ooVectorAdd(const float *A, float *C, int numElements, BaseClass **classes, int numCompute) { int i = blockDim.x * blockIdx.x + threadIdx.x; BaseClass *myClass = classes[i]; unsigned tree_size = tree_size_g; range_tree_node *table = range_tree; void **vtable; if (i < numElements) { vtable = get_vfunc(myClass, table, tree_size); temp_ubench = vtable[0]; myClass->doTheMath(C[i], A[i], numCompute); } } /** * Host main routine */ int main(int argc, char **argv) { // Error code to check return values for CUDA calls hipError_t err = hipSuccess; mem_alloc shared_mem(4ULL * 1024 * 1024 * 1024); obj_alloc my_obj_alloc(&shared_mem); // Print the vector length to be used, and compute its size int numElements = atoi(argv[1]); // size of vector int numCompute = atoi(argv[3]); // vfunc body size int numClasses = atoi(argv[4]); // num of types size_t size = numElements * sizeof(float); printf("[Vector addition of %d elements]\n", numElements); // Allocate the host input vector A float *h_A = (float *)malloc(size); // Allocate the host input vector B float *h_B = (float *)malloc(size); // Allocate the host output vector C float *h_C = (float *)malloc(size); // Verify that allocations succeeded if (h_A == NULL || h_B == NULL || h_C == NULL) { fprintf(stderr, "Failed to allocate host vectors!\n"); exit(EXIT_FAILURE); } // Initialize the host input vectors for (int i = 0; i < numElements; ++i) { h_A[i] = rand() / (float)RAND_MAX; h_B[i] = rand() / (float)RAND_MAX; } hipDeviceSetLimit(hipLimitMallocHeapSize, 4ULL * 1024 * 1024 * 1024); // Allocate the device input vector A float *d_A = NULL; err = hipMalloc((void **)&d_A, size); if (err != hipSuccess) { fprintf(stderr, "Failed to allocate device vector A (error code %s)!\n", hipGetErrorString(err)); exit(EXIT_FAILURE); } // Allocate the device input vector B float *d_B = NULL; err = hipMalloc((void **)&d_B, size); if (err != hipSuccess) { fprintf(stderr, "Failed to allocate device vector B (error code %s)!\n", hipGetErrorString(err)); exit(EXIT_FAILURE); } // Allocate the device output vector C float *d_C = NULL; err = hipMalloc((void **)&d_C, size); if (err != hipSuccess) { fprintf(stderr, "Failed to allocate device vector C (error code %s)!\n", hipGetErrorString(err)); exit(EXIT_FAILURE); } // Copy the host input vectors A and B in host memory to the device input // vectors in // device memory printf("Copy input data from the host memory to the CUDA device\n"); err = hipMemcpy(d_A, h_A, size, hipMemcpyHostToDevice); if (err != hipSuccess) { fprintf( stderr, "Failed to copy vector A from host to device (error code %s)!\n", hipGetErrorString(err)); exit(EXIT_FAILURE); } err = hipMemcpy(d_B, h_B, size, hipMemcpyHostToDevice); if (err != hipSuccess) { fprintf( stderr, "Failed to copy vector B from host to device (error code %s)!\n", hipGetErrorString(err)); exit(EXIT_FAILURE); } BaseClass **classes = NULL; // hipMalloc((void***)&classes, sizeof(BaseClass*)*numElements); classes = (BaseClass **)my_obj_alloc.calloc<BaseClass *>(numElements); // Launch the Vector Add CUDA Kernel int threadsPerBlock = atoi(argv[2]); // thread per block int blocksPerGrid = (numElements + threadsPerBlock - 1) / threadsPerBlock; initialize_0(classes, numElements, numClasses, threadsPerBlock, &my_obj_alloc); hipLaunchKernelGGL(( initialize_1), dim3(blocksPerGrid), dim3(threadsPerBlock), 0, 0, classes, numElements, numClasses); hipDeviceSynchronize(); err = hipGetLastError(); if (err != hipSuccess) { fprintf(stderr, "Failed to launch initialize kernel (error code 
%s)!\n", hipGetErrorString(err)); exit(EXIT_FAILURE); } my_obj_alloc.create_tree(); range_tree = my_obj_alloc.get_range_tree(); tree_size_g = my_obj_alloc.get_tree_size(); printf("CUDA kernel launch with %d blocks of %d threads\n", blocksPerGrid, threadsPerBlock); hipLaunchKernelGGL(( ooVectorAdd), dim3(blocksPerGrid), dim3(threadsPerBlock), 0, 0, d_A, d_C, numElements, classes, numCompute); hipDeviceSynchronize(); err = hipGetLastError(); if (err != hipSuccess) { fprintf(stderr, "Failed to launch ooVectorAdd kernel (error code %s)!\n", hipGetErrorString(err)); exit(EXIT_FAILURE); } // Copy the device result vector in device memory to the host result vector // in host memory. printf("Copy output data from the CUDA device to the host memory\n"); err = hipMemcpy(h_C, d_C, size, hipMemcpyDeviceToHost); if (err != hipSuccess) { fprintf( stderr, "Failed to copy vector C from device to host (error code %s)!\n", hipGetErrorString(err)); exit(EXIT_FAILURE); } // Verify that the result vector is correct for (int i = 0; i < numElements; ++i) { float result = 0; for (int j = 0; j < numCompute; j++) result += h_A[i]; if (fabs(result - h_C[i]) > 1e-3) { fprintf(stderr, "Result verification failed at element %d!\n", i); exit(EXIT_FAILURE); } } printf("Test PASSED\n"); // Free device global memory err = hipFree(d_A); if (err != hipSuccess) { fprintf(stderr, "Failed to free device vector A (error code %s)!\n", hipGetErrorString(err)); exit(EXIT_FAILURE); } err = hipFree(d_B); if (err != hipSuccess) { fprintf(stderr, "Failed to free device vector B (error code %s)!\n", hipGetErrorString(err)); exit(EXIT_FAILURE); } err = hipFree(d_C); if (err != hipSuccess) { fprintf(stderr, "Failed to free device vector C (error code %s)!\n", hipGetErrorString(err)); exit(EXIT_FAILURE); } // Free host memory free(h_A); free(h_B); free(h_C); printf("Done\n"); return 0; }
7a2d0bad415e2c29dc1def47ad5cf1636ad88566.cu
/** * Copyright 1993-2015 NVIDIA Corporation. All rights reserved. * * Please refer to the NVIDIA end user license agreement (EULA) associated * with this source code for terms and conditions that govern your use of * this software. Any use, reproduction, disclosure, or distribution of * this software and related documentation outside the terms of the EULA * is strictly prohibited. * */ /** * Vector addition: C = A + B. * * This sample is a very basic sample that implements element by element * vector addition. It is the same as the sample illustrating Chapter 2 * of the programming guide with some additions like error checking. */ #include <stdio.h> #include "../../mem_alloc/mem_alloc.h" // For the CUDA runtime routines (prefixed with "cuda_") #include <cuda_runtime.h> class BaseClass { public: virtual __device__ void doTheMath(float &c, float a, int numCompute) = 0; }; #define Derived(A) \ class Class##A : public BaseClass { \ public: \ virtual __device__ void doTheMath(float &c, float a, int numCompute) { \ for (int l = 0; l < numCompute; l++) c = c + a; \ } \ } Derived(0); Derived(1); Derived(2); Derived(3); Derived(4); Derived(5); Derived(6); Derived(7); Derived(8); Derived(9); Derived(10); Derived(11); Derived(12); Derived(13); Derived(14); Derived(15); Derived(16); Derived(17); Derived(18); Derived(19); Derived(20); Derived(21); Derived(22); Derived(23); Derived(24); Derived(25); Derived(26); Derived(27); Derived(28); Derived(29); Derived(30); Derived(31); #define ObjCase_cpu(A) \ case A: \ if (numElements > i) { \ array[i] = (BaseClass *)alloc->my_new<Class##A>(); \ break; \ } #define ObjCase(A) \ case A: \ if (numElements > i) { \ new (array[i]) Class##A(); \ break; \ } __managed__ range_tree_node *range_tree; __managed__ unsigned tree_size_g; __managed__ void *temp_ubench; void initialize_0(BaseClass **pointerArray, int numElements, int numClasses, int threadsPerBlock, obj_alloc *alloc) { int i; int threadIdx; BaseClass **array = pointerArray; for (i = 0; i < numElements; i++) { threadIdx = i ;/// threadsPerBlock; switch (threadIdx % numClasses) { ObjCase_cpu(0); ObjCase_cpu(1); ObjCase_cpu(2); ObjCase_cpu(3); ObjCase_cpu(4); ObjCase_cpu(5); ObjCase_cpu(6); ObjCase_cpu(7); ObjCase_cpu(8); ObjCase_cpu(9); ObjCase_cpu(10); ObjCase_cpu(11); ObjCase_cpu(12); ObjCase_cpu(13); ObjCase_cpu(14); ObjCase_cpu(15); ObjCase_cpu(16); ObjCase_cpu(17); ObjCase_cpu(18); ObjCase_cpu(19); ObjCase_cpu(20); ObjCase_cpu(21); ObjCase_cpu(22); ObjCase_cpu(23); ObjCase_cpu(24); ObjCase_cpu(25); ObjCase_cpu(26); ObjCase_cpu(27); ObjCase_cpu(28); ObjCase_cpu(29); ObjCase_cpu(30); ObjCase_cpu(31); } } } __global__ void initialize_1(BaseClass **pointerArray, int numElements, int numClasses) { int i = blockDim.x * blockIdx.x + threadIdx.x; BaseClass **array = pointerArray; switch (threadIdx.x % numClasses) { ObjCase(0); ObjCase(1); ObjCase(2); ObjCase(3); ObjCase(4); ObjCase(5); ObjCase(6); ObjCase(7); ObjCase(8); ObjCase(9); ObjCase(10); ObjCase(11); ObjCase(12); ObjCase(13); ObjCase(14); ObjCase(15); ObjCase(16); ObjCase(17); ObjCase(18); ObjCase(19); ObjCase(20); ObjCase(21); ObjCase(22); ObjCase(23); ObjCase(24); ObjCase(25); ObjCase(26); ObjCase(27); ObjCase(28); ObjCase(29); ObjCase(30); ObjCase(31); } } /** * CUDA Kernel Device code * * Computes the vector addition of A and B into C. The 3 vectors have the same * number of elements numElements. 
 */
__global__ void ooVectorAdd(const float *A, float *C, int numElements,
                            BaseClass **classes, int numCompute) {
  int i = blockDim.x * blockIdx.x + threadIdx.x;
  BaseClass *myClass = classes[i];
  unsigned tree_size = tree_size_g;
  range_tree_node *table = range_tree;
  void **vtable;
  if (i < numElements) {
    vtable = get_vfunc(myClass, table, tree_size);
    temp_ubench = vtable[0];
    myClass->doTheMath(C[i], A[i], numCompute);
  }
}

/**
 * Host main routine
 */
int main(int argc, char **argv) {
  // Error code to check return values for CUDA calls
  cudaError_t err = cudaSuccess;
  mem_alloc shared_mem(4ULL * 1024 * 1024 * 1024);
  obj_alloc my_obj_alloc(&shared_mem);
  // Print the vector length to be used, and compute its size
  int numElements = atoi(argv[1]);  // size of vector
  int numCompute = atoi(argv[3]);   // vfunc body size
  int numClasses = atoi(argv[4]);   // num of types
  size_t size = numElements * sizeof(float);
  printf("[Vector addition of %d elements]\n", numElements);

  // Allocate the host input vector A
  float *h_A = (float *)malloc(size);

  // Allocate the host input vector B
  float *h_B = (float *)malloc(size);

  // Allocate the host output vector C
  float *h_C = (float *)malloc(size);

  // Verify that allocations succeeded
  if (h_A == NULL || h_B == NULL || h_C == NULL) {
    fprintf(stderr, "Failed to allocate host vectors!\n");
    exit(EXIT_FAILURE);
  }

  // Initialize the host input vectors
  for (int i = 0; i < numElements; ++i) {
    h_A[i] = rand() / (float)RAND_MAX;
    h_B[i] = rand() / (float)RAND_MAX;
  }
  cudaDeviceSetLimit(cudaLimitMallocHeapSize, 4ULL * 1024 * 1024 * 1024);

  // Allocate the device input vector A
  float *d_A = NULL;
  err = cudaMalloc((void **)&d_A, size);

  if (err != cudaSuccess) {
    fprintf(stderr, "Failed to allocate device vector A (error code %s)!\n",
            cudaGetErrorString(err));
    exit(EXIT_FAILURE);
  }

  // Allocate the device input vector B
  float *d_B = NULL;
  err = cudaMalloc((void **)&d_B, size);

  if (err != cudaSuccess) {
    fprintf(stderr, "Failed to allocate device vector B (error code %s)!\n",
            cudaGetErrorString(err));
    exit(EXIT_FAILURE);
  }

  // Allocate the device output vector C
  float *d_C = NULL;
  err = cudaMalloc((void **)&d_C, size);

  if (err != cudaSuccess) {
    fprintf(stderr, "Failed to allocate device vector C (error code %s)!\n",
            cudaGetErrorString(err));
    exit(EXIT_FAILURE);
  }

  // Copy the host input vectors A and B in host memory to the device input
  // vectors in
  // device memory
  printf("Copy input data from the host memory to the CUDA device\n");
  err = cudaMemcpy(d_A, h_A, size, cudaMemcpyHostToDevice);

  if (err != cudaSuccess) {
    fprintf(
        stderr,
        "Failed to copy vector A from host to device (error code %s)!\n",
        cudaGetErrorString(err));
    exit(EXIT_FAILURE);
  }

  err = cudaMemcpy(d_B, h_B, size, cudaMemcpyHostToDevice);

  if (err != cudaSuccess) {
    fprintf(
        stderr,
        "Failed to copy vector B from host to device (error code %s)!\n",
        cudaGetErrorString(err));
    exit(EXIT_FAILURE);
  }

  BaseClass **classes = NULL;
  // cudaMalloc((void***)&classes, sizeof(BaseClass*)*numElements);
  classes = (BaseClass **)my_obj_alloc.calloc<BaseClass *>(numElements);

  // Launch the Vector Add CUDA Kernel
  int threadsPerBlock = atoi(argv[2]);  // thread per block
  int blocksPerGrid = (numElements + threadsPerBlock - 1) / threadsPerBlock;
  initialize_0(classes, numElements, numClasses, threadsPerBlock,
               &my_obj_alloc);
  initialize_1<<<blocksPerGrid, threadsPerBlock>>>(classes, numElements,
                                                   numClasses);
  cudaDeviceSynchronize();
  err = cudaGetLastError();
  if (err != cudaSuccess) {
    fprintf(stderr, "Failed to launch initialize kernel (error code %s)!\n",
            cudaGetErrorString(err));
    exit(EXIT_FAILURE);
  }
  my_obj_alloc.create_tree();
  range_tree = my_obj_alloc.get_range_tree();
  tree_size_g = my_obj_alloc.get_tree_size();
  printf("CUDA kernel launch with %d blocks of %d threads\n", blocksPerGrid,
         threadsPerBlock);
  ooVectorAdd<<<blocksPerGrid, threadsPerBlock>>>(d_A, d_C, numElements,
                                                  classes, numCompute);
  cudaDeviceSynchronize();
  err = cudaGetLastError();

  if (err != cudaSuccess) {
    fprintf(stderr, "Failed to launch ooVectorAdd kernel (error code %s)!\n",
            cudaGetErrorString(err));
    exit(EXIT_FAILURE);
  }

  // Copy the device result vector in device memory to the host result vector
  // in host memory.
  printf("Copy output data from the CUDA device to the host memory\n");
  err = cudaMemcpy(h_C, d_C, size, cudaMemcpyDeviceToHost);

  if (err != cudaSuccess) {
    fprintf(
        stderr,
        "Failed to copy vector C from device to host (error code %s)!\n",
        cudaGetErrorString(err));
    exit(EXIT_FAILURE);
  }

  // Verify that the result vector is correct
  for (int i = 0; i < numElements; ++i) {
    float result = 0;
    for (int j = 0; j < numCompute; j++) result += h_A[i];
    if (fabs(result - h_C[i]) > 1e-3) {
      fprintf(stderr, "Result verification failed at element %d!\n", i);
      exit(EXIT_FAILURE);
    }
  }

  printf("Test PASSED\n");

  // Free device global memory
  err = cudaFree(d_A);

  if (err != cudaSuccess) {
    fprintf(stderr, "Failed to free device vector A (error code %s)!\n",
            cudaGetErrorString(err));
    exit(EXIT_FAILURE);
  }

  err = cudaFree(d_B);

  if (err != cudaSuccess) {
    fprintf(stderr, "Failed to free device vector B (error code %s)!\n",
            cudaGetErrorString(err));
    exit(EXIT_FAILURE);
  }

  err = cudaFree(d_C);

  if (err != cudaSuccess) {
    fprintf(stderr, "Failed to free device vector C (error code %s)!\n",
            cudaGetErrorString(err));
    exit(EXIT_FAILURE);
  }

  // Free host memory
  free(h_A);
  free(h_B);
  free(h_C);

  printf("Done\n");
  return 0;
}
614f23050442c71fbaab9729d60ff15480393212.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// noshared.cu
//
// Variant of the gradient not using shared memory.
//
// u = grad(f)
// u is a vector field and f a scalar field.

__global__ void grad_kernel_noshared(const real* __restrict__ f,
    real * __restrict__ u, const real xfactor, const real yfactor,
    const real zfactor)
{
  // Global indices
  const int xi = blockIdx.x * blockDim.x + threadIdx.x + NGHOST;
  const int yi = blockIdx.y * blockDim.y + threadIdx.y + NGHOST;

  // Z-wise iteration values
  real behind3,
      behind2 = f[vfidx(xi, yi, 0)],
      behind1 = f[vfidx(xi, yi, 1)],
      current = f[vfidx(xi, yi, 2)],
      forward1 = f[vfidx(xi, yi, 3)],
      forward2 = f[vfidx(xi, yi, 4)],
      forward3 = f[vfidx(xi, yi, 5)];

  for (int zi = NGHOST; zi < NZ + NGHOST; zi++) {
    // Iterate through z dimension in registers
    behind3 = behind2;
    behind2 = behind1;
    behind1 = current;
    current = forward1;
    forward1 = forward2;
    forward2 = forward3;
    forward3 = f[vfidx(xi, yi, zi + 3)];

    // Compute the gradient
    u[vfidx(xi, yi, zi, 0)] = xfactor * fd1D(
        f[vfidx(xi - 3, yi, zi)], f[vfidx(xi - 2, yi, zi)],
        f[vfidx(xi - 1, yi, zi)], f[vfidx(xi + 1, yi, zi)],
        f[vfidx(xi + 2, yi, zi)], f[vfidx(xi + 3, yi, zi)]);

    u[vfidx(xi, yi, zi, 1)] = yfactor * fd1D(
        f[vfidx(xi, yi - 3, zi)], f[vfidx(xi, yi - 2, zi)],
        f[vfidx(xi, yi - 1, zi)], f[vfidx(xi, yi + 1, zi)],
        f[vfidx(xi, yi + 2, zi)], f[vfidx(xi, yi + 3, zi)]);

    u[vfidx(xi, yi, zi, 2)] = zfactor * fd1D(
        behind3, behind2, behind1, forward1, forward2, forward3);
  }
}

void grad_noshared(vf3dgpu &f, vf3dgpu &u)
{
  hipLaunchKernelGGL(( grad_kernel_noshared), dim3(xy_tile.nblocks), dim3(xy_tile.nthreads), 0, 0,
      f.mem(), u.mem(), 1.0/dx, 1.0/dy, 1.0/dz);
}
614f23050442c71fbaab9729d60ff15480393212.cu
// noshared.cu
//
// Variant of the gradient not using shared memory.
//
// u = grad(f)
// u is a vector field and f a scalar field.

__global__ void grad_kernel_noshared(const real* __restrict__ f,
    real * __restrict__ u, const real xfactor, const real yfactor,
    const real zfactor)
{
  // Global indices
  const int xi = blockIdx.x * blockDim.x + threadIdx.x + NGHOST;
  const int yi = blockIdx.y * blockDim.y + threadIdx.y + NGHOST;

  // Z-wise iteration values
  real behind3,
      behind2 = f[vfidx(xi, yi, 0)],
      behind1 = f[vfidx(xi, yi, 1)],
      current = f[vfidx(xi, yi, 2)],
      forward1 = f[vfidx(xi, yi, 3)],
      forward2 = f[vfidx(xi, yi, 4)],
      forward3 = f[vfidx(xi, yi, 5)];

  for (int zi = NGHOST; zi < NZ + NGHOST; zi++) {
    // Iterate through z dimension in registers
    behind3 = behind2;
    behind2 = behind1;
    behind1 = current;
    current = forward1;
    forward1 = forward2;
    forward2 = forward3;
    forward3 = f[vfidx(xi, yi, zi + 3)];

    // Compute the gradient
    u[vfidx(xi, yi, zi, 0)] = xfactor * fd1D(
        f[vfidx(xi - 3, yi, zi)], f[vfidx(xi - 2, yi, zi)],
        f[vfidx(xi - 1, yi, zi)], f[vfidx(xi + 1, yi, zi)],
        f[vfidx(xi + 2, yi, zi)], f[vfidx(xi + 3, yi, zi)]);

    u[vfidx(xi, yi, zi, 1)] = yfactor * fd1D(
        f[vfidx(xi, yi - 3, zi)], f[vfidx(xi, yi - 2, zi)],
        f[vfidx(xi, yi - 1, zi)], f[vfidx(xi, yi + 1, zi)],
        f[vfidx(xi, yi + 2, zi)], f[vfidx(xi, yi + 3, zi)]);

    u[vfidx(xi, yi, zi, 2)] = zfactor * fd1D(
        behind3, behind2, behind1, forward1, forward2, forward3);
  }
}

void grad_noshared(vf3dgpu &f, vf3dgpu &u)
{
  grad_kernel_noshared<<<xy_tile.nblocks, xy_tile.nthreads>>>(f.mem(), u.mem(),
      1.0/dx, 1.0/dy, 1.0/dz);
}